diff --git "a/5204.jsonl" "b/5204.jsonl" new file mode 100644--- /dev/null +++ "b/5204.jsonl" @@ -0,0 +1,630 @@ +{"seq_id":"204605443","text":"#5. 补全穴位数据\n\nimport sqlite3\nimport urllib\nimport random\nimport bs4\nimport Acupoint\n\nprint('running insert_acupoint.py')\n\nacupoint = Acupoint.Acupoint()\npath = \"../04_database/temp.db\"\nconnect = sqlite3.connect(path)\ncursor = connect.cursor()\n\nstart = 140\nwhile True:\n sql = 'SELECT \"id\", \"url\", \"description\" FROM \"acupoint\" limit ' + str(start) + ', 20'\n cursor.execute(sql)\n result = cursor.fetchall()\n if len(result) == 0:\n break\n for row in result:\n if row[2] is None or len(row[2]) == 0:\n # 获取穴位详细信息\n page = acupoint.getPage(row[1])\n if page == None:\n break\n detail = acupoint.getAcupointDetail(page)\n position = acupoint.getPosition(detail)\n indication = acupoint.getIndication(detail)\n acupuncture = acupoint.getAcupuncture(detail)\n cooperation = acupoint.getCooperation(detail)\n dict = {'position':position, 'indication':indication, 'acupuncture':acupuncture, 'cooperation':cooperation, 'description':detail}\n insertSQL = 'UPDATE \"acupoint\" SET \"position\" = \\'' + dict['position'] + '\\', \"indication\" = \\'' + dict['indication'] + '\\', \"acupuncture\" = \\'' + dict['acupuncture'] + '\\', \"cooperation\" = \\'' + dict['cooperation'] + '\\', \"description\" = \\'' + dict['description'] + '\\' WHERE \"id\" = \\'' + str(row[0]) + '\\''\n print(insertSQL)\n connect.execute(insertSQL)\n connect.commit()\n start += 20\nconnect.close()\n\n","sub_path":"03_python/Healthy/temp_complete_acupoint.py","file_name":"temp_complete_acupoint.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"480780803","text":"import nest\nimport pylab\nimport numpy\nimport random\nimport raster_plot_modified\nimport matplotlib.pyplot as plt\n\n\"\"\"V_m double - Membrane potential in mV\nE_L double - Resting membrane potential in mV.\nC_m double - Capacity of the membrane in pF\ntau_m double - Membrane time constant in ms.\nt_ref double - Duration of refractory period in ms.\nV_th double - Spike threshold in mV.\nV_reset double - Reset potential of the membrane in mV.\ntau_syn double - Rise time of the excitatory synaptic alpha function in ms.\nI_e double - Constant external input current in pA.\n\nC_( 250.0 ) // pF\nTau_( 10.0 ) // ms\ntau_syn_( 2.0 ) // ms\nTauR_( 2.0 ) // ms\nU0_( -70.0 ) // mV\nV_reset_( -70.0 - U0_ ) // mV, rel to U0_\nTheta_( -55.0 - U0_ ) // mV, rel to U0_\nI_e_( 0.0 ) // pA\"\"\"\n\ndef inh_neuron_parameters(inh_dictionary, neuron_population):\n\n nest.CopyModel(\"iaf_neuron\", \"inh_iaf_neuron\", params=inh_dictionary)\n\n ipop = nest.Create(\"inh_iaf_neuron\", neuron_population)\n\n for neuron in ipop:\n nest.SetStatus([neuron], {\"V_m\": inh_dictionary[\"E_L\"]+(inh_dictionary[\"V_th\"]-inh_dictionary[\"E_L\"])*numpy.random.rand()})\n\n return ipop\n\ndef exc_neuron_parameters(exc_dictionary, neuron_population):\n\n nest.CopyModel(\"iaf_neuron\", \"exc_iaf_neuron\", params=exc_dictionary)\n\n epop = nest.Create(\"exc_iaf_neuron\", neuron_population)\n\n for neuron in epop:\n nest.SetStatus([neuron], {\"V_m\": exc_dictionary[\"E_L\"]+(exc_dictionary[\"V_th\"]-exc_dictionary[\"E_L\"])*numpy.random.rand()})\n\n return epop\n\ndef random_neuron_generator(neuron_population, epop, ipop, simulation_time, d, Je, Ke, Ji, Ki, inh_dictionary, exc_dictionary):\n\n conn_dict_ex = {\"rule\": \"fixed_indegree\", \"indegree\": 
Ke}\n conn_dict_in = {\"rule\": \"fixed_indegree\", \"indegree\": Ki}\n syn_dict_ex = {\"delay\": d, \"weight\": Je}\n syn_dict_in = {\"delay\": d, \"weight\": Ji}\n nest.Connect(epop, ipop, conn_dict_ex, syn_dict_ex)\n nest.Connect(ipop, epop, conn_dict_in, syn_dict_in)\n nest.Connect(epop, epop, conn_dict_ex, syn_dict_ex)\n nest.Connect(ipop, ipop, conn_dict_in, syn_dict_in)\n\n multimeter_exc = nest.Create(\"multimeter\")\n multimeter_inh = nest.Create(\"multimeter\")\n\n nest.SetStatus(multimeter_exc, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n nest.SetStatus(multimeter_inh, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n\n multimeter_small = nest.Create(\"multimeter\")\n multimeter_large = nest.Create(\"multimeter\")\n\n nest.SetStatus(multimeter_small, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n nest.SetStatus(multimeter_large, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n\n multimeter_exc_0 = nest.Create(\"multimeter\")\n multimeter_exc_1 = nest.Create(\"multimeter\")\n multimeter_exc_2 = nest.Create(\"multimeter\")\n\n multimeter_inh_0 = nest.Create(\"multimeter\")\n multimeter_inh_1 = nest.Create(\"multimeter\")\n multimeter_inh_2 = nest.Create(\"multimeter\")\n\n nest.SetStatus(multimeter_exc_0, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n nest.SetStatus(multimeter_exc_1, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n nest.SetStatus(multimeter_exc_2, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n\n nest.SetStatus(multimeter_inh_0, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n nest.SetStatus(multimeter_inh_1, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n nest.SetStatus(multimeter_inh_2, {\"withtime\":True, \"record_from\":[\"V_m\"]})\n\n nest.Connect(multimeter_exc, epop[0:2])\n nest.Connect(multimeter_inh, ipop[0:2])\n\n nest.Connect(multimeter_exc_0, [epop[0]])\n nest.Connect(multimeter_exc_1, [epop[1]])\n nest.Connect(multimeter_exc_2, [epop[2]])\n\n nest.Connect(multimeter_inh_0, [ipop[0]])\n nest.Connect(multimeter_inh_1, [ipop[1]])\n nest.Connect(multimeter_inh_2, [ipop[2]])\n\n nest.Connect(multimeter_small, [epop[0]])\n nest.Connect(multimeter_small, [ipop[0]])\n\n nest.Connect(multimeter_large, epop)\n nest.Connect(multimeter_large, ipop)\n\n spikedetector_exc = nest.Create(\"spike_detector\", params={\"withgid\": True, \"withtime\": True})\n spikedetector_inh = nest.Create(\"spike_detector\", params={\"withgid\": True, \"withtime\": True})\n\n spikedetector = nest.Create(\"spike_detector\", params={\"withgid\": True, \"withtime\": True})\n\n for neuron_exc in epop:\n nest.Connect([neuron_exc], spikedetector_exc)\n nest.Connect([neuron_exc], spikedetector)\n for neuron_inh in ipop:\n nest.Connect([neuron_inh], spikedetector_inh)\n nest.Connect([neuron_inh], spikedetector)\n\n nest.Simulate(simulation_time)\n\n pylab.figure(\"Inhibitatory and Excitatory Figures\")\n\n dmm_exc_0 = nest.GetStatus(multimeter_exc_0)[0]\n Vms_exc_0 = dmm_exc_0[\"events\"][\"V_m\"]\n ts_exc_0 = dmm_exc_0[\"events\"][\"times\"]\n\n dmm_exc_1 = nest.GetStatus(multimeter_exc_1)[0]\n Vms_exc_1 = dmm_exc_1[\"events\"][\"V_m\"]\n ts_exc_1 = dmm_exc_1[\"events\"][\"times\"]\n\n dmm_exc_2 = nest.GetStatus(multimeter_exc_2)[0]\n Vms_exc_2 = dmm_exc_2[\"events\"][\"V_m\"]\n ts_exc_2 = dmm_exc_2[\"events\"][\"times\"]\n\n pylab.subplot2grid((3,3),(0,0), colspan=1)\n pylab.plot(ts_exc_0, Vms_exc_0)\n pylab.plot(ts_exc_1, Vms_exc_1)\n pylab.plot(ts_exc_2, Vms_exc_2)\n pylab.ylabel(\"Membrance Potential: mV\")\n pylab.xlabel(\"Time: ms\")\n pylab.title(\"Excitatory 
Neurons\")\n\n dmm_inh_0 = nest.GetStatus(multimeter_inh_0)[0]\n Vms_inh_0 = dmm_inh_0[\"events\"][\"V_m\"]\n ts_inh_0 = dmm_inh_0[\"events\"][\"times\"]\n\n dmm_inh_1 = nest.GetStatus(multimeter_inh_1)[0]\n Vms_inh_1 = dmm_inh_1[\"events\"][\"V_m\"]\n ts_inh_1 = dmm_inh_1[\"events\"][\"times\"]\n\n dmm_inh_2 = nest.GetStatus(multimeter_inh_2)[0]\n Vms_inh_2 = dmm_inh_2[\"events\"][\"V_m\"]\n ts_inh_2 = dmm_inh_2[\"events\"][\"times\"]\n\n pylab.subplot2grid((3,3),(0,1), colspan=1)\n pylab.plot(ts_inh_0, Vms_inh_0)\n pylab.plot(ts_inh_1, Vms_inh_1)\n pylab.plot(ts_inh_2, Vms_inh_2)\n pylab.ylabel(\"Membrance Potential: mV\")\n pylab.xlabel(\"Time: ms\")\n pylab.title(\"Inhibitatory Neurons\")\n\n dSD = nest.GetStatus(spikedetector_exc, keys='events')[0]\n evs = dSD[\"senders\"]\n ts = dSD[\"times\"]\n pylab.subplot2grid((3,3),(1,0), colspan=1)\n pylab.plot(ts, evs, \".\")\n pylab.ylabel(\"Neuron Number\")\n pylab.xlabel(\"Time: ms\")\n\n dSD = nest.GetStatus(spikedetector_inh, keys='events')[0]\n evs = dSD[\"senders\"]\n ts = dSD[\"times\"]\n pylab.subplot2grid((3,3),(1,1))\n pylab.plot(ts, evs, \"r.\")\n pylab.ylabel(\"Neuron Number\")\n pylab.xlabel(\"Time: ms\")\n\n dmm_exc = nest.GetStatus(multimeter_exc)[0]\n Vms_exc = dmm_exc[\"events\"][\"V_m\"]\n ts_exc = dmm_exc[\"events\"][\"times\"]\n\n pylab.subplot2grid((3,3),(2,0), colspan=1)\n pylab.plot(ts_exc, Vms_exc)\n pylab.ylabel(\"Neuron Number\")\n pylab.xlabel(\"Time: ms\")\n\n dmm_inh = nest.GetStatus(multimeter_inh)[0]\n Vms_inh = dmm_inh[\"events\"][\"V_m\"]\n ts_inh = dmm_inh[\"events\"][\"times\"]\n pylab.ylabel(\"Membrance Potential: mV\")\n pylab.xlabel(\"Time: ms\")\n\n pylab.subplot2grid((3,3),(2,1), colspan=1)\n pylab.plot(ts_inh, Vms_inh)\n pylab.ylabel(\"Membrance Potential: mV\")\n pylab.xlabel(\"Time: ms\")\n\n pylab.subplot2grid((3,3),(0,2), rowspan=3)\n pylab.title(\"Parameters\")\n i = 0\n pylab.text(0.1, 0.95,\"Inhibitatory\",horizontalalignment='left',verticalalignment='center',)\n pylab.text(0.5, 0.95,\"Excitatory\",horizontalalignment='left',verticalalignment='center',)\n\n for keys in inh_dictionary.items():\n\n pylab.text(0.1, 0.9 - i,keys,horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n i = 0\n for keys in exc_dictionary.items():\n\n pylab.text(0.5, 0.9 - i,keys,horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.1, 0.9 - i,\"Synaptic Connections\",horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.1, 0.9 - i,\"d: \" + str(d),horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.1, 0.9 - i,\"Je: \" + str(Je),horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.1, 0.9 - i,\"Ke: \" + str(Ke),horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.1, 0.9 - i,\"Ji: \" + str(Ji),horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.1, 0.9 - i,\"Ki: \" + str(Ki),horizontalalignment='left',verticalalignment='center',)\n i += 0.05\n\n pylab.text(0.05, 0.9 - i,\"dV_m/dt = - ( V_m - E_L ) / tau_m + ...\",horizontalalignment='left',verticalalignment='center',)\n i += 0.03\n\n pylab.text(0.05, 0.9 - i,\"... 
I_syn(t) / C_m + I_e / C_m\",horizontalalignment='left',verticalalignment='center',)\n i += 0.03\n\n pylab.text(0.05, 0.9 - i,\"I_syn(t) = Sum[w_j alpha(t-t_j)]\",horizontalalignment='left',verticalalignment='center',)\n i += 0.03\n\n pylab.text(0.05, 0.9 - i,\"t_j in input spike times\",horizontalalignment='left',verticalalignment='center',)\n i += 0.03\n\n pylab.text(0.05, 0.9 - i,\"alpha(t) = e * t/tau_s * e^{-t/tau_s} * Heaviside(t)\",horizontalalignment='left',verticalalignment='center',)\n i += 0.03\n\n pylab.figure(\"Mean Spike Rate\")\n\n #raster_plot_modified.from_device(spikedetector)\n #raster_plot_modified.from_device(spikedetector_inh, hist=False, red=True)\n #raster_plot_modified.from_device(spikedetector_exc, hist=False)\n\n pylab.figure(\"General Neuron Population\")\n dmm = nest.GetStatus(multimeter_small)[0]\n Vms = dmm[\"events\"][\"V_m\"]\n ts = dmm[\"events\"][\"times\"]\n pylab.subplot()\n pylab.plot(ts, Vms)\n pylab.ylabel(\"Membrance Potential: mV\")\n pylab.xlabel(\"Time: ms\")\n pylab.title(\"Neuron Population\")\n\n\n pylab.show()\n","sub_path":"Inh_Exc_Neurons.py","file_name":"Inh_Exc_Neurons.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"173207025","text":"import pathlib\nfrom pathlib import Path\nfrom typing import Dict, Tuple\n\nfrom gdsfactory.component import Component\nfrom gdsfactory.config import CONFIG\nfrom gdsfactory.name import dict2name\nfrom gdsfactory.tech import LAYER\n\n\ndef get_sparameters_path(\n component: Component,\n layer_to_material: Dict[Tuple[int, int], str],\n layer_to_thickness_nm: Dict[Tuple[int, int], int],\n dirpath: Path = CONFIG[\"sp\"],\n) -> Path:\n \"\"\"Returns Sparameters filepath.\n\n Args:\n component:\n dirpath\n layer_to_material: GDSlayer to material alias (see aliases in gf.sp.write)\n layer_to_thickness_nm: GDSlayer to thickness (nm)\n \"\"\"\n dirpath = pathlib.Path(dirpath)\n dirpath = (\n dirpath / component.function_name\n if hasattr(component, \"function_name\")\n else dirpath\n )\n dirpath.mkdir(exist_ok=True, parents=True)\n material2nm = {\n layer_to_material[layer]: layer_to_thickness_nm[layer]\n for layer in layer_to_thickness_nm.keys()\n if tuple(layer) in component.get_layers()\n }\n suffix = dict2name(**material2nm)\n return dirpath / f\"{component.get_name_long()}_{suffix}.dat\"\n\n\ndef test_get_sparameters_path() -> None:\n import gdsfactory as gf\n\n layer_to_thickness_nm_sample = {\n LAYER.WG: 220,\n LAYER.SLAB90: 90,\n }\n layer_to_material_sample = {\n LAYER.WG: \"si\",\n LAYER.SLAB90: \"si\",\n }\n\n c = gf.components.straight()\n p = get_sparameters_path(\n component=c,\n layer_to_thickness_nm=layer_to_thickness_nm_sample,\n layer_to_material=layer_to_material_sample,\n )\n assert p.stem == \"straight_si220\", p.stem\n\n c = gf.components.straight(layer=LAYER.SLAB90)\n p = get_sparameters_path(\n c,\n layer_to_thickness_nm=layer_to_thickness_nm_sample,\n layer_to_material=layer_to_material_sample,\n )\n assert p.stem == \"straight_layer3_0_si90\", p.stem\n\n\nif __name__ == \"__main__\":\n # import gdsfactory as gf\n # c = gf.components.straight()\n # p = get_sparameters_path(c)\n # print(p)\n\n test_get_sparameters_path()\n","sub_path":"gdsfactory/sp/get_sparameters_path.py","file_name":"get_sparameters_path.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"272791566","text":"\nimport 
sqlite3\n\nconn = sqlite3.connect('/home/jonathan/forward/db.sqlite')\n\nc = conn.cursor()\n\nc.execute('''CREATE TABLE aliases\n(alias text, forw_addr text, expire text)''')\n\n#c.execute('''INSERT INTO aliases VALUES(?,?,?)''',t)\n\nconn.commit()\n\nc.close()\n","sub_path":"createdb.py","file_name":"createdb.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"316553517","text":"import click\n\nfrom rastervision.command import Command\n\n\nclass PredictCommand(Command):\n def __init__(self, task, scenes):\n self.task = task\n self.scenes = scenes\n\n def run(self, tmp_dir=None):\n if not tmp_dir:\n tmp_dir = self.get_tmp_dir()\n msg = 'Making predictions...'\n click.echo(click.style(msg, fg='green'))\n self.task.predict(self.scenes, tmp_dir)\n","sub_path":"rastervision/command/predict_command.py","file_name":"predict_command.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"64969898","text":"import os\nimport sys\n\ndef reverse_file(path):\n assert os.path.exists(path)\n assert os.path.isfile(path)\n abspath = os.path.abspath(path)\n print(abspath)\n with open(abspath) as f:\n s_old = f.read()\n s_new = reverse_lines(s_old)\n with open(abspath, \"w\") as f:\n f.write(s_new)\n\ndef reverse_lines(s):\n liness = s.split(\"\\n\\n\");\n def f(s):\n lines = s.split(\"\\n\")\n r = reversed(lines)\n return \"\\n\".join(r)\n liness = [f(x) for x in liness]\n s_new = \"\\n\\n\".join(liness)\n return s_new\n\nif __name__ == \"__main__\":\n assert len(sys.argv) == 2\n reverse_file(sys.argv[1])\n","sub_path":"docs/htmltx/programming/190303_clojure_reverse_lines/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"582692103","text":"'''\nGiven a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number and not the value in the nodes.\n\nYou should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.\n\nExample 1:\n\nInput: 1->2->3->4->5->NULL\nOutput: 1->3->5->2->4->NULL\nExample 2:\n\nInput: 2->1->3->5->6->4->7->NULL\nOutput: 2->3->6->7->1->5->4->NULL\nNote:\n\nThe relative order inside both the even and odd groups should remain as it was in the input.\nThe first node is considered odd, the second node even and so on ...\n'''\n##############################################################################3\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def oddEvenList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head is None or head.next is None:\n return head\n p1,p2=head,head.next\n tmp1,tmp2=p1,p2\n \n while tmp2.next is not None and tmp2.next.next is not None:\n tmp1.next=tmp1.next.next\n tmp1=tmp1.next\n tmp2.next=tmp2.next.next\n tmp2=tmp2.next\n if tmp2.next is not None:\n tmp1.next=tmp1.next.next\n tmp1=tmp1.next\n tmp2.next=None\n tmp1.next=p2\n elif tmp2.next is None:\n tmp1.next=p2\n return p1\n \n \n","sub_path":"Linked List/00328. Odd Even Linked List.py","file_name":"00328. 
Odd Even Linked List.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"181739359","text":"import os\nimport logging\nimport requests\nimport json\n\n# Libs\nimport libs.time\n\n# Env vars\nROBIN_API_ORG_NAME = os.environ.get('ROBIN_API_ORG_NAME')\nROBIN_ACCESS_TOKEN = os.environ.get('ROBIN_ACCESS_TOKEN')\n\nlogging.info(ROBIN_API_ORG_NAME)\nlogging.info(ROBIN_ACCESS_TOKEN)\n\n# Constant vars\nSPACE_API = 'https://api.robinpowered.com/v1.0/free-busy/spaces'\nLOCATION_API = 'https://api.robinpowered.com/v1.0/organizations/{}/locations'\nBOOK_ROOM_API = 'https://api.robinpowered.com/v1.0/spaces/{}/events'\nMEETING_TITLE = 'Meeting booked for {}'\nHEADERS = {\n 'Authorization': 'Access-Token {}'.format(ROBIN_ACCESS_TOKEN),\n 'Content-Type': 'application/json'\n}\n\n\ndef get_locations(floor):\n params = {\n 'query': floor\n }\n location = json.loads(\n requests.get(\n LOCATION_API.format(ROBIN_API_ORG_NAME),\n headers=HEADERS,\n params=params).text)\n logging.info(location)\n return location['data']\n\n\ndef get_free_spaces(floors, start, end):\n query = {\n 'scope': {\n 'location_ids': floors\n },\n 'filters': {\n # 'min_capacity': 1,\n 'types': [],\n 'include_unbookable': False\n },\n 'view_options': {\n 'bounds': {\n 'from': start,\n 'to': end,\n 'time_zone': libs.time.TIMEZONE_AREA\n },\n 'prioritization_type': 'specific_time'\n }\n # 'paging_info': {'page': 1,'per_page': 2}\n }\n query = json.dumps(query)\n logging.info(query)\n spaces = json.loads(\n requests.post(\n SPACE_API,\n data=query,\n headers=HEADERS).text)\n logging.info(spaces)\n free_spaces = []\n for space in spaces['data']:\n if space['busy'] == []:\n free_spaces.append(space)\n\n logging.info(free_spaces)\n return free_spaces\n\n\ndef get_free_space(floors, start, end, capacity):\n query = {\n 'scope': {\n 'location_ids': floors\n },\n 'filters': {\n 'min_capacity': capacity,\n # 'max_capacity': capacity,\n 'types': [],\n 'include_unbookable': False\n },\n 'view_options': {\n 'bounds': {\n 'from': start,\n 'to': end,\n 'time_zone': libs.time.TIMEZONE_AREA\n },\n 'prioritization_type': 'specific_time'\n },\n # 'paging_info': {'page': 1,'per_page': 1}\n }\n\n query = json.dumps(query)\n logging.info(query)\n spaces = json.loads(\n requests.post(\n SPACE_API,\n data=query,\n headers=HEADERS).text)\n logging.info(spaces)\n\n for space in spaces['data']:\n if space['busy'] == []:\n return space\n\n return {}\n\n\ndef book_space(space_id, start, end, user_name, email):\n query = {\n 'start': {\n 'date_time': start,\n 'time_zone': libs.time.TIMEZONE_AREA\n },\n 'end': {\n 'date_time': end,\n 'time_zone': libs.time.TIMEZONE_AREA\n },\n 'title': MEETING_TITLE.format(user_name),\n 'include_in_demand': True,\n 'invitees': [\n {\n 'email': email\n }\n ]\n }\n\n query = json.dumps(query)\n logging.info(query)\n meeting = json.loads(\n requests.post(\n BOOK_ROOM_API.format(space_id),\n data=query,\n headers=HEADERS).text)\n logging.info(meeting)\n\n return meeting\n","sub_path":"libs/robin.py","file_name":"robin.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"562797063","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Yandong\n# Time :2019/12/7 20:42\nfrom gym import Env, spaces\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import deque\nfrom keras.optimizers import Adam\nfrom 
keras.layers import Dense, Input, PReLU\nfrom keras.models import Model, Sequential\nimport gc\n\ndef dqn_model(input_dim, hidden_dim, out_dim):\n \"\"\"\n Building a Neural Network Model\n \"\"\"\n model = Sequential()\n model.add(Dense(hidden_dim, input_shape=(input_dim,)))\n model.add(PReLU())\n model.add(Dense(int(hidden_dim/2)))\n model.add(PReLU())\n model.add(Dense(int(hidden_dim/4)))\n model.add(PReLU())\n model.add(Dense(out_dim))\n model.summary()\n return model\n\nclass QAgent(object):\n\n def __init__(self, envs, memory_capacity=2000, hidden_dim=100, model_file=None):\n\n self.input_dim = envs[0].observation_space.n\n self.output_dim = envs[0].action_space.n\n self.envs = envs\n # replay experiment parameters\n self.replay_counter = 1\n self.replay_buffer = list()\n self.replay_buffer_capacity = memory_capacity\n # Q Network weights filename\n self.load_weights_file = model_file\n self.save_weights_file = 'multi_envs.h5'\n # the double DQN, q_model and q_target_model\n self.q_model = dqn_model(self.input_dim, hidden_dim, self.output_dim)\n self.q_model.compile(optimizer='adam', loss='mse')\n # target Q Network\n self.target_q_model = dqn_model(self.input_dim, hidden_dim, self.output_dim)\n if self.load_weights_file:\n self.q_model.load_weights(self.load_weights_file)\n else:\n pass\n # copy Q Network params to target Q Network\n self._update_weights()\n\n # copy trained Q Network params to target Q Network\n def _update_weights(self):\n self.target_q_model.set_weights(self.q_model.get_weights())\n\n # compute Q_max\n # use of target Q Network solves the non-stationarity problem\n def _get_target_q_value(self, next_state, reward, q_double=False):\n # max Q value among next state's actions\n if q_double:\n # DDQN\n # current Q Network selects the action\n # a'_max = argmax_a' Q(s', a')\n action = np.argmax(self.q_model.predict(next_state)[0])\n # target Q Network evaluates the action\n # Q_max = Q_target(s', a'_max)\n q_value = self.target_q_model.predict(next_state)[0][action]\n else:\n # DQN chooses the max Q value among next actions\n # selection and evaluation of action is on the target Q Network\n # Q_max = max_a' Q_target(s', a')\n q_value = np.amax(self.target_q_model.predict(next_state)[0])\n\n # Q_max = reward + gamma * Q_max\n q_value *= self.gamma\n q_value += reward\n return q_value\n\n def _learn_from_memory(self, batch_size):\n # Sample experience\n trans_pieces = random.sample(self.replay_buffer, batch_size) # the transition \n state_batch, q_values_batch = [], []\n for state, action, reward, next_state, done in trans_pieces:\n # policy prediction for a given state\n q_values = self.q_model.predict(state)\n # get Q_max\n q_value = self._get_target_q_value(next_state, reward)\n # correction on the Q value for the action used\n q_values[0][action] = reward if done else q_value\n # collect batch state-q_value mapping\n state_batch.append(state[0])\n q_values_batch.append(q_values[0])\n # train the Q-network\n self.q_model.fit(np.array(state_batch),\n np.array(q_values_batch),\n batch_size=32, epochs=16, verbose=0)\n loss = self.q_model.evaluate(np.array(state_batch), np.array(q_values_batch), verbose=0)\n # the target_net update\n self._update_weights()\n # if self.replay_counter % 10 == 0:\n # self._update_weights()\n # self.replay_counter += 1\n return loss\n\n def act(self, a0, s0):\n s1, r1, is_done, info = self.env.step(a0)\n s1 = np.reshape(s1, [1, self.input_dim])\n # put the in the memory\n # Store experience in deque\n self.replay_buffer.append(np.array([s0, 
a0, r1, s1, is_done]))\n if len(self.replay_buffer) > self.replay_buffer_capacity:\n self.replay_buffer.pop(0)\n return s1, r1, is_done, info\n\n def learning(self, max_episodes=1000, batch_size=32, gamma=0.99, min_epsilon=0.1):\n \"\"\"\n epsilon-greed find the action and experience replay\n :return:\n \"\"\"\n # initially 90% exploration, 10% exploitation\n self.epsilon = 1.0\n self.gamma = gamma\n # iteratively applying decay til 10% exploration/90% exploitation\n self.epsilon_min = min_epsilon\n self.epsilon_decay = self.epsilon_min / self.epsilon\n self.epsilon_decay = self.epsilon_decay ** (1. / float(max_episodes))\n total_steps, step_in_episode, num_episode = 0, 0, 0\n steps_history, rewards_history, epsilon_history, step_in_episode_history = list(), list(), list(), list()\n # env state, action, next state\n env_state = list()\n env_switch = False\n # self.min_reward = -10 * self.env.maze_size\n while num_episode < max_episodes:\n # update exploration-exploitation probability\n self.update_epsilon()\n if env_switch:\n self.env = self.envs[0]\n env_switch = False\n else:\n self.env = self.envs[1]\n env_switch = True\n # self.update_epsilon(num_episode) # 2 method\n # epsilon_history.append(self.epsilon)\n # update the epsilon\n step_in_episode, total_reward = 0, 0\n loss, mean_loss = 0, 0\n is_done = False\n env_state.clear()\n self.env.reset(env_state) # get the env observation states\n print('goal', self.env.goal_pos)\n # print(id(env_state))\n s0 = np.reshape(env_state, [1, self.input_dim])\n while not is_done:\n a0 = self.perform_policy(s0, self.epsilon)\n self.env.render()\n s1, r1, is_done, info = self.act(a0, s0)\n total_reward += r1\n # if total_reward < self.min_reward:\n # is_done = True\n step_in_episode += 1\n s0 = s1\n # gc.collect()\n # call experience relay\n if len(self.replay_buffer) > batch_size:\n loss += self._learn_from_memory(batch_size)\n mean_loss = loss / step_in_episode\n print(\"episode: {:03d}/{:d} time_step:{:d} epsilon:{:3.2f}, loss:{:.5f}\"\n .format(num_episode+1, max_episodes, step_in_episode, self.epsilon, mean_loss))\n print('Episode reward: {:.2f}'.format(total_reward))\n total_steps += step_in_episode\n num_episode += 1\n steps_history.append(total_steps)\n step_in_episode_history.append(step_in_episode)\n rewards_history.append(total_reward)\n\n # finishing condition...\n # if len(rewards_history) > 20 and np.mean(rewards_history[-20:]) > 36:\n # print('Saving the model params...')\n # # save Q Network params to a file\n # self.q_model.save_weights(self.save_weights_file)\n # print('Finish training !')\n # break\n print('Saving the model params...')\n # save Q Network params to a file\n self.q_model.save_weights(self.save_weights_file)\n # plot training rewards\n plt.plot(steps_history, step_in_episode_history)\n plt.xlabel('steps')\n plt.ylabel('running avg steps')\n plt.show()\n return\n\n # decrease the exploration, increase exploitation\n def update_epsilon(self):\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n # def update_epsilon(self, episode):\n # self.epsilon = 1 / (1+episode)\n\n\n def perform_policy(self, s, epsilon=None):\n \"\"\"\n New action based on the Q_update net\n \"\"\"\n Q_s = self.q_model.predict(s)[0]\n if epsilon is not None and random.random() < epsilon:\n action = self.env.action_space.sample()\n return action\n else:\n return 
int(np.argmax(Q_s))\n","sub_path":"Maze_2020_spring/multi_environment/DQN_agent.py","file_name":"DQN_agent.py","file_ext":"py","file_size_in_byte":8658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"368762299","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/2/7 11:52\n# @Author : QiWei.Ren\n# -*- coding: utf-8 -*-\n\"\"\"号码封停处置\"\"\"\n# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport unittest, time, re\nfrom CINTEL_FZWEB3_1_2_1.logger.log import *\n\nlog=Log()\nclass UntitledTestCaseTask2(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(30)\n log.info(\"打开浏览器\")\n self.driver.get(\"http://192.168.2.87:8080/rg_web/login.shtml;JSESSIONID=c4b589c5-63d7-422f-9298-c35541a34873\")\n self.verificationErrors = []\n self.accept_next_alert = True\n\n def tearDown(self):\n log.info(\"关闭浏览器\")\n self.driver.close()\n self.assertEqual([], self.verificationErrors)\n\n def test_untitled_test_case_task2(self):\n self.driver.find_element_by_id(\"login_name\").send_keys(\"ct_operator\")\n self.driver.find_element_by_id(\"password\").send_keys(\"123456\")\n self.driver.find_element_by_id(\"vcode\").send_keys(\"8888\")\n\n self.driver.find_element_by_name(\"login_form\").submit()\n\n self.driver.implicitly_wait(30)\n log.info(self.driver.find_element_by_class_name(\"layui-layer-title\").text)\n self.driver.switch_to_frame(self.driver.find_element_by_tag_name(\"iframe\"))\n\n time.sleep(5)\n self.driver.find_element_by_xpath(\"//div[@id='taskOrder']/div[6]/div/i\").click()\n # ERROR: Caught exception [ERROR: Unsupported command [selectFrame | index=0 | ]]\n self.driver.find_element_by_xpath(\"//li[@onclick='li_1()']\").click()\n self.driver.find_element_by_xpath(\"//button[@onclick='deliver()']\").click()\n self.driver.find_element_by_xpath(\"//form[@id='conditionForm']/div/button\").click()\n self.driver.find_element_by_xpath(\"//button[@type='reset']\").click()\n # ERROR: Caught exception [ERROR: Unsupported command [selectFrame | relative=parent | ]]\n self.driver.find_element_by_xpath(\"//div[@id='layui-layer1']/span/a[2]\").click()\n self.driver.find_element_by_xpath(\"//div[@onclick='togglePro()']\").click()\n self.driver.find_element_by_xpath(\"//li[@onclick='quit()']\").click()\n\n def is_element_present(self, how, what):\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException as e:\n return False\n return True\n\n def is_alert_present(self):\n try:\n self.driver.switch_to_alert()\n except NoAlertPresentException as e:\n return False\n return True\n\n def close_alert_and_get_its_text(self):\n try:\n alert = self.driver.switch_to_alert()\n alert_text = alert.text\n if self.accept_next_alert:\n alert.accept()\n else:\n alert.dismiss()\n return alert_text\n finally:\n self.accept_next_alert = True\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"case/Authority_case/police_numberclosureoperator.py","file_name":"police_numberclosureoperator.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"415859442","text":"from django.shortcuts import 
render\nfrom .models import Press, General, Contact, Page, AllNews, LastNews,\\\n Section, PressSection\nfrom django.contrib.auth.models import Group, Permission, User\n\n\ndef create_group():\n new_group, created = Group.objects.get_or_create(name='webmaster')\n\n codenames = [\n 'add_article',\n 'change_article',\n 'delete_article',\n\n 'add_press',\n 'change_press',\n 'delete_press',\n\n 'change_general',\n 'change_section',\n\n 'add_page',\n 'change_page',\n 'delete_page',\n\n 'add_contact',\n 'change_contact',\n 'delete_contact',\n\n 'add_simplesection',\n 'change_simplesection',\n 'delete_simplesection',\n\n 'add_doublesection',\n 'change_doublesection',\n 'delete_doublesection',\n\n 'add_localizationsection',\n 'change_localizationsection',\n 'delete_localizationsection',\n\n 'add_subscriptionsection',\n 'change_subscriptionsection',\n 'delete_subscriptionsection'\n\n ]\n\n for codename in codenames:\n permission = Permission.objects.get(codename=codename)\n new_group.permissions.add(permission)\n\n\ndef create_user():\n user = User.objects.create_user('webmaster', '', 'archipel01*')\n user.is_staff = True\n user.save()\n g = Group.objects.get(name='webmaster')\n g.user_set.add(user)\n\n user = User.objects.create_user('alcyon', '', 'password')\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n\ndef initialize():\n if len(General.objects.all()) == 0:\n General().save()\n if len(Section.objects.all()) == 0:\n Section().save()\n if len(LastNews.objects.all()) == 0:\n LastNews(sections_id=0).save()\n if len(AllNews.objects.all()) == 0:\n AllNews(sections_id=0).save()\n if len(PressSection.objects.all()) == 0:\n PressSection(sections_id=0, color=1).save()\n if len(Group.objects.all()) == 0:\n create_group()\n if len(User.objects.all()) == 0:\n create_user()\n\n\ndef construction(request):\n\n class Construction_info():\n website_title = \"minquier\"\n title = \"minquier\"\n big_title = \"Website
\" + \\\n            \" under construction\"\n        abstract = \"Visit the admin interface \" + \\\n            \"in order to start your website. \" + \\\n            \"You see this page because you haven't already\" + \\\n            \" created any page 
- at least any with index 0\"\n boolean = True\n button = \"Admin Interface\"\n button_link = \"admin/\"\n\n initialize()\n construction_info = Construction_info()\n return render(request, 'main/home.html', {'general': construction_info})\n\n\ndef get_info():\n if len(General.objects.all()) > 0:\n general = General.objects.all()[0]\n else:\n general = None\n\n if len(Page.objects.all()) > 0:\n pages = Page.objects.order_by('index')\n else:\n pages = None\n\n if len(Press.objects.all()) > 0:\n last_press = Press.objects.order_by('date').reverse()[0:2]\n else:\n last_press = []\n\n if len(Contact.objects.all()) > 0:\n contact = Contact.objects.all()[0]\n else:\n contact = []\n\n info = {'last_press': last_press,\n # 'articles': Article.objects.all(),\n 'contact': contact,\n 'general': general,\n 'pages': pages}\n return info\n\n\ndef index(request):\n pages_0 = Page.objects.filter(index=0)\n if len(pages_0) == 0:\n return construction(request)\n else:\n page = pages_0[0]\n info = get_info()\n info['page'] = page\n return render(request, 'main/home.html', info)\n\n\ndef notFound(request, *args):\n return render(request, 'main/notFound.html')\n\n\ndef page(request, id):\n page = Page.objects.filter(index=id)\n if len(page) > 0:\n page = Page.objects.filter(index=id)[0]\n info = get_info()\n info['page'] = page\n return render(request, 'main/page.html', info)\n else:\n return notFound(request)\n\n\ndef article(request, id):\n info = get_info()\n return render(request, 'main/page.html', info)\n","sub_path":"wsgi/myproject/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"443722902","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom IVP import *\n\n\"\"\" Van der Pol's equation \"\"\"\n\nmu = 4000\n\ndef fcn(u,t):\n dx = u[1]\n dy = mu*((1-u[0]**2)*u[1] - u[0])\n return [dx,dy]\n\nNt = 1000\nTspan = [0,6]\nI = [2,0]\nu, t = RK4(fcn, Tspan, I, Nt)\n\nplt.figure()\nplt.plot(t, u[:,0], 'r-')\nplt.title(\"Van der Pol's equation\")\nplt.xlabel('t')\nplt.ylabel('y')\nplt.grid(True)\nplt.savefig('Pol2.eps',bbox_inches='tight')\nplt.show()\n\n","sub_path":"Ex4.py","file_name":"Ex4.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"592216648","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('project', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='adjunto',\n name='lenguaje',\n field=models.CharField(max_length=10, null=True, choices=[(b'clike', b'C'), (b'python', b'Python'), (b'ruby', b'Ruby'), (b'css', b'CSS'), (b'php', b'PHP'), (b'scala', b'Scala'), (b'sql', b'SQL'), (b'bash', b'Bash'), (b'javascript', b'JavaScript'), (b'markup', b'Markup')]),\n ),\n migrations.AlterField(\n model_name='nota',\n name='fecha',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n ]\n","sub_path":"project/migrations/0002_auto_20150521_1603.py","file_name":"0002_auto_20150521_1603.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"282198873","text":"\"\"\"\nSizeDistribution_EachTimepoint.py produces histograms of lineage size from all lineages in each 
timepoint\n\"\"\"\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n__author__ = 'Tee Udomlumleart'\n__maintainer__ = 'Tee Udomlumleart'\n__email__ = ['teeu@mit.edu', 'salilg@mit.edu']\n__status__ = 'Production'\n\n# Normalizing data\ntotal_cell_number = 10**8\n\nstate_1_ratio = 0.90\nstate_2_ratio = 0.05\nstate_3_ratio = 0.05\n\nstate_1_number = state_1_ratio * total_cell_number\nstate_2_number = state_2_ratio * total_cell_number\nstate_3_number = state_3_ratio * total_cell_number\n\nnormalizing_factor = [total_cell_number, state_1_number, state_2_number, state_3_number] * 10\n\ntable = pickle.load(open('191012_finished_table.pickle', 'rb'))\n\nsum_table = table.sum(axis=0)\nnormalized_table = table.div(sum_table)\ntrue_number_table = (normalized_table * normalizing_factor).round()\n\nall_barcode_number_list = []\nfor barcode, row in true_number_table.iterrows():\n # Retrieve different sample data from Day 0 to Day 24\n d0_all, d0_s1, d0_s2, d0_s3 = row[0:4]\n d6_all, d6_s1, d6_s2, d6_s3 = row[4:8]\n d12_all, d12_s1, d12_s2, d12_s3 = row[20:24]\n d18_all, d18_s1, d18_s2, d18_s3 = row[32:36]\n d24_all, d24_s1, d24_s2, d24_s3 = row[36:40]\n # new_list contains the total number of cells (S1 + S2 + S3) across all lineages\n new_list = [sum(row[1:4]), sum(row[5:8]), sum(row[21:24]), sum(row[33:36]), sum(row[37:40])]\n all_barcode_number_list.append(new_list)\n\n# all_barcode_number_list now contains the distribution of cells across all lineages\n# Turn it into dataframe for better visualization\ndf_all_barcode_number_list = pd.DataFrame(all_barcode_number_list, columns=['Day {}'.format(i*6) for i in range(5)])\n\n# Iterate through all timepoints\nfor index in range(5):\n fig, ax = plt.subplots()\n bins = 10 ** (np.arange(0, 7, 0.1))\n\n timepoint = 'Day {}'.format(6 * index)\n\n # Produce a histogram\n ax.hist(df_all_barcode_number_list[timepoint], bins=bins)\n ax.set_xscale('log') # log scale\n ax.set_title(timepoint)\n\n fig.text(0.5, 0.04, '$\\log_{10}$ Lineage Size', ha='center', va='center', size='x-large')\n fig.text(0.06, 0.5, 'Number of Lineages', ha='center', va='center', rotation='vertical', size='x-large')\n plt.savefig('SizeDistribution_EachTimepoint_{}.svg'.format(timepoint), bbox_inches='tight', format='svg', dpi=720)","sub_path":"Data_Analysis/Figure S1/SizeDistribution_EachTimepoint.py","file_name":"SizeDistribution_EachTimepoint.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"507756866","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\n\"\"\"\nExercise 8.12\n--------------\nROT13 is a weak form of encryption that involves \"rotating\" each letter in a word\nby 13 places. 
To rotate a letter means to shift it through the alphabet, wrapping around to the\nbeginning if necessary, so 'A' shifted by 3 is 'D' and 'Z' shifted by 1 is 'A'.\n\nWrite a function called rotate_word that takes a string and an integer as parameters, and that\nreturns a new string that contains the letters from the original string \"rotated\" by the given amount.\n\nFor example, \"cheer\" rotated by 7 is \"jolly\" and \"melon\" rotated by -10 is \"cubed\".\n\nYou might want to use the built-in functions ord , which converts a character to a numeric code,\nand chr , which converts numeric codes to characters.\n\"\"\"\n\ndef rotate_word(word, m):\n    \"\"\"\n    This function rotates the given word by m positions,\n    and returns a new word.\n    \"\"\"\n    new_word = ''\n    for i in word:\n        if i == ' ':\n            new_word += i\n            continue\n        rot = ord(i) + m\n        if i.isupper():\n            if rot < ord('A'):\n                t = ord('A') - rot\n                rot = ord('Z') - (t-1)\n            elif rot > ord('Z'):\n                t = rot - ord('Z')\n                rot = ord('A') + (t-1)\n        else:\n            if rot < ord('a'):\n                t = ord('a') - rot\n                rot = ord('z') - (t-1)\n            elif rot > ord('z'):\n                t = rot - ord('z')\n                rot = ord('a') + (t-1)\n        new_word += chr(rot)\n    return new_word\n\nprint('banana', 5, rotate_word('banana', 5))\nprint('New Zealand', 5, rotate_word('New Zealand', 5))\nprint('football', 5, rotate_word('football', 5))\n","sub_path":"chapter08/exercise_8_12.py","file_name":"exercise_8_12.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"228949406","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\nclass SVM:\n    \n    def __init__(self, x, y, C=1, tol=0.1, eps=0.001, kernel='linear', sigma=0.45):\n        \"\"\"\n        :param x -> train data\n        :param y -> train labels\n        :param C -> soft margin\n        :param tol -> tolerance \n        :param eps -> epsilon\n        :param kernel -> either 'linear' or 'gaussian'\n        :param sigma -> variance, for gaussian kernel only\n        \"\"\"\n\n        self.x = x\n        self.y = y\n        self.alpha = None\n        self.support_vectors_ = None # vector of support vectors indices\n        self.b = 0\n        self.m = x.shape[0]\n        self.C = C\n        self.tol = tol\n        self.eps = eps\n        self.sigma = sigma\n        kernel_type = {\n            'linear': lambda x_i, x_j: np.sum(np.dot(x_i, x_j)),\n            'gaussian': lambda x_i, x_j: np.exp(- np.square(np.linalg.norm(x_i - x_j)) / (2 * sigma**2))  # parenthesised so the squared distance is divided by 2*sigma**2\n        }\n        self.kernel = kernel_type[kernel]\n        self.kernel_cache = {}\n\n\n    def _check_bounds(self):\n        \"\"\"\n        Check if the KKT conditions are met. 
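Concretely, this collects the indices of the non-bound Lagrange multipliers (0 < alpha < C), which in Platt's SMO are the usual candidates for KKT violations. 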
\n \n Return the indices of the points inside the margin \n \"\"\"\n\n return np.flatnonzero(np.logical_and(self.alpha > 0, self.alpha < self.C))\n\n\n def _compute_L_H(self, i, j):\n \"\"\"\n Compute L and H according to the formulation in Platt's paper\n \"\"\"\n\n if self.y[i] != self.y[j]:\n L = max(0, self.alpha[j] - self.alpha[i])\n H = min(self.C, self.C + self.alpha[j] - self.alpha[i])\n return L, H\n L = max(0, self.alpha[j] + self.alpha[i] - self.C)\n H = min(self.C, self.alpha[j] + self.alpha[i])\n return L, H\n\n\n def _examine_example(self, j):\n \"\"\"\n Search for alphas to be optimized\n\n Return either changed (1) or unchanged (0)\n \"\"\"\n\n self.E_j = self._get_error(j)\n r_j = self.E_j * self.y[j]\n if (r_j < -self.tol and self.alpha[j] < self.C) or (r_j > self.tol and self.alpha[j] > 0):\n non_0_C = self._check_bounds()\n non_0_C_len = len(non_0_C)\n if non_0_C_len > 1:\n if self._second_heuristicA(j, non_0_C):\n return 1\n if self._second_heuristicB(j, non_0_C_len, non_0_C):\n return 1\n if self._second_heuristicB(j, self.m):\n return 1\n return 0\n\n\n def _error(self, i):\n \"\"\"\n Compute the error of the prediction as (prediction - ground-truth label)\n\n Return the error (scalar)\n \"\"\"\n\n return self.decision_function(i) - self.y[i]\n\n\n def _first_heuristic(self, num_changed):\n \"\"\"\n First heuristic according to Platt's paper\n\n Return the number of alphas changed\n \"\"\"\n\n ex_not_bounds = self._check_bounds() # array of indexes\n for i in ex_not_bounds:\n num_changed += self._examine_example(i)\n return num_changed\n\n\n def _get_error(self, i):\n if 0 < self.alpha[i] < self.C:\n return self.E[i]\n else:\n return self.decision_function(i) - self.y[i]\n\n\n def _get_support_vectors(self):\n \"\"\"\n Get the indexes of the support vectors\n\n Return numpy.array of indexes\n \"\"\"\n\n if self.support_vectors_ is None:\n return np.flatnonzero(self.alpha != 0)\n return self.support_vectors_\n\n\n def _initialize_parameters(self):\n \"\"\"\n Initialize parameters before optimization\n \"\"\"\n\n self.alpha = np.zeros((self.m, 1))\n self.b = 0\n self.E = np.zeros((self.m, 1)) # result of the decision function with all alphas equal to zero\n\n\n def _kernel(self, i, j):\n \"\"\"\n Check if the value of the kernel for i and j have been already calculated.\n If so, then skip the computation and return the value, \n else calculate and return the value\n\n Return scalar\n \"\"\"\n\n if (i,j) in self.kernel_cache:\n return self.kernel_cache[(i,j)]\n else:\n self.kernel_cache[(i,j)] = self.kernel(self.x[i], self.x[j])\n return self.kernel_cache[(i,j)]\n\n\n def _main_smo_fun(self):\n \"\"\"\n Main routine of the SMO algorithm\n \"\"\"\n\n self._initialize_parameters()\n num_changed = 0 \n examine_all = True\n with tqdm() as pbar:\n while num_changed > 0 or examine_all:\n num_changed = 0\n if examine_all:\n for i in range(self.m):\n num_changed += self._examine_example(i)\n else:\n num_changed = self._first_heuristic(num_changed)\n\n if examine_all == True:\n examine_all = False\n elif num_changed == 0:\n examine_all = True\n pbar.update(1)\n\n\n def _objective_function(self):\n \"\"\"\n Objective function of the dual representation\n\n Return the value of the objective function (scalar) \n \"\"\"\n\n support_vectors_idxs = self._get_support_vectors()\n result = 0\n for i in support_vectors_idxs:\n for j in support_vectors_idxs:\n result += self.y[i] * self.y[j] * self.alpha[i] * self.alpha[j] * self._kernel(i, j)\n result = 0.5 * result - 
np.sum(self.alpha[support_vectors_idxs])\n return result\n\n\n def _second_heuristicA(self, j, non_bounds):\n \"\"\"\n Search for the candidate alpha that maximizes the in-step progress\n \"\"\"\n\n if self.E_j > 0:\n i = np.argmin(self.E[non_bounds])\n else:\n i = np.argmax(self.E[non_bounds])\n if self._take_step(i, j):\n return True\n return False\n\n\n def _second_heuristicB(self, j, s_len, arr=None):\n \"\"\"\n Search systematically the alphas \n \"\"\"\n\n if arr is None:\n arr = np.arange(0, s_len)\n for i in np.roll(arr, np.random.randint(0, s_len)):\n if self._take_step(i, j):\n return True\n return False\n\n\n def _take_step(self, i, j):\n \"\"\"\n Compute the optimzation step and update the parameters\n\n Return True or False\n \"\"\"\n\n if i == j:\n return False\n\n E_i = self._get_error(i)\n s = self.y[i] * self.y[j]\n L, H = self._compute_L_H(i, j)\n \n if L == H:\n return False\n\n eta = self._kernel(i, i) + self._kernel(j, j) - 2 * self._kernel(i, j) \n if eta > 0:\n aj_new = self.alpha[j] + self.y[j] * (E_i - self.E_j) / eta\n if aj_new < L: \n aj_new = L\n elif aj_new > H:\n aj_new = H \n else:\n f1 = self.y[i] * (E_i + self.b) - self.alpha[i] * self._kernel(i,i) - s * self.alpha[j] * self._kernel(i,j)\n f2 = self.y[j] * (self.E_j + self.b) - s * self.alpha[i] * self._kernel(i,j) - self.alpha[j] * self._kernel(j,j)\n L1 = self.alpha[i] + s * (self.alpha[j] - L)\n H1 = self.alpha[i] + s * (self.alpha[j] - H)\n Lobj = L1 * f1 + L * f2 + 0.5 * L1**2 * self._kernel(i,i) + 0.5 * L**2 * self._kernel(j,j) + s * L * L1 * self._kernel(i,j)\n Hobj = H1 * f1 + H * f2 + 0.5 * H1**2 * self._kernel(i,i) + 0.5 * H**2 * self._kernel(j,j) + s * H * H1 * self._kernel(i,j)\n if Lobj < Hobj - self.eps:\n aj_new = L\n elif Lobj > Hobj + self.eps:\n aj_new = H\n else: \n aj_new = self.alpha[j]\n if abs(aj_new - self.alpha[j]) < (self.eps * (aj_new + self.alpha[j] + self.eps)):\n return False\n ai_new = self.alpha[i] + s * (self.alpha[j] - aj_new)\n b_old = self.b\n if 0 < ai_new < self.C:\n self.b = self.b + E_i + self.y[i] * (ai_new - self.alpha[i]) * self._kernel(i, i) + self.y[j] * (aj_new - self.alpha[j]) * self._kernel(i, j)\n elif 0 < aj_new < self.C:\n self.b = self.b + self.E_j + self.y[i] * (ai_new - self.alpha[i]) * self._kernel(i, j) + self.y[j] * (aj_new - self.alpha[j]) * self._kernel(j, j)\n else:\n b1 = self.b + E_i + self.y[i] * (ai_new - self.alpha[i]) * self._kernel(i, i) + self.y[j] * (aj_new - self.alpha[j]) * self._kernel(i, j)\n b2 = self.b + self.E_j + self.y[i] * (ai_new - self.alpha[i]) * self._kernel(i, j) + self.y[j] * (aj_new - self.alpha[j]) * self._kernel(j, j)\n self.b = (b1 + b2) / 2\n delta_i = self.y[i] * (ai_new - self.alpha[i])\n delta_j = self.y[j] * (aj_new - self.alpha[j])\n delta_b = self.b - b_old\n for k in range(self.m):\n if 0 < self.alpha[k] < self.C:\n self.E[k] += delta_i * self._kernel(i,k) + delta_j * self._kernel(j,k) - delta_b\n self.E[i] = 0\n self.E[j] = 0\n self.alpha[i] = ai_new\n self.alpha[j] = aj_new\n return True\n\n\n def decision_function(self, sample):\n \"\"\"\n Compute the decision function sum(y @ alpha * kernel) - b\n\n Return scalar\n \"\"\"\n\n u = 0\n support_vectors_idxs = self._get_support_vectors()\n if isinstance(sample, (int, np.int)):\n for i in support_vectors_idxs:\n u += self.y[i] * self.alpha[i] * self._kernel(i, sample) # use kernel caching\n else:\n for i in support_vectors_idxs:\n u += self.y[i] * self.alpha[i] * self.kernel(self.x[i], sample) # sample is a numpy array\n u = u - self.b\n return u\n\n\n def 
fit(self):\n \"\"\"\n Fit the data and start parameters optimization\n \"\"\"\n\n self._main_smo_fun()\n self.support_vectors_ = self._get_support_vectors()\n\n \n def predict(self, x):\n \"\"\"\n Predict the class given the input x\n\n Return either 1 or -1\n \"\"\" \n\n return int(np.sign(self.decision_function(x)))\n\n\n def support_vectors(self):\n \"\"\"\n Return the alphas of the support vectors\n \"\"\"\n\n return self.alpha[self.support_vectors_]\n\n\n\ndf_x = pd.read_csv(\"./data/logistic_x.txt\", sep=\" +\", names=[\"x1\",\"x2\"], header=None, engine='python')\ndf_y = pd.read_csv('./data/logistic_y.txt', sep=' +', names=[\"y\"], header=None, engine='python')\ndf_y = df_y.astype(int)\n\nx = np.hstack([np.ones((df_x.shape[0], 1)), df_x[[\"x1\",\"x2\"]].values])\ny = df_y[\"y\"].values\n\n\nsvm = SVM(x, y, kernel='gaussian', C=1, tol=0.001, eps=0.001)\n\nsvm.fit()\n\npredictions_list = []\nfor i in range(99):\n predictions_list.append(svm.predict(x[i]))\npredictions = np.array(predictions_list)\nprint(predictions)\nprint(np.sum(np.equal(predictions, y)))","sub_path":"additional_code/new_smo.py","file_name":"new_smo.py","file_ext":"py","file_size_in_byte":10742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"364872333","text":"import numpy as np\nfrom mymodule import grid\nimport ffronts\n\n# Normalised distance to centre of gridbox for 2d geometry\nm = 1 / np.sqrt(2)\n\n\ndef main(tau):\n \"\"\"\n Args:\n tau (iris.cube.Cube): A 2d iris cube of an appropriate variable for\n locating fronts\n\n Returns:\n fronts (np.ma.masked_array): An array with fronts locating at\n zero-points\n \"\"\"\n # Extract cube infomation\n theta, phi, rho = grid.polar_coords(tau)\n pressure = tau.coord('air_pressure').points[0]\n name = tau.name()\n\n # Calculate grad(tau)\n grad_tau = np.array(ffronts.grad2d(tau.data, theta, phi, rho))\n\n # Calculate grad(|grad(tau)|)\n grad_abs_grad_tau = np.array(ffronts.grad2d(abs2d(grad_tau),\n theta, phi, rho))\n\n # Calculate the locating variable\n loc = locating_variable(grad_abs_grad_tau, theta, phi, rho)\n\n # Calculate the masking criteria\n k1, k2 = thresholds(name, pressure)\n mask1 = m1(grad_tau, grad_abs_grad_tau, k1)\n mask2 = m2(grad_tau, grad_abs_grad_tau, theta, phi, rho, k2)\n mask = np.logical_or(mask1, mask2)\n\n # Find where the locating variable is zero\n fronts = np.ma.masked_where(mask, loc)\n\n return fronts\n\n\ndef locating_variable(grad_abs_grad_tau, theta, phi, rho):\n \"\"\"\n 'At each gridpoint :math:`\\hat{s}` is evaluated by computing a mean of five\n values of :math:`\\nabla |\\nabla \\tau|`, treating each as an axis as opposed\n to a vector\n \"\"\"\n # Derive a five point mean axis\n beta, D = ffronts.axis(grad_abs_grad_tau)\n # Resolve the four outer vectors into the positive \\hat{s} direction\n # and compute the total divergence of the resolved vectors using simple\n # first order finite differencing\n loc = np.array(ffronts.div2d(beta, D, theta, phi, rho))\n\n return loc\n\n\ndef m1(grad_tau, grad_abs_grad_tau, k1):\n \"\"\"Calculate the first masking variable\n\n Equation 10 in Hewson (1998),\n :math:`|\\nabla|\\nabla \\tau||\n (sign[\\nabla \\tau \\cdot \\nabla |\\nabla \\tau|])`\n \"\"\"\n # Calculate grad(tau).grad(|grad(tau|)\n y = grad_tau * grad_abs_grad_tau\n y = y[:, :, 0] + y[:, :, 1]\n # Average over 5 gridpoint array\n y = ffronts.fivepointave(y)\n # Use sign\n z = abs2d(grad_abs_grad_tau)\n y = z * np.sign(y)\n mask = y < k1\n return mask\n\n\ndef m2(grad_tau, 
grad_abs_grad_tau, theta, phi, rho, k2):\n \"\"\"Calculate the second masking variable\n\n Equation 11 in Hewson (1998)\n :math:`|\\nabla \\tau| + m \\chi |\\nabla |\\nabla \\tau||\n \"\"\"\n dx = np.array(ffronts.dx(theta, phi, rho))\n y = abs2d(grad_tau) + m * dx * abs2d(grad_abs_grad_tau)\n mask = y < k2\n return mask\n\n\ndef abs2d(x):\n \"\"\"Calculate the absolute value of a 2D array of vectors\n \"\"\"\n y = np.sqrt(x[:, :, 0] ** 2 + x[:, :, 1] ** 2)\n return y\n\n\ndef thresholds(name, pressure):\n \"\"\"Lookup masking thresholds for a given pressure level\n\n Args:\n name (str): The name of the locating variable used\n\n pressure (float): The pressure level fronts are being located on\n\n Returns:\n k1 (float): The first masking variable\n\n k2 (float): The second masking variable\n \"\"\"\n if name == 'air_wet_bulb_potential_temperature':\n k1 = 0.3\n k2 = (0.45 / 30000.0) * pressure\n\n elif name == 'air_potential_temperature':\n k1 = (-0.14 / 30000.0) * pressure + 0.75\n k2 = (0.08 / 30000.0) * pressure + 1.25\n\n else:\n raise ValueError('Front diagnostic must be calculated for theta or' +\n 'theta_w')\n return k1 * 1e-10, k2 * 1e-5\n","sub_path":"detection/fronts/fronts.py","file_name":"fronts.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"33874896","text":"# coding=utf-8\r\nimport logging\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nimport xlrd\r\nimport xlwt\r\nfrom xlutils.copy import copy\r\n\r\n\r\nclass excelutil:\r\n '''\r\n\r\n 写\r\n util = excelutil('d:/123.xls', 'w') 指定文件名,不写表头\r\n util = excelutil('d:/123.xls', 'w',head=['a','b']) 带表头的新表格,指定文件名\r\n util = excelutil(mode='w') 当前文件夹生成时间戳文件,不带表头\r\n\r\n util.write_lines_from_dict([{'a':1,'b':2},{'a':1,'b':2}],save=True)\r\n util.write_lines_from_list([[1,2],[1,2]],save=True) 一次写多行\r\n util.write_line_by_nrow(0,[1,2,3,4]) 写在某行\r\n util.write_nextline([1,2,3,4],save=True) 自动写在下一行\r\n 读\r\n util = excelutil('d:/123.xls', 'r') 文件必须存在\r\n util.read_lines_to_list_by_cols([0,1],2) 读1,2列数据,从第3行开始\r\n util.read_lines_to_list(2) 读所有列数据,从第3行开始\r\n util.read_lines_to_dic(1) 必须有表头,读所有列数据,返回[{head:value},{head:value}]格式数据,从第2列开始\r\n util.read_lines_to_dic_by_head(['a','b'],2) 必须有表头,读取指定列的数据,返回[{head:value},{head:value}]格式数据,从第3列开始\r\n util.read_all_sheet()\r\n 追加\r\n util = excelutil(file='d:/1.xls', mode='a',index='a')\r\n util = excelutil('d:/a/b/12.xls', mode='a', head=[11, 12, 12, 12, 12, 12, 12, 12]) 文件必须传,可不存在\r\n util.write_nextline([0, 0, 0, 0, 0, 0, 0, 0])\r\n util.write_line_by_nrow(10, [1, 1, 1, 1, 1, 1, 1, 1])\r\n\r\n '''\r\n level = logging.INFO\r\n filename = None\r\n filemode = None\r\n\r\n def __init__(self, file=None, mode='r', index=0, head=None, encoding='utf-8'):\r\n logging.basicConfig(level=self.level, filename=self.filename, filemode=self.filemode,\r\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n self.logger = logging.getLogger(__name__)\r\n\r\n # 读文件初始化\r\n if mode == 'r':\r\n self.read_mode(file, index=index)\r\n # 写文件初始化\r\n elif mode == 'w':\r\n self.write_mode(file, head=head, encoding=encoding)\r\n # 追加文件初始化\r\n elif mode == 'a':\r\n self.append_mode(file, index=index, head=head, encoding=encoding)\r\n else:\r\n raise Exception(\"excel 操作模式错误\")\r\n\r\n '''\r\n ==============================以下为mode======================================\r\n '''\r\n\r\n '''\r\n 读取excel初始化\r\n @:param file str 必须存在切以 xls或xlsx结尾\r\n @:param index int or str 读取的sheet id号或name,默认第1个sheet\r\n 
@:except\r\n '''\r\n\r\n def read_mode(self, file, index=0):\r\n if file and os.path.isfile(file) and (file.endswith('.xls') or file.endswith('.xlsx')):\r\n self.workbook = xlrd.open_workbook(file)\r\n if type(index) == int:\r\n self.table = self.workbook.sheet_by_index(index)\r\n elif type(index) == str:\r\n self.table = self.workbook.sheet_by_name(index)\r\n else:\r\n raise Exception(\"文件:\" + file + \"不存在或不是excel文件\")\r\n\r\n '''\r\n 写入excel初始化\r\n @:param file str 可以为None或者以xls\r\n @:param [] 表头\r\n @:param encoding 字符编码\r\n @:param index 写入的sheet名称\r\n '''\r\n\r\n def write_mode(self, file=None, head=None, encoding='utf-8', index='sheet1'):\r\n self.head_list=[]\r\n if file:\r\n if file.endswith('.xls') or file.endswith('xlsx'):\r\n self.file = file\r\n else:\r\n self.logger.error(file + '不是xls文件')\r\n sys.exit()\r\n else:\r\n self.file = str(int(time.time())) + '.xls'\r\n\r\n self.row_num = 0\r\n self.head_indexs = {}\r\n self.head_flag = True;\r\n self.workbook = xlwt.Workbook(encoding=encoding)\r\n self.table = self.workbook.add_sheet(index)\r\n if head: # 有表头\r\n self.write_head(head)\r\n\r\n '''\r\n 追加模式\r\n @param file 表格文件,可以为None,必须是xls格式\r\n @param head 表头,只有在当前文件不存在,或者文件存在但是文件中无内容时起作用 []\r\n @param encoding 文件编码默认为utf-8\r\n @param index sheet的index或者名称 int or str\r\n '''\r\n\r\n def append_mode(self, file, head=None, encoding='utf-8', index=0):\r\n if file and os.path.isfile(file) and file.endswith('.xls'):\r\n self.file = file\r\n openworkbook = xlrd.open_workbook(file)\r\n sheet_names = openworkbook.sheet_names()\r\n self.workbook = copy(openworkbook) # 用xlutils提供的copy方法将xlrd的对象转化为xlwt的对象\r\n if type(index) == int:\r\n try:\r\n self.row_num = openworkbook.sheet_by_index(index).nrows # 用wlrd提供的方法获得现在已有的行数\r\n except IndexError:\r\n self.logger.error('append_mode: index 超出范围')\r\n sys.exit()\r\n self.table = self.workbook.get_sheet(index) # 用xlwt对象的方法获得要操作的sheet\r\n elif type(index) == str:\r\n try:\r\n self.row_num = openworkbook.sheet_by_name(index).nrows\r\n except xlrd.biffh.XLRDError:\r\n self.logger.error('sheet:' + index + ' 不存在')\r\n sys.exit()\r\n self.table = self.workbook.get_sheet(sheet_names.index(index))\r\n if head and self.row_num == 0: # 写表头,文件存在但是要追加的sheet row_num为0\r\n self.write_head(head)\r\n else:\r\n self.write_mode(file, head, encoding, index)\r\n\r\n '''\r\n ===========================以下为读======================================\r\n '''\r\n\r\n ''' \r\n 读所有行返回二维数组\r\n @param start int 起始行,默认从头开始\r\n @return result二维数组 [[1,2],[1,2]]\r\n '''\r\n\r\n def read_lines_to_list(self, start=0):\r\n rows = self.table.nrows\r\n result = []\r\n for i in range(start, rows):\r\n result.append(self.table.row_values(i))\r\n self.logger.debug(self.table.row_values(i))\r\n return result\r\n\r\n def read_lines_for_Multithreading(self):\r\n self\r\n\r\n '''\r\n 读取整个excel\r\n @:return dict {'sheetname1':[{},{}],'sheetname2':[{},{}]}\r\n '''\r\n\r\n def read_all_sheet_to_dict(self):\r\n names = self.workbook.sheet_names()\r\n temp = {}\r\n for name in names:\r\n self.table = self.workbook.sheet_by_name(name)\r\n try:\r\n temp[name] = self.read_lines_to_dic()\r\n self.logger.debug('sheetname: ' + name)\r\n for x in temp[name]:\r\n self.logger.debug(x)\r\n except IndexError:\r\n self.logger.warning('sheet:' + name + '中没数据')\r\n return temp\r\n\r\n '''\r\n 读取整个excel\r\n @:return dict {'sheetname1':[[],[]],'sheetname2':[[],[]]}\r\n '''\r\n\r\n def read_all_sheet_to_list(self):\r\n names = self.workbook.sheet_names()\r\n temp = {}\r\n for name in names:\r\n self.table = 
    '''\r\n    =========================== reads ======================================\r\n    '''\r\n\r\n    '''\r\n    Read every row into a 2D list\r\n    @param start int first row to read; starts from the top by default\r\n    @return result 2D list, e.g. [[1,2],[1,2]]\r\n    '''\r\n\r\n    def read_lines_to_list(self, start=0):\r\n        rows = self.table.nrows\r\n        result = []\r\n        for i in range(start, rows):\r\n            result.append(self.table.row_values(i))\r\n            self.logger.debug(self.table.row_values(i))\r\n        return result\r\n\r\n    def read_lines_for_Multithreading(self):\r\n        pass  # unfinished stub; the original body was a bare `self` expression\r\n\r\n    '''\r\n    Read the whole workbook\r\n    @:return dict {'sheetname1':[{},{}],'sheetname2':[{},{}]}\r\n    '''\r\n\r\n    def read_all_sheet_to_dict(self):\r\n        names = self.workbook.sheet_names()\r\n        temp = {}\r\n        for name in names:\r\n            self.table = self.workbook.sheet_by_name(name)\r\n            try:\r\n                temp[name] = self.read_lines_to_dic()\r\n                self.logger.debug('sheetname: ' + name)\r\n                for x in temp[name]:\r\n                    self.logger.debug(x)\r\n            except IndexError:\r\n                self.logger.warning('sheet: ' + name + ' has no data')\r\n        return temp\r\n\r\n    '''\r\n    Read the whole workbook\r\n    @:return dict {'sheetname1':[[],[]],'sheetname2':[[],[]]}\r\n    '''\r\n\r\n    def read_all_sheet_to_list(self):\r\n        names = self.workbook.sheet_names()\r\n        temp = {}\r\n        for name in names:\r\n            self.table = self.workbook.sheet_by_name(name)\r\n            temp[name] = self.read_lines_to_list()\r\n            if len(temp[name]) > 0:\r\n                self.logger.debug('sheetname: ' + name)\r\n                for x in temp[name]:\r\n                    self.logger.debug(x)\r\n            else:\r\n                temp.pop(name)\r\n        return temp\r\n\r\n    '''\r\n    Turn the sheet into a list of dicts; only sheets with a header row can use this method\r\n    @param start first row, defaults to row 2\r\n    @param Thread_count number of reader groups, single-threaded by default\r\n\r\n    @return l [{},{}] when Thread_count=1, [[{},{}],[{},{}]] when Thread_count>1\r\n    '''\r\n\r\n    def read_lines_to_dic(self, start=1, Thread_count=1):\r\n        index = start\r\n        heads = self.table.row_values(0)\r\n        rows = self.table.nrows\r\n        l = []\r\n        for row in range(start, rows):\r\n            values = self.table.row_values(row)\r\n            dic = {}\r\n            for key, col in zip(heads, range(len(heads))):\r\n                dic[key] = values[col]\r\n            dic['_index'] = index  # add one key/value pair per row: _index = row number\r\n            index = index + 1\r\n            l.append(dic)\r\n            self.logger.debug(dic)\r\n        # group the rows for multithreaded reading (slice_list is defined at module level below)\r\n        if Thread_count > 1:\r\n            l = slice_list(l, Thread_count)\r\n        return l\r\n\r\n    '''\r\n    Filter the data by header name\r\n    @:param head ['',''] list of column headers to keep; headers missing from the sheet are skipped\r\n    @:param start first row, 1 by default (the first row is 0)\r\n    @:param Thread_count number of groups, 1 by default\r\n\r\n    @:return result [{},{}] when Thread_count==1, [[{},{}],[{},{}]] when Thread_count>1\r\n    '''\r\n\r\n    def read_lines_to_dic_by_head(self, head, start=1, Thread_count=1):\r\n        result = []\r\n        rows = self.read_lines_to_dic(start=start, Thread_count=Thread_count)\r\n        for row in rows:\r\n            new_dic = {}\r\n            # grouped data\r\n            if Thread_count > 1:\r\n                new_list = []\r\n                for r in row:\r\n                    list_dic = {}\r\n                    for h in head:\r\n                        try:\r\n                            list_dic[h] = r[h]\r\n                        except KeyError:\r\n                            self.logger.warning('read_lines_to_dic_by_head: header does not exist: ' + h)\r\n                    list_dic['_index'] = r['_index']\r\n                    new_list.append(list_dic)\r\n                result.append(new_list)\r\n                self.logger.debug(new_list)\r\n            # a single group\r\n            elif Thread_count == 1:\r\n                for h in head:\r\n                    try:\r\n                        new_dic[h] = row[h]\r\n                    except KeyError:\r\n                        self.logger.warning('read_lines_to_dic_by_head: header does not exist: ' + h)\r\n                self.logger.debug(new_dic)\r\n                result.append(new_dic)\r\n        return result\r\n\r\n    '''\r\n    Read data by column index\r\n    @param cols list columns to take, the first column is 0; out-of-range columns are skipped, e.g. [0,2]\r\n    @param start int first row number, the first row is 0\r\n\r\n    @return l list [[],[]]\r\n    '''\r\n\r\n    def read_lines_to_list_by_cols(self, cols, start=0):\r\n        l = []\r\n        for line in self.read_lines_to_list(start):\r\n            new_line = []\r\n            for index in cols:\r\n                try:\r\n                    new_line.append(line[index])\r\n                except IndexError:\r\n                    # the index is outside the sheet\r\n                    self.logger.warning('read_lines_to_list_by_cols: column ' + str(index) + ' is out of range')\r\n            l.append(new_line)\r\n            self.logger.debug(new_line)\r\n        return l\r\n
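    # --- Editor's note (illustrative, not in the original file): for a sheet\r\n    # whose header row is ['a', 'b'] and whose first data row is [1, 2]:\r\n    #     read_lines_to_dic()               -> [{'a': 1, 'b': 2, '_index': 1}]\r\n    #     read_lines_to_dic(Thread_count=2) -> [[{'a': 1, 'b': 2, '_index': 1}]]  (grouped)\r\n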
    '''\r\n    =================== writes ===============================\r\n    '''\r\n\r\n    '''\r\n    Write one row at the given row number, then optionally save the file\r\n    @:param row_num int row number, the first row is 0\r\n    @:param row_value list [1,2,3]\r\n    @:param save bool\r\n    '''\r\n\r\n    def write_line_by_nrow(self, row_num, row_value, save=True):\r\n        for value, col_num in zip(row_value, range(len(row_value))):\r\n            self.table.write(row_num, col_num, value)\r\n        self.logger.debug(row_value)\r\n        if save:\r\n            self.save()\r\n\r\n    '''\r\n    Save the workbook\r\n    '''\r\n\r\n    def save(self):\r\n        try:\r\n            self.workbook.save(self.file)\r\n        except PermissionError:\r\n            print(\"the file is still open, or there is no permission to save it\")\r\n\r\n    '''\r\n    Write the next row\r\n    @:param row_value list [1,2,3]\r\n    @:param save bool True to save, False not to; saves by default\r\n    '''\r\n\r\n    def write_nextline(self, row_value, save=True):\r\n        if type(row_value) == list:\r\n            self.write_line_by_nrow(self.row_num, row_value, save=save)\r\n        elif type(row_value) == dict:\r\n            if not self.head_list:\r\n                self.head_list = list(row_value.keys())\r\n                self.write_head(self.head_list)\r\n            temp = []\r\n            for head in self.head_list:\r\n                temp.append(str(row_value[head]))\r\n            self.write_line_by_nrow(self.row_num, temp, save=save)\r\n        self.row_num = self.row_num + 1\r\n\r\n    def write_lines_from_dict(self, data, save=True):\r\n        ''' Write several rows from a list of dicts; the first dict's keys become the header row '''\r\n        for row in data:\r\n            if self.head_flag:\r\n                temp = list(row.keys())\r\n                for key in temp:\r\n                    self.head_indexs[key] = temp.index(key)\r\n                self.head_flag = False\r\n                self.write_head(temp, save)\r\n            for r in row:\r\n                self.table.write(self.row_num, self.head_indexs[r], row[r])\r\n            self.row_num = self.row_num + 1\r\n        if save:\r\n            self.save()\r\n\r\n    '''\r\n    Write several rows (experimental)\r\n    @:param data list must be in [[1,2],[1,2]] form\r\n    @:param save bool True to save, False not to; saves by default\r\n    '''\r\n\r\n    def write_lines_from_list(self, data, save=True):\r\n        for values in data:\r\n            if type(values) == tuple:\r\n                values = list(values)\r\n            if type(values) == list:\r\n                self.write_nextline(values, save=save)\r\n            else:\r\n                self.logger.warning(str(values) + ' is not a list')\r\n                continue\r\n        if save:\r\n            self.save()\r\n\r\n    '''\r\n    Write the header row\r\n    @:param row_value list must be in [1,2,3] form\r\n    @:param save bool True to save, False not to; saves by default\r\n    '''\r\n\r\n    def write_head(self, row_value, save=True):\r\n        self.head_list = row_value\r\n        self.write_line_by_nrow(self.row_num, row_value, save=save)\r\n        self.row_num = self.row_num + 1  # advance the row counter past the header\r\n\r\n\r\n'''\r\nSplit one list into several roughly equal lists, for multithreaded use\r\n@param full_list the list to split\r\n@param count number of slices; the result does not always match count exactly,\r\n    e.g. 4 rows split into 3 slices give only 2 slices of 2 rows each\r\n@return [[],[]]\r\n'''\r\n\r\n\r\n# Restored from the commented-out draft, as a module-level function:\r\n# read_lines_to_dic() calls slice_list(), which otherwise raised a NameError.\r\ndef slice_list(full_list, count):\r\n    result = []\r\n    _len = len(full_list)\r\n    # round up\r\n    every_list_len = math.ceil(_len / count)\r\n    start_index = 0\r\n    for x in range(count):\r\n        temp = full_list[start_index:start_index + every_list_len]\r\n        if len(temp) > 0:\r\n            result.append(temp)\r\n        start_index = start_index + every_list_len\r\n    return result\r\n\r\n\r\nif __name__ == '__main__':\r\n    util = excelutil('1557196405.xls', 'w')\r\n    a = {}\r\n    a['b'] = 1\r\n    a['c'] = 2\r\n    b = {}\r\n    b['b'] = \"sdfsdf\"\r\n    b['c'] = 123123\r\n    l = []\r\n    l.append(a)\r\n    l.append(b)\r\n    util.write_lines_from_dict(l, False)\r\n    util.write_lines_from_dict(l, True)\r\n","sub_path":"Desktop/Yingkit_YUel/628/socket_learning/excelutil.py","file_name":"excelutil.py","file_ext":"py","file_size_in_byte":15108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
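# Editor's sketch (not a dataset record): the grouping rule the restored
# slice_list above implements — ceil-divide the length, then drop empty
# slices. A standalone, runnable check of that behaviour:
import math

def slice_list(full_list, count):
    size = math.ceil(len(full_list) / count)
    slices = [full_list[i:i + size] for i in range(0, len(full_list), size)]
    return [s for s in slices if s]

assert slice_list([1, 2, 3, 4], 3) == [[1, 2], [3, 4]]  # only 2 slices, as documented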
+{"seq_id":"60883470","text":"from sklearn.linear_model import Perceptron\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix, classification_report\n'''\n    Runs the Perceptron Classifier\n'''\n\n\nclass _Perceptron:\n    def __init__(self, file_name, X_train, y_train, X_test, y_test):\n        self.f = open(file_name, 'w')\n        self.Perceptron_Classifier_3_positive(X_train, y_train, X_test, y_test)\n        self.Perceptron_Classifier_3_negative(X_train, y_train, X_test, y_test)\n\n    def Perceptron_Classifier_3_positive(self, X_train, y_train, X_test,\n                                         y_test):\n        '''\n        Make binary classification of positive or negative\n        Considering star rating 3 as positive\n        Perceptron Classifier\n        '''\n        percep = Perceptron(\n            max_iter=1000, eta0=0.1, tol=0.0001, random_state=0)\n        # Map the star ratings to binary labels\n        new_y_train1 = self.process_Y1(y_train)\n        new_y_test1 = self.process_Y1(y_test)\n        percep.fit(X_train, new_y_train1)\n        y_pred = percep.predict(X_test)\n        self.write_to_file(\"\\n\")\n        self.write_to_file(\"Star Rating 3 considered as positive\")\n        self.write_to_file(\"\\n\")\n        self.write_to_file(\n            'Perceptron Accuracy: %.2f' % accuracy_score(new_y_test1, y_pred))\n        self.write_to_file(\"\\n\")\n        self.write_to_file('Classification Report:')\n        self.write_to_file(classification_report(new_y_test1, y_pred))\n\n    def process_Y1(self, y_):\n        '''\n        Considering 3 as positive\n        '''\n        newY = []\n        for i in range(len(y_)):\n            if float(y_[i]) < 3.0: newY.append(-1)\n            else: newY.append(1)\n        return newY\n\n    def Perceptron_Classifier_3_negative(self, X_train, y_train, X_test,\n                                         y_test):\n        '''\n        Make binary classification of positive or negative\n        Considering star rating 3 as negative\n        Run the Perceptron classifier and print accuracy\n        '''\n        percep = Perceptron(\n            max_iter=1000, eta0=0.1, tol=0.0001, random_state=0)\n        new_y_train2 = self.process_Y2(y_train)\n        new_y_test2 = self.process_Y2(y_test)\n        percep.fit(X_train, new_y_train2)\n        y_pred = percep.predict(X_test)\n        self.write_to_file(\"\\n\")\n        self.write_to_file(\"Star Rating 3 considered as negative\")\n        self.write_to_file(\"\\n\")\n        self.write_to_file(\n            'Perceptron Accuracy: %.2f' % accuracy_score(new_y_test2, y_pred))\n        self.write_to_file(\"\\n\")\n        self.write_to_file('Classification Report:')\n        self.write_to_file(classification_report(new_y_test2, y_pred))\n\n    def process_Y2(self, y_):\n        '''\n        Considering 3 as negative\n        '''\n        newY = []\n        for i in range(len(y_)):\n            if float(y_[i]) <= 3.0: newY.append(-1)\n            else: newY.append(1)\n        return newY\n\n    def write_to_file(self, text):\n        self.f.write(text)","sub_path":"src/classifiers/classifier_perceptron.py","file_name":"classifier_perceptron.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"322684015","text":"import numpy as np\nimport scipy, fileio, feature, syllable\nfrom numpy import linalg\nfrom scipy import fftpack\nfrom pylab import *\n\nSAMPLES_DIR = \"./audio/samples/\"\nWORD_DIR = \"./audio/\"\nSYLLABLE_DIR = \"./audio/syllables/\"\nBLIND_DIR = \"./audio/blinddata/\"\n'''\ndef single_wav_analysis():\n    audiofiles = fileio.readAllFilesInDirectory(\"./audio/\")\n    sr = audiofiles[0][0].sr\n    for file in audiofiles:\n        title(file[1])\n        amp, freq = syllable.segmentation(file[0])\n        windowed_amp = syllable.moving_window(amp/linalg.norm(amp), 10)\n        plot(windowed_amp, label=\"amp\")\n        #plot(syllable.moving_average(freq/linalg.norm(freq), 10),label=\"freq\")\n        plot(np.repeat(np.mean(windowed_amp),len(amp)), label='avga')\n        maximi = syllable.local_max_locations(windowed_amp)\n        print(maximi)\n        #print(syllable.filter_distinct_peaks(windowed_amp, maximi))\n        #plot(np.repeat(np.mean(freq/linalg.norm(freq)),len(amp)),label='avgf')\n        show()\n        #title(file[1])\n        #plot(freq)\n        #avgfreq = np.mean(freq)\n        #plot(np.repeat(avgfreq, len(freq)))\n#single_wav_analysis()\n#syllable_debug_test()\ndef testing():\n    audiofiles = fileio.readAllFilesInDirectory(BLIND_DIR)\n    e = None;\n    for file in audiofiles:\n        #candidates = feature.partial_logic_1(file[0])\n        #print(candidates)\n        if e is None:\n            e = feature.abstract_cartoon(file[0])\n            #plot(e)\n        else:\n            f = feature.abstract_cartoon(file[0])\n            #plot(f)\n            e+=f\n        #print(feature.abstract_cartoon(file[0]))\n    test = feature.abstract_cartoon(fileio.Wave(\"test.wav\").data)\n    e = feature.normalize(e)\n    test = feature.align_peaks(test, e, 2)\n    plot(e)\n    plot(test)\n    show()\n'''\n#testing()\ndef pipeline(sampleData=None, sampleFreq=None):\n    if sampleData is None:\n
sampleData=fileio.populateSampleData(SAMPLES_DIR)\n audiofile = fileio.Wave(\"test.wav\")\n candidates = feature.partial_logic_1(audiofile)\n #print(candidates)\n feature.partial_logic_2(candidates, audiofile, sampleData, sampleFreq)\n\n#plotting aux:\ndef plotstuff(filename):\n a=fileio.Wave(\"audio/samples/\"+filename+\".wav\")\n am, fr = syllable.segmentation(a)\n title(filename)\n plot(feature.normalize(syllable.moving_window(am, 10)), label=\"amp\")\n #plot(feature.normalize(syllable.moving_window(fr, 10)), label=\"freq\")\n #plot(feature.normalize(am), label=\"amp\")\n #plot(feature.normalize(fr), label=\"freq\")\n legend(framealpha=0.5);\n show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"142600235","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nh=np.linspace(-10,10,50)\nout=tf.nn.relu(h)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n y=sess.run(out)\n plt.xlabel('Activity of Neuron')\n plt.ylabel('Output of Neuron')\n plt.title('Relu Activation Function')\n plt.plot(h,y)\n plt.show()\n","sub_path":"ReluFunction.py","file_name":"ReluFunction.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"406945035","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport glob\nimport pprint\n\nimport radical.utils as ru\nimport radical.saga as rs\nimport radical.pilot as rp\n\n\nglobal p_map\np_map = dict() # pilot: [task, task, ...]\n\n\n# ------------------------------------------------------------------------------\n#\ndef unit_state_cb(unit, state):\n\n # terminate pilots once all masters running it are completed,\n\n global p_map\n\n if state not in rp.FINAL:\n return True\n\n print('unit state: %s -> %s' % (unit.uid, state))\n pilot = None\n for p in p_map:\n for u in p_map[p]:\n if u.uid == unit.uid:\n pilot = p\n break\n if pilot:\n break\n\n # every master should be associated with one pilot\n assert(pilot), [pilot.uid, unit.uid, pprint.pformat(pilot.as_dict())]\n\n to_cancel = True\n for u in p_map[pilot]:\n if u.state not in rp.FINAL:\n to_cancel = False\n break\n\n if to_cancel:\n print('cancel pilot %s' % pilot.uid)\n pilot.cancel()\n else:\n print('pilot remains active: %s' % pilot.uid)\n\n return True\n\n\n# ------------------------------------------------------------------------------\n#\ndef check_runs(cfg_file, run_file):\n\n runs = list()\n n_smiles = dict()\n\n rec_path = 'input/receptors.v8/' # FIXME\n smi_path = 'input/smiles/' # FIXME\n\n cfg = ru.Config(cfg=ru.read_json(cfg_file))\n res_path = cfg.fs_url + cfg.workload.results\n \n fs = rs.filesystem.Directory(res_path)\n \n with open(run_file, 'r') as fin:\n \n for line in fin.readlines():\n \n line = line.strip()\n \n if not line:\n continue\n \n if line.startswith('#'):\n continue\n \n elems = line.split()\n \n assert(len(elems) == 4), line\n \n receptor = str(elems[0])\n smiles = str(elems[1])\n nodes = int(elems[2])\n runtime = int(elems[3])\n \n assert(receptor)\n assert(smiles)\n assert(nodes)\n assert(runtime)\n\n print('%s/%s.oeb' % (rec_path, receptor))\n print('%s/%s.csv' % (smi_path, smiles))\n assert(os.path.isfile('%s/%s.oeb' % (rec_path, receptor))), receptor\n assert(os.path.isfile('%s/%s.csv' % (smi_path, smiles))), smiles\n\n fname = '%s_-_%s.idx' % (receptor, smiles)\n 
pname = '%s/%s' % (smiles, fname)\n lname = '/tmp/%s' % (fname)\n \n if not fs.is_file(pname):\n n_have = 0\n else:\n ret = fs.list(pname)\n fs.copy(pname, 'file://localhost/%s' % lname)\n out, err, ret = ru.sh_callout('wc -l %s | cut -f 1 -d \" \"' % lname,\n shell=True)\n n_have = int(out)\n \n \n if smiles in n_smiles:\n n_need = n_smiles[smiles]\n \n else:\n sname = '%s/%s.csv' % (smi_path, smiles)\n out, err, ret = ru.sh_callout('wc -l %s | cut -f 1 -d \" \"' % sname, \n shell=True)\n n_need = int(out) - 1\n n_smiles[smiles] = n_need\n \n if n_need > n_have:\n perc = int(100 * n_have / n_need)\n print('run %-30s %-25s [%3d%%]' % (receptor, smiles, perc))\n runs.append([receptor, smiles, nodes, runtime])\n else:\n print('skip %-30s %-25s [100%%]' % (receptor, smiles))\n \n \n return runs\n\n\n\n# ------------------------------------------------------------------------------\n#\nif __name__ == '__main__':\n\n cfg_file = sys.argv[1] # resource and workload config\n run_file = sys.argv[2] # runs for this campaign\n session = None\n\n try:\n\n cfg = ru.Config(cfg=ru.read_json(cfg_file))\n runs = check_runs(cfg_file, run_file)\n\n if not runs:\n print('nothing to run')\n sys.exit()\n\n session = rp.Session()\n pmgr = rp.PilotManager(session=session)\n umgr = rp.UnitManager(session=session)\n \n umgr.register_callback(unit_state_cb)\n\n\n # for each run in the campaign:\n # - create pilot of requested size and runtime\n # - create cfg with requested receptor and smiles\n # - submit configured number of masters with that cfg on that pilot\n subs = dict()\n d = rs.filesystem.Directory('ssh://frontera/scratch1/07305/rpilot/workflow-0-results')\n ls = [str(u).split('/')[-1] for u in d.list()]\n\n workload = cfg.workload\n uid_cnt = 0\n for receptor, smiles, nodes, runtime in runs:\n\n print('%30s %s' % (receptor, smiles))\n name = '%s_-_%s' % (receptor, smiles)\n tgt = '%s.%s.gz' % (name, workload.output)\n rec = False\n\n if tgt in ls:\n if workload.recompute:\n rec += 1\n d.move(tgt, tgt + '.bak')\n else:\n print('skip 1 %s' % name)\n continue\n\n if smiles in ls:\n if smiles not in subs:\n subs[smiles] = [str(u).split('/')[-1] for u in d.list('%s/*' % smiles)]\n if tgt in subs[smiles]:\n if workload.recompute:\n rec += 2\n d.move('%s/%s' % (smiles, tgt),\n '%s/%s.bak' % (smiles, tgt))\n else:\n print('skip 2 %s' % name)\n continue\n\n # if os.path.exists('results/%s.%s.gz' % (name, wofkload.output)):\n # print('skip 3 %s' % name)\n # continue\n\n if rec: print('recompute %d %s' % (rec, name))\n else : print('compute 2 %s' % name)\n\n cpn = cfg.cpn\n gpn = cfg.gpn\n n_masters = cfg.n_masters\n half_nodes = int((nodes - cfg.n_agents) / 2)\n\n cfg.workload.receptor = receptor\n cfg.workload.smiles = smiles\n cfg.workload.name = name\n cfg.nodes = nodes\n cfg.runtime = runtime\n cfg.n_workers = (int(half_nodes / n_masters) - 1) # * 2 \n print('n_masters: %d' % cfg.n_masters)\n print('n_workers: %d' % cfg.n_workers)\n\n ru.write_json(cfg, 'configs/wf0.%s.cfg' % name)\n\n pd = rp.ComputePilotDescription(cfg.pilot_descr)\n pd.cores = nodes * 56 # FIXME: cpn\n pd.gpus = nodes * gpn\n pd.runtime = runtime\n\n pilot = pmgr.submit_pilots(pd)\n pid = pilot.uid\n\n umgr.add_pilots(pilot)\n\n tds = list()\n\n for i in range(n_masters):\n td = rp.ComputeUnitDescription(cfg.master_descr)\n td.executable = \"python3\"\n td.arguments = ['wf0_master.py', i]\n td.cpu_threads = cpn\n td.pilot = pid\n td.uid = 'master.%06d' % uid_cnt\n td.input_staging = [{'source': cfg.master,\n 'target': 'wf0_master.py',\n 
'action': rp.TRANSFER,\n 'flags' : rp.DEFAULT_FLAGS},\n {'source': cfg.worker,\n 'target': 'wf0_worker.py',\n 'action': rp.TRANSFER,\n 'flags' : rp.DEFAULT_FLAGS},\n {'source': 'configs/wf0.%s.cfg' % name,\n 'target': 'wf0.cfg',\n 'action': rp.TRANSFER,\n 'flags' : rp.DEFAULT_FLAGS},\n {'source': workload.input_dir,\n 'target': 'input_dir',\n 'action': rp.LINK,\n 'flags' : rp.DEFAULT_FLAGS},\n {'source': workload.impress_dir,\n 'target': 'impress_md',\n 'action': rp.LINK,\n 'flags' : rp.DEFAULT_FLAGS},\n {'source': workload.oe_license,\n 'target': 'oe_license.txt',\n 'action': rp.LINK,\n 'flags' : rp.DEFAULT_FLAGS},\n ]\n # td.output_staging = [{'source': '%s.%s.gz' % (name, workload.output),\n # 'target': 'results/%s.%s.gz' % (name, workload.output),\n # 'action': rp.TRANSFER,\n # 'flags' : rp.DEFAULT_FLAGS}]\n tds.append(td)\n uid_cnt += 1\n\n for i in range(half_nodes + 1):\n for j in range(cpn):\n td = rp.ComputeUnitDescription(cfg.master_descr)\n td.executable = '/home1/07305/rpilot/hello_rp.sh'\n td.arguments = [600]\n td.sandbox = 'hello_rp'\n td.pilot = pid\n\n tds.append(td)\n uid_cnt += 1\n\n tasks = umgr.submit_units(tds)\n p_map[pilot] = tasks\n\n # all pilots and masters submitted - wait for them to finish\n umgr.wait_units()\n\n finally:\n if session:\n session.close(download=False)\n\n\n# ------------------------------------------------------------------------------\n\n","sub_path":"workflow-0/wf0_oe_frontera/impeccable/impeccable.py","file_name":"impeccable.py","file_ext":"py","file_size_in_byte":10027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"375492548","text":"from graph import Graph\nfrom util import Queue\n\ndef earliest_ancestor(ancestors, starting_node):\n g = ancestors_graph(ancestors)\n\n if len(g.get_neighbors(starting_node)) == 0:\n return -1\n\n distance = 0\n distances = {}\n \n q = Queue()\n q.enqueue([starting_node, distance])\n\n while q.size() > 0:\n person, curr_distance = q.dequeue()\n\n if curr_distance in distances:\n distances[curr_distance].append(person)\n\n else:\n distances[curr_distance] = [person]\n\n neighbors = g.get_neighbors(person)\n if len(neighbors) > 0:\n distance += 1\n for relative in neighbors:\n q.enqueue([relative, distance])\n \n farthest = max(distances.keys())\n result = min(distances[farthest])\n\n return result\n\ndef ancestors_graph(ancestors):\n g = Graph()\n for ancestor in ancestors:\n parent = ancestor[0]\n child = ancestor[1]\n if parent not in g.vertices:\n g.add_vertex(parent)\n if child not in g.vertices:\n g.add_vertex(child)\n g.add_edge(child, parent)\n return g\n\ntest_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\nancestors_graph(test_ancestors)","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"448157172","text":"from Activator import *\n\nclass Switch(Activator):\n\tdef __init__(self, x, y, width, height, id=None):\n\t\tsuper().__init__(x, y, width, height, id, (0,255,0), (170,170,170))\n\t\tself.is_solid = False\n\t\t\n\tdef toggle(self):\n\t\tif self.is_active:\n\t\t\tself.deactivate()\n\t\telse:\n\t\t\tself.activate()","sub_path":"Switch.py","file_name":"Switch.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"368644723","text":"import 
sqlite3\n\n\nclass DataBaseOfContent:\n    def __init__(self, path_to_db=\"data/content.db\"):\n        self.path_to_db = path_to_db\n\n    @property\n    def connection(self):\n        return sqlite3.connect(self.path_to_db)\n\n    def execute(self, sql: str, parameters: tuple = None, fetchone=False, fetchall=False, commit=False):\n        if not parameters:\n            parameters = tuple()\n        connection = self.connection\n        connection.set_trace_callback(logger)\n        cursor = connection.cursor()\n        cursor.execute(sql, parameters)\n        data = None\n        if commit:\n            connection.commit()\n        if fetchone:\n            data = cursor.fetchone()\n        if fetchall:\n            data = cursor.fetchall()\n        connection.close()\n        return data\n\n    def create_table_content(self):\n        sql = \"\"\"\n        CREATE TABLE Content(\n        ID INTEGER PRIMARY KEY AUTOINCREMENT,\n        name_of_content VARCHAR(255) NOT NULL,\n        overview_of_content TEXT NOT NULL,\n        picture TEXT\n        );\n        \"\"\"\n        self.execute(sql, commit=True)\n\n    def add_content(self, name_of_content: str, overview_of_content: str, picture: str):\n        sql = \"INSERT INTO Content(name_of_content, overview_of_content, picture) VALUES(?, ?, ?)\"\n        parameters = (name_of_content, overview_of_content, picture)\n        self.execute(sql, parameters=parameters, commit=True)\n\n    def select_all_content(self):\n        sql = \"SELECT * FROM Content\"\n        return self.execute(sql, fetchall=True)\n\n    @staticmethod\n    def format_args(sql, parameters: dict):\n        # join with \" AND \" (spaces on both sides) so the clauses don't run together\n        sql += \" AND \".join([\n            f\"{item} = ?\" for item in parameters\n        ])\n        return sql, tuple(parameters.values())\n\n    def select_content(self, **kwargs):\n        # the WHERE clause is built entirely by format_args; the hard-coded\n        # \"name = ?\" fragment in the original produced invalid SQL\n        sql = \"SELECT * FROM Content WHERE \"\n        sql, parameters = self.format_args(sql, kwargs)\n        return self.execute(sql, parameters, fetchone=True)\n\n    def random_serial_from_db(self):\n        sql = \"SELECT * FROM Content ORDER BY RANDOM() LIMIT 1\"\n        return self.execute(sql, fetchone=True)\n\n    def count_content(self):\n        return self.execute(\"SELECT COUNT(*) FROM Content;\", fetchone=True)\n\n\ndef logger(statement):\n    print(f\"\"\"\n=====================================================================\nExecuting:\n{statement}\n=====================================================================\n\"\"\")\n","sub_path":"utils/db_api/database_of_content.py","file_name":"database_of_content.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
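# Editor's sketch (not a dataset record): what the fixed format_args above
# builds. The parameter names are illustrative, not from the source.
def format_args(sql, parameters):
    sql += " AND ".join(f"{item} = ?" for item in parameters)
    return sql, tuple(parameters.values())

sql, params = format_args("SELECT * FROM Content WHERE ",
                          {"name_of_content": "Dark", "ID": 3})
print(sql)     # SELECT * FROM Content WHERE name_of_content = ? AND ID = ?
print(params)  # ('Dark', 3)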
+{"seq_id":"261175242","text":"import cv2\nimport mediapipe as mp\nimport time\n\ncap = cv2.VideoCapture(0)\n\nmpHands = mp.solutions.hands\nhands = mpHands.Hands()\nmpDraw = mp.solutions.drawing_utils\n\nwhile True:\n    success, img = cap.read()\n    imgRBG = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n    results = hands.process(imgRBG)\n\n    if results.multi_hand_landmarks:\n        for handLms in results.multi_hand_landmarks:\n            # for id, lm in enumerate(handLms.landmark):\n\n            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)\n\n    cv2.imshow(\"Image\", img)\n    cv2.waitKey(1)\n","sub_path":"HandTrackingMinimum.py","file_name":"HandTrackingMinimum.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"619263129","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('pets', '0003_pet_growth_points'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Item',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='ItemType',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(max_length=255)),\n            ],\n        ),\n        migrations.AddField(\n            model_name='item',\n            name='owner',\n            field=models.ForeignKey(to='pets.ItemType'),\n        ),\n    ]\n","sub_path":"shapepets/pets/migrations/0004_auto_20151026_2326.py","file_name":"0004_auto_20151026_2326.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"457627481","text":"def slices(seq,n):\n\tif (n > len(seq)) or (n == 0):\n\t\traise ValueError\n\t\n\tvar = []\n\tfor i in range(len(seq)):\n\t\ttemp = []\n\t\tfor j in range(n):\n\t\t\tif (i+n <= len(seq)):\n\t\t\t\ttemp.append(int(seq[i+j]))\n\t\tvar.append(temp)\n\treturn [x for x in var if x]\n","sub_path":"all_data/exercism_data/python/series/ca158d73c35c442bbbcb080e47b782a2.py","file_name":"ca158d73c35c442bbbcb080e47b782a2.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"215830615","text":"\"\"\"\n7. (10 points)\nProblem: factor a positive integer into primes. For example: input 90, print 90=2*3*3*5.\n\nAnalysis: to factor n, first find the smallest prime k, then proceed as follows:\n(1) if that prime equals n, the factorisation is finished; just print it.\n(2) if n != k but k divides n, print k, take the quotient n / k as the new n,\nand repeat from step one.\n(3) if k does not divide n, use k + 1 as the new k and repeat from step one.\n\"\"\"\n\n\ndef is_prime_number(number=2):\n    \"\"\"\n    Is number a prime?\n    :param number:\n    :return:\n    \"\"\"\n    for i in range(2, number):\n        if number % i == 0:\n            return False\n\n    return True\n\n\ndef decompose_prime_factor(number=0, k=2):\n    if number == k:\n        ks.append(str(k))\n\n    elif not is_prime_number(k) or number % k != 0:\n        decompose_prime_factor(number, k + 1)\n\n    else:\n        ks.append(str(k))\n        decompose_prime_factor(number // k, 2)\n\n\nks = []\ndecompose_number = 138\ndecompose_prime_factor(decompose_number)\nprint(\"{}={}\".format(decompose_number, \"*\".join(ks)))\n","sub_path":"2019/AI/AIHomework/01_PythonBasis/06/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"213665126","text":"from datetime import datetime as dt\r\ndef myFunc(n):\r\n    x=n\r\n    maxPrime=n\r\n    while x>1:\r\n        while x%2==0:\r\n            maxPrime=2\r\n            x=x/2\r\n        i=3\r\n        while i=1):\n\tnotification.update(\"Meeting in \"+str(meetingtime)+\" mins!!\",\n\t\"AGENDA: \"+AGENDA+\"\\n VENUE: \"+VENUE+\"\\n TIME: \"+ START_TIME+\" - \"+END_TIME\n\t,\n\t\"dialog-information\")\n\ttime.sleep(60)\n\tnotification.show()\n\tmeetingtime-=1\n\ntry:\n    import argparse\n    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n    flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/calendar-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/calendar'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Fake Meeting Alerts'\n\n\ndef get_credentials():\n    home_dir = os.path.expanduser('~')\n    credential_dir = os.path.join(home_dir, '.credentials')\n    if not os.path.exists(credential_dir):\n        os.makedirs(credential_dir)\n    credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n    store = oauth2client.file.Storage(credential_path)\n    credentials = store.get()\n    if not credentials or credentials.invalid:\n        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n        
flow.user_agent = APPLICATION_NAME\n        if flags:\n            credentials = tools.run_flow(flow, store, flags)\n        else:  # Needed only for compatibility with Python 2.6\n            credentials = tools.run(flow, store)\n        print('Storing credentials to ' + credential_path)\n    return credentials\n\ndef main():\n    credentials = get_credentials()\n    http = credentials.authorize(httplib2.Http())\n    service = discovery.build('calendar', 'v3', http=http)\n\n    event = {\n        'summary': AGENDA,\n        'location': VENUE,\n        'start': {\n            'dateTime': START_TIME,\n        },\n        'end': {\n            'dateTime': END_TIME,\n        },\n        'attendees': [\n            {'email': 'lpage@example.com'},\n            {'email': 'sbrin@example.com'},\n        ],\n        'reminders': {\n            'useDefault': True,\n        },\n    }\n    event = service.events().insert(calendarId='primary', body=event).execute()\n    print('Event created: %s' % (event.get('htmlLink')))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"545413140","text":"import sys\nsys.path.append('/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages')\nfrom conllu import parse\n\n\nwith open('ja_gsd-ud-test.conllu') as f:\n    test_text = f.read()\n\nwith open('correct_tokenized_test.txt', 'w') as f:\n    for sentence in parse(test_text):\n        f.write(' '.join([token['form'] for token in sentence]) + '\\n')\n\n\n\n'''def extract_dictionary(filename):\n    result = list()\n    for tokenlist in readfile(filename):\n        for token in tokenlist:\n            if len(token[\"form\"]) > 1:\n                result.append(token[\"form\"])\n\n    result = sorted(list(set(result)))\n    return list(result)\n\nprint (extract_dictionary('ja_gsd-ud-test.conllu'))'''\n\n'''with open('correct_tokenized_test.txt', 'w') as f:\n    d = extract_dictionary('ja_gsd-ud-test.conllu')\n    for i in d:\n        f.write(d)'''\n","sub_path":"2018-komp-ling/practicals/tokenization/conllu_parser.py","file_name":"conllu_parser.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"339906150","text":"#-*- coding: utf-8 -*-\nimport os\nimport glob\n\nfrom PIL import Image\n\nfrom fpdf import FPDF\nfrom PyPDF2 import PdfFileReader, PdfFileMerger\n\n''' program name: PDFgen.py\n    Last version : 2017.04.15.001\n    2017.04.15.001 listing function replaced from os.listdir to glob.glob\n    2017.05.27.001 ImageList sorting\n    2017.05.27.002 input path check; if it does not exist, use the current directory as the input path\n\n1. convert images to individual pdfs\n2. 
merge all pdfs into one file\n\n'''\n\n# Working directory\nimage_directory=\"crop_output\"\npdf_directory =\"pdf_output\"\nfile_extensions=\".jpg\"\nmurged_fileName=\"final_merged, p\"\n# set margin\n# if margin is non-zero, apply it on both sides, as below:\n# pdf = FPDF(unit=\"pt\", format=[width + 2*margin, height + 2*margin])\nmargin=0 # fit pdf to image, unit: pt\n\n# Check the input image path; if it does not exist, use the current working directory (os.getcwd())\ndef ensure_input_path(source_path):\n    if not os.path.exists(os.path.join(\".\", source_path)):\n        return os.getcwd()\n    else:\n        return source_path\n\n\n# Create the PDF output path\ndef ensure_output_path(target_path):\n    if not os.path.exists(os.path.join(\".\", target_path)):\n        os.makedirs(target_path)\n        print(\"'{}' directory created.\".format(target_path))\n\n\n# Create PDFs\ndef convertImage2PDF(imagelist, pdf_directory):\n\tprint(\"Converting images to PDF ...\")\n\tfor imageFile in imagelist:\n\t\tcover=Image.open(imageFile)\n\t\twidth, height = cover.size\n\n\t\tpdf=FPDF(unit=\"pt\", format=[width, height])\n\t\tpdf.add_page()\n\n\t\tpdf.image(imageFile, margin, margin)\n\n\t\tfilename_ext=os.path.split(imageFile)[1] # \"p0001.jpg\"\n\t\tfilenameOnly=os.path.splitext(filename_ext)[0] # \"p0001\"\n\t\tfilename =filenameOnly+\".pdf\" # \"p0001.pdf\"\n\n\t\tpdf.output(os.path.join(pdf_directory, filename), \"F\")\n\n\n# Bind the PDFs into one file\ndef bindPDF(pdf_dir, output_filename):\n\tprint(\"Merging PDF files...\")\n\t# pdf_files=[f for f in os.listdir(pdf_dir) if f.endswith(\"pdf\")]\n\tpdf_files=sorted(glob.glob(pdf_dir+\"/*.pdf\")) # sort so the pages merge in order\n\tpageCount=len(pdf_files)\n\n\tmerger = PdfFileMerger()\n\tfor file in pdf_files:\n\t\t# merger.append(PdfFileReader(os.path.join(pdf_dir, file), \"rb\"))\n\t\tmerger.append(PdfFileReader(file, \"rb\"))\n\toutput_filename=output_filename+str(pageCount)+\".pdf\"\n\n\t# write the merged file into the current directory\n\tmerger.write(output_filename)\n\tprint(\"Done! '{0}' file created.\".format(output_filename))\n\n\n# Run the main routine\nimage_directory=ensure_input_path(image_directory)\nimageList =sorted(glob.glob(image_directory+\"/*\"+file_extensions))\n\nimageCount=len(imageList)\nif imageCount> 0:\n\tensure_output_path(pdf_directory)\n\n\t# Make PDFs\n\tprint(\"{0} images found in '{1}'\".format(imageCount, image_directory))\n\tconvertImage2PDF(imageList, pdf_directory)\n\n\t# Combine all pdfs into one\n\tbindPDF(pdf_directory, murged_fileName)\nelse:\n\tprint(\"{0} images not found. Check '{1}' folder.\".format(imageCount, image_directory))\n","sub_path":"PDFgen.py","file_name":"PDFgen.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
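# Editor's sketch (not a dataset record): the margin convention described in
# the comments above — a non-zero margin pads the page on all four sides.
# The filenames are illustrative assumptions.
from PIL import Image
from fpdf import FPDF

margin = 10  # pt
cover = Image.open("page.jpg")
width, height = cover.size
pdf = FPDF(unit="pt", format=[width + 2 * margin, height + 2 * margin])
pdf.add_page()
pdf.image("page.jpg", margin, margin)  # offset the image by the margin
pdf.output("page.pdf", "F")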
+{"seq_id":"474650396","text":"import numpy as np\nfrom sklearn.datasets import make_regression\nfrom scratch_ml.utils import train_test_split, mean_squared_error\nfrom scratch_ml.supervised_learning import RegressionTree\nimport matplotlib.pyplot as plt\n\n\ndef main():\n    print(\"Regressor Tree\")\n    x, y = make_regression(n_samples=200, n_features=1, noise=20)\n    x_train, x_test, y_train, y_test = train_test_split(x, y)\n\n    model = RegressionTree()\n    model.fit(x_train, y_train)\n    y_pred = model.predict(x_test)\n\n    cmap = plt.get_cmap('plasma')\n    mse = mean_squared_error(y_test, y_pred)\n    print(\"Mean Squared Error:\", mse)\n\n    # Plot the results\n    m1 = plt.scatter(366 * x_train, y_train, color=cmap(0.9), s=10)\n    m2 = plt.scatter(366 * x_test, y_test, color=cmap(0.5), s=10)\n    m3 = plt.scatter(366 * x_test, y_pred, color='black', s=10)\n    plt.suptitle(\"Regression Tree\")\n    plt.title(\"MSE: %.2f\" % mse, fontsize=10)\n    plt.xlabel('X')\n    plt.ylabel('Y')\n    plt.legend((m1, m2, m3), (\"Training data\", \"Test data\",\n               \"Prediction\"), loc='lower right')\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scratch_ml/demo/decision_tree_regressor.py","file_name":"decision_tree_regressor.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"46235315","text":"import os\nimport glob\nimport sys\nfrom subprocess import Popen, PIPE\nimport string\n\ndef get_user_words(user):\n    \"\"\"Get a user's word list\"\"\"\n    path = os.path.join('users', user + '.txt')\n\n    if os.path.isfile(path):\n        with open(path, 'r') as in_f:\n            chars = in_f.readlines()[0].replace('\\n', '')\n            if chars:\n                return set(chars.split(','))\n\n    return set()\n\ndef add_user_words(user, words):\n    \"\"\"Add words to a user's list\"\"\"\n    path = os.path.join('users', user + '.txt')\n    words |= get_user_words(user)\n\n    with open(path, 'w') as out_f:\n        out_f.write(','.join(words))\n\ndef clean_file(path):\n    \"\"\"Remove the non-Chinese words from a file\n\n    The function cleans the file, writes the cleaned version to a new\n    file, and returns the new path.\n    \"\"\"\n    letters = list(string.ascii_lowercase)\n    cleaned_words = []\n\n    with open(path, 'r') as in_f:\n        words = ('').join(in_f.readlines()).split()\n        for w in words:\n            if not [l for l in letters if l in w]:\n                cleaned_words.append(w)\n\n    cleaned_file = os.path.join('tmp', path)\n    with open(cleaned_file, 'w', encoding='utf8') as out_f:\n        for w in cleaned_words:\n            out_f.write(w + '\\n')\n\n    return cleaned_file\n\n
It is then better when \n given small files to give them all at once.\n \"\"\"\n paths = paths if type(paths) is list else [paths]\n paths = ','.join([os.path.join('..', path) for path in paths])\n process = Popen(['./segment.sh', paths], stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate()\n segmented_text = stdout.decode('UTF-8').split('\\n')\n \n return segmented_text\n\ndef add_file_to_user(user, path):\n \"\"\"Add Chinese characters to a user profile\n \n The file is first purge from all the [a-z] characters in order\n to not take into account the translations or whatever other text\n is as well in the file. This function need to be made more\n robust tho.\n \"\"\"\n path = clean_file(path)\n segmented = ' '.join(segment_files(path))\n segmented = set(segmented.replace(' ', ','))\n add_user_words(user, segmented)\n\nif __name__ == \"__main__\":\n path = sys.argv[1]\n user = 'rinku'\n add_file_to_user(user, path)\n\n # add_user_words(user, words)\n # l = {'了','子','好','吗','白','勺','的','干','王','玉','水','氵','火','灬','汁','厂','厅','在','小','尔','他','她','日','月','明','宀','字','豕','家','文','辶','这','过','八','天','土','又','双','少','妙','尝','学','帅','巾','丑','豆','矢','短','口','叫','夕','名','千','舌','不','杯','七','艮','很','亿','万','几','亮','上','下','卡','门','们','可','哥','卜','仆','耳','取','最','斤','近','尸','匕','呢','己','见','觉','外','心','关','止','正','反','人','太','十','木','林','森','从','众','丛'}\n","sub_path":"known_car.py","file_name":"known_car.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"132794549","text":"import dendropy\nimport sys\nfrom dendropy.calculate import treecompare\n\n\nif len(sys.argv) > 2:\n\n f = open('compare.csv', 'w+')\n \n t1 = dendropy.Tree.get(data=sys.argv[1], schema=\"newick\", rooting=\"default-rooted\")\n t2 = dendropy.Tree.get(data=sys.argv[2], schema=\"newick\", rooting=\"default-rooted\")\n \n bi1 = t1.bipartition_edge_map\n bi2 = t2.bipartition_edge_map\n\n common = {}\n \n for b1 in bi1:\n if (not b1.is_trivial()):\n if (bi1[b1].length is not None):\n f.write(str(bi1[b1].length))\n else:\n f.write('0.0')\n f.write(\",\")\n if (b1 in bi2) and (bi2[b1].length is not None):\n f.write(str(bi2[b1].length))\n common[b1] = bi2[b1]\n elif (b1 in bi2) and (bi2[b1].length is None):\n f.write('0.0')\n common[b1] = bi2[b1]\n else:\n f.write(\"NA\")\n\n f.write(\"\\n\")\n\n for b2 in bi2:\n\n if (b2 not in common) and (not b2.is_trivial()):\n f.write(\"NA,\")\n\n if (bi2[b2].length is not None):\n f.write(str(bi2[b2].length))\n else:\n f.write('0.0')\n\n f.write(\"\\n\") \n\n f.close()\nelse:\n print(\"Enter two trees to compare as arguments\")\n","sub_path":"2-dendropy/aditikg2/compare_bipartiotions.py","file_name":"compare_bipartiotions.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"355233947","text":"\"\"\"Define endpoints for interacting with Tiles.\"\"\"\nfrom typing import Awaitable, Callable, Dict, List, Optional\n\n\nclass Tile: # pylint: disable=too-few-public-methods\n \"\"\"Define \"Tile\" endpoints.\"\"\"\n\n def __init__(\n self,\n request: Callable[..., Awaitable[dict]],\n *,\n user_uuid: Optional[str] = None,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self._request: Callable[..., Awaitable[dict]] = request\n self._user_uuid: Optional[str] = user_uuid\n\n async def all(\n self, whitelist: list = None, show_inactive: bool = False\n ) -> Dict[str, dict]:\n \"\"\"Get all Tiles for a user's 
account.\"\"\"\n list_data: dict = await self._request(\n \"get\", f\"users/{self._user_uuid}/user_tiles\"\n )\n\n tile_uuid_list: List[str] = [\n tile[\"tile_uuid\"]\n for tile in list_data[\"result\"]\n if not whitelist or tile[\"tileType\"] in whitelist\n ]\n\n tile_data: dict = await self._request(\n \"get\", \"tiles\", params=[(\"tile_uuids\", uuid) for uuid in tile_uuid_list]\n )\n\n return {\n tile_uuid: tile\n for tile_uuid, tile in tile_data[\"result\"].items()\n if show_inactive or tile[\"visible\"] is True\n }\n","sub_path":"pytile/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"5906009","text":"import mercantile\nfrom sqlalchemy.ext.mutable import MutableDict, MutableList\nfrom ml_enabler.models.utils import timestamp\nfrom geoalchemy2 import Geometry\nfrom sqlalchemy import or_, and_\nfrom geoalchemy2.functions import ST_AsGeoJSON\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.sql import func, text\nfrom sqlalchemy.sql.expression import cast\nimport sqlalchemy\nfrom flask_login import UserMixin\nfrom ml_enabler.models.dtos.dtos import (\n ProjectDTO,\n PredictionDTO,\n ProjectAccessDTO,\n UserDTO,\n)\nfrom ml_enabler import db\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = \"users\"\n\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String, unique=True)\n password = db.Column(db.String)\n name = db.Column(db.String)\n access = db.Column(db.String)\n\n def create(self, dto: UserDTO):\n self.name = dto.name\n self.email = dto.email\n self.access = dto.access\n self.password = dto.password\n\n results = db.session.execute(\n text(\n \"\"\"\n INSERT INTO users (name, email, access, password) VALUES (\n :name,\n :email,\n :access,\n crypt(:password, gen_salt('bf', 10))\n ) RETURNING id\n \"\"\"\n ),\n {\n \"name\": self.name,\n \"email\": self.email,\n \"access\": self.access,\n \"password\": self.password,\n },\n ).fetchall()\n\n db.session.commit()\n\n self.id = results[0][0]\n\n return self\n\n def list(user_filter: str, limit: int, page: int):\n \"\"\"\n Get all users in the database\n \"\"\"\n\n results = db.session.execute(\n text(\n \"\"\"\n SELECT\n count(*) OVER() AS count,\n id,\n name,\n access,\n email\n FROM\n users\n WHERE\n name iLIKE '%'||:filter||'%'\n OR email iLIKE '%'||:filter||'%'\n ORDER BY\n id ASC\n LIMIT\n :limit\n OFFSET\n :page\n \"\"\"\n ),\n {\"limit\": limit, \"page\": page * limit, \"filter\": user_filter},\n ).fetchall()\n\n return {\n \"total\": results[0][0] if len(results) > 0 else 0,\n \"users\": [\n {\"id\": u[1], \"name\": u[2], \"access\": u[3], \"email\": u[4]}\n for u in results\n ],\n }\n\n return results\n\n def password_check(self, test):\n results = db.session.execute(\n text(\n \"\"\"\n SELECT\n password = crypt(:test, password)\n FROM\n users\n WHERE\n id = :uid\n \"\"\"\n ),\n {\"test\": test, \"uid\": self.id},\n ).fetchall()\n\n return results[0][0]\n\n\nclass Prediction(db.Model):\n \"\"\" Predictions from a model at a given time \"\"\"\n\n __tablename__ = \"predictions\"\n\n id = db.Column(db.Integer, primary_key=True)\n created = db.Column(db.DateTime, default=timestamp, nullable=False)\n\n # One of 'prediction' or 'training' - On the backend these are essentially the same\n # but on the frontend, a training hint will not prompt for model upload\n hint = db.Column(db.String, nullable=False)\n\n imagery_id = db.Column(db.BigInteger, nullable=True)\n\n model_id 
= db.Column(\n db.BigInteger, db.ForeignKey(\"projects.id\", name=\"fk_models\"), nullable=False\n )\n\n version = db.Column(db.String, nullable=False)\n\n docker_url = db.Column(db.String)\n tile_zoom = db.Column(db.Integer, nullable=False)\n\n log_link = db.Column(db.String)\n model_link = db.Column(db.String)\n docker_link = db.Column(db.String)\n save_link = db.Column(db.String)\n tfrecord_link = db.Column(db.String)\n checkpoint_link = db.Column(db.String)\n inf_list = db.Column(db.String)\n inf_type = db.Column(db.String)\n inf_binary = db.Column(db.Boolean)\n inf_supertile = db.Column(db.Boolean)\n\n def create(self, prediction_dto: PredictionDTO):\n \"\"\" Creates and saves the current model to the DB \"\"\"\n\n self.imagery_id = prediction_dto.imagery_id\n self.model_id = prediction_dto.model_id\n self.hint = prediction_dto.hint\n self.version = prediction_dto.version\n self.docker_url = prediction_dto.docker_url\n self.tile_zoom = prediction_dto.tile_zoom\n self.inf_list = prediction_dto.inf_list\n self.inf_type = prediction_dto.inf_type\n self.inf_binary = prediction_dto.inf_binary\n self.inf_supertile = prediction_dto.inf_supertile\n\n db.session.add(self)\n db.session.commit()\n\n def link(self, update: dict):\n \"\"\" Update prediction to include asset links \"\"\"\n\n if update.get(\"logLink\") is not None:\n self.log_link = update[\"logLink\"]\n if update.get(\"modelLink\") is not None:\n self.model_link = update[\"modelLink\"]\n if update.get(\"dockerLink\") is not None:\n self.docker_link = update[\"dockerLink\"]\n if update.get(\"saveLink\") is not None:\n self.save_link = update[\"saveLink\"]\n if update.get(\"tfrecordLink\") is not None:\n self.tfrecord_link = update[\"tfrecordLink\"]\n if update.get(\"checkpointLink\") is not None:\n self.checkpoint_link = update[\"checkpointLink\"]\n\n db.session.commit()\n\n def save(self):\n \"\"\" Save changes to db\"\"\"\n db.session.commit()\n\n def export(self):\n return (\n db.session.query(\n PredictionTile.id,\n PredictionTile.quadkey,\n ST_AsGeoJSON(PredictionTile.geom).label(\"geometry\"),\n PredictionTile.predictions,\n PredictionTile.validity,\n )\n .filter(PredictionTile.prediction_id == self.id)\n .yield_per(100)\n )\n\n @staticmethod\n def get(prediction_id: int):\n \"\"\"\n Get prediction with the given ID\n :param prediction_id\n :return prediction if found otherwise None\n \"\"\"\n db.session.query(\n Prediction.id,\n Prediction.hint,\n Prediction.created,\n Prediction.docker_url,\n Prediction.model_id,\n Prediction.tile_zoom,\n Prediction.version,\n Prediction.log_link,\n Prediction.model_link,\n Prediction.docker_link,\n Prediction.save_link,\n Prediction.tfrecord_link,\n Prediction.checkpoint_link,\n Prediction.inf_list,\n Prediction.inf_type,\n Prediction.inf_binary,\n Prediction.inf_supertile,\n Prediction.imagery_id,\n ).filter(Prediction.id == prediction_id)\n\n return Prediction.query.get(prediction_id)\n\n @staticmethod\n def get_predictions_by_model(model_id: int):\n \"\"\"\n Gets predictions for a specified ML Model\n :param model_id: ml model ID in scope\n :return predictions if found otherwise None\n \"\"\"\n query = db.session.query(\n Prediction.id,\n Prediction.hint,\n Prediction.created,\n Prediction.docker_url,\n Prediction.model_id,\n Prediction.tile_zoom,\n Prediction.version,\n Prediction.log_link,\n Prediction.model_link,\n Prediction.docker_link,\n Prediction.save_link,\n Prediction.tfrecord_link,\n Prediction.checkpoint_link,\n Prediction.inf_list,\n Prediction.inf_type,\n 
Prediction.inf_binary,\n Prediction.inf_supertile,\n Prediction.imagery_id,\n ).filter(Prediction.model_id == model_id)\n\n return query.all()\n\n def delete(self):\n \"\"\" Deletes the current model from the DB \"\"\"\n db.session.delete(self)\n db.session.commit()\n\n @staticmethod\n def as_dto(prediction):\n \"\"\" Static method to convert the prediction result as a schematic \"\"\"\n\n prediction_dto = PredictionDTO()\n\n prediction_dto.prediction_id = prediction[0]\n prediction_dto.hint = prediction[1]\n prediction_dto.created = prediction[2]\n prediction_dto.docker_url = prediction[3]\n prediction_dto.model_id = prediction[4]\n prediction_dto.tile_zoom = prediction[5]\n prediction_dto.version = prediction[6]\n prediction_dto.log_link = prediction[7]\n prediction_dto.model_link = prediction[8]\n prediction_dto.docker_link = prediction[9]\n prediction_dto.save_link = prediction[10]\n prediction_dto.tfrecord_link = prediction[11]\n prediction_dto.checkpoint_link = prediction[12]\n prediction_dto.inf_list = prediction[13]\n prediction_dto.inf_type = prediction[14]\n prediction_dto.inf_binary = prediction[15]\n prediction_dto.inf_supertile = prediction[16]\n prediction_dto.imagery_id = prediction[17]\n\n return prediction_dto\n\n\nclass PredictionTile(db.Model):\n \"\"\" Store individual tile predictions \"\"\"\n\n __tablename__ = \"prediction_tiles\"\n\n id = db.Column(db.Integer, primary_key=True)\n\n prediction_id = db.Column(\n db.BigInteger,\n db.ForeignKey(\"predictions.id\", name=\"fk_predictions\"),\n nullable=False,\n )\n\n quadkey = db.Column(db.String, nullable=True)\n geom = db.Column(Geometry(\"POLYGON\", srid=4326), nullable=False)\n predictions = db.Column(postgresql.JSONB, nullable=False)\n validity = db.Column(MutableDict.as_mutable(postgresql.JSONB), nullable=True)\n\n prediction_tiles_quadkey_idx = db.Index(\n \"prediction_tiles_quadkey_idx\",\n \"prediction_tiles.quadkey\",\n postgresql_ops={\"quadkey\": \"text_pattern_ops\"},\n )\n\n @staticmethod\n def get(predictiontile_id: int):\n\n db.session.query(\n PredictionTile.id,\n PredictionTile.prediction_id,\n PredictionTile.validity,\n ).filter(PredictionTile.id == predictiontile_id)\n\n return PredictionTile.query.get(predictiontile_id)\n\n def update(self, validity):\n self.validity = validity\n\n db.session.commit()\n\n @staticmethod\n def inferences(prediction_id: int):\n results = db.session.execute(\n text(\n \"\"\"\n SELECT\n DISTINCT jsonb_object_keys(predictions)\n FROM\n prediction_tiles\n WHERE\n prediction_id = :pred\n \"\"\"\n ),\n {\"pred\": prediction_id},\n ).fetchall()\n\n inferences = []\n for res in results:\n inferences.append(res[0])\n\n return inferences\n\n @staticmethod\n def count(prediction_id: int):\n return (\n db.session.query(func.count(PredictionTile.geom).label(\"count\"))\n .filter(PredictionTile.prediction_id == prediction_id)\n .one()\n )\n\n @staticmethod\n def bbox(prediction_id: int):\n result = db.session.execute(\n text(\n \"\"\"\n SELECT\n ST_Extent(geom)\n FROM\n prediction_tiles\n WHERE\n prediction_id = :pred\n \"\"\"\n ),\n {\"pred\": prediction_id},\n ).fetchone()\n\n bbox = []\n for corners in result[0].replace(\"BOX(\", \"\").replace(\")\", \"\").split(\" \"):\n for corner in corners.split(\",\"):\n bbox.append(float(corner))\n\n return bbox\n\n def mvt(prediction_id: int, z: int, x: int, y: int):\n grid = mercantile.xy_bounds(x, y, z)\n\n result = db.session.execute(\n text(\n \"\"\"\n SELECT\n ST_AsMVT(q, 'data', 4096, 'geom', 'id') AS mvt\n FROM (\n SELECT\n p.id AS 
id,\n quadkey AS quadkey,\n predictions || COALESCE(v.validity, '{}'::JSONB) AS props,\n ST_AsMVTGeom(geom, ST_Transform(ST_MakeEnvelope(:minx, :miny, :maxx, :maxy, 3857), 4326), 4096, 256, false) AS geom\n FROM\n prediction_tiles AS p\n LEFT JOIN (\n SELECT\n id,\n JSONB_Object_Agg('v_'||key, value) AS validity\n FROM\n prediction_tiles,\n jsonb_each(validity)\n GROUP BY\n id\n ) AS v ON p.id = v.id\n WHERE\n p.prediction_id = :pred\n AND ST_Intersects(p.geom, ST_Transform(ST_MakeEnvelope(:minx, :miny, :maxx, :maxy, 3857), 4326))\n ) q\n \"\"\"\n ),\n {\n \"pred\": prediction_id,\n \"minx\": grid[0],\n \"miny\": grid[1],\n \"maxx\": grid[2],\n \"maxy\": grid[3],\n },\n ).fetchone()\n\n return bytes(result.values()[0])\n\n @staticmethod\n def get_tiles_by_quadkey(prediction_id: int, quadkeys: tuple, zoom: int):\n return (\n db.session.query(\n func.substr(PredictionTile.quadkey, 1, zoom).label(\"quadkey\"),\n func.avg(\n cast(\n cast(\n PredictionTile.predictions[\"ml_prediction\"],\n sqlalchemy.String,\n ),\n sqlalchemy.Float,\n )\n ).label(\"ml_prediction\"),\n func.avg(\n cast(\n cast(\n PredictionTile.predictions[\"osm_building_area\"],\n sqlalchemy.String,\n ),\n sqlalchemy.Float,\n )\n ).label(\"osm_building_area\"),\n )\n .filter(PredictionTile.prediction_id == prediction_id)\n .filter(func.substr(PredictionTile.quadkey, 1, zoom).in_(quadkeys))\n .group_by(func.substr(PredictionTile.quadkey, 1, zoom))\n .all()\n )\n\n\nclass ProjectAccess(db.Model):\n __tablename__ = \"projects_access\"\n\n id = db.Column(db.Integer, primary_key=True)\n\n model_id = db.Column(\n db.BigInteger, db.ForeignKey(\"projects.id\", name=\"fk_projects\"), nullable=False\n )\n\n uid = db.Column(\n db.BigInteger, db.ForeignKey(\"users.id\", name=\"fk_users\"), nullable=False\n )\n\n access = db.Column(db.String, nullable=False)\n\n @staticmethod\n def get(access_id: int):\n \"\"\"\n Gets the specified project access object\n :param access_id: access object to get\n :return project access if found otherwise None\n \"\"\"\n return ProjectAccess.query.get(access_id)\n\n @staticmethod\n def get_uid(model_id: int, access_id: int):\n \"\"\"\n Gets the specified project access object for a given model\n :param access_id: access object to get\n :return project access if found otherwise None\n \"\"\"\n\n return ProjectAccess.query.filter(\n ProjectAccess.id == access_id, ProjectAccess.model_id == model_id\n ).one_or_none()\n\n @staticmethod\n def list_update(model_id: int, current_users: list, new_users: list):\n uids = []\n\n # Create or update all new users\n for user in new_users:\n uids.append(int(user.get(\"uid\")))\n user[\"model_id\"] = model_id\n\n access = ProjectAccess.get_uid(model_id, user.get(\"id\"))\n\n if not access:\n access = ProjectAccess()\n access.create(user)\n else:\n access.update(user)\n\n # Drop access for users that are no longer in the new list\n for user in current_users:\n if user.get(\"uid\") not in uids:\n access = ProjectAccess.get_uid(model_id, user.get(\"id\"))\n access.delete()\n\n @staticmethod\n def list(model_id: int):\n query = db.session.query(\n ProjectAccess.id,\n ProjectAccess.uid,\n User.name,\n ProjectAccess.model_id,\n ProjectAccess.access,\n ).filter(ProjectAccess.model_id == model_id, User.id == ProjectAccess.uid)\n\n users = []\n for access in query.all():\n users.append(\n {\n \"id\": access[0],\n \"uid\": access[1],\n \"name\": access[2],\n \"model_id\": access[3],\n \"access\": access[4],\n }\n )\n\n return users\n\n def create(self, dto: ProjectAccessDTO):\n \"\"\" Creates and saves the current project access dto to the DB \"\"\"\n\n self.model_id = dto.get(\"model_id\")\n self.uid = dto.get(\"uid\")\n self.access = dto.get(\"access\")\n\n db.session.add(self)\n db.session.commit()\n return self\n\n def update(self, dto: ProjectAccessDTO):\n \"\"\" Updates a project access \"\"\"\n self.access = dto[\"access\"]\n db.session.commit()\n\n def delete(self):\n \"\"\" Deletes the current project access from the DB \"\"\"\n db.session.delete(self)\n db.session.commit()\n\n\nclass Project(db.Model):\n \"\"\" Describes an ML model registered with the service \"\"\"\n\n __tablename__ = \"projects\"\n\n id = db.Column(db.Integer, primary_key=True)\n created = db.Column(db.DateTime, default=timestamp, nullable=False)\n tags = db.Column(MutableList.as_mutable(postgresql.JSONB), nullable=False)\n name = db.Column(db.String, unique=True)\n source = db.Column(db.String)\n archived = db.Column(db.Boolean)\n project_url = db.Column(db.String)\n access = db.Column(db.String)\n notes = db.Column(db.String)\n predictions = db.relationship(\n Prediction, backref=\"projects\", cascade=\"all, delete-orphan\", lazy=\"dynamic\"\n )\n\n def create(self, dto: ProjectDTO):\n \"\"\" Creates and saves the current model to the DB \"\"\"\n\n self.name = dto.name\n self.source = dto.source\n self.archived = False\n self.tags = dto.tags\n self.access = dto.access\n self.project_url = dto.project_url\n self.notes = dto.notes\n\n db.session.add(self)\n db.session.commit()\n return self\n\n def save(self):\n \"\"\" Save changes to db\"\"\"\n db.session.commit()\n\n @staticmethod\n def get(model_id: int):\n \"\"\"\n Gets specified ML Model\n :param model_id: ml model ID in scope\n :return ML Model if found otherwise None\n \"\"\"\n return Project.query.get(model_id)\n\n @staticmethod\n def get_all(uid: int, model_filter: str, model_archived: bool):\n \"\"\"\n Get all models in the database\n \"\"\"\n return Project.query.filter(\n Project.name.ilike(model_filter + \"%\"),\n Project.archived == model_archived,\n or_(\n Project.access == \"public\",\n and_(ProjectAccess.uid == uid, ProjectAccess.model_id == Project.id),\n ),\n ).all()\n\n def delete(self):\n \"\"\" Deletes the current model from the DB \"\"\"\n db.session.delete(self)\n db.session.commit()\n\n def as_dto(self, users=None):\n \"\"\"\n Convert the model to its dto\n \"\"\"\n\n dto = ProjectDTO()\n dto.model_id = self.id\n dto.name = self.name\n dto.tags = self.tags\n dto.created = self.created\n dto.source = self.source\n dto.archived = self.archived\n dto.project_url = self.project_url\n dto.access = self.access\n dto.notes = self.notes\n if users is not None:\n dto.users = users\n\n return dto\n\n def update(self, dto: ProjectDTO):\n \"\"\" Updates an ML model \"\"\"\n self.id = dto.model_id\n self.name = dto.name\n self.source = dto.source\n self.project_url = dto.project_url\n self.archived = dto.archived\n self.tags = dto.tags\n self.access = dto.access\n self.notes = dto.notes\n\n db.session.commit()\n","sub_path":"ml_enabler/models/ml_model.py","file_name":"ml_model.py","file_ext":"py","file_size_in_byte":20329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"188868756","text":"from collections.abc import Mapping\nfrom ctypes import c_int, c_int32, c_double, c_char_p, POINTER\nfrom weakref import WeakValueDictionary\n\nimport numpy as np\n\nfrom . import _dll\nfrom .error import _error_handler\n\n__all__ = ['CellView', 'cells']\n\n# Cell functions\n_dll.openmc_cell_get_id.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_cell_get_id.restype = c_int\n_dll.openmc_cell_get_id.errcheck = _error_handler\n_dll.openmc_cell_set_temperature.argtypes = [\n c_int32, c_double, POINTER(c_int32)]\n_dll.openmc_cell_set_temperature.restype = c_int\n_dll.openmc_cell_set_temperature.errcheck = _error_handler\n_dll.openmc_get_cell.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_get_cell.restype = c_int\n_dll.openmc_get_cell.errcheck = _error_handler\n\n\nclass CellView(object):\n \"\"\"View of a cell.\n\n This class exposes a cell that is stored internally in the OpenMC solver. To\n obtain a view of a cell with a given ID, use the\n :data:`openmc.capi.cells` mapping.\n\n Parameters\n ----------\n index : int\n Index in the `cells` array.\n\n Attributes\n ----------\n id : int\n ID of the cell\n\n \"\"\"\n __instances = WeakValueDictionary()\n\n def __new__(cls, *args):\n if args not in cls.__instances:\n instance = super().__new__(cls)\n cls.__instances[args] = instance\n return cls.__instances[args]\n\n def __init__(self, index):\n self._index = index\n\n @property\n def id(self):\n cell_id = c_int32()\n _dll.openmc_cell_get_id(self._index, cell_id)\n return cell_id.value\n\n def set_temperature(self, T, instance=None):\n \"\"\"Set the temperature of a cell\n\n Parameters\n ----------\n T : float\n Temperature in K\n instance : int or None\n Which instance of the cell\n\n \"\"\"\n _dll.openmc_cell_set_temperature(self._index, T, instance)\n\n\nclass _CellMapping(Mapping):\n def __getitem__(self, key):\n index = c_int32()\n _dll.openmc_get_cell(key, index)\n return CellView(index.value)\n\n def __iter__(self):\n for i in range(len(self)):\n yield CellView(i + 1).id\n\n def __len__(self):\n return c_int32.in_dll(_dll, 'n_cells').value\n\ncells = _CellMapping()\n","sub_path":"openmc/capi/cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"233637336","text":"# /usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Author: wanngfeng\n# @Date: 2021-12-06 16:53:27\n# @Last Modified by: wanngfeng\n# @Last Modified time: 2021-12-06 17:03:00\n\nfrom typing import List\n\n\nclass Solution:\n def countBits(self, n: int) -> List[int]:\n bits = [0]\n for i in range(1, n + 1):\n # parentheses are required: '+' binds tighter than '&'\n bits.append(bits[i >> 1] + (i & 1))\n return bits\n","sub_path":"algorithm/count_bits.py","file_name":"count_bits.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"543549693","text":"import numpy as np\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Circle\nfrom matplotlib.legend_handler import HandlerLineCollection\n\nclass HandlerDashedLines(HandlerLineCollection):\n \"\"\"\n Custom Handler for LineCollection instances.\n \"\"\"\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize, trans):\n # figure out how many lines there are\n numlines = len(orig_handle.get_segments())\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,\n width, height, fontsize)\n leglines = []\n # divide the vertical space where the lines will go\n # into equal parts based on the number of lines\n ydata = np.full_like(xdata, height / (numlines + 1))\n # for each line, create the line at the proper location\n # and set the dash pattern\n for i in range(numlines):\n legline = Line2D(xdata, ydata/2 * (numlines - i) - ydescent/4)\n # xm = (xdata[0] + xdata[1])/2\n # y = ydata * (numlines - i) - ydescent\n # ym = (y[0] + y[1])/2\n # m = Circle(xm,ym)\n self.update_prop(legline, orig_handle, legend)\n # set color, dash pattern, and linewidth to that\n # of the lines in linecollection\n try:\n color = orig_handle.get_colors()[i]\n except IndexError:\n color = orig_handle.get_colors()[0]\n try:\n dashes = orig_handle.get_dashes()[i]\n except IndexError:\n dashes = orig_handle.get_dashes()[0]\n try:\n lw = orig_handle.get_linewidths()[i]\n except IndexError:\n lw = orig_handle.get_linewidths()[0]\n if dashes[1] is not None:\n legline.set_dashes(dashes[1])\n legline.set_color(color)\n legline.set_transform(trans)\n legline.set_linewidth(lw)\n # legline.set_marker('o')\n leglines.append(legline)\n # leglines.append(m)\n return leglines","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"33774336","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'humiture'\n\nurlpatterns = [\n # page that displays the temperature/humidity data\n url(r'^index/$', views.index, name='index'),\n # fetch temperature/humidity data as JSON\n url(r'^getHumitureListJson/$', views.getHumitureListJson, name='getHumitureListJson'),\n # fetch temperature/humidity data as JSON\n url(r'^getHumitureListJson1/$', views.getHumitureListJson1, name='getHumitureListJson1'),\n # fetch temperature/humidity data for the echarts view\n url(r'^getHumitureStatisticsJson/.*$', views.getHumitureStatisticsJson, name='getHumitureStatisticsJson'),\n # export temperature/humidity records\n url(r'^exportHumitureData/.*$', views.exportHumitureData, name='exportHumitureData'),\n # export temperature/humidity records\n url(r'^exportHumitureData1/.*$', views.exportHumitureData1, name='exportHumitureData1'),\n]","sub_path":"humiture/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"217683453","text":"# Pretend we still have overflow in Python\nclass Solution(object):\n def divide(self, dividend, divisor):\n \"\"\"\n :type dividend: int\n :type divisor: int\n :rtype: int\n \"\"\"\n INT_MIN, INT_MAX = -(1 << 31), 0x7FFFFFFF\n if divisor == 0 or (dividend == INT_MIN and divisor == -1):\n return INT_MAX\n ret = 0\n positive = (dividend < 0) == (divisor < 0)\n dividend, divisor = abs(dividend), abs(divisor)\n for i in range(31, -1, -1):\n if (dividend >> i) >= divisor:\n ret |= (1 << i)\n dividend -= (divisor << i)\n return ret if positive else -ret\n","sub_path":"Divide Two Integers.py","file_name":"Divide Two Integers.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"256621161","text":"\"\"\"Tabular QL agent\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport framework\nimport utils\nimport agent_tabular_ql\nDEBUG = False\n\nGAMMA = 0.5 # discount factor\nTRAINING_EP = 0.5 # epsilon-greedy parameter for training\nTESTING_EP = 0.05 # epsilon-greedy parameter for testing\nNUM_RUNS = 10\nNUM_EPOCHS = 200\nNUM_EPIS_TRAIN = 25 # number of episodes for training at each epoch\nNUM_EPIS_TEST = 50 # number of episodes for testing\nALPHA = 0.1 # learning rate for training\n\nACTIONS = framework.get_actions()\nOBJECTS = framework.get_objects()\nNUM_ACTIONS = len(ACTIONS)\nNUM_OBJECTS = len(OBJECTS)\n\nif __name__ == '__main__':\n # Data loading and build the 
dictionaries that use unique index for each state\n (dict_room_desc, dict_quest_desc) = framework.make_all_states_index()\n NUM_ROOM_DESC = len(dict_room_desc)\n NUM_QUESTS = len(dict_quest_desc)\n\n\nq_func = np.zeros((NUM_ROOM_DESC, NUM_QUESTS, NUM_ACTIONS, NUM_OBJECTS))\nq_func[1, 1, 2, 3] = 999\nq_func[1, 1, 2, 2] = 999\nprint(q_func.shape)\nprint(np.argmax(q_func[1, 1], axis=1))\n\n#action, object = np.where(q_func[1, 1] == np.max(q_func[1, 1, :, :]))[0]\n#print(action)\n\nprint(np.unravel_index(q_func[1, 1, :, :].argmax(), q_func[1, 1].shape))\n\n","sub_path":"Projects/Project 5 - Reinforcement Learning - Text Based Game/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"204778777","text":"import untypy\n\nuntypy.enable()\nfrom typing import *\n\nEvenNumber = Annotated[int, lambda x: x % 2 == 0, \"Number must be even.\"]\n\n\ndef foo(functions: List[Callable[[], EvenNumber]]) -> NoReturn:\n for fn in functions:\n print(fn())\n\n\nfunc = lambda: 41\nfoo([func]) # This is a type error\n","sub_path":"examples/ex06.py","file_name":"ex06.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"47415993","text":"#############################################################################################################\n# Team: 1\n# Member Names: Andrew Almond, Clark Foster, Luke Seaton, Samuel Spohn, Christian Thibodeaux, Anthony Toussaint\n# GitHub: https://github.com/samcspohn/CSC-442\n# Date: 8 Oct 2019\n# Description: FTP Covert Message Extraction, Decode\n# Python Version: 3.7 \n#############################################################################################################\n\n# This file acts as the server sending the message to be decoded by the other file\n\n\n# IMPORTANT NOTE: PLEASE RUN THIS FILE IN SEPARATE TERMINAL IN ORDER AND RUN\n# THE OTHER FILE WHICH DECODES IT IN ANOTHER TERMINAL\n\n#import libraries\nimport socket\nimport sys\nimport time\nfrom binascii import hexlify\n\n# set up the server\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n port = 1337\n s.bind((\"\", port))\n s.listen(0)\nexcept OSError:\n sys.exit(1)\n\nc, addr = s.accept()\n\n# initialize the time delays\nZERO = .025\nONE = .1\n\n# initialize overt and covert messages\nmsg = \"My name is Happy Gilmore. Ever since I was old enough to skate, I loved hockey... wasn't the greatest skater though. But that didn't stop my dad from teaching me the secret of slapping the greatest slapshot. My dad worshiped hockey, my mom didn't, that's why she moved to Egypt, where there's not a hockey rink within 15 hundred miles.\"\ncovert = \"I still feel I won the fight.\" + \"EOF\"\n\n# initialize covert message binary string\ncovert_bin = \"\"\n\n# convert covert message to binary message via time delays\nfor i in covert:\n covert_bin += bin(int(hexlify(i.encode()), 16))[2:].zfill(8) #16 because hex; hexlify needs bytes in Python 3\nn = 0\nfor i in msg:\n c.send(i.encode()) # sockets carry bytes, not str, in Python 3\n if(covert_bin[n] == \"0\"):\n time.sleep(ZERO)\n else:\n time.sleep(ONE)\n n = (n + 1) % len(covert_bin)\n\n\n# send EOF and close out server\nc.send(b\"EOF\")\nc.close()\n","sub_path":"program4/serverRUN_FIRST.py","file_name":"serverRUN_FIRST.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"173011760","text":"#\n# Chris Lumens \n#\n# Copyright 2005, 2006, 2007, 2008, 2012 Red Hat, Inc.\n#\n# This copyrighted material is made available to anyone wishing to use, modify,\n# copy, or redistribute it subject to the terms and conditions of the GNU\n# General Public License v.2. This program is distributed in the hope that it\n# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the\n# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat\n# trademarks that are incorporated in the source code or documentation are not\n# subject to the GNU General Public License and may only be used or replicated\n# with the express permission of Red Hat, Inc. 
\n#\nfrom pykickstart.base import KickstartCommand\nfrom pykickstart.constants import AUTOPART_TYPE_BTRFS, AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP, AUTOPART_TYPE_PLAIN\nfrom pykickstart.errors import KickstartParseError, KickstartValueError, formatErrorMsg\nfrom pykickstart.options import KSOptionParser\n\nimport gettext\n_ = lambda x: gettext.ldgettext(\"pykickstart\", x)\n\nclass FC3_AutoPart(KickstartCommand):\n removedKeywords = KickstartCommand.removedKeywords\n removedAttrs = KickstartCommand.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n KickstartCommand.__init__(self, writePriority, *args, **kwargs)\n self.autopart = kwargs.get(\"autopart\", False)\n\n def __str__(self):\n retval = KickstartCommand.__str__(self)\n\n if self.autopart:\n retval += \"autopart\\n\"\n\n return retval\n\n def parse(self, args):\n if len(args) > 0:\n raise KickstartValueError(formatErrorMsg(self.lineno, msg=_(\"Kickstart command %s does not take any arguments\") % \"autopart\"))\n\n self.autopart = True\n return self\n\nclass F9_AutoPart(FC3_AutoPart):\n removedKeywords = FC3_AutoPart.removedKeywords\n removedAttrs = FC3_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n FC3_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.encrypted = kwargs.get(\"encrypted\", False)\n self.passphrase = kwargs.get(\"passphrase\", \"\")\n\n self.op = self._getParser()\n\n def __str__(self):\n retval = KickstartCommand.__str__(self)\n\n if not self.autopart:\n return retval\n\n retval += \"autopart\"\n\n if self.encrypted:\n retval += \" --encrypted\"\n\n if self.passphrase != \"\":\n retval += \" --passphrase=\\\"%s\\\"\"% self.passphrase\n\n retval += \"\\n\"\n return retval\n\n def _getParser(self):\n op = KSOptionParser()\n op.add_option(\"--encrypted\", action=\"store_true\", default=False)\n op.add_option(\"--passphrase\")\n return op\n\n def parse(self, args):\n (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)\n # Rely on any error handling from baseclass\n FC3_AutoPart.parse(self, extra)\n\n self._setToSelf(self.op, opts)\n return self\n\nclass F12_AutoPart(F9_AutoPart):\n removedKeywords = F9_AutoPart.removedKeywords\n removedAttrs = F9_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n F9_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n\n self.escrowcert = kwargs.get(\"escrowcert\", \"\")\n self.backuppassphrase = kwargs.get(\"backuppassphrase\", False)\n\n def __str__(self):\n retval = F9_AutoPart.__str__(self)\n\n if not self.autopart:\n return retval\n\n if self.encrypted and self.escrowcert != \"\":\n retval = retval.strip()\n\n retval += \" --escrowcert=\\\"%s\\\"\" % self.escrowcert\n\n if self.backuppassphrase:\n retval += \" --backuppassphrase\"\n\n retval += \"\\n\"\n\n return retval\n\n def _getParser(self):\n op = F9_AutoPart._getParser(self)\n op.add_option(\"--escrowcert\")\n op.add_option(\"--backuppassphrase\", action=\"store_true\", default=False)\n return op\n\nclass RHEL6_AutoPart(F12_AutoPart):\n removedKeywords = F12_AutoPart.removedKeywords\n removedAttrs = F12_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n F12_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.cipher = kwargs.get(\"cipher\", \"\")\n\n def __str__(self):\n retval = F12_AutoPart.__str__(self)\n if not self.autopart:\n return retval\n\n if self.encrypted and self.cipher:\n # remove any trailing newline\n 
retval = retval.strip()\n retval += \" --cipher=\\\"%s\\\"\" % self.cipher\n retval += \"\\n\"\n\n return retval\n\n def _getParser(self):\n op = F12_AutoPart._getParser(self)\n op.add_option(\"--cipher\")\n return op\n\n def parse(self, args):\n # call the overriden command to do it's job first\n retval = F12_AutoPart.parse(self, args)\n\n # Using autopart together with other partitioning command such as\n # part/partition, raid, logvol or volgroup can lead to hard to debug\n # behavior that might among other result into an unbootable system.\n #\n # Therefore if any of those commands is detected in the same kickstart\n # together with autopart, an error is raised and installation is\n # aborted.\n conflicting_command = \"\"\n\n # seen indicates that the corresponding\n # command has been seen in kickstart\n if self.handler.partition.seen:\n conflicting_command = \"part/partition\"\n elif self.handler.raid.seen:\n conflicting_command = \"raid\"\n elif self.handler.volgroup.seen:\n conflicting_command = \"volgroup\"\n elif self.handler.logvol.seen:\n conflicting_command = \"logvol\"\n elif hasattr(self.handler, \"reqpart\") and self.handler.reqpart.seen:\n conflicting_command = \"reqpart\"\n\n if conflicting_command:\n # allow for translation of the error message\n errorMsg = _(\"The %s and autopart commands can't be used at the same time\") % \\\n conflicting_command\n raise KickstartParseError(formatErrorMsg(self.lineno, msg=errorMsg))\n return retval\n\n\nclass F16_AutoPart(F12_AutoPart):\n removedKeywords = F12_AutoPart.removedKeywords\n removedAttrs = F12_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n F12_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.lvm = kwargs.get(\"lvm\", True)\n\n def __str__(self):\n retval = F12_AutoPart.__str__(self)\n if not self.autopart:\n return retval\n\n # If requested, disable LVM autopart\n if not self.lvm:\n # remove any trailing newline\n retval = retval.strip()\n retval += \" --nolvm\"\n retval += \"\\n\"\n\n return retval\n\n def _getParser(self):\n op = F12_AutoPart._getParser(self)\n op.add_option(\"--nolvm\", action=\"store_false\", dest=\"lvm\",\n default=True)\n return op\n\nclass F17_AutoPart(F16_AutoPart):\n def __init__(self, writePriority=100, *args, **kwargs):\n F16_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.type = kwargs.get(\"type\", None)\n self.typeMap = { \"lvm\": AUTOPART_TYPE_LVM,\n \"btrfs\": AUTOPART_TYPE_BTRFS,\n \"plain\": AUTOPART_TYPE_PLAIN,\n \"partition\": AUTOPART_TYPE_PLAIN }\n\n def _typeAsStr(self):\n retval = None\n\n for (key, value) in list(self.typeMap.items()):\n if value == self.type:\n retval = key\n break\n\n if retval == \"partition\":\n retval = \"plain\"\n\n return retval\n\n def __str__(self):\n retval = F16_AutoPart.__str__(self)\n if not self.autopart:\n return retval\n\n ty = self._typeAsStr()\n if ty:\n # remove any trailing newline\n retval = retval.strip()\n retval += \" --type=%s\\n\" % ty\n\n return retval\n\n def _getParser(self):\n def type_cb(option, opt_str, value, parser):\n if value.lower() in self.typeMap:\n parser.values.ensure_value(option.dest,\n self.typeMap[value.lower()])\n\n def nolvm_cb(option, opt_str, value, parser):\n parser.values.ensure_value(option.dest, AUTOPART_TYPE_PLAIN)\n\n op = F16_AutoPart._getParser(self)\n op.add_option(\"--nolvm\", action=\"callback\", callback=nolvm_cb,\n dest=\"type\", nargs=0)\n\n op.add_option(\"--type\", action=\"callback\", callback=type_cb,\n 
dest=\"type\", nargs=1, type=\"string\")\n return op\n\n def parse(self, args):\n (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)\n # Rely on any error handling from baseclass\n F16_AutoPart.parse(self, extra)\n\n self._setToSelf(self.op, opts)\n\n # make this always True to avoid writing --nolvm\n self.lvm = True\n\n return self\n\nclass F18_AutoPart(F17_AutoPart):\n removedKeywords = F17_AutoPart.removedKeywords\n removedAttrs = F17_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n F17_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.cipher = kwargs.get(\"cipher\", \"\")\n\n def __str__(self):\n retval = F17_AutoPart.__str__(self)\n if not self.autopart:\n return retval\n\n if self.encrypted and self.cipher:\n # remove any trailing newline\n retval = retval.strip()\n retval += \" --cipher=\\\"%s\\\"\" % self.cipher\n retval += \"\\n\"\n\n return retval\n\n def _getParser(self):\n op = F17_AutoPart._getParser(self)\n op.add_option(\"--cipher\")\n return op\n\n\nclass F20_AutoPart(F18_AutoPart):\n def __init__(self, writePriority=100, *args, **kwargs):\n F18_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.typeMap[\"thinp\"] = AUTOPART_TYPE_LVM_THINP\n\n def parse(self, args):\n # call the overriden command to do it's job first\n retval = F18_AutoPart.parse(self, args)\n\n # Using autopart together with other partitioning command such as\n # part/partition, raid, logvol or volgroup can lead to hard to debug\n # behavior that might among other result into an unbootable system.\n #\n # Therefore if any of those commands is detected in the same kickstart\n # together with autopart, an error is raised and installation is\n # aborted.\n conflicting_command = \"\"\n\n # seen indicates that the corresponding\n # command has been seen in kickstart\n if self.handler.partition.seen:\n conflicting_command = \"part/partition\"\n elif self.handler.raid.seen:\n conflicting_command = \"raid\"\n elif self.handler.volgroup.seen:\n conflicting_command = \"volgroup\"\n elif self.handler.logvol.seen:\n conflicting_command = \"logvol\"\n elif hasattr(self.handler, \"reqpart\") and self.handler.reqpart.seen:\n conflicting_command = \"reqpart\"\n elif hasattr(self.handler, \"mount\") and self.handler.mount.seen:\n conflicting_command = \"mount\"\n\n if conflicting_command:\n # allow for translation of the error message\n errorMsg = _(\"The %s and autopart commands can't be used at the same time\") % \\\n conflicting_command\n raise KickstartParseError(formatErrorMsg(self.lineno, msg=errorMsg))\n return retval\n\nclass F21_AutoPart(F20_AutoPart):\n removedKeywords = F20_AutoPart.removedKeywords\n removedAttrs = F20_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n F20_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.fstype = kwargs.get(\"fstype\", \"\")\n\n def __str__(self):\n retval = F20_AutoPart.__str__(self)\n if not self.autopart:\n return retval\n\n if self.fstype:\n # remove any trailing newline\n retval = retval.strip()\n retval += \" --fstype=%s\" % self.fstype\n retval += \"\\n\"\n\n return retval\n\n def _getParser(self):\n op = F20_AutoPart._getParser(self)\n op.add_option(\"--fstype\")\n return op\n\n def parse(self, args):\n # call the overriden command to do it's job first\n retval = F20_AutoPart.parse(self, args)\n\n # btrfs is not a valid filesystem type\n if self.fstype == \"btrfs\":\n raise 
KickstartParseError(formatErrorMsg(self.lineno,\n msg=_(\"autopart --fstype=btrfs is not valid fstype, use --type=btrfs instead\")))\n\n if self._typeAsStr() == \"btrfs\" and self.fstype:\n raise KickstartParseError(formatErrorMsg(self.lineno,\n msg=_(\"autopart --fstype cannot be used with --type=btrfs\")))\n\n return retval\n\nclass RHEL7_AutoPart(F21_AutoPart):\n removedKeywords = F21_AutoPart.removedKeywords\n removedAttrs = F21_AutoPart.removedAttrs\n\n def __init__(self, writePriority=100, *args, **kwargs):\n F21_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)\n self.nohome = kwargs.get(\"nohome\", False)\n\n def __str__(self):\n retval = F21_AutoPart.__str__(self)\n if not self.autopart:\n return retval\n\n if self.nohome:\n # remove any trailing newline\n retval = retval.strip()\n retval += \" --nohome\"\n retval += \"\\n\"\n\n return retval\n\n def _getParser(self):\n op = F21_AutoPart._getParser(self)\n op.add_option(\"--nohome\", dest=\"nohome\", action=\"store_true\", default=False)\n return op\n","sub_path":"python2.7/site-packages/pykickstart/commands/autopart.py","file_name":"autopart.py","file_ext":"py","file_size_in_byte":14289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"56197436","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom basic_units import radians, degrees, cos\n\ndef format_func(value, tick_number):\n # find number of multiples of pi/2\n N = int(np.round(2 * value / np.pi))\n if N == 0:\n return \"0\"\n elif N == 1:\n return r\"$\\pi/2$\"\n elif N == 2:\n return r\"$\\pi$\"\n elif N % 2 > 0:\n return r\"${0}\\pi/2$\".format(N)\n else:\n return r\"${0}\\pi$\".format(N // 2)\n\n\nplt.rcParams.update({'font.size': 20})\nplt.rcParams['mathtext.fontset'] = 'stix'\nplt.rcParams['font.family'] = 'Times New Roman'\nplt.rcParams[\"legend.handlelength\"] = 1.0\n\nns = [0,1,2,10,15]\nqs = [5,25]\n\na = {}\nb = {}\nfor q in qs:\n for n in ns:\n data = np.loadtxt('an{0}q{1}.txt'.format(n, q))\n a[(n,q,'x')] = data[:,0]\n a[(n,q,'y')] = data[:,1]\n if n != 0:\n data = np.loadtxt('bn{0}q{1}.txt'.format(str(n), q))\n b[(n,q,'x')] = data[:,0]\n b[(n,q,'y')] = data[:,1]\n\n\nlw = 2\n\nfor q in qs:\n for n in ns:\n if n==0:\n fig, ax = plt.subplots(figsize=(8,4))\n ax.plot(a[n,q,'x'], a[n,q,'y'], lw=lw, label='$y_\\mathrm{I}$'+' n={0}, q={1}'.format(n,q), color='r')\n ax.set(xlabel='$t$', ylabel=r'Eigenfunction $y$')\n ax.legend(ncol=5, loc='lower center',prop={'size': 16}, frameon=False, bbox_to_anchor= (0.5, 1.01))\n ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n plt.tight_layout()\n plt.savefig('figs/a_{}_{}.eps'.format(n,q))\n\n else:\n fig, ax = plt.subplots(figsize=(8,4))\n ax.plot(a[n,q,'x'], a[n,q,'y'], lw=lw, label='$y_{\\mathrm{I}}$'+' n={0}, q={1}'.format(n,q), color='r')\n ax.plot(b[n,q,'x'], b[n,q,'y'], lw=lw, label='$y_{\\mathrm{II}}$'+' n={0}, q={1}'.format(n,q), color='b', ls='--')\n ax.set(xlabel='$t$', ylabel=r'Eigenfunction $y$')\n ax.legend(ncol=5, loc='lower center',prop={'size': 16}, frameon=False, bbox_to_anchor= (0.5, 1.01))\n ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n plt.tight_layout()\n plt.savefig('figs/{}_{}.eps'.format(n,q))\n # ax.plot(a[0,25,'x'], a[0,25,'y'], lw=lw, label=r'a$_{{0}}$({1})'.format(0,5))\n\n\n\n# q=5\n# for n in ns:\n# ax.plot(a[(n,q,'x')], a[n,q,'y'], lw=lw, label=r'a$_{{0}}$({1})'.format(n,q))\n# if n != 0:\n# ax.plot(b[(n,q,'x')], b[n,q,'y'], lw=lw, label=r'b$_{{0}}$({1})'.format(n,q))\n#\n# 
ax.set(xlabel='$t$', ylabel='Eigenfunction')\n# ax.legend(ncol=5, loc='upper center',prop={'size': 16}, frameon=False)\n#\n# ax=axs[1]\n# q=25\n# for n in ns:\n# ax.plot(a[(n,q,'x')], a[n,q,'y'], lw=lw, label=r'a$_{{0}}$({1})'.format(n,q))\n#\n# if n != 0:\n# ax.plot(b[(n,q,'x')], b[n,q,'y'], lw=lw, label=r'b$_{{0}}$({1})'.format(n,q))\n# ax.set(xlabel='$t$', ylabel='Eigenfunction')\n# ax.legend(ncol=5, loc='upper center',prop={'size': 16}, frameon=False)\n\n\n\nplt.show()\n","sub_path":"HW8/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"134149434","text":"import os \n\nclass LogTailer(object):\n def __init__(self, filename):\n self.filename = filename\n # When we initialize the log tailer don't dump entire file\n self.openLog()\n self.lastSize = self.getSize()\n self.closeLog()\n\n def openLog(self):\n self.file = open(self.filename)\n\n def closeLog(self):\n self.file.close()\n\n def seekTo(self, place):\n self.file.seek(place)\n\n def getSize(self):\n self.file.seek(0, os.SEEK_END)\n size = self.file.tell()\n self.seekTo(0)\n return size\n\n def tail(self):\n self.openLog()\n\n size = self.getSize()\n\n lines = []\n\n if size > self.lastSize:\n # More data\n self.seekTo(self.lastSize)\n lines = self.file.read().strip('\\n').split('\\n')\n\n if size < self.lastSize:\n lines = self.file.read().strip('\\n').split('\\n')\n\n self.lastSize = size\n\n self.closeLog()\n\n return lines\n","sub_path":"winch-node/winchnode/logtail.py","file_name":"logtail.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"99195973","text":"'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.models as models\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport os\nimport io\nimport datetime\nimport threading\nimport argparse\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\nfrom ShioDataSet import MyDataset\nfrom samplenet_analog import *\nfrom trainingdata import *\nfrom testreport import *\n\nimport numpy as np\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/../config\")\nimport learning_config\n\nDISCRETIZATION = learning_config.Discretization_number\n\nTrepo = TestReport()\ndef main():\n\t# Parse arguments.\n\targs = parse_args()\n\n\t# Set device.\n\tdevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\tROOT_DIR = \"\"\n\t\n\timgDataset = MyDataset(args.data_csv, ROOT_DIR, 320, 240, transform=transforms.ToTensor(), PILtrans = args.PILtrans, ORGtrans = args.ORGtrans)\n\t# Load dataset.\n\ttrain_data, test_data = train_test_split(imgDataset, test_size=0.2)\n\t#pd.to_pickle(test_data, \"test_data.pkl\")\n\t#del test_data\n\ttrain_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)\n\t\n\tprint('data set')\n\t# Set a model.\n\tif args.model == 'resnet18':\n\t\tmodel = models.resnet18()\n\t\tmodel.fc = torch.nn.Linear(512, DISCRETIZATION)\n\telif args.model == 'samplenet':\n\t\tmodel = SampleNet()\n\telif args.model == 'simplenet':\n\t\tmodel = 
SimpleNet()\n\telse:\n\t\traise NotImplementedError()\n\tmodel.train()\n\tmodel = model.to(device)\n\n\tprint('model set')\n\t# Set loss function and optimization function.\n\tcriterion = nn.CrossEntropyLoss()\n\toptimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)\n\t#optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\tprint('optimizer set')\n\t\n\tTdata = TrainingData(args.n_epoch)\n\n\t# Train and test.\n\tprint('Train starts')\n\tfor epoch in range(args.n_epoch):\n\t\t# Train and test a model.\n\t\ttrain_acc, train_loss = train(model, device, train_loader, criterion, optimizer)\n\t\tTdata.train_result[0,epoch]=epoch+1\n\t\tTdata.train_result[1,epoch]=train_acc\n\t\tTdata.train_result[2,epoch]=train_loss\n\n\t\t# Output score.\n\t\tif(epoch%args.test_interval == 0):\n\t\t\t#pd.to_pickle(train_data, \"train_data.pkl\")\n\t\t\t#del train_data\n\t\t\t\n\t\t\t#test_data = pd.read_pickle(\"test_data.pkl\")\n\t\t\ttest_loader = torch.utils.data.DataLoader(test_data, batch_size=20, shuffle=True)\n\t\t\t#del test_data\n\t\t\ttest_acc, test_loss = test(model, device, test_loader, criterion, epoch+1)\n\t\t\tTdata.test_result[0,int(epoch/args.test_interval)]=epoch+1\n\t\t\tTdata.test_result[1,int(epoch/args.test_interval)]=test_acc\n\t\t\tTdata.test_result[2,int(epoch/args.test_interval)]=test_loss\n\t\t\t#del test_loader\n\t\t\t\n\t\t\tstdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'\n\t\t\tprint(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss))\n\t\t\tglobal Trepo\n\t\t\tfor repo in Trepo.report:\n\t\t\t\tif repo[\"epoch\"]==epoch+1:\n\t\t\t\t\tprint(\"\")\n\t\t\t\t\t#print(json.dumps(repo, indent=4))\n\t\t\t#train_data = pd.read_pickle(\"train_data.pkl\")\n\t\telse:\t\n\t\t\tstdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}' #, test acc: {:<8}, test loss: {:<8}'\n\t\t\tprint(stdout_temp.format(epoch+1, train_acc, train_loss)) #, test_acc, test_loss))\n\t\tTdata.set_data()\n\t\tplt.pause(1)\n\t\t# Save a model checkpoint.\n\t\tif(epoch%args.save_model_interval == 0):\n\t\t\tmodel_ckpt_path = (savedir+\"checkpoints/{}_{}_epoch={}.pth\").format(args.dataset_name, args.model_name, epoch+1)\n\t\t\ttorch.save(model.state_dict(), model_ckpt_path)\n\t\t\tprint('Saved a model checkpoint at {}'.format(model_ckpt_path))\n\t\t\tprint('')\n\t\n\tglobal settings\n\twith open(savedir+'settings.json','w') as f:\n\t\tjson.dump(settings, f, indent=4)\n\n\twith open(savedir+'reports.json','w') as f:\n\t\tjson.dump(Trepo.report, f, indent=4)\n\n\tnp.savetxt(savedir+'testdata.csv', Tdata.test_result.transpose(), delimiter=',', fmt='%f')\n\tnp.savetxt(savedir+'traindata.csv', Tdata.train_result.transpose(), delimiter=',', fmt='%f')\n\tTdata.fig.savefig(savedir+'traindata.png')\n\n\n\ndef train(model, device, train_loader, criterion, optimizer):\n\tmodel.train()\n\n\toutput_list = []\n\ttarget_list = []\n\trunning_loss = 0.0\n\tfor batch_idx, (inputs, targets) in enumerate(train_loader):\n\t\t# Forward processing.\n\t\tinputs, targets = inputs.to(device), targets.to(device)\n\t\toutputs = model(inputs)\n\t\tloss = criterion(outputs, targets)\n\n\t\t# Backward processing.\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\t# Set data to calculate score.\n\t\toutput_list += [int(o.argmax()) for o in outputs]\n\t\ttarget_list += [int(t) for t in targets]\n\t\trunning_loss += loss.item()\n\n\t\t# Calculate score at present.\n\t\ttrain_acc, train_loss 
= calc_score(output_list, target_list, running_loss, train_loader)\n\t\tif batch_idx % 10 == 0 and batch_idx != 0:\n\t\t\tstdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}'\n\t\t\tprint(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss))\n\n\t# Calculate score.\n\ttrain_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader)\n\n\treturn train_acc, train_loss\n\n\ndef test(model, device, test_loader, criterion, epoch_n):\n\tmodel.eval()\n\n\toutput_list = []\n\ttarget_list = []\n\trunning_loss = 0.0\n\tfor batch_idx, (inputs, targets) in enumerate(test_loader):\n\t\t# Forward processing.\n\t\tinputs, targets = inputs.to(device), targets.to(device)\n\t\toutputs = model(inputs)\n\t\tloss = criterion(outputs, targets)\n\t\t\n\t\t# Set data to calculate score.\n\t\toutput_list += [int(o.argmax()) for o in outputs]\n\t\ttarget_list += [int(t) for t in targets]\n\t\trunning_loss += loss.item()\n\t\t\n\ttest_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader)\n\n\tprint('confusion_matrix')\n\tconf_mat = confusion_matrix(output_list, target_list)\n\tprint(conf_mat)\n\tprint('classification_report')\n\tclass_repo = classification_report(output_list, target_list)\n\tprint(class_repo)\n\n\tglobal Trepo\n\tTrepo.append(epoch_n,class_repo,conf_mat.tolist())\n\n\treturn test_acc, test_loss\n\n\n\ndef calc_score(output_list, target_list, running_loss, data_loader):\n\t# Calculate accuracy.\n\t#result = classification_report(output_list, target_list) #, output_dict=True)\n\t#acc = round(result['weighted avg']['f1-score'], 6)\n\tacc = round(f1_score(output_list, target_list, average='micro'), 6)\n\tloss = round(running_loss / len(data_loader.dataset), 6)\n\n\treturn acc, loss\n\n\ndef parse_args():\n\t# Set arguments.\n\targ_parser = argparse.ArgumentParser(description=\"Image Classification\")\n\t\n\targ_parser.add_argument(\"--dataset_name\", type=str, default='sim_race')\n\targ_parser.add_argument(\"--data_csv\", type=str, default=os.environ['HOME'] + '/Images_from_rosbag/_2020-11-05-01-45-29_2/_2020-11-05-01-45-29.csv')\n\targ_parser.add_argument(\"--model\", type=str, default='resnet18')\n\targ_parser.add_argument(\"--model_name\", type=str, default='joycon_ResNet18')\n\targ_parser.add_argument(\"--model_ckpt_dir\", type=str, default=os.environ['HOME'] + '/work/experiments/models/')\n\targ_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default=os.environ['HOME'] + '/work/experiments/models/checkpoints/{}_{}_epoch={}.pth')\n\targ_parser.add_argument('--n_epoch', default=20, type=int, help='The number of epoch')\n\targ_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate')\n\targ_parser.add_argument('--test_interval', default=5, type=int, help='test interval')\n\targ_parser.add_argument('--save_model_interval', default=5, type=int, help='save model interval')\n\targ_parser.add_argument('--PILtrans', action='store_true')\n\targ_parser.add_argument('--ORGtrans', action='store_true')\n\n\targs = arg_parser.parse_args()\n\n\t# Make directory.\n\tos.makedirs(args.model_ckpt_dir, exist_ok=True)\n\tdat = datetime.datetime.now()\n\tcur_time = str(dat.year).zfill(4)+str(dat.month).zfill(2)+str(dat.day).zfill(2)+str(dat.hour).zfill(2)+str(dat.minute).zfill(2)+str(dat.second).zfill(2)\n\t\n\tglobal savedir\n\tsavedir = args.model_ckpt_dir+args.model_name+\"_\"+cur_time+\"/\"\n\n\tos.makedirs(savedir, exist_ok=True)\n\tos.makedirs(savedir + \"checkpoints/\", exist_ok=True)\n\n\tglobal 
settings\n\tsettings={\"dataset_name\"\t:args.dataset_name\n\t\t\t ,\"data_csv\"\t\t:args.data_csv\n\t\t\t ,\"model\"\t\t\t:args.model\n\t\t\t ,\"model_name\"\t\t:args.model_name\n\t\t\t ,\"model_ckpt_dir\"\t:args.model_ckpt_dir\n\t\t\t ,\"model_ckpt_path_temp\":args.model_ckpt_path_temp\n\t\t\t ,\"n_epoch\"\t\t\t:args.n_epoch\n\t\t\t ,\"lr\"\t\t\t\t:args.lr\n\t\t\t ,\"test_interval\"\t:args.test_interval\n\t\t\t ,\"save_model_interval\"\t:args.save_model_interval\n\t\t\t ,\"PILtrans\"\t\t:args.PILtrans\n\t\t\t ,\"ORGtrans\"\t\t:args.ORGtrans\n\t\t\t }\n\n\tprint(args.data_csv)\n\t# Validate paths.\n\tassert os.path.exists(args.data_csv)\n\tassert os.path.exists(args.model_ckpt_dir)\n\n\treturn args\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"ai_race/learning/scripts/shio_train.py","file_name":"shio_train.py","file_ext":"py","file_size_in_byte":8961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"93405026","text":"import numpy as np\n\ndef gaussian_kernel(sigma, ksize):\n radius = (ksize - 1) / 2.0\n x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]\n sigma = sigma ** 2\n k = 2 * np.exp(-0.5 * (x ** 2 + y ** 2) / sigma)\n k = k / np.sum(k)\n return k\n\ndef tile_and_reflect(input):\n tiled_input = np.tile(input, (3, 3))\n rows = input.shape[0]\n cols = input.shape[1]\n\n for i in range(3):\n tiled_input[i * rows:(i + 1) * rows, 0:cols] = np.fliplr(tiled_input[i * rows:(i + 1) * rows, 0:cols])\n tiled_input[i * rows:(i + 1) * rows, -cols:] = np.fliplr(tiled_input[i * rows:(i + 1) * rows, -cols:])\n\n for i in range(3):\n tiled_input[0:rows, i * cols:(i + 1) * cols] = np.flipud(tiled_input[0:rows, i * cols:(i + 1) * cols])\n tiled_input[-rows:, i * cols:(i + 1) * cols] = np.flipud(tiled_input[-rows:, i * cols:(i + 1) * cols])\n\n assert (np.array_equal(input, tiled_input[rows:2 * rows, cols:2 * cols]))\n\n assert (np.array_equal(input[0, :], tiled_input[rows - 1, cols:2 * cols]))\n assert (np.array_equal(input[:, -1], tiled_input[rows:2 * rows, 2 * cols]))\n assert (np.array_equal(input[-1, :], tiled_input[2 * rows, cols:2 * cols]))\n assert (np.array_equal(input[:, 0], tiled_input[rows:2 * rows, cols - 1]))\n\n return tiled_input\n\ndef convolve(input, weights):\n assert (len(input.shape) == 2)\n assert (len(weights.shape) == 2)\n\n assert (weights.shape[0] < input.shape[0] + 1)\n assert (weights.shape[0] < input.shape[1] + 1)\n\n output = np.copy(input)\n tiled_input = tile_and_reflect(input)\n\n rows = input.shape[0]\n cols = input.shape[1]\n hw_row = weights.shape[0] // 2\n hw_col = weights.shape[1] // 2\n\n for i, io in zip(range(rows, rows * 2), range(rows)):\n for j, jo in zip(range(cols, cols * 2), range(cols)):\n average = 0.0\n overlapping = tiled_input[i - hw_row:i + hw_row,\n j - hw_col:j + hw_col]\n assert (overlapping.shape == weights.shape)\n tmp_weights = weights\n merged = tmp_weights[:] * overlapping\n average = np.sum(merged)\n output[io, jo] = average\n return output\n\ndef gaussian_blur(img, ksize, sigma):\n k = gaussian_kernel(sigma, ksize)\n blurred_img = convolve(img, k)\n return blurred_img\n\ndef dsampleWithBlur(image, sigma, ksize, color=True):\n k = gaussian_kernel(sigma, ksize)\n if color:\n ndim = 3\n else:\n ndim = 1\n ds = []\n for i in range(ndim):\n img = image[:, :, i]\n blocks = extract_blocks(img.reshape(240, 320), (ksize, ksize))\n lst = []\n for block in blocks:\n lst.append(np.sum(np.multiply(block, k)))\n ds.append(np.array(lst).reshape(int(img.shape[0] / ksize), 
int(img.shape[1] / ksize)))\n return np.transpose(np.array(ds), (1, 2, 0))\n\ndef dsampleWithAvg(image, ksize, color=True):\n if color:\n ndim = 3\n else:\n ndim = 1\n ds = []\n for i in range(ndim):\n img = image[:, :, i]\n blocks = extract_blocks(img.reshape(240, 320), (ksize, ksize))\n lst = []\n for block in blocks:\n lst.append(np.mean(block))\n ds.append(np.array(lst).reshape(int(img.shape[0] / ksize), int(img.shape[1] / ksize)))\n return np.transpose(np.array(ds), (1, 2, 0))\n\ndef extract_blocks(img, blocksize):\n M, N = img.shape\n b0, b1 = blocksize\n return img.reshape(M // b0, b0, N // b1, b1).swapaxes(1, 2).reshape(-1, b0, b1)","sub_path":"data-preparation/UCF-101/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"394258068","text":"# Jogo da velha (tic-tac-toe)\n\nimport pygame\nfrom pygame import *\n\n# initialize the pygame library\npygame.init()\n\n# define colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n# initialize the font (text) library\npygame.font.init()\n\n# create specific formatting for texts\nfonte30 = pygame.font.SysFont('Comic Sans MS', 30)\nfonte50 = pygame.font.SysFont('Comic Sans MS', 50)\n\n\n######## GAME FUNCTIONS ##########\n\n# function that plays a sound file\ndef play(caminho_arquivo):\n pygame.mixer.music.load(caminho_arquivo)\n pygame.mixer.music.set_volume(0.2)\n pygame.mixer.music.play()\n\n\n# function that prints a message in the notification area\ndef mensagem(texto):\n textsurface = fonte30.render(texto, 1, WHITE)\n tela.blit(textsurface, (10, 600))\n\n\n# function that clears messages from the notification area\ndef limpar_mensagens():\n pygame.draw.rect(tela, BLACK, (10, 600, 600, 50))\n pygame.display.update()\n\n\n# function that ends the game\ndef finaliza(texto):\n fundo = pygame.image.load(\"img/fundo.png\").convert_alpha()\n tela.blit(fundo, pygame.rect.Rect(0, 0, 128, 128))\n pygame.display.flip()\n text = fonte50.render(texto, 1, WHITE)\n text_rect = text.get_rect()\n tela.blit(text, [100, 250])\n\n\n# function that checks whether some player has won\ndef verifica_se_ganhou(jogada):\n vitorias_possiveis = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]\n\n for j in vitorias_possiveis:\n vezes = 0\n for l in j:\n for i in jogada:\n\n if l == i:\n vezes += 1\n\n if vezes == 3:\n break\n\n if vezes == 3:\n return True\n else:\n return False\n\n\n# opening screen of the game\ndef abertura():\n # music\n pygame.mixer.music.load(jogo_inicio)\n pygame.mixer.music.set_volume(1)\n pygame.mixer.music.play(0)\n\n # title\n textsurface = fonte50.render(\"JOGO DA VELHA\", 1, WHITE)\n tela.blit(textsurface, (int(quadrante[0] / 2) + 0, int(quadrante[1]) + 140))\n\n # capcom animation\n for i in range(1, 25):\n tela.blit(pygame.image.load('img/capcom' + str(i) + '.png'),\n (int(quadrante[0] / 2) + 50, int(quadrante[1]) + 50))\n pygame.time.delay(100)\n pygame.display.flip()\n\n pygame.time.delay(1000)\n\n\n# window size\ntamanho_tela = (600, 650)\n\n# board size\ntamanho_tabuleiro = (600, 600)\n\ntela = pygame.display.set_mode(tamanho_tela)\n\n# game name (title bar) and background color\npygame.display.set_caption(\"Jogo da Velha Beta\")\ntela.fill(BLACK)\n\n# dimensions of each square on the board\nquadrante = (int(tamanho_tabuleiro[0] / 3), int(tamanho_tabuleiro[1] / 3))\n\n# sound effects\njogada_certa = 'audio/jogada_certa.mp3'\njogada_errada = 'audio/jogada_errada.mp3'\njogo_inicio = 'audio/capcom.mp3'\njogo_perdeu = 'audio/jogo_perdeu.mp3'\njogo_ganhou = 'audio/jogo_ganhou.mp3'\n\n# start the game intro\nabertura()\n\n# player images\nx = pygame.image.load(\"img/x.png\")\no = pygame.image.load(\"img/o.png\")\n# scale the images to the size of a square\nx = pygame.transform.scale(x, quadrante)\no = pygame.transform.scale(o, quadrante)\n\n# create the board (surface)\ntabuleiro = pygame.Surface(tamanho_tabuleiro)\n# board background color\ntabuleiro.fill(WHITE)\n\n# ------- drawing of the board lines ---------\npygame.draw.rect(tabuleiro, BLACK, ((tamanho_tabuleiro[1] / 3), 0, 5, tamanho_tabuleiro[1]))\npygame.draw.rect(tabuleiro, BLACK, ((tamanho_tabuleiro[1] / 3) * 2, 0, 5, tamanho_tabuleiro[1]))\npygame.draw.rect(tabuleiro, BLACK, (0, (tamanho_tabuleiro[0] / 3), tamanho_tabuleiro[0], 5))\npygame.draw.rect(tabuleiro, BLACK, (0, (tamanho_tabuleiro[0] / 3) * 2, tamanho_tabuleiro[0], 5))\n\n# possible positions ([x,y]) on the board - where the X and the O go\nposicoes_tabuleiro = [\n # [x, y, filled?]\n # first row\n [0, 0, 0], [quadrante[0], 0, 0], [2 * quadrante[0], 0, 0],\n # second row\n [0, quadrante[1], 0], [quadrante[0], quadrante[1], 0], [2 * quadrante[0], quadrante[1], 0],\n # third row\n [0, 2 * quadrante[1], 0], [quadrante[0], 2 * quadrante[1], 0], [2 * quadrante[0], 2 * quadrante[1], 0]\n]\n\n# add the board (testing)\ntela.blit(tabuleiro, (0, 0))\n# update the display with the board\npygame.display.flip()\n\n# table that stores the moves\njogadas = [[\"X\", []], [\"O\", []]]\n\n# player names\njogador = (\"X\", \"O\")\n\n# move counter - to know when the game ends (in case of a draw)\njogada = 0\n\n# variable that holds whose turn it is\nvez = jogador[0]\nmensagem(\"Vez de Jogar: \" + vez)\n\n# event loop flag\ndone = False\n\n# -------- Event loop -----------\nwhile not done:\n\n # mouse position\n position = pygame.mouse.get_pos()\n\n # --- Main loop\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n done = True\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n\n # read the position and record X or O\n for i in range(len(posicoes_tabuleiro)):\n\n if posicoes_tabuleiro[i][0] < position[0] < posicoes_tabuleiro[i][0] + quadrante[0] and \\\n posicoes_tabuleiro[i][1] < position[1] < posicoes_tabuleiro[i][1] + \\\n quadrante[\n 1]:\n\n limpar_mensagens()\n jogada += 1\n\n # check whether the square is already occupied\n if posicoes_tabuleiro[i][2] == 1:\n play(jogada_errada)\n mensagem('Posição Ocupada! Jogue numa vazia!')\n\n # if it is available,\n else:\n\n # if it is X's turn\n if vez == jogador[0]:\n\n # mark the position as occupied\n posicoes_tabuleiro[i][2] = 1\n\n # draw the \"X\" image at the position\n tela.blit(x, (posicoes_tabuleiro[i][0], posicoes_tabuleiro[i][1]))\n\n # record the played position\n jogadas[1][1].append(i)\n\n # check whether the player has won\n if verifica_se_ganhou(jogadas[1][1]):\n finaliza(\"Vencedor: \" + vez)\n play(jogo_ganhou)\n\n else:\n if jogada == 9:\n finaliza(\"VELHOU!\")\n play(jogo_perdeu)\n else:\n vez = jogador[1]\n mensagem(\"Vez de Jogar: \" + vez)\n play(jogada_certa)\n\n # if it is O's turn\n else:\n\n # mark the position as occupied\n posicoes_tabuleiro[i][2] = 1\n\n # draw the \"O\" image at the position\n tela.blit(o, (posicoes_tabuleiro[i][0], posicoes_tabuleiro[i][1]))\n\n # record the played position\n jogadas[0][1].append(i)\n\n # check whether the player has won\n if verifica_se_ganhou(jogadas[0][1]):\n finaliza(\"Vencedor: \" + vez)\n play(jogo_ganhou)\n\n else:\n if jogada == 9:\n finaliza(\"VELHOU!\")\n play(jogo_perdeu)\n else:\n vez = jogador[0]\n mensagem(\" Vez de Jogar: \" + vez)\n play(jogada_certa)\n\n pygame.display.flip()\n\npygame.quit()","sub_path":"lista09/pygame/pygame_velha/pygame_velha.py","file_name":"pygame_velha.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"450876239","text":"import abjad\nimport baca\nimport cary\n\n\ndef make_grace_indices():\n \"\"\"\n Makes grace indices.\n\n >>> import cary\n\n .. container:: example\n\n Makes grace indices:\n\n >>> graces = cary.make_grace_indices()\n\n >>> graces['first']['indices'][:10]\n [118, 119, 120, 121, 122, 123, 124, 125, 126, 127]\n\n >>> graces['first']['material'][:6]\n [0, 0, 0, 0, 0, [Note(\"d'16\"), Note(\"d'16\"), Note(\"d'16\"), Note(\"d'16\")]]\n\n >>> graces['second']['indices'][:10]\n [311, 312, 313, 314, 315, 316, 317, 318, 319, 320]\n\n >>> graces['second']['material'][:6]\n [[Note(\"d'16\")], [Note(\"d'16\")], [Note(\"d'16\")], [Note(\"d'16\")], [Note(\"d'16\")], [Note(\"d'16\")]]\n\n \"\"\"\n regions = cary.make_key_region_indices()\n first = {}\n first[\"indices\"] = list(regions[1]) + list(regions[2]) + list(regions[5])\n assert len(first[\"indices\"]) == 386\n lengths = baca.sequence([[0, 0, 0, 0, 0, 4], [1, 2, 2], [0, 0, 0, 3]])\n first[\"material\"] = lengths.helianthate(1, 1).flatten()\n first[\"material\"] = [\n [abjad.Note(\"d'16\") for _ in range(x)] if 0 < x else 0\n for x in first[\"material\"]\n ]\n assert len(first[\"material\"]) == 156\n second = {}\n second[\"indices\"] = list(range(311, 1000))\n assert len(second[\"indices\"]) == 689\n second[\"material\"] = 24 * [[abjad.Note(\"d'16\")]]\n assert len(second[\"material\"]) == 24\n graces = {\"first\": first, \"second\": second}\n return graces\n","sub_path":"cary/tools/make_grace_indices.py","file_name":"make_grace_indices.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"155848810","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\nobjID = '1237665023837798488'\nhostfile = 'hosts_speconly_csp-cfa-ss-ptf'\nfname = hostfile + '_' + objID + '.fit'\n\nc = 2.98e18\nnm_to_mujy = 3.631\nmujy_to_erg = 1e-29\n\nos.chdir(os.getcwd() + '/sdss')\nhost_df = pd.read_csv(hostfile + '.cat', delimiter='\\t', index_col=0)\nos.chdir(str(os.getcwd()) + '/best_fits')\ndata = 
np.genfromtxt(fname, dtype=float)\n\nflux_df = host_df.filter(like='f_', axis=1)\nfluxerr_df = host_df.filter(like='e_', axis=1)\n\nprint(objID)\nprint(flux_df.loc[int(objID), :])\n\nsdssbands = np.asarray([3543, 4770, 6231, 7625, 9134])\ninputflux = np.asarray(flux_df.loc[int(objID), :])\ninputfluxerr = 1/np.sqrt(np.asarray(fluxerr_df.loc[int(objID), :]))\nprint(inputflux)\n\ninputflux = (inputflux * mujy_to_erg * 1e19 * c)/sdssbands**2\ninputfluxerr = (inputfluxerr * mujy_to_erg * 1e19 * c)/sdssbands**2\n\nplt.plot(data[:, 0], data[:, 1])\nplt.errorbar(sdssbands, inputflux, yerr=inputfluxerr, marker='o', linestyle=' ')\nplt.xlabel(r'$\\lambda$')\nplt.xlim([3000., 8000.])\n#plt.ylim([0., 300.])\nplt.show()\n","sub_path":"_sed_plots.py","file_name":"_sed_plots.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"416216697","text":"import xbmc\n\nfrom libs.pykodi import datetime_now, datetime_strptime\nfrom libs import quickjson\n\ndef add_one(dbid, mediatype):\n mediaitem = quickjson.get_details(dbid, mediatype)\n playcount = mediaitem['playcount'] + 1\n lastplayed = datetime_now()\n quickjson.set_item_details(dbid, mediatype, playcount=playcount, lastplayed=str(lastplayed).split('.')[0])\n xbmc.executebuiltin('Container.Refresh')\n\ndef remove_one(dbid, mediatype):\n mediaitem = quickjson.get_details(dbid, mediatype)\n lastplayed = datetime_strptime(mediaitem['lastplayed'], '%Y-%m-%d %H:%M:%S')\n dateadded = datetime_strptime(mediaitem['dateadded'], '%Y-%m-%d %H:%M:%S')\n\n newplaycount = mediaitem['playcount'] - 1\n newlastplayed = lastplayed - (lastplayed - dateadded) / newplaycount\n quickjson.set_item_details(dbid, mediatype, playcount=newplaycount, lastplayed=str(newlastplayed).split('.')[0])\n xbmc.executebuiltin('Container.Refresh')\n","sub_path":"python/watchedcount.py","file_name":"watchedcount.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"428516716","text":"# ==========================================================================================================\r\n# AstroDataCube-Spectra_1E0102.py\r\n# ==========================================================================================================\r\n#\r\n# AUTHOR: F. 
Schmidt (f.schmidt.16@ucl.ac.uk)\r\n# CREATION DATE: 25.10.2017\r\n# PROJECT: WiFeS\r\n# DESCRIPTION: Python script to extract data and plot spctra from AstroDataCube 1E0102\r\n# NOTES: -\r\n#\r\n# HISTORY: 25.10.2017: Script creation (F.Schmidt)\r\n#\r\n# ==========================================================================================================\r\n\r\n\r\n\r\n# Load libraries\r\nfrom astropy.io import fits\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as mtick\r\nimport numpy as np\r\nfrom FUNCTIONS import *\r\nimport os\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n# +--------------------------------------------------------------------------------------------------------+\r\n# | FITS File Operations |\r\n# +--------------------------------------------------------------------------------------------------------+\r\n\r\n\r\n# Load the FITS file\r\nos.chdir('/home/maria/Documents/SNR-DATA/')\r\nhdulist = fits.open('mosB_1E0102_wcs_lowess_frac_0.6_it_5_removed_deblend.fits')\r\n\r\n# Extract the data and store it in array\r\nscidata = hdulist[0].data\r\n\r\n\r\n\r\n\r\n# Define the dimensions\r\nslabs = len(scidata) # bin no, 'no of data points in each spectrum'\r\n\r\nyaxis = len(scidata[0]) #56, y AXIS 1st dim \r\n\r\nxaxis = len(scidata[0][0]) #48, x AXIS 2nd dim\r\n\r\nprint(xaxis,yaxis)\r\n\r\n###constant for conversions\r\nage = 1020 * (365)#*24*60*60) #age of SNR in seconds\r\nprint(age)\r\nsmc_dist = 59000 #distance to smc in parsec\r\npc = 3.086e13 #km in a parsec\r\narcsec_to_km = (np.pi * smc_dist * pc)/(180*3600)\r\n\r\nphys_size = 12 * arcsec_to_km\r\nprint(phys_size)\r\n\r\nage_s = phys_size/2150 #in seconds\r\nage_years = age_s/(365*24*60*60)\r\nprint(age_years*(365))\r\n\r\n\r\nc = 2.9979e5 #in km\r\nobspeak = 5009.65 #need to figure out whether its shifted!!\r\nlabpeak = 5006.842 \r\n\r\nzpeak = (obspeak - labpeak)/obspeak\r\nradvel = c * zpeak\r\n\r\n\r\n\r\n#########\r\n#make x-axis\r\n\r\nlambda_start = 4200.0 #from vogt et al. 2017\r\nlambda_stop = 5548.0\r\nlambda_step = (lambda_stop - lambda_start)/slabs\r\n\r\nobswav = np.arange(lambda_start, lambda_stop, lambda_step)\r\n\r\nobs_vel = []\r\n\r\n##going from wavelength on spectral axis to velocity\r\n\r\nfor i in obswav:\r\n \r\n z = ((i - labpeak)/i)\r\n j = (c * z) - radvel\r\n obs_vel.append(j)\r\n\r\n\r\n\r\n###from obs vel observationally we want to see what velocity values we want to\r\n#mask out. 
then we save an array with the indices\r\n\r\n\r\nfor index,line in enumerate(obs_vel):\r\n if line == find_nearest(obs_vel,-4000): #cutting off rest of spect #MAKE THESE LARGER WHEN IM PLOTTING GRID\r\n start = index\r\n \r\n if line == find_nearest(obs_vel,-177):\r\n stop = index \r\n \r\n if line == find_nearest(obs_vel,194): #taking out narrow lines\r\n start2 = index \r\n \r\n if line == find_nearest(obs_vel,4000):\r\n stop2 = index\r\n\r\n \r\n \r\n \r\nind = np.arange(start,stop,1) \r\nind_2 = np.arange(start2,stop2,1)\r\n\r\noiii_line_indices = np.concatenate((ind,ind_2)) ##all relevant information is in this array\r\n\r\n###resetting velocity axis so its the same size as flux array\r\nvel_axis = [obs_vel[i] for i in oiii_line_indices]\r\n\r\n\r\n\r\n\r\n# Initialize list to store spectrum for each pixel\r\n\r\nspectrum_pixel = []\r\nxvals = []\r\nyvals = []\r\ncomb = []\r\ntotal_pixel = []\r\n\r\n#Loop over all pixels in x dimension, 56 times\r\nfor i in range(0, (yaxis)):\r\n \r\n # Loop over all pixels in y direction, 48 times\r\n for j in range(0, (xaxis)):\r\n \r\n comb.append([j + 1,i + 1]) #matches way pixels are read in in image (checked with QFITSview)\r\n \r\n conv_km_j = round(((j - 19)*arcsec_to_km)/age,1)\r\n conv_km_i = round(((i - 27)*arcsec_to_km)/age,1)\r\n \r\n #changing it so centre of expansion is point '0-0'\r\n \r\n xvals.append(conv_km_j)\r\n yvals.append(conv_km_i)\r\n # Generate temporary array\r\n tmp_list1 = []\r\n tmp_list2 = []\r\n \r\n # Loop over all slabs\r\n for k in oiii_line_indices: #for grid\r\n \r\n tmp_list1.append(scidata[k][i][j])\r\n \r\n for k in range(0,slabs): #for integrated spectra\r\n \r\n tmp_list2.append(scidata[k][i][j])\r\n \r\n # Append tmp_list to spectrum\r\n spectrum_pixel.append(tmp_list1)\r\n total_pixel.append(tmp_list2)\r\n\r\n\r\n###locating peak of velocity...\r\n#creating a velocity array for every spect_pixel value, as they are all going to be different sizes\r\nvel_axis_grid = [vel_axis] * len(spectrum_pixel)\r\n\r\n\r\n\r\n###getting peak >0 and <0 of 1361\r\n#for i in vel_axis_grid[1361]:\r\n # if i > 0:\r\n # print(i)\r\n\r\n\r\n\r\n\r\n\r\n####\r\n\r\n#HERE I WANT TO SELECT THE RELEVANT X,Y POINTS FOR THE SPECTRA with OIII emission\r\n#ALSO WANT TO TRIM THESE DOWN SO WE ONLY HAVE THE LINE OF INTEREST\r\n#SO WE HAVE ONE LOOP WHERE WE TRIM OUT SPECTRA AND THEIR ASSOCIATED POINTS IN SPACE\r\n#AND ALSO TRIMMING THESE DOWN AROUND THE PEAK\r\n\r\nx_neb = []\r\ny_neb = []\r\nx = []\r\ny = []\r\noiii_spectrum = []\r\nvel_emission = []\r\n\r\n\r\n\r\nfor i in range(0,len(xvals)): #loop over every spectrum\r\n if sum(spectrum_pixel[i]) > 6e-16:\r\n \r\n \r\n \r\n peak = np.amax((np.absolute(spectrum_pixel[i])))\r\n for index,line in enumerate(spectrum_pixel[i]): #loop over every entry in every spectrum\r\n \r\n if line == peak:\r\n #print(i)\r\n x_neb.append(xvals[i])\r\n y_neb.append(yvals[i])\r\n #find peak\r\n oiii_spectrum.append(spectrum_pixel[i][index - 10: index + 10])\r\n vel_emission.append(vel_axis_grid[i][index - 10: index + 10])\r\n###trimmed relevant spectra, only relevant ones will be read in\r\n \r\n \r\n\r\n#print(np.shape(oiii_spectrum))\r\n\r\n\r\n\r\n\r\ndatagrid = ([[0,0,0,0]] * len(oiii_spectrum) * len(oiii_spectrum[0]))\r\n##ALSO NEEDS TO MATCH DIMENSIONS, OF EVERY DIFFERENT SPECT AND VEL AXIS FOR EVERY SPECTRUM\r\nprint(len(datagrid))\r\n\r\n\r\n\r\n#writing out whole grid\r\n\r\ncounter = 20\r\nfor i in range(0,len(oiii_spectrum)):\r\n \r\n #DIFFERENT FOR EACH ONE\r\n #spect length\r\n \r\n x_index = 
[x_neb[i]] * 20\r\n y_index = [y_neb[i]] * 20\r\n \r\n \r\n \r\n spect_chunk = (list(zip(x_index,y_index,vel_emission[i],oiii_spectrum[i])))\r\n print(spect_chunk)\r\n datagrid[(counter - 20) : counter] = spect_chunk\r\n counter = counter + 20\r\n \r\n\r\n\r\n\r\nos.chdir('/home/maria/Documents')\r\nwith open('e0102_mapped_datagrid', 'w') as fp:\r\n fp.write('\\n'.join('{} {} {} {}'.format(x[0],x[1],x[2],x[3]) for x in datagrid))\r\n\r\n\r\n\r\n##############checking how grid reads out - 3d plotting\r\nlist1,list2,list3,list4 = list(zip(*datagrid)) \r\nv_x = (makeitfloat(list1))\r\nv_y = (makeitfloat(list2))\r\nv_z = (makeitfloat(list3))\r\ndensity = makeitfloat(list4)\r\n\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111,projection='3d')\r\n\r\nax.set_xlabel('X axis')\r\nax.set_ylabel('Y axis')\r\nax.set_zlabel('Z axis')\r\n\r\n\r\nplt.title(\"0 rot\")\r\nax.scatter(v_x,v_y,v_z,c=density,cmap='spring',label='Wifes High density')\r\n\r\n\r\n'''\r\n\r\n##### \r\n####if there are any empty spectra full of nans, then we're taking note of these so we don't plot the\r\n#this is because we're plotting entire spectrum so we need to remove then so they dont affect the summation\r\n#might not actually need any of this\r\nnan_cube = []\r\nfor i in range(0,len(total_pixel)): #looping over every spectra\r\n tmp_list = []\r\n\r\n for j in range(0,len(total_pixel[0])): #looping over every intensity point \r\n x = np.isnan(total_pixel[i][j]) \r\n tmp_list.append(x)\r\n \r\n if set(tmp_list) == {True}:\r\n nan_cube.append(i)\r\n \r\n\r\nspect_minusnan = []\r\n\r\nfor i in range(0,len(total_pixel)):\r\n if i not in nan_cube:\r\n spect_minusnan.append(total_pixel[i])\r\n\r\n\r\n\r\n\r\ninteg_spect = []\r\nfor i in zip(*spect_minusnan):\r\n\r\n integ_spect.append(sum(i))\r\n\r\n\r\n\r\n\r\n# Close the FITS file\r\nhdulist.close()\r\n\r\n# +--------------------------------------------------------------------------------------------------------+\r\n# | Plots |\r\n# +--------------------------------------------------------------------------------------------------------+\r\n\r\n\r\n\r\n\r\n# Plot the individual spectra for each pixel\r\n# ------------------------------------------\r\nfig = plt.figure()\r\npl = fig.add_subplot(111)\r\n\r\n# Axis labels\r\npl.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))\r\n\r\n# Labels\r\n#pl.set_title(r\"WiFeS 1E0102\".format(i+1))\r\npl.set_xlabel(r\"Wavelength ($\\AA$)\")\r\npl.set_ylabel(r\"Flux\")\r\n\r\n# Plot data\r\nplt.plot(vel_axis_grid[1361],spectrum_pixel[1361], color=\"darkred\",linewidth=0.7)\r\n\r\n\r\n# Plot the spectrum for the entire nebula\r\n# ---------------------------------------\r\n \r\n# Initialise figure\r\nfig = plt.figure()\r\npl = fig.add_subplot(111)\r\n\r\n# Axis labels\r\npl.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))\r\n\r\n# Labels\r\n#pl.set_title(r\"WiFeS 1E0102\".format(i+1))\r\npl.set_xlabel(r\"Wavelength ($\\AA$)\")\r\npl.set_ylabel(r\"Flux\")\r\n\r\n# Plot data\r\nplt.plot(obs_vel,integ_spect, color=\"darkred\",linewidth=0.7)\r\n#x,y = list(zip(*spatial_pixel))\r\n#plt.scatter(x,y)\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\r\ninteg_spect_data = list(zip(vel_axis,integ_spect))\r\n\r\nos.chdir('/home/maria/damocles-master/damocles/input')\r\nnp.savetxt('integrated_oiii_e0102.in',integ_spect_data,fmt=\"%d %d\")\r\n\r\n\r\n# Save .png file\r\n#out_name = \"1E0102.png\".format(num=i+1)\r\n#fig.savefig(out_name, 
dpi=500)\r\n#plt.close(fig)\r\n##'''","sub_path":"AstroDataCube-Spectra_1E0102.py","file_name":"AstroDataCube-Spectra_1E0102.py","file_ext":"py","file_size_in_byte":9995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
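A standalone sketch of the wavelength-to-velocity conversion used in AstroDataCube-Spectra_1E0102.py above, with the script's own constants (illustrative only); note that the script, and this sketch with it, divides by the observed wavelength rather than the conventional rest wavelength, a negligible difference at these speeds.

import numpy as np

c = 2.9979e5         # speed of light in km/s
labpeak = 5006.842   # [O III] laboratory wavelength, Angstrom
obspeak = 5009.65    # observed line peak used to estimate the bulk redshift

# bulk radial velocity of the remnant, km/s
radvel = c * (obspeak - labpeak) / obspeak

# velocity axis relative to the remnant's bulk motion, km/s
obswav = np.arange(4200.0, 5548.0, 1.0)
obs_vel = c * (obswav - labpeak) / obswav - radvel
print(obs_vel[:3], obs_vel[-3:])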
+{"seq_id":"40491229","text":"import csv\n\n#reads in movieData file in peopleReader list\nwith open('movieData', 'rt') as csvfileR:\n    peopleReader = []\n    pReader = csv.reader(csvfileR, delimiter='\\t', quotechar='|')\n    for row in pReader:\n        endRow = row[6:]\n        peopleReader.append(row)\n\nmovieList = [] #will create a list of all movies\n\n#creates the class Movie and allows us to access each movie's variables\nclass Movie:\n    def __init__(self, title, imdb, boxOffice, score, year, writers, directors, producers, femTalkPerc, femWPerc, femDPerc, femPPerc, overallFemPerc):\n        self.title = title\n        self.imdb = imdb\n        self.boxOffice = boxOffice\n        self.score = int(score)\n        self.year = int(year)\n        self.writers = writers\n        self.directors = directors\n        self.producers = producers\n        self.femTalkPerc = femTalkPerc\n        self.femWriterPerc = femWPerc\n        self.femDirectorPerc = femDPerc\n        self.femProducerPerc = femPPerc\n        self.overallFemProductionPerc = overallFemPerc\n\n    def __str__(self):\n        return(str(self.title))\n    def moviePrint(self):\n        print(\"Title: \" + self.title + \"\\tBox Office: \" + str(self.boxOffice) + \"\\tScore: \" + str(self.score) + \"\\tYear: \" + str(self.year))\n\npeopleList = [] #will create a list of all people involved in the movies\n\n#creates the class People and allows us to access each person's variables (name, gender, what movies they've been in, and what role they had in production)\nclass People:\n    def __init__(self, name, gender, movie, role):\n        self.name = name\n        self.gender = gender\n        self.movie = [(movie, role)]\n        self.avScore = movie.score\n    def __str__(self):\n        return(str(self.name))\n    def peoplePrint(self, printBool):\n        stringMovie = \"[\"\n        count = 0\n        for x in self.movie:\n            if count != len(self.movie) - 1:\n                stringMovie = stringMovie + x[0].title + \", \"\n            else:\n                stringMovie = stringMovie + x[0].title + \"]\"\n            count += 1\n        if printBool:\n            print(\"Name: \" + self.name + \"\\tGender: \" + self.gender + \"\\tMovies: \" + stringMovie + \"\\tScore: \" + str(\n                self.avScore))\n        else:\n            return(\"Name: \" + self.name + \"\\tGender: \" + self.gender + \"\\tMovies: \" + stringMovie + \"\\tScore: \" + str(\n                self.avScore))\n    def numMovies(self):\n        count = 0\n        for movie in [a for a in self.movie]:\n            count+=1\n        return count\n\n\n\ncount = 0\n\nfor row in peopleReader:\n    if count > 0: #don't want the headers being stored\n\n        #instantiates Movie object and appends it to movieList\n        movieList.append(Movie(row[0], row[1], int(row[2]), float(row[3]), int(row[4]), row[5], row[6], row[7], float(row[8]), float(row[9]), float(row[10]), float(row[11]), float(row[12])))\n\n        #breaks apart list of writers and creates a People object for each person\n        writers = movieList[count-1].writers.split(\"], \")\n        for w in writers:\n            if w == \"[]\":\n                break\n            w = w.strip().split(\", \")\n            if w[0][:3] == \"[[\\\\\":\n                name = w[0][4:-1]\n            elif w[0][:2] == \"[[\":\n                name = w[0][3:-1]\n            elif w[0][0] == \"[\":\n                name = w[0][2:-1]\n            else:\n                name = w[0][1:-1]\n            gender = w[1][2]\n            name = name.split(\" \")\n            name = \" \".join(name)\n            #instantiates People object for new people and appends to peopleList\n            if (name not in [p.name for p in peopleList]):\n                person = People(name, gender, movieList[count-1], \"w\")\n                peopleList.append(person)\n            #for repeat people, finds person in peopleList, appends the movie to their list, and recalculate their average score\n            else:\n                index = [p.name for p in peopleList].index(name)\n                person = peopleList[index]\n                person.movie.append((movieList[count-1], \"w\"))\n                if (movieList[count-1].title not in [t[0] for t in set([movie for movie in person.movie])]):\n                    person.avScore = (person.avScore*(len(person.movie)-1)+person.movie[-1][0].score)/len(person.movie)\n\n        #does the exact same thing for directors as writers\n        directors = movieList[count - 1].directors.split(\"], \")\n        for d in directors:\n            if d == \"[]\":\n                break\n            d = d.strip().split(\", \")\n            if d[0][:3] == \"[[\\\\\":\n                name = d[0][4:-1]\n            elif d[0][:2] == \"[[\":\n                name = d[0][3:-1]\n            elif d[0][0] == \"[\":\n                name = d[0][2:-1]\n            else:\n                name = d[0][1:-1]\n            gender = d[1][2]\n            name = name.split(\" \")\n            name = \" \".join(name)\n            if (name not in [d.name for d in peopleList]):\n                person = People(name, gender, movieList[count - 1], \"d\")\n                peopleList.append(person)\n            else:\n                index = [d.name for d in peopleList].index(name)\n                person = peopleList[index]\n                person.movie.append((movieList[count - 1], \"d\"))\n                if (movieList[count - 1].title not in [t[0] for t in set([movie for movie in person.movie])]):\n                    person.avScore = (person.avScore * (len(person.movie) - 1) + person.movie[-1][0].score) / len(person.movie)\n\n        #does the exact same thing for producers as writers\n        producers = movieList[count - 1].producers.split(\"], \")\n        for p in producers:\n            if p == \"[]\":\n                break\n            p = p.strip().split(\", \")\n            if p[0][:3] == \"[[\\\\\":\n                name = p[0][4:-1]\n            elif p[0][:2] == \"[[\":\n                name = p[0][3:-1]\n            elif p[0][0] == \"[\":\n                name = p[0][2:-1]\n            else:\n                name = p[0][1:-1]\n            gender = p[1][2]\n            name = name.split(\" \")\n            name = \" \".join(name)\n            if (name not in [p.name for p in peopleList]):\n                person = People(name, gender, movieList[count - 1], \"p\")\n                peopleList.append(person)\n            else:\n                index = [p.name for p in peopleList].index(name)\n                person = peopleList[index]\n                person.movie.append((movieList[count - 1], \"p\"))\n                if (movieList[count - 1].title not in [t[0] for t in set([movie for movie in person.movie])]):\n                    person.avScore = (person.avScore * (len(person.movie) - 1) + person.movie[-1][0].score) / len(person.movie)\n        count+=1\n\n\ncsvfileR.close()\n\nprint(\"peopleList length: \", len(peopleList)) #diagnostic, should get 10493\n\nprint(\"movieList length: \", len(movieList)) #diagnostic, should get 2467\n\n#average Bechdel score of everybody\navBechAll = sum([p.avScore for p in peopleList]) / len(peopleList)\nprint(\"The average average Bechdel score is: \", avBechAll)\n\nf =[]\nm = []\nfor person in peopleList:\n    if person.gender == \"f\":\n        f.append(person)\n    elif person.gender == \"m\":\n        m.append(person)\n\n\navF = sum([p.avScore for p in f]) / len(f)\navM = sum([p.avScore for p in m]) / len(m)\nprint(\"The average average Bechdel score for females is: \", avF)\nprint(\"There are \", len(f), \" females out of \", len(peopleList), \" people.\")\nprint(\"The average average Bechdel score for males is: \", avM)\nprint(\"There are \", len(m), \" males out of \", len(peopleList), \" people.\")\n'''\n#see if this holds with a more random sample\nrF = [f[i] for i in range(len(f)) if i % 5 == 0]\nrM = [m[i] for i in range(len(m)) if i % 5 == 0]\navRF = sum([p.avScore for p in rF]) / len(rF)\navRM = sum([p.avScore for p in rM]) / len(rM)\nprint(\"The average average Bechdel score for every fifth female is: \", avRF)\nprint(\"The average average Bechdel score for every fifth 
male is: \", avRM)\n'''","sub_path":"baseObjectOriented.py","file_name":"baseObjectOriented.py","file_ext":"py","file_size_in_byte":7927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"564822794","text":"\"\"\"\n.. module:: test_extra_vals\n :synopsis: Test extra values\n.. moduleauthor:: David Eriksson \n\"\"\"\n\nfrom pySOT import Ackley, SyncStrategyNoConstraints, \\\n SymmetricLatinHypercube, RBFInterpolant, CubicKernel, \\\n LinearTail, CandidateDYCORS\nfrom poap.controller import ThreadController, BasicWorkerThread, EvalRecord\nimport numpy as np\nimport os.path\nimport logging\n\n\ndef main():\n if not os.path.exists(\"./logfiles\"):\n os.makedirs(\"logfiles\")\n if os.path.exists(\"./logfiles/test_extra_vals.log\"):\n os.remove(\"./logfiles/test_extra_vals.log\")\n logging.basicConfig(filename=\"./logfiles/test_extra_vals.log\",\n level=logging.INFO)\n\n print(\"\\nNumber of threads: 4\")\n print(\"Maximum number of evaluations: 500\")\n print(\"Sampling method: CandidateDYCORS\")\n print(\"Experimental design: Symmetric Latin Hypercube\")\n print(\"Surrogate: Cubic RBF\")\n\n nthreads = 4\n maxeval = 500\n nsamples = nthreads\n\n data = Ackley(dim=10)\n print(data.info)\n\n nextra = 10\n extra = np.random.uniform(data.xlow, data.xup, (nextra, data.dim))\n extra_vals = np.nan * np.ones((nextra, 1))\n for i in range(nextra): # Evaluate every second point\n if i % 2 == 0:\n extra_vals[i] = data.objfunction(extra[i, :])\n\n # Create a strategy and a controller\n controller = ThreadController()\n controller.strategy = \\\n SyncStrategyNoConstraints(\n worker_id=0, data=data,\n maxeval=maxeval, nsamples=nsamples,\n exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),\n response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,\n maxp=maxeval),\n sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),\n extra=extra, extra_vals=extra_vals)\n\n # Append the known function values to the POAP database since POAP won't evaluate these points\n for i in range(len(extra_vals)):\n if not np.isnan(extra_vals[i]):\n record = EvalRecord(params=(np.ravel(extra[i, :]),), status='completed')\n record.value = extra_vals[i]\n record.feasible = True\n controller.fevals.append(record)\n\n # Launch the threads and give them access to the objective function\n for _ in range(nthreads):\n worker = BasicWorkerThread(controller, data.objfunction)\n controller.launch_worker(worker)\n\n # Run the optimization strategy\n result = controller.run()\n\n print('Best value found: {0}'.format(result.value))\n print('Best solution found: {0}\\n'.format(\n np.array_str(result.params[0], max_line_width=np.inf,\n precision=5, suppress_small=True)))\n\nif __name__ == '__main__':\n main()\n","sub_path":"build/lib/pods/algorithms/pods/test/test_extra_vals.py","file_name":"test_extra_vals.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"17963813","text":"# -*- coding:utf-8 -*-\nimport json\n\nstr = '''\n[{\n \"name\": \"炸弹\",\n \"gender\": \"male\",\n \"birthday\": \"1992-10-18\"\n}, {\n \"name\": \"Selina\",\n \"gender\": \"female\",\n \"birthday\": \"1995-10-18\"\n}]\n'''\ndata = json.loads(str)\nprint(data)\nprint(data[1].get('age'))\n\nwith open('data.json', 'w', encoding='utf-8') as file:\n file.write(json.dumps(data, indent=2, ensure_ascii=False)) # 
indent by 2 characters\n","sub_path":"urllib/saveformatTest/jsonTest.py","file_name":"jsonTest.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"560131264","text":"from flask import render_template\nfrom Logdata.Log_Data_Blueprint import logdata\nfrom Logdata.model.LogData import LogData\n\n\n@logdata.route('/')\ndef index():\n    try:\n        items = LogData.objects().all()\n        values = set()\n\n        if items.count() == 0:\n            return render_template('main.html')\n\n        for item in items:\n            values.add(item.packageName)\n\n        return render_template('main.html', values=values)\n    except Exception as e:\n        print(e)\n","sub_path":"Logdata/Logdata/controller/MainController.py","file_name":"MainController.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"585994899","text":"import sys\r\n\r\n'''\r\nThe prime factors of 13195 are 5, 7, 13 and 29.\r\n\r\nWhat is the largest prime factor of the number 600851475143 ?\r\n\r\n'''\r\n\r\ntarget = 600851475143\r\n\r\ndef primefactor (n):\r\n    i = 2\r\n    l = i\r\n\r\n    while (i**2 <= n):\r\n        while not(n % i):\r\n            l = i\r\n            n //= i\r\n        i += 1\r\n    \r\n    if (n > l): return n\r\n    else: return l\r\n\r\nprint(primefactor(target))","sub_path":"projectEuler/euler003.py","file_name":"euler003.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"49362726","text":"\nfrom django.urls import include, path\nfrom . import views_psn\n# ../apis/+ endpoints\nurlpatterns = [\n    path('psnSubmitUser', views_psn.psnSubmitUser),\n    path('psnLogin', views_psn.psnLogin),\n    path('getPsnResumeInfo', views_psn.getPsnResumeInfo),\n    path('subPsnBaseInfo', views_psn.subPsnBaseInfo),\n    path('subPsnProjectInfo', views_psn.subPsnProjectInfo),\n    path('delPsnProjectInfo', views_psn.delPsnProjectInfo),\n    path('subPsnWorkInfo', views_psn.subPsnWorkInfo),\n    path('delPsnWorkInfo', views_psn.delPsnWorkInfo),\n    path('subPsnEduInfo', views_psn.subPsnEduInfo),\n    path('delPsnEduInfo', views_psn.delPsnEduInfo),\n]\n","sub_path":"polls/urls_psn.py","file_name":"urls_psn.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"413461766","text":"\"\"\"\n    Given an array where elements are sorted in ascending order,\n    convert it to a height balanced BST.\n    Source: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/#/description\n    \"\"\"\n\n\n# Constructor for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\n# DO NOT CHANGE THIS CLASS\nclass Solution(object):\n    def sortedArrayToBST(self, nums):\n        if len(nums) == 0:\n            return None\n        cutting = len(nums)//2\n        root = TreeNode(nums[cutting])\n        root.left = self.sortedArrayToBST(nums[:cutting])\n        root.right = self.sortedArrayToBST(nums[(cutting+1):])\n        return root\n\n\n#Please come up with your own testcases below:\n\n\n","sub_path":"problem_6/Fellow Codes Go Here/Yonghua_Zhuang_q6.py","file_name":"Yonghua_Zhuang_q6.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"223103341","text":"# two deques, o(n)\nfrom collections import deque\nfrom heapq import heappush, heappop\nfrom typing import List\n\nclass Solution:\n    def longestSubarray(self, nums: List[int], limit: int) -> int:\n        increStack, decreStack = deque([]), deque([])\n        ans = left = 0\n        for i, num in enumerate(nums):\n            while increStack and num - increStack[0][0] > limit:\n                left = max(left, increStack.popleft()[1]+1)\n            while decreStack and decreStack[0][0] - num > limit:\n                left = max(left, decreStack.popleft()[1]+1)\n            while increStack and increStack[-1][0] > num:\n                increStack.pop()\n            while decreStack and decreStack[-1][0] < num:\n                decreStack.pop()\n            increStack.append((num, i))\n            decreStack.append((num, i))\n            ans = max(ans, i - left + 1)\n        return ans\n\n# heap, nlogn\nclass Solution:\n    def longestSubarray(self, nums: List[int], limit: int) -> int:\n        minHeap, maxHeap = [], []\n        left = 0\n        ans = 0\n        for i, num in enumerate(nums):\n            heappush(minHeap, (num, i))\n            heappush(maxHeap, (-num, i))\n            while minHeap and num - minHeap[0][0] > limit:\n                left = max(left, minHeap[0][1]+1)\n                heappop(minHeap)\n            while maxHeap and -maxHeap[0][0] - num > limit:\n                left = max(left, maxHeap[0][1]+1)\n                heappop(maxHeap) \n            ans = max(ans, i-left+1)\n        return ans","sub_path":"1438. Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit.py","file_name":"1438. Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
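A quick sanity check for the two-pointer/monotonic-deque idea in the record above; this is an equivalent value-based variant written for illustration (not the record's exact code), and the expected outputs follow the problem's published examples.

from collections import deque
from typing import List

def longest_subarray(nums: List[int], limit: int) -> int:
    # max-deque keeps a decreasing front, min-deque an increasing front;
    # shrink the window from the left while max - min exceeds the limit
    maxd, mind = deque(), deque()
    left = ans = 0
    for right, num in enumerate(nums):
        while maxd and maxd[-1] < num:
            maxd.pop()
        while mind and mind[-1] > num:
            mind.pop()
        maxd.append(num)
        mind.append(num)
        while maxd[0] - mind[0] > limit:
            if maxd[0] == nums[left]:
                maxd.popleft()
            if mind[0] == nums[left]:
                mind.popleft()
            left += 1
        ans = max(ans, right - left + 1)
    return ans

print(longest_subarray([8, 2, 4, 7], 4))         # expected 2
print(longest_subarray([10, 1, 2, 4, 7, 2], 5))  # expected 4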
+{"seq_id":"83291736","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport sys\nimport math\n\nlistOfLists=[]\n\n#import data\nfor line in sys.stdin:\n    newList=[int(elem) for elem in line.split()]\n    listOfLists.append(newList)\n    \nnum=listOfLists[0][0]\nnumList=listOfLists[1]\n\n#calculate median \n#sort the list \nnumList.sort()\n\nif num%2==0:\n    num_median=num//2\n    result_median=(numList[num_median]+numList[num_median-1])//2\n    \n    lowerList=numList[0:num_median]\n    upperList=numList[num_median:num]\nelse: \n    num_median=int(math.ceil(num/2.0))\n    result_median=numList[num_median-1]\n    \n    lowerList=numList[0:num_median-1]\n    upperList=numList[num_median:num]\n\n\n#calculate Q1 and Q3\nlowNum=len(lowerList)\nupperNum=len(upperList)\n\n#sort the list \nlowerList.sort()\nupperList.sort()\n\nif lowNum%2==0:\n    numQ1=lowNum//2\n    resultQ1=(lowerList[numQ1]+lowerList[numQ1-1])//2\nelse: \n    numQ1=int(math.ceil(lowNum/2.0))\n    resultQ1=lowerList[numQ1-1]\n\nif upperNum%2==0:\n    numQ3=upperNum//2\n    resultQ3=(upperList[numQ3]+upperList[numQ3-1])//2\nelse: \n    numQ3=int(math.ceil(upperNum/2.0))\n    resultQ3=upperList[numQ3-1]\n\nprint(resultQ1)\nprint(result_median)\nprint(resultQ3)\n","sub_path":"10DaysOfStat/Day1:Quartiles.py","file_name":"Day1:Quartiles.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"412938177","text":"import shutil\nimport os\nimport numpy as np\nimport argparse\nimport glob\ndef get_files_from_folder(path):\n\n    files = [i for i in os.listdir(path) if i.startswith('Color')]\n    return np.asarray(files)\n\ndef main(path_to_data, path_to_test_data, train_ratio):\n    # get dirs\n    _, dirs, _ = next(os.walk(path_to_data))\n    \n    # calculates how many train data per class\n    data_counter_per_class = np.zeros((len(dirs)))\n    for i in range(len(dirs)):\n        path = os.path.join(path_to_data, dirs[i])\n        files = get_files_from_folder(path)\n        data_counter_per_class[i] = len(files)\n    test_counter = np.round(data_counter_per_class * (1 - train_ratio))\n\n    # transfers files\n    for i in range(len(dirs)):\n        path_to_original = os.path.join(path_to_data, dirs[i])\n        path_to_save = os.path.join(path_to_test_data, 
dirs[i])\n\n #creates dir\n if not os.path.exists(path_to_save):\n os.makedirs(path_to_save)\n files = get_files_from_folder(path_to_original)\n # moves data\n for j in range(int(test_counter[i])):\n dst1 = os.path.join(path_to_save, files[j])\n dst2 = os.path.join(path_to_save, files[j].replace('Color','Depth'))\n src1 = os.path.join(path_to_original, files[j])\n src2 = os.path.join(path_to_original, files[j].replace('Color','Depth'))\n shutil.move(src1, dst1)\n shutil.move(src2, dst2)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Dataset divider\")\n parser.add_argument(\"--data_path\", help=\"Path to data\",\n default = '/home/teama/dev/src/gitlab/Datasets/realsense_jp/fill_RGBD/train')\n parser.add_argument(\"--test_data_path_to_save\", help=\"Path to test data where to save\",\n default = '/home/teama/dev/src/gitlab/Datasets/realsense_jp/fill_RGBD/val')\n parser.add_argument(\"--train_ratio\", type = float , help=\"Train ratio - 0.7 means splitting data in 70 % train and 30 % test\",\n default = 0.85)\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.data_path, args.test_data_path_to_save, float(args.train_ratio))","sub_path":"depth_estimation/split_images.py","file_name":"split_images.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"444859681","text":"# coding=utf-8\n'''Trains a simple convnet on the MNIST dataset.\n\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\nfrom __future__ import print_function\nimport logging\nimport random\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.getLogger('missinglink').addHandler(logging.StreamHandler())\n\nimport numpy as np\nimport missinglink\n\nnp.random.seed(1337) # for reproducibility\nimport argparse\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.callbacks import Callback\nfrom keras.utils import np_utils\nfrom keras import backend as K\nfrom random import randint\n\nfrom custom_metrics import precision, recall, f1\n\n\nclass TestCallback(Callback):\n def __init__(self, test_data, callback):\n self.callback = callback\n self.precision_metrics = []\n self.recall_metrics = []\n self.x_test, self.y_test = test_data\n\n def on_epoch_end(self, batch, logs=None):\n with self.callback.test(self.model):\n score = self.model.evaluate(self.x_test, self.y_test, verbose=0)\n precision_index = self.model.metrics_names.index('precision')\n recall_index = self.model.metrics_names.index('recall')\n self.precision_metrics.append(score[precision_index])\n self.recall_metrics.append(score[recall_index])\n\n def on_train_end(self, epoch, logs=None):\n x = self.callback.calculate_weights_hash(self.model)\n self.callback.send_chart(name='Precision Recall',\n x_values=self.precision_metrics, y_values=self.recall_metrics,\n x_legend='Precision', y_legends='Recall',\n scope='test', type='line', model_weights_hash=x)\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\n\nparser.add_argument('--owner-id', required=False, default=None)\nparser.add_argument('--project-token', required=False, default=None)\nparser.add_argument('--epochs', type=int, default=None)\nparser.add_argument('--batch-size', type=int, 
default=None)\nparser.add_argument('--is-sampling', type=bool, default=True)\nparser.add_argument('--host')\n\nargs = parser.parse_args()\n\nbatch_size = random.choice([16, 32, 64, 128, 256, 512])\nepochs =random.choice(list(range(15,105,5)))\n\nnb_classes = 10\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n# number of convolutional filters to use\nnb_filters = 32\n# size of pooling area for max pooling\npool_size = (2, 2)\n# convolution kernel size\nkernel_size = (3, 3)\n\n# the data, shuffled and split between train and test sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nprint('args.is_sampling: ', args.is_sampling, ' type: ', type(args.is_sampling))\n\nif args.is_sampling:\n random_sampling_factor = randint(0, 100) / float(100)\n\n rows_to_delete = range(int(random_sampling_factor * X_train.shape[0]))\n\n X_train = np.delete(X_train, rows_to_delete, 0)\n y_train = np.delete(y_train, rows_to_delete, 0)\n\nif K.image_dim_ordering() == 'th':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(\n nb_filters, kernel_size[0], kernel_size[1],\n input_shape=input_shape))\n\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=pool_size))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(\n loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy', 'categorical_accuracy', 'mean_squared_error', 'hinge', precision, recall, f1])\n\ncallback = missinglink.KerasCallback(owner_id=args.owner_id, project_token=args.project_token, host=args.host)\n\ncallback.set_properties(display_name='KerasMnistTest', description='cool kerassing around')\n\nif args.is_sampling:\n callback.set_hyperparams(\n sampling_factor=1 - random_sampling_factor) # we log how many samples in % we have from the total samples\n\n\ncallback.set_hyperparams(train_sample_count=X_train.shape[0])\ncallback.set_hyperparams(test_sample_count=X_test.shape[0])\ncallback.set_hyperparams(total_epochs=epochs)\n\nmodel.fit(\n X_train, Y_train, batch_size=batch_size, nb_epoch=epochs, validation_split=0.2,\n callbacks=[callback, TestCallback((X_test, Y_test), callback)])\n\n\n\nwith callback.test(model):\n score = model.evaluate(X_test, Y_test, verbose=0)\n\nprint('Test score:', score[0])\nprint('Test accuracy:', score[1])\n","sub_path":"keras_mnist.py","file_name":"keras_mnist.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"390227558","text":"import sys, csv\nargs = sys.argv\n\ndef 
evaluate(pred_path, gt_path, split = False):\n with open(pred_path, mode='r') as pred:\n reader = csv.reader(pred)\n pred_dict = {rows[0]:rows[1] for rows in reader}\n\n with open(gt_path, mode='r') as gt:\n reader = csv.reader(gt)\n gt_dict = {rows[0]:rows[1] for rows in reader}\n\n total_count = 0\n correct_count = 0\n for key, value in pred_dict.items():\n if key not in gt_dict:\n if split:\n continue\n sys.exit(\"Item mismatch: \\\"{}\\\" does not exist in the provided ground truth file.\".format(key))\n if value == 'label':\n continue\n if gt_dict[key] == value:\n correct_count += 1\n total_count += 1\n\n accuracy = (correct_count / total_count) * 100\n print('Accuracy: {}/{} ({}%)'.format(correct_count, total_count, accuracy))\n","sub_path":"Problem4/hw3_eval.py","file_name":"hw3_eval.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"540544492","text":"import komand\nfrom .schema import DeactivateUserInput, DeactivateUserOutput\n# Custom imports below\nimport requests\nimport urllib\n\n\nclass DeactivateUser(komand.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name='deactivate_user',\n description='Deactivate a user',\n input=DeactivateUserInput(),\n output=DeactivateUserOutput())\n\n def run(self, params={}):\n \"\"\" Get the user by email \"\"\"\n email = params.get(\"email\")\n okta_url = self.connection.okta_url\n\n url = requests.compat.urljoin(okta_url, '/api/v1/users/' + urllib.quote(email))\n\n \"\"\" Search for the user by email to get the id \"\"\"\n response = self.connection.session.get(url)\n data = response.json()\n\n if response.status_code != 200:\n self.logger.error('Okta: Lookup User by Email failed: ' + data['errorSummary'])\n return {'success': False}\n\n userid = data['id']\n \"\"\" Deactivate the user by id \"\"\"\n url = requests.compat.urljoin(okta_url, '/api/v1/users/' + userid + '/lifecycle/deactivate')\n response = self.connection.session.post(url)\n\n if response.status_code == 401:\n self.logger.error('Okta: Invalid token or domain')\n\n if response.status_code != 200:\n raise Exception('Okta Deactivate User failed with status code: ' + str(response.status_code))\n return {'email': email, 'user_id': userid, 'success': True}\n\n def test(self):\n return {'success': True}\n","sub_path":"okta/komand_okta/actions/deactivate_user/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"615845377","text":"\"\"\"\nWilliam Austin\nPrakash Dhimal\nGeorge Mason University\nCS 584 Theory and Applications of Data Mining\nSemester project: Predicting the Impact of COVID-19\n\"\"\"\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter1d\nfrom scipy.signal import savgol_filter\nfrom sklearn import linear_model\nimport math\n\n\ndef newIntArray(size):\n return np.zeros(shape=(size,), dtype=np.int64)\n\n\ndef newFloatArray(size):\n return np.zeros(shape=(size,), dtype=np.float64)\n\n\ndef getSimpleFilter(size):\n x = np.ones(shape=(size,), dtype=np.float64)\n return x / size\n\n\ndef getSavitskyGovol(inputArray):\n return savgol_filter(inputArray, 7, 3)\n\n\ndef getGaussianAverage(inputArray, sigma):\n return gaussian_filter1d(inputArray, sigma, mode='nearest')\n\n\ndef getSimpleMovingAverage(inputArray, smoothingSize):\n smoothingFilter = getSimpleFilter(smoothingSize)\n smoothedArray = np.convolve(inputArray, 
smoothingFilter, 'same')\n return smoothedArray\n # f = interpolate.interp1d(xValues, inputArray, 'cubic')\n '''\n smoothed = newFloatArray(arraySize)\n for i in range(smoothSize - 1, arraySize):\n currentSum = 0\n for j in range(smoothSize):\n currentSum +=\n '''\n # return f(xValues)\n \ndef boundAtZero(inArray):\n return np.array([max(x, 0) for x in inArray])\n \ndef getPredictionsSIR2(betaArray, gammaArray, sInit, iInit, rInit, countryPopulation):\n predictionSize = betaArray.shape[0]\n sPredict = newFloatArray(predictionSize)\n iPredict = newFloatArray(predictionSize)\n rPredict = newFloatArray(predictionSize)\n\n sPrevious = sInit\n iPrevious = iInit\n rPrevious = rInit\n\n for i in range(predictionSize):\n infectionCount = (betaArray[i] * (sPrevious * iPrevious)) / countryPopulation\n recoveryCount = gammaArray[i] * iPrevious\n sPredict[i] = sPrevious - infectionCount\n iPredict[i] = iPrevious + infectionCount - recoveryCount\n rPredict[i] = rPrevious + recoveryCount\n \n sPrevious = sPredict[i]\n iPrevious = iPredict[i]\n rPrevious = rPredict[i]\n\n # print(\"Day = \" + str(i) + \": (S, I, R) = (\" + str(sPredict[i]) + \", \" + str(iPredict[i]) + \", \" + str(rPredict[i]) + \")\")\n # print(\" -> Infections = \" + str(infectionCount) + \", Recoveries = \" + str(recoveryCount))\n\n return {\"S\": sPredict, \"I\": iPredict, \"R\": rPredict}\n\n\ndef getPredictionsSIR(betaArray, gammaArray, sInit, iInit, rInit, countryPopulation):\n predictionSize = betaArray.shape[0]\n sPredict = newFloatArray(predictionSize)\n iPredict = newFloatArray(predictionSize)\n rPredict = newFloatArray(predictionSize)\n\n for i in range(predictionSize):\n if i == 0:\n # sPredict[i] = sInit - (sInit * iInit) / countryPopulation\n # iPredict[i] = iInit + (sInit * iInit) / countryPopulation - gamma * iInit\n # rPredict[i] = rInit + gamma * iInit\n sPredict[i] = sInit\n iPredict[i] = iInit\n rPredict[i] = rInit\n else:\n infectionCount = (betaArray[i] * (sPredict[i - 1] * iPredict[i - 1])) / countryPopulation\n recoveryCount = gammaArray[i] * iPredict[i - 1]\n sPredict[i] = sPredict[i - 1] - infectionCount\n iPredict[i] = iPredict[i - 1] + infectionCount - recoveryCount\n rPredict[i] = rPredict[i - 1] + recoveryCount\n\n # print(\"Day = \" + str(i) + \": (S, I, R) = (\" + str(sPredict[i]) + \", \" + str(iPredict[i]) + \", \" + str(rPredict[i]) + \")\")\n # print(\" -> Infections = \" + str(infectionCount) + \", Recoveries = \" + str(recoveryCount))\n\n return [sPredict, iPredict, rPredict]\n\n\ndef getRandomVariable(meanValue, factor=8):\n rv = np.random.normal(meanValue, meanValue / factor)\n return max(rv, 0.0)\n\n\ndef getRandomBeta(meanValue):\n rv = np.random.normal(meanValue, meanValue / 3.0)\n if rv < 0.0:\n return 0.0\n\n return rv\n\ndef getObservedModelValues(tsData, countryName):\n tsSize = tsData.dateCount\n countryData = tsData.countryMap[countryName]\n\n countryPopulation = countryData.population\n\n confirmed = countryData.confirmed\n recovered = countryData.recovered\n deaths = countryData.deaths\n\n S = newIntArray(tsSize)\n I = newIntArray(tsSize)\n R = newIntArray(tsSize)\n sDelta = newIntArray(tsSize)\n iDelta = newIntArray(tsSize)\n rDelta = newIntArray(tsSize)\n betaObserved = newFloatArray(tsSize)\n gammaObserved = newFloatArray(tsSize)\n\n for i in range(tsSize):\n # rTemp[i] = deaths[i] + recovered[i]\n\n S[i] = countryPopulation - confirmed[i] # - deaths[i] - recovered[i]\n I[i] = confirmed[i] - deaths[i] - recovered[i]\n R[i] = deaths[i] + recovered[i]\n\n if i == 0:\n sDelta[i] = -1 * 
confirmed[i]\n iDelta[i] = confirmed[i] - deaths[i] - recovered[i]\n rDelta[i] = deaths[i] + recovered[i]\n betaObserved[i] = 0.0\n gammaObserved[i] = 0.0\n else:\n sDelta[i] = S[i] - S[i - 1]\n iDelta[i] = I[i] - I[i - 1]\n rDelta[i] = R[i] - R[i - 1]\n\n if I[i - 1] > 0:\n betaObserved[i] = (-1 * sDelta[i]) / ((S[i - 1] * I[i - 1]) / countryPopulation)\n gammaObserved[i] = rDelta[i] / I[i - 1]\n else:\n betaObserved[i] = 0\n gammaObserved[i] = 0\n\n # betaSmoothed = performInterpolation(betaObserved)\n # betaSmoothed3 = getSimpleMovingAverage(betaObserved, 3)\n # betaSmoothed7 = getSimpleMovingAverage(betaObserved, 7)\n # savitskyGovol = getSavitskyGovol(betaObserved)\n betaSmoothed = getGaussianAverage(betaObserved, 2.5)\n\n return {\"S\": S, \"I\": I, \"R\": R, \"beta\": betaObserved, \"gamma\": gammaObserved, \"betaSmoothed\": betaSmoothed}\n\n\ndef getStandardPredictions(tsData, countryName, rangeStart, rangeEnd, daysToPredict):\n countryData = tsData.countryMap[countryName]\n countryPopulation = countryData.population\n tsSizeSliced = rangeEnd - rangeStart + 1\n # elements = [\"S\", \"I\", \"R\", \"beta\", \"gamma\", \"betaSmoothed\"]\n # elementsIndex = {}\n # for i in range(len(elements)):\n # elementsIndex[elements[i]] = i\n\n fullData = getObservedModelValues(tsData, countryName)\n # ------------------- slicedData = [x[rangeStart:rangeEnd] for x in fullData]\n\n sSliced = fullData[\"S\"][rangeStart:rangeEnd + 1]\n iSliced = fullData[\"I\"][rangeStart:rangeEnd + 1]\n rSliced = fullData[\"R\"][rangeStart:rangeEnd + 1]\n betaSliced = fullData[\"beta\"][rangeStart:rangeEnd + 1]\n gammaSliced = fullData[\"gamma\"][rangeStart:rangeEnd + 1]\n betaSmoothedSliced = fullData[\"betaSmoothed\"][rangeStart:rangeEnd]\n\n betaAvg, betaStdDev = np.mean(betaSliced[tsSizeSliced - 7:]), np.std(betaSliced[tsSizeSliced - 7:])\n gammaAvg, gammaStdDev = np.mean(gammaSliced[tsSizeSliced - 14:]), np.std(gammaSliced[tsSizeSliced - 14:])\n\n # At index 0, this will be the same as the last value of the real data\n predictionDays = daysToPredict + 1\n gammaSampleArray1 = newFloatArray(predictionDays)\n gammaSampleArray2 = newFloatArray(predictionDays)\n gammaSampleArray3 = newFloatArray(predictionDays)\n\n betaConstantTrend = newFloatArray(predictionDays)\n betaDownwardTrend = newFloatArray(predictionDays)\n betaContinueTrend = newFloatArray(predictionDays)\n\n regr = linear_model.LinearRegression()\n regr.fit(np.arange(tsSizeSliced - 7, tsSizeSliced).reshape(-1, 1), betaSmoothedSliced[-7:])\n betaLinearCoefficient = regr.coef_[0]\n betaLinearIntercept = regr.intercept_\n # betaContinueStart = tsSize * betaLinearCoefficient + betaLinearIntercept\n\n for i in range(predictionDays):\n if i == 0:\n betaConstantTrend[i] = betaAvg\n betaDownwardTrend[i] = betaAvg\n betaContinueTrend[i] = betaAvg\n else:\n betaConstantTrend[i] = getRandomVariable(betaAvg)\n\n predictionCompletionRatio = i / (predictionDays - 1)\n betaDownwardMean = max(betaAvg - (betaAvg * predictionCompletionRatio), betaAvg / 10.0)\n betaDownwardTrend[i] = getRandomVariable(betaDownwardMean)\n\n betaContinueMean = max(betaAvg + (betaLinearCoefficient * i), betaAvg / 10.0)\n betaContinueTrend[i] = getRandomVariable(betaContinueMean)\n\n gammaSampleArray1[i] = getRandomVariable(gammaAvg)\n gammaSampleArray2[i] = getRandomVariable(gammaAvg)\n gammaSampleArray3[i] = getRandomVariable(gammaAvg)\n\n predictionsConstant = getPredictionsSIR(betaConstantTrend, gammaSampleArray1, sSliced[-1], iSliced[-1], rSliced[-1],\n countryPopulation)\n 
predictionsDownward = getPredictionsSIR(betaDownwardTrend, gammaSampleArray2, sSliced[-1], iSliced[-1], rSliced[-1],\n countryPopulation)\n predictionsContinueTrend = getPredictionsSIR(betaContinueTrend, gammaSampleArray3, sSliced[-1], iSliced[-1],\n rSliced[-1], countryPopulation)\n\n return {\"sirConstant\": predictionsConstant, \"sirDownward\": predictionsDownward, \\\n \"sirContinueTrend\": predictionsContinueTrend, \"betaConstant\": betaConstantTrend, \\\n \"betaDownward\": betaDownwardTrend, \"betaContinueTrend\": betaContinueTrend}\n\n\n# Note that the indices are sensitive here!\n# Probably best for 3 week training and 1 week testing\ndef getTrainingSample(sSlice, iSlice, rSlice, betaSlice, trainDays, testDays):\n sample = np.zeros(shape=(1, 5), dtype=np.float64)\n # (1, 3, 7, 13, 21) => Predict a week or 2 weeks?\n # (1, 7, 14, 22, 31) => Predict a month \n sample[0, 0] = betaSlice[trainDays - 1]\n sample[0, 1] = betaSlice[trainDays - 7]\n sample[0, 2] = betaSlice[trainDays - 14]\n sample[0, 3] = betaSlice[trainDays - 22]\n sample[0, 4] = betaSlice[trainDays - 31]\n\n label = betaSlice[-1]\n\n return sample, label\n\n\ndef buildSlidingWindowTrainingSet(tsData, trainDays, testDays):\n totalWindowSize = trainDays + testDays\n tsSize = tsData.dateCount\n countryMap = tsData.countryMap\n X = None\n y = None\n\n for countryName in tsData.countryIndex:\n countryData = countryMap[countryName]\n firstIndex = countryData.firstIndex\n dataMap = getObservedModelValues(tsData, countryName)\n windowStart = firstIndex\n i = 0\n\n countryTrainDataSize = tsSize - totalWindowSize - firstIndex + 1\n if countryTrainDataSize > 0:\n countryTrainSamples = np.zeros(shape=(countryTrainDataSize, 5), dtype=np.float64)\n countryTrainLabels = np.zeros(shape=(countryTrainDataSize,), dtype=np.float64)\n\n while windowStart + totalWindowSize <= tsSize:\n sSlice = dataMap[\"S\"][windowStart:windowStart + totalWindowSize]\n iSlice = dataMap[\"I\"][windowStart:windowStart + totalWindowSize]\n rSlice = dataMap[\"R\"][windowStart:windowStart + totalWindowSize]\n betaSlice = dataMap[\"betaSmoothed\"][windowStart:windowStart + totalWindowSize]\n\n sample, label = getTrainingSample(sSlice, iSlice, rSlice, betaSlice, trainDays, testDays)\n countryTrainSamples[i, :] = sample\n countryTrainLabels[i] = label\n\n windowStart += 1\n i += 1\n\n if X is None and y is None:\n X = countryTrainSamples\n y = countryTrainLabels\n else:\n X_tuple = (X, countryTrainSamples)\n X = np.concatenate(X_tuple, axis=0)\n\n y_tuple = (y, countryTrainLabels)\n y = np.concatenate(y_tuple)\n\n return X, y\n\ndef fillBetaTransitionQuadratic(startBeta, startBetaSlope, targetBeta, predictionDays):\n c = startBeta\n b = startBetaSlope\n a = (targetBeta - b * predictionDays) / (predictionDays * predictionDays)\n \n results = np.zeros(shape=(predictionDays), dtype=np.float64)\n \n for i in range(predictionDays):\n x = i + 1\n results[i] = (a * x * x) + (b * x) + c\n \n return results\n \ndef fillBetaTransitionLinear(startBeta, targetBeta, predictionDays):\n delta = targetBeta - startBeta\n \n results = np.zeros(shape=(predictionDays), dtype=np.float64)\n \n for i in range(predictionDays):\n results[i] = startBeta + (delta * ((i + 1) * 1.0 / predictionDays))\n \n return results\n\ndef addNoiseToArray(inputArray, factor=8):\n #arraySize = inputArray.shape[0]\n noisy = np.array([getRandomVariable(x, factor) for x in inputArray])\n return noisy\n\ndef computeMeanSquareError(trueValues, predictedValues):\n elementCount = 0\n sumSquaredErrors = 0.0 \n for 
yTrue, yPredicted in zip(trueValues, predictedValues):\n error = (yPredicted - yTrue)\n #currentWeight = index + 1\n sumSquaredErrors += (error * error) #currentWeight * (percentError * percentError)\n #sumOfWeights += currentWeight\n #index += 1\n elementCount += 1\n \n return math.sqrt(sumSquaredErrors / elementCount)\n","sub_path":"src/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":13038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"402096995","text":"from bottle import Bottle, request\nfrom bottle.ext import sqlalchemy\nfrom datetime import datetime, timedelta\nfrom . import config\nfrom . import db\nfrom .db import CryptoKey\n\napp = Bottle()\napp.install(sqlalchemy.Plugin(\n db.engine, # SQLAlchemy engine created with create_engine function.\n db.Base.metadata, # SQLAlchemy metadata, required only if create=True.\n keyword='db', # Keyword used to inject session database in a route (default 'db').\n create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).\n commit=True, # If it is true, plugin commit changes after route is executed (default True).\n))\n\ndef validate_request(r, fields):\n message = None\n if not r:\n message = \"no params given\"\n if not message:\n for field in fields:\n if field not in r:\n message = \"missing mandatory parameter '%s'\" % field\n break\n if message:\n return {\n 'result' : 'bad request',\n 'message' : message,\n }\n\ndef find_key(db, id):\n return db.query(CryptoKey).filter(CryptoKey.id == id).first()\n\ndef refresh_key(ck):\n ck.attempts = config.retrieve_attempts\n ck.expires = datetime.now() + timedelta(seconds=config.expire_interval)\n\n@app.post('/store')\ndef store(db):\n r = request.json\n err = validate_request(r, ['id', 'pin', 'key'])\n if err:\n return err\n\n ck = find_key(db, r['id']) or CryptoKey(id=r['id'])\n ck.pin = r['pin']\n ck.key = r['key']\n refresh_key(ck)\n\n db.add(ck)\n\n return {\n 'result' : 'ok',\n }\n\n@app.post('/retrieve')\ndef retrieve(db):\n r = request.json\n err = validate_request(r, ['id', 'pin'])\n if err:\n return err\n\n ck = find_key(db, r['id'])\n if not ck or datetime.now() > ck.expires:\n if ck:\n db.delete(ck)\n return { 'result' : 'not found' }\n\n if ck.pin != r['pin']:\n ck.attempts -= 1\n if ck.attempts < 1:\n db.delete(ck)\n return { 'result' : 'invalid pin' }\n\n refresh_key(ck)\n return {\n 'result' : 'ok',\n 'key' : ck.key,\n }\n","sub_path":"weks/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"169745185","text":"from abc import ABCMeta, abstractmethod\nfrom typing import Any, Dict, List, Tuple\n\nfrom ee.clickhouse.models.cohort import format_person_query, get_precalculated_query, is_precalculated_query\nfrom ee.clickhouse.models.property import filter_element, prop_filter_json_extract\nfrom ee.clickhouse.queries.util import parse_timestamps\nfrom posthog.models import Cohort, Filter, Property, Team\n\n\nclass ClickhouseEventQuery(metaclass=ABCMeta):\n DISTINCT_ID_TABLE_ALIAS = \"pdi\"\n PERSON_TABLE_ALIAS = \"person\"\n EVENT_TABLE_ALIAS = \"e\"\n\n _PERSON_PROPERTIES_ALIAS = \"person_props\"\n _filter: Filter\n _team_id: int\n _should_join_distinct_ids = False\n _should_join_persons = False\n _should_round_interval = False\n\n def __init__(\n self,\n filter: Filter,\n team_id: int,\n round_interval=False,\n 
should_join_distinct_ids=False,\n should_join_persons=False,\n **kwargs,\n ) -> None:\n self._filter = filter\n self._team_id = team_id\n self.params = {\n \"team_id\": self._team_id,\n }\n\n self._should_join_distinct_ids = should_join_distinct_ids\n self._should_join_persons = should_join_persons\n\n if not self._should_join_distinct_ids:\n self._determine_should_join_distinct_ids()\n\n if not self._should_join_persons:\n self._determine_should_join_persons()\n\n self._should_round_interval = round_interval\n\n @abstractmethod\n def get_query(self) -> Tuple[str, Dict[str, Any]]:\n pass\n\n @abstractmethod\n def _determine_should_join_distinct_ids(self) -> None:\n pass\n\n def _get_disintct_id_query(self) -> str:\n if self._should_join_distinct_ids:\n return f\"\"\"\n INNER JOIN (\n SELECT\n person_id,\n distinct_id\n FROM (\n SELECT *\n FROM person_distinct_id\n JOIN (\n SELECT distinct_id,\n max(_offset) as _offset\n FROM person_distinct_id\n WHERE team_id = %(team_id)s\n GROUP BY distinct_id\n ) as person_max\n USING (distinct_id, _offset)\n WHERE team_id = %(team_id)s\n )\n WHERE team_id = %(team_id)s\n ) AS {self.DISTINCT_ID_TABLE_ALIAS}\n ON events.distinct_id = {self.DISTINCT_ID_TABLE_ALIAS}.distinct_id\n \"\"\"\n else:\n return \"\"\n\n def _determine_should_join_persons(self) -> None:\n for prop in self._filter.properties:\n if prop.type == \"person\":\n self._should_join_distinct_ids = True\n self._should_join_persons = True\n return\n if prop.type == \"cohort\" and self._does_cohort_need_persons(prop):\n self._should_join_distinct_ids = True\n self._should_join_persons = True\n return\n\n if self._filter.breakdown_type == \"person\":\n self._should_join_distinct_ids = True\n self._should_join_persons = True\n\n if self._filter.filter_test_accounts:\n test_account_filters = Team.objects.only(\"test_account_filters\").get(id=self._team_id).test_account_filters\n test_filter_props = [Property(**prop) for prop in test_account_filters]\n for prop in test_filter_props:\n if prop.type == \"person\":\n self._should_join_distinct_ids = True\n self._should_join_persons = True\n return\n\n def _does_cohort_need_persons(self, prop: Property) -> bool:\n try:\n cohort = Cohort.objects.get(pk=prop.value, team_id=self._team_id)\n except Cohort.DoesNotExist:\n return False\n if is_precalculated_query(cohort):\n return True\n for group in cohort.groups:\n if group.get(\"properties\"):\n return True\n return False\n\n def _get_person_query(self) -> str:\n if self._should_join_persons:\n return f\"\"\"\n INNER JOIN (\n SELECT id, properties as person_props\n FROM (\n SELECT id,\n argMax(properties, person._timestamp) as properties,\n max(is_deleted) as is_deleted\n FROM person\n WHERE team_id = %(team_id)s\n GROUP BY id\n HAVING is_deleted = 0\n )\n ) {self.PERSON_TABLE_ALIAS} \n ON {self.PERSON_TABLE_ALIAS}.id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id\n \"\"\"\n else:\n return \"\"\n\n def _get_date_filter(self) -> Tuple[str, Dict]:\n\n parsed_date_from, parsed_date_to, date_params = parse_timestamps(filter=self._filter, team_id=self._team_id)\n\n query = f\"\"\"\n {parsed_date_from}\n {parsed_date_to}\n \"\"\"\n\n return query, date_params\n\n def _get_props(self, filters: List[Property], allow_denormalized_props: bool = False) -> Tuple[str, Dict]:\n\n filter_test_accounts = self._filter.filter_test_accounts\n team_id = self._team_id\n table_name = f\"{self.EVENT_TABLE_ALIAS}.\"\n prepend = \"global\"\n\n final = []\n params: Dict[str, Any] = {}\n\n if filter_test_accounts:\n 
test_account_filters = Team.objects.only(\"test_account_filters\").get(id=team_id).test_account_filters\n filters.extend([Property(**prop) for prop in test_account_filters])\n\n for idx, prop in enumerate(filters):\n if prop.type == \"cohort\":\n person_id_query, cohort_filter_params = self._get_cohort_subquery(prop)\n params = {**params, **cohort_filter_params}\n final.append(f\"AND {person_id_query}\")\n\n elif prop.type == \"person\":\n filter_query, filter_params = prop_filter_json_extract(\n prop,\n idx,\n \"{}person\".format(prepend),\n allow_denormalized_props=allow_denormalized_props,\n prop_var=self._PERSON_PROPERTIES_ALIAS,\n )\n final.append(filter_query)\n params.update(filter_params)\n elif prop.type == \"element\":\n query, filter_params = filter_element({prop.key: prop.value}, prepend=\"{}_\".format(idx))\n final.append(\"AND {}\".format(query[0]))\n params.update(filter_params)\n else:\n filter_query, filter_params = prop_filter_json_extract(\n prop, idx, prepend, prop_var=\"properties\", allow_denormalized_props=allow_denormalized_props,\n )\n\n final.append(filter_query)\n params.update(filter_params)\n return \" \".join(final), params\n\n def _get_cohort_subquery(self, prop) -> Tuple[str, Dict[str, Any]]:\n try:\n cohort: Cohort = Cohort.objects.get(pk=prop.value, team_id=self._team_id)\n except Cohort.DoesNotExist:\n return \"0 = 1\", {} # If cohort doesn't exist, nothing can match\n\n is_precalculated = is_precalculated_query(cohort)\n\n person_id_query, cohort_filter_params = (\n get_precalculated_query(cohort, 0, custom_match_field=f\"{self.DISTINCT_ID_TABLE_ALIAS}.person_id\")\n if is_precalculated\n else format_person_query(cohort, 0, custom_match_field=f\"{self.DISTINCT_ID_TABLE_ALIAS}.person_id\")\n )\n\n return person_id_query, cohort_filter_params\n","sub_path":"ee/clickhouse/queries/event_query.py","file_name":"event_query.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"359611460","text":"from agent._models.bra_admin.ApAvailableOid import ApAvailableOid\n\nimport agent._services.snmp.GetBulkService as GetBulkService\n\n\ndef create_pon(ap, oid_description):\n pon_list = {}\n\n pon_oid = ApAvailableOid.objects.using('bra_admin').get(ap_id=ap.id, description='pon')\n\n if oid_description == 'online':\n pon_oid_bulk = GetBulkService.create(ap.address, pon_oid.oid, ap.snmp_community)\n\n for line in pon_oid_bulk:\n parsed = str(line)\n substringedStart = parsed.rindex(pon_oid.oid_reduced) + len(pon_oid.oid_reduced) + 1\n substringed = parsed[substringedStart:]\n endID = substringed.find(\"=\")\n\n snmp_index = substringed[:endID].strip()\n snmp_result = (substringed[endID + 1:]).strip()\n\n pon_description = snmp_result[:snmp_result.rindex('/')]\n pon_position = snmp_result[snmp_result.rindex('/')+1:]\n\n if pon_description in pon_list:\n pon_list[pon_description] = pon_list[pon_description] + 1\n else:\n pon_list[pon_description] = 1\n\n result_array = []\n\n for desc, amount in pon_list.items():\n result_array.append({\"channel\": desc, \"value\": amount})\n result_array.append({\"channel\": \"TOTAL\", \"value\": len(pon_oid_bulk)})\n\n result_formatted = {\n \"prtg\": {\n \"result\": result_array\n }\n }\n\n return result_formatted\n\ndef create_unauthorized(ap_list, oid):\n result_array = []\n total_count = 0\n\n for ap in ap_list:\n print(ap.description)\n olt_oid_bulk = GetBulkService.create(ap.address, oid, ap.snmp_community)\n for line in olt_oid_bulk:\n 
parsed = str(line)\n            print(parsed)\n        result_array.append({"channel": ap.description, "value": len(olt_oid_bulk)})\n        total_count = total_count + len(olt_oid_bulk)\n\n    result_array.append({"channel": "TOTAL", "value": total_count})\n    result_formatted = {\n        "prtg": {\n            "result": result_array\n        }\n    }\n\n    return result_formatted\n","sub_path":"snmp_agent/agent/_services/PrtgRestService.py","file_name":"PrtgRestService.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"325602305","text":"from vector import Vector\nfrom time import time\n\nalpha, vector_size = 0.1, 4\niterations = 1000\n\nvectors = []\nclusters = []\nweights = []\n\ntime_start = time()\n\n\ndef read_data():\n    global weights\n    for line in open(\"./input/iris.dat\", \"r\"):\n        vectors.append(Vector(list(map(float, line.split()[:vector_size] + [\"1\"]))))\n\n    a, b, cut = 50, 100, 25\n    clusters.append(vectors[:a - cut])\n    clusters.append(vectors[a:b - cut])\n    clusters.append(vectors[b:-cut])\n    weights = [Vector([1.0 for _ in range(vector_size + 1)]) for _ in range(len(clusters))]\n\n\ndef learning():\n    global iterations\n    for i in range(iterations):\n        ok = True\n        for ci, cluster in enumerate(clusters):  # cluster index and the cluster itself\n            for vi, vector in enumerate(cluster):  # vector index and the vector itself from the current cluster\n                for wi in range(len(weights)):  # weight vector index\n                    if wi != ci:\n                        d_i = weights[ci] * vector\n                        d_l = weights[wi] * vector\n                        if d_l >= d_i:\n                            c_i = alpha * (abs(d_i) / (vector * vector))\n                            c_l = alpha * (abs(d_l) / (vector * vector))\n                            weights[ci] += vector * c_i\n                            weights[wi] -= vector * c_l\n                            ok = False\n        if ok:\n            iterations = i\n            break\n    else:\n        print(\"not 100% classification\")\n\n\ndef print_result():\n    print(\"\\nweights:\")\n    print(\"\\n\".join(map(str, weights)))\n    print(\"execution time for {:,} iterations: {:.3f}s\"\n          .format(iterations, time() - time_start))\n\n\ndef check_weights():\n    print()\n    global clusters\n    clusters = []\n    a, b, cut = 50, 100, 25\n    clusters.append(vectors[cut:a])\n    clusters.append(vectors[a+cut:b])\n    clusters.append(vectors[b+cut:])\n\n    summary = 0\n    for ci, cluster in enumerate(clusters):  # cluster index and the cluster itself\n        counter = 0\n        for vi, vector in enumerate(cluster):  # vector index and the vector itself from the current cluster\n            for wi in range(len(weights)):  # weight vector index\n                if wi != ci:\n                    d_i = weights[ci] * vector\n                    d_l = weights[wi] * vector\n                    if d_l >= d_i:\n                        counter += 1\n        print(\"cluster {}: {} anomaly(ies)\".format(ci+1, counter))\n        summary += counter\n    print(\"summary: {}% error\".format(summary/(cut*len(clusters))*100))\n\n\ndef recalculate_clusters():\n    new_clusters = [[], [], []]\n    for vector in vectors:\n        d_max = 0\n        i_max = 0\n        for wi in range(len(weights)):  # weight vector index\n            d = weights[wi] * vector\n            if d >= d_max:\n                d_max = d\n                i_max = wi\n        new_clusters[i_max].append(vector)\n\n    for i, c in enumerate(new_clusters):\n        print(\"cluster #{}: {} vectors\".format(i, len(c)))\n\n\nif __name__ == \"__main__\":\n    read_data()\n    learning()\n    print_result()\n    check_weights()\n    recalculate_clusters()\n","sub_path":"classifier_constructor.py","file_name":"classifier_constructor.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
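The update in learning() above is a reward-punishment rule: when a rival class scores at least as high as the true class, the true weight vector is pulled toward the sample and the rival pushed away, each scaled by its normalized activation. A minimal NumPy sketch of a single such step (plain arrays stand in for the script's custom Vector class; the values are illustrative, not from the script):

import numpy as np

alpha = 0.1
w_true = np.array([1.0, 1.0, 1.0, 1.0, 1.0])    # weights of the correct class
w_rival = np.array([2.0, 0.5, 1.0, 1.0, 1.0])   # weights of a rival class
x = np.array([5.1, 3.5, 1.4, 0.2, 1.0])         # augmented sample (trailing bias 1)

d_true, d_rival = w_true @ x, w_rival @ x
if d_rival >= d_true:  # misclassified: move the two decision surfaces apart
    w_true += alpha * (abs(d_true) / (x @ x)) * x
    w_rival -= alpha * (abs(d_rival) / (x @ x)) * x
print(w_true, w_rival)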
+{"seq_id":"116663930","text":"import random\nimport os\nimport pandas as pd\nfrom sklearn import model_selection\nfrom sklearn import preprocessing\nfrom keras.models import load_model\nfrom pandas import DataFrame\nfrom xlwt import Workbook\n\nfrom src.loop_random.random_schedule_real import random_schedule_real\nfrom src.loop_random.random_schedule_DNN import random_schedule_DNN\nfrom src.loop_random.random_schedule_matrix import random_schedule_matrix\n\n\"\"\"\nRun multiple experiment groups and multiple ratio experiments\n\"\"\"\n\"\"\"\nRandomly fetch the task list\n\"\"\"\n\n\ndef get_task(group_num, task_num, task_total):\n    # Create a file to store the dataset ids\n    book = Workbook(encoding='utf-8')\n    sheet1 = book.add_sheet('Sheet 1')\n    sheet1.write(0, 0, \"id\")\n    sheet1.write(0, 1, \"data_id\")\n    for x in range(task_num):\n        sheet1.write(x + 1, 0, x)\n    # Save the Excel file: book.save('path/filename.xls')\n    book.save(path + 'group' + str(group_num) + '/task_img_id_' + str(group_num) + '.xls')\n    print('---new file:', path + 'group' + str(group_num) + '/task_img_id_' + str(group_num) + '.xls')\n\n    result = pd.read_excel(path + 'group' + str(group_num) + '/task_img_id_' + str(group_num) + '.xls')\n    task_list = []\n    # Randomly pick task_num images out of the task_total image data files\n    chosen = 0\n    while chosen < task_num:\n        temp = int(random.random() * task_total)\n        if temp in task_list:\n            continue\n        else:\n            task_list.append(temp)\n            result['data_id'][chosen] = temp\n        DataFrame(result).to_excel(path + 'group' + str(group_num) + '/task_img_id_' + str(group_num) + '.xls')\n        print('---choose:' + str(chosen) + 'task', str(temp))\n        chosen += 1\n    return task_list\n\n\n\"\"\"\nFetch the initial data according to the task list\n\"\"\"\n\n\ndef get_initial_data(group_num, task_num, task_total):\n    # Build the initial_data table\n    # Create a file to store the 840 samples\n    book = Workbook(encoding='utf-8')\n    sheet1 = book.add_sheet('Sheet 1')\n    sheet1.write(0, 0, \"id\")\n    sheet1.write(0, 1, \"image_size\")\n    sheet1.write(0, 2, \"resolution1\")\n    sheet1.write(0, 3, \"resolution2\")\n    sheet1.write(0, 4, \"face_num\")\n    sheet1.write(0, 5, \"face_area\")\n    sheet1.write(0, 6, \"cpu_core\")\n    sheet1.write(0, 7, \"mem_total\")\n    sheet1.write(0, 8, \"mem_used\")\n    sheet1.write(0, 9, \"disk_capacity\")\n    sheet1.write(0, 10, \"frame_process_time\")\n    sheet1.write(0, 11, \"predict_time\")\n    sheet1.write(0, 12, \"error\")\n    for task in range(task_num * 28):\n        sheet1.write(task + 1, 0, task)\n    # Save the Excel file: book.save('path/filename.xls')\n    book.save(path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls')\n    print('---new file:', path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls')\n\n    sample = pd.read_excel(path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls')\n    # There are 28 raw data files, so loop 28 times\n    temp = 0\n    for table in range(28):\n        data = pd.read_excel('../../data/raw/result' + str(table + 1) + '.xlsx')\n\n        # Loop task_num times\n        for t in range(task_num):\n            sample['image_size'][temp] = data['image_size'][task_list[t]]\n            sample['resolution1'][temp] = data['resolution1'][task_list[t]]\n            sample['resolution2'][temp] = data['resolution2'][task_list[t]]\n            sample['face_num'][temp] = data['face_num'][task_list[t]]\n            sample['face_area'][temp] = data['face_area'][task_list[t]]\n            sample['cpu_core'][temp] = data['cpu_core'][task_list[t]]\n            sample['mem_total'][temp] = data['mem_total'][task_list[t]]\n            sample['mem_used'][temp] = data['mem_used'][task_list[t]]\n            sample['disk_capacity'][temp] = data['disk_capacity'][task_list[t]]\n            sample['frame_process_time'][temp] = data['frame_process_time'][task_list[t]]\n            # Write the updates to a new Excel file\n            DataFrame(sample).to_excel(path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls')\n            temp += 1\n    print('---group' + str(group_num) + ',get initial_data:', temp)
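# Aside (editorial sketch, not part of the original script): the rejection-sampling
# loop in get_task above draws task_num distinct ids; assuming task_num <= task_total,
# the standard library does the same in one call:
#
#     import random
#     task_list = random.sample(range(task_total), task_num)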
temp)\n\n\n\"\"\"\n新建目录\n\"\"\"\n\n\ndef mkdir(path):\n folder = os.path.exists(path)\n if not folder:\n os.makedirs(path)\n print('---new folder:', path)\n\n\n\"\"\"\n获取DNN预测时间矩阵\n\"\"\"\n\n\ndef get_predict_time(group_num, pro):\n result_index = ['image_size', 'resolution1', 'resolution2', 'face_num', 'face_area', 'cpu_core', 'mem_total',\n 'mem_used']\n # 读取原始的数据集\n df = pd.read_excel(path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls')\n # 把数据转为float类型\n # df['displacement'] = df['displacement'].astype(float)\n\n # 逐列获取数据集\n # First and last (mpg and car names) are ignored for X,左闭右开,13个\n X = df[result_index][0:840]\n # print(X)\n y = df['frame_process_time'][0:840]\n\n # 分离数据集,将数据集按比例分为训练集和测试集\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0)\n\n # Scale the data for 收敛优化\n scaler = preprocessing.StandardScaler()\n\n # Set the transform parameters\n X_train = scaler.fit_transform(X_train)\n\n # Build a 2 layer fully connected DNN with 10 and 5 units respectively\n\n # 加载模型,旧版本是load_model()\n model = load_model('../../data/DNN_train/model_0.' + str(pro + 1) + '.h5')\n\n # 打印模型\n model.summary()\n\n # 用随机数根据比例生成数据类型的选择\n real_num = int(840 * pro)\n real_data = []\n num = 0\n while num < real_num:\n real_data.append(random.randint(0, 840))\n num += 1\n\n # 预测\n pre = model.predict(X_train, verbose=1)\n # print(pre)\n for t in range(840):\n df['predict_time'][t] = pre[t]\n df['error'][t] = pow(df['frame_process_time'][t] - pre[t], 2) # 计算平方误差\n # print(t, df['frame_process_time'][t], pre[t], df['error'][t])\n print('---get predict time:', t)\n DataFrame(df).to_excel(\n path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_data_0.' + str(pro + 1) + '.xls')\n\n\ndef get_matrix(group_num, pro):\n # 构造矩阵\n if not os.path.exists(path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_time_matrix_0.' + str(\n pro + 1) + '.xls'):\n # 构造结果表\n book = Workbook(encoding='utf-8')\n sheet1 = book.add_sheet('Sheet 1')\n sheet1.write(0, 0, \"id\")\n # print('task_list', task_list)\n for a in range(30):\n sheet1.write(a + 1, 0, 'task' + str(a))\n # print(task_list[a])\n for t in range(28):\n sheet1.write(0, t + 1, 'equip' + str(t))\n sheet1.write(0, 29, 'deadline')\n # 保存Excel book.save('path/文件名称.xls')\n book.save(path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_time_matrix_0.' + str(\n pro + 1) + '.xls')\n print('---new file:',\n path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_time_matrix_0.' + str(\n pro + 1) + '.xls')\n\n real_num = int(84 * pro)\n real_data = []\n num = 0\n while num < real_num:\n r = random.randint(0, 840)\n if r not in real_data:\n real_data.append(r)\n print(num, r)\n num += 1\n\n # 打开原始数据文件\n # 打开Excel文件\n dt = pd.read_excel(\n path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_data_0.' + str(pro + 1) + '.xls')\n df = pd.read_excel(path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_time_matrix_0.' + str(\n pro + 1) + '.xls')\n for table in range(840):\n df['equip' + str(int(table / 30))][int(table % 30)] = dt['predict_time'][table]\n\n DataFrame(df).to_excel(\n path + 'group' + str(group_num) + '/0.' + str(i + 1) + '/DNN_predict_time_matrix_0.' + str(\n pro + 1) + '.xls')\n print('---get DNN matrix:', table)\n\n\ndef get_predict_time_matrix(group_num, pro): # 输入组别,比例,任务列表\n # 预测时间\n path = '../../data/random_loop/group' + str(group_num) + '/0.' + str(pro + 1)\n if not os.path.exists(path + '/DNN_predict_data_0.' 
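`get_predict_time` above standardizes the eight feature columns and runs them through a saved Keras model; note that it refits the scaler on the data it is about to predict, which only matches training behavior if the feature distributions agree. A condensed sketch of that pipeline (the column list is the script's own; `model_path` is whichever `.h5` file was trained for the chosen ratio):

```python
from keras.models import load_model
from sklearn.preprocessing import StandardScaler

FEATURES = ['image_size', 'resolution1', 'resolution2', 'face_num',
            'face_area', 'cpu_core', 'mem_total', 'mem_used']

def predict_times(df, model_path):
    """Scale the feature block, predict frame times, return (pred, sq_error)."""
    X = StandardScaler().fit_transform(df[FEATURES].values.astype(float))
    pred = load_model(model_path).predict(X, verbose=0).ravel()
    sq_error = (df['frame_process_time'].values - pred) ** 2   # as in the script
    return pred, sq_error
```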
+ str(pro + 1) + '.xls'):\n get_predict_time(group_num, pro)\n # 构造矩阵\n get_matrix(group_num, pro)\n\n\n\"\"\"\n获取真实时间矩阵\n\"\"\"\n\n\ndef get_real_time_matrix(group_num):\n path = '../../data/random_loop/'\n # 构造矩阵\n if not os.path.exists(\n path + 'group' + str(group_num) + '/real_time_matrix_' + str(group_num) + '.xls'):\n # 构造结果表\n book = Workbook(encoding='utf-8')\n sheet1 = book.add_sheet('Sheet 1')\n sheet1.write(0, 0, \"id\")\n for a in range(30):\n sheet1.write(a + 1, 0, 'task' + str(a))\n # print(task_list[a])\n for t in range(28):\n sheet1.write(0, t + 1, 'equip' + str(t))\n sheet1.write(0, 29, 'deadline')\n # 保存Excel book.save('path/文件名称.xls')\n book.save(path + 'group' + str(group_num) + '/real_time_matrix_' + str(group_num) + '.xls')\n print('---new file:', path + 'group' + str(group_num) + '/real_time_matrix_' + str(group_num) + '.xls')\n\n # 打开原始数据文件\n # 打开Excel文件\n dt = pd.read_excel(path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls')\n df = pd.read_excel(\n path + 'group' + str(group_num) + '/real_time_matrix_' + str(group_num) + '.xls')\n for table in range(840):\n print('---get real_time_matrix:equip' + str(int(table / 30)), int(table % 30))\n df['equip' + str(int(table / 30))][int(table % 30)] = dt['frame_process_time'][table]\n\n DataFrame(df).to_excel(\n path + 'group' + str(group_num) + '/real_time_matrix_' + str(group_num) + '.xls')\n\n\nif __name__ == '__main__':\n path = '../../data/random_loop/'\n schedule_times = 10 # 实验组数(自定义)\n task_num = 30 # 任务数(自定义)\n task_total = 300 # 任务总数\n # 一共有k组实验\n for group_num in [2]:\n print('------第' + str(group_num) + '组实验------')\n # 每组实验的任务列表\n task_list = []\n # 创建根文件夹\n if not os.path.exists(path + 'group' + str(group_num)):\n mkdir(path + 'group' + str(group_num)) # 根据组号创建文件夹\n\n # 获取随机任务列表。先判断任务列表存不存在\n if not os.path.exists(path + 'group' + str(group_num) + '/task_img_id_' + str(group_num) + '.xls'):\n task_list = get_task(group_num, task_num, task_total) # 输入参数为:组号,任务数,任务总数\n print('---第' + str(group_num) + '组实验,' + '随机获取的任务列表:', task_list)\n\n # 根据task_list任务列表获取初始数据,方便后续的DNN预测\n if not os.path.exists(path + 'group' + str(group_num) + '/initial_data_' + str(group_num) + '.xls'):\n get_initial_data(group_num, task_num, task_total) # 输入参数为:组号,任务数,任务总数\n\n # 获取真实时间矩阵,为了matrix的预测准备数据\n get_real_time_matrix(group_num)\n\n print('------real调度:' + str(group_num) + '组实验------')\n random_schedule_real(group_num, task_num)\n\n # # 9个比例循环实现\n # for i in [0, 4, 8]:\n # # 新建比例的文件夹\n # if not os.path.exists(path + 'group' + str(group_num) + '/0.' + str(i + 1)):\n # mkdir(path + 'group' + str(group_num) + '/0.' 
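`get_matrix` and `get_real_time_matrix` above both scatter a flat 840-row column into a 30-task by 28-equipment grid via `table // 30` and `table % 30`. That layout is a single reshape; the assertion spot-checks it against the loop's indexing (illustrative, not in the script):

```python
import numpy as np

flat = np.arange(840, dtype=float)   # stand-in for the predict_time column
matrix = flat.reshape(28, 30).T      # shape (30 tasks, 28 equipments)

t = 123
assert matrix[t % 30, t // 30] == flat[t]   # matches df['equip'+str(t//30)][t%30]
```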
+ str(i + 1))\n #\n # # 根据不同比例预测数据\n # print('------获取DNN预测时间矩阵:' + str(group_num) + '组' + str(i) + '比例实验------')\n # get_predict_time_matrix(group_num, i) # 输入组别,比例,任务列表\n #\n # print('------DNN调度:' + str(group_num) + '组' + str(i) + '比例实验------')\n # random_schedule_DNN(group_num, task_num, i)\n #\n # print('------matrix调度:' + str(group_num) + '组' + str(i) + '比例实验------')\n # random_schedule_matrix(group_num, task_num, i)\n\n print('')\n","sub_path":"src/loop_random/random_schedule.py","file_name":"random_schedule.py","file_ext":"py","file_size_in_byte":12438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"365508398","text":"\"\"\"\n Author : Charlie Kim\n Date : 2012/02/02\n Description : last.fm chart html parser and archiver\n\"\"\"\nfrom django.core.management.base import BaseCommand\n\nfrom lastchart.models import WeeklyChart\nfrom lastchart.models import Rank\nfrom lastchart.models import Track\nfrom lastchart import settings\n\nfrom HTMLParser import HTMLParser\nimport calendar\nimport copy\nimport datetime\nimport re\nimport urllib2\n\n# regex definition for parsing\ndateRegex = re.compile(r'(\\d{1,2})\\s(' + '|'.join([calendar.month_name[i] for i in xrange(1, 13)]) + r')\\s(\\d{4})')\nnonDecimal = re.compile(r'[^\\d]+')\n\n\nclass lastFmParser(HTMLParser):\n \"\"\" last.fm chart page parser \"\"\"\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.tracks = []\n self.endDate = None\n self.curData = ''\n self.curTrack = {}\n self.recording = None\n\n def handle_starttag(self, tag, attrs):\n \"\"\" Start recording data \"\"\"\n if len(self.tracks) >= settings.CHART_SIZE:\n return\n attrDict = dict(attrs)\n if attrDict.get('class', None) == 'rankItem-position':\n self.recording = 'rank'\n elif attrDict.get('class', None) == 'rankItem-title':\n self.recording = 'title'\n elif attrDict.get('class', None) == 'rankItem-bar':\n self.recording = 'listeners'\n elif attrDict.get('class', None) == 'weekpicker collapsibleBox s-closed':\n self.recording = 'ending'\n \n def handle_endtag(self, tag):\n \"\"\" End recording and save the data \"\"\"\n if self.recording:\n if self.recording == 'rank':\n self.curTrack['rank'] = int(nonDecimal.sub('', self.curData))\n elif self.recording == 'title':\n titleSplit = self.curData.split('\\xe2\\x80\\x93')\n self.curTrack['artist'] = titleSplit[0].strip()\n self.curTrack['title'] = titleSplit[-1].strip()\n elif self.recording == 'listeners':\n self.curTrack['listeners'] = int(nonDecimal.sub('', self.curData))\n self.tracks.append(copy.copy(self.curTrack))\n elif self.recording == 'ending':\n dateSearch = dateRegex.search(self.curData)\n self.endDate = datetime.datetime.strptime(dateSearch.group(0), '%d %B %Y')\n self.curData = ''\n self.recording = None\n\n def handle_data(self, data):\n \"\"\" Recording the data in tags \"\"\"\n if self.recording and data:\n self.curData = self.curData + data\n \n\nclass Command(BaseCommand):\n args = ''\n help = 'Pass the url of last.fm chart to parse and archive'\n\n def handle(self, *args, **options):\n html = urllib2.urlopen(args[0]).read()\n parser = lastFmParser()\n parser.feed(self.sanitizeHtml(html))\n self.saveDatabase(parser.tracks, parser.endDate)\n \n def sanitizeHtml(self, htmlDoc):\n \"\"\" Remove unnecessary text from html\n @param htmlDoc: html string\n @return: clean html string \n \"\"\" \n htmlDoc = re.sub('\\r', '', htmlDoc)\n htmlDoc = re.sub('\\n', '', htmlDoc)\n htmlDoc = re.sub(' ', '', htmlDoc)\n htmlDoc = re.sub(r'', '', htmlDoc)\n 
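`lastFmParser` above is a three-state machine: `handle_starttag` arms a `recording` flag when a known class attribute appears, `handle_data` accumulates text, and `handle_endtag` commits it. The record imports the Python 2 `HTMLParser` module; the same pattern on Python 3's `html.parser` looks like this (class name and markup are illustrative):

```python
from html.parser import HTMLParser

class RankTitleParser(HTMLParser):
    """Collect the text of every element with class 'rankItem-title'."""
    def __init__(self):
        super().__init__()
        self.recording = False
        self.buffer = ''
        self.titles = []

    def handle_starttag(self, tag, attrs):
        if dict(attrs).get('class') == 'rankItem-title':
            self.recording = True

    def handle_data(self, data):
        if self.recording:
            self.buffer += data

    def handle_endtag(self, tag):
        if self.recording:
            self.titles.append(self.buffer.strip())
            self.buffer, self.recording = '', False

p = RankTitleParser()
p.feed('<td class="rankItem-title">Some Track Title</td>')
print(p.titles)   # ['Some Track Title']
```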
htmlDoc = re.sub(r'', '', htmlDoc)\n htmlDoc = re.sub(r'', '', htmlDoc) # remove comments\n return htmlDoc\n \n def saveDatabase(self, tracks, endingDate):\n \"\"\" Save the parsed chart data to database\n @param tracks: list of track dictionary\n @param endingDate: datetime object\n \"\"\"\n cObj, created = WeeklyChart.objects.get_or_create(endingDate=endingDate)\n for track in tracks:\n tObj, created = Track.objects.get_or_create(artist=track['artist'], title=track['title'])\n Rank.objects.get_or_create(chart=cObj, track=tObj, rank=track['rank'], listeners=track['listeners'])\n \n \n ","sub_path":"lastarchive/lastchart/management/commands/parseLastFm.py","file_name":"parseLastFm.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"621377243","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom db_connection import SQLite, get_tables\nfrom data_frame import get_table_data\n\n\nstocks_prediction = {}\n\nwith SQLite() as (curr, conn):\n\n # get all the table names\n tables = get_tables(curr, conn)\n for table in tables:\n if (table[0] == 'a' or table[0] == 'b'):\n # dummy tables\n print(table[0], 'will not be processed')\n else:\n print(table[0], 'will be processed')\n # get data from each table\n df = get_table_data(curr, conn, table[0])\n data = df.sort_index(ascending=True, axis=0)\n\n # get and rename columns data and close\n new_data = pd.DataFrame(index=range(0, len(df)), columns=['date', 'close'])\n for i in range(0, len(data)):\n new_data['date'][i] = data['date'][i]\n new_data['close'][i] = data['4. 
close'][i]\n\n # change date to the index column\n new_data.index = new_data.date\n new_data.drop('date', axis=1, inplace=True)\n\n # split data into train and test sets\n dataset = new_data.values\n train = dataset\n\n # converting dataset into x_train and y_train\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_data = scaler.fit_transform(dataset)\n x_train, y_train = [], []\n for i in range(60,len(train)):\n x_train.append(scaled_data[i-60:i,0])\n y_train.append(scaled_data[i,0])\n x_train, y_train = np.array(x_train), np.array(y_train)\n x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))\n\n # create and fit the LSTM network\n model = Sequential()\n model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1],1)))\n model.add(LSTM(units=50))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam')\n model.fit(x_train, y_train, epochs=1, batch_size=1, verbose=2)\n\n # predicting 426 values, using past 60 from the train data\n inputs = new_data[len(new_data) - 61:].values\n inputs = inputs.reshape(-1,1)\n inputs = scaler.transform(inputs)\n\n X_test = []\n for i in range(60,inputs.shape[0]):\n X_test.append(inputs[i-60:i,0])\n X_test = np.array(X_test)\n X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))\n closing_price = model.predict(X_test)\n closing_price = scaler.inverse_transform(closing_price)\n\n prev_month = dataset[-1][0]\n predicted_month = closing_price[0][0]\n change = ((predicted_month - prev_month) / prev_month) * 100\n stocks_prediction[table[0]] = change\n print(stocks_prediction)\n\nprint(stocks_prediction)\n\n\n","sub_path":"findigital/findigital.py","file_name":"findigital.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"636521515","text":"from dataclasses import dataclass\n\nfrom dcs.action import AITaskPush\nfrom dcs.condition import TimeAfter, UnitDamaged, Or, GroupLifeLess\nfrom dcs.triggers import TriggerOnce, Event\n\nfrom gen import namegen\nfrom gen.ground_forces.ai_ground_planner import CombatGroupRole, DISTANCE_FROM_FRONTLINE\nfrom .callsigns import callsign_for_support_unit\nfrom .conflictgen import *\n\nSPREAD_DISTANCE_FACTOR = 0.1, 0.3\nSPREAD_DISTANCE_SIZE_FACTOR = 0.1\n\nFRONTLINE_CAS_FIGHTS_COUNT = 16, 24\nFRONTLINE_CAS_GROUP_MIN = 1, 2\nFRONTLINE_CAS_PADDING = 12000\n\nRETREAT_DISTANCE = 20000\nBREAKTHROUGH_OFFENSIVE_DISTANCE = 35000\nAGGRESIVE_MOVE_DISTANCE = 16000\n\nFIGHT_DISTANCE = 3500\n\nRANDOM_OFFSET_ATTACK = 250\n\n\n@dataclass(frozen=True)\nclass JtacInfo:\n \"\"\"JTAC information.\"\"\"\n unit_name: str\n callsign: str\n region: str\n code: str\n # TODO: Radio info? 
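The loops above turn the scaled close-price series into 60-step lookback windows, once for training and once for the final prediction. A compact sketch of that window construction, with the reshape the LSTM layer expects:

```python
import numpy as np

def make_windows(series, lookback=60):
    """series: 1-D array of scaled prices -> (X, y) where each row of X is
    the previous `lookback` values and y is the value right after them."""
    X = np.array([series[i - lookback:i] for i in range(lookback, len(series))])
    y = series[lookback:]
    return X.reshape(-1, lookback, 1), y   # (samples, timesteps, features=1)

X, y = make_windows(np.linspace(0.0, 1.0, 100))
print(X.shape, y.shape)   # (40, 60, 1) (40,)
```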
Type?\n\n\nclass GroundConflictGenerator:\n\n def __init__(self, mission: Mission, conflict: Conflict, game, player_planned_combat_groups, enemy_planned_combat_groups, player_stance):\n self.mission = mission\n self.conflict = conflict\n self.enemy_planned_combat_groups = enemy_planned_combat_groups\n self.player_planned_combat_groups = player_planned_combat_groups\n self.player_stance = CombatStance(player_stance)\n self.enemy_stance = random.choice([CombatStance.AGGRESSIVE, CombatStance.AGGRESSIVE, CombatStance.AGGRESSIVE, CombatStance.ELIMINATION, CombatStance.BREAKTHROUGH]) if len(enemy_planned_combat_groups) > len(player_planned_combat_groups) else random.choice([CombatStance.DEFENSIVE, CombatStance.DEFENSIVE, CombatStance.DEFENSIVE, CombatStance.AMBUSH, CombatStance.AGGRESSIVE])\n self.game = game\n self.jtacs: List[JtacInfo] = []\n\n def _group_point(self, point) -> Point:\n distance = randint(\n int(self.conflict.size * SPREAD_DISTANCE_FACTOR[0]),\n int(self.conflict.size * SPREAD_DISTANCE_FACTOR[1]),\n )\n return point.random_point_within(distance, self.conflict.size * SPREAD_DISTANCE_SIZE_FACTOR)\n\n def generate(self):\n\n player_groups = []\n enemy_groups = []\n\n combat_width = self.conflict.distance/2\n if combat_width > 500000:\n combat_width = 500000\n if combat_width < 35000:\n combat_width = 35000\n\n position = Conflict.frontline_position(self.game.theater, self.conflict.from_cp, self.conflict.to_cp)\n\n # Create player groups at random position\n for group in self.player_planned_combat_groups:\n if group.role == CombatGroupRole.ARTILLERY:\n distance_from_frontline = self.get_artilery_group_distance_from_frontline(group)\n else:\n distance_from_frontline = DISTANCE_FROM_FRONTLINE[group.role]\n final_position = self.get_valid_position_for_group(position, True, combat_width, distance_from_frontline)\n\n if final_position is not None:\n g = self._generate_group(\n side=self.mission.country(self.game.player_country),\n unit=group.units[0],\n heading=self.conflict.heading+90,\n count=len(group.units),\n at=final_position)\n g.set_skill(self.game.settings.player_skill)\n player_groups.append((g,group))\n\n self.gen_infantry_group_for_group(g, True, self.mission.country(self.game.player_country), self.conflict.heading + 90)\n\n # Create enemy groups at random position\n for group in self.enemy_planned_combat_groups:\n if group.role == CombatGroupRole.ARTILLERY:\n distance_from_frontline = self.get_artilery_group_distance_from_frontline(group)\n else:\n distance_from_frontline = DISTANCE_FROM_FRONTLINE[group.role]\n final_position = self.get_valid_position_for_group(position, False, combat_width, distance_from_frontline)\n\n if final_position is not None:\n g = self._generate_group(\n side=self.mission.country(self.game.enemy_country),\n unit=group.units[0],\n heading=self.conflict.heading - 90,\n count=len(group.units),\n at=final_position)\n g.set_skill(self.game.settings.enemy_vehicle_skill)\n enemy_groups.append((g, group))\n\n self.gen_infantry_group_for_group(g, False, self.mission.country(self.game.enemy_country), self.conflict.heading - 90)\n\n # Plan combat actions for groups\n self.plan_action_for_groups(self.player_stance, player_groups, enemy_groups, self.conflict.heading + 90, self.conflict.from_cp, self.conflict.to_cp)\n self.plan_action_for_groups(self.enemy_stance, enemy_groups, player_groups, self.conflict.heading - 90, self.conflict.to_cp, self.conflict.from_cp)\n\n # Add JTAC\n if \"has_jtac\" in self.game.player_faction and 
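The enemy stance above is drawn from a list that repeats AGGRESSIVE or DEFENSIVE three times to skew the odds 3:1:1. `random.choices` states the same weighting directly; plain strings stand in for the `CombatStance` members here:

```python
import random

# equivalent to listing AGGRESSIVE three times in the choice list
stance = random.choices(
    ["AGGRESSIVE", "ELIMINATION", "BREAKTHROUGH"],  # outnumbering case above
    weights=[3, 1, 1],
    k=1,
)[0]
print(stance)
```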
self.game.player_faction[\"has_jtac\"] and self.game.settings.include_jtac_if_available:\n n = \"JTAC\" + str(self.conflict.from_cp.id) + str(self.conflict.to_cp.id)\n code = 1688 - len(self.jtacs)\n\n utype = MQ_9_Reaper\n if \"jtac_unit\" in self.game.player_faction:\n utype = self.game.player_faction[\"jtac_unit\"]\n\n jtac = self.mission.flight_group(country=self.mission.country(self.game.player_country),\n name=n,\n aircraft_type=utype,\n position=position[0],\n airport=None,\n altitude=5000)\n jtac.points[0].tasks.append(SetInvisibleCommand(True))\n jtac.points[0].tasks.append(SetImmortalCommand(True))\n jtac.points[0].tasks.append(OrbitAction(5000, 300, OrbitAction.OrbitPattern.Circle))\n frontline = f\"Frontline {self.conflict.from_cp.name}/{self.conflict.to_cp.name}\"\n # Note: Will need to change if we ever add ground based JTAC.\n callsign = callsign_for_support_unit(jtac)\n self.jtacs.append(JtacInfo(n, callsign, frontline, str(code)))\n\n def gen_infantry_group_for_group(self, group, is_player, side:Country, forward_heading):\n\n # Disable infantry unit gen if disabled\n if not self.game.settings.perf_infantry:\n return\n\n infantry_position = group.points[0].position.random_point_within(250, 50)\n\n if side == self.conflict.attackers_country:\n cp = self.conflict.from_cp\n else:\n cp = self.conflict.to_cp\n\n if is_player:\n faction = self.game.player_name\n else:\n faction = self.game.enemy_name\n\n possible_infantry_units = db.find_infantry(faction)\n if len(possible_infantry_units) == 0:\n return\n\n u = random.choice(possible_infantry_units)\n self.mission.vehicle_group(\n side,\n namegen.next_infantry_name(side, cp, u), u,\n position=infantry_position,\n group_size=1,\n heading=forward_heading,\n move_formation=PointAction.OffRoad)\n\n for i in range(randint(3, 10)):\n u = random.choice(possible_infantry_units)\n position = infantry_position.random_point_within(55, 5)\n self.mission.vehicle_group(\n side,\n namegen.next_infantry_name(side, cp, u), u,\n position=position,\n group_size=1,\n heading=forward_heading,\n move_formation=PointAction.OffRoad)\n\n\n def plan_action_for_groups(self, stance, ally_groups, enemy_groups, forward_heading, from_cp, to_cp):\n\n if not self.game.settings.perf_moving_units:\n return\n\n for dcs_group, group in ally_groups:\n if group.role == CombatGroupRole.ARTILLERY:\n # Fire on any ennemy in range\n if self.game.settings.perf_artillery:\n target = self.get_artillery_target_in_range(dcs_group, group, enemy_groups)\n if target is not None:\n\n if stance != CombatStance.RETREAT:\n hold_task = Hold()\n hold_task.number = 1\n dcs_group.add_trigger_action(hold_task)\n\n # Artillery strike random start\n artillery_trigger = TriggerOnce(Event.NoEvent, \"ArtilleryFireTask #\" + str(dcs_group.id))\n artillery_trigger.add_condition(TimeAfter(seconds=random.randint(1, 45)* 60))\n\n fire_task = FireAtPoint(target, len(group.units) * 10, 100)\n if stance != CombatStance.RETREAT:\n fire_task.number = 2\n else:\n fire_task.number = 1\n dcs_group.add_trigger_action(fire_task)\n artillery_trigger.add_action(AITaskPush(dcs_group.id, len(dcs_group.tasks)))\n self.mission.triggerrules.triggers.append(artillery_trigger)\n\n # Artillery will fall back when under attack\n if stance != CombatStance.RETREAT:\n\n # Hold position\n dcs_group.points[0].tasks.append(Hold())\n retreat = self.find_retreat_point(dcs_group, forward_heading, (int)(RETREAT_DISTANCE/3))\n dcs_group.add_waypoint(dcs_group.position.point_from_heading(forward_heading, 1), PointAction.OffRoad)\n 
dcs_group.points[1].tasks.append(Hold())\n dcs_group.add_waypoint(retreat, PointAction.OffRoad)\n\n artillery_fallback = TriggerOnce(Event.NoEvent, \"ArtilleryRetreat #\" + str(dcs_group.id))\n for i, u in enumerate(dcs_group.units):\n artillery_fallback.add_condition(UnitDamaged(u.id))\n if i < len(dcs_group.units) - 1:\n artillery_fallback.add_condition(Or())\n\n hold_2 = Hold()\n hold_2.number = 3\n dcs_group.add_trigger_action(hold_2)\n\n retreat_task = GoToWaypoint(toIndex=3)\n retreat_task.number = 4\n dcs_group.add_trigger_action(retreat_task)\n\n artillery_fallback.add_action(AITaskPush(dcs_group.id, len(dcs_group.tasks)))\n self.mission.triggerrules.triggers.append(artillery_fallback)\n\n for u in dcs_group.units:\n u.initial = True\n u.heading = forward_heading + random.randint(-5,5)\n\n elif group.role in [CombatGroupRole.TANK, CombatGroupRole.IFV]:\n if stance == CombatStance.AGGRESSIVE:\n # Attack nearest enemy if any\n # Then move forward OR Attack enemy base if it is not too far away\n target = self.find_nearest_enemy_group(dcs_group, enemy_groups)\n if target is not None:\n rand_offset = Point(random.randint(-RANDOM_OFFSET_ATTACK, RANDOM_OFFSET_ATTACK), random.randint(-RANDOM_OFFSET_ATTACK, RANDOM_OFFSET_ATTACK))\n dcs_group.add_waypoint(target.points[0].position + rand_offset, PointAction.OffRoad)\n dcs_group.points[1].tasks.append(AttackGroup(target.id))\n\n if to_cp.position.distance_to_point(dcs_group.points[0].position) <= AGGRESIVE_MOVE_DISTANCE:\n attack_point = to_cp.position.random_point_within(500, 0)\n else:\n attack_point = self.find_offensive_point(dcs_group, forward_heading, AGGRESIVE_MOVE_DISTANCE)\n dcs_group.add_waypoint(attack_point, PointAction.OnRoad)\n elif stance == CombatStance.BREAKTHROUGH:\n # In breakthrough mode, the units will move forward\n # If the enemy base is close enough, the units will attack the base\n if to_cp.position.distance_to_point(\n dcs_group.points[0].position) <= BREAKTHROUGH_OFFENSIVE_DISTANCE:\n attack_point = to_cp.position.random_point_within(500, 0)\n else:\n attack_point = self.find_offensive_point(dcs_group, forward_heading, BREAKTHROUGH_OFFENSIVE_DISTANCE)\n dcs_group.add_waypoint(attack_point, PointAction.OnRoad)\n elif stance == CombatStance.ELIMINATION:\n # In elimination mode, the units focus on destroying as much enemy groups as possible\n targets = self.find_n_nearest_enemy_groups(dcs_group, enemy_groups, 3)\n i = 1\n for target in targets:\n rand_offset = Point(random.randint(-RANDOM_OFFSET_ATTACK, RANDOM_OFFSET_ATTACK), random.randint(-RANDOM_OFFSET_ATTACK, RANDOM_OFFSET_ATTACK))\n dcs_group.add_waypoint(target.points[0].position+rand_offset, PointAction.OffRoad)\n dcs_group.points[i].tasks.append(AttackGroup(target.id))\n i = i + 1\n if to_cp.position.distance_to_point(dcs_group.points[0].position) <= AGGRESIVE_MOVE_DISTANCE:\n attack_point = to_cp.position.random_point_within(500, 0)\n dcs_group.add_waypoint(attack_point)\n\n if stance != CombatStance.RETREAT:\n self.add_morale_trigger(dcs_group, forward_heading)\n\n elif group.role in [CombatGroupRole.APC, CombatGroupRole.ATGM]:\n\n if stance in [CombatStance.AGGRESSIVE, CombatStance.BREAKTHROUGH, CombatStance.ELIMINATION]:\n # APC & ATGM will never move too much forward, but will follow along any offensive\n if to_cp.position.distance_to_point(dcs_group.points[0].position) <= AGGRESIVE_MOVE_DISTANCE:\n attack_point = to_cp.position.random_point_within(500, 0)\n else:\n attack_point = self.find_offensive_point(dcs_group, forward_heading, 
AGGRESIVE_MOVE_DISTANCE)\n dcs_group.add_waypoint(attack_point, PointAction.OnRoad)\n\n if stance != CombatStance.RETREAT:\n self.add_morale_trigger(dcs_group, forward_heading)\n\n if stance == CombatStance.RETREAT:\n # In retreat mode, the units will fall back\n # If the ally base is close enough, the units will even regroup there\n if from_cp.position.distance_to_point(dcs_group.points[0].position) <= RETREAT_DISTANCE:\n retreat_point = from_cp.position.random_point_within(500, 250)\n else:\n retreat_point = self.find_retreat_point(dcs_group, forward_heading)\n reposition_point = retreat_point.point_from_heading(forward_heading, 10) # Another point to make the unit face the enemy\n dcs_group.add_waypoint(retreat_point, PointAction.OnRoad)\n dcs_group.add_waypoint(reposition_point, PointAction.OffRoad)\n\n\n def add_morale_trigger(self, dcs_group, forward_heading):\n \"\"\"\n This add a trigger to manage units fleeing whenever their group is hit hard, or being engaged by CAS\n \"\"\"\n\n if len(dcs_group.units) == 1:\n return\n\n # Units should hold position on last waypoint\n dcs_group.points[len(dcs_group.points) - 1].tasks.append(Hold())\n\n # Force unit heading\n for unit in dcs_group.units:\n unit.heading = forward_heading\n dcs_group.manualHeading = True\n\n # We add a new retreat waypoint\n dcs_group.add_waypoint(self.find_retreat_point(dcs_group, forward_heading, (int)(RETREAT_DISTANCE / 8)), PointAction.OffRoad)\n\n # Fallback task\n fallback = ControlledTask(GoToWaypoint(toIndex=len(dcs_group.points)))\n fallback.enabled = False\n dcs_group.add_trigger_action(Hold())\n dcs_group.add_trigger_action(fallback)\n\n # Create trigger\n fallback = TriggerOnce(Event.NoEvent, \"Morale manager #\" + str(dcs_group.id))\n\n # Usually more than 50% casualties = RETREAT\n fallback.add_condition(GroupLifeLess(dcs_group.id, random.randint(51, 76)))\n\n # Do retreat to the configured retreat waypoint\n fallback.add_action(AITaskPush(dcs_group.id, len(dcs_group.tasks)))\n\n self.mission.triggerrules.triggers.append(fallback)\n\n\n def find_retreat_point(self, dcs_group, frontline_heading, distance=RETREAT_DISTANCE):\n \"\"\"\n Find a point to retreat to\n :param dcs_group: DCS mission group we are searching a retreat point for\n :param frontline_heading: Heading of the frontline\n :return: dcs.mapping.Point object with the desired position\n \"\"\"\n return dcs_group.points[0].position.point_from_heading(frontline_heading-180, distance)\n\n def find_offensive_point(self, dcs_group, frontline_heading, distance):\n \"\"\"\n Find a point to attack\n :param dcs_group: DCS mission group we are searching an attack point for\n :param frontline_heading: Heading of the frontline\n :param distance: Distance of the offensive (how far unit should move)\n :return: dcs.mapping.Point object with the desired position\n \"\"\"\n return dcs_group.points[0].position.point_from_heading(frontline_heading, distance)\n\n def find_n_nearest_enemy_groups(self, player_group, enemy_groups, n):\n \"\"\"\n Return the neaarest enemy group for the player group\n @param group Group for which we should find the nearest ennemies\n @param enemy_groups Potential enemy groups\n @param n number of nearby groups to take\n \"\"\"\n targets = []\n sorted_list = sorted(enemy_groups, key=lambda group: player_group.points[0].position.distance_to_point(group[0].points[0].position))\n for i in range(n):\n if len(sorted_list) <= i:\n break\n else:\n targets.append(sorted_list[i][0])\n return targets\n\n\n def find_nearest_enemy_group(self, 
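`find_n_nearest_enemy_groups` above sorts every enemy group by distance and slices off the first `n`; for a handful of targets, `heapq.nsmallest` does the same without a full sort. A sketch with plain coordinate tuples standing in for DCS groups:

```python
import heapq
import math

def n_nearest(origin, positions, n=3):
    """Return the n positions closest to origin (Euclidean distance)."""
    return heapq.nsmallest(n, positions, key=lambda p: math.dist(origin, p))

print(n_nearest((0, 0), [(5, 5), (1, 1), (3, 0), (0, 2)], n=2))
# [(1, 1), (0, 2)]
```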
player_group, enemy_groups):\n \"\"\"\n Search the enemy groups for a potential target suitable to armored assault\n @param group Group for which we should find the nearest ennemy\n @param enemy_groups Potential enemy groups\n \"\"\"\n min_distance = 99999999\n target = None\n for dcs_group, group in enemy_groups:\n dist = player_group.points[0].position.distance_to_point(dcs_group.points[0].position)\n if dist < min_distance:\n min_distance = dist\n target = dcs_group\n return target\n\n\n def get_artillery_target_in_range(self, dcs_group, group, enemy_groups):\n \"\"\"\n Search the enemy groups for a potential target suitable to an artillery unit\n \"\"\"\n rng = group.units[0].threat_range\n if len(enemy_groups) == 0:\n return None\n for o in range(10):\n potential_target = random.choice(enemy_groups)[0]\n distance_to_target = dcs_group.points[0].position.distance_to_point(potential_target.points[0].position)\n if distance_to_target < rng:\n return potential_target.points[0].position\n return None\n\n\n def get_artilery_group_distance_from_frontline(self, group):\n \"\"\"\n For artilery group, decide the distance from frontline with the range of the unit\n \"\"\"\n rg = group.units[0].threat_range - 7500\n if rg > DISTANCE_FROM_FRONTLINE[CombatGroupRole.ARTILLERY]:\n rg = DISTANCE_FROM_FRONTLINE[CombatGroupRole.ARTILLERY]\n if rg < DISTANCE_FROM_FRONTLINE[CombatGroupRole.TANK]:\n rg = DISTANCE_FROM_FRONTLINE[CombatGroupRole.TANK] + 100\n return rg\n\n\n def get_valid_position_for_group(self, conflict_position, isplayer, combat_width, distance_from_frontline):\n i = 0\n while i < 25: # 25 attempt for valid position\n heading_diff = -90 if isplayer else 90\n shifted = conflict_position[0].point_from_heading(self.conflict.heading,\n random.randint((int)(-combat_width / 2), (int)(combat_width / 2)))\n final_position = shifted.point_from_heading(self.conflict.heading + heading_diff, distance_from_frontline)\n\n if self.conflict.theater.is_on_land(final_position):\n return final_position\n else:\n i = i + 1\n continue\n return None\n\n def _generate_group(self, side: Country, unit: VehicleType, count: int, at: Point, move_formation: PointAction = PointAction.OffRoad, heading=0):\n\n if side == self.conflict.attackers_country:\n cp = self.conflict.from_cp\n else:\n cp = self.conflict.to_cp\n\n logging.info(\"armorgen: {} for {}\".format(unit, side.id))\n group = self.mission.vehicle_group(\n side,\n namegen.next_unit_name(side, cp.id, unit), unit,\n position=self._group_point(at),\n group_size=count,\n heading=heading,\n move_formation=move_formation)\n\n for c in range(count):\n vehicle: Vehicle = group.units[c]\n vehicle.player_can_drive = True\n\n return group","sub_path":"gen/armor.py","file_name":"armor.py","file_ext":"py","file_size_in_byte":21964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"68895299","text":"\nimport cadquery as cq\nfrom paramak import RotateStraightShape\n\n\nclass PoloidalFieldCoilCaseSet(RotateStraightShape):\n \"\"\"Creates a series of rectangular poloidal field coils.\n\n Args:\n heights (list of floats): the vertical (z axis) heights of the coil (cm).\n widths (list of floats): the horizontal (x axis) widths of the coil (cm).\n casing_thicknesses (list of floats): the thickness of the casing (cm).\n center_points (tuple of floats): the center of the coil (x,z) values (cm).\n\n Keyword Args:\n name (str): the legend name used when exporting a html graph of the\n shape.\n color (sequences of 3 or 4 floats 
each in the range 0-1): the color to\n use when exporting as html graphs or png images.\n material_tag (str): The material name to use when exporting the\n neutronics description.\n stp_filename (str): The filename used when saving stp files as part of a\n reactor.\n azimuth_placement_angle (float or iterable of floats): The angle or\n angles to use when rotating the shape on the azimuthal axis.\n rotation_angle (float): The rotation angle to use when revolving the\n solid (degrees).\n workplane (str): The orientation of the CadQuery workplane. Options are\n XY, YZ or XZ.\n intersect (CadQuery object): An optional CadQuery object to perform a\n boolean intersect with this object.\n cut (CadQuery object): An optional CadQuery object to perform a boolean\n cut with this object.\n union (CadQuery object): An optional CadQuery object to perform a\n boolean union with this object.\n tet_mesh (str): Insert description.\n physical_groups (type): Insert description.\n\n Returns:\n a paramak shape object: A shape object that has generic functionality\n with points determined by the find_points() method. A CadQuery solid of\n the shape can be called via shape.solid.\n \"\"\"\n\n def __init__(\n self,\n heights,\n widths,\n casing_thicknesses,\n center_points,\n rotation_angle=360,\n stp_filename=\"PoloidalFieldCoil.stp\",\n stl_filename=\"PoloidalFieldCoil.stl\",\n color=(0.5, 0.5, 0.5),\n azimuth_placement_angle=0,\n name=\"pf_coil\",\n material_tag=\"pf_coil_mat\",\n **kwargs\n ):\n\n default_dict = {\n \"points\": None,\n \"workplane\": \"XZ\",\n \"solid\": None,\n \"intersect\": None,\n \"cut\": None,\n \"union\": None,\n \"tet_mesh\": None,\n \"physical_groups\": None,\n }\n\n for arg in kwargs:\n if arg in default_dict:\n default_dict[arg] = kwargs[arg]\n\n super().__init__(\n name=name,\n color=color,\n material_tag=material_tag,\n stp_filename=stp_filename,\n stl_filename=stl_filename,\n azimuth_placement_angle=azimuth_placement_angle,\n rotation_angle=rotation_angle,\n hash_value=None,\n **default_dict\n )\n\n self.center_points = center_points\n self.heights = heights\n self.widths = widths\n self.casing_thicknesses = casing_thicknesses\n\n @property\n def solid(self):\n if self.get_hash() != self.hash_value:\n self.create_solid()\n return self._solid\n\n @solid.setter\n def solid(self, value):\n self._solid = value\n\n @property\n def center_points(self):\n return self._center_points\n\n @center_points.setter\n def center_points(self, center_points):\n self._center_points = center_points\n\n @property\n def heights(self):\n return self._heights\n\n @heights.setter\n def heights(self, heights):\n self._heights = heights\n\n @property\n def widths(self):\n return self._widths\n\n @widths.setter\n def widths(self, widths):\n self._widths = widths\n\n def find_points(self):\n \"\"\"Finds the XZ points joined by straight connections that describe\n the 2D profile of the poloidal field coil shape.\"\"\"\n\n all_points = []\n\n for height, width, center_point, casing_thickness in zip(\n self.heights, self.widths, self.center_points, self.casing_thicknesses):\n\n all_points = all_points + [\n (\n center_point[0] + width / 2.0,\n center_point[1] + height / 2.0,\n ), # upper right\n (\n center_point[0] + width / 2.0,\n center_point[1] - height / 2.0,\n ), # lower right\n (\n center_point[0] - width / 2.0,\n center_point[1] - height / 2.0,\n ), # lower left\n (\n center_point[0] - width / 2.0,\n center_point[1] + height / 2.0,\n ), # upper left\n (\n center_point[0] + width / 2.0,\n center_point[1] 
+ height / 2.0,\n ), # upper right\n (\n center_point[0] + (casing_thickness + width / 2.0),\n center_point[1] + (casing_thickness + height / 2.0),\n ),\n (\n center_point[0] + (casing_thickness + width / 2.0),\n center_point[1] - (casing_thickness + height / 2.0),\n ),\n (\n center_point[0] - (casing_thickness + width / 2.0),\n center_point[1] - (casing_thickness + height / 2.0),\n ),\n (\n center_point[0] - (casing_thickness + width / 2.0),\n center_point[1] + (casing_thickness + height / 2.0),\n ),\n (\n center_point[0] + (casing_thickness + width / 2.0),\n center_point[1] + (casing_thickness + height / 2.0),\n )\n ]\n\n self.points = all_points\n\n def create_solid(self):\n \"\"\"Creates a 3d solid using points with straight connections edges,\n azimuth_placement_angle and rotation angle. Individual solids in\n the compound can be accessed using .Solids()[i] where i is an int\n\n Returns:\n A CadQuery solid: A 3D solid volume\n \"\"\"\n\n iter_points = iter(self.points)\n pf_coils_set = []\n for p1, p2, p3, p4, p5, p6, p7, p8, p9, p10 in zip(\n iter_points, iter_points, iter_points, iter_points,\n iter_points, iter_points, iter_points, iter_points,\n iter_points, iter_points,\n ):\n\n solid = (\n cq.Workplane(self.workplane)\n .polyline([p1, p2, p3, p4, p5, p6, p7, p8, p9, p10])\n .close()\n .revolve(self.rotation_angle)\n )\n pf_coils_set.append(solid)\n\n compound = cq.Compound.makeCompound(\n [a.val() for a in pf_coils_set]\n )\n\n self.solid = compound\n\n # Calculate hash value for current solid\n self.hash_value = self.get_hash()\n\n return compound\n","sub_path":"paramak/parametric_components/poloidal_field_coil_case_set.py","file_name":"poloidal_field_coil_case_set.py","file_ext":"py","file_size_in_byte":7304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"581937024","text":"import unittest\nimport os\nimport os.path\nimport settingsmanager\nfrom settingsmanager import parse_terminal_setting, valid_setting,\\\n get_auto_setting_acronym, get_updated_css\n\n\nclass GetAutoSettingAcronymTest(unittest.TestCase):\n\n def test_default(self):\n default_config = {\n \"automatic\": {\n \"Auto-Indent\": False,\n \"Line Numbers\": False,\n \"open in New Window\": False,\n \"Vertical Scrollbar\": \"on\",\n \"Animate Terminal Output\": False,\n \"Terminal Animation Interval\": 5,\n \"max Page Width\": 1000,\n \"Show WordCount in titlebar\": False\n }\n }\n correct_result = {\n \"ai\": \"Auto-Indent\",\n \"ln\": \"Line Numbers\",\n \"nw\": \"open in New Window\",\n \"vs\": \"Vertical Scrollbar\",\n \"ato\": \"Animate Terminal Output\",\n \"tai\": \"Terminal Animation Interval\",\n \"pw\": \"max Page Width\",\n \"swc\": \"Show WordCount in titlebar\"\n }\n result = get_auto_setting_acronym(default_config)\n self.assertEqual(result, correct_result)\n\n def test_no_acronym(self):\n default_config = {\n \"automatic\": {\n \"Auto-Indent\": False,\n \"line numbers\": False\n }\n }\n with self.assertRaisesRegex(Exception, '(?i)config'):\n get_auto_setting_acronym(default_config)\n\n def test_empty_config(self):\n result = get_auto_setting_acronym({'automatic':{}})\n self.assertEqual(result, {})\n\n\nclass GetSettingTypes(unittest.TestCase):\n pass\n\n\nclass GetSettingTypes(unittest.TestCase):\n pass\n\n\nclass ValidSettingTest(unittest.TestCase):\n\n def setUp(self):\n self.setting_types = {\n 'automatic': {\n 'testbool': bool,\n 'testfloat': float\n },\n 'manual': {\n 'testint': int,\n 'testnope': None\n }\n }\n\n def 
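`create_solid` above consumes the flat point list ten entries at a time by zipping one shared iterator against itself. The usual "grouper" spelling of that trick, shown standalone:

```python
def chunks_of(points, size=10):
    """Yield consecutive size-length tuples, exactly like
    zip(iter_points, iter_points, ...) over one shared iterator."""
    it = iter(points)
    return zip(*([it] * size))

for group in chunks_of(range(30)):
    print(group[0], '...', group[-1])   # 0...9, 10...19, 20...29
```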
test_valid_bool(self):\n self.assertEqual(valid_setting('testbool', True, self.setting_types), True)\n\n def test_invalid_bool(self):\n self.assertEqual(valid_setting('testbool', 'true', self.setting_types), False)\n\n def test_valid_int(self):\n self.assertEqual(valid_setting('testint', 189, self.setting_types), True)\n\n def test_invalid_int(self):\n self.assertEqual(valid_setting('testint', 'no', self.setting_types), False)\n\n def test_valid_float(self):\n self.assertEqual(valid_setting('testfloat', 12.3, self.setting_types), True)\n\n def test_invalid_float(self):\n self.assertEqual(valid_setting('testfloat', 12, self.setting_types), False)\n\n def test_notype(self):\n self.assertEqual(valid_setting('testnope', 'Haha', self.setting_types), True)\n\n def test_notype(self):\n self.assertEqual(valid_setting('testnope', 'Haha', self.setting_types), True)\n\n def test_both_subdicts(self):\n setting_types = {\n 'automatic': {\n 'test1auto': None\n },\n 'manual': {\n 'test2manual': None\n }\n }\n with self.subTest(i=0, msg='Testing automatic subdict'):\n self.assertEqual(valid_setting('test1auto', '', setting_types), True)\n with self.subTest(i=1, msg='Testing manual subdict'):\n self.assertEqual(valid_setting('test2manual', '', setting_types), True)\n\n def test_empty_automatic(self):\n setting_types = {\n 'automatic': {},\n 'manual': {\n 'test': None\n }\n }\n self.assertEqual(valid_setting('test', '', setting_types), True)\n\n def test_empty_manual(self):\n setting_types = {\n 'automatic': {\n 'test': None\n },\n 'manual': {}\n }\n self.assertEqual(valid_setting('test', '', setting_types), True)\n\n\nclass UpdateRuntimeSettingTest(unittest.TestCase):\n pass\n\n\nclass LoadSettingsTest(unittest.TestCase):\n pass\n\n\nclass ParseSettingCommandTest(unittest.TestCase):\n pass\n\n\nclass ParseTerminalSettingTest(unittest.TestCase):\n\n def test_bool(self):\n trues = ['y', '1', 'true', 'Y', 'True', 'TRUE']\n falses = ['n', '0', 'false', 'N', 'False', 'FALSE']\n l = [(x, True) for x in trues] + [(x, False) for x in falses]\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertEqual(parse_terminal_setting(x[0], bool), x[1])\n\n def test_invalid_bool(self):\n l = ['no', 'yes', 'x', '10']\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertIsNone(parse_terminal_setting(x, bool))\n\n def test_int(self):\n l = ['10', '1298', '-50']\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertEqual(parse_terminal_setting(x, int), int(x))\n\n def test_invalid_int(self):\n l = ['10.1', 'x', '-0.05', 'blooop']\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertIsNone(parse_terminal_setting(x, int))\n\n def test_float(self):\n l = ['10.0', '1298', '-0.50', '-99.9']\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertEqual(parse_terminal_setting(x, float), float(x))\n\n def test_invalid_float(self):\n l = ['x', 'blooop']\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertIsNone(parse_terminal_setting(x, float))\n\n def test_no_type(self):\n l = ['10.0', 'arst', 'jinz tai lorem ipsum']\n for n, x in enumerate(l):\n with self.subTest(i=n):\n self.assertEqual(parse_terminal_setting(x, None), x)\n\n\n# class GetUpdatedCSSTest(unittest.TestCase):\n\n# def test_use_default_style(self):\n# stylepath = os.path.join(sys.path[0], 'test_style_delete_me')\n# os.remove(stylepath)\n# get_updated_css(stylepath, )\n\n\nclass GetPathsTest(unittest.TestCase):\n\n def test_default_path(self):\n config_dir = os.path.join(os.getenv('HOME'), 
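`ParseTerminalSettingTest` above pins down the contract: case-insensitive y/n-style tokens for booleans, `None` for anything unparsable, and pass-through when no type is requested. A minimal implementation consistent with those tests; the real `settingsmanager` may differ, so treat this as a sketch:

```python
def parse_terminal_setting(raw, setting_type):
    """Parse a raw terminal string according to the tests above."""
    if setting_type is bool:
        token = raw.lower()
        if token in ('y', '1', 'true'):
            return True
        if token in ('n', '0', 'false'):
            return False
        return None                      # 'yes', 'x', '10', ... are rejected
    if setting_type in (int, float):
        try:
            return setting_type(raw)     # int('10.1') raises -> None below
        except ValueError:
            return None
    return raw                           # no declared type: keep the string
```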
'.config', 'kalpana')\n dirs = {\n 'config_dir': config_dir,\n 'config_file': os.path.join(config_dir, 'kalpana.conf'),\n 'style': os.path.join(config_dir, 'style.conf'),\n 'loadorder': os.path.join(config_dir, 'loadorder.conf'),\n 'plugins': os.path.join(config_dir, 'plugins'),\n 'spellcheck-pwl': os.path.join(config_dir, 'spellcheck-pwl')\n }\n self.assertEqual(settingsmanager.get_paths(''), dirs)\n\n def test_custom_path(self):\n config_dir = os.path.join(os.getenv('HOME'), '.config')\n dirs = {\n 'config_dir': config_dir,\n 'config_file': os.path.join(config_dir, 'kalpana.conf'),\n 'style': os.path.join(config_dir, 'style.conf'),\n 'loadorder': os.path.join(config_dir, 'loadorder.conf'),\n 'plugins': os.path.join(config_dir, 'plugins'),\n 'spellcheck-pwl': os.path.join(config_dir, 'spellcheck-pwl')\n }\n self.assertEqual(settingsmanager.get_paths(config_dir), dirs)\n\n def test_broken_custom_path(self):\n custom_config_dir = os.path.join('bad', 'path', 'error', '404')\n config_dir = os.path.join(os.getenv('HOME'), '.config', 'kalpana')\n dirs = {\n 'config_dir': config_dir,\n 'config_file': os.path.join(config_dir, 'kalpana.conf'),\n 'style': os.path.join(config_dir, 'style.conf'),\n 'loadorder': os.path.join(config_dir, 'loadorder.conf'),\n 'plugins': os.path.join(config_dir, 'plugins'),\n 'spellcheck-pwl': os.path.join(config_dir, 'spellcheck-pwl')\n }\n self.assertEqual(settingsmanager.get_paths(custom_config_dir), dirs)\n","sub_path":"test_settingsmanager.py","file_name":"test_settingsmanager.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"230177246","text":"from discord.ext import commands\nimport discord, nekos, var, config, random, datetime, os\n\nbot = commands.Bot(command_prefix=config.prefix)\nTOKEN = config.token\n\n@bot.event\nasync def on_ready():\n os.system('clear')\n print('\\n\\x1b[35m ███╗ ██╗██╗ ██╗ ██████╗ ██╗ ██╗██╗ ██╗');\n print('\\x1b[95m ████╗ ██║╚██╗ ██╔╝██╔═══██╗██║ ██╔╝██║ ██║');\n print('\\x1b[35m ██╔██╗ ██║ ╚████╔╝ ██║ ██║█████╔╝ ██║ ██║');\n print('\\x1b[95m ██║╚██╗██║ ╚██╔╝ ██║ ██║██╔═██╗ ██║ ██║');\n print('\\x1b[35m ██║ ╚████║ ██║ ╚██████╔╝██║ ██╗╚██████╔╝');\n print('\\x1b[95m ╚═╝ ╚═══╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝')\n print(f'\\x1b[35m\\n Logged in as {bot.user}\\n\\x1b[0m')\n\n## this embed no worky innit\n\n## @bot.command()\n## async def last(ctx):\n## \tlast = discord.Embed(\"[USER](https://nekos.cc/u/1000?mode=0).\",colour=0x0000ff)\n## \tlast.description = f\"[USER](https://nekos.cc/u/1000?mode=0).\"\n## \tlast.add_field(name=\"Field2\", value=\"hi2\", inline=True)\n## \tlast.add_field(name=\"Field2\", value=\"hi2\", inline=True)\n## \tlast.set_image(url=\"https://assets.ppy.sh/beatmaps/1/covers/cover.jpg\")\n## \tawait ctx.send(embed=last)\n\n## the commands below are boring lol\n\n@bot.command()\nasync def hi(ctx):\n await ctx.send('whats up twat')\n\n## @bot.command()\n## async def cum(ctx):\n## await ctx.send('https://cdn.discordapp.com/emojis/797469159403421774.png')\n\n@bot.command()\nasync def sex(ctx):\n await ctx.send('go back to the gc!! 
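`GetPathsTest` above implies that `get_paths` keeps a custom directory only when it actually exists and otherwise falls back to `~/.config/kalpana`. A sketch consistent with the three test cases; the real module's logic is not shown in this record, so this is an inference:

```python
import os

def get_paths(config_dir):
    """Resolve Kalpana's config paths, per the expectations in GetPathsTest."""
    if not config_dir or not os.path.isdir(config_dir):
        config_dir = os.path.join(os.getenv('HOME'), '.config', 'kalpana')
    def join(name):
        return os.path.join(config_dir, name)
    return {
        'config_dir': config_dir,
        'config_file': join('kalpana.conf'),
        'style': join('style.conf'),
        'loadorder': join('loadorder.conf'),
        'plugins': join('plugins'),
        'spellcheck-pwl': join('spellcheck-pwl'),
    }
```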
https://i.redd.it/cah38y4p41f51.png')\n\n@bot.command()\nasync def lolis(ctx):\n await ctx.send('https://media.very.co.uk/i/very/MFWPY_SQ1_0000000099_N_A_SLf?$550x733_standard$')\n\n@bot.command()\nasync def women(ctx):\n await ctx.send('https://image.emojipng.com/54/12456054.jpg')\n\n## @bot.command()\n## async def nekosu(ctx):\n## await ctx.send('Pls play nekosu i am desperate https://nekos.cc/')\n\n## @bot.command()\n## async def cookiezi(ctx):\n## await ctx.send('Check if Cookiezi is up here: https://c.cookiezi.gay')\n\n## end of boring commands and the start of the embeds\n\n@bot.command()\nasync def embed(ctx):\n embed=discord.Embed(title=\"we do a little trolling\", url=\"https://nekos.cc/\", description=\"This is a sample embed. If you are seeing this matty actually did something right\", colour=0xFF5733)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def nekosu(ctx):\n embed=discord.Embed(title=\"Nekosu!\", url=\"https://nekos.cc/\", description=\"Nekosu! is an osu! private server basically based around catboys/catgirls. We also praise Astolfo\", colour=0xfc03df)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def cookiezi(ctx):\n embed=discord.Embed(title=\"osu!Cookiezi\", url=\"https://cookiezi.gay/\", description=\"osu!Cookiezi is matty's secondary server that runs gulag instead of ripple. This server is considered to be less important so won't get updates very often and there's no guarantee that it will always work. This server is the first dedicated cheating server running on Gulag.\", colour=0xfc03df)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def source(ctx):\n embed=discord.Embed(title=\"Nyoku source code!\", url=\"https://github.com/mattylive/Nyoku\", description=\"Linked above is the full Nyoku source code (usually kept up to date with the production code)\", colour=0xfc03df)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def catboy(ctx):\n embed=discord.Embed(title=\"Catboy\", url=\"https://cdn.donmai.us/sample/ac/d4/sample-acd4d3388360a9b5a1bcd860a25bd438.jpg\", description=\"Enjoy this catboy image\")\n embed.set_image(url=\"https://cdn.donmai.us/sample/ac/d4/sample-acd4d3388360a9b5a1bcd860a25bd438.jpg\")\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def catgirl(ctx):\n embed=discord.Embed(title=\"Catgirl\", url=\"https://nekos.life/\", description=\"Not as good as catboys but still coot :3\")\n embed.set_image(url=(nekos.img('neko')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def pp(ctx):\n embed = discord.Embed(title=\"PEEPEE SIZE MACHINE\", color=0xfc03df)\n embed.add_field(name=ctx.message.author.name + \"'s pp size is:\", value=\" 8{}D\".format(random.choice(var.ppSizes)), inline=False)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def roll(ctx):\n embed = discord.Embed(title=\":game_die: Roll the die.\", color=0xfc03df)\n embed.add_field(name=ctx.message.author.name + \"'s, roll:\", value=\"{}\".format(random.randint(var.minimum, var.maximum)), inline=False)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def ball(ctx):\n embed = discord.Embed(title=ctx.message.author.name + \" asked: \" + ctx.message.content[6:], description=\":8ball: | \" + random.choice(var.ball), color=0xfc03df)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def avatar(ctx):\n embed = discord.Embed(color=0xfc03df, 
timestamp=datetime.datetime.utcnow())\n embed.set_author(name=ctx.author.name, url=ctx.author.avatar_url, icon_url=ctx.author.avatar_url)\n embed.set_image(url=ctx.author.avatar_url)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def blush(ctx):\n embed = discord.Embed(title=\":two_hearts: \" + ctx.message.author.name + \" is blushing... awww!\" , color=0xfc03df, timestamp=datetime.datetime.utcnow())\n embed.set_image(url=\"{}\".format(random.choice(var.blushGifs)))\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def feet(ctx):\n embed=discord.Embed(title=\"Feet :flushed:\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"why did i make this - Matty 2021\")\n embed.set_image(url=(nekos.img('feet')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def trap(ctx):\n embed=discord.Embed(title=\"Traps\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"You have good taste\")\n embed.set_image(url=(nekos.img('trap')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def nsfwgif(ctx):\n embed=discord.Embed(title=\"Nsfw Neko Gif\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"it moves :flushed:\")\n embed.set_image(url=(nekos.img('nsfw_neko_gif')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed) \n \n \n@bot.command()\nasync def pat(ctx):\n embed=discord.Embed(title=\"Headpats\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"How cute :pleading_face:\")\n embed.set_image(url=(nekos.img('pat')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def cum(ctx):\n embed=discord.Embed(title=\"Cum\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"Yummy cummies :yum:\")\n embed.set_image(url=(nekos.img('cum')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n \n@bot.command()\nasync def doggo(ctx):\n embed=discord.Embed(title=\"Doggo\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"Woof woof :3\")\n embed.set_image(url=(nekos.img('woof')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n \n@bot.command()\nasync def fox(ctx):\n embed=discord.Embed(title=\"Fox girl\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"Fox girls are cute :pleading_face:\")\n embed.set_image(url=(nekos.img('fox_girl')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n \n@bot.command()\nasync def cat(ctx):\n embed=discord.Embed(title=\"Cat\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"Cats are adorable :3\")\n embed.set_image(url=(nekos.cat()))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def 
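Every image command above rebuilds the same embed by hand. A small factory would remove the repetition; this sketch reuses the `bot` and `nekos` objects already defined in the script, and the author name and icon URL are the ones hard-coded above:

```python
import discord

def nekos_embed(title, url, description, image_url):
    """The embed shape repeated across the commands above, built once."""
    embed = discord.Embed(title=title, url=url, description=description)
    embed.set_image(url=image_url)
    embed.set_author(name="Nyoku", url="https://nekos.cc/Nyoku",
                     icon_url="https://a.nekos.cc/1689")
    return embed

@bot.command()
async def neko(ctx):
    await ctx.send(embed=nekos_embed(
        "Catgirl", "https://nekos.life/",
        "Not as good as catboys but still coot :3", nekos.img("neko")))
```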
cuddle(ctx):\n embed=discord.Embed(title=\"Cuddles\", url=\"https://github.com/Nekos-life/nekos.py/blob/master/nekos/nekos.py#L18\", description=\"so soft :pleading_face:\")\n embed.set_image(url=(nekos.img('cuddle')))\n embed.set_author(name=\"Nyoku\", url='https://nekos.cc/Nyoku', icon_url='https://a.nekos.cc/1689')\n await ctx.send(embed=embed)\n\nbot.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"518259186","text":"from ..utils.cached import cached\nfrom ..utils.tab import Tab\n\n\nclass SettingsBase(object):\n \"\"\"base setting class\"\"\"\n\n def get_value(self, option):\n \"\"\"return the value of the given option\"\"\"\n if not hasattr(self, option):\n return 'N/A'\n return self.__dict__[option]\n\n def get_alt(self, option):\n \"\"\"get alternative values of an option\"\"\"\n alt = option + '_alt'\n if not hasattr(self, alt):\n return ''\n return self.__dict__[alt]\n\n def dump_help(self, export='plain', save=False):\n \"\"\"dump help document for setting classes\"\"\"\n rows = []\n title = 'Setting class <{:s}>'.format(self.__class__.__name__)\n table = Tab(export=export, title=title)\n\n for opt in sorted(self.doc_help):\n if hasattr(self, opt):\n descr = self.doc_help[opt]\n rows.append([opt, descr, self.get_value(opt)])\n else:\n warn_msg = 'Setting object {:s} has no \\'{:s}\\' option. Correct in doc_help.'.format(self.__name__, opt)\n print(warn_msg)\n table.add_rows(rows, header=False) # use guess_header()\n\n ext = 'txt'\n if export == 'latex':\n ext = 'tex'\n\n results = table.draw()\n\n if save is False:\n print(results)\n else:\n filename = 'settings_help' + '.' + ext\n try:\n f=open(filename, 'w')\n f.write(table)\n f.close()\n except IOError:\n print(results)\n print('Error saving settings help to file')\n\n @cached\n def doc_help(self):\n descriptions = {}\n return descriptions\n","sub_path":"andes/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"415462748","text":"#!/usr/bin/env python3\nimport os\nfrom PIL import Image\n\n\ndef convert_images():\n\n for name_file in os.listdir(\"images\"):\n if name_file.endswith(\"48dp\"):\n image_file = Image.open(\"images/\"+name_file, mode='r')\n image_file.rotate(90).resize((128, 128)).convert(\n \"RGB\").save(\"opt/icons/\"+name_file, \"JPEG\")\n\n\ndef main():\n convert_images()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Coursera/Google/PIL/convert_images.py","file_name":"convert_images.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"484219077","text":"# STANDARD LIB\nfrom __future__ import print_function\n\nimport logging\nimport sys\nimport time\n\n# THIRD PARTY\nfrom django.db import DataError\nfrom django.db.migrations.operations.base import Operation\nfrom google.appengine.api.datastore import Delete, Query, Get, Key, Put, RunInTransaction\nfrom google.appengine.api import datastore_errors\nfrom google.appengine.runtime import DeadlineExceededError\n\n# DJANGAE\nfrom djangae.db.backends.appengine.caching import remove_entities_from_cache_by_key\nfrom djangae.db.backends.appengine.commands import reserve_id\nfrom djangae.utils import retry\nfrom . 
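`convert_images.py` above chains rotate, resize, convert, and save on every file whose name ends in `48dp`. The same pipeline with `pathlib` and a context manager that closes each file handle promptly (a sketch; the directory names follow the script, and creating the target directory is my addition):

```python
from pathlib import Path
from PIL import Image

SRC, DST = Path("images"), Path("opt/icons")

def convert_images():
    DST.mkdir(parents=True, exist_ok=True)   # not in the original script
    for src in SRC.iterdir():
        if src.name.endswith("48dp"):
            with Image.open(src) as im:
                im.rotate(90).resize((128, 128)).convert("RGB").save(
                    DST / src.name, "JPEG")
```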
import mapper_library\n\nfrom .constants import TASK_RECHECK_INTERVAL\nfrom .utils import clone_entity\n\n\nlogger = logging.getLogger(__name__)\nTESTING = 'test' in sys.argv\n\n\nclass DjangaeMigration(object):\n \"\"\" Base class to enable us to distinguish between Djangae migrations and Django migrations.\n \"\"\"\n pass\n\n\nclass BaseEntityMapperOperation(Operation, DjangaeMigration):\n \"\"\" Base class for operations which map over Datastore Entities, rather than Django model\n instances.\n \"\"\"\n\n reversible = False\n reduces_to_sql = False\n\n def __init__(self, *args, **kwargs):\n self.uid = kwargs.pop(\"uid\", \"\")\n self.shard_count = kwargs.pop(\"shard_count\", None)\n self.entities_per_task = kwargs.pop(\"entities_per_task\", None)\n self.queue = kwargs.pop(\"queue\", None)\n self.skip_errors = kwargs.pop(\"skip_errors\", False)\n super(BaseEntityMapperOperation, self).__init__(*args, **kwargs)\n\n def state_forwards(self, app_label, state):\n \"\"\" As all Djangae migrations are only supplements to the Django migrations, we don't need\n to do any altering of the model state.\n \"\"\"\n pass\n\n def _print(self, *objects):\n if not TESTING:\n print(*objects)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n # Django's `migrate` command writes to stdout without a trailing line break, which means\n # that unless we print a blank line our first print statement is on the same line\n self._print(\"\") # yay\n\n self.identifier = self._get_identifier(app_label, schema_editor, from_state, to_state)\n if self.uid:\n self.identifier = \"{}.{}\".format(self.uid, self.identifier)\n\n self._set_map_kind(app_label, schema_editor, from_state, to_state)\n self._pre_map_hook(app_label, schema_editor, from_state, to_state)\n self.namespace = schema_editor.connection.settings_dict.get(\"NAMESPACE\")\n\n if mapper_library.mapper_exists(self.identifier, self.namespace):\n self._wait_until_task_finished()\n return\n\n self._print(\"Deferring migration operation task for %s\" % self.identifier)\n self._start_task()\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n raise NotImplementedError(\"Erm...? Help?!\")\n\n def _wait_until_task_finished(self):\n if mapper_library.is_mapper_finished(self.identifier, self.namespace):\n self._print(\"Task for migration operation '%s' already finished. Skipping.\" % self.identifier)\n return\n\n while mapper_library.is_mapper_running(self.identifier, self.namespace):\n self._print(\"Waiting for migration operation '%s' to complete.\" % self.identifier)\n time.sleep(TASK_RECHECK_INTERVAL)\n\n self._print(\"Migration operation '%s' completed!\" % self.identifier)\n\n def _start_task(self):\n assert not mapper_library.is_mapper_running(self.identifier, self.namespace), \"Migration started by separate thread?\"\n\n query = Query(self.map_kind, namespace=self.namespace)\n return mapper_library.start_mapping(\n self.identifier, query, self, operation_method=\"_wrapped_map_entity\",\n shard_count=self.shard_count, entities_per_task=self.entities_per_task,\n queue=self.queue\n )\n\n def _wrapped_map_entity(self, entity):\n \"\"\" Wrapper for self._map_entity which removes the entity from Djangae's cache. 
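`database_forwards` above blocks the migration while a previously started mapper is still running, polling at `TASK_RECHECK_INTERVAL`. Reduced to its shape, the wait loop is just this; the timeout is my addition, since the original waits indefinitely:

```python
import time

def wait_until(finished, interval=5.0, timeout=600.0):
    """Poll finished() every `interval` seconds until it returns True."""
    deadline = time.monotonic() + timeout
    while not finished():
        if time.monotonic() > deadline:
            raise TimeoutError("mapper did not finish in time")
        time.sleep(interval)
```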
\"\"\"\n\n # TODO: Note that other threads (from the general application running) could also be\n # modifying the entity, and that we're not using Djangae's transaction managers for our\n # stuff here.\n\n remove_entities_from_cache_by_key([entity.key()], self.namespace)\n try:\n retry(self._map_entity, entity)\n except DeadlineExceededError:\n # This is (probably) not an error with the individual entity, but more likey that the\n # task has tried to process too many entities. Either way, we always re-raise it so\n # that the mapper library can deal with it\n raise\n except Exception:\n if self.skip_errors:\n logger.exception(\n \"Error processing operation %s for entity %s. Skipping.\",\n self.identifier, entity.key()\n )\n else:\n raise\n if entity.key():\n # Assuming the entity hasn't been deleted and/or it's key been wiped...\n remove_entities_from_cache_by_key([entity.key()], self.namespace)\n\n\n ##############################################################################################\n # METHODS FOR SUBCLASSES TO IMPLEMENT\n ##############################################################################################\n\n def _pre_map_hook(self, app_label, schema_editor, from_state, to_state):\n \"\"\" A hook for subclasses to do anything that needs to be done before the mapping starts\n but which cannot be done in __init__ due to the need for the schema_editor/state/etc.\n \"\"\"\n pass\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n \"\"\" Return an ID for self.identifier, which must be a string which uniquely identifies this operation\n across the entire site. It must be able to fit in a Datastore string property.\n This will likely need to use app_label combined with values passed to __init__.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses of EntityMapperOperation must implement _get_identifier\"\n )\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n \"\"\" Set an attribute 'map_kind' of the 'kind' of Datastore Entities to be mapped over. \"\"\"\n raise NotImplementedError(\n \"Subclasses of EntityMapperOperation must implement _set_map_kind\"\n )\n\n def _map_entity(self, entity):\n \"\"\" Hook for subclasses to implement. This is called for every Entity and should do\n whatever data manipulation is necessary. 
Note that whatever you do to the entity\n must be done transactionally; this is not wrapped in a transaction.\n \"\"\"\n raise NotImplementedError(\"Subclasses of EntityMapperOperation must implement _map_entity\")\n\n\nclass AddFieldData(BaseEntityMapperOperation):\n\n def __init__(self, model_name, name, field, **kwargs):\n self.model_name = model_name\n self.name = name\n self.field = field\n super(AddFieldData, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n identifier = \"%s.%s.%s:%s\" % (\n app_label, self.model_name, self.__class__.__name__, self.name\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.model_name)\n kind = model._meta.db_table\n self.map_kind = kind\n\n def _map_entity(self, entity):\n column_name = self.field.db_column or self.name\n # Call get_default() separately for each entity, in case it's a callable like timezone.now\n value = self.field.get_default()\n\n def txn(entity):\n entity = Get(entity.key())\n entity[column_name] = value\n Put(entity)\n\n RunInTransaction(txn, entity)\n\n\nclass RemoveFieldData(BaseEntityMapperOperation):\n\n def __init__(self, model_name, name, field, **kwargs):\n self.model_name = model_name\n self.name = name\n self.field = field\n super(RemoveFieldData, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n identifier = \"%s.%s.%s:%s\" % (\n app_label, self.model_name, self.__class__.__name__, self.name\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.model_name)\n kind = model._meta.db_table\n self.map_kind = kind\n\n def _map_entity(self, entity):\n column_name = self.field.db_column or self.name\n\n def txn(entity):\n entity = Get(entity.key())\n try:\n del entity[column_name]\n except KeyError:\n return\n Put(entity)\n\n RunInTransaction(txn, entity)\n\n\nclass CopyFieldData(BaseEntityMapperOperation):\n\n def __init__(self, model_name, from_column_name, to_column_name, **kwargs):\n self.model_name = model_name\n self.from_column_name = from_column_name\n self.to_column_name = to_column_name\n super(CopyFieldData, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n identifier = \"%s.%s.%s:%s.%s\" % (\n app_label, self.model_name, self.__class__.__name__,\n self.from_column_name, self.to_column_name\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.model_name)\n kind = model._meta.db_table\n self.map_kind = kind\n\n def _map_entity(self, entity):\n\n def txn(entity):\n entity = Get(entity.key())\n try:\n entity[self.to_column_name] = entity[self.from_column_name]\n except KeyError:\n return\n Put(entity)\n\n RunInTransaction(txn, entity)\n\n\nclass DeleteModelData(BaseEntityMapperOperation):\n\n def __init__(self, model_name, **kwargs):\n self.model_name = model_name\n super(DeleteModelData, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n identifier = \"%s.%s:%s\" % (\n app_label, self.model_name, self.__class__.__name__\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.model_name)\n kind = model._meta.db_table\n 
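Operations like AddFieldData below are meant to sit alongside the schema-only Django operations in a migration file and backfill the entity data. A hedged usage sketch (app name, migration id, field and tuning values are all hypothetical):

    from django.db import migrations, models

    class Migration(migrations.Migration):
        dependencies = [('myapp', '0001_initial')]

        operations = [
            migrations.AddField('mymodel', 'is_active', models.BooleanField(default=True)),
            AddFieldData(
                'mymodel', 'is_active', models.BooleanField(default=True),
                uid='0002', shard_count=8, entities_per_task=100, queue='migrations',
            ),
        ]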
self.map_kind = kind\n\n def _map_entity(self, entity):\n try:\n Delete(entity.key())\n except datastore_errors.EntityNotFoundError:\n return\n\n\nclass CopyModelData(BaseEntityMapperOperation):\n \"\"\" Copies entities from one entity kind to another. \"\"\"\n\n def __init__(\n self, model_name, to_app_label, to_model_name,\n overwrite_existing=False, **kwargs\n ):\n self.model_name = model_name\n self.to_app_label = to_app_label\n self.to_model_name = to_model_name\n self.overwrite_existing = overwrite_existing\n super(CopyModelData, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n identifier = \"%s.%s.%s:%s.%s\" % (\n app_label, self.model_name, self.__class__.__name__,\n self.to_app_label, self.to_model_name\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n \"\"\" We need to map over the entities that we're copying *from*. \"\"\"\n model = to_state.apps.get_model(app_label, self.model_name)\n kind = model._meta.db_table\n self.map_kind = kind\n\n def _pre_map_hook(self, app_label, schema_editor, from_state, to_state):\n to_model = to_state.apps.get_model(self.to_app_label, self.to_model_name)\n self.to_kind = to_model._meta.db_table\n\n def _map_entity(self, entity):\n new_key = Key.from_path(self.to_kind, entity.key().id_or_name(), namespace=self.namespace)\n\n def txn():\n try:\n existing = Get(new_key)\n except datastore_errors.EntityNotFoundError:\n existing = None\n if existing and not self.overwrite_existing:\n return\n if isinstance(entity.key().id_or_name(), (int, long)):\n reserve_id(self.to_kind, entity.key().id_or_name(), self.namespace)\n new_entity = clone_entity(entity, new_key)\n Put(new_entity)\n\n RunInTransaction(txn)\n\n\nclass CopyModelDataToNamespace(BaseEntityMapperOperation):\n \"\"\" Copies entities from one Datastore namespace to another. \"\"\"\n\n def __init__(\n self, model_name, to_namespace, to_app_label=None, to_model_name=None,\n overwrite_existing=False, **kwargs\n ):\n self.model_name = model_name\n self.to_namespace = to_namespace\n self.to_app_label = to_app_label\n self.to_model_name = to_model_name\n self.overwrite_existing = overwrite_existing\n super(CopyModelDataToNamespace, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n to_app_label = self.to_app_label or app_label\n to_model_name = self.to_model_name or self.model_name\n identifier = \"%s.%s.%s:%s.%s.%s\" % (\n app_label, self.model_name, self.__class__.__name__, self.to_namespace, to_app_label,\n to_model_name\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n \"\"\" We need to map over the entities that we're copying *from*. 
\"\"\"\n model = to_state.apps.get_model(app_label, self.model_name)\n self.map_kind = model._meta.db_table\n\n def _pre_map_hook(self, app_label, schema_editor, from_state, to_state):\n to_app_label = self.to_app_label or app_label\n to_model_name = self.to_model_name or self.model_name\n to_model = to_state.apps.get_model(to_app_label, to_model_name)\n self.to_kind = to_model._meta.db_table\n\n def _map_entity(self, entity):\n new_key = Key.from_path(\n self.to_kind, entity.key().id_or_name(), namespace=self.to_namespace\n )\n\n parent = entity.parent()\n if parent:\n # If the entity has an ancestor then we need to make sure that that ancestor exists in\n # the new namespace as well\n new_parent_key = Key.from_path(\n parent.kind(), parent.is_or_name(), namespace=self.to_namespace\n )\n new_parent_exists = Get([new_parent_key])[0]\n if not new_parent_exists:\n raise DataError(\n \"Trying to copy entity with an ancestor (%r) to a new namespace but the \"\n \"ancestor does not exist in the new namespace. Copy the ancestors first.\"\n % entity.key()\n )\n\n def txn():\n existing = Get([new_key])[0]\n if existing and not self.overwrite_existing:\n return\n if isinstance(entity.key().id_or_name(), (int, long)):\n reserve_id(self.to_kind, entity.key().id_or_name(), self.to_namespace)\n new_entity = clone_entity(entity, new_key)\n Put(new_entity)\n\n RunInTransaction(txn)\n\n\nclass MapFunctionOnEntities(BaseEntityMapperOperation):\n \"\"\" Operation for calling a custom function on each entity of a given model. \"\"\"\n\n def __init__(self, model_name, function, **kwargs):\n self.model_name = model_name\n self.function = function\n super(MapFunctionOnEntities, self).__init__(**kwargs)\n\n def _get_identifier(self, app_label, schema_editor, from_state, to_state):\n identifier = \"%s.%s.%s:%s\" % (\n app_label, self.model_name, self.__class__.__name__, self.function.__name__\n )\n return identifier\n\n def _set_map_kind(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.model_name)\n kind = model._meta.db_table\n self.map_kind = kind\n\n def _map_entity(self, entity):\n self.function(entity)\n","sub_path":"djangae/db/migrations/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":16558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"425754248","text":"import random\nimport time\nfrom vk_api.bot_longpoll import VkBotEventType\n\nfrom utils_base import get_base\nfrom vk_bot.settings import *\nimport requests\nimport vk_api\nfrom vk_bot.polls import MyVkBotLongPoll\n\n\nclass ChatBot:\n def __init__(self):\n self.token = TOKEN\n self.vk_session = vk_api.VkApi(token=self.token)\n self.vk = self.vk_session.get_api()\n self.long = MyVkBotLongPoll(self.vk_session, ID_GROUP)\n self.help = 'Я могу присылать вам все новые новости из группы, просто включите меня!'\n\n def send_message(self, chat_id, message, attachment: str = ''):\n self.vk.messages.send(chat_id=chat_id, random_id=random.getrandbits(32), message=message,\n keyboard=open('chat_bot.json', \"r\", encoding=\"UTF-8\").read(), attachment=attachment)\n\n def commands(self, event):\n if event.message.text.lower() == '!help' or event.message.text.lower() == '!помощь':\n self.send_message(chat_id=event.chat_id, message=self.help)\n elif 'включить рассылку' in event.message.text.lower():\n with get_base(True) as base:\n id_ = base.execute(\"\"\"SELECT * FROM chat_vk WHERE chat_id = ?;\"\"\", 
(event.chat_id,)).fetchall()\n if id_:\n base.execute(\"\"\"UPDATE chat_vk SET flag = 1 WHERE chat_id = ?;\"\"\", (event.chat_id,))\n else:\n base.execute(\"\"\"INSERT INTO chat_vk (id, chat_id, flag)\n VALUES((SELECT id FROM chat_vk ORDER BY id DESC LIMIT 1) + 1, ?, ?);\"\"\",\n (event.chat_id, 1))\n self.send_message(chat_id=event.chat_id, message='Рассылка включена!')\n elif 'выключить рассылку' in event.message.text.lower():\n with get_base(True) as base:\n id_ = base.execute(\"\"\"SELECT * FROM chat_vk WHERE chat_id = ?;\"\"\", (event.chat_id,)).fetchall()\n if id_:\n base.execute(\"\"\"UPDATE chat_vk SET flag = 0 WHERE chat_id = ?;\"\"\", (event.chat_id,))\n self.send_message(chat_id=event.chat_id, message='Рассылка выключена!')\n else:\n self.send_message(chat_id=event.chat_id, message='Вы не подключали рассылку!')\n\n def start(self):\n try:\n while True:\n for event in self.long.listen():\n if event.type == VkBotEventType.MESSAGE_NEW:\n if event.message.peer_id != event.message.from_id:\n if event.message.text:\n self.commands(event)\n except requests.exceptions.ReadTimeout:\n time.sleep(10)\n except vk_api.AuthError as error_msg:\n print('ERROR:', error_msg)\n\n\nif __name__ == '__main__':\n bot = ChatBot()\n bot.start()\n","sub_path":"vk_bot/chat_bot.py","file_name":"chat_bot.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"558223","text":"#coding=utf-8\r\nfrom web.models import Node\r\nimport os\r\nimport threading\r\nimport Queue\r\n\r\n\r\n# 存放能ping+ssh通的节点\r\nq1 = Queue.Queue()\r\n# 存放不能ping+ssh通的节点\r\nq2 = Queue.Queue()\r\n\r\ndef my_worker(host):\r\n for a in host:\r\n tt = os.popen('sudo ping -c1 -w1 '+a+' &>/dev/null; echo $?').readlines()[0].replace('\\n','')\r\n if tt == '0':\r\n tt_1 = os.popen('sudo ssh '+a+' ls &>/dev/null; echo $?').readlines()[0].replace('\\n','')\r\n if tt_1 == '0':\r\n q1.put(a)\r\n else:\r\n q2.put(a)\r\n else:\r\n q2.put(a)\r\n\r\n\r\ndef course():\r\n course_obj = []\r\n host_all_1 = []\r\n hosts = []\r\n host_1 = []\r\n host_2 = []\r\n host_3 = []\r\n host_4 = []\r\n allow = []\r\n deny = []\r\n host_all = Node.objects.all().values('host_name')\r\n for x in host_all:\r\n hosts.append(x['host_name'].encode())\r\n\r\n nums=len(hosts)\r\n for a in range(0,nums,4):\r\n host_1.append(hosts[a])\r\n\r\n for b in range(1,nums,4):\r\n host_2.append(hosts[b])\r\n\r\n for c in range(2,nums,4):\r\n host_3.append(hosts[c])\r\n\r\n for d in range(3,nums,4):\r\n host_4.append(hosts[d])\r\n\r\n host_all_1.append(host_1)\r\n host_all_1.append(host_2)\r\n host_all_1.append(host_3)\r\n host_all_1.append(host_4)\r\n\r\n #��启线程\r\n for y in host_all_1:\r\n if len(y) > 0:\r\n pp=threading.Thread(target=my_worker,args=(y,))\r\n pp.start()\r\n course_obj.append(pp)\r\n\r\n #等待线程执行完毕\r\n for zz in course_obj:\r\n zz.join()\r\n\r\n #取结果:allow\r\n while not q1.empty():\r\n allow.append(q1.get())\r\n # 取结果:deny\r\n while not q2.empty():\r\n deny.append(q2.get())\r\n\r\n return (allow,deny)","sub_path":"includes/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"196708047","text":"import operator\nfrom docx.shared import Inches, Mm, Twips\nfrom docx.oxml.text.parfmt import CT_PPr, CT_Ind\nfrom docx.text.paragraph import Paragraph\n\nVALID_STYLES = [\n 'JACoW_Abstract_Heading',\n 'JACoW_Author List',\n 'JACoW_Body Text Indent',\n 
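The course.py record above hand-rolls four worker threads and two result queues to probe hosts with ping and ssh. The same reachability check can be sketched with concurrent.futures, which handles the thread and result bookkeeping; this is an alternative sketch using the same shell commands, not the module's actual API:

    import os
    from concurrent.futures import ThreadPoolExecutor

    def is_reachable(host):
        ping = os.popen('sudo ping -c1 -w1 ' + host + ' &>/dev/null; echo $?').read().strip()
        if ping != '0':
            return host, False
        ssh = os.popen('sudo ssh ' + host + ' ls &>/dev/null; echo $?').read().strip()
        return host, ssh == '0'

    def course_pooled(hosts):
        allow, deny = [], []
        with ThreadPoolExecutor(max_workers=4) as pool:
            for host, ok in pool.map(is_reachable, hosts):
                (allow if ok else deny).append(host)
        return allow, deny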
'JACoW_Bulleted List',\n 'JACoW_Numbered list',\n 'JACoW_Paper Title',\n 'JACoW_Reference #10 onwards',\n 'JACoW_Reference #1-9 when >= 10 Refs',\n 'JACoW_Reference when <= 9 Refs',\n 'JACoW_Reference Italics',\n 'JACoW_Reference url_doi',\n 'JACoW_Third-level Heading',\n 'JACoW_Section Heading',\n 'JACoW_Subsection Heading'\n]\n\nVALID_NON_JACOW_STYLES = [\n 'Figure Caption',\n 'Figure Caption Multi Line',\n 'Table Caption',\n 'Table Caption Multi Line',\n 'Caption',\n 'Caption Multi Line'\n]\n\n# These are in the jacow templates so may be in docs created from them\n# Caption and Normal for table title and figure title\n# 'Body Text Indent' instead of 'JACoW_Body Text Indent' in a few places\n# 'Heading 3' for Acronyms header\nOTHER_VALID_STYLES = ['Body Text Indent', 'Normal', 'Caption', 'Heading 3']\n\nEXTRA_RULES = [\n '''\n

The latest JACoW Template must be used.
\nStandard JACoW Styles must be embedded in the document.

\n '''\n]\nHELP_INFO = 'CSEJACoWStyles'\nEXTRA_INFO = {\n 'title': 'Style Breakdown',\n 'headers': 'StyleEmbedded in Document',\n 'columns': ['style', 'style_ok']\n}\n\n\n# check if th\ndef check_jacow_styles(doc):\n result = []\n jacow_styles = get_jacow_styles(doc)\n\n for valid_style in VALID_STYLES:\n result.append({'style': valid_style, 'style_ok': valid_style in jacow_styles})\n\n return result\n\n\ndef get_jacow_styles(doc):\n return [s.name for s in doc.styles if s.name.startswith('JACoW')]\n\n\ndef get_paragraph_style_exceptions(doc):\n jacow_styles = get_jacow_styles(doc)\n exceptions = []\n for i, p in enumerate(doc.paragraphs):\n if (\n not p.text.strip() == ''\n and p.style.name not in jacow_styles\n and p.style.name not in OTHER_VALID_STYLES\n ):\n exceptions.append(p)\n return exceptions\n\n\ndef get_paragraph_alignment(paragraph):\n # alignment style can be overridden by more local definition\n alignment = paragraph.style.paragraph_format.alignment\n if alignment is None and paragraph.style.base_style is not None and \\\n paragraph.style.base_style.paragraph_format.alignment is not None:\n alignment = paragraph.style.base_style.paragraph_format.alignment\n\n if paragraph.alignment is not None:\n alignment = paragraph.alignment\n elif paragraph.paragraph_format.alignment is not None:\n alignment = paragraph.paragraph_format.alignment\n\n if alignment:\n return alignment._member_name\n else:\n return None\n\n\ndef get_indents(style_format):\n first_line_indent, hanging_indent, left_indent = None, None, None\n for i in style_format.element.iterchildren():\n if isinstance(i, CT_PPr):\n for j in i.iterchildren():\n if isinstance(j, CT_Ind):\n first_line_indent, hanging_indent, left_indent = j.firstLine, j.hanging, j.left\n return first_line_indent, hanging_indent, left_indent\n\n\ndef get_paragraph_space(paragraph):\n # paragraph formatting style can be overridden by more local definition\n before, after = \\\n paragraph.style.paragraph_format.space_before, \\\n paragraph.style.paragraph_format.space_after\n\n first_line_indent, hanging_indent, left_indent = get_indents(paragraph.style.paragraph_format)\n\n if paragraph.style.base_style is not None:\n first_line, hanging, left = get_indents(paragraph.style.base_style.paragraph_format)\n if before is None:\n before = paragraph.style.base_style.paragraph_format.space_before\n if after is None:\n after = paragraph.style.base_style.paragraph_format.space_after\n if first_line_indent is None:\n first_line_indent = first_line\n if hanging_indent is None:\n hanging_indent = hanging\n if left_indent is None:\n left_indent = left\n\n if paragraph.paragraph_format.space_before is not None:\n before = paragraph.paragraph_format.space_before\n if paragraph.paragraph_format.space_after is not None:\n after = paragraph.paragraph_format.space_after\n first_line, hanging, left = get_indents(paragraph.paragraph_format)\n if first_line is not None:\n first_line_indent = first_line\n if hanging is not None:\n hanging_indent = hanging\n if left is not None:\n left_indent = left\n\n if before:\n before = before.pt\n if after:\n after = after.pt\n if first_line_indent:\n first_line_indent = first_line_indent.pt\n elif hanging_indent:\n first_line_indent = -hanging_indent.pt\n if hanging_indent:\n hanging_indent = hanging_indent.pt\n if left_indent:\n left_indent = left_indent.pt\n\n return before, after, first_line_indent, hanging_indent, left_indent\n\n\ndef get_style_font(paragraph, url):\n # use paragraph style if values set\n style = 
paragraph.style\n bold, italic, font_size, font_name, all_caps = style.font.bold, style.font.italic, style.font.size, style.font.name, style.font.all_caps\n if paragraph.style.base_style is not None:\n style = paragraph.style.base_style\n # if values not set, use base style\n if font_size is None:\n font_size = style.font.size\n if font_name is None:\n font_name = style.font.name\n if bold is None:\n bold = style.font.bold\n if italic is None:\n italic = style.font.italic\n if all_caps is None:\n all_caps = style.font.all_caps\n\n # TODO get distinct list\n sections = [paragraph]\n if isinstance(paragraph, Paragraph):\n sections = paragraph.runs\n\n for r in sections:\n text = r.text.strip()\n if not text:\n continue\n\n if 'has_url' in url and url['has_url']:\n found = False\n for s in url['starts']:\n if text.startswith(s):\n found = True\n # ignore font if url found at start\n if found:\n continue\n\n if r.font is not None:\n if r.font.size is not None:\n font_size = r.font.size\n if r.font.name is not None:\n font_name = r.font.name\n if r.font.bold is not None:\n bold = r.font.bold\n if r.font.italic is not None:\n italic = r.font.italic\n if r.font.all_caps is not None:\n all_caps = r.font.all_caps\n if r.bold is not None:\n bold = r.bold\n if r.italic is not None:\n italic = r.italic\n\n if not font_size:\n font_size = 10.0\n # TODO find default size (from section ?)\n # styles = paragraph._parent._parent._parent._parent.styles\n else:\n font_size = font_size.pt\n\n if not font_name:\n font_name = 'Times new Roman'\n\n return bold, italic, font_size, font_name, all_caps\n\n\ndef get_style_details(p, url={}):\n space_before, space_after, first_line_indent, hanging_indent, left_indent = get_paragraph_space(p)\n bold, italic, font_size, font_name, all_caps = get_style_font(p, url)\n alignment = get_paragraph_alignment(p)\n return locals()\n\n\ndef get_compare(inp, relate, cut):\n ops = {'>': operator.gt,\n '<': operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=': operator.eq}\n return ops[relate](inp, cut)\n\n\n# TODO work out why two almost identical functions below\ndef check_style(p, compare, url={}):\n detail = get_style_details(p, url)\n # remove paragraph from dict returned since it is not json serialisable\n del detail['p']\n\n # use list from compare\n style_ok = True\n for key, value in compare.items():\n if key not in detail:\n continue\n elif key in ['space_before', 'space_after']:\n if isinstance(compare[key], list):\n result = detail[key] is not None and get_compare(detail[key], compare[key][0], compare[key][1])\n if not result:\n detail[key] = f\"{detail[key]} should be {' '.join(map(str, compare[key]))}\"\n else:\n result = any([detail[key] == compare[key], detail[key] is None and compare[key] == 0.0])\n if not result:\n detail[key] = f\"{detail[key]} should be {compare[key]}\"\n else:\n result = detail[key] == compare[key]\n if not result:\n detail[key] = f\"{detail[key]} should be {compare[key]}\"\n if not result:\n style_ok = False\n\n # if key not in compare, then change to NA\n for key, value in detail.items():\n if not key == 'all_caps' and key not in compare.keys():\n detail[key] = 'NA'\n return style_ok, detail\n\n\ndef check_style_detail(p, compare):\n detail = get_style_details(p)\n # remove paragraph from dict returned since it is not json serialisable\n del detail['p']\n\n # use list from compare\n style_ok = True\n for key, value in compare.items():\n if key not in detail:\n continue\n elif key in ['space_before', 'space_after']:\n if 
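get_compare above maps a relation string onto the corresponding operator function, so a compare dict can demand either an exact value or a bound. A small illustration (the title_compare spec is hypothetical, but its shape matches what check_style and check_style_detail consume: plain values require equality, [relation, bound] pairs go through get_compare):

    assert get_compare(6.0, '>=', 3.0)
    assert not get_compare(2.0, '>=', 3.0)

    title_compare = {
        'space_before': 0.0,          # must equal 0.0 (or be None)
        'space_after': ['>=', 3.0],   # must satisfy >= 3.0
        'font_size': 14.0,
        'bold': True,
        'alignment': 'CENTER',
    }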
isinstance(compare[key], list):\n result = detail[key] is not None and get_compare(detail[key], compare[key][0], compare[key][1])\n if not result:\n detail[key] = f\"{detail[key]} should be {' '.join(map(str, compare[key]))}\"\n else:\n result = any([detail[key] == compare[key], detail[key] is None and compare[key] == 0.0])\n if not result:\n detail[key] = f\"{detail[key]} should be {compare[key]}\"\n else:\n result = detail[key] == compare[key]\n if not result:\n detail[key] = f\"{detail[key]} should be {compare[key]}\"\n if not result:\n style_ok = False\n detail['style_ok'] = style_ok\n\n # if key not in compare, then change to NA\n for key, value in detail.items():\n if key not in ['all_caps', 'style_ok'] and key not in compare.keys():\n detail[key] = 'NA'\n return detail\n\n\ndef get_style_summary(doc):\n jacow_styles = check_jacow_styles(doc)\n return {\n 'title': 'JACoW Styles',\n 'extra_rules': EXTRA_RULES,\n 'help_info': HELP_INFO,\n 'extra_info': EXTRA_INFO,\n 'ok': all([tick['style_ok'] for tick in jacow_styles]),\n 'message': 'Styles issues',\n 'details': jacow_styles,\n 'anchor': 'styles'\n }\n","sub_path":"src/jacowvalidator/docutils/styles.py","file_name":"styles.py","file_ext":"py","file_size_in_byte":10767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"180577293","text":"#!/usr/bin/env python3\n\n# Copyright 2014 Brett Slatkin, Pearson Education Inc.\n#\n# Udostępniono na licencji Apache w wersji 2.0 (\"Licencja\").\n# Tego pliku można używać jedynie zgodnie z warunkami Licencji.\n# Treść Licencji znajdziesz na stronie:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# O ile obowiązujące prawo nie stanowi inaczej lub czegoś innego nie\n# uzgodniono w formie pisemnej, oprogramowanie objęte Licencją jest\n# dostarczane w stanie, w jakim jest (wersja \"AS IS\"), BEZ JAKIEJKOLWIEK\n# GWARANCJI, ani wyrażonej otwarcie, ani domyślnej. 
Dokładne zasady\n# i warunki Licencji znajdziesz w jej treści.\n\n# Przygotowania mające na celu odtworzenie środowiska użytego w książce.\nimport logging\nfrom pprint import pprint\nfrom sys import stdout as STDOUT\n\n\n# Przykład 1.\nhandle = open('/tmp/random_data.txt', 'w', encoding='utf-8')\nhandle.write('success\\nand\\nnew\\nlines')\nhandle.close()\nhandle = open('/tmp/random_data.txt') # Może spowodować zgłoszenie wyjątku IOError.\ntry:\n data = handle.read() # Może spowodować zgłoszenie wyjątku UnicodeDecodeError.\nfinally:\n handle.close() # Zawsze wykonywane po bloku try.\n\n\n# Przykład 2.\nimport json\n\ndef load_json_key(data, key):\n try:\n result_dict = json.loads(data) # Może spowodować zgłoszenie wyjątku ValueError.\n except ValueError as e:\n raise KeyError from e\n else:\n return result_dict[key] # Może spowodować zgłoszenie wyjątku KeyError.\n\n# Dekodowanie JSON zakończone powodzeniem.\nassert load_json_key('{\"foo\": \"bar\"}', 'foo') == 'bar'\ntry:\n load_json_key('{\"foo\": \"bar\"}', 'nie istnieje')\n assert False\nexcept KeyError:\n pass # Oczekiwane.\n\n# Dekodowanie JSON zakończone niepowodzeniem.\ntry:\n load_json_key('{\"foo\": błędne dane', 'foo')\n assert False\nexcept KeyError:\n pass # Oczekiwane.\n\n\n# Przykład 3.\nimport json\nUNDEFINED = object()\n\ndef divide_json(path):\n handle = open(path, 'r+') # Może spowodować zgłoszenie wyjątku IOError.\n try:\n data = handle.read() # Może spowodować zgłoszenie wyjątku UnicodeDecodeError.\n op = json.loads(data) # May raise ValueError\n value = (\n op['numerator'] /\n op['denominator']) # Może spowodować zgłoszenie wyjątku ZeroDivisionError.\n except ZeroDivisionError as e:\n return UNDEFINED\n else:\n op['result'] = value\n result = json.dumps(op)\n handle.seek(0)\n handle.write(result) # Może spowodować zgłoszenie wyjątku IOError.\n return value\n finally:\n handle.close() # Zawsze wykonywane.\n\n# Wszystko działa.\ntemp_path = '/tmp/random_data.json'\nhandle = open(temp_path, 'w')\nhandle.write('{\"numerator\": 1, \"denominator\": 10}')\nhandle.close()\nassert divide_json(temp_path) == 0.1\n\n# Dzielenie przez zero.\nhandle = open(temp_path, 'w')\nhandle.write('{\"numerator\": 1, \"denominator\": 0}')\nhandle.close()\nassert divide_json(temp_path) is UNDEFINED\n\n# Błąd w trakcie dekodowania danych JSON.\nhandle = open(temp_path, 'w')\nhandle.write('{\"numerator\": 1 błędne dane')\nhandle.close()\ntry:\n divide_json(temp_path)\n assert False\nexcept ValueError:\n pass # Oczekiwane.\n","sub_path":"Python/Python - efektywny Python/item_13.py","file_name":"item_13.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"428608719","text":"import qrcode\nfrom PIL import Image\nfrom qrcode.main import QRCode\n\nqr = qrcode.QRCode(version=1,\nerror_correction=qrcode.constants.ERROR_CORRECT_L,\nbox_size =20,\nborder =2)\n\nqr.add_data(\"https://github.com/riddhiisingh\")\nqr.make(fit=True)\n\nimg = qr.make_image(fill_color=\"black\", back_color=\"white\")\n\nimg.save(\"githubprofile.png\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"300101158","text":"#!/usr/bin/env python3\n\n\"\"\"wcount.py: count words from an Internet file.\n\n__author__ = \"Norman\"\n__pkuid__ = \"1700011727\"\n__email__ = \"1700011727@pku.edu.cn\"\n\"\"\"\n\nimport sys\nfrom urllib.request import 
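The item_13.py record above (its comments are in Polish, from the Polish edition of Effective Python) demonstrates the try/except/else/finally pattern. The core of its second example, distilled into an English-commented sketch:

    import json

    def load_json_key(data, key):
        try:
            result_dict = json.loads(data)   # may raise ValueError
        except ValueError as e:
            raise KeyError(key) from e       # translate decode failures
        else:
            return result_dict[key]          # runs only when no exception; may raise KeyError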
urlopen\n\n\ndef wcount(lines, topn=10):\n \"\"\"count words from lines of text string, then sort by their counts\n in reverse order, output the topn (word count), each in one line. \n \"\"\"\n lines = lines.lower()\n text = ''\n for ch in lines:\n if (ord(ch) >= 65 and ord(ch) <= 90) or (ord(ch) >= 97 and ord(ch) <= 122):\n text = text+ch\n else:\n text = text+' '\n text = text.split()\n dictionary = {}\n for word in text:\n if word not in dictionary:\n dictionary[word] = text.count(word) \n else: \n pass\n table = zip(dictionary.values(),dictionary.keys())\n table = sorted(table)\n table.reverse()\n for i in range(topn):\n (a,b) = table[i]\n print('{} {}\\n'.format(a,b))\n \nif __name__ == '__main__':\n\n if len(sys.argv) == 1:\n print('Usage: {} url [topn]'.format(sys.argv[0]))\n print(' url: URL of the txt file to analyze ')\n print(' topn: how many (words count) to output. If not given, will output top 10 words')\n sys.exit(1)\n\n try:\n topn = 10\n if len(sys.argv) == 3:\n topn = int(sys.argv[2])\n except ValueError:\n print('{} is not a valid topn int number'.format(sys.argv[2]))\n sys.exit(1)\n\n try:\n with urlopen(sys.argv[1]) as f:\n contents = f.read()\n lines = contents.decode()\n wcount(lines, topn)\n except Exception as err:\n print(err)\n sys.exit(1)\n","sub_path":"pyassign3/wcount.py","file_name":"wcount.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"38228324","text":"import argparse\r\nfrom utils import *\r\nfrom model4 import *\r\nfrom data4 import *\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport torch\r\nfrom torch.autograd import Variable\r\nimport torch.nn as nn\r\nimport math\r\n\r\n\r\n\r\n# month * house => month * batch_num * batch_size\r\ndef make_index_batch(train_index, batch_size):\r\n month_len, house_size = train_index.shape\r\n train_index = torch.LongTensor(train_index)\r\n index_list = []\r\n for i in range(month_len):\r\n index_month_list = []\r\n for j in range(math.ceil(house_size / batch_size)):\r\n batch_start = j * batch_size\r\n batch_end = (j + 1) * batch_size\r\n # print('batch_start: ' + str(batch_start))\r\n # print('batch_end: ' + str(batch_end))\r\n if batch_end > house_size:\r\n batch_end = house_size\r\n batch_start = batch_end - batch_size\r\n index_month_list.append(train_index[i, batch_start:batch_end])\r\n index_month_list = torch.stack(index_month_list, 0)\r\n index_list.append(index_month_list)\r\n index_batch = torch.stack(index_list, 0).permute(1, 0, 2)\r\n return index_batch\r\n\r\n\r\ndef make_Y_from_index(labels, train_index):\r\n batch_num, month_len, batch_size = train_index.size()\r\n Y_train_batch = []\r\n for i in range(batch_num):\r\n Y_train_batch.append(labels[train_index[i]])\r\n Y_train_batch = torch.stack(Y_train_batch, 0)\r\n return Y_train_batch\r\n\r\n\r\n# ID,挂牌价,成交价,预测价,挂牌预测差,成交预测差\r\ndef price_str(val_predict, val_target, val_listprice):\r\n w_str = ''\r\n #print('val_predict: ' + str(val_predict.shape))\r\n #print('val_target: ' + str(val_target.shape))\r\n #print('val_listprice: ' + str(val_listprice.shape))\r\n seq_len, batch_size, label_size = val_predict.shape\r\n for j in range(seq_len):\r\n for k in range(batch_size):\r\n w_str += str(int(val_listprice[j, k, 1]))+', '+str(val_listprice[j, k, 0])+', '+str(val_target[j, k, 0]) + \\\r\n ', ' + str(val_predict[j, k, 0]) + ',' + str(abs(val_predict[j, k, 0]-val_target[j, k, 0])) + \\\r\n ', ' + str(abs(val_listprice[j, k, 0]-val_target[j, k, 0])) 
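The counting loop in wcount above is quadratic, since text.count is called inside a loop over every word; collections.Counter produces the same top-N output in a single pass. A minimal alternative sketch:

    import re
    from collections import Counter

    def wcount_fast(lines, topn=10):
        words = re.findall(r'[a-z]+', lines.lower())
        for word, count in Counter(words).most_common(topn):
            print('{} {}'.format(count, word))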
+ '\\n'\r\n return w_str\r\n\r\n\r\ndef main(args):\r\n # 参数本地化\r\n train_epoch = args['epoch']\r\n seq_len = args['seq_len']\r\n gc1_out_dim = args['gc1_out_dim']\r\n lstm_input_dim = args['lstm_input_size']\r\n meta_size = args['meta_size']\r\n batch_size = args['batch_size']\r\n update_len = args['update_len']\r\n device = args['device']\r\n # 数据读取\r\n if args['set_data']:\r\n adj, features, labels, listprice, train_index, test_index = \\\r\n load_data(path=args['data_path'], month_len=seq_len, house_size=args['house_size'])\r\n print('Data is generated.', flush=True)\r\n else:\r\n adj = np.load(args['data_path'] + 'adj.npy')\r\n features = np.load(args['data_path'] + 'features.npy')\r\n labels = np.load(args['data_path'] + 'labels.npy')\r\n listprice = np.load(args['data_path'] + 'listprice.npy')\r\n train_index = np.load(args['data_path'] + 'train_index.npy')\r\n test_index = np.load(args['data_path'] + 'test_index.npy')\r\n print('Data is loaded.', flush=True)\r\n whole_house_size = features.shape[0]\r\n feature_size = features.shape[1]\r\n hidden_dim = feature_size # 令hidden_dim与embedding的维度一致\r\n all_month = train_index.shape[0]+1\r\n house_size = int(whole_house_size/all_month)\r\n # 去除预训练已包含的部分\r\n train_index = train_index[-6:]\r\n test_index = test_index[-6:]\r\n print('adj: ' + str(adj.shape), flush=True)\r\n print('features: ' + str(features.shape), flush=True)\r\n print('labels: ' + str(labels.shape), flush=True)\r\n print('listprice: ' + str(listprice.shape), flush=True)\r\n print('train_index: ' + str(train_index.shape), flush=True)\r\n print('test_index: ' + str(test_index.shape), flush=True)\r\n print('***********************************************************', flush=True)\r\n # 结果输出文件设置\r\n result_file_path = 'result_prelifelong/'\r\n model_file_path = 'model_saved_prelifelong/'\r\n other_file_path = 'result_prelifelong/others/'\r\n # 如果目录不存在,则创建\r\n for output_path in [result_file_path, model_file_path, other_file_path]:\r\n if not os.path.isdir(output_path):\r\n os.makedirs(output_path)\r\n # 数据批处理\r\n train_index_batch = make_index_batch(train_index, batch_size)\r\n print(\"train_index_batch: \" + str(train_index_batch.shape), flush=True)\r\n test_index_batch = make_index_batch(test_index, house_size)\r\n print(\"test_index_batch: \" + str(test_index_batch.shape), flush=True)\r\n # tensor化\r\n train_index_batch = train_index_batch.to(device)\r\n test_index_batch = test_index_batch.to(device)\r\n adj = torch.tensor(adj).to(device)\r\n features = torch.tensor(features).to(device)\r\n labels = torch.tensor(labels).to(device)\r\n listprice = torch.tensor(listprice).to(device)\r\n\r\n # 模型训练\r\n for cur_month in range(1, 7):\r\n # 一个月对应一个模型model,均在本月的模型内进行参数更新;cur_month代表当前参加训练的最后一月\r\n # r_gcnLSTMs每次都从送入数据的第一个月开始训练,逐步扩张模型至cur_month长度\r\n # 根据update_len,当cur_month超过update_len时,每次只更新[cur_month-update_len: cur_month]月的参数\r\n if cur_month <= update_len:\r\n model_lstm_len = cur_month\r\n train_index_p = train_index_batch[:, 0: cur_month, :]\r\n test_index_p = test_index_batch[:, 0: cur_month, :]\r\n else:\r\n model_lstm_len = update_len\r\n train_index_p = train_index_batch[:, cur_month - model_lstm_len: cur_month, :]\r\n test_index_p = test_index_batch[:, cur_month - model_lstm_len: cur_month, :]\r\n\r\n #print('train_index_p: ' + str(train_index_p.shape))\r\n #print('test_index_p: ' + str(test_index_p.shape))\r\n Y_train_batch = make_Y_from_index(labels, train_index_p).to(device)\r\n Y_test_batch = make_Y_from_index(labels, test_index_p).to(device)\r\n lp_batch = 
make_Y_from_index(listprice, test_index_p).to(device)\r\n batch_num = train_index_batch.shape[0]\r\n #print('Y_train_batch: ' + str(Y_train_batch.shape))\r\n #print('Y_test_batch: ' + str(Y_test_batch.shape))\r\n #print('lp_batch: ' + str(lp_batch.shape))\r\n\r\n # 给定参数,使得经过GCN和lstm的数据维度并不发生变化\r\n '''\r\n model = r_gcn_1LSTMs(gcn_input_dim=feature_size, lstm_input_dim=feature_size, hidden_dim=hidden_dim,\r\n label_out_dim=1, Nodes=whole_house_size, meta_size=meta_size, all_month=all_month,\r\n month_len=model_lstm_len, layers=args['layers'], dropout=args['dropout']\r\n ).to(device)\r\n '''\r\n model = r_gcn2lv_1LSTMs(gcn_input_dim=feature_size, gc1_out_dim=gc1_out_dim, lstm_input_dim=feature_size,\r\n hidden_dim=hidden_dim, label_out_dim=1, meta_size=meta_size, all_month=all_month,\r\n month_len=model_lstm_len, layers=args['layers'], dropout=args['dropout']).to(device)\r\n # 预训练模型参数载入\r\n if cur_month == 1:\r\n static_model = torch.load('model_saved_staticlstm/static.pkl')\r\n model_dict = model.state_dict()\r\n # 已有参数全部继承,包括LSTM和各月GCN\r\n state_dict = {'glstm.0.'+str(k): v for k, v in static_model.items() if 'glstm.0.'+str(k) in model_dict.keys()}\r\n print(state_dict.keys(), flush=True)\r\n model_dict.update(state_dict)\r\n state_dict = {k: v for k, v in static_model.items() if k in model_dict.keys()}\r\n print(state_dict.keys(), flush=True)\r\n model_dict.update(state_dict)\r\n model.load_state_dict(model_dict)\r\n\r\n elif 1 < cur_month <= update_len: # 当前月在更新范围内\r\n # 参数继承\r\n old_model = torch.load(model_file_path + 'month' + str(cur_month - 1) + '.pkl')\r\n model_dict = model.state_dict()\r\n # 已有参数全部继承,包括LSTM和各月GCN\r\n state_dict = {k: v for k, v in old_model.items() if k in model_dict.keys()}\r\n print(state_dict.keys(), flush=True)\r\n model_dict.update(state_dict)\r\n\r\n # 该月GCN模型参数沿用其前一个月的\r\n new_dict = {k.replace('glstm.' + str(int(cur_month - 2)), 'glstm.' + str(int(cur_month - 1))): v for k, v in\r\n old_model.items() if 'glstm.' + str(int(cur_month - 2)) in k}\r\n model_dict.update(new_dict)\r\n\r\n model.load_state_dict(model_dict)\r\n elif cur_month > update_len: # 当前月超出更新范围\r\n # 参数继承\r\n old_model = torch.load(model_file_path + 'month' + str(cur_month - 1) + '.pkl')\r\n model_dict = model.state_dict()\r\n # 已有参数全部继承,包括LSTM和各月GCN\r\n state_dict = {k: v for k, v in old_model.items() if k in model_dict.keys()}\r\n print(state_dict.keys(), flush=True)\r\n model_dict.update(state_dict)\r\n # 各月GCN错位继承,即old_model中的glstm.1应是model中的glstm.0\r\n gcn_dict = {k.replace('glstm.' + str(get_layer(k)), 'glstm.' + str(get_layer(k) - 1)): v\r\n for k, v in old_model.items() if 'glstm.' 
in k and get_layer(k) > 0}\r\n print(gcn_dict.keys(), flush=True)\r\n model_dict.update(gcn_dict)\r\n model.load_state_dict(model_dict)\r\n\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'], weight_decay=args['weight_decay'])\r\n loss_criterion = nn.MSELoss()\r\n min_rmse = 10000\r\n w_str = ''\r\n with open(other_file_path + 'month' + str(cur_month) + '_price_list.csv', 'a+') as f:\r\n f.write('index, list, target, pre, pre-target, list-target\\n')\r\n \r\n # 暂定每个月的模型的训练周期相同\r\n for i in range(train_epoch):\r\n for b in range(batch_num):\r\n start_time = time.time()\r\n training_loss = []\r\n validation_losses = []\r\n model.train()\r\n optimizer.zero_grad() # 梯度置零\r\n new_embedding, out_price = model(adj, features, train_index_p[b])\r\n new_embedding = Variable(new_embedding.data, requires_grad=True)\r\n #features = new_embedding.to(device) # features相当于全局变量,每次都继承\r\n # 送入的是多个月的index\r\n #print('out_price: ' + str(out_price.shape))\r\n #print('Y_train_batch[b]: ' + str(Y_train_batch[b][cur_month-1:cur_month].shape))\r\n loss = loss_criterion(out_price, Y_train_batch[b]) # loss计算,pre与target\r\n loss.backward() # 反向传播计算\r\n optimizer.step() # 模型参数更新\r\n training_loss.append(loss.detach().cpu().numpy())\r\n avg_training_loss = sum(training_loss) / len(training_loss)\r\n print(\"Month:{} Epoch:{} Training loss:{}\".format(cur_month, i, avg_training_loss), flush=True)\r\n with open(result_file_path + 'month' + str(cur_month) + '_loss_error.txt', 'a+') as f:\r\n f.write(\"Month:{} Epoch:{} Training loss:{}\\n\".format(cur_month, i, avg_training_loss))\r\n with open(other_file_path + 'train_loss.txt', 'a+') as f:\r\n f.write(\"{}\\n\".format(avg_training_loss))\r\n\r\n # 对训练好的模型在测试集上进行评估\r\n with torch.no_grad():\r\n model.eval()\r\n _, out_test_price = model(adj, features, test_index_p[0])\r\n\r\n val_target = Y_test_batch[0].cpu().numpy()\r\n val_listprice = lp_batch[0].cpu().numpy()\r\n val_predict = out_test_price.detach().cpu().numpy()\r\n '''\r\n print('test_index_p[0][-1:]: '+str(test_index_p[0][-1:].shape))\r\n print('val_predict: '+str(val_predict.shape))\r\n print('val_listprice: '+str(val_listprice.shape))\r\n print('val_target: '+str(val_target.shape))\r\n '''\r\n mse, mae, rmse = score(val_predict, val_target)\r\n y_pre_error = pre_error(val_predict, val_target)\r\n if rmse < min_rmse:\r\n min_rmse = rmse\r\n output = val_predict\r\n torch.save(model.state_dict(), model_file_path + 'month' + str(cur_month) + '.pkl')\r\n w_str = price_str(val_predict, val_target, val_listprice)\r\n # features = new_embedding.to(device)\r\n end_time = time.time()\r\n cost_time = end_time - start_time\r\n print(\"Test MSE: {} MAE:{} RMSE: {} pre_error:{} cost_time:{}\".format(mse, mae, rmse, y_pre_error, cost_time), flush=True)\r\n with open(result_file_path + 'month' + str(cur_month) + '_loss_error.txt', 'a+') as f:\r\n f.write(\"Test MSE: {} MAE:{} RMSE: {} pre_error:{} cost_time:{}\\n\".format(mse, mae, rmse, y_pre_error, cost_time))\r\n with open(other_file_path + 'valid_RMSE.txt', 'a+') as f:\r\n f.write(\"{}\\n\".format(rmse))\r\n with open(other_file_path + 'pre_error.txt', 'a+') as f:\r\n f.write(\"{}\\n\".format(y_pre_error))\r\n\r\n with open(other_file_path + 'month' + str(cur_month) + '_price_list.csv', 'a+') as f:\r\n f.write(w_str)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser('GLSTM')\r\n args = parser.parse_args().__dict__\r\n args = setup(args)\r\n print('参数配置:\\n{}'.format(args), flush=True)\r\n if not 
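The month-to-month parameter inheritance above always follows one pattern: filter a saved state_dict against the new model's keys, optionally remap layer names, then load the merged dict. Distilled into a minimal sketch (the checkpoint path is illustrative):

    old_state = torch.load('model_saved_prelifelong/month1.pkl')
    model_dict = model.state_dict()
    # keep only parameters whose names exist in the new model
    inherited = {k: v for k, v in old_state.items() if k in model_dict}
    model_dict.update(inherited)
    model.load_state_dict(model_dict)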
os.path.isdir('result_prelifelong/'):\r\n os.makedirs('result_prelifelong/')\r\n with open('result_prelifelong/parameters.txt', 'w') as f:\r\n f.write('Parameters:\\n{}'.format(args))\r\n main(args)\r\n","sub_path":"main_prelifelong.py","file_name":"main_prelifelong.py","file_ext":"py","file_size_in_byte":14379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"470655341","text":"# Script to\n# 1) get node embeddings from a graph\n# 2) get a (t-SNE) projection from that\n\nimport json\nimport os\nfrom argparse import ArgumentParser\nfrom random import randint\n\nimport numpy as np\nimport torch\nfrom sklearn.manifold import TSNE\n\nfrom commander import load_model\nfrom loaders.data_generator import Generator\nfrom models import get_model\n\n# data_path = \"dataset/QAP_steps_ErdosRenyi_100_25_1.0_0.1_0.2/test.pkl\"\ndata_path = \"dataset/QAP_ErdosRenyi_ErdosRenyi_1000_25_1.0_0.05_0.2/test.pkl\"\nmodel_path = \"runs/Reg-ER-100-0.0.0_freeze_mlp/QAP_ErdosRenyi_ErdosRenyi_25_1.0_0.05_0.2\"\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\ndef get_embeddings(model, g1, g2=None):\n \"\"\"Take g1, g2 1-batches of graph\n returns embeddings of shape (1,n, embed_dim)\"\"\"\n embeddings = []\n handle = model.node_embedder.register_forward_hook(lambda module, inp, outp : embeddings.append(outp))\n if g2 is None :\n model(g1)\n else:\n model(g1,g2)\n handle.remove()\n return embeddings\n\n\ndef get_graphs(data, i, device):\n print(f\"Using data point nb {args.i}\")\n g1, g2 = data[args.i] # graphs !\n g1.unsqueeze_(0) # batches of 1\n g2.unsqueeze_(0)\n print(g1.shape)\n g1 = g1.to(device)\n g2 = g2.to(device)\n\n return g1, g2\n\n\ndef embed(g1, g2, model):\n e1, e2 = get_embeddings(model, g1, g2)\n e = torch.cat((e1, e2), axis=1)\n tsne = TSNE()\n v = tsne.fit_transform(e.cpu().detach().squeeze().numpy())\n _, n, _ = e1.shape\n v1 = v[:n, :]\n v2 = v[n:, :]\n return v1, v2\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"-i\", default=None, help=\"Id of the data point to use. 
by default, use random\"\n )\n parser.add_argument(\"-m\", help=\"Path to model to load\", default=model_path)\n parser.add_argument(\"-d\", help=\"Path to data to load\", default=data_path)\n\n args = parser.parse_args()\n print(\"Using data from \" + args.d)\n data = list(torch.load(args.d))\n\n if args.i is None:\n args.i = randint(0, len(data) - 1)\n\n with open(os.path.join(model_path, \"config.json\")) as reader:\n cfg = json.load(reader)\n\n model = get_model(cfg[\"arch\"])\n model.eval()\n model.to(device)\n model = load_model(model, device, os.path.join(model_path,\"model_best.pth.tar\"))\n g1,g2 = get_graphs(data,args.i, device)\n \n e1, e2 = embed(g1, g2, model)\n # embeddings\n np.save(\"embeds/g1/embeds\", e1)\n np.save(\"embeds/g2/embeds\", e2)\n # original graphs\n np.save(\"embeds/g1/graph\", g1.detach().cpu().numpy())\n np.save(\"embeds/g2/graph\", g2.detach().cpu().numpy())\n","sub_path":"explainability.py","file_name":"explainability.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"591570352","text":"# -*- coding:utf-8 -*-\n\nimport os\nimport re\nfrom multiprocessing import Pool\nfrom pyquery import PyQuery as pq\nimport requests\nfrom requests.exceptions import RequestException\n\nheader = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'ws1.sinaimg.cn',\n 'If-Modified-Since': 'Mon, 08 Jul 2013 18:06:40 GMT',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',\n}\n\n\ndef total_page():\n \"\"\"\n get the total page from Jandan WuliaoTu\n \"\"\"\n url = 'http://jandan.net/pic'\n try:\n response = requests.get(url)\n if response.status_code == 200:\n html = response.text\n images = re.compile('.(\\d+).', re.S)\n result = re.findall(images, html)[0]\n return result\n return None\n except RequestException:\n print('status_code验证失败:', url)\n return None\n\n\ndef parse_page(num):\n \"\"\"\n parse page url, get jpg-link and likes comments\n \"\"\"\n try:\n url = 'http://jandan.net/pic/page-' + num\n r = requests.get(url,timeout=3)\n if r.status_code == 200:\n html = r.text\n doc = pq(html)\n items = doc('.commentlist li').items()\n for item in items:\n ooxx = item.find('.row .jandan-vote span').text()\n oo = re.compile('(\\d+)')\n result_oo = re.findall(oo, ooxx)\n if result_oo:\n # 确保图片实际存在,排除广告空值\n real_oo = result_oo[0]\n if int(real_oo) >= 150:\n link = str(item.find('.row .text p a'))\n images = re.compile('')\n # 直接在此挑选jpg格式,会有一个情况出错,是gif连着jpg的,正则无法筛选出来,需要重新写\n result = re.findall(images, link)\n print(result)\n if result:\n JPG = {\n 'like': real_oo,\n 'link': result\n }\n for i in JPG['link']:\n title = num + 'page' + JPG['like'] + 'like'\n # 这里如果不加like,可以保证不会重复下载,但是无法区分图片是否一组\n # title = num +'page'\n content = \"http:\" + i\n MD_5 = i[23:-4]\n download_jpg(title, MD_5, content)\n except TimeoutError:\n return parse_page(num)\n except RequestException:\n pass\n\n\ndef download_jpg(title, MD_5, content):\n try:\n file_path = '{0}/{1}{2}.{3}'.format('E:\\Githouse\\pythonExercise\\JanDan\\jpg', title, MD_5, 'jpg')\n if not os.path.exists(file_path):\n with open(file_path, 'wb') as f:\n f.write(requests.get(content,timeout=10).content)\n # 
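get_embeddings in the explainability script above captures intermediate activations with a forward hook. A minimal self-contained sketch of the same capture pattern on a toy layer:

    import torch
    import torch.nn as nn

    layer = nn.Linear(4, 2)
    captured = []
    handle = layer.register_forward_hook(lambda module, inp, outp: captured.append(outp))
    layer(torch.randn(1, 4))   # the forward pass fills `captured`
    handle.remove()            # always detach the hook afterwards
    print(captured[0].shape)   # torch.Size([1, 2])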
如果链接只有jpg等直接结尾,就不用再登陆验证那么麻烦了\n f.close()\n except TimeoutError:\n return download_jpg(title, MD_5, content)\n except RequestException:\n pass\n\n\nRunDOWN = int(total_page())\nprint('总页数为:', RunDOWN)\n\n\ndef main(totalNum):\n num = str(totalNum)\n parse_page(num)\n print('%d页完成' % totalNum)\n\n\nif __name__ == '__main__':\n pool = Pool()\n pool.map(main, [i for i in range(661,793)])\n #pool.map(main, [i for i in range(int(total_page()))])\n","sub_path":"JanDan/无聊图素材下载.py","file_name":"无聊图素材下载.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"238497239","text":"#!/usr/bin/env python\n\nimport pythonpath; pythonpath\nfrom x19290 import just_print\n\nfrom os.path import basename\nfrom os import getcwd\nfrom subprocess import check_call\nfrom sys import argv, path as pythonpath, version_info\n\ndef app():\n just_print(main(basename(getcwd())))\n\n_prefix = r'/opt'\n_lib = r'{}/lib/python2.7/site-packages'\n_command = r'./setup.py install --prefuix={}'\ndef main(package):\n this_version = version_info[0]\n opt_lib = None\n expect = r'/opt/lib/python{}'.format(version_info[0])\n for p in reversed(pythonpath):\n if not expect in p:\n continue\n opt_lib = p\n break\n if not opt_lib:\n raise ValueError(expect)\n prefix = _prefix.format(package)\n lib = _lib.format(prefix)\n line = r'PYTHONPATH={} python ./setup.py install --prefix={}'\n yield line.format(opt_lib, prefix)\n\nif __name__ == r'__main__':\n app()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"50011796","text":"import unittest\n\nfrom statefun.request_reply_pb2 import Address\n\nfrom statefun_tasks._types import _TaskEntry, _GroupEntry\nfrom statefun_tasks import DefaultSerialiser, RetryPolicy\n\nclass PipelineSerialisationTests(unittest.TestCase):\n def test_task_entry_serialisation(self):\n serialiser = DefaultSerialiser(known_proto_types=[Address])\n\n args = (1,'2', Address(namespace='test'))\n kwargs = {'arg': [1, 2, 3]}\n parameters = {'a_parameter': 'some_value'}\n\n entry = _TaskEntry('task_id', 'task_type', args, kwargs, parameters, True)\n entry.mark_complete()\n\n entry_proto = entry.to_proto(serialiser)\n reconsituted_entry = _TaskEntry.from_proto(entry_proto, serialiser)\n\n self.assertEqual(entry_proto.task_entry.request.type_url, 'type.googleapis.com/statefun_tasks.ArgsAndKwargs')\n self.assertEqual(reconsituted_entry.task_id, entry.task_id)\n self.assertEqual(reconsituted_entry.task_type, entry.task_type)\n self.assertEqual(reconsituted_entry.args, tuple(entry.args,))\n self.assertEqual(reconsituted_entry.kwargs, kwargs)\n self.assertEqual(reconsituted_entry.parameters, parameters)\n self.assertEqual(reconsituted_entry.is_finally, True)\n self.assertEqual(reconsituted_entry.is_complete(), True)\n\n def test_task_entry_serialisation_with_single_protobuf_arg(self):\n serialiser = DefaultSerialiser(known_proto_types=[Address])\n\n args = Address(namespace='test')\n entry = _TaskEntry('task_id', 'task_type', args, {}, {}, True)\n\n entry_proto = entry.to_proto(serialiser)\n reconsituted_entry = _TaskEntry.from_proto(entry_proto, serialiser)\n\n self.assertEqual(entry_proto.task_entry.request.type_url, 'type.googleapis.com/org.apache.flink.statefun.flink.core.polyglot.Address')\n\n self.assertEqual(reconsituted_entry.args, entry.args)\n self.assertEqual(reconsituted_entry.kwargs, 
{})\n\n\n def test_group_entry_serialisation(self):\n serialiser = DefaultSerialiser(known_proto_types=[Address])\n\n args = (1,'2', Address(namespace='test'))\n kwargs = {'arg': [1, 2, 3]}\n \n group_entry = _GroupEntry(group_id='inner_group_id')\n\n group_entry.add_to_group([\n _TaskEntry('inner_task_id_1', 'task_type', args, kwargs),\n _TaskEntry('inner_task_id_2', 'task_type', args, kwargs)\n ])\n\n entry = _GroupEntry(group_id='group_id')\n entry.add_to_group([\n group_entry,\n _TaskEntry('grouped_task_chain_1_1', 'task_type', args, kwargs),\n _TaskEntry('grouped_task_chain_1_2', 'task_type', args, kwargs)\n ])\n\n entry.add_to_group([\n _TaskEntry('grouped_task_chain_2_1', 'task_type', args, kwargs)\n ])\n\n proto = entry.to_proto(serialiser)\n reconsituted_entry = _GroupEntry.from_proto(proto, serialiser)\n self.assertEqual(str(reconsituted_entry), str(entry))\n\n def test_task_entry_serialisation_with_task_retry_policy(self):\n serialiser = DefaultSerialiser(known_proto_types=[Address])\n\n args = ()\n kwargs = {}\n parameters = {'retry_policy': RetryPolicy(retry_for=[Exception, ValueError]).to_proto()}\n\n entry = _TaskEntry('task_id', 'task_type', args, kwargs, parameters)\n\n entry_proto = entry.to_proto(serialiser)\n reconsituted_entry = _TaskEntry.from_proto(entry_proto, serialiser)\n retry_policy = reconsituted_entry.get_parameter('retry_policy')\n self.assertEqual(['builtins.Exception', 'builtins.ValueError'], retry_policy.retry_for)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_pipeline_serialisation.py","file_name":"test_pipeline_serialisation.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"233022281","text":"from argparse import ArgumentParser\n\nimport dask\nfrom dask_sql import run_server\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--host\",\n default=\"0.0.0.0\",\n help=\"The host interface to listen on (defaults to all interfaces)\",\n )\n parser.add_argument(\n \"--port\", default=8080, help=\"The port to listen on (defaults to 8080)\"\n )\n parser.add_argument(\n \"--scheduler-address\",\n default=None,\n help=\"Connect to this dask scheduler if given\",\n )\n\n args = parser.parse_args()\n\n client = None\n if args.scheduler_address:\n client = dask.distributed.Client(args.scheduler_address)\n\n run_server(host=args.host, port=args.port, client=client)\n","sub_path":"scripts/startup_script.py","file_name":"startup_script.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"163031959","text":"import copy\n\nfrom datapackage_pipelines.wrapper import ingest, spew\nfrom datapackage_pipelines.utilities.kvstore import DB\n\n\ndef saver(resource, db):\n for idx, row in enumerate(resource):\n key = \"{:08x}\".format(idx)\n db.set(key, row)\n yield row\n\n\ndef loader(db):\n for k, value in db.items():\n yield value\n\n\ndef process_resources(resource_iterator, source):\n for resource in resource_iterator:\n if resource.spec['name'] == source:\n db = DB()\n yield saver(resource, db)\n yield loader(db)\n else:\n yield resource\n\n\ndef process_datapackage(dp, source, target_name, target_path):\n\n def traverse_resources(resources):\n for res in resources:\n yield res\n if res['name'] == source:\n res = copy.deepcopy(res)\n res['name'] = target_name\n res['path'] = target_path\n yield res\n\n dp['resources'] 
= list(traverse_resources(dp['resources']))\n return dp\n\n\nif __name__ == '__main__':\n parameters, datapackage, resource_iterator = ingest()\n\n source = parameters['source']\n target_name = parameters['target-name']\n target_path = parameters['target-path']\n\n spew(process_datapackage(datapackage, source, target_name, target_path),\n process_resources(resource_iterator, source))\n","sub_path":"datapackage_pipelines/lib/duplicate.py","file_name":"duplicate.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"116560771","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 11 16:48:50 2017\n\n@author: rvennam\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, Input, Lambda\nfrom keras.optimizers import Adam\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os\n\nfrom utils import preprocess_img\nfrom utils import training_image_generator\nfrom utils import get_images\nfrom utils import drop_low_steeringangle_data\n\n\n\n#Config Parameters\nDATA_PATH = \"data\"\nLOG_FILE = os.path.join(DATA_PATH, \"driving_log.csv\")\nBATCH_SIZE = 64\nEPOCHS = 5\n\n\ndef build_nvidiamodel(input_shape):\n \n \n def resize_images(img):\n \"\"\"Returns resized image\n Cannot be directly used in lambda function\n as tf is not understood by keras\n \"\"\"\n import tensorflow as tf\n return tf.image.resize_images(img, (66, 200))\n \n model = Sequential()\n model.add(Lambda(resize_images, input_shape=input_shape))\n model.add(Lambda(lambda x: x/255.-0.5))\n model.add(Convolution2D(24, 5, 5, border_mode=\"same\", subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(36, 5, 5, border_mode=\"same\", subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(48, 5, 5, border_mode=\"valid\", subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(64, 3, 3, border_mode=\"valid\", activation=\"relu\"))\n model.add(Convolution2D(64, 3, 3, border_mode=\"valid\", activation=\"relu\"))\n model.add(Flatten())\n model.add(Dropout(0.3))\n model.add(Dense(100, activation=\"relu\"))\n model.add(Dense(50, activation=\"relu\"))\n model.add(Dense(10, activation=\"relu\"))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(optimizer=Adam(lr=0.001), loss='mse')\n return model\n\n#### Load CSV File generated by simulator\ncsv_data = pd.read_csv(LOG_FILE, index_col=False)\n\n## Shuffle the entries before splitting the dataset \ncsv_data = csv_data.sample(n=len(csv_data))\n\n## Training and Validation Data (85% training set and 15% testset)\ntraining_count = int(0.85 * len(csv_data))\ntraining_data = csv_data[:training_count].reset_index()\nvalidation_data = csv_data[training_count:].reset_index()\n\n## Remove low steering angle data to balance the dataset and to remove the bias\ntraining_data = drop_low_steeringangle_data(training_data)\n\n## extract one sample image to calculate image shape (required for model)\nsample_img_path = os.path.join(DATA_PATH, training_data['center'].iloc[5].strip())\nsample_img = preprocess_img(plt.imread(sample_img_path))\n\n## Build the model\nmodel = build_nvidiamodel(sample_img.shape)\nsamples_per_epoch = int(len(training_data) / BATCH_SIZE) * BATCH_SIZE\nnb_val_samples = len(validation_data)\n\n## Create generators for training and validation data\nvalues = model.fit_generator(training_image_generator(training_data, BATCH_SIZE, 
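The duplicate.py processor above yields two generators for the source resource: saver streams rows through while persisting them to the key-value store, and loader replays them afterwards for the duplicated resource, so the upstream iterator is consumed only once. A stand-in sketch of the same two-pass idea using a plain dict instead of the pipeline's DB:

    def saver(rows, store):
        for idx, row in enumerate(rows):
            store["{:08x}".format(idx)] = row   # zero-padded keys preserve order
            yield row

    def loader(store):
        for _, row in sorted(store.items()):
            yield row

    store = {}
    first_pass = list(saver(iter([{'a': 1}, {'a': 2}]), store))
    second_pass = list(loader(store))
    assert first_pass == second_pass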
DATA_PATH), samples_per_epoch=samples_per_epoch, nb_epoch=EPOCHS, validation_data=get_images(validation_data, DATA_PATH), nb_val_samples=len(validation_data))\n\nmodel.save('model.h5')\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"256390091","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import (\n pre_save,\n post_save)\n\nfrom availableworks.uprofile.models import AWUser\nfrom availableworks.core.models.mixins import AWModel\n\nclass RequestedInvite(AWModel):\n profile = models.OneToOneField(\n 'uprofile.UserProfile',\n related_name = 'requested_invite',\n null=True,\n primary_key=False)\n email_address = models.CharField(\n max_length=100,\n primary_key=True)\n name = models.CharField(max_length=100)\n website = models.CharField(max_length=100)\n\n@receiver(post_save, sender=RequestedInvite)\ndef attach_to_profile(sender, **kwargs):\n invite_request = kwargs.get('instance')\n if not invite_request.profile:\n user = AWUser.objects.filter(\n email=invite_request.email_address).first()\n if user:\n invite_request.profile = user.profile\n invite_request.save()\n","sub_path":"availableworks/registration/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"352295497","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('article', '0004_auto_20150221_1957'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='article',\n name='article_title',\n field=models.CharField(max_length=200, verbose_name=b'\\xd0\\x9d\\xd0\\xb0\\xd0\\xb7\\xd0\\xb2\\xd0\\xb0\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5'),\n preserve_default=True,\n ),\n ]\n","sub_path":"bin/firstapp/article/migrations/0005_auto_20150221_2030.py","file_name":"0005_auto_20150221_2030.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"430558371","text":"# Para un triángulo cualquiera, donde sus lados miden a, b y c centímetros, escriba un programa en python que lea los tres lados del triángulo y permita calcular y mostrar el semiperímetro, el área y el circumradius de ese triángulo.\nimport math\n\n\ndef trian():\n l1 = float(input('ingrese el primer lado del triángulo: '))\n l2 = float(input('ingrese el segundo lado del triángulo: '))\n l3 = float(input('ingrese el tercer lado del triángulo: '))\n\n semipe = (l1 + l2 + l3) / 2\n # fórmula de Herón: producto de los tres factores, no suma\n area = math.sqrt(semipe * (semipe - l1) * (semipe - l2) * (semipe - l3))\n # circunradio: R = (a*b*c) / (4*área)\n circum = (l1 * l2 * l3) / (4 * area)\n\n return 'Para las medidas del triángulo ingresadas, se tiene que el semiperímetro es de %s cm, el área de %s cm y el circumradius de %s cm.' 
% (\"{0:,.2f}\".format(semipe), \"{0:,.2f}\".format(area), \"{0:,.2f}\".format(circum))\n\n\nprint(trian())\n# ingrese el primer lado del triángulo: 2\n# ingrese el segundo lado del triángulo: 3\n# ingrese el tercer lado del triángulo: 4\n# Para las medidas del triángulo ingresadas, se tiene que el semiperímetro es de 4.50 cm, el área de 2.90 cm y el circumradius de 2.07 cm.\n","sub_path":"guia-1/taller-serie-ejercicios-programación/ejercicio-7.py","file_name":"ejercicio-7.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"570364782","text":"# -*- coding: utf-8 -*-\n\n#make sure the current directory is specific to an individual sample and having data directly in this directory\n#make sure there is a subfolder of scanpy_out under the current directory\nimport os\nos.chdir(\"/content/drive/Shared drives/CARD/projects/iNDI/line_prioritization/projects_lirong/Florian_data/data/filtered_cellranger_matrix/cortical_dopaminergic_sample_4\")\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport scanpy as sc\n\nsc.settings.verbosity = 3 \nsc.logging.print_versions()\nsc.settings.set_figure_params(dpi=80)\n\nsample_name = 'cortical_dopaminergic_sample_4'\nresults_file = 'scanpy_out/'+sample_name+'.h5ad'\nresults_file2 = 'scanpy_out/'+sample_name+'_unnormalized.h5ad'\nfigure_file = '_'+sample_name+'.pdf'\n\n## Read 10xGenomics sc-RNA sequencing data using mtx\n\n#adata = sc.read_10x_h5(\"filtered_feature_bc_matrix.h5\")\nadata = sc.read_10x_mtx(\"../\"+sample_name, var_names='gene_symbols')\nadata.var_names_make_unique()\n\n#check the most abundantly expressed genes\nsc.pl.highest_expr_genes(adata, n_top=20, )\n\n## Basic filtering\nsc.pp.filter_cells(adata, min_genes=200)\nsc.pp.filter_genes(adata, min_cells=3)\nsc.pp.filter_genes(adata, min_counts=1)\n## Calculate the percentage of mitochondrial genes\n\nmito_genes = adata.var_names.str.startswith('MT-')\n\nadata.obs['percent_mito'] = np.sum(\n adata[:, mito_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1\n\nadata.obs['n_counts'] = adata.X.sum(axis=1).A1\n\n## Check sequencing quality\n\nsc.pl.violin(adata, ['n_genes', 'n_counts', 'percent_mito'],\n jitter=0.4, multi_panel=True, save='violin_'+figure_file)\n\nsc.pl.scatter(adata, x='n_counts', y='percent_mito', save=\"scatter1_\"+figure_file)\nsc.pl.scatter(adata, x='n_counts', y='n_genes', save=\"scatter2_\"+figure_file)\n\nadata = adata[adata.obs.n_genes < 7500, :]\nadata = adata[adata.obs.percent_mito < 0.15, :]\nadata.write(results_file2)\n\n#Scale and logarithmize the data\n#note that adata.raw has been processed by normalization and log transformation, but has not gone through confounder correction yet\n#adata.raw can be used for differential expression analysis\n\nsc.pp.normalize_total(adata, target_sum=1e4)\nsc.pp.log1p(adata)\nadata.raw=adata\n\n# Choosing highly-variable genes (HVG) for further analysis; here we subset the adata using HVG\n\nsc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)\nsc.pl.highly_variable_genes(adata, save=figure_file)\n#if not subsetting, the regress_out function will generate an error\nadata = adata[:, adata.var.highly_variable]\n\n# Further scale on confounders \"n_counts\" and \"percent_mito\"\n\nsc.pp.regress_out(adata, ['n_counts', 'percent_mito'])\nsc.pp.scale(adata, max_value=10)\n\n# Principal component analysis\n\nsc.tl.pca(adata, svd_solver='arpack')\nsc.pl.pca_variance_ratio(adata, 
log=True,save=figure_file)\nsc.pl.pca(adata,save=figure_file)\n\n# Computing, embedding, and clustering the neighborhood graph\n# defaults are: n_neighbors= 20, n_pcs=50\n\nsc.pp.neighbors(adata)\nsc.tl.umap(adata)\nsc.tl.leiden(adata)\nsc.tl.leiden(adata, resolution=0.2, key_added = \"leiden_0.2\")\nsc.tl.leiden(adata, resolution=0.4, key_added = \"leiden_0.4\")\n\n## Visualize the clusters\n\nsc.pl.umap(adata, color=['leiden'], save=\"1\"+figure_file)\nsc.pl.umap(adata, color=['leiden_0.2'], save=\"0.2\"+figure_file)\nsc.pl.umap(adata, color=['leiden_0.4'], save=\"0.4\"+figure_file)\nsc.pl.umap(adata, color=['MAPT','TMEM106B'], save=\"gene\"+figure_file)\n\n# Finding marker genes using one vs rest comparison using Mann-Whitney-U-test (recommend)\n#sc.settings.verbosity = 2\n\nsc.tl.rank_genes_groups(adata, 'leiden', method='wilcoxon')\nsc.pl.rank_genes_groups(adata, n_genes=20, sharey=False, save=figure_file)\n\nadata.write(results_file)\n# Export a list of marker genes\n\nresult = adata.uns['rank_genes_groups']\ngroups = result['names'].dtype.names\n\ntop_marker_genes = pd.DataFrame(\n {group + '_' + key[:1]: result[key][group]\n for group in groups for key in ['names', 'pvals']}).head(50)\n\ntop_marker_genes.to_csv('scanpy_out/top_markers_'+sample_name+'.csv')\n\n","sub_path":"scanpy_QC_basic_florian_cortical_dopamin_3_and_4.py","file_name":"scanpy_QC_basic_florian_cortical_dopamin_3_and_4.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"638192217","text":"from . import ast as A\nfrom . import types as T\nfrom . import error as Q\nimport ctypes as ct\nimport llvmlite.binding as llvm\n\n\nclass Engine:\n def __init__(self, ll_file=None, llvm_ir=None):\n llvm.initialize()\n llvm.initialize_native_target()\n llvm.initialize_native_asmprinter() # yes, even this one\n\n # Create a target machine representing the host\n target = llvm.Target.from_default_triple()\n target_machine = target.create_target_machine()\n\n # And an execution engine with a backing module\n if ll_file:\n self.main_mod = self.compile_file(ll_file)\n elif llvm_ir:\n self.main_mod = self.compile_ir(llvm_ir)\n else:\n self.main_mod = self.compile_ir('')\n\n self.engine = llvm.create_mcjit_compiler(self.main_mod, target_machine)\n\n def add_lib(self, *libs):\n for lib in libs:\n llvm.load_library_permanently(lib)\n\n def compile_file(self, ll_file):\n with open(ll_file) as tmp:\n return self.compile_ir(tmp.read())\n\n def compile_ir(self, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n return mod\n\n def link_file(self, *additions):\n self.link_ir(*(self.compile_file(add) for add in additions))\n\n def link_ir(self, *additions):\n for add in additions:\n self.main_mod.link_in(add)\n\n def set_main_mod(self, mod):\n self.main_mod = mod\n self.engine.add_module(mod)\n\n def finalize(self):\n self.engine.finalize_object()\n\n def get_func(self, name, *types):\n func_typ = ct.CFUNCTYPE(*types)\n func_ptr = self.engine.get_function_address(name)\n return func_typ(func_ptr)\n\n def get_global(self, name, typ):\n addr = self.engine.get_global_value_address(name)\n ptr = ct.cast(ct.c_void_p(addr), typ)\n return ptr\n\n def main(self):\n main = self.get_func('main', ct.c_int, ct.c_int, ct.POINTER(ct.c_char_p))\n\n argc = ct.c_int(1)\n argv_0 = ct.c_char_p(\"test\".encode(\"utf-8\"))\n\n return main(argc, ct.byref(argv_0))\n\n def gc_init(self):\n gc_init = 
self.get_func('rain_gc_init', ct.c_int)\n gc_init()\n\n\n # rain_get\n\n def rain_get(self, table_box, key_box):\n get = self.get_func('rain_get', T.carg, T.carg, T.carg) # ret, table, key\n ret_box = T.cbox.to_rain(None)\n get(ct.byref(ret_box), ct.byref(table_box), ct.byref(key_box))\n return ret_box\n\n def rain_get_py(self, table_box, key):\n return self.rain_get(table_box, T.cbox.to_rain(key))\n\n def rain_get_ptr(self, table_ptr, key_box):\n get_ptr = self.get_func('rain_get_ptr', T.carg, T.carg, T.carg)\n return get_ptr(table_ptr, ct.byref(key_box))\n\n def rain_get_ptr_py(self, table_ptr, key):\n return self.rain_get_ptr(table_ptr, T.cbox.to_rain(key))\n\n\n # rain_put\n\n def rain_put(self, table_box, key_box, value_box):\n put = self.get_func('rain_put', T.carg, T.carg, T.carg) # table, key, val\n put(ct.byref(table_box), ct.byref(key_box), ct.byref(value_box))\n\n def rain_put_py(self, table_box, key, value_box):\n key_box = T.cbox.to_rain(key)\n self.rain_put(table_box, key_box, value_box)\n\n def rain_set_table(self, table_box):\n set_table = self.get_func('rain_set_table', T.carg)\n set_table(ct.byref(table_box))\n\n\n # set environment\n\n def rain_set_env(self, table_box, meta_ptr):\n set_meta = self.get_func('rain_set_env', T.carg, T.carg)\n set_meta(ct.byref(table_box), meta_ptr)\n\n\n # converting between Rain and Python AST\n\n def to_rain(self, val):\n if isinstance(val, list):\n table_box = T.cbox.to_rain(None)\n self.rain_set_table(table_box)\n\n ast_ptr = self.get_global('core.ast.exports', T.carg)\n meta_ptr = self.rain_get_ptr_py(ast_ptr, 'list')\n self.rain_set_env(table_box, meta_ptr)\n\n for i, n in enumerate(val):\n self.rain_put_py(table_box, i, self.to_rain(n))\n\n return table_box\n\n elif isinstance(val, A.node):\n table_box = T.cbox.to_rain(None)\n self.rain_set_table(table_box)\n\n ast_ptr = self.get_global('core.ast.exports', T.carg)\n meta_ptr = self.rain_get_ptr_py(ast_ptr, val.__tag__)\n self.rain_set_env(table_box, meta_ptr)\n\n slots = [self.to_rain(getattr(val, key, None)) for key in val.__slots__]\n\n tag_in = T.cbox.to_rain(val.__tag__)\n self.rain_put_py(table_box, 'tag', tag_in)\n\n for key, box in zip(val.__slots__, slots):\n self.rain_put_py(table_box, key, box)\n\n return table_box\n\n return T.cbox.to_rain(val)\n\n def to_py(self, box):\n if box.type == T.typi.table:\n tag_box = self.rain_get_py(box, 'tag')\n tag = self.to_py(tag_box)\n\n if tag:\n node_type = A.tag_registry[tag]\n slots = [self.to_py(self.rain_get_py(box, slot)) for slot in node_type.__slots__]\n\n return node_type(*slots)\n\n else:\n res = []\n i = 0\n while True:\n next = self.to_py(self.rain_get_py(box, i))\n if next is None:\n break\n\n res.append(next)\n i += 1\n\n return res\n\n return box.to_py()\n","sub_path":"rain/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"382265259","text":"try:\n from django.conf.urls import url, include\nexcept ImportError:\n from django.urls import re_path as url\n\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom .views import QuizListView, CategoriesListView, \\\n ViewQuizListByCategory, QuizUserProgressView, QuizMarkingList, \\\n QuizMarkingDetail, QuizDetailView, QuizTake, SystemFeedbackView, \\\n QuizUserDetail, QuizProfProgressView, QuizTurmaProgressView\n\nurlpatterns = [\n\n url(r'^$',\n view=QuizListView.as_view(),\n 
name='quiz_index'),\n\n url(r'^category/$',\n view=CategoriesListView.as_view(),\n name='quiz_category_list_all'),\n\n url(r'^category/(?P[\\w|\\W-]+)/$',\n view=ViewQuizListByCategory.as_view(),\n name='quiz_category_list_matching'),\n\n path('progress/',\n view=QuizUserProgressView.as_view(),\n name='progress'),\n\n url(r'^marking/$',\n view=QuizMarkingList.as_view(),\n name='quiz_marking'),\n\n url(r'^marking/(?P[\\d.]+)/$',\n view=QuizMarkingDetail.as_view(),\n name='quiz_marking_detail'),\n\n url(r'^userdetail/(?P[\\d.]+)/(?P[\\d.]+)/$',\n view=QuizMarkingDetail.as_view(),\n name='usuario_detail'),\n\n url(r'^userquiz/(?P[\\d.]+)/$',\n view=QuizUserDetail.as_view(),\n name='quiz_user_detail'),\n\n url(r'^progressdetail/(?P[\\d.]+)/$',\n view=QuizProfProgressView.as_view(),\n name='progress_detail'),\n\n url(r'^progressturma/(?P[\\d.]+)/$',\n view=QuizTurmaProgressView.as_view(),\n name='progress_turma'),\n \n\n\n # passes variable 'quiz_name' to quiz_take view\n url(r'^(?P[\\w-]+)/$',\n view=QuizDetailView.as_view(),\n name='quiz_start_page'),\n\n \n url(r'^(?P[\\w-]+)/take/$',\n view=QuizTake.as_view(),\n name='quiz_question'),\n\n url(r'^feedback/(?P[\\d.]+)/$',\n view=SystemFeedbackView.as_view(),\n name='feedback'),\n \n \n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"django_quiz/quiz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"597052814","text":"# Ex2b.py\r\n#\r\n# Exercise 2 - subpoint b\r\n# Examine specific heat variance as function of temperature\r\n# using bootstrap method.\r\n#\r\n# Changelog:\r\n# 12.12.2016 - Script created and completed\r\n\r\n# Import from hub file\r\nfrom Ising import *\r\n\r\n\r\ndef Ex2b_main(L=20, MCS1=20, MCS2=20):\r\n \"\"\"Module's main function.\r\n\r\n Output to file specific heat variance as function of temperature.\r\n L - sidelength of the spin grid,\r\n MCS1 - number of Monte Carlo steps for to calculate SH variance.\r\n MCS2 - number of Monte Carlo steps for to calculate SH.\r\n \"\"\"\r\n\r\n result = [] # Initialize result list to study Ising model properties\r\n # Initialize ordered grid of size L by L and constant J = 1\r\n grid = Ising(L, fill_randomly=False)\r\n for kT in range(1, 51): # Loop over temperatures, 50 points, step = 0.1\r\n kT /= 10\r\n print(\"kT = \" + str(kT)) # Print for testing\r\n temp_result = [] # Initialize result list to save grid parameters in\r\n\r\n for t in range(MCS1): # Loop over MC step\r\n print(\"t = \" + str(t)) # Print for testing\r\n energies = [] # Energies list to calculate specific heat\r\n for i in range(MCS2): # Loop over time\r\n grid.MonteCarloStep(kT) # Monte Carlo step\r\n # if t % 10 == 9: # Save parameters every 100 steps\r\n energies.append(grid.energy()) # Append energy to the list\r\n # Append specific heat to the list\r\n temp_result.append(np.var(np.array(energies)) / L**2 / kT**2)\r\n\r\n temp_result = np.array(temp_result) # Convert to numpy array\r\n # Append SH and its variance to the list\r\n result.append([kT, np.average(temp_result), np.var(temp_result)])\r\n\r\n # Print the output\r\n output(result, \"Ex2b.txt\", \"#Temperature\\tSpecific_heat\\tSpecific_heat_variance\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Ex2b_main() # 
Run the main function if script is executed\r\n","sub_path":"Monte Carlo/Ising/Ex2b.py","file_name":"Ex2b.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"454838955","text":"__author__ = 'vvlad'\n__author__ = 'vvlad'\n\n\nimport MapReduce\nimport sys\n\n\"\"\"\nSequence Trim in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n # key: friendA\n # value: friendB\n #a = record[0]\n b = record[1]\n if len(b)>10:\n b = record[1][:-10]\n mr.emit_intermediate(b,1)\n\n\ndef reducer(key, list_of_values):\n # key: trimmed string\n # value: list of values\n\n mr.emit(key)\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n\n inputfile = \"./data/dna.json\"\n if len(sys.argv) > 1:\n inputfile = sys.argv[1]\n\n inputdata = open(inputfile)\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"524960848","text":"# -*- coding: utf-8 -*-\n\nfrom osgeo import osr, ogr\nfrom pyramid.threadlocal import get_current_registry\nfrom pyramid.i18n import get_locale_name\nfrom pyramid.httpexceptions import HTTPBadRequest\nimport unicodedata\nfrom urllib import quote\nfrom urlparse import urlparse, urlunparse\n\n\ndef versioned(path):\n version = get_current_registry().settings['app_version']\n entry_path = get_current_registry().settings['entry_path'] + '/'\n if version is not None:\n agnosticPath = make_agnostic(path)\n parsedURL = urlparse(agnosticPath)\n # we don't do version when behind pserve (at localhost)\n if 'localhost:' not in parsedURL.netloc:\n parts = parsedURL.path.split(entry_path, 1)\n if len(parts) > 1:\n parsedURL = parsedURL._replace(path=parts[0] + entry_path + version + '/' + parts[1])\n agnosticPath = urlunparse(parsedURL)\n return agnosticPath\n else:\n return path\n\n\ndef make_agnostic(path):\n handle_path = lambda x: x.split('://')[1] if len(x.split('://')) == 2 else path\n if path.startswith('http'):\n path = handle_path(path)\n return '//' + path\n else:\n return path\n\n\ndef make_api_url(request):\n base_path = request.registry.settings['apache_base_path']\n base_path = '' if base_path == 'main' else '/' + base_path\n host = request.host + base_path if 'localhost' not in request.host else request.host\n return ''.join((request.scheme, '://', host))\n\n\ndef check_url(url):\n if url is None:\n raise HTTPBadRequest('The parameter url is missing from the request')\n parsedUrl = urlparse(url)\n hostname = parsedUrl.hostname\n if hostname is None:\n raise HTTPBadRequest('Could not determine the hostname')\n domain = \".\".join(hostname.split(\".\")[-2:])\n if all(('admin.ch' not in domain, 'swisstopo.ch' not in domain, 'bgdi.ch' not in domain)):\n raise HTTPBadRequest('Shortener can only be used for admin.ch, swisstopo.ch and bgdi.ch domains')\n return url\n\n\ndef locale_negotiator(request):\n lang = request.params.get('lang')\n settings = get_current_registry().settings\n languages = settings['available_languages'].split()\n if lang == 'rm':\n return 'fi'\n elif lang is None or lang not in languages:\n if request.accept_language:\n return request.accept_language.best_match(languages, 'de')\n # the default_locale_name configuration variable\n 
return get_locale_name(request)\n return lang\n\n\ndef check_even(number):\n if number % 2 == 0:\n return True\n return False\n\n\ndef round(val):\n import math\n return math.floor(val + 0.5)\n\n\ndef format_search_text(input_str):\n return remove_accents(\n escape_sphinx_syntax(input_str)\n )\n\n\ndef remove_accents(input_str):\n if input_str is None:\n return input_str\n input_str = input_str.replace(u'ü', u'ue')\n input_str = input_str.replace(u'Ü', u'ue')\n input_str = input_str.replace(u'ä', u'ae')\n input_str = input_str.replace(u'Ä', u'ae')\n input_str = input_str.replace(u'ö', u'oe')\n input_str = input_str.replace(u'Ö', u'oe')\n return ''.join(c for c in unicodedata.normalize('NFD', input_str) if unicodedata.category(c) != 'Mn')\n\n\ndef escape_sphinx_syntax(input_str):\n if input_str is None:\n return input_str\n input_str = input_str.replace('|', '\\\\|')\n input_str = input_str.replace('!', '\\\\!')\n input_str = input_str.replace('@', '\\\\@')\n input_str = input_str.replace('&', '\\\\&')\n input_str = input_str.replace('~', '\\\\~')\n input_str = input_str.replace('^', '\\\\^')\n input_str = input_str.replace('=', '\\\\=')\n input_str = input_str.replace('/', '\\\\/')\n input_str = input_str.replace('(', '\\\\(')\n input_str = input_str.replace(')', '\\\\)')\n input_str = input_str.replace(']', '\\\\]')\n input_str = input_str.replace('[', '\\\\[')\n input_str = input_str.replace('*', '\\\\*')\n input_str = input_str.replace('<', '\\\\<')\n input_str = input_str.replace('$', '\\\\$')\n input_str = input_str.replace('\"', '\\\"')\n return input_str\n\n\ndef quoting(text):\n return quote(text.encode('utf-8'))\n\n\ndef parseHydroXML(id, root):\n html_attr = {'date_time': '-', 'abfluss': '-', 'wasserstand': '-', 'wassertemperatur': '-'}\n for child in root:\n fid = child.attrib['StrNr']\n if fid == id:\n if child.attrib['Typ'] == '10':\n for attr in child:\n if attr.tag == 'Datum':\n html_attr['date_time'] = attr.text\n # Zeit is always parsed after Datum\n elif attr.tag == 'Zeit':\n html_attr['date_time'] = html_attr['date_time'] + ' ' + attr.text\n elif attr.tag == 'Wert':\n html_attr['abfluss'] = attr.text\n break\n elif child.attrib['Typ'] == '02':\n for attr in child:\n if attr.tag == 'Datum':\n html_attr['date_time'] = attr.text\n # Zeit is always parsed after Datum\n elif attr.tag == 'Zeit':\n html_attr['date_time'] = html_attr['date_time'] + ' ' + attr.text\n elif attr.tag == 'Wert':\n html_attr['wasserstand'] = attr.text\n break\n elif child.attrib['Typ'] == '03':\n for attr in child:\n if attr.tag == 'Datum':\n html_attr['date_time'] = attr.text\n # Zeit is always parsed after Datum\n elif attr.tag == 'Zeit':\n html_attr['date_time'] = html_attr['date_time'] + ' ' + attr.text\n elif attr.tag == 'Wert':\n html_attr['wassertemperatur'] = attr.text\n break\n return html_attr\n\n\ndef transformCoordinate(wkt, srid_from, srid_to):\n srid_in = osr.SpatialReference()\n srid_in.ImportFromEPSG(srid_from)\n srid_out = osr.SpatialReference()\n srid_out.ImportFromEPSG(srid_to)\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AssignSpatialReference(srid_in)\n geom.TransformTo(srid_out)\n return geom\n","sub_path":"chsdi/lib/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"570991882","text":"# 若為Mac電腦,請先貼上此段程式碼\n########### For Mac user ###########\nimport os\nimport ssl\n# used to fix Python SSL CERTIFICATE_VERIFY_FAILED\nif (not 
os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):\n ssl._create_default_https_context = ssl._create_unverified_context\n####################################\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nfrom my_fake_useragent import UserAgent\nimport os\n\ndef random_header():\n ua = UserAgent()\n random_header = json.loads(r'''{\n \"Cache-Control\": \"max-age=0\",\n \"Connection\": \"keep-alive\",\n \"Host\": \"www.dogforum.com\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\":\"%s\"\n }'''%ua.random)\n return random_header\n########################################################\nresource_path = r'./DCTRY'\nif not os.path.exists(resource_path):\n os.mkdir(resource_path)\n\nheaders = {'User-Agent' : str(random_header())}\n\n#for i in range(0, 3):\nurl = 'https://www.dcard.tw/_api/forums/creditcard/posts?popular=false&limit=30&before=232004767'\n\nres = requests.get(url, headers = headers)\nsoup = BeautifulSoup(res.text, 'html.parser')\n#json_string = str(soup)\n#js = json.loads(json_string)\nprint(soup)\n #\n # last_id = js[len(js)-1]['id']\n #\n # for each_article in js:\n # print(each_article['title'])\n # print('https://www.dcard.tw/f/creditcard/p/' + str(each_article['id']))\n # print()\n #\n # url = 'https://www.dcard.tw/_api/forums/creditcard/posts?popular=false&limit=30&before=%s'%(last_id)","sub_path":"dcard-etl.py","file_name":"dcard-etl.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"424163874","text":"#! /usr/bin/python\n\nimport os;\nimport sys;\nimport json;\n\nimport graphcolors;\n\n# stack file format\n#\n# -------------- # stack delimiter\n# 12345AB some # hex some data\n#\ndef conv_str( r ):\n\taddr = r[0];\n\tmod\t = r[1];\n\tproc = r[2];\n\treturn \"{0:08x}_{1}_{2}\".format( addr, mod, proc );\n\ndef read_pairs( stack ):\n\tstack = stack[:];\n\tstack.reverse();\n\tprev = stack[0];\n\t\n\tfor cur in stack[1:]:\n\t\tyield ( prev, cur );\n\t\tprev = cur;\n\ndef main( stack_file ):\n\twith open( stack_file ) as fd:\n\t\tstacks = json.load( fd );\n\n\t# collect call pair count\n\tcall_count = {};\n\tfor stack in stacks:\n\t\tfor pair in read_pairs( stack ):\n\t\t\tk = ( pair[0][0], pair[1][0] ); # caller and callee addr\n\t\t\tif k in call_count:\n\t\t\t\tcall_count[ k ] += 1;\n\t\t\telse:\n\t\t\t\tcall_count[ k ] = 1;\n\n\t# collect modules and set color per modules\n\tmodules_colors = {};\n\tfor stack in stacks:\n\t\tfor entry in stack:\n\t\t\tmod = entry[1];\n\t\t\tif mod in modules_colors:\n\t\t\t\tcontinue;\n\t\t\tmodules_colors[ mod ] = graphcolors.get();\n\t\n\t# print out\n\tprint( \"digraph callstack {\" );\n\tnodes = [];\n\tfor stack in stacks:\n\t\tfor node in stack:\n\t\t\tnode_str = conv_str( node );\n\t\t\tif node_str in nodes:\n\t\t\t\tcontinue;\n\t\t\tnodes.append( node_str );\n\t\t\tprint(\n\t\t\t\t\"\\\"{0}\\\" [ color = \\\"{1}\\\" ]\".format(\n\t\t\t\t\tnode_str,\n\t\t\t\t\tmodules_colors[ node[1] ] ) );\n\t\n\tpairs = [];\n\tfor stack in stacks:\n\t\tfor pair in read_pairs( stack ):\n\t\t\t( caller, callee ) = pair;\n\t\t\tk = ( caller[0], callee[0] );\n\t\t\t\n\t\t\tif k in pairs:\n\t\t\t\tcontinue;\n\t\t\tpairs.append( k );\n\t\t\t\n\t\t\tprint( \"\\\"{0}\\\" -> \\\"{1}\\\" [label=\\\"{2}\\\"]\".format(\n\t\t\t\t\tconv_str( caller ),\n\t\t\t\t\tconv_str( callee ),\n\t\t\t\t\tcall_count[ k ],\n\t\t\t\t\t) );\n\tprint( \"}\" );\n\nif __name__ == \"__main__\":\n\tif len( sys.argv ) 
< 2:\n\t\tprint( \"{0} STACK_FILE.json\".format( sys.argv[0] ) );\n\t\tsys.exit( -1 );\n\t\n\tmain( sys.argv[1] );\n","sub_path":"tools/graph_overview_cs.py","file_name":"graph_overview_cs.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"91374727","text":"\"\"\"\nTensorgou cnn_dssm module graph define\n\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import rnn_cell\n\n__author__ = \"Yuanpeng Zhang\"\n__date__ = \"$2016-11-14$\"\n\n\nclass tgGraph(object):\n def __init__(self\n , hiddensize\n , batchsize\n , numsteps\n , keep_prob\n , numlayers\n , numlabel\n , max_grad_norm\n , embeddingnumpy\n , lr=0.0015\n , momentum=0.85\n , istrain=True\n , distributed=False\n , global_step=None):\n\n self.batchsize = batchsize\n self.numsteps = numsteps\n size = hiddensize\n #if lstm has num_proj,modify self.proj,otherwise self.proj=size\n self.proj=100\n self._input_data = tf.placeholder(tf.int32, [batchsize, numsteps])\n self._targets = tf.placeholder(tf.int32, [batchsize])\n #if reduce_max, use the second following line,otherwise the first\n self._lengths = tf.placeholder(tf.int32, [batchsize])\n #self._lengths=tf.placeholder(tf.float32,[numsteps,batchsize,100])\n\n # Slightly better results can be obtained with forget gate biases\n # initialized to 1 but the hyperparameters of the model would need to be\n # different than reported in the paper.\n #lstm_cell=rnn_cell.BasicLSTMCell(size)\n #lstm_cell = rnn_cell.LSTMCell(size, forget_bias=1.0,use_peepholes=True)\n lstm_cell = rnn_cell.LSTMCell(size, forget_bias=1.0,use_peepholes=True,num_proj=self.proj)\n #lstm_cell=tf.contrib.rnn.LayerNormBasicLSTMCell(size)\n #lstm_cell=tf.contrib.rnn.GridLSTMCell(size,use_peepholes=True)\n #lstm_cell=tf.contrib.rnn.NASCell(size) \n #lstm_cell=tf.contrib.rnn.PhasedLSTMCell(size,use_peepholes=True) \n #lstm_cell=tf.contrib.rnn.TimeFreqLSTMCell(size,use_peepholes=True,feature_size=100,frequency_skip=1)\n if istrain and keep_prob < 1:\n lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)\n\n cell = rnn_cell.MultiRNNCell([lstm_cell] * numlayers)\n\n self._initial_state = cell.zero_state(batchsize, tf.float32)\n \n #commit the following line if reduce_max\n offsets = (self._lengths - 1) * self.batchsize + tf.range(0, self.batchsize)\n\n with tf.device(\"/gpu:2\"):\n #modify by xjk\n self.embedding = tf.get_variable(\"embedding\", initializer=tf.constant(embeddingnumpy), trainable=False)\n# if(istrain):\n# self.embedding = tf.get_variable(\"embedding\", initializer=tf.constant(embeddingnumpy))\n# else:\n# self.embedding = tf.get_variable(\"embedding\")\n\n inputs = tf.nn.embedding_lookup(self.embedding, self._input_data)\n\n if istrain and keep_prob < 1:\n inputs = tf.nn.dropout(inputs, keep_prob)\n\n # Simplified version of tensorflow.models.rnn.rnn.py's rnn().\n # This builds an unrolled LSTM for tutorial purposes only.\n # In general, use the rnn() or state_saving_rnn() from rnn.py.\n #\n # The alternative version of the code below is:\n #\n # from tensorflow.models.rnn import rnn\n # inputs = [tf.squeeze(input_, [1])\n # for input_ in tf.split(1, num_steps, inputs)]\n # outputs, states = rnn.rnn(cell, inputs, initial_state=self._initial_state)\n outputs = []\n states = []\n state = self._initial_state\n with tf.variable_scope(\"RNN\"):\n for time_step in range(numsteps):\n if time_step > 0: tf.get_variable_scope().reuse_variables()\n (cell_output, state) = cell(inputs[:, 
time_step, :], state)\n outputs.append(cell_output)\n states.append(state)\n\n output = tf.reshape(tf.concat(outputs, 0), [-1, self.proj])\n #add by xjk \n # reduce_max method ; if not ,commit these 4 lines\n s=tf.split(output,num_or_size_splits=numsteps,axis=0)\n self.output2=s\n # if no need according to lengths , commit the following line\n #s=tf.multiply(s,self._lengths)\n #output=tf.reduce_max(s,0) \n \n #commit the following line if reduce_max\n output = tf.gather(output, offsets)\n logits = tf.nn.xw_plus_b(output,\n tf.get_variable(\"softmax_w\", [self.proj, numlabel]),\n tf.get_variable(\"softmax_b\", [numlabel]))\n self.results = tf.argmax(logits, 1)\n self.logits=logits\n batchsize = tf.size(self._targets)\n labels = tf.expand_dims(self._targets, 1)\n indices = tf.expand_dims(tf.range(0, batchsize), 1)\n concated = tf.concat([indices, labels],1 )\n onehot_labels = tf.sparse_to_dense(\n #modify by xjk tf.pack()->tf.stack\n concated, tf.stack([batchsize, numlabel]), 1.0, 0.0)\n #modify by xjk add \"logits=,labels=\" in paramer\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits,\n labels=onehot_labels,\n name='xentropy')\n\n corrects = tf.nn.in_top_k(logits, self._targets, 1)\n self._corrects_num = tf.reduce_sum(tf.cast(corrects, tf.int32))\n self._cost = cost = tf.reduce_mean(loss, name='xentropy_mean')\n self._final_state = states[-1]\n \n if istrain:\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars)\n , max_grad_norm)\n #change 1.0 to lr is no effect\n #optimizer = tf.train.GradientDescentOptimizer(1.0) # TODO: No training effect if self.lr.\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001)\n self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)\n if distributed is True:\n tf.scalar_summary(\"cost\", self._cost)\n tf.scalar_summary(\"accuracy\", self._corrects_num)\n self.summary_op = tf.merge_all_summaries()\n\n","sub_path":"tensorgou/graph/rnn_classification/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"554074293","text":"\"\"\"推送平台企业\"\"\"\nimport os\nimport time\nimport unittest\n\nfrom parameterized import parameterized\nfrom selenium.common.exceptions import StaleElementReferenceException\n\nfrom Page.uniform_entry_page import UniformEntryPage\nfrom Util.DriverUtil import DriverUtils\nfrom config import BASE_DIR\n\n\ndef push_fuzzy_search_data():\n \"\"\"模糊搜索数据参数化\"\"\"\n data_dict = {\"test_search_chName\": [{\"chName\": \"新\", \"is_success\": True, \"expect\": \"新\"},\n {\"chName\": \"佩奇\", \"is_success\": False, \"expect\": \"暂无数据\"}]}\n\n search_cn_list = []\n data_cn = data_dict.get(\"test_search_chName\")\n for i in data_cn:\n search_cn_list.append((i.get(\"chName\"), i.get(\"is_success\"), i.get(\"expect\")))\n print(search_cn_list)\n return search_cn_list\n\n\nclass TestPushPlatform(unittest.TestCase):\n driver = None\n\n @classmethod\n def setUpClass(cls) -> None:\n cls.driver = DriverUtils.get_driver_file()\n cls.uniform_entry_page = UniformEntryPage(cls.driver)\n # cls.uniform_entry_page.get_login_proxy().login(\"admin\", \"admin\") # 登录\n\n @classmethod\n def tearDownClass(cls) -> None:\n time.sleep(3)\n DriverUtils.quit_driver()\n\n def setUp(self) -> None:\n self.driver.get(\"http://10.100.81.181/#/login\")\n self.uniform_entry_page.get_login_proxy().login(\"admin\", \"admin\")\n 
self.uniform_entry_page.get_home_proxy().common_data_preset() # 公共数据预设\n # self.uniform_entry_page.get_home_proxy().common_data_preset() # 公共数据预设\n\n def test_push_download_template(self):\n \"\"\"下载模板\"\"\"\n new_path = BASE_DIR + \"\\File\\推送平台企业预设信息模板.xlsx\"\n if os.path.exists(new_path):\n # 判断路径中是否有重复内容\n os.remove(new_path)\n self.uniform_entry_page.get_push_platform_proxy().push_download_file() # 模板下载\n new_file_name = os.path.basename(new_path)\n print(\"文件名称:%s\" % new_file_name)\n # 获取文件类型(扩展名)\n new_file_type = os.path.splitext(new_path)\n print(\"文件扩展名:%s\", new_file_type)\n try:\n self.assertEqual(\"推送平台企业预设信息模板\", new_file_name)\n self.assertEqual(\".xlsx\", new_file_type)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError as msg:\n # 截图\n now_time = time.strftime(\"%Y%m%d%H%M%S\")\n self.driver.get_screenshot_as_file(\n \"./images/push_download_template_bug_{}_{}.png\".format(msg, now_time))\n self.uniform_entry_page.get_home_proxy().quit_login()\n else:\n self.uniform_entry_page.get_push_platform_proxy().push_download_file() # 模板下载\n new_file_name = os.path.basename(new_path)\n print(\"文件名称:%s\" % new_file_name)\n # 获取文件类型(扩展名)\n new_file_type = os.path.basename(new_path)\n print(\"文件扩展名:%s\" % new_file_type)\n try:\n self.assertEqual(\"推送平台企业预设信息模板\", new_file_name)\n self.assertEqual(\".xlsx\", new_file_type)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError as msg:\n # 截图\n now_time = time.strftime(\"%Y%m%d%H%M%S\")\n self.driver.get_screenshot_as_file(\n \"./images/push_download_template_bug_{}_{}.png\".format(msg, now_time))\n self.uniform_entry_page.get_home_proxy().quit_login()\n\n @parameterized.expand(push_fuzzy_search_data())\n def test_push_fuzzy_search(self, Name, is_success, expect):\n \"\"\"模糊搜索\"\"\"\n self.uniform_entry_page.get_push_platform_proxy().push_fuzzy_search(Name) # 模糊搜索\n push_name = self.driver.find_elements_by_xpath(\n '//*[@id=\"app\"]/div/div[2]/section/div/div[2]/div[1]/div[3]/table/tbody/tr[1]/td[2]')\n if is_success:\n # 有数据\n # 元素找到\n list_strNo = []\n for i in push_name:\n list_strNo.append(i.text)\n print(\"列表值{}\".format(list_strNo))\n try:\n self.assertIn(expect, \"{}\".format(list_strNo))\n time.sleep(2)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError as msg:\n # 截图\n now_time = time.strftime(\"%Y%m%d%H%M%S\") # 打时间戳\n self.driver.get_screenshot_as_file(\n \"./images/push_fuzzy_search_bug_{}_{}.png\".format(msg,now_time))\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n else:\n # 无数据\n push_no_data = self.uniform_entry_page.get_push_platform_proxy().get_push_search_no_data() # 获取提示信息\n try:\n self.assertIn(expect, push_no_data)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except StaleElementReferenceException:\n # 截图\n now_time = time.strftime(\"%Y%m%d%H%M%S\") # 打时间戳\n self.driver.get_screenshot_as_file(\n \"./images/push_fuzzy_search_bug_{}.png\".format(now_time))\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n def test_push_not_select_any_records_delete(self):\n \"\"\"不选择任何数据的删除\"\"\"\n self.uniform_entry_page.get_push_platform_proxy().push_no_choose_delete()\n push_delete_msg = self.uniform_entry_page.get_push_platform_proxy().get_push_delete_message()\n try:\n self.assertIn(\"请至少选择一项\", push_delete_msg)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError as msg:\n # 断言失败\n now_time = time.strftime(\"%Y%m%d%H%M%S\")\n 
self.driver.get_screenshot_as_file(\"./images/push_no_select_delete_bug_{}_{}.png\".format(msg, now_time))\n self.uniform_entry_page.get_home_proxy().quit_login() # 登出\n\n def test_push_auto_close_details(self):\n \"\"\"推送平台企业--详情测试\"\"\"\n self.uniform_entry_page.get_push_platform_proxy().push_details()\n try:\n push_details_msg = self.uniform_entry_page.get_details_proxy().get_detail_page_title()\n try:\n self.assertIn(\"企业详情\", push_details_msg)\n self.uniform_entry_page.get_details_proxy().push_details_close()\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError as msg:\n # 断言失败\n # 截图\n now_time = time.strftime(\"%Y%m%d%H%M%S\")\n self.driver.get_screenshot_as_file(\"./images/push_auto_details_bug_{}_{}.png\".format(msg, now_time))\n self.uniform_entry_page.get_home_proxy().quit_login() # 登出\n except StaleElementReferenceException as msg:\n # 截图\n now_time = time.strftime(\"%Y%m%d%H%M%S\")\n self.driver.get_screenshot_as_file(\"./images/push_auto_details_bug_{}_{}.png\".format(msg, now_time))\n self.uniform_entry_page.get_home_proxy().quit_login() # 登出\n","sub_path":"scripts/test_push_platform.py","file_name":"test_push_platform.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"246096228","text":"import datetime\n\nfrom server import data\nfrom server import serializable\nfrom server import util\n\nclass UserStatus(serializable.Serializable):\n PUBLIC_FIELDS = serializable.fields('user_phone_number',\n 'message', 'expiration_str')\n\n def __init__(self, status_id=None, user_phone_number=None,\n message=None, expiration_str=None):\n self.status_id = status_id\n self.user_phone_number = user_phone_number\n self.message = message\n self.expiration_str = expiration_str\n\n def initialize(self):\n if self.user_phone_number:\n self.user_phone_number = util.canonicalize_phone_number(self.user_phone_number)\n\n @property\n def expiration(self):\n return datetime.datetime.strptime(self.expiration_str, '%Y-%m-%dT%H:%M:%SZ')\n\nclass UserStatusBroadcast(serializable.Serializable):\n PUBLIC_FIELDS = serializable.fields(\n serializable.objf('user_status', UserStatus),\n serializable.listf('receiver_phone_numbers'))\n\n def __init__(self, user_status=None, receiver_phone_numbers=()):\n self.user_status = user_status\n self.receiver_phone_numbers = receiver_phone_numbers\n\n def initialize(self):\n if self.receiver_phone_numbers:\n self.receiver_phone_numbers = map(util.canonicalize_phone_number, self.receiver_phone_numbers)\n\n\ndef lookup_statuses(user_phone_number):\n results = data.lookup_all_statuses_broadcasting_to(user_phone_number)\n statuses = []\n for status_id, user_phone_number, message, expiration in results:\n statuses.append(UserStatus(status_id, user_phone_number, message, expiration))\n return statuses\n\ndef cancel_status(user_phone_number):\n return data.cancel_latest_status(user_phone_number)\n\ndef broadcast_status(user_phone_number, user_status_broadcast):\n status = user_status_broadcast.user_status\n data.preempt_latest_status(status.user_phone_number)\n result = data.write_status_broadcast(user_phone_number, status.message, status.expiration,\n user_status_broadcast.receiver_phone_numbers)\n return bool(result)\n","sub_path":"server/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
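# A minimal, self-contained sketch of the timestamp format handled by UserStatus.expiration in server/request.py above; the sample value is illustrative, not taken from the corpus.
from datetime import datetime

FMT = '%Y-%m-%dT%H:%M:%SZ'  # format string parsed by UserStatus.expiration

expiration_str = '2015-03-06T14:27:17Z'  # hypothetical expiration string
expiration = datetime.strptime(expiration_str, FMT)
assert expiration.strftime(FMT) == expiration_str  # the format round-trips cleanly
print(expiration)  # 2015-03-06 14:27:17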
+{"seq_id":"480262157","text":"\"\"\"\nvenvmanager.views\n~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2012 by Linovia.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom . import models\nfrom django.views.generic import ListView, UpdateView, DetailView, CreateView, DeleteView\nfrom django.core.urlresolvers import reverse_lazy\n\n\n#\n# SERVERS SECTION\n#\n\nclass ServerList(ListView):\n model = models.Server\n\n\nclass ServerNew(CreateView):\n model = models.Server\n success_url = reverse_lazy('servers')\n\n\nclass ServerDetail(DetailView):\n model = models.Server\n pk_url_kwarg = 'server_id'\n\n\nclass ServerUpdate(UpdateView):\n model = models.Server\n pk_url_kwarg = 'server_id'\n success_url = reverse_lazy('servers')\n\n\nclass ServerDelete(DeleteView):\n model = models.Server\n pk_url_kwarg = 'server_id'\n success_url = reverse_lazy('servers')\n\n\n#\n# VIRTUALENVS\n#\n\nclass VenvList(ListView):\n model = models.VirtualEnv\n\n\nclass VenvNew(CreateView):\n model = models.VirtualEnv\n success_url = reverse_lazy('venvs')\n\n\nclass VenvDetail(DetailView):\n model = models.VirtualEnv\n pk_url_kwarg = 'venv_id'\n\n\nclass VenvUpdate(UpdateView):\n model = models.VirtualEnv\n pk_url_kwarg = 'venv_id'\n success_url = reverse_lazy('venvs')\n\n\nclass VenvDelete(DeleteView):\n model = models.VirtualEnv\n pk_url_kwarg = 'venv_id'\n success_url = reverse_lazy('venvs')\n","sub_path":"venvmanager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"253726388","text":"import os\nimport sys\nimport logging\nimport numpy as np\nimport json\nimport re\nimport random\nfrom tqdm import tqdm\n\nwork_dir = os.getcwd()\nsys.path.extend([os.path.abspath(\"..\"), work_dir])\n\nfrom basic.basic_task import Basic_task, Task_Mode\nfrom basic.register import register_task, find_task\nfrom utils.build_vocab import Vocab\nfrom utils.utils import check_dir\n\nimport torch\nfrom torch import nn\nfrom transformers import BertPreTrainedModel, BertConfig, BertTokenizer, BertModel\n\n# from TorchCRF import CRF\nfrom metrics.ner_metrics import SeqEntityScore\nfrom models.crf import CRF\n\nlogging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nworkdir = os.getcwd() # 当前路径\nproject_dir = os.path.split(workdir)[0]\n\n\"\"\"\n命名实体识别任务:\n 模型:bert + BiLstm + CRF\n 数据集:中文clue评测任务中cluener数据集:下载地址:https://www.cluebenchmarks.com/introduce.html\n 开发集:precision: 0.7649 , recall: 0.7920 , f1: 0.7782 (bert-wwm-base)\n\"\"\"\n\nclass Config:\n\n seed = 42 # 随机种子\n gpuids = \"2\" # 设置显卡序号,若为None,则不使用gpu\n nlog = 50 # 多少step打印一次记���(loss,评估指标)\n early_stop = True\n\n train_batch_size = 32\n eval_batch_size = 32\n epochs = 5\n lr = 5e-5 # 学习率\n\n do_train = True\n do_eval = True\n do_infer = True\n\n # 新增超参数\n margin = 1\n max_len = 128\n rnn_dim = 128\n num_labels = 12\n use_lstm = True\n\n task_name = \"NER_Example\"\n\n # 配置路径\n train_data_path = \"/workspace/data/cluener/train.json\" # 训练集数据的路径,建议绝对路径\n dev_data_path = [\"/workspace/data/cluener/dev.json\"] # 验证集数据的路径,建议绝对路径\n test_data_path = [\"/workspace/data/cluener/test.json\"] # 测试集数据的路径,建议绝对路径\n\n # transformer结构(Bert, Albert, Roberta等)的预训练模型的配置, 路径也建议是绝对路径\n bert_model_path = \"/workspace/Idiom_cloze/pretrained_models/chinese_wwm_pytorch/pytorch_model.bin\" # 预训练模型路径, 例如bert预训练模型\n model_config_path = 
\"/workspace/Idiom_cloze/pretrained_models/chinese_wwm_pytorch/config.json\" # 预训练模型的config文件路径, 一般是json文件\n vocab_path = \"/workspace/Idiom_cloze/pretrained_models/chinese_wwm_pytorch/vocab.txt\" # vocab文件路径,可以是预训练模型的vocab.txt文件\n\n model_save_path = project_dir + f\"/model_save/{task_name.lower()}_model\" # 训练过程中最优模型或者训练结束后的模型保存路径\n output_path = project_dir + f\"/output/{task_name.lower()}_model\" # 模型预测输出预测结果文件的路径\n\n # 新增文件路径\n label_list_path = \"/workspace/data/cluener/label_list.txt\"\n\n\n# 构建模型动态计算图\nclass Model(BertPreTrainedModel):\n \"\"\"\n 模型说明:bert + BiLstm + CRF, 用于 命名实体识别 任务\n \"\"\"\n def __init__(self, model_config, task_config):\n super(Model, self).__init__(model_config)\n # 768 is the dimensionality of bert-base-uncased's hidden representations\n # Load the pretrained BERT model\n self.model_config = model_config\n self.task_config = task_config\n self.bert = BertModel(config=model_config)\n self.lstm = nn.LSTM(model_config.hidden_size, task_config.rnn_dim, num_layers=1, bidirectional=True, batch_first=True)\n self.dropout = nn.Dropout(0.5)\n self.linear = nn.Linear(task_config.rnn_dim * 2, task_config.num_labels)\n # self.crf = CRF(task_config.num_labels, use_gpu=True)\n self.crf = CRF(num_tags=task_config.num_labels, batch_first=True)\n\n self.init_weights()\n\n def forward(self, inputs):\n\n input_ids = inputs.get(\"input_ids\", None)\n attention_mask = inputs.get(\"input_masks\", None)\n token_type_ids = inputs.get(\"token_type_ids\", None)\n label_ids = inputs.get(\"label_ids\", None)\n\n # input_ids [batch, max_seq_length] sequence_outputs [batch, max_seq_length, hidden_state]\n bert_outputs = self.bert(input_ids, attention_mask, token_type_ids)\n sequence_outputs = bert_outputs[0]\n # blank_states = sequence_outputs[[i for i in range(len(positions))], positions] # [batch, hidden_state]\n if self.task_config.use_lstm:\n sequence_outputs, _ = self.lstm(sequence_outputs)\n\n sequence_outputs_drop = self.dropout(sequence_outputs)\n emissions = self.linear(sequence_outputs_drop)\n \n logits = self.crf.decode(emissions, attention_mask.byte())[0]\n\n outputs = {\n \"logits\": logits,\n }\n if label_ids is not None:\n loss = -1*self.crf(emissions, label_ids, mask=attention_mask.byte()) \n outputs[\"loss\"] = loss\n \n return outputs\n\n# 编写任务\n@ register_task\nclass NER_Example(Basic_task):\n def __init__(self, task_config):\n super().__init__(task_config)\n self.task_config = task_config\n self.max_len = task_config.max_len\n \n # model init 模型初始化,加载预训练模型\n self.model_config = BertConfig.from_pretrained(self.task_config.model_config_path)\n # self.tokenizer = BertTokenizer.from_pretrained(self.task_config.vocab_path, lowercase=True)\n self.vocab = Vocab(task_config.vocab_path)\n self.label_vocab = Vocab(self.task_config.label_list_path)\n task_config.num_labels = self.label_vocab.vocab_size\n if task_config.do_train:\n self.model = Model.from_pretrained(pretrained_model_name_or_path=self.task_config.bert_model_path,\n config=self.model_config, task_config=task_config)\n else:\n self.model = Model(self.model_config, task_config=task_config)\n if self.task_config.gpuids != None:\n self.model.to(self.device)\n # 单机多卡训练\n if self.n_gpu > 1:\n self.model = nn.DataParallel(self.model)\n\n def evaluate(self, dataset, mode=Task_Mode.Eval, epoch=None):\n data_loader = torch.utils.data.DataLoader(\n dataset,\n shuffle=False,\n batch_size=self.task_config.eval_batch_size\n )\n metric = SeqEntityScore(self.label_vocab.id2word, markup=\"bio\")\n outputs = self.predict(self.model, 
data_loader)\n for output in outputs:\n logits = output[\"logits\"] \n text = output[\"text\"]\n tag = logits[1:-1].numpy().tolist()\n text_len = min(len(text), self.max_len - 2)\n tag = tag[:text_len]\n pred_tags = [self.label_vocab.id2word[t] for t in tag]\n if mode == Task_Mode.Eval:\n label_ids = output['label_ids'].cpu().numpy().tolist()\n label = label_ids[1:text_len + 1]\n assert len(label) == text_len\n true_tags = [self.label_vocab.id2word[l] for l in label]\n metric.update(label_tags=[true_tags], pred_tags=[pred_tags])\n else:\n entities = metric.get_entity(pred_tags=pred_tags)\n output[\"result\"] = entities\n \n if mode == Task_Mode.Eval:\n eval_info, entity_info = metric.eval_result()\n info = \", \".join([f' {key}: {value:.4f} ' for key, value in eval_info.items()])\n logger.info(f\"Evaluate: epoch={epoch}, step={self.global_step}, {info}\")\n return eval_info[\"f1\"]\n else:\n return outputs\n\n def train(self, dataset, valid_dataset=None):\n logging.info(f\"train dataset size = {len(dataset)}\")\n if valid_dataset is not None:\n logging.info(f\"valid dataset size = {len(valid_dataset)}\")\n data_loader = torch.utils.data.DataLoader(\n dataset,\n shuffle=True,\n batch_size=self.task_config.train_batch_size,\n )\n num_train_steps = int(len(dataset) / self.task_config.train_batch_size * self.task_config.epochs)\n optimizer, scheduler = self.create_optimizer(self.model, use_scheduler=True, num_warmup_steps=1000,\n num_train_steps=num_train_steps)\n self.model.train()\n # Train the model on each batch\n # Reset gradients\n loss_buffer = 0\n for epoch in range(self.task_config.epochs):\n for bi, batch in enumerate(data_loader):\n self.model.zero_grad()\n outputs = self.run_one_step(batch, self.model)\n logits = outputs.pop(\"logits\")\n loss = outputs.pop(\"loss\")\n # Calculate gradients based on loss\n loss = loss.mean()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() #更新模型参数\n scheduler.step() # 更新learning rate\n self.global_step += 1\n\n loss_buffer += loss.item()\n if self.global_step % self.task_config.nlog == 0:\n logger.info(\"epoch={}, step={}, loss={:.4f}\".format(epoch+1, self.global_step, loss_buffer / self.task_config.nlog))\n loss_buffer = 0\n \n if valid_dataset != None:\n eval_score = self.evaluate(valid_dataset, mode=Task_Mode.Eval, epoch=epoch+1)\n self.model.train()\n if self.task_config.early_stop:\n self.es(epoch, eval_score, self.model, model_path=self.task_config.model_save_path)\n if self.es.early_stop:\n logger.info(\"********** Early stopping ********\")\n break\n # 保存训练过程中的模型,防止意外程序停止,可以接着继续训练\n # self.save_checkpoint(model=self.model, model_path=self.task_config.model_save_path, epoch=epoch)\n \n \n def read_data(self, file, mode):\n \"\"\"\n 根据不同任务编写数据处理,建议将原始数据进行预处理之后再在这里写数据处理成模型输入结构\n \"\"\"\n dataset = []\n with open(file, \"r\", encoding=\"utf-8\") as fin:\n lines = fin.readlines()\n tk0 = tqdm(lines, total=len(lines))\n for line in tk0:\n line = json.loads(line)\n text = line[\"text\"]\n\n input_ids = [self.vocab.get_id(\"[CLS]\")] + [self.vocab.word2id.get(t, self.vocab.get_id(\"[UNK]\")) for t in text][:self.max_len - 2] + [self.vocab.get_id(\"[SEP]\")]\n token_type_ids = [0] * len(input_ids) + [0] * (self.max_len - len(input_ids))\n input_masks = [1] * len(input_ids) + [0] * (self.max_len - len(input_ids))\n\n if mode != Task_Mode.Infer:\n label_entities = line.get('label', None)\n words = list(text)\n labels = ['O'] * len(words)\n if label_entities is not None:\n for key, value in label_entities.items():\n for sub_name,sub_index 
in value.items():\n for start_index,end_index in sub_index:\n assert ''.join(words[start_index:end_index+1]) == sub_name\n if start_index == end_index:\n labels[start_index] = 'B-'+key\n else:\n labels[start_index] = 'B-'+key\n labels[start_index+1:end_index+1] = ['I-'+key]*(len(sub_name)-1)\n\n label_ids = [0] + [self.label_vocab.word2id[each] for each in labels][:self.max_len - 2] + [0]\n assert len(input_ids) == len(label_ids)\n label_ids = label_ids + [0] * (self.max_len - len(label_ids))\n assert len(label_ids) == self.max_len\n\n input_ids = input_ids + [0] * (self.max_len - len(input_ids))\n assert len(input_ids) == self.max_len\n assert len(input_masks) == self.max_len\n assert len(token_type_ids) == self.max_len\n if mode != Task_Mode.Infer:\n dataset.append({\n \"text\": text,\n \"labels\": \" \".join(labels),\n 'input_ids': torch.tensor(input_ids, dtype=torch.long),\n 'input_masks': torch.tensor(input_masks, dtype=torch.long),\n 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),\n 'label_ids': torch.tensor(label_ids, dtype=torch.long),\n })\n else:\n dataset.append({\n \"text\": text,\n 'input_ids': torch.tensor(input_ids, dtype=torch.long),\n 'input_masks': torch.tensor(input_masks, dtype=torch.long),\n 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),\n })\n\n return dataset\n\n\ndef seed_set(seed):\n '''\n set random seed of cpu and gpu\n '''\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ndef run():\n config = Config()\n check_dir([config.model_save_path, config.output_path])\n seed_set(config.seed)\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpuids # 设置gpu序号\n task_cls = find_task(config.task_name)\n task = task_cls(task_config=config)\n if config.do_train:\n dataset = task.read_data(config.train_data_path, mode=Task_Mode.Train)\n if config.do_eval:\n valid_dataset = task.read_data(config.dev_data_path[0], mode=Task_Mode.Eval)\n task.train(dataset, valid_dataset=valid_dataset)\n else:\n task.train(dataset)\n if config.do_eval:\n task.load_model(config.model_save_path)\n for dev_path in config.dev_data_path:\n logging.info(f\"Evaluating model in {dev_path}\")\n dataset = task.read_data(dev_path, mode=Task_Mode.Eval)\n logging.info(f\"dev dataset size = {len(dataset)}\")\n task.evaluate(dataset, mode=Task_Mode.Eval)\n if config.do_infer:\n task.load_model(config.model_save_path)\n for test_path in config.test_data_path:\n logging.info(f\"Testing model in {test_path}\")\n dataset = task.read_data(test_path, mode=Task_Mode.Infer)\n logging.info(f\"test dataset size = {len(dataset)}\")\n task.evaluate(dataset, mode=Task_Mode.Infer)\n\nif __name__ == '__main__':\n run()\n","sub_path":"examples/ner_example.py","file_name":"ner_example.py","file_ext":"py","file_size_in_byte":14757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"521305845","text":"from .contestant import Contestant\nfrom .rel import Rel\nimport csv\nimport os\n\nclass Cast:\n #constructor\n def __init__(self, file_name):\n self.file_name = file_name\n self.cast_list = []\n\n size = self.get_cast_from_csv()\n\n self.size = size\n self.rel = Rel(size)\n\n #prints the whole cast's name and age\n def print_cast(self):\n for i in range(self.size):\n name = self.cast_list[i].name\n age = str(self.cast_list[i].age)\n print(name + \", \" + age)\n \n #gets a Contestant by its index\n def get_cont_by_index(self, index):\n return self.cast_list[index]\n\n #eliminates a cast member\n def 
eliminate(self, cont_index):\n self.cast_list[cont_index].elim = True\n\n #resets weekly flags (nom and imn)\n def reset_weekly_flags(self):\n for i in range(self.size):\n self.cast_list[i].nom = False\n self.cast_list[i].imn = False\n\n #get cast from csv\n def get_cast_from_csv(self):\n line_count = 0\n with open(self.file_name) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n name = row[0]\n age = row[1]\n person = Contestant(name, age, line_count - 1)\n print(f'\\t{person.index}. {row[0]} is {row[1]} years old.')\n self.cast_list.append(person)\n line_count += 1\n print(f'Processed {line_count} lines.')\n return line_count - 1\n\n #gets traits/skills from csv\n def get_attributes_from_csv(self, filename, data_type):\n line_count = 0\n with open(filename) as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n if data_type == 't':\n self.cast_list[line_count].att.traits = row\n elif data_type == 's':\n self.cast_list[line_count].att.skills = row\n #person = self.cast_list[line_count]\n #print(f'\\t{person.index}. {person.name} has {person.att.traits[\"loyalty\"]} loyalty points.')\n line_count += 1\n print(f'Processed {line_count} lines.')\n return line_count\n","sub_path":"classes/cast.py","file_name":"cast.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"649258977","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.compat.v1.nn import rnn_cell\n\nfrom my_model import MyModel\n\n\nHIDDEN_SIZE = 30 # LSTM隐藏节点个数\nNUM_LAYERS = 2 # LSTM层数\nTIME_STEPS = 10 # 循环神经网络截断长度\nBATCH_SIZE = 32 # batch大小\n\nTRAINING_STEPS = 3000 # 训练轮数\nTRAINING_EXAMPLES = 10000 # 训练数据个数\nTESTING_EXAMPLES = 1000 # 测试数据个数\nSAMPLE_GAP = 0.01 # 采样间隔\n\n\ndef generate_data(seq):\n X = []\n y = []\n for i in range(len(seq) - TIME_STEPS - 1):\n X.append([seq[i:i + TIME_STEPS]])\n y.append([seq[i + TIME_STEPS]])\n return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)\n\n\nif __name__ == \"__main__\":\n\n # 用sin生成训练和测试数据集\n test_start = TRAINING_EXAMPLES * SAMPLE_GAP\n test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP\n train_X, train_y = generate_data(np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))\n test_X, test_y = generate_data(np.sin(np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))\n\n # lstm_model(train_X, train_y)\n\n model = MyModel(BATCH_SIZE, TIME_STEPS, 1, 1, HIDDEN_SIZE, NUM_LAYERS)\n model.train(train_X, train_y)\n res = model.predict(test_X, test_y)\n print(test_y[:5])\n print(res[:5])\n\n\n","sub_path":"Learning of TensorFlow 1.x/rnn_demo/demo_practice.py","file_name":"demo_practice.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"38020690","text":"\"\"\"\nPytorch based MLP for MNIST dataset\n\nhttp://yann.lecun.com/exdb/mnist/\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = ['mlp_mnist']\n\nclass MLP(nn.Module):\n def __init__(self, depth, num_class, dropout:bool=False, drop_rate:float=0.2):\n super(MLP, self).__init__()\n assert len(depth) > 0, \"The configuration of the MLP cannot be empty\"\n \n blocks = []\n in_channels = depth[0]\n for layer_idx in range(1,len(depth)):\n out_channels = depth[layer_idx]\n 
blocks += [\n nn.Linear(in_features=in_channels, out_features=out_channels),\n nn.ReLU(inplace=True)\n ]\n if dropout:\n blocks += [nn.Dropout(p=drop_rate)]\n \n in_channels = out_channels\n \n self.mlp = nn.Sequential(*blocks)\n self.classifier = nn.Linear(in_channels, num_class, bias=True) # last fc\n\n def forward(self, x):\n x = self.mlp(x)\n x = self.classifier(x)\n return x\n\nclass mlp_mnist:\n base = MLP\n args = list()\n kwargs = {'num_class': 10}\n\nif __name__ == '__main__':\n x = torch.randn(1, 784) # flattened 28x28 MNIST image, matching the first Linear layer\n model = MLP([784, 400, 400], num_class=10, dropout=True)\n y = model(x)\n print(model)\n print(f\"Size of the output = {y.size()}\")","sub_path":"models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"119288947","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n hashTable = {} # hash saves index of the element\n for index, elem in enumerate(nums):\n needNum = target - elem # we need this num to have target\n # it is in case needNum == elem -> to ensure have two of the same key\n # need to check it is not the same one by index\n if (needNum in hashTable) and (hashTable[needNum] != index):\n # we can return as the q is about two sums\n return list((index, hashTable[needNum]))\n hashTable[elem] = index\n","sub_path":"LeetCode/Python/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"249506618","text":"import configs\nfrom Game2048 import Game2048\n\ndef main(args):\n \"\"\"\n screen_width: Width of the form\n screen_height: Height of the form\n block_gap: Gap between two blocks\n block_size: Size of a block\n \"\"\"\n screen_width = args.screen_width\n screen_height = args.screen_height\n block_gap = args.block_gap\n block_size = args.block_size\n block_arc = args.block_arc\n\n game = Game2048(screen_width, screen_height, block_gap, block_size, block_arc)\n game.Form()\n\n\nif __name__ == '__main__':\n args = configs.parse_args()\n main(args)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"563610418","text":"from datetime import datetime\nimport logging\nimport os\nimport unittest\n\nfrom .helpers import build_datapoint_with_counts, TemporaryDirectory\nfrom stats.csv_writer import CSVWriter\n\n\n# Prevent info/debug logging cluttering up test output\nlogging.disable(logging.INFO)\n\n\nclass TestCSVWriter(unittest.TestCase):\n def test_default_filename(self):\n start_date = datetime(2015, 2, 3, 9, 15, 30)\n end_date = datetime(2015, 3, 6, 14, 27, 17)\n writer = CSVWriter(start_date=start_date, end_date=end_date)\n\n expected_filename = 'report_2015-02-03_2015-03-06.csv'\n self.assertEqual(writer.output_filename, expected_filename)\n\n def test_writing_csv(self):\n datapoints = [\n build_datapoint_with_counts('/path1'),\n build_datapoint_with_counts('/path2'),\n ]\n\n expected_csv_lines = [\n 'uniquePageviews,problemReports,searchUniques,pagePath,_id,problemsPer100kViews,searchesPer100kViews',\n '10,2,5,/path1,_path1,20000.0,50000.0',\n '10,2,5,/path2,_path2,20000.0,50000.0',\n ]\n\n with TemporaryDirectory() as tempdir:\n csv_filename = os.path.join(tempdir, 'test_report.csv')\n writer = CSVWriter(output_filename=csv_filename)\n writer.write_datapoints(datapoints)\n\n 
with open(csv_filename, 'r') as open_file:\n file_lines = open_file.read().splitlines()\n self.assertEqual(file_lines, expected_csv_lines)\n","sub_path":"tests/csv_writer_tests.py","file_name":"csv_writer_tests.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"101975316","text":"import praw\nfrom time import gmtime, strftime\nimport time\nimport os\n\ndef login():\n reddit = praw.Reddit(client_id = os.environ.get('client_id'),\n client_secret = os.environ.get('client_secret'),\n user_agent=\"sbincounter\",\n username = os.environ.get('username'),\n password = os.environ.get('password'))\n print('Logged in! ')\n return reddit\n\ndef run_bot(reddit):\n fdank = reddit.subreddit('formuladank')\n\n count = 1\n start_time = \"\\nGMT: \"+time.strftime(\"%a, %d %b %Y %I:%M:%S %p %Z\", time.gmtime())\n\n for comments in fdank.stream.comments(skip_existing=True):\n print(comments.body)\n if 'sbin' in comments.body.lower() or 's🅱️in' in comments.body.lower():\n count += 1\n if count%10 == 0:\n bot_msg = \"I am a bot that counts the number of time S BIN has been commented.\\n\"\n blank_space = \" \"\n sbin_msg = \"This is the \" +str(count) +\"th time the word 'S BIN' has been used since I started counting on \" +start_time\n reply = bot_msg +blank_space +sbin_msg\n print(reply)\n comments.reply(reply)\nwhile True:\n reddit = login()\n run_bot(reddit)\n","sub_path":"sbin_counter.py","file_name":"sbin_counter.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"247529378","text":"#!/usr/bin/env python3\n\n##on APP machine\n\nfrom struct import Struct, unpack, pack\nimport numpy as np\n\nclass BinaryIOError(Exception):\n def __init__(self, err):\n self.err=err\n \n def __str__(self):\n return str(self.err)\n \n def __repr(self):\n return str(self)\n\n##codes:\n## 1: long\n## 2: double\n## 3: long array\n## 4: double array\n## 5: string\n\nlong_struct = Struct(' item1.startdate - interval_time) & (cmps.startdate < item1.enddate + interval_time) | (cmps.enddate > item1.startdate - interval_time) & (cmps.enddate < item1.enddate + interval_time)))]\n if not match.empty:\n match.insert(0, 'anml_1', item1.catalognumber)\n match.insert(1, 'anml_1_seq', item1.seq_num)\n final_set.extend(match[['anml_1', 'anml_1_seq', 'station',\n 'catalognumber', 'seq_num', 'startdate',\n 'enddate', 'startunqdetecid',\n 'endunqdetecid', 'total_count']].values.tolist())\n seen.extend([(idx1, i) for i in match.index.tolist()])\n\n output_df = pd.DataFrame(final_set)\n output_df.columns = ['anml_1',\n 'anml_1_seq',\n 'station',\n 'anml_2',\n 'anml_2_seq',\n 'anml_2_arrive',\n 'anml_2_depart',\n 'anml_2_startunqdetecid',\n 'anml_2_endunqdetecid',\n 'anml_2_detcount']\n return output_df","sub_path":"pycfiles/resonATe-1.0.7-py3.6/cohorts.cpython-36.py","file_name":"cohorts.cpython-36.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"375558495","text":"import tensorflow as t\r\n\r\n#tf constants\r\nx=t.constant([5,7])\r\ny=t.constant([8,8])\r\nz=t.constant(\"hello\")\r\n\r\n#placeholders are tensorflow variables accepts any data\r\nm=t.placeholder(t.int16)\r\nn=t.placeholder(t.int16)\r\n\r\n#waiting for call\r\nr=t.multiply(x,y)\r\no=t.multiply(m,n)\r\n\r\n#where it all happens\r\nwith t.Session() as ses:\r\n print (x,y)\r\n 
print(ses.run(r))\r\n print(ses.run(o,feed_dict={m:5,n:5}))\r\n\r\n","sub_path":"Tensorflow/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"184460098","text":"from torch.utils.data import Dataset\nimport os\nfrom glob import glob\nfrom PIL import Image\n\nclass dataset(Dataset):\n imext = set(['.png', '.jpg'])\n def __init__(self, path, transform=None):\n self.path = path\n self.imext = set(['.png', '.jpg'])\n self.files = self._collect_files(path)\n self.transform = transform\n \n def _collect_file_path(self, path):\n out = []\n files = os.listdir(path)\n files = [os.path.splitext(x) for x in files]\n for name, ext in files:\n if ext in self.imext:\n out.append(os.path.join(path, name+ext)) \n return out\n \n def _collect_files(self, path):\n return self._collect_file_path(path)\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n impath = self.files[idx]\n image = Image.open(impath)\n if self.transform is not None:\n image = self.transform(image)\n return image\n\nclass datasetWSI_simple(dataset):\n def __init__(self, path, transform=None):\n super(datasetWSI_simple, self).__init__(path, transform)\n\n def _collect_files(self, path):\n out = []\n paths = glob(os.path.join(path, '*'))\n for p in paths:\n out += self._collect_file_path(p)\n return out\n\n","sub_path":"modules/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"317784142","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Post, Person\nfrom django.template import loader\n\ndef blogIndex(request):\n latest_posts = Post.objects.order_by('-pub_date')[:5]\n template = loader.get_template('blog/index.html')\n context = {\n 'latest_posts': latest_posts,\n }\n return HttpResponse(template.render(context,request))\n\ndef postDetails(request, pk):\n\tpost = get_object_or_404(Post, id=pk)\n\tcontext = {\n\t\t\"title\": post.title,\n\t\t\"post\": post,\n\t}\n\treturn render(request, \"blog/details.html\", context)","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"323446015","text":"\"\"\"Multiprocessing decorators required by the tiatoolbox.\"\"\"\n\nimport multiprocessing\nfrom functools import partial\n\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\n\nclass TIAMultiProcess:\n \"\"\"Multiprocessing class decorator for the toolbox, requires a list `iter_on`\n as input on which multiprocessing will run\n\n Attributes:\n iter_on (str): Variable on which iterations will be performed.\n workers (int): num of cpu cores to use for multiprocessing.\n\n Examples:\n >>> from tiatoolbox.decorators.multiproc import TIAMultiProcess\n >>> import cv2\n >>> @TIAMultiProcess(iter_on=\"input_path\")\n ... def read_images(input_path, output_dir=None):\n ... img = cv2.imread(input_path)\n ... 
return img\n >>> imgs = read_images(input_path)\n\n \"\"\"\n\n def __init__(self, iter_on):\n \"\"\"\n Args:\n iter_on: Variable on which iterations will be performed.\n \"\"\"\n self.iter_on = iter_on\n self.workers = multiprocessing.cpu_count()\n\n def __call__(self, func):\n \"\"\"\n Args:\n func: function to be run with multiprocessing\n\n Returns:\n\n \"\"\"\n\n def func_wrap(*args, **kwargs):\n \"\"\"Wrapping function for decorator call\n Args:\n *args: args inputs\n **kwargs: kwargs inputs\n\n Returns:\n\n \"\"\"\n if \"workers\" in kwargs:\n self.workers = kwargs.pop(\"workers\")\n try:\n iter_value = kwargs.pop(self.iter_on)\n except KeyError:\n raise ValueError(\"Please specify iter_on in multiprocessing decorator\")\n\n with Pool(self.workers) as p:\n results = p.map(partial(func, **kwargs), iter_value,)\n p.clear()\n\n return results\n\n func_wrap.__doc__ = func.__doc__\n\n return func_wrap\n","sub_path":"tiatoolbox/decorators/multiproc.py","file_name":"multiproc.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"504486311","text":"# import sys\n# sys.path.append('/home/litshit/booking_management_system/source')\n\nimport os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"source\"))\nfrom tkinter import *\n#from functools import partial\nfrom tkinter import *\nfrom functools import partial\nfrom tkinter import messagebox\nimport sqlite3\nfrom tkinter import *\n#from functools import partial\nfrom theaterdbcreator import *\nfrom abcdef import *\n\n\ndef getDetails(rec_code,screen):\n code=rec_code.get()\n print(code)\n # screen.destroy()\n theaterCreator()\n \n conn=sqlite3.connect('theater.db')\n c=conn.cursor()\n \n c.execute('''SELECT price FROM receipt WHERE receiptNum = (?)''',(code,))\n price=c.fetchall()\n\n c.execute('''SELECT seats FROM receipt WHERE receiptNum = (?)''',(code,))\n seats=c.fetchall()\n\n c.execute('''SELECT movie FROM receipt WHERE receiptNum = (?)''',(code,))\n movie=c.fetchall()\n\n screenA=Tk()\n displayReceipt(screenA,code,price,seats,movie)\n screenA.mainloop()\n\n\ndef displayReceipt(screen,code,price,seats,movie):\n root=screen\n print('price : ',price)\n print('seats : ',seats)\n print('movie :',movie)\n \n for tup in price:\n pri=tup[0]\n\n for tupl in seats:\n sea=tupl\n\n for tuplee in movie:\n mov=tuplee\n\n \n Label(root,text='Receipt Code : '+ str(code) ,width=\"30\",height=\"1\",font=(\"Calibri\",13)).pack(fill=\"x\")\n Label(text=\"\").pack()\n\n Label(root,text='seats : '+ str(sea) ,width=\"30\",height=\"1\",font=(\"Calibri\",13)).pack(fill=\"x\")\n Label(text=\"\").pack()\n\n Label(root,text='Price : '+ str(pri) ,width=\"30\",height=\"1\",font=(\"Calibri\",13)).pack(fill=\"x\")\n Label(text=\"\").pack()\n\n Label(root,text='Movie : '+ str(mov) ,width=\"30\",height=\"1\",font=(\"Calibri\",13)).pack(fill=\"x\")\n Label(text=\"\").pack()\n\n \n\n\ndef adminSide(screen2):\n\n # global screen2\n # screen2=Toplevel(screen)\n screen2.title(\"LOGIN PAGE\")\n screen2.geometry(\"300x250\")\n\n global receipt_code\n receipt_code=StringVar()\n\n Label(screen2,text=\"\",width=\"30\",height=\"1\",font=(\"Calibri\",13)).pack()\n Label(text=\"\").pack()\n\n Label(screen2,text=\"Enter receipt code\").pack()\n Entry(screen2,textvariable=receipt_code).pack()\n\n Label(screen2,text=\"\",width=30,height=1,font=(\"Calibri\",13)).pack()\n\n Button(screen2,text=\"LOGIN\",width=10,height=1,command=lambda:getDetails(receipt_code,screen2)).pack()\n 
\n\ndef main():\n mainScreen=Tk()\n adminSide(mainScreen)\n mainScreen.mainloop()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"adminside.py","file_name":"adminside.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"3254892","text":"#!/usr/bin/env python3\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Pango, GdkPixbuf, Gdk, Gio, GObject,GLib\n\nimport sys\nimport os\n\nfrom . import settings\nimport gettext\nimport threading\nimport datetime\nfrom . import Screenshot\n\ngettext.textdomain(settings.TEXT_DOMAIN)\n_ = gettext.gettext\n\n\nclass SiteBox(Gtk.VBox):\n\n\tSYNC_CONTENT_WAITING_CODE=22\n\tDELETING_SITE_WAITING_CODE=25\n\tACTIONS_SHOW_SITE_WAITING_CODE=26\n\tACTIONS_HIDE_SITE_WATIING_CODE=27\n\n\tdef __init__(self):\n\t\t\n\t\tGtk.VBox.__init__(self)\n\t\t\n\t\tself.core=Core.Core.get_core()\n\t\t\n\t\tbuilder=Gtk.Builder()\n\t\tbuilder.set_translation_domain(settings.TEXT_DOMAIN)\n\t\tui_path=self.core.ui_path\n\t\tbuilder.add_from_file(ui_path)\n\n\t\tself.css_file=self.core.rsrc_dir+\"easy-sites.css\"\n\t\tself.manage_site_image=self.core.rsrc_dir+\"manage_site.svg\"\n\t\tself.image_nodisp=self.core.rsrc_dir+\"no_disp.png\"\n\t\tself.main_box=builder.get_object(\"sites_data_box\")\n\t\tself.sites_box=builder.get_object(\"sites_box\")\n\t\tself.scrolledwindow=builder.get_object(\"scrolledwindow\")\n\t\tself.sites_list_box=builder.get_object(\"sites_list_box\")\n\t\tself.sites_list_vp=builder.get_object(\"sites_list_viewport\")\n\t\tself.pack_start(self.main_box,True,True,0)\n\t\tself.set_css_info()\n\t\tself.init_threads()\n\t\t\t\t\n\t#def __init__\n\n\tdef set_css_info(self):\n\t\t\n\t\tself.style_provider=Gtk.CssProvider()\n\n\t\tf=Gio.File.new_for_path(self.css_file)\n\t\tself.style_provider.load_from_file(f)\n\n\t\tGtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(),self.style_provider,Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n\t\tself.sites_list_box.set_name(\"WINDOW\")\t\t\t\n\t#def set_css_info\t\n\n\tdef init_threads(self):\n\n\t\tself.sync_folder_t=threading.Thread(target=self.sync_folder)\n\t\tself.open_site_browser_t=threading.Thread(target=self.open_site_browser)\n\t\tself.open_folder_t=threading.Thread(target=self.open_site_folder)\n\t\tself.delete_folder_t=threading.Thread(target=self.delete_site)\n\t\tself.manage_visibility_t=threading.Thread(target=self.manage_visibility)\n\t\tself.sync_folder_t.daemon=True\n\t\tself.open_site_browser_t.daemon=True\n\t\tself.open_folder_t.daemon=True\n\t\tself.delete_folder_t.daemon=True\n\t\tself.manage_visibility_t.daemon=True\n\n\t\tGObject.threads_init()\t\n\n\t#def init_threads\t\t\n\t\t\t\n\tdef init_sites_list(self):\n\t\n\t\ttmp=self.core.siteBox.sites_list_box.get_children()\n\t\tfor item in tmp:\n\t\t\tself.sites_list_box.remove(item)\n\n\t#def init_sites_list\n\t\t\t\n\n\tdef draw_site(self,search,args=None):\n\n\t\tself.init_sites_list()\n\t\tself.search_box=search\n\t\tif not self.search_box:\n\t\t\tself.sites_list=self.core.mainWindow.sites_info \n\t\t\t\n\t\telse:\n\t\t\tself.sites_list=self.core.mainWindow.search_list\n\n\t\tcont=len(self.sites_list)\n\t\tfor item in self.sites_list:\n\t\t\tself.new_site_box(item,cont)\n\t\t\tcont-=1\n\t\t\n\t#def draw_site\t\t\n\n\tdef new_site_box(self,siteId,cont,args=None):\n\n\t\thbox=Gtk.HBox()\n\t\t\n\t\timage=Gtk.HBox()\n\t\tcustom=False\n\t\tif 
self.sites_list[siteId][\"image\"][\"option\"]==\"custom\":\n\t\t\timage_name=self.sites_list[siteId][\"image\"][\"img_path\"].split(\"/.\")[1]\n\t\t\t#image_path=os.path.join(self.core.image_dir,image_name)\n\t\t\timage_path=self.sites_list[siteId][\"image\"][\"img_path\"]\n\t\t\tcustom=True\n\t\telse:\n\t\t\timage_path=self.sites_list[siteId][\"image\"][\"img_path\"]\n\t\t\timage_name=os.path.basename(image_path)\t\n\t\t\t\n\t\timage_info={}\n\t\timage_info[\"x\"]=110\n\t\timage_info[\"y\"]=110\n\t\timage_info[\"image_id\"]=image_name\n\t\timage_info[\"image_url\"]=image_path\n\t\timage_info[\"image_path\"]=image_path\n\t\timage_info[\"aspect_ratio\"]=False\n\n\t\tss=Screenshot.ScreenshotNeo()\n\t\tif custom:\n\t\t\tss.download_image(image_info)\n\t\telse:\n\t\t\tss.set_from_file(image_info)\n\t\t\n\t\timage.set_margin_left(15)\n\t\timage.set_margin_bottom(15)\n\t\timage.set_halign(Gtk.Align.CENTER)\n\t\timage.set_valign(Gtk.Align.CENTER)\n\t\timage.id=siteId\n\t\timage.pack_start(ss,True,True,5)\n\t\t\n\t\tvbox_site=Gtk.VBox()\n\t\thbox_site_data=Gtk.HBox()\n\t\thbox_site_description=Gtk.VBox()\n\t\tsite_name=Gtk.Label()\n\t\tsite_name.set_text(self.sites_list[siteId][\"name\"])\n\t\tsite_name.set_margin_left(10)\n\t\tsite_name.set_margin_right(5)\n\t\tsite_name.set_margin_top(25)\n\t\tsite_name.set_margin_bottom(1)\n\t\tsite_name.set_width_chars(15)\n\t\tsite_name.set_max_width_chars(15)\n\t\tsite_name.set_xalign(-1)\n\t\tsite_name.set_ellipsize(Pango.EllipsizeMode.MIDDLE)\n\t\tsite_name.set_name(\"SITE_NAME\")\n\t\tsite_name.set_valign(Gtk.Align.START)\n\n\t\thbox_site_author=Gtk.HBox()\n\t\tsite_author=Gtk.Label()\n\t\tauthor=self.sites_list[siteId][\"author\"]\n\t\tsite_author.set_text(_(\"Created by: \")+author)\n\t\tsite_author.set_margin_left(10)\n\t\tsite_author.set_margin_right(0)\n\t\tsite_author.set_margin_bottom(15)\n\t\tsite_author.set_width_chars(22)\n\t\tsite_author.set_max_width_chars(22)\n\t\tsite_author.set_xalign(-1)\n\t\tsite_author.set_ellipsize(Pango.EllipsizeMode.MIDDLE)\n\t\tsite_author.set_name(\"SITE_AUTHOR\")\n\t\tsite_author.set_valign(Gtk.Align.START)\n\n\t\tsite_updatedby=Gtk.Label()\n\t\tupdatedby=self.sites_list[siteId][\"updated_by\"]\n\t\tsite_updatedby.set_text(_(\"Updated by: \")+updatedby)\n\t\tsite_updatedby.set_margin_left(8)\n\t\tsite_updatedby.set_margin_right(5)\n\t\tsite_updatedby.set_margin_bottom(15)\n\t\tsite_updatedby.set_width_chars(25)\n\t\tsite_updatedby.set_max_width_chars(25)\n\t\tsite_updatedby.set_xalign(-1)\n\t\tsite_updatedby.set_ellipsize(Pango.EllipsizeMode.MIDDLE)\n\t\tsite_updatedby.set_name(\"SITE_AUTHOR\")\n\t\tsite_updatedby.set_valign(Gtk.Align.END)\n\n\t\thbox_site_author.pack_start(site_author,False,False,1)\n\t\thbox_site_author.pack_end(site_updatedby,False,False,1)\n\n\t\thbox_site_description.pack_start(site_name,False,False,10)\n\t\thbox_site_description.pack_start(hbox_site_author,False,False,1)\n\t\t\n\t\tmanage_site=Gtk.Button()\n\t\tmanage_site_image=Gtk.Image.new_from_file(self.manage_site_image)\n\t\tmanage_site.add(manage_site_image)\n\t\tmanage_site.set_margin_top(25)\n\t\tmanage_site.set_margin_right(15)\n\t\tmanage_site.set_halign(Gtk.Align.CENTER)\n\t\tmanage_site.set_valign(Gtk.Align.CENTER)\n\t\tmanage_site.set_name(\"EDIT_ITEM_BUTTON\")\n\t\tmanage_site.connect(\"clicked\",self.manage_site_options,hbox)\n\t\tmanage_site.set_tooltip_text(_(\"Manage site\"))\n\n\t\tpopover = Gtk.Popover()\n\t\tmanage_site.popover=popover\n\t\tvbox = 
Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n\t\t\n\t\tbrowser_box=Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tbrowser_box.set_margin_left(10)\n\t\tbrowser_box.set_margin_right(10)\n\t\tbrowser_eb=Gtk.EventBox()\n\t\tbrowser_eb.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK)\n\t\tbrowser_eb.connect(\"button-press-event\", self.open_site,hbox)\n\t\tbrowser_eb.connect(\"motion-notify-event\", self.mouse_over_popover)\n\t\tbrowser_eb.connect(\"leave-notify-event\", self.mouse_exit_popover)\n\t\tbrowser_label=Gtk.Label()\n\t\tbrowser_label.set_text(_(\"Open site in browser\"))\n\t\tbrowser_eb.add(browser_label)\n\t\tbrowser_eb.set_name(\"POPOVER_OFF\")\n\t\tbrowser_box.add(browser_eb)\n\n\t\tfolder_box=Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tfolder_box.set_margin_left(10)\n\t\tfolder_box.set_margin_right(10)\n\t\tfolder_eb=Gtk.EventBox()\n\t\tfolder_eb.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK)\n\t\tfolder_eb.connect(\"button-press-event\", self.open_folder,hbox)\n\t\tfolder_eb.connect(\"motion-notify-event\", self.mouse_over_popover)\n\t\tfolder_eb.connect(\"leave-notify-event\", self.mouse_exit_popover)\n\t\tfolder_label=Gtk.Label()\n\t\tfolder_label.set_text(_(\"Open folder\"))\n\t\tfolder_eb.add(folder_label)\n\t\tfolder_eb.set_name(\"POPOVER_OFF\")\n\t\tfolder_box.add(folder_eb)\n\n\t\tsync_box=Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tsync_box.set_margin_left(10)\n\t\tsync_box.set_margin_right(10)\n\t\tsync_eb=Gtk.EventBox()\n\t\tsync_eb.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK)\n\t\tsync_eb.connect(\"button-press-event\", self.sync_site_clicked,hbox)\n\t\tsync_eb.connect(\"motion-notify-event\", self.mouse_over_popover)\n\t\tsync_eb.connect(\"leave-notify-event\", self.mouse_exit_popover)\n\t\tsync_label=Gtk.Label()\n\t\tsync_label.set_text(_(\"Sync new content\"))\n\t\tsync_eb.add(sync_label)\n\t\tsync_eb.set_name(\"POPOVER_OFF\")\n\t\tsync_box.add(sync_eb)\n\n\t\tedit_box=Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tedit_box.set_margin_left(10)\n\t\tedit_box.set_margin_right(10)\n\t\tedit_eb=Gtk.EventBox()\n\t\tedit_eb.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK)\n\t\tedit_eb.connect(\"button-press-event\", self.edit_site_clicked,hbox)\n\t\tedit_eb.connect(\"motion-notify-event\", self.mouse_over_popover)\n\t\tedit_eb.connect(\"leave-notify-event\", self.mouse_exit_popover)\n\t\tedit_label=Gtk.Label()\n\t\tedit_label.set_text(_(\"Edit site\"))\n\t\tedit_eb.add(edit_label)\n\t\tedit_eb.set_name(\"POPOVER_OFF\")\n\t\tedit_box.add(edit_eb)\n\t\t\n\t\tdelete_box=Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tdelete_box.set_margin_left(10)\n\t\tdelete_box.set_margin_right(10)\n\t\tdelete_eb=Gtk.EventBox()\n\t\tdelete_eb.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK)\n\t\tdelete_eb.connect(\"button-press-event\", self.delete_site_clicked,hbox)\n\t\tdelete_eb.connect(\"motion-notify-event\", self.mouse_over_popover)\n\t\tdelete_eb.connect(\"leave-notify-event\", self.mouse_exit_popover)\n\t\tdelete_label=Gtk.Label()\n\t\tdelete_label.set_text(_(\"Delete 
site\"))\n\t\tdelete_eb.add(delete_label)\n\t\tdelete_eb.set_name(\"POPOVER_OFF\")\n\t\tdelete_box.add(delete_eb)\n\n\t\tvbox.pack_start(browser_box, True, True,8)\n\t\tvbox.pack_start(folder_box,True,True,8)\n\t\tvbox.pack_start(sync_box, True, True,8)\n\t\tvbox.pack_start(edit_box, True, True,8)\n\t\tvbox.pack_start(delete_box,True,True,8)\n\t\t\n\t\tvbox.show_all()\n\t\tif 'client' in self.core.sitesmanager.flavours:\n\t\t\tfolder_box.hide()\n\n\t\tpopover.add(vbox)\n\t\tpopover.set_position(Gtk.PositionType.BOTTOM)\n\t\tpopover.set_relative_to(manage_site)\n\n\n\t\tswitch_button=Gtk.Switch()\n\t\tswitch_button.set_halign(Gtk.Align.CENTER)\n\t\tswitch_button.set_valign(Gtk.Align.CENTER)\n\t\tswitch_button.set_margin_top(25)\n\t\t\n\n\t\tif self.sites_list[siteId][\"visibility\"]:\n\t\t\tswitch_button.set_active(True)\n\t\t\tswitch_button.set_tooltip_text(_(\"Click to hide the site in the server main page\"))\n\t\telse:\n\t\t\tswitch_button.set_active(False)\n\t\t\tswitch_button.set_tooltip_text(_(\"Click to show the site in the server main page\"))\n\n\t\t\n\t\tswitch_button.connect(\"notify::active\",self.on_switch_activaded,hbox)\n\t\thbox_site_data.pack_start(hbox_site_description,False,False,5)\n\t\thbox_site_data.pack_end(manage_site,False,False,5)\n\t\thbox_site_data.pack_end(switch_button,False,False,5)\n\n\t\tsite_separator=Gtk.Separator()\n\t\tsite_separator.set_margin_top(15)\n\t\tsite_separator.set_margin_left(10)\n\t\tsite_separator.set_margin_right(15)\n\t\tif cont!=1:\n\t\t\tsite_separator.set_name(\"SEPARATOR\")\n\t\telse:\n\t\t\tsite_separator.set_name(\"WHITE_SEPARATOR\")\t\n\n\t\tvbox_site.pack_start(hbox_site_data,False,False,5)\n\t\tvbox_site.pack_end(site_separator,False,False,5)\n\n\t\thbox.pack_start(image,False,False,5)\n\t\thbox.pack_start(vbox_site,True,True,5)\n\t\thbox.show_all()\n\t\thbox.set_halign(Gtk.Align.FILL)\n\n\t\tself.sites_list_box.pack_start(hbox,False,False,1)\n\t\tself.sites_list_box.queue_draw()\n\t\tself.sites_list_box.set_valign(Gtk.Align.FILL)\n\t\thbox.queue_draw()\t\n\n\t#def new_site_box\t\n\t\t\n\tdef on_switch_activaded (self,switch,gparam,hbox):\n\n\t\tself.core.mainWindow.msg_label.set_text(\"\")\n\t\tself.core.mainWindow.manage_message(True,False)\n\n\t\tsite_to_edit=hbox\t\t\n\t\tsiteId=site_to_edit.get_children()[0].id\n\t\tturn_on=False\n\n\t\tif switch.get_active():\n\t\t\tturn_on=True\n\t\t\t#msg_switch=self.core.mainWindow.get_msg(SiteBox.ACTIONS_SHOW_SITE_WAITING_CODE)\n\t\t\tmsg_switch=SiteBox.ACTIONS_SHOW_SITE_WAITING_CODE\n\t\t\tvisible=True\n\t\t\t\n\t\telse:\n\t\t\t#msg_switch=self.core.mainWindow.get_msg(SiteBox.ACTIONS_HIDE_SITE_WATIING_CODE)\n\t\t\tmsg_switch=SiteBox.ACTIONS_HIDE_SITE_WATIING_CODE\n\t\t\tvisible=False\n\n\t\tself.args_visibility=[\"visibility\",self.sites_list[siteId],visible]\n\t\t#self.core.mainWindow.waiting_label.set_text(msg_switch)\t\t\t\n\t\t#self.core.mainWindow.waiting_window.show_all()\n\t\t#self.core.mainWindow.waiting_window.hide()\n\t\tself.core.mainWindow.manage_waiting_stack(True,msg_switch)\n\t\tself.init_threads()\n\t\tself.manage_visibility_t.start()\n\t\tGLib.timeout_add(100,self.pulsate_manage_visibiliy,turn_on,siteId,hbox)\n\t\t\t\n\n\t#def on_switch_activaded\n\n\tdef pulsate_manage_visibiliy(self,turn_on,siteId,hbox):\n\n\t\tif self.manage_visibility_t.is_alive():\n\t\t\t#self.core.mainWindow.waiting_pbar.pulse()\n\t\t\treturn True\n\n\t\telse:\n\t\t\t#self.core.mainWindow.waiting_window.hide()\n\t\t\tself.core.mainWindow.manage_waiting_stack(False)\n\n\t\t\tif 
self.result_visibiliy['status']:\n\t\t\t\tself.core.mainWindow.sites_info[siteId][\"visible\"]=turn_on\n\t\t\t\tself.sites_list[siteId][\"visibility\"]=turn_on\n\t\t\t\tself.core.sitesmanager.read_conf()\n\t\t\t\tif turn_on:\n\t\t\t\t\thbox.get_children()[1].get_children()[0].get_children()[1].set_tooltip_text(_(\"Click to hide the site in the server main page\"))\n\t\t\t\t\tself.core.mainWindow.manage_message(False,False,6)\n\t\t\t\telse:\n\t\t\t\t\thbox.get_children()[1].get_children()[0].get_children()[1].set_tooltip_text(_(\"Click to show the site in the server main page\"))\n\t\t\t\t\tself.core.mainWindow.manage_message(False,False,7)\n\t\t\telse:\n\t\t\t\tself.core.mainWindow.manage_message(False,True,self.result_visibiliy['code'])\t\n\n\t#def pulsate_manage_visibiliy\n\t\n\tdef\tmanage_visibility(self):\n\n\t\tself.result_visibiliy=self.core.sitesmanager.save_conf(self.args_visibility)\n\n\t#def manage_visibility\t\t\t\n\n\n\tdef sync_site_clicked(self,widget,event,hbox):\n\n\t\tself.core.mainWindow.msg_label.set_text(\"\")\n\t\tpopover=hbox.get_children()[1].get_children()[0].get_children()[2].popover.hide()\n\t\tsite_to_edit=hbox\t\t\n\t\tsiteId=site_to_edit.get_children()[0].id\n\t\tsync=False\t\n\n\t\tdialog = Gtk.FileChooserDialog(_(\"Please choose a folder to sync content\"), None,\n\t\t\tGtk.FileChooserAction.SELECT_FOLDER,(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n\t\t\tGtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n\t\tresponse = dialog.run()\n\t\tif response == Gtk.ResponseType.OK:\n\t\t\tfolder_to_sync=dialog.get_filename()\n\t\t\tsync=True\n\t\tdialog.destroy()\n\n\t\tif sync:\n\t\t\tnow = datetime.datetime.now()\n\t\t\tupdated_by=self.core.sitesmanager.credentials[0]\n\t\t\tlast_updated=now.strftime(\"%Y-%m-%d %H:%M\")\n\t\t\tself.args_sync=[\"sync\",self.sites_list[siteId],folder_to_sync,updated_by,last_updated]\n\t\t\t#self.core.mainWindow.waiting_label.set_text(self.core.mainWindow.get_msg(SiteBox.SYNC_CONTENT_WAITING_CODE))\t\t\t\n\t\t\t#self.core.mainWindow.waiting_window.show_all()\n\t\t\tself.core.mainWindow.manage_waiting_stack(True,SiteBox.SYNC_CONTENT_WAITING_CODE)\n\n\t\t\tself.init_threads()\n\t\t\tself.sync_folder_t.start()\n\t\t\tGLib.timeout_add(100,self.pulsate_sync_folder)\n\n\t#def sync_site_clicked\n\t\n\tdef pulsate_sync_folder(self):\n\n\t\tif self.sync_folder_t.is_alive():\n\t\t\t#self.core.mainWindow.waiting_pbar.pulse()\n\t\t\treturn True\n\n\t\telse:\n\t\t\t#self.core.mainWindow.waiting_window.hide()\n\t\t\tself.core.mainWindow.manage_waiting_stack(False)\n\t\t\tif self.result_sync['status']:\n\t\t\t\tself.core.mainWindow.sites_info[self.args_sync[1][\"id\"]][\"sync_folder\"]=self.args_sync[2]\n\t\t\t\tself.core.mainWindow.sites_info[self.args_sync[1][\"id\"]][\"updated_by\"]=self.args_sync[3]\n\t\t\t\tself.core.mainWindow.sites_info[self.args_sync[1][\"id\"]][\"last_updated\"]=self.args_sync[4]\n\t\t\t\tself.sites_list[self.args_sync[1][\"id\"]][\"sync_folder\"]=self.args_sync[2]\n\t\t\t\tself.sites_list[self.args_sync[1][\"id\"]][\"updated_by\"]=self.args_sync[3]\n\t\t\t\tself.sites_list[self.args_sync[1][\"id\"]][\"last_updated\"]=self.args_sync[4]\n\t\t\t\tself.core.sitesmanager.read_conf()\n\t\t\t\tself.core.mainWindow.manage_message(False,False,8)\n\t\t\telse:\n\t\t\t\tself.core.mainWindow.manage_message(False,True,self.result_sync['code'])\t\n\n\n\t#def pulsate_sync_folder\n\t\n\n\tdef sync_folder(self):\n\n\t\tself.result_sync=self.core.sitesmanager.save_conf(self.args_sync)\n\n\t#def sync_folder\n\n\tdef 
open_site(self,widget,event,hbox):\n\n\t\tpopover=hbox.get_children()[1].get_children()[0].get_children()[2].popover.hide()\n\t\tsite_to_edit=hbox\t\t\n\t\tsiteId=site_to_edit.get_children()[0].id\n\n\t\turl=self.sites_list[siteId][\"url\"]\n\n\t\tself.fcmd='xdg-open '+url\n\t\tself.init_threads()\n\t\tself.open_site_browser_t.start()\n\n\t#def open_site\t\n\n\tdef open_site_browser(self):\t\n\n\t\tos.system(self.fcmd)\n\n\t#def open_site_browser\t\n\n\tdef open_folder(self,widget,event,hbox):\t\n\n\t\tpopover=hbox.get_children()[1].get_children()[0].get_children()[2].popover.hide()\n\t\tsite_to_edit=hbox\t\t\n\t\tsiteId=site_to_edit.get_children()[0].id\n\n\t\tfolder=self.sites_list[siteId][\"site_folder\"]\n\t\tself.dcmd='xdg-open '+ folder\n\t\tself.init_threads()\n\t\tself.open_folder_t.start()\n\n\t#def open_folder\n\t\n\tdef open_site_folder(self):\n\n\t\tos.system(self.dcmd) \n\n\t#def open_site_folder \t\t\n\n\tdef edit_site_clicked(self,widget,event,hbox):\n\n\t\tself.core.mainWindow.msg_label.set_text(\"\")\n\t\tpopover=hbox.get_children()[1].get_children()[0].get_children()[2].popover.hide()\n\t\tsite_to_edit=hbox\t\t\n\t\tsite_to_edit=site_to_edit.get_children()[0].id\n\t\tself.core.editBox.init_form()\n\t\tself.core.editBox.render_form()\n\t\tself.core.editBox.load_values(site_to_edit)\n\t\tself.core.mainWindow.stack_window.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT)\n\t\tself.core.mainWindow.stack_window.set_visible_child_name(\"editBox\")\t\t\n\n\t#def edit_site_clicked\t\t\n\n\tdef delete_site_clicked(self,widget,event,hbox):\n\n\t\tself.core.mainWindow.msg_label.set_text(\"\")\n\t\tpopover=hbox.get_children()[1].get_children()[0].get_children()[2].popover.hide()\n\t\tdialog = Gtk.MessageDialog(None,0,Gtk.MessageType.WARNING, Gtk.ButtonsType.YES_NO, \"EASY SITE\")\n\t\tdialog.format_secondary_text(_(\"Do you want delete the site?\"))\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response==Gtk.ResponseType.YES:\n\t\t\tsite_to_remove=hbox.get_children()[0].id\n\t\t\tself.args_delete=[\"delete\",site_to_remove]\n\t\t\t#self.core.mainWindow.waiting_label.set_text(self.core.mainWindow.get_msg(SiteBox.DELETING_SITE_WAITING_CODE))\t\t\t\n\t\t\t#self.core.mainWindow.waiting_window.show_all()\n\t\t\tself.core.mainWindow.manage_waiting_stack(True,SiteBox.DELETING_SITE_WAITING_CODE)\t\t\t\n\t\t\tself.init_threads()\n\t\t\tself.delete_folder_t.start()\n\t\t\tGLib.timeout_add(100,self.pulsate_delete_site,hbox)\n\n\t#def delete_site_clicked\n\t\n\tdef pulsate_delete_site(self,hbox):\t\n\n\t\tif self.delete_folder_t.is_alive():\n\t\t\t#self.core.mainWindow.waiting_pbar.pulse()\n\t\t\treturn True\n\n\t\telse:\n\t\t\t#self.core.mainWindow.waiting_window.hide()\n\t\t\tself.core.mainWindow.manage_waiting_stack(False)\t\t\t\n\t\t\tif self.result_delete['status']:\n\t\t\t\tself.sites_list_box.remove(hbox)\n\t\t\t\tself.core.mainWindow.sites_info.pop(self.args_delete[1])\n\t\t\t\tself.core.sitesmanager.read_conf()\n\t\t\t\tself.core.mainWindow.manage_message(False,False,9)\n\t\t\telse:\n\t\t\t\tself.core.mainWindow.manage_message(False,True,self.result_delete['code'])\t\n\n\n\t#def pulsate_delete_site\n\t\n\n\tdef delete_site(self):\n\n\t\tself.result_delete=self.core.sitesmanager.save_conf(self.args_delete)\n\n\t#def delete_site\t\t\t\n\n\t\t\n\tdef manage_sites_buttons(self,sensitive):\n\t\n\t\tfor item in 
self.sites_list_box:\n\t\t\titem.get_children()[1].get_children()[0].get_children()[2].set_sensitive(sensitive)\n\t\t\titem.get_children()[1].get_children()[0].get_children()[1].set_sensitive(sensitive)\n\n\t#def manage_sites_buttons\n\t\n\tdef manage_site_options(self,button,hbox,event=None):\n\t\t\n\t\tself.core.mainWindow.manage_message(True,False)\t\n\t\tbutton.popover.show()\n\n\t#def manage_site_options\t\n\n\tdef mouse_over_popover(self,widget,event=None):\n\n\t\twidget.set_name(\"POPOVER_ON\")\n\n\t#def mouser_over_popover\t\n\n\tdef mouse_exit_popover(self,widget,event=None):\n\n\t\twidget.set_name(\"POPOVER_OFF\")\t\t\n\t\n\n#class SiteBox\n\nfrom . import Core","sub_path":"lliurex-easy-sites/python3-easysites/SiteBox.py","file_name":"SiteBox.py","file_ext":"py","file_size_in_byte":19290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"423508368","text":"FileWrite = open('HealthCare.txt','w')\r\n\r\nend = '' #STRING\r\n\r\nwhile end != '!':\r\n name = input('Input name:') #STRING\r\n height = input('Input height:') #STRING\r\n weight = input('Input weight:') #STRING\r\n VitalCapacity = input('Input vital capacity:') #STRING\r\n WriteInformation = name + ' ' + height + ' ' + weight + ' ' + VitalCapacity + '\\n' #STRING\r\n FileWrite.write(WriteInformation)\r\n end = input('If you want to stop inputting information,please input\"!\":')#STRING\r\n\r\nFileWrite.close()\r\n","sub_path":"Assessments/files/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"158178268","text":"from rest_framework import serializers\nfrom communities.models import Community\nfrom .models import CommunityDiscussion, CommunityDiscussionReply\n\n\nclass CommunityDiscussionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CommunityDiscussion\n fields = ('id', 'community', 'owner', 'title', 'text', 'archived',\n 'datetime', 'discussion_replies')\n extra_kwargs = {\n 'community': {'required': False},\n 'owner': {'required': False}\n }\n depth = 2\n\n def create(self, validated_data):\n request = self.context.get('request', None)\n try:\n community = Community.objects.get(slug=request.subdomain)\n validated_data['community'] = community\n validated_data['owner'] = request.user\n except:\n raise serializers.ValidationError('Community not found.')\n return CommunityDiscussion.objects.create(**validated_data)\n\n\nclass CommunityDiscussionReplySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CommunityDiscussionReply\n fields = ('id', 'discussion', 'owner', 'text', 'datetime')\n extra_kwargs = {\n 'discussion': {'required': False},\n 'owner': {'required': False}\n }\n depth = 2\n\n def create(self, validated_data):\n request = self.context.get('request', None)\n view = self.context.get('view', None)\n discussion_id = view.kwargs['discussionid']\n try:\n discussion = CommunityDiscussion.objects.get(id=discussion_id)\n validated_data['discussion'] = discussion\n validated_data['owner'] = request.user\n except:\n raise serializers.ValidationError('Discussion not found.')\n return CommunityDiscussionReply.objects.create(**validated_data)\n","sub_path":"neighborhoodnet/apps/discussion/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
+{"seq_id":"104213068","text":"\"\"\"\n##################################################################################################\n# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.\n# Filename : rfl_res32_visual.py\n# Abstract : RF-learning visual counting Model\n\n# Current Version: 1.0.0\n# Date : 2021-06-11\n##################################################################################################\n\"\"\"\n# encoding=utf-8\n_base_ = [\n './baseline.py'\n]\n\ncharacter = \"/path/to/demo/text_recognition/__dictionary__/Scene_text_36.txt\"\n\n\"\"\"\n1. Model Settings\ninclude model-related setting, such as model type, user-selected modules and parameters.\n\n\"\"\"\n# model parameters for changing the rf-learning\nmodel = dict(\n type='RFLRecognizor',\n transformation=dict(\n type='TPS_SpatialTransformer',\n F=20,\n I_size=(32, 100),\n I_r_size=(32, 100),\n I_channel_num=1,\n ),\n backbone=dict(\n type='ResNetRFL',\n input_channel=1,\n output_channel=512,),\n neck_s2v=None, # Training strategy\n neck_v2s=None, # Step1: training total RF-Learning, train_type=\"visual\",\n # neck_v2s=dict( # Step2: training total RF-Learning, train_type=\"total\",\n # type='V2SAdaptor', # neck_v2s=V2SAdaptor, neck_s2v=S2VAdaptor\n # in_channels=512,),\n # neck_s2v=dict(\n # type='S2VAdaptor',\n # in_channels=512,),\n counting_head=dict(\n type='CNTHead',\n embed_size=512,\n encode_length=26,\n loss_count=dict(\n type=\"MSELoss\",\n reduction='mean'),\n converter=dict(\n type='RFLLabelConverter',\n character=character, ),),\n sequence_head=dict(\n type='AttentionHead',\n input_size=512,\n hidden_size=256,\n batch_max_length=25,\n converter=dict(\n type='AttnLabelConverter',\n character=character,\n use_cha_eos=True, ),\n loss_att=dict(\n type='StandardCrossEntropyLoss',\n ignore_index=0,\n reduction='mean',\n loss_weight=1.0),),\n train_type=\"visual\",\n # train_type=\"total\",\n _delete_=True\n # Step1: train_type=\"visual\"\n # Step2: train_type=\"semantic\",\n # Step3: train_type=\"total\"\n)\n\"\"\"\n2. 
Data Setting\ndescription:\n Pipeline and training dataset settings\n\nAdd keywords:\n None\n\"\"\"\n\n# dataset settings\n# support the dataset type\nppld = {\n 'LMDB_Standard': 'LoadImageFromLMDB', # open-source LMDB data\n\n # Davar dataset type\n 'LMDB_Davar': 'RCGLoadImageFromLMDB',\n 'File': 'RCGLoadImageFromFile',\n 'Loose': 'RCGLoadImageFromLoose',\n 'Tight': 'RCGLoadImageFromTight',\n}\n\n\"\"\"\nDataset Instruction manual:\n\ndata_types=['LMDB','File','Tight','File'] # corresponding to different data type\n\nann_files = ['train1|train2|train3',\n 'Datalist/train1.json|Datalist/train2.json', \n 'Datalist/train_xxx.json',\n 'Datalist/train_yyy.json'] # Separated by '|'\n\nimg_prefixes = ['xx/yy/zz/|aa/bb/cc/|mm/nn/', \n 'dd/ee/', 'ff/gg/hh/', \n 'ii/jj/kk/'] # Separated by '|', corresponding to the ann_files\n\nbatch_ratios = ['0.1|0.1|0.1',\n '0.2|0.2',\n '0.1',\n '0.2'] # string format, corresponding to the ann_files\n # sum of the batch_ratios equals to 1\n\"\"\"\n\n# Training dataset format\ndata_types = [\n 'LMDB_Standard',\n 'LMDB_Standard'\n]\n\n# File prefix path of the traning dataset\nimg_prefixes = [\n '*****/TextRecognition/LMDB/BenchEn/train/', # path to the training dataset\n '*****/TextRecognition/LMDB/BenchEn/train/', # path to the training dataset\n]\n\n\n# Dataset Name\nann_files = [\n 'MJ', 'SK'\n]\n\n# Training dataset load type\ndataset_type = 'DavarMultiDataset'\n\n# Normalization parameter\nimg_norm_cfg = dict(\n mean=[127.5],\n std=[127.5])\n\n# training pipeline parameter\ntrain_pipelines = [\n dict(\n type=ppld[\"LMDB_Standard\"],\n character=character, # recognition dictionary\n test_mode=False, # whether is in test mode\n sensitive=False, # sensitive to Upper or Lower\n color_types=[\"gray\"], # color loading type, [\"rgb\", \"bgr\", \"gray\"]\n fil_ops=True,\n ),\n dict(\n type='ResizeNormalize',\n size=(100, 32),\n interpolation=2,\n # Interpolation method of the Resize function\n # 0 - INTER_NEAREST(default) # 1 - INTER_LINEAR\n # 2 - INTER_CUBIC # 3 - INTER_AREA\n mean=img_norm_cfg[\"mean\"],\n std=img_norm_cfg[\"std\"], ),\n dict(type='DavarDefaultFormatBundle'), # Uniform Training data tensor format\n dict(type='DavarCollect', keys=['img', 'gt_text']), # Data content actually involved in training stage\n]\n\nprint('train_piplines:', train_pipelines)\n\nval_pipeline = [\n dict(type=ppld[\"LMDB_Standard\"], \n character=character,\n test_mode=True,\n sensitive=False,\n color_types=[\"gray\"], # color loading type, [\"rgb\", \"bgr\", \"gray\"]\n fil_ops=True, ),\n dict(type='ResizeNormalize',\n size=(100, 32),\n interpolation=2,\n mean=img_norm_cfg[\"mean\"],\n std=img_norm_cfg[\"std\"],\n ),\n dict(type='DavarDefaultFormatBundle'),\n dict(type='DavarCollect', keys=['img', 'gt_text'], meta_keys=[]),\n]\n\ntest_pipeline = [\n dict(type=ppld[\"LMDB_Standard\"],\n character=character,\n test_mode=True,\n sensitive=False,\n color_types=[\"gray\"],\n fil_ops=True, ),\n dict(type='ResizeNormalize',\n size=(100, 32),\n interpolation=2,\n mean=img_norm_cfg[\"mean\"],\n std=img_norm_cfg[\"std\"],\n ),\n dict(type='DavarDefaultFormatBundle'),\n dict(type='DavarCollect', keys=['img'], meta_keys=[]),\n]\n\ndata = dict(\n samples_per_gpu=128, # batchsize=100->memory:6400M\n workers_per_gpu=2,\n sampler=dict(\n type='DistBatchBalancedSampler', # BatchBalancedSampler or DistBatchBalancedSampler\n mode=0,\n # model 0: Balance in batch, calculate the epoch according to the first iterative data set\n # model 1: Balance in batch, calculate the epoch according to the 
last iterative data set\n # model 2: Balance in batch, record unused data\n # model -1: Each dataset is directly connected and shuffled\n ),\n train=dict(\n type=dataset_type,\n batch_ratios=['0.5', '0.5'],\n dataset=dict(\n type=\"DavarRCGDataset\",\n data_type=data_types,\n ann_file=ann_files,\n img_prefix=img_prefixes,\n batch_max_length=25,\n used_ratio=1,\n test_mode=False,\n pipeline=train_pipelines)\n ),\n val=dict(\n type=dataset_type,\n batch_ratios=1,\n samples_per_gpu=400,\n test_mode=True,\n dataset=dict(\n type=\"DavarRCGDataset\",\n data_type=\"LMDB_Standard\",\n ann_file='mixture',\n img_prefix='/path/to/validation/',\n batch_max_length=25,\n used_ratio=1,\n test_mode=True,\n pipeline=val_pipeline,)\n ),\n test=dict(\n type=dataset_type,\n batch_ratios=1,\n test_mode=True,\n dataset=dict(\n type=\"DavarRCGDataset\",\n data_type='LMDB_Standard',\n ann_file='IIIT5k_3000',\n img_prefix='/path/to/evaluation/',\n batch_max_length=25,\n used_ratio=1,\n test_mode=True,\n pipeline=test_pipeline, ),\n )\n)\n\n\n# checkpoint setting\ncheckpoint_config = dict(type=\"DavarCheckpointHook\",\n interval=1,\n iter_interval=5000,\n by_epoch=True,\n by_iter=True,\n filename_tmpl='ckpt/res32_ace_e{}.pth',\n metric=\"accuracy\",\n rule=\"greater\",\n save_mode=\"lightweight\",\n init_metric=-1,\n model_milestone=0.5\n )\n\n# logger setting\nlog_config = dict(\n interval=50,\n hooks=[dict(type='TextLoggerHook'), ])\n\n\n# evaluation setting\nevaluation = dict(start=3,\n start_iter=0.5,\n save_best=\"accuracy\",\n interval=1,\n iter_interval=5000,\n model_type=\"RECOGNIZOR\",\n eval_mode=\"lightweight\",\n by_epoch=True,\n by_iter=True,\n rule=\"greater\",\n metric=['accuracy', 'NED'],\n )\n\n# runner setting\nrunner = dict(type='EpochBasedRunner', max_epochs=6)\n\n# must specify this parameter\nfind_unused_parameters = True\n\n# Load from Pre-trained model path\nload_from = None\n\n# work directory\nwork_dir = '/path/to/davar_opensource/rflearning_visual/'\n\n# distributed training setting\ndist_params = dict(backend='nccl')\n","sub_path":"demo/text_recognition/rflearning/configs/rfl_res32_visual.py","file_name":"rfl_res32_visual.py","file_ext":"py","file_size_in_byte":9244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"410642836","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 23 22:20:23 2020\r\n\r\n@author: TaeHyun Hwang\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nf = open('./kaggle_covid_kor/Time.csv', 'r')\r\ndata = []\r\nfor line in f.readlines():\r\n data.append(line.split(','))\r\nf.close()\r\n\r\nI, R, D = [], [], []\r\nfor i in range(1, len(data)):\r\n I.append(float(data[i][4]))\r\n R.append(float(data[i][5]))\r\n D.append(float(data[i][6]))\r\n\r\nI = np.array(I)\r\nR = np.array(R)\r\nD = np.array(D)\r\n\r\nplt.figure()\r\nplt.plot(I, label='Infection')\r\nplt.plot(R, label='Released')\r\nplt.plot(D, label='Decesed')\r\nplt.legend()\r\nplt.show()\r\n\r\nI_t = I - (R+D)\r\nR_t = R+D\r\n\r\nplt.figure()\r\nplt.plot(I_t, label='# of infected people at time t')\r\nplt.plot(R_t, label='# of recovered people at time t')\r\nplt.legend()\r\nplt.show()\r\n\r\nnp.save('./I_t_kor.npy', I_t)\r\nnp.save('./R_t_kor.npy', R_t)\r\n\r\n","sub_path":"taehyun/data preprocess_korea.py","file_name":"data preprocess_korea.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"19674798","text":"# 
coding=utf-8\n\nimport sys\nimport json\nimport base64\n\n# 保证兼容python2以及python3\nIS_PY3 = sys.version_info.major == 3\nif IS_PY3:\n from urllib.request import urlopen\n from urllib.request import Request\n from urllib.error import URLError\n from urllib.parse import urlencode\n from urllib.parse import quote_plus\nelse:\n import urllib2\n from urllib import quote_plus\n from urllib2 import urlopen\n from urllib2 import Request\n from urllib2 import URLError\n from urllib import urlencode\n\n# 防止https证书校验不正确\nimport ssl\nimport torch\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n# 百度云控制台获取到ak,sk以及\n# EasyDL官网获取到URL\n\n# ak\nAPI_KEY = 'IKmh6dBcK9pegWxlwOwjGUjY'\n\n# sk\nSECRET_KEY = 'vIjUysxvZ4S3uj2yKEY6Y8ghFb9COPmv'\n\n# url\nEASYDL_PRO_CLASSIFY_URL = \"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/classification/3classes_dmos25\"\n\n\"\"\" TOKEN start \"\"\"\nTOKEN_URL = 'https://aip.baidubce.com/oauth/2.0/token'\n\n\"\"\"\n 获取token\n\"\"\"\n\n\ndef fetch_token():\n params = {'grant_type': 'client_credentials',\n 'client_id': API_KEY,\n 'client_secret': SECRET_KEY}\n post_data = urlencode(params)\n if (IS_PY3):\n post_data = post_data.encode('utf-8')\n req = Request(TOKEN_URL, post_data)\n try:\n f = urlopen(req, timeout=5)\n result_str = f.read()\n except URLError as err:\n print(err)\n if (IS_PY3):\n result_str = result_str.decode()\n\n result = json.loads(result_str)\n\n if ('access_token' in result.keys() and 'scope' in result.keys()):\n if not 'brain_all_scope' in result['scope'].split(' '):\n print('please ensure has check the ability')\n exit()\n return result['access_token']\n else:\n print('please overwrite the correct API_KEY and SECRET_KEY')\n exit()\n\n\n\"\"\"\n 读取文件\n\"\"\"\n\n\ndef read_file(image_path):\n f = None\n try:\n f = open(image_path, 'rb')\n return f.read()\n except:\n print('read image file fail')\n return None\n finally:\n if f:\n f.close()\n\n\n\"\"\"\n 调用远程服务\n\"\"\"\n\n\ndef request(url, data):\n if IS_PY3:\n req = Request(url, json.dumps(data).encode('utf-8'))\n else:\n req = Request(url, json.dumps(data))\n\n has_error = False\n try:\n f = urlopen(req)\n result_str = f.read()\n if (IS_PY3):\n result_str = result_str.decode()\n return result_str\n except URLError as err:\n print(err)\n\ndef classify(image_path = \"../../test_images/blur.jpg\"):\n # 获取access token\n token = fetch_token()\n\n # 拼接url\n url = EASYDL_PRO_CLASSIFY_URL + \"?access_token=\" + token\n\n # filename = \"../test_images/blur.jpg\"\n\n file_content = read_file(image_path)\n\n if IS_PY3:\n image_data = str(base64.b64encode(file_content), \"UTF8\")\n else:\n image_data = base64.b64encode(file_content)\n\n # 请求接口\n response = request(url,\n {\n 'image': image_data,\n 'top_num': 4\n })\n\n result_json = json.loads(response)\n # print(result_json)\n result = result_json[\"results\"]\n print(result)\n for i in range(len(result)):\n result[i]['score'] = round(result[i]['score'], 2)\n if len(result) == 0:\n print(\"无法识别到结果\")\n else:\n for item in result:\n print(\"置信度:\" + str(item['score']) + \" 类别名称:\" + item['name'])\n\n return result_json\n\nif __name__ == '__main__':\n classify(\"../../test_images/blur.jpg\")","sub_path":"app/assess/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"443015856","text":"#!/bin/python\n#\n# Copyright 2020, by the California Institute of Technology. ALL RIGHTS\n# RESERVED. 
United States Government Sponsorship acknowledged. Any commercial\n# use must be negotiated with the Office of Technology Transfer at the\n# California Institute of Technology.\n#\n# ------------------------------\n\nfrom datetime import datetime\n\nfrom pds_doi_service.core.util.config_parser import DOIConfigUtil\nfrom pds_doi_service.core.util.general_util import get_logger\n\n# Get the common logger and set the level for this file.\n\nlogger = get_logger('pds_doi_core.outputs.transaction')\n\n\nclass Transaction:\n # This class Transaction provide services to build a transaction object from action {draft,reserved}.\n\n m_doi_config_util = DOIConfigUtil()\n m_log_dict = None\n\n def __init__(self,\n output_content,\n node_id,\n submitter_email,\n dois,\n transaction_disk_dao,\n transaction_db_dao,\n input_path = None):\n self._config = self.m_doi_config_util.get_config()\n self._node_id = node_id.lower()\n self._submitter_email = submitter_email\n self._input_ref = input_path\n self._output_content = output_content\n self._transaction_time = datetime.now()\n self._dois = dois\n self._transaction_disk_dao = transaction_disk_dao\n self._transaction_db_dao = transaction_db_dao\n\n def log(self):\n transaction_io_dir = self._transaction_disk_dao.write(self._node_id, self._transaction_time,\n input_ref=self._input_ref,\n output_content=self._output_content)\n\n for doi in self._dois:\n lidvid = doi.related_identifier.split('::')\n doi_field = doi.__dict__\n k_doi_params = dict((k, doi_field[k]) for k in\n doi_field.keys() & {'doi', 'status', 'title', 'product_type', 'product_type_specific'})\n\n self._transaction_db_dao.write_doi_info_to_database(\n lid=lidvid[0],\n vid=lidvid[1] if len(lidvid) > 1 else None,\n transaction_date=self._transaction_time,\n submitter=self._submitter_email,\n discipline_node=self._node_id,\n transaction_key=transaction_io_dir,\n **k_doi_params\n )\n","sub_path":"pds_doi_service/core/outputs/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"368740089","text":"\n\nfrom xai.brain.wordbase.nouns._mishmash import _MISHMASH\n\n#calss header\nclass _MISHMASHES(_MISHMASH, ):\n\tdef __init__(self,): \n\t\t_MISHMASH.__init__(self)\n\t\tself.name = \"MISHMASHES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mishmash\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_mishmashes.py","file_name":"_mishmashes.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"532004116","text":"import tensorflow as tf\nfrom tensorflow.keras.models import load_model\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport os\nfrom pathlib import Path\n\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\ndef facecrop(image):\n facedata = r\"/home/prajeeth/Environments/tf_env/lib/python3.8/site-packages/cv2/data/haarcascade_frontalface_alt.xml\"\n cascade = cv2.CascadeClassifier(facedata)\n img = cv2.imread(image, 0)\n minisize = (img.shape[1],img.shape[0])\n miniframe = cv2.resize(img, minisize)\n faces = cascade.detectMultiScale(miniframe)\n\n for f in faces:\n x, y, w, h = [ v for v in f ]\n cv2.rectangle(img, (x,y), (x+w,y+h), (255,255,255))\n\n sub_face = img[y:y+h, x:x+w]\n fname, ext = os.path.splitext(image)\n new_path = fname + \"_cropped_\" + ext\n cv2.imwrite(new_path, sub_face)\n\n return new_path\n\n\nEmodel = 
load_model('Emotion-Model.h5')\n\ndef emotion_function(img_path):\n face_path = facecrop(img_path)\n face = Image.open(face_path)\n face = face.resize((48, 48))\n face_arr = np.asarray(face)\n face_arr = face_arr / 255\n face_arr.shape = (1, 48, 48, 1)\n\n y_classes = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\n pred_arr = Emodel.predict(face_arr, verbose = 1)\n\n return y_classes[np.argmax(pred_arr)]\n","sub_path":"tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"591303456","text":"import os\nfrom os import path\nimport sys\nfrom typing import Tuple\n\nimport cv2\nimport numpy as np\n\ntry:\n from SFE.DL.Net import Net\nexcept:\n Net = None\n\n\nclass Toolbox:\n \"\"\" 工具箱 \"\"\"\n\n def __init__(self, model_path=\"smoke.h5\"):\n \"\"\"\n - model_path str[\"smoke.h5\"]: 要加载的模型路径\n \"\"\"\n self._net = None\n self._model_path = model_path\n\n def _loadModel(self):\n if self._net is not None:\n return self._net\n\n self._net = Net()\n self._net.load(self._model_path)\n return self._net\n\n def _getSmokeROI(self, img: np.ndarray, x: int, y: int, w: int, h: int) -> np.ndarray:\n \"\"\" 获取黑烟图片\n - img np.ndarray: 原图\n - x int: 车辆左上角x坐标\n - y int: 车辆左上角y坐标\n - w int: 车辆宽度\n - h int: 车辆高度\n - return np.ndarray: 返回黑烟图片\n \"\"\"\n img = img.copy()\n x2, y2 = x + w, y + h\n width, height = w // 2, h // 2\n img[y:y2, x:x2] = 0\n\n x, x2 = max(x - width, 0), x2 + width\n y, y2 = max(y - height, 0), y2 + height\n\n img = img[y:y2, x:x2]\n return img\n\n def _roi(self, img: np.ndarray) -> Tuple[int, int, int, int] or None:\n \"\"\" 选择ROI\n - img np.ndarray: 图片\n - return tuple: 如果选择有效区域大于1*1, 则返回选中box(x, y, w, h), 否则返回None\n \"\"\"\n box = cv2.selectROI(\"ROI\", img, False, False)\n # cv2.destroyWindow(\"ROI\")\n if box[:2] == (0, 0):\n return None\n return box\n\n def _predict(self, img: np.ndarray, box: tuple=None) -> int:\n \"\"\" 识别指定图片 如果box为None, 将直接识别图片, 否则为提取box的黑烟图片再进行识别\n - img np.ndarray: 图片\n - box tuple[None]: 车辆位置\n \"\"\"\n if box is not None:\n img = self._getSmokeROI(img, *box)\n return self._loadModel().predict(img)[0]\n\n def predictImage(self, img_path: str, box: tuple=None, roi: bool = False, scale: float = None, show=False):\n \"\"\" 识别指定图片\n - img_path str: 图片路径\n - box tuple[None]: 车辆位置, 如果为None则原图识别\n - roi bool[False]: 是否手动选择车辆位置\n - scale float[None]: 图片缩放比例\n - show bool[False]: 是否显示用于识别的图像\n - return int: 返回识别结果\n \"\"\"\n img = cv2.imread(img_path)\n if scale:\n img = cv2.resize(img, None, fx=scale, fy=scale)\n if roi is True:\n box = self._roi(img)\n\n print(box, self._predict(img, box))\n if show:\n cv2.imshow(\"SHOW\", img)\n cv2.waitKey()\n\n def predictImages(self, dir_path: str, box: tuple=None, roi: bool = False, scale: float = None, show=False):\n \"\"\" 识别指定图片目录\n - dir_path str: 图片目录\n - box tuple[None]: 车辆位置, 如果为None则原图识别\n - roi bool[False]: 是否手动选择车辆位置\n - scale float[None]: 图片缩放比例\n - show bool[False]: 是否显示用于识别的图像\n - return int: 返回识别结果\n \"\"\"\n for i in os.listdir(dir_path):\n self.predictImage(path.join(dir_path, i), box, roi, scale, show)\n\n def predictHKImage(self, img_path: str, scale: float=None):\n \"\"\" 识别海康图片 图片命名格式为 x-y-w-h.jpg\n - img_path str: 图片路径\n - scale float[None]: 图片缩放比例\n \"\"\"\n x, y, w, h = map(int, path.basename(img_path)[:-4].split(\"-\"))\n x, w = x * 4096 // 1000, w * 4096 // 1000\n y, h = y * 2160 // 1000, h * 2160 // 1000\n box = (x, y, w, h)\n img = cv2.imread(img_path)\n 
        img = img[:2160, :4096]\n        roi = self._getSmokeROI(img, *box)\n\n        print(box, self._predict(img, box))\n        if isinstance(scale, float):\n            roi = cv2.resize(roi, None, fx=scale, fy=scale)\n        cv2.imshow(\"roi\", roi)\n        cv2.waitKey()\n\n    def predictHKImages(self, img_dir: str, scale: float=None):\n        \"\"\" Batch-classify Hikvision images. File names follow the format x-y-w-h.jpg\n        - img_dir str: image directory path\n        - scale float[None]: image scale factor\n        \"\"\"\n        for i in os.listdir(img_dir):\n            self.predictHKImage(path.join(img_dir, i), scale)\n\n    def predictVideo(self, video_path, scale=None):\n        \"\"\" Classify a video\n        - video_path str: video path\n        - scale float[None]: image scale factor\n        \"\"\"\n        cap = cv2.VideoCapture(video_path)\n        while True:\n            ret, frame = cap.read()\n            if not ret:\n                break\n            if scale:\n                frame = cv2.resize(frame, None, fx=scale, fy=scale)\n            box = self._roi(frame)\n            if box is not None:\n                print(self._predict(frame, box))\n\n    def grabVideo(self,\n                  video_path: str,\n                  save_dir: str,\n                  name: str,\n                  msg: str,\n                  category: str,\n                  scale: float=None,\n                  start_idx: int=0,\n                  cycle: int = 5\n                  ):\n        \"\"\" Grab training material from a video\n        - video_path str: video path\n        - save_dir str: save directory\n        - name str: name (recommended format: date_location)\n        - msg str: extra information for the file name\n        - category: category under which the images are saved\n        - scale float[None]: image scale factor\n        - start_idx int[0]: starting index\n        - cycle int[5]: cycle, e.g. `5` means one frame is shown out of every 5\n\n        The resulting files are saved as:\n            save_dir/src/name/category/name_msg_idx.jpg\n            save_dir/dst/name/category/name_msg_idx.jpg\n        \"\"\"\n        # initialize the output directories\n        src_dir = path.join(save_dir, \"src\", str(name), str(category))\n        dst_dir = path.join(save_dir, \"dst\", str(name), str(category))\n        os.makedirs(src_dir, exist_ok=True)\n        os.makedirs(dst_dir, exist_ok=True)\n\n        cap = cv2.VideoCapture(video_path)\n        c = start_idx\n        while True:\n            ret, frame = cap.read()\n            for _ in range(cycle - 1):\n                cap.read()\n            if not ret:\n                break\n            if scale:\n                frame = cv2.resize(frame, None, fx=scale, fy=scale)\n\n            box = self._roi(frame)\n            if box is not None:\n                fn = f\"{name}_{msg}_{c:04}.jpg\"\n                roi = self._getSmokeROI(frame, *box)\n                cv2.imwrite(path.join(src_dir, fn), frame)\n                cv2.imwrite(path.join(dst_dir, fn), roi)\n                print(c)\n                c += 1\n\n    def grabImages(self,\n                   dir_path: str,\n                   save_dir: str,\n                   name: str,\n                   msg: str,\n                   category: str,\n                   scale: float=None,\n                   start_idx: int=0\n                   ):\n        \"\"\" Grab material from a directory of still images\n        - dir_path str: image directory path\n        - save_dir str: save directory\n        - name str: name (recommended format: date_location)\n        - msg str: extra information for the file name\n        - category: category under which the images are saved\n        - scale float[None]: image scale factor\n        - start_idx int[0]: starting index\n\n        The resulting files are saved as:\n            save_dir/src/name/category/name_msg_idx.jpg\n            save_dir/dst/name/category/name_msg_idx.jpg\n        \"\"\"\n        src_dir = path.join(save_dir, \"src\", str(name), str(category))\n        dst_dir = path.join(save_dir, \"dst\", str(name), str(category))\n        os.makedirs(src_dir, exist_ok=True)\n        os.makedirs(dst_dir, exist_ok=True)\n\n        c = start_idx\n        for i in os.listdir(dir_path):\n            img = cv2.imread(path.join(dir_path, i))\n            if scale:\n                img = cv2.resize(img, None, fx=scale, fy=scale)\n            box = self._roi(img)\n            if box is not None:\n                fn = f\"{name}_{msg}_{c:04}.jpg\"\n                roi = self._getSmokeROI(img, *box)\n                cv2.imwrite(path.join(src_dir, fn), img)\n                cv2.imwrite(path.join(dst_dir, fn), roi)\n                print(c)\n                c += 1\n\n\nif __name__ == \"__main__\":\n    from fire import Fire\n    Fire(Toolbox)\n","sub_path":"DL/SFESmoke/tools/toolbox.py","file_name":"toolbox.py","file_ext":"py","file_size_in_byte":8374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"413331577","text":"def verifica_progressao(lista):\n
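    # classifies lista as an arithmetic progression ('PA'), a geometric one ('PG'), both ('AG') or neither ('NA')\n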
    r = []\n    q = []\n    pa = False\n    pg = False\n    for i,e in enumerate(lista):\n        razaopa = lista[-1]-lista[-2]\n        razaopg = lista[-1]/lista[-2]\n        del lista[-1]\n        r.append(razaopa)\n        q.append(razaopg)\n    for f in range(1,len(r)):\n        if not pa and r[f] != r[-f]:\n            pa = False\n        elif pa or r[f] == r[-f]:\n            pa = True\n    for g in range(1,len(q)):\n        if not pg and q[g] != q[-g]:\n            pg = False\n        elif pg or q[g] == q[-g]:\n            pg = True\n    if pa and pg:\n        return 'AG'\n    elif pa:\n        return 'PA'\n    elif pg:\n        return 'PG'\n    else:\n        return 'NA'","sub_path":"backup/user_299/ch57_2020_04_11_17_18_13_789985.py","file_name":"ch57_2020_04_11_17_18_13_789985.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"261915440","text":"string1 = input()\nstring2 = input()\ncount = 0\nif len(string1)!=len(string2):\n    print(\"Strings are not of the same length\")\nelse: \n    for i in range (len(string1)):\n        if string1[i]!=string2[i]:\n            count=count+1\n    print(count)\n    \n","sub_path":"Day3/Python/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"497226119","text":"from django.shortcuts import render, Http404, HttpResponse\n\nfrom .models import Product, ProductImage\n\ndef products(request):\n\tqueryset = Product.objects.all()\n\ttemplate = 'products/products.html'\t\n\tcontext = {\n\t   'object_list': queryset\n\t}\n\treturn render(request, template, context)\n\ndef product(request, slug):\n\ttry:\n\t\tproduct = Product.objects.get(slug=slug)\n\t\tcontext = {'product': product}\n\t\ttemplate = 'products/product.html'\t\t\n\t\treturn render(request, template, context)\n\texcept:\n\t\traise Http404\n\ndef product_search(request, q):\n\tproducts = Product.objects.filter(title__icontains=q)\n\tif products:\n\t\tcontext = {'query': q, 'products': products}\n\t\ttemplate = 'products/product_search.html'\t\n\telse:\n\t\ttemplate = 'home.html'\t\n\t\tcontext = {}\n\treturn render(request, template, context)","sub_path":"store/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"144820620","text":"from typing import List\nclass Solution:\n    def solveNQueens(self, n: int) -> List[List[str]]:\n        res = []\n        self.dfs([-1]*n,0,[],res)\n        return res\n    \n    def dfs(self,nums,row,path,res):\n        if row==len(nums):\n            res.append(path)\n            return\n        for i in range(len(nums)):\n            nums[row]=i\n            if self.trueQueen(nums,row):\n                self.dfs(nums,row+1,path+[\".\"*i+\"Q\"+\".\"*(len(nums)-i-1)],res)\n    \n    def trueQueen(self,nums,i):\n        for j in range(i):\n            if nums[j]==nums[i]:\n                return False\n            if abs(nums[j]-nums[i])==i-j:\n                return False\n        return True\n\nx = Solution()\nres = x.solveNQueens(4)\nprint(res)\n","sub_path":"leetcode051.py","file_name":"leetcode051.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"221870684","text":"import sys\nimport statistics\n\nname = \"\"\nnums = []\n\nfor line in sys.stdin:\n    for v in line.split(\" \"):\n        try:\n            nums.append(float(v))\n        except ValueError:\n            name += v + \" \"\n\n    print(round(statistics.mean(nums), 6), name.strip())\n\n    name = \"\"\n    nums = []\n","sub_path":"solutions/pervasiveheartmonitor.py","file_name":"pervasiveheartmonitor.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"176109300","text":"# 
beautifulsoup4 test\nfrom bs4 import BeautifulSoup\nhtml = \"\\\n\t\t\t01\\\n\t\t\t\\\n\t\t\t\t
\\\n\t\t\t\t\t독전\\\n\t\t\t\t
\\\n\t\t\t\\\n\t\t\t\\\n\t\t\t\\\n\t\t\t\\\n\t\t\t\tna\\\n\t\t\t0\\\n\t\t\"\n\n\ndef ex1():\n\tbs = BeautifulSoup(html, \"html.parser\")\n\tprint(bs, type(bs))\n\n\ttag_td = bs.td\n\tprint(tag_td, type(tag_td))\n\n\ttag_a = bs.a\n\tprint(tag_a, type(tag_a))\n\n\ttag_div = bs.div\n\tprint(tag_div, type(tag_div))\n\n\nex1()\n\n\ndef ex2():\n\tbs = BeautifulSoup(html, \"html.parser\")\n\n\ttag_td = bs.td\n\tprint(tag_td[\"class\"]) # gets the class value of the first tag encountered\n\n\ttag_a = bs.a\n\tprint(tag_a[\"href\"])\n\n\ttag_div = bs.div\n\tprint(tag_div[\"id\"])\n\n\tprint(tag_div.attrs)\n\n\nex2()\n\n\n# 3. Attribute lookup\ndef ex3():\n\tbs = BeautifulSoup(html, \"html.parser\")\n\n\ttag_td = bs.find(\"td\", attrs = {'class': 'title'}) # get the td tag whose class is title\n\tprint(tag_td)\n\n\ttag = bs.find(attrs = {'title': '독전'})\n\tprint(tag) # 독전\n\n\ttag_a = bs.find('a')\n\tprint(tag_a) # 독전\n\t\nex3()\n","sub_path":"_test_/test_bs4.py","file_name":"test_bs4.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"52033691","text":"# 8888888888P          d8b      888 888                                  888                   888\n#       d88P           Y8P      888 888                                  888                   888\n#      d88P                     888 888                                  888                   888\n#     d88P    .d88b.  888  .d88888 88888b.   .d88b.  888d888 .d88b.     88888b.   .d88b.  888888\n#    d88P    d88\"\"88b 888 d88\" 888 888 \"88b d8P  Y8b 888P\"  d88P\"88b    888 \"88b d88\"\"88b 888\n#   d88P     888  888 888 888  888 888  888 88888888 888    888  888    888  888 888  888 888\n#  d88P      Y88..88P 888 Y88b 888 888 d88P Y8b.     888    Y88b 888    888 d88P Y88..88P Y88b.\n# d8888888888 \"Y88P\"  888  \"Y88888 88888P\"   \"Y8888  888     \"Y88888    88888P\"  \"Y88P\"   \"Y888\n# This software is provided free of charge without a warranty.  888\n# This Source Code Form is subject to the terms of the          Y8b d88P\n# Mozilla Public License, v. 2.0. If a copy of the MPL was not   \"Y88P\"\n# distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\n# This is designed to be used with Zoidberg bot, however I'm sure it could be adapted to work with your own projects.\n# If there is an issue that might cause issues on your own bot, feel free to pull request if it will improve something.<3\n\n# This isn't even really a cog. \n\nimport os\nimport sqlite3\n\nfrom discord.ext import commands\nfrom dislash import Button, ButtonStyle, auto_rows\n\nfrom zoidbergbot.config import *\n\n# TODO: It's probably worth adding something to remove servers when the bot has been removed or perhaps even inactive\n# servers if it gets to the point of causing an actual response time issue\n\n# Yup! it's your local moron reusing code that I'm planning on getting rid of later!\n# To be fair, I don't really know how much it really matters here.\nif not os.path.isfile(os.getcwd() + '\\\\data\\\\logging.db'):\n    print(\"Logging DB missing! Creating new DB. 
\")\n connection = sqlite3.connect(os.getcwd() + '\\\\data\\\\logging.db')\n cursor = connection.cursor()\n cursor.execute(\"\"\"CREATE TABLE logging (\n guild INTEGER PRIMARY KEY,\n log_channel INTEGER,\n message_log_channel INTEGER,\n log_joins BOOLEAN DEFAULT False,\n log_leaves BOOLEA NDEFAULT False,\n log_invites BOOLEAN DEFAULT False,\n log_messages BOOLEAN DEFAULT False,\n log_message_edits BOOLEAN DEFAULT False,\n log_roles BOOLEAN DEFAULT False,\n log_profile BOOLEAN DEFAULT False,\n log_nickname BOOLEAN DEFAULT False,\n log_user_nickname BOOLEAN DEFAULT False,\n log_bans BOOLEAN DEFAULT False,\n log_kicks BOOLEAN DEFAULT False,\n log_vc_mute BOOLEAN DEFAULT False,\n log_vc_move BOOLEAN DEFAULT False,\n log_vc_kick BOOLEAN DEFAULT False,\n log_vc_user_mute BOOLEAN DEFAULT False, \n log_vc_user_leave BOOLEAN DEFAULT False \n )\"\"\"\n )\nelse:\n # I know that sqlite3 will automatically make the db file, but I'd prefer to handle it like this.\n connection = sqlite3.connect(os.getcwd() + '\\\\data\\\\logging.db')\n cursor = connection.cursor()\n\n\ndef initialize_server(guild):\n # This might be worth having it back up the db every time.\n # Holy cow this is bad.\n # Please put this code out of it's misery.\n cursor.execute(\n f'''INSERT INTO logging({guild} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} {False} )''')\n # cursor.execute(f'''INSERT INTO logging({guild})''')\n connection.commit()\n\n\ndef update_attribute(column, value, guild):\n cursor.execute(f\"\"\"UPDATE logging \n SET {column} = {value} \n WHERE guild = {guild}; \n \"\"\")\n connection.commit()\n\n\ndef get_val(val, guild):\n data = cursor.execute(f'SELECT {val} FROM logging WHERE guild = {guild}')\n return int(''.join(map(str, data.fetchall()[0])))\n\n\nclass Logging(commands.Cog):\n\n # noinspection PyShadowingNames\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print('Ready!')\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n if member.guild == get_val(\"log_joins\", member.guild):\n channel = member.guild.system_channel\n guild = member.guild\n invites = await guild.invites()\n invite = None\n for each in invites:\n if member.id in each.uses:\n invite = each\n if channel is not None:\n await channel.send(f\"\"\"{member.display_name} has joined the server. \n :calendar_spiral:Account made: {member.created_at}\n :incoming_envelope:Invite used: {invite.id}\n --> :detective:Created by: {invite.inviter}\n \"\"\")\n\n @commands.command(name=\"setup-logging\")\n async def cmd_setup_logging(self, ctx, msg=None, inter=None):\n options = [\"log_channel\", \"message_log_channel\", \"log_joins\", \"log_leaves\", \"log_invites\",\n \"log_messages\", \"log_message_edits\", \"log_roles\", \"log_profile\", \"log_nickname\", \"log_user_nickname\",\n \"log_bans\", \"log_kicks\", \"log_vc_mute\", \"log_vc_move\", \"log_vc_kick\", \"log_vc_user_mute\",\n \"log_vc_user_leave\"]\n\n # jesus christ. fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fixit\n # Also, this needs to be changed so I can localize it at some point. 
\n\nclass Logging(commands.Cog):\n\n    # noinspection PyShadowingNames\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.Cog.listener()\n    async def on_ready(self):\n        print('Ready!')\n\n    @commands.Cog.listener()\n    async def on_member_join(self, member):\n        if get_val(\"log_joins\", member.guild.id):\n            channel = member.guild.system_channel\n            guild = member.guild\n            invites = await guild.invites()\n            invite = None\n            for each in invites:\n                if each.uses:\n                    invite = each\n            if channel is not None and invite is not None:\n                await channel.send(f\"\"\"{member.display_name} has joined the server. \n                :calendar_spiral:Account made: {member.created_at}\n                :incoming_envelope:Invite used: {invite.id}\n                --> :detective:Created by: {invite.inviter}\n                \"\"\")\n\n    @commands.command(name=\"setup-logging\")\n    async def cmd_setup_logging(self, ctx, msg=None, inter=None):\n        options = [\"log_channel\", \"message_log_channel\", \"log_joins\", \"log_leaves\", \"log_invites\",\n                   \"log_messages\", \"log_message_edits\", \"log_roles\", \"log_profile\", \"log_nickname\", \"log_user_nickname\",\n                   \"log_bans\", \"log_kicks\", \"log_vc_mute\", \"log_vc_move\", \"log_vc_kick\", \"log_vc_user_mute\",\n                   \"log_vc_user_leave\"]\n\n        # jesus christ. fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fix it fixit\n        # Also, this needs to be changed so I can localize it at some point. Ouch.\n        friendly_options = [\"action logging channel\", \"message logging channel\", \"log user joins\", \"log user leaves\",\n                            \"log when invites are created or used.\", \"log message deletions\",\n                            \"log when messages are edited\", \"log when roles are given/removed\",\n                            \"log profile changes like usernames\", \"log nickname changes when changed by an admin\",\n                            \"log nicknames when changed by the user\", \"log bans\", \"log kicks\",\n                            \"log when someone is muted\", \"log if someone is moved VC channels by an admin\",\n                            \"log when someone is kicked from a voice channel\", \"log when someone mutes themselves\",\n                            \"log when someone leaves a voice channel\"]\n        buttons = []\n        button_grid = auto_rows(max_in_row=5)\n\n        # This is a garbage solution.\n        async def refresh_buttons(grid):\n            for index, each in enumerate(options):\n                color = ButtonStyle.blurple\n                state = get_val(each, ctx.guild.id)\n                if type(state) == bool:\n                    if state:\n                        color = ButtonStyle.green\n                    else:\n                        color = ButtonStyle.red\n\n                button = Button(\n                    style=color,\n                    label=friendly_options[index],\n                    custom_id=each\n                )\n                buttons.append(button)\n            grid = auto_rows(*buttons, max_in_row=5)\n            return grid\n\n        try:\n            if inter is None:\n                msg = await ctx.send(\"Click each button to configure the options.\", components=button_grid)\n            else:\n                await inter.reply(f\"\", components=[buttons], type=7)\n        except ValueError:\n            pass\n\n        button_grid = await refresh_buttons(button_grid)\n\n        def wait_for(inter):\n            return inter.message.id == msg.id\n\n        inter = await ctx.wait_for_button_click(wait_for)\n\n\n# noinspection PyShadowingNames\ndef setup(bot):\n    bot.add_cog(Logging(bot))\n","sub_path":"cogs/data/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":7964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"621998012","text":"def data_to_bins(raw,data_min,bin_delta,bin_count):\n    # raw is a list of floats\n    # data_min is a float\n    # bin_delta is a float\n    # bin_count is an integer\n    # this function puts data into the correct bins\n    ou = []\n    for i in range(bin_count):\n        ou.append(0)\n    for r in raw:\n        b = int((r - data_min) / bin_delta)\n        if b>=bin_count: # the largest data point will be in a bin larger than what we can handle, so we move it one down\n            b = bin_count-1\n        ou[b] = ou[b] + 1 # this increments that bin\n    return ou\n\ndef normalize_bins(bin_array):\n    # takes a list of bins and sets their sum to 1\n    # bin_array is a list of floats\n    # returns list of floats of same length\n    if len(bin_array)==0:\n        return []\n    s = 0.0\n    for i in range(len(bin_array)):\n        s += float(bin_array[i])\n    ou = []\n    for i in range(len(bin_array)):\n        ou.append(bin_array[i] / s) # be careful, we could have issues here\n    return ou\n\ndef background_elimination(full_data,full_duration,background_data,background_duration,bin_count):\n    # full_data and background_data are lists of floats\n    # bin_count is an integer\n    # full_data and background_data should be lists of integral sums for the same channel\n    # first is to decide the bounds and size of the bins\n    # full_duration and background_duration are floats representing seconds\n    # returns a dict(float->float), that maps energies to rates (hits per hour, accounting for background)\n    f_max = max(full_data)\n    b_max = max(background_data)\n    f_min = min(full_data)\n    b_min = min(background_data)\n    if f_max>b_max:\n        data_max = f_max\n    else:\n        data_max = b_max\n    if f_min<b_min:\n        data_min = f_min\n    else:\n        data_min = b_min\n    bin_delta = (data_max - data_min) / float(bin_count)\n    full_bins = data_to_bins(full_data, data_min, bin_delta, bin_count)\n    background_bins = data_to_bins(background_data, data_min, bin_delta, bin_count)\n    diff_bins = dict()\n    for i in range(bin_count):\n        full_rate = 3600.0 * full_bins[i] / full_duration\n        background_rate = 3600.0 * background_bins[i] / background_duration\n        diff_bins[(float(i)+0.5)*bin_delta+data_min] = full_rate - background_rate\n    # energy -> rate\n    return diff_bins\n
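\n# A worked example of the binning arithmetic above (all numbers illustrative):\n# with data_min = 0.0, bin_delta = 2.0 and bin_count = 5, a reading r = 7.3\n# falls into bin int((7.3 - 0.0) / 2.0) = 3, and the largest reading is moved\n# down into bin 4 so nothing lands outside the histogram.\n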
\ndef function_subtraction(full_data,background_data,bin_count):\n    f_max = max(full_data)\n    b_max = max(background_data)\n    f_min = min(full_data)\n    b_min = min(background_data)\n    if f_max>b_max:\n        data_max = f_max\n    else:\n        data_max = b_max\n    if f_min<b_min:\n        data_min = f_min\n    else:\n        data_min = b_min\n    bin_delta = (data_max - data_min) / float(bin_count)\n    full_bins = normalize_bins(data_to_bins(full_data, data_min, bin_delta, bin_count))\n    background_bins = normalize_bins(data_to_bins(background_data, data_min, bin_delta, bin_count))\n    squeeze_factor = 1.0\n    for i in range(bin_count):\n        y = background_bins[i] / full_bins[i]\n        if y > squeeze_factor:\n            squeeze_factor = y\n    del(y)\n    # squeeze_factor is now determined\n    alpha_bins = [] # this is where we are going to save the source only function (estimated)\n    for i in range(bin_count):\n        alpha_bins.append(full_bins[i] - (background_bins[i] / squeeze_factor))\n    # this squeezes the background_data so it is always less than the full\n    # then we subtract background from full to get an estimate of the source contribution\n    # now normalize alpha\n    alpha_bins = normalize_bins(alpha_bins)\n    p = 1.0 - (1.0 / squeeze_factor) # this should give the source's contribution to the full\n    diff_bins = dict()\n    for i in range(bin_count):\n        diff_bins[(float(i)+0.5)*bin_delta+data_min] = alpha_bins[i]\n    # energy -> rate\n    return [diff_bins,p]\n","sub_path":"background_elimination.py","file_name":"background_elimination.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"132518906","text":"import docker\nimport os\nimport subprocess\nimport re\nimport tarfile\nimport shutil\nimport glob\nfrom datetime import datetime\nfrom stuubehoggr import utils\nfrom stuubehoggr.utils import logger\nfrom click import ClickException\n\n\nclass CommandException(ClickException):\n    pass\n\n\nclass Command:\n\n    \"\"\"\n    A command is an action to be performed on a container.\n    E.g. a container can be launched, destroyed or restarted.\n    \"\"\"\n\n    image_compatible = False\n\n    def __init__(self, docker_client):\n        self.docker_client = docker_client\n\n    def run(self, container_configuration):\n        logger.debug('Running command %s for %s' %\n                     (self.name, container_configuration['name']))\n        if 'type' in container_configuration and \\\n                container_configuration['type'] == 'image' and \\\n                not self.image_compatible:\n            logger.warn('Skipping %s for %s. Command not supported for images'\n                        % (self.name, container_configuration['name']))\n            return True\n\n        # To not modify the cwd permanently, store its location temporarily.\n        cwd = os.getcwd()\n\n        # Change into the container's directory\n        os.chdir(container_configuration['working_directory'])\n        try:\n            if self.name in container_configuration['commands']:\n                result = self._run_user_defined(container_configuration)\n            else:\n                result = self._run_default(container_configuration)\n        finally:\n            # chdir back into the original cwd.\n            os.chdir(cwd)\n\n        return result\n\n    def _run_user_defined(self, container_configuration):\n        # Read the command and process variables...\n        command = container_configuration['commands'][self.name]\n        command = utils.process_variables(command, container_configuration)\n\n        # Run the command and return the output\n        output = subprocess.check_output(command, shell=True).decode('ascii')\n        return output\n\n    def _run_default(self, container_configuration):\n        raise CommandException('Not implemented!')\n
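\n# A minimal sketch of how a new action would plug into the Command base class\n# above (RestartCommand is hypothetical; it does not exist in this module):\n#\n# class RestartCommand(Command):\n#     name = 'restart'\n#\n#     def _run_default(self, container_configuration):\n#         StopCommand(self.docker_client).run(container_configuration)\n#         return StartCommand(self.docker_client).run(container_configuration)\n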
\n\nclass StartCommand(Command):\n    name = 'start'\n\n    def _run_default(self, container_configuration):\n        container_name = container_configuration['name']\n        if not utils.container_exists(self.docker_client, container_name):\n            raise CommandException(\n                'Container \"%s\" does not exist and therefore can\\'t be started'\n                % container_name)\n\n        links = utils.get_linked_containers(container_configuration)\n        volumes_from = utils.get_volumes_from(container_configuration)\n        port_bindings = utils.get_port_bindings(container_configuration)\n\n        # If container is not running, start it...\n        if not utils.container_is_running(self.docker_client, container_name):\n            # TODO: evaluate output\n            self.docker_client.start(container_name, links=links,\n                                     volumes_from=volumes_from,\n                                     port_bindings=port_bindings)\n        return True\n\n\nclass StopCommand(Command):\n    name = 'stop'\n\n    def _run_default(self, container_configuration):\n        container_name = container_configuration['name']\n\n        # Skip if container does not exist yet...\n        if not utils.container_exists(self.docker_client, container_name):\n            logger.info(\n                'Container \"%s\" does not exist and therefore can\\'t be stopped'\n                % container_name)\n            return True\n\n        # Stop if container is running\n        if not utils.container_is_running(self.docker_client, container_name):\n            return True\n\n        logger.info('stopping container %s...' % container_name)\n        self.docker_client.stop(container_name)\n\n        # Kill if container is still running\n        if not utils.container_is_running(self.docker_client, container_name):\n            return True\n\n        logger.info('killing container %s...' % container_name)\n        self.docker_client.kill(container_name)\n\n        if not utils.container_is_running(self.docker_client, container_name):\n            return True\n        # TODO: It's probably better to throw an exception here...\n        return False\n
\n\nclass BuildCommand(Command):\n    name = 'build'\n\n    image_compatible = True\n\n    def _run_default(self, container_configuration):\n\n        tag_name = utils.get_tag_name(container_configuration)\n        working_dir = container_configuration['working_directory']\n\n        # RMI if image exists...\n        if utils.image_exists(self.docker_client, tag_name):\n            raise CommandException(\n                \"Image for %s already exists! You have to destroy \"\n                \"it first!\" % tag_name)\n\n        response = []\n        for l in self.docker_client.build(path=working_dir, tag=tag_name):\n            response.append(l)\n            utils.log_docker_output(l)\n\n        result = response[-1].decode()\n        if re.match(r'\\{\"stream\"\\:\"Successfully built [a-zA-Z0-9].*\\\"\\}',\n                    result):\n            return True\n        logger.error('Failed to build container: %s' % tag_name)\n        return False\n\n\nclass LaunchCommand(Command):\n    name = 'launch'\n\n    def _run_default(self, container_configuration):\n        container_name = container_configuration['name']\n        tag_name = utils.get_tag_name(container_configuration)\n\n        result = self.docker_client.create_container(\n            detach=True, name=container_name, image=tag_name)\n\n        if 'Id' in result and 'Warnings' in result and not result['Warnings']:\n            return True\n        return False\n\n\nclass DestroyCommand(Command):\n    name = 'destroy'\n\n    image_compatible = True\n\n    def _run_default(self, container_configuration):\n\n        StopCommand(self.docker_client).run(container_configuration)\n\n        recursively = True\n        # TODO: Allow the user to control this by using a CLI flag\n        # or via configuration\n\n        cleanup(self.docker_client, container_configuration, recursively)\n        # TODO: verify!\n        return True\n\n\nclass BootstrapCommand(Command):\n    name = 'bootstrap'\n\n    image_compatible = True\n\n    def _run_default(self, container_configuration):\n        container_name = container_configuration['name']\n\n        if not BuildCommand(self.docker_client).run(container_configuration):\n            raise CommandException(\n                \"Build of container '%s' failed\" % container_name)\n\n        if not LaunchCommand(self.docker_client).run(container_configuration):\n            raise CommandException(\n                \"Creation of container '%s' failed\" % container_name)\n\n        if not RestoreCommand(self.docker_client).run(container_configuration):\n            raise CommandException(\n                \"Restore of container '%s' failed\" % container_name)\n\n        if not StartCommand(self.docker_client).run(container_configuration):\n            raise CommandException(\n                \"Start of container '%s' failed\" % container_name)\n        return True\n\n\nclass BackupCommand(Command):\n    name = 'backup'\n\n    def _run_default(self, container_configuration):\n        container_name = container_configuration['name']\n\n        # Do not backup containers without any volumes defined\n        volumes = utils.get_volumes(self.docker_client, container_name)\n        if len(volumes) == 0:\n            return True\n\n        backup_directory = utils.get_backup_dir(container_configuration)\n        timestamp = datetime.now().strftime(\"%Y-%m-%d_%H%M\")\n\n        if not os.path.exists(backup_directory):\n            os.makedirs(backup_directory)\n\n        # For each volume\n        for (name, path) in volumes.items():\n            archive_name = '%s/%s-%s.tar.gz' % (\n                backup_directory, name.replace('/', '_')[1:], timestamp)\n            logger.info(\"Backing up %s\" % archive_name)\n            with tarfile.open(archive_name, \"w:gz\") as tar:\n                tar.add(path, arcname='/')\n            logger.info(\"Backup %s complete!\" % archive_name)\n\n        return True\n
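\n# A quick trace of the archive naming above (the volume path is illustrative):\n# a volume mounted at '/var/data' with timestamp '2015-06-01_1200' is written to\n# backup_directory as 'var_data-2015-06-01_1200.tar.gz', which is exactly the\n# pattern RestoreCommand globs for below.\n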
\n\nclass RestoreCommand(Command):\n    name = 'restore'\n\n    def _run_default(self, container_configuration):\n        container_name = container_configuration['name']\n\n        # Do not restore containers without any volumes defined\n        volumes = utils.get_volumes(self.docker_client, container_name)\n        if len(volumes) == 0:\n            return True\n\n        backup_directory = utils.get_backup_dir(container_configuration)\n\n        for (name, path) in volumes.items():\n            archive_pattern = '%s/%s-*.tar.gz' % (\n                backup_directory, name.replace('/', '_')[1:])\n            all = sorted(glob.glob(archive_pattern))\n            if len(all) == 0 or not os.path.isfile(all[-1]):\n                logger.warn('No backups found... skipping restore')\n                continue\n            latest = all[-1]\n            logger.info(\"Restoring %s from %s\" % (name, latest))\n            tar = tarfile.open(latest)\n            tar.extractall(path=path)\n            tar.close()\n            logger.info(\"%s restored!\" % (name))\n\n        return True\n\n\ndef cleanup(docker_client, container_configuration, recursively):\n    container_name = container_configuration['name']\n    if not utils.container_exists(docker_client, container_name):\n        tag_name = utils.get_tag_name(container_configuration)\n        # Abort if image does not exist...\n        if not utils.image_exists(docker_client, tag_name):\n            logger.debug(\"Skipping recursive cleanup for non existing container %s\"\n                         % container_name)\n            return\n        image_id = tag_name\n    else:\n        # Remove the highest container...\n        image_id = docker_client.inspect_container(container_name)['Image']\n        docker_client.remove_container(container_name, v=True)\n\n    while True:\n        image = docker_client.inspect_image(image_id)\n        if 'Parent' not in image:\n            break\n\n        if utils.container_exists(docker_client, image['Container']):\n            try:\n                docker_client.remove_container(image['Container'], v=True)\n                logger.debug(\"Removed container %s\" % image['Container'])\n            except Exception as e:\n                logger.debug(\"Can not delete further than container\"\n                             \" %s (%s)\" % (image['Container'], e))\n                break\n\n        try:\n            docker_client.remove_image(image_id, noprune=True)\n            logger.debug(\"Removed image %s\" % image_id)\n        except Exception as e:\n            logger.debug(\"Can not delete further than image\"\n                         \" %s (%s)\" % (image_id, e))\n            break\n\n        # Break if not recursive\n        if not recursively:\n            break\n\n        # Somehow, it can happen that the parent value is empty\n        # If this happens, there are no more parents - so quit!\n        if image['Parent'].strip() == \"\":\n            break\n        image_id = image['Parent']\n\n\ndef tabularasa(docker_client):\n    # TODO: user must confirm!\n    running = docker_client.containers(quiet=True)\n\n    # Stop all running containers\n    for container in running:\n        logger.debug(\"Stopping container %s\" % container['Id'])\n        docker_client.stop(container['Id'])\n\n    # Kill all running containers\n    running = docker_client.containers(quiet=True)\n    for container in running:\n        logger.debug(\"Killing container %s\" % container['Id'])\n        docker_client.kill(container['Id'])\n\n    # Remove all containers\n    all_containers = docker_client.containers(quiet=True, all=True)\n    for container in all_containers:\n        logger.debug(\"Removing container %s\" % container['Id'])\n        docker_client.remove_container(container=container['Id'],\n                                       v=True)\n    # Remove all images\n    all_images = docker_client.images(quiet=True, all=True)\n    while(all_images):\n        count = len(all_images)\n        for image in all_images:\n            try:\n                # Skip if the image already is removed....\n                if utils.image_exists(docker_client, image):\n                    logger.debug(\"Removing image %s\" % image)\n                    docker_client.remove_image(image=image)\n            except Exception as e:\n                exists = docker_client.inspect_image(image_id=image)\n                logger.warn(\"Attempt failed to remove image %s (%s)\" %\n                            (image, e))\n\n        all_images = docker_client.images(quiet=True, all=True)\n        if len(all_images) == count:\n            logger.warn(\"Failed to clean up all images (See Previous errors)\")\n            break\n\n    # TODO:\n\n    # TODO: rm -Rf /var/lib/docker/vfs/dir/*\n    # rm -Rf /var/lib/docker/volumes/*\n\n\ndef create_default_docker_client():\n    client = docker.Client()\n    try:\n        client.ping()\n    except:\n        # TODO: Pass previous exception!\n        raise CommandException('Docker is not running')\n    return 
client\n","sub_path":"src/main/python/stuubehoggr/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":12985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"397208526","text":"#!/usr/bin/env python \n\nimport simpleMDM\n\nclass apps(simpleMDM.connection):\n def __init__(self, apiKey):\n simpleMDM.connection.__init__(self, apiKey)\n self.url = self._url(\"/apps\")\n\n def getApp(self, appID=\"all\"):\n url = self.url\n if not appID == \"all\":\n url = url + \"/\" + appID\n return self._getData(url)\n\n def createApp(self, name=None, appStoreID=None, bundleID=None, binary=None):\n data = {}\n files = {}\n if name:\n data['name'] = name\n if appStoreID:\n data['app_store_id'] = appStoreID\n elif bundleID:\n data['bundle_id'] = bundleID\n elif binary:\n files['binary'] = open(binary,'rb')\n return self._postData(self.url, data, files)\n \n def updateApp(self, appID, binary=None, name=None):\n url = self.url + \"/\" + appID\n data = {}\n files = {}\n if name:\n data['name'] = name\n if binary:\n files['binary'] = open(binary,'rb')\n return self._patchData(url, data, files)\n\n def deleteApp(self, appID):\n url = self.url + \"/\" + appID\n return self._deleteData(url)","sub_path":"simpleMDMpy/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"530250263","text":"import os\nimport json\nfrom flask import Flask, request, jsonify, abort\nfrom sqlalchemy import exc\nfrom flask_cors import CORS\nfrom .database.models import db_drop_and_create_all, setup_db, Drink\nfrom .auth.auth import AuthError, requires_auth\n\nAPP = Flask(__name__)\nsetup_db(APP)\nCORS(APP)\n\n\ndb_drop_and_create_all()\n\n\n# ROUTES\n@APP.route('/drinks', methods=['GET'])\ndef get_drinks():\n \"\"\"Publicly gets all drinks from the database\"\"\"\n # Get all drinks from the database and format them.\n available_drinks = Drink.query.all()\n drinks = [drink.short() for drink in available_drinks]\n\n # Abort if no drinks exist.\n if len(drinks) == 0:\n abort(404)\n else:\n return jsonify({\n 'success': True,\n 'drinks': drinks,\n 'status_code': 200\n }), 200\n\n\n@APP.route('/drinks-detail', methods=['GET'])\n@requires_auth(permission='get:drinks-detail')\ndef drinks_detail(payload):\n \"\"\"Privately gets all drinks from the database\"\"\"\n # Get all drinks from the database and format them.\n available_drinks = Drink.query.all()\n drinks = [drink.long() for drink in available_drinks]\n\n # Abort if there are no drinks.\n if len(available_drinks) == 0:\n abort(404)\n else:\n return jsonify({\n \"success\": True,\n \"drinks\": drinks,\n \"status_code\": 200\n }), 200\n\n\n@APP.route('/drinks', methods=['POST'])\n@requires_auth(permission='post:drinks')\ndef make_drink(payload):\n \"\"\"Adds a new drink to the database\"\"\"\n # Get the submitted drink data and format it for submission.\n body = request.get_json()\n title = body[\"title\"]\n recipe = body[\"recipe\"]\n\n # Try to add a new drink to the database.\n try:\n new_drink = Drink(title=title, recipe=json.dumps(recipe))\n new_drink.insert()\n\n return jsonify({\n \"success\": True,\n \"drinks\": new_drink.long(),\n \"status_code\": 200\n }), 200\n\n # On failure due to duplicate entry, abort 409.\n except exc.SQLAlchemyError:\n abort(409)\n\n\n@APP.route('/drinks/', methods=['PATCH'])\n@requires_auth(permission='patch:drinks')\ndef edit_drink(payload, drink_id):\n 
\"\"\"Edits an existing drink in the database.\"\"\"\n # Get the submitted drink data and format it for submission.\n body = request.get_json()\n new_title = body[\"title\"]\n new_recipe = json.dumps(body[\"recipe\"])\n\n # Search for the drink in the database.\n drink = Drink.query.filter_by(id=drink_id).one_or_none()\n\n # If the drink_id isn't in the database, abort.\n if not drink:\n abort(404)\n\n # Update the drink's data and submit it to the database.\n else:\n drink.title = new_title\n drink.recipe = new_recipe\n\n drink.update()\n\n return jsonify({\n \"success\": True,\n \"drinks\": [drink.long()],\n \"status_code\": 200\n }), 200\n\n\n@APP.route('/drinks/', methods=['DELETE'])\n@requires_auth(permission='delete:drinks')\ndef delete_drink(payload, drink_id):\n \"\"\"Deletes an existing drink from the database.\"\"\"\n # Search for the drink in the database.\n drink = Drink.query.filter_by(id=drink_id).one_or_none()\n\n # If the drink_id isn't in the database, abort.\n if not drink:\n abort(404)\n\n # If it is, delete it.\n elif drink.id == drink_id:\n drink.delete()\n\n return jsonify({\n \"success\": True,\n \"delete\": drink_id,\n \"status_code\": 200\n }), 200\n\n\n# Error Handling\n@APP.errorhandler(AuthError)\ndef auth_error(error):\n return jsonify({\n \"success\": False,\n \"error\": error.status_code,\n \"message\": error.error\n }), error.status_code\n\n\n@APP.errorhandler(400)\ndef bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"bad request\"\n }), 400\n\n\n@APP.errorhandler(404)\ndef not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"not found\"\n }), 404\n\n\n@APP.errorhandler(405)\ndef method_not_allowed(error):\n return jsonify({\n \"success\": False,\n \"error\": 405,\n \"message\": \"method not allowed\"\n }), 405\n\n\n@APP.errorhandler(409)\ndef duplicate_entry(error):\n return jsonify({\n \"success\": False,\n \"error\": 409,\n \"message\": \"duplicate of an existing entry\"\n }), 409\n\n\n@APP.errorhandler(422)\ndef unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n","sub_path":"projects/03_coffee_shop_full_stack/starter_code/backend/src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"205844158","text":"import Config\nimport pygame\nfrom Paddle import Paddle\n\nx = pygame.init()\nprint(x)\nscreen = pygame.display.set_mode((Config.SCREEN_WIDTH, Config.SCREEN_HEIGHT), pygame.DOUBLEBUF)\nclock = pygame.time.Clock()\npaddle = Paddle()\n\ndef start():\n while True:\n dt = clock.tick(30)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n screen.fill((0,0,0))\n paddle.update(dt)\n paddle.draw(screen)\n pygame.display.update()\n\n\nstart()","sub_path":"GameLoop.py","file_name":"GameLoop.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"253646419","text":"def __compute_part_1(filename):\n counter = -1\n prev = -1\n with open(filename) as f:\n line = f.readline()\n while line:\n val = int(line)\n if val > prev:\n counter +=1\n \n prev = val\n line = f.readline()\n return counter\n\ndef __compute_part_2(filename):\n counter = 0\n prev_1 = -1\n prev_2 = -1\n prev_3 = -1\n with open(filename) as f: \n line = f.readline().rstrip()\n while line:\n val = int(line)\n 
if prev_1 == -1:\n prev_1 = val\n line = f.readline().rstrip()\n continue\n\n if prev_2 == -1:\n prev_2 = prev_1\n prev_1 = val\n line = f.readline().rstrip()\n continue\n\n if prev_3 == -1:\n prev_3 = prev_2\n prev_2 = prev_1\n prev_1 = val\n line = f.readline().rstrip()\n continue\n\n if val + prev_1 + prev_2 > prev_1 + prev_2 + prev_3:\n counter += 1\n \n prev_3 = prev_2\n prev_2 = prev_1\n prev_1 = val \n line = f.readline().rstrip()\n\n return counter\n\ndef main(filename):\n # result = __compute_part_1(filename)\n result = __compute_part_2(filename)\n print(result)\n \nif __name__== \"__main__\":\n main(\"1_example\")\n # main(\"1_input\")\n","sub_path":"adventofcode_2021/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"30923283","text":"import json\nimport subprocess\nimport sys\n\nwith open('dependencies.json') as json_data:\n dependencies = json.load(json_data)\n\ndependencies = dependencies[\"Dependencies\"]\nfor key, value in dependencies.items():\n package = key+'=='+value\n res = subprocess.run([\"pip\", \"install\",package])\n # print(res)\n if res.returncode==0:\n print(\"{} successfully installed\".format(key))\n else:\n print(\"Error in installing {}\".format(key))\n","sub_path":"section2/Linux Problem/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"27021354","text":"from __future__ import print_function\n\nimport pytest\n\nimport lena.core \nfrom lena.core import Sequence, Source\nfrom lena.core import FillSeq, FillInto\nfrom tests.examples.fill import StoreFilled\nfrom tests.examples.numeric import Add\n\n\ndef test_lena_sequence_fill():\n ## test LenaSequence\n s1 = FillSeq(StoreFilled())\n s2 = FillSeq(Add(0), StoreFilled())\n # can take len\n assert len(s1) == 1\n assert len(s2) == 2\n # can get item\n assert isinstance(s2[0], FillInto)\n # deletion is prohibited\n with pytest.raises(TypeError):\n del s2[0]\n # setting elements is prohibited\n with pytest.raises(TypeError):\n s2[0] = 0\n\n\ndef test_fill():\n store = StoreFilled()\n\n s1 = FillSeq(store)\n # empty FillSeq sequence doesn't transform\n s1.fill(1)\n assert store == [1]\n\n s2 = FillSeq(Add(0), store)\n store.list = []\n s2.fill(1)\n assert store == [1]\n\n # Store adds elements\n s3 = FillSeq(Add(1), store)\n s3.fill(1)\n assert store.list == [1, 2]\n # sequence of two elements\n s4 = FillSeq(Add(1), Add(-1), store)\n s4.fill(1)\n assert store == [1, 2, 1]\n\n # nested long FillSeq\n # won't work here.\n # store = StoreFilled()\n # s5 = FillSeq(s1, Add(0), *s4, Add(3), store)\n # s5.fill(1)\n # assert store == [4]\n # with pytest.raises(lena.core.LenaTypeError):\n # s = FillSeq(5, store)\n","sub_path":"tests/core/test_fill_seq.py","file_name":"test_fill_seq.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"384095340","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\ndef getBins(X, attri, num):\n tarseri = X[attri][X[attri]!=-1]\n #\n mnum = int(len(tarseri)/10)\n num = min([num, mnum])\n \n seri = pd.qcut(tarseri, num, duplicates='drop')\n vc = seri.value_counts()\n bins = sorted([ind.left for ind in vc.index]) + [10**20]\n bins[0] = -10**20\n \n return bins\n\ndef getBinQuan(aseri, bseri):\n avg = bseri.mean()\n df = 
pd.DataFrame({'a':aseri, 'b':bseri})\n pt = pd.pivot_table(df,index='a',values='b',aggfunc=np.mean)\n vnum = df.a.value_counts()\n vnumdic = dict(zip(list(vnum.index),list(vnum.values)))\n \n klist = list(pt.index)\n vlist = list(pt.b)\n \n rdic = dict(zip(klist, vlist))\n\n for k in rdic.keys():\n rdic[k] = (100.0*avg + vnumdic[k]*rdic[k])/(100.0+vnumdic[k])\n \n return rdic\n\ndef getTransData(X, y, tarlist):\n X_trans = pd.DataFrame()\n propdic = {}\n for k in X.columns:\n if not k in tarlist:\n X_trans[k] = X[k]\n continue\n \n valnum = X[k].value_counts().shape[0]\n\n if valnum > 20:\n try:\n bins = getBins(X, k, 30)\n if bins[1] > 0:\n bins.insert(1, 0.0)\n except:\n continue\n aseri = pd.cut(X[k],bins,include_lowest=True,labels=range(len(bins)-1)) \n else:\n aseri = X[k]\n bins = []\n \n mapdic = getBinQuan(aseri, y)\n X_trans[k] = aseri.apply(lambda x:mapdic[x])\n propdic[k] = propdic.get(k, {})\n propdic[k]['bins'] = bins\n propdic[k]['mapdic'] = mapdic\n \n return X_trans, propdic\n\ndef getTransTest(X, propdic):\n X_trans = pd.DataFrame()\n for k in X.columns:\n if not k in propdic.keys():\n X_trans[k] = X[k]\n else:\n if len(propdic[k]['bins']) == 0:\n aseri = X[k]\n else:\n aseri = pd.cut(X[k], propdic[k]['bins'], include_lowest=True, labels=range(len(propdic[k]['bins'])-1))\n X_trans[k] = [float(propdic[k]['mapdic'].get(x,0.01)) for x in aseri]\n# X_trans[k] = [float(x) for x in aseri]\n \n return X_trans\n\nif __name__ == '__main__':\n df = pd.DataFrame()\n df['x'] = range(100)\n df['y'] = [v*0.1 for v in range(100)]\n getBins(df,'x',10)\n getBinQuan(df['x'],df['y'])\n \n tarlist = ['x']\n X_trans, propdic = getTransData(df, df['y'], tarlist)\n print(X_trans.head(20))\n \n df_t = pd.DataFrame()\n df_t['x'] = range(1000)\n df_t['y'] = [v*0.1 for v in range(1000)]\n X_test = getTransTest(df_t, propdic)\n print(X_test.head(20))\n\n","sub_path":"experiment_zhuangyuan/data_trans.py","file_name":"data_trans.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"70932697","text":"# pylint:disable=unused-variable\n# pylint:disable=unused-argument\n# pylint:disable=redefined-outer-name\n\nimport sys\nfrom pathlib import Path\nfrom typing import Dict\n\nimport pytest\nimport sqlalchemy as sa\nfrom fastapi import FastAPI\nfrom starlette.testclient import TestClient\n\nfrom simcore_service_catalog.api.dependencies.director import get_director_api\nfrom simcore_service_catalog.core.application import init_app\n\ncurrent_dir = Path(sys.argv[0] if __name__ == \"__main__\" else __file__).resolve().parent\n\n\n@pytest.fixture\ndef app(\n monkeypatch, devel_environ: Dict[str, str], postgres_db: sa.engine.Engine\n) -> FastAPI:\n # Emulates environ so settings can get config\n for key, value in devel_environ.items():\n monkeypatch.setenv(key, value)\n\n app = init_app()\n yield app\n\n\n@pytest.fixture\ndef client(app: FastAPI) -> TestClient:\n with TestClient(app) as cli:\n # Note: this way we ensure the events are run in the application\n yield cli\n\n\n@pytest.fixture()\nasync def director_mockup(loop, monkeypatch, app: FastAPI):\n class FakeDirector:\n async def get(self, url: str):\n return \"\"\n\n app.dependency_overrides[get_director_api] = FakeDirector\n\n yield\n\n app.dependency_overrides[get_director_api] = 
None\n","sub_path":"services/catalog/tests/unit/with_dbs/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"264106007","text":"# this file are functions for future use\nimport torch\nfrom torch.utils import data\nfrom torch import nn\nfrom gensim.models import Word2Vec\n\ndef load_training_data(path):\n #data with label\n with open(path, 'r',encoding=\"utf-8\") as f:\n lines = f.readlines()\n lines = [line.strip('\\n').split(' ') for line in lines]\n x = [line[2:] for line in lines]\n y = [line[0] for line in lines]\n return x, y\ndef load_testing_data(path):\n with open(path, 'r',encoding=\"utf-8\") as f:\n lines = f.readlines()\n X = [\"\".join(line.strip('\\n').split(\",\")[1:]).strip() for line in lines[1:]]\n X = [sen.split(' ') for sen in X]\n return X\n\ndef evaluation(outputs, labels):\n # outputs => probability (float)\n # labels => labels\n outputs[outputs>=0.5] = 1 # 大於等於 0.5 為正面\n outputs[outputs<0.5] = 0 # 小於 0.5 為負面\n correct = torch.sum(torch.eq(outputs, labels)).item()\n return correct\n\nclass TwitterDataset(data.Dataset):\n \"\"\"\n Expected data shape like:(data_num, data_len)\n Data can be a list of numpy array or a list of lists\n input data shape : (data_num, seq_len, feature_dim)\n \n __len__ will return the number of data\n \"\"\"\n def __init__(self, X, y):\n self.data = X\n self.label = y\n def __getitem__(self, idx):\n if self.label is None: return self.data[idx]\n return self.data[idx], self.label[idx]\n def __len__(self):\n return len(self.data)\n\n# 這個 block 是要拿來訓練的模型\nclass LSTM_Net(nn.Module):\n def __init__(self, embedding, embedding_dim, hidden_dim, num_layers, dropout=0.5, fix_embedding=True):\n super(LSTM_Net, self).__init__()\n # 製作 embedding layer\n self.embedding = torch.nn.Embedding(embedding.size(0),embedding.size(1))\n self.embedding.weight = torch.nn.Parameter(embedding)\n # 是否將 embedding fix 住,如果 fix_embedding 為 False,在訓練過程中,embedding 也會跟著被訓練\n self.embedding.weight.requires_grad = False if fix_embedding else True\n self.embedding_dim = embedding.size(1)\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.dropout = dropout\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=num_layers, batch_first=True)\n self.linear = nn.Sequential(\n nn.Dropout(),\n nn.Linear(hidden_dim, 100, bias=True),\n nn.BatchNorm1d(100),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(100, 100, bias=True),\n nn.BatchNorm1d(100),\n nn.ReLU()\n )\n self.classifier = nn.Sequential( nn.Dropout(dropout),\n nn.Linear(100, 1),\n nn.Sigmoid() )\n def forward(self, inputs):\n inputs = self.embedding(inputs)\n x, _ = self.lstm(inputs, None)\n # x 的 dimension (batch, seq_len, hidden_size)\n # 取用 LSTM 最後一層的 hidden state\n x = x[:, -1, :]\n x = self.linear(x)\n x = self.classifier(x)\n return x\n# 這個 block 用來做 data 的預處理\nclass Preprocess():\n def __init__(self, sentences, sen_len, w2v_path=\"w2v_all.model\"):\n self.w2v_path = w2v_path\n self.sentences = sentences\n self.sen_len = sen_len\n self.idx2word = []\n self.word2idx = {}\n self.embedding_matrix = []\n def get_w2v_model(self):\n # 把之前訓練好的 word to vec 模型讀進來\n self.embedding = Word2Vec.load(self.w2v_path)\n self.embedding_dim = self.embedding.vector_size\n def add_embedding(self, word):\n # 把 word 加進 embedding,並賦予他一個隨機生成的 representation vector\n # word 只會是 \"\" 或 \"\"\n vector = torch.empty(1, self.embedding_dim)\n torch.nn.init.uniform_(vector)\n self.word2idx[word] = 
len(self.word2idx)\n self.idx2word.append(word)\n self.embedding_matrix = torch.cat([self.embedding_matrix, vector], 0)\n def make_embedding(self, load=True):\n print(\"Get embedding ...\")\n # 取得訓練好的 Word2vec word embedding\n if load:\n print(\"loading word to vec model ...\")\n self.get_w2v_model()\n else:\n raise NotImplementedError\n # 製作一個 word2idx 的 dictionary\n # 製作一個 idx2word 的 list\n # 製作一個 word2vector 的 list\n for i, word in enumerate(self.embedding.wv.vocab):\n print('get words #{}'.format(i+1), end='\\r')\n #e.g. self.word2index['he'] = 1 \n #e.g. self.index2word[1] = 'he'\n #e.g. self.vectors[1] = 'he' vector\n self.word2idx[word] = len(self.word2idx)\n self.idx2word.append(word)\n self.embedding_matrix.append(self.embedding[word])\n print('')\n self.embedding_matrix = torch.tensor(self.embedding_matrix)\n # 將 \"\" 跟 \"\" 加進 embedding 裡面\n self.add_embedding(\"\")\n self.add_embedding(\"\")\n print(\"total words: {}\".format(len(self.embedding_matrix)))\n return self.embedding_matrix\n def pad_sequence(self, sentence):\n # 將每個句子變成一樣的長度\n if len(sentence) > self.sen_len:\n sentence = sentence[:self.sen_len]\n else:\n pad_len = self.sen_len - len(sentence)\n for _ in range(pad_len):\n sentence.append(self.word2idx[\"\"])\n assert len(sentence) == self.sen_len\n return sentence\n def sentence_word2idx(self):\n # 把句子裡面的字轉成相對應的 index\n sentence_list = []\n for i, sen in enumerate(self.sentences):\n print('sentence count #{}'.format(i+1), end='\\r')\n sentence_idx = []\n for word in sen:\n if (word in self.word2idx.keys()):\n sentence_idx.append(self.word2idx[word])\n else:\n sentence_idx.append(self.word2idx[\"\"])\n # 將每個句子變成一樣的長度\n sentence_idx = self.pad_sequence(sentence_idx)\n sentence_list.append(sentence_idx)\n return torch.LongTensor(sentence_list)\n def labels_to_tensor(self, y):\n # 把 labels 轉成 tensor\n y = [int(label) for label in y]\n return torch.LongTensor(y)","sub_path":"hw4/src/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":6448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"197599798","text":"#coding=utf-8\n\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom cs import views as x\nurlpatterns = [\n url(r'^admin/', admin.site.urls,name='backstart'),\n url(r'^login/$', x.login,name=\"login\"),\n url(r'^nothing/$', x.nothing,name=\"nothing\"),\n url(r'^index/$', x.index,name=\"index\"),\n url(r'^logout/$', x.logout,name=\"logout\"),\n url(r'^raisetest/$', x.raisetest,name=\"raisetest\"),\n url(r'^needsoftest/$', x.needsoftest,name=\"needsoftest\"),\n url(r'^needsoftest/(\\d+)/$', x.search,name=\"search\"),#sousuo\n url(r'^wbedone/$', x.wbedone,name=\"wbedone\"),\n url(r'^cwbedone/$', x.cwbedone,name=\"cwbedone\"),\n url(r'^wbedone/ychange/(\\d+)/$', x.ychange,name=\"ychange\"),\n url(r'^cwbedone/cchange/(\\d+)/$', x.cchange,name=\"cchange\"),\n url(r'^cwbedone/closeid/(\\d+)/$', x.closeid,name=\"closeid\"),\n url(r'^needsoftest/prodetail/(\\d+)/$', x.prodetail,name=\"prodetail\"),\n url(r'^test/$', x.test,name=\"test\"),\n url(r'^tester/$', x.tester,name=\"tester\"),\n url(r'^needslist/$', x.needslist,name=\"needslist\"),\n url(r'^backstarter/adduser/$', x.adduser,name=\"adduser\"),\n\n url(r'^backstarter/$', x.backstarter,name=\"backstarter\"),\n url(r'^backstarter/backindex/$', x.backindex,name=\"backindex\"),\n\n url(r'^backstarter/userlist/$', x.userlist,name=\"userlist\"),\n url(r'^backstarter/needslist/$', x.backneedslist,name=\"backneedslist\"),\n\n 
url(r'^backstarter/userlist/del/(\\d+)/$', x.deleteuser,name=\"deleteuser\"),\n \n url(r'^backstarter/userlist/mod/(\\d+)/$', x.modify,name=\"modify\"),\n url(r'^backstarter/userlist/email/(\\d+)/$', x.modifyemail,name=\"modifyemail\"),\n\n url(r'^backstarter/needslist/del/(\\d+)/$', x.delneed,name=\"delneed\"),\n\n url(r'^backstarter/needslist/distributes/(\\d+)/$', x.distributes,name=\"distributes\"),\n \n\n \n \n \n \n \n\n\n]\n","sub_path":"pj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"433149358","text":"import torch\nfrom PIL import Image, ImageDraw\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom lu_vp_detect import VPDetection\n\nnormalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n)\npreprocess = transforms.Compose([\n #transforms.Scale(256),\n #transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize\n])\n\nlength_thresh = 20\nprincipal_point = None\nfocal_length = 256\nseed = 1337\nvpd = VPDetection(length_thresh, principal_point, focal_length, seed)\n\n\nclass CityDataset(Dataset):\n def __init__(self, tfms):\n super(CityDataset, self).__init__()\n self.images = np.load('img.npy')\n self.depth = np.load('depth.npy')\n # self.vp = np.load('vp.npy')\n self.tfms = tfms\n\n def __getitem__(self, index):\n fn_img = self.images[index]\n img = Image.open(fn_img)\n img = np.array(img, dtype = np.uint8)\n\n vps = vpd.find_vps(fn_img)\n # print(vps)\n Seg_line=vpd.create_debug_VP_image(show_image=False)\n \n\n # fn_vp = self.vp[index]\n # vps = np.load(fn_vp)\n # vps = vps['vpts']\n # img=np.transpose(img, axes=[2,1,0])\n # print(img.shape)\n # print(img)\n # img_ = Image.fromarray(img)\n # img_.show()\n # img_1 = Image.fromarray(img)\n # img_1.show()\n \n fn_dep = self.depth[index]\n dep = np.load(fn_dep)\n dep = dep['depth'].squeeze()\n # dep=np.transpose(dep, axes=[1,0])\n # print(dep)\n # dep = (dep/dep.max())*255\n # dep = dep*10\n dep[dep>100]=100\n dep[dep==0]=100\n dep = (dep/dep.max())*255\n dep = dep.astype(np.uint8)\n # print(dep.shape)\n # print(dep)\n # img_ = Image.fromarray(dep)\n # img_.show()\n if self.tfms:\n tfmd_sample = self.tfms({\"image\":img, \"depth\":dep})\n img, dep = tfmd_sample[\"image\"], tfmd_sample[\"depth\"]\n \n return (img,dep,vps,Seg_line)\n\n def __len__(self):\n return len(self.images)\n\nclass CityDataset_normal(Dataset):\n def __init__(self, tfms):\n super(CityDataset_normal, self).__init__()\n self.images = np.load('img.npy')\n self.normal = np.load('normal.npy')\n self.tfms = tfms\n\n def __getitem__(self, index):\n fn_img = self.images[index]\n img = Image.open(fn_img)\n img = np.array(img, dtype = np.uint8)\n # img=np.transpose(img, axes=[2,1,0])\n # print(img.shape)\n # print(img)\n # img_ = Image.fromarray(img)\n # img_.show()\n # img_1 = Image.fromarray(img)\n # img_1.show()\n \n fn_normal = self.normal[index]\n normal = np.load(fn_normal)\n normal = normal['normal']\n # normal =np.transpose(normal, axes=[2,1,0])\n\n # dep=np.transpose(dep, axes=[1,0])\n # print(dep)\n # dep = (dep/dep.max())*255\n # dep = dep*10\n # dep[dep>100]=100\n # dep[dep==0]=100\n # dep = (dep/dep.max())*255\n # dep = dep.astype(np.uint8)\n # print(dep.shape)\n # print(dep)\n # img_ = Image.fromarray(dep)\n # img_.show()\n if self.tfms:\n tfmd_sample = self.tfms({\"image\":img, \"depth\":normal})\n img, dep = 
tfmd_sample[\"image\"], tfmd_sample[\"depth\"]\n \n return (img,dep)\n\n def __len__(self):\n return len(self.images)","sub_path":"dataset2.py","file_name":"dataset2.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"409158665","text":"import json\nfrom requests_oauthlib import OAuth1Session\nfrom twitter_settings import *\nfrom datetime import datetime\n\n\ndef send_secure_request(png_address):\n\n url_media = \"https://upload.twitter.com/1.1/media/upload.json\"\n url_text = \"https://api.twitter.com/1.1/statuses/update.json\"\n\n # OAuth認証 セッションを開始\n twitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\n # 画像投稿\n files = {\"media\": open(png_address, 'rb')}\n req_media = twitter.post(url_media, files=files)\n\n # レスポンスを確認\n if req_media.status_code != 200:\n print(\"画像アップデート失敗: %s\", req_media.text)\n exit()\n\n # Media ID を取得\n media_id = json.loads(req_media.text)['media_id']\n print(\"Media ID: %d\" % media_id)\n\n # Media ID を付加してテキストを投稿\n target = 'takashaaark'\n message = '画像認証です。よろしく頼みます'\n text = '@' + target + ' ' + message\n params = {'status': text, \"media_ids\": [media_id]}\n req_media = twitter.post(url_text, params=params)\n\n # 再びレスポンスを確認\n if req_media.status_code != 200:\n print(\"テキストアップデート失敗: %s\", req_media.text)\n exit()\n\n print(\"送信完了:\", text)\n\n\ndef tweet_sentence(message):\n\n tweet_time = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n\n url_text = \"https://api.twitter.com/1.1/statuses/update.json\"\n\n # OAuth認証 セッションを開始\n twitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\n text = message + '\\n' + tweet_time\n\n # Media ID を付加してテキストを投稿\n params = {'status': text}\n req_media = twitter.post(url_text, params=params)\n\n # 再びレスポンスを確認\n if req_media.status_code != 200:\n print(\"テキストアップデート失敗: %s\", req_media.text)\n exit()\n\n print(\"TWEET:\", message)\n\n\ndef p_tweet_sentence(message):\n \"\"\"\n 公開用ツイート\n \"\"\"\n\n url_text = \"https://api.twitter.com/1.1/statuses/update.json\"\n\n # OAuth認証 セッションを開始\n twitter = OAuth1Session(P_CONSUMER_KEY, P_CONSUMER_SECRET, P_ACCESS_TOKEN, P_ACCESS_TOKEN_SECRET)\n\n text = message\n\n # Media ID を付加してテキストを投稿\n params = {'status': text}\n req_media = twitter.post(url_text, params=params)\n\n # 再びレスポンスを確認\n if req_media.status_code != 200:\n print(\"テキストアップデート失敗: %s\", req_media.text)\n exit()\n\n print(\"TWEET:\", message)\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"twitter_tweet.py","file_name":"twitter_tweet.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"380924360","text":"\"\"\"\n Модуль обработки вакансий\n\"\"\"\nfrom colorama import init, Fore\ninit(autoreset=True)\n\nEUR_COURSE = 74.3\nUSD_COURSE = 65.1\nGROSS_COURSE = 0.87\n\n\ndef extract_valid_data(raw_vacancy: dict) -> dict:\n \"\"\"\n Производит обработку исходных данных, генерируя на выходе\n словарь со стандартизованными полями\n Сюда приходит метод add_vacancy из класса VacancyManager\n \"\"\"\n filthered_vacancy = dict()\n\n # сама вакансия\n filthered_vacancy['id'] = raw_vacancy.get('id')\n filthered_vacancy['name'] = raw_vacancy.get('name')\n filthered_vacancy['url'] = raw_vacancy.get('alternate_url')\n filthered_vacancy['date'] = raw_vacancy.get('published_at')\n filthered_vacancy['responsibility'] = None\n filthered_vacancy['requirement'] = None\n\n # 
employer\n    filtered_vacancy['employer_name'] = None\n    filtered_vacancy['employer_url'] = None\n\n    # location\n    filtered_vacancy['area'] = None\n    filtered_vacancy['metro'] = None\n\n    # salary\n    filtered_vacancy['salary_from'] = 0\n    filtered_vacancy['salary_to'] = 0\n    filtered_vacancy['gross'] = None\n    filtered_vacancy['currency'] = None\n\n    if 'salary' in raw_vacancy and raw_vacancy['salary']:\n        actual_from = raw_vacancy['salary'].get('from')\n        if actual_from:\n            filtered_vacancy['salary_from'] = actual_from\n        actual_to = raw_vacancy['salary'].get('to')\n        if actual_to:\n            filtered_vacancy['salary_to'] = actual_to\n        filtered_vacancy['gross'] = raw_vacancy['salary'].get('gross')\n        filtered_vacancy['currency'] = raw_vacancy['salary'].get('currency')\n\n    if 'employer' in raw_vacancy and raw_vacancy['employer']:\n        filtered_vacancy['employer_name'] = raw_vacancy['employer'].get('name')\n        filtered_vacancy['employer_url'] = raw_vacancy['employer'].get('alternate_url')\n\n    if 'area' in raw_vacancy and raw_vacancy['area']:\n        filtered_vacancy['area'] = raw_vacancy.get('area')\n\n    if 'snippet' in raw_vacancy and raw_vacancy['snippet']:\n        filtered_vacancy['responsibility'] = raw_vacancy['snippet'].get('responsibility')\n        filtered_vacancy['requirement'] = raw_vacancy['snippet'].get('requirement')\n\n    if 'address' in raw_vacancy and raw_vacancy['address']:\n        filtered_vacancy['metro'] = raw_vacancy['address'].get('metro')\n    return filtered_vacancy\n\n\ndef str_sal(starting: int, ending: int, currency: str, gross: bool) -> str:\n    \"\"\"\n    Builds a string representation of the salary for convenient on-screen display.\n    Called from the str_salary method of the Vacancy class.\n    \"\"\"\n    str_salary = ''\n\n    if currency == 'USD' or currency == 'EUR':\n        str_currency = ' ' + currency\n        space = '-'\n    else:\n        str_currency = ''\n        space = ' - '\n\n    if gross:\n        gross_koeff = GROSS_COURSE\n    else:\n        gross_koeff = 1\n\n    salary_from = int((starting / 1000) * gross_koeff)\n    salary_to = int((ending / 1000) * gross_koeff)\n\n    if salary_from and salary_to:\n        str_salary = str(salary_from) + 'к' + space + str(salary_to) + 'к' + str_currency\n\n    elif salary_from and not salary_to:\n        str_salary = 'от ' + str(salary_from) + 'к' + str_currency\n\n    elif not salary_from and salary_to:\n        str_salary = 'до ' + str(salary_to) + 'к' + str_currency\n\n    return Fore.RED + str_salary.center(11) + Fore.RESET\n\n\ndef avg_sal(starting: int, ending: int, currency: str, gross: bool) -> int:\n    \"\"\"\n    Builds a numeric value of the salary for sorting vacancies.\n    Called from the avg_salary method of the Vacancy class.\n    \"\"\"\n    if currency == 'USD':\n        currency_koef = USD_COURSE\n    elif currency == 'EUR':\n        currency_koef = EUR_COURSE\n    else:\n        currency_koef = 1\n\n    if gross:\n        gross_koeff = GROSS_COURSE\n    else:\n        gross_koeff = 1\n\n    actual_starting = starting * currency_koef * gross_koeff\n    actual_ending = ending * currency_koef * gross_koeff\n\n    if actual_starting and actual_ending:\n        num = (actual_starting + actual_ending) / 2\n\n    elif not actual_starting and actual_ending:\n        num = actual_ending\n\n    elif actual_starting and not actual_ending:\n        num = actual_starting\n    else:\n        num = 0\n\n    return int(num)\n\n\ndef make_str_vacancy(vacancy_id: str, vacancy_name: str) -> str:\n    \"\"\"\n    String representation of a vacancy.\n    Called from the __str__ method of the Vacancy class.\n    \"\"\"\n    colored_vacancy_id = Fore.CYAN + str(vacancy_id) + Fore.RESET\n    colored_vacancy_name = Fore.YELLOW + vacancy_name[:41]\n    return colored_vacancy_id + ' ' + 
colored_vacancy_name\n","sub_path":"hh_processing.py","file_name":"hh_processing.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"290066861","text":"\nfrom rest_framework import serializers\nfrom compounds.models import *\n\n\nclass PropertyTypeSerializer(serializers.ModelSerializer):\n\n\n def create(self, validated_data):\n return PropertyType.objects.create(**validated_data)\n\n class Meta:\n model = PropertyType\n fields = (\"id\", \"name\")\n\nclass PropertySerializer(serializers.ModelSerializer):\n type = PropertyTypeSerializer(read_only=True)\n\n # flatten this thing for reading\n def to_representation(self, obj):\n representation = super().to_representation(obj)\n type_representation = representation.pop('type')\n representation[\"type\"] = type_representation[\"name\"]\n return representation\n\n def to_internal_value(self, data):\n # get the type\n typeName = data.get(\"type\")\n type = PropertyType.objects.filter(name=typeName)\n if not type:\n type = PropertyType.objects.create(name=typeName)\n else:\n type = PropertyType.objects.get(name=typeName)\n data[\"type\"] = type\n\n # get the trial\n trialID = data.get(\"trial\")\n trial = Trial.objects.get(id=trialID)\n data[\"trial\"] = trial\n return data\n\n class Meta:\n model = Property\n fields = ('id','type','value','trial')\n\nclass TrialSerializer(serializers.ModelSerializer):\n properties = PropertySerializer(many=True,read_only=True)\n\n # flatten this thing\n def to_representation(self, obj):\n representation = super().to_representation(obj)\n property_representation = representation.pop('properties') # this is a list of dicts\n for prop in property_representation:\n representation[prop[\"type\"]] = round(float(prop[\"value\"]),2)\n return representation\n\n class Meta:\n model = Trial\n # properties must be related_name\n fields = ('id','percent_replacement','properties','test')\n\nclass TestSerializer(serializers.ModelSerializer):\n trials = TrialSerializer(many=True, read_only=True)\n\n # get list of the props that have data\n def to_representation(self, obj):\n valid_props = set()\n representation = super().to_representation(obj)\n trials = Trial.objects.filter(test=obj)\n props = Property.objects.filter(trial__in=trials)\n for prop in props:\n valid_props.add(prop.type.name)\n representation[\"valid_props\"] = valid_props\n return representation\n\n class Meta:\n model = Test\n fields = ('id', 'compound', 'replacee', 'replacer', 'trials')\n\nclass PropertyGraphSerializer(serializers.ModelSerializer):\n # type = PropertyTypeSerializer(read_only=True)\n trial = TrialSerializer(read_only=True)\n\n # flatten this thing\n def to_representation(self, obj):\n representation = super().to_representation(obj)\n trial_representation = representation.pop('trial')\n representation[\"x\"] = trial_representation[\"percent_replacement\"]\n representation[\"y\"] = representation[\"value\"]\n representation[\"test\"] = trial_representation[\"test\"]\n return representation\n\n class Meta:\n model = Property\n fields = ('id','type','value','trial')\n\nclass IngredientTypeSerializer(serializers.ModelSerializer):\n # needed so that we can add new ingredients\n\n def create(self, validated_data):\n return IngredientType.objects.create(**validated_data)\n\n # TODO: ingredients_that_use = IngredientSerializer\n class Meta:\n model = IngredientType\n fields = ('id', 'name')\n\nclass IngredientSerializer(serializers.ModelSerializer):\n type = 
IngredientTypeSerializer()\n\n # flatten this thing for reading\n def to_representation(self, obj):\n representation = super().to_representation(obj)\n type_representation = representation.pop('type')\n representation[\"type\"] = type_representation[\"name\"]\n return representation\n\n def to_internal_value(self, data):\n # get the type\n typeName = data.get(\"type\")\n type = IngredientType.objects.filter(name=typeName)\n if not type:\n type = IngredientType.objects.create(name=typeName)\n else:\n type = IngredientType.objects.get(name=typeName)\n data[\"type\"] = type\n\n # get the compound\n compoundID = data.get(\"compound\")\n compound = Compound.objects.get(id=compoundID)\n data[\"compound\"] = compound\n return data\n\n def create(self, validated_data):\n return Ingredient.objects.create(**validated_data)\n\n class Meta:\n model = Ingredient\n fields = ('id', 'type', 'parts', 'compound')\n\nclass CompoundSerializer(serializers.ModelSerializer):\n tests = TestSerializer(many=True,read_only=True)\n ingredients = IngredientSerializer(many=True,read_only=True)\n\n class Meta:\n model = Compound\n fields = ('id', 'name', 'tests', 'ingredients','client')\n","sub_path":"compounds/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"98859169","text":"import chess\nimport numpy as np\nimport sys\nsys.path.insert(0, '..')\nfrom games import gameInterface\n\nWINSCORE = 999999999999\nLOSESCORE = -999999999999\n\ndef chessPieceScore(board, turn):\n pieces = [chess.PAWN, chess.KNIGHT, chess.BISHOP, chess.ROOK, chess.QUEEN]\n weights = np.array([1, 3, 3, 5, 9])\n score = weights.dot([board.getNumPiece(piece, turn) for piece in pieces])\n oppScore = weights.dot([board.getNumPiece(piece, 1-turn) for piece in pieces])\n return score - oppScore\n\ndef ninarowWinScore(board, turn):\n result = board.getResult()\n if result == gameInterface.ONGOING:\n return 0\n elif result == turn:\n return WINSCORE\n return LOSESCORE\n","sub_path":"agents/heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"601772671","text":"##\n## Programación en Python\n## ===========================================================================\n##\n## Imprima la cantidad de registros por letra para la primera columna del \n## archivo `data.csv`, ordenados alfabeticamente por la letra.\n##\n## Rta/\n## A,8\n## B,7\n## C,5\n## D,6\n## E,14\n##\n## >>> Escriba su codigo a partir de este punto <<<\n##\n\nimport csv\nwith open('data.csv', 'r') as file:\n x = csv.reader(file, delimiter=',')\n list = [] \n for row in x:\n list.append(row[0])\nx = [i.split('\\t') for i in list]\ndic = {}\nfor ele in x:\n if ele[0] in dic.keys():\n dic[ele[0]] = dic[ele[0]] + 1\n else:\n dic[ele[0]] = 1\nsortedkeys = sorted(dic, key=str.lower)\nfor i in sortedkeys:\n print(i+','+str(dic[i]))","sub_path":"03-python=1/q02=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"510003995","text":"from __future__ import print_function\n\nimport base64\nimport json\nimport boto3\nimport time\n\nBATCH_SIZE = 25\nENTITY_STREAM_NAME = 'entity-stream'\n\nprint('Loading function')\nclient_comprehend = boto3.client(service_name='comprehend')\nclient_firehose = 
boto3.client(service_name='firehose')\n\ndef lambda_handler(event, context):\n output = []\n text_list = []\n payload_list = []\n recordId_list = []\n title_list = []\n i = 0\n num_record = len(event['records'])\n\n for record in event['records']:\n i += 1\n print(record['recordId'])\n payload = json.loads(base64.b64decode(record['data']))\n\n payload_list.append(payload)\n text_list.append(payload['text'])\n recordId_list.append(record['recordId'])\n title_list.append(payload['title'])\n \n # batch detect\n if (i % BATCH_SIZE == 0 or i >= num_record):\n print('processing batch #{}'.format(i / BATCH_SIZE))\n \n # detect key phrases\n key_phrase_list = get_key_phrase_list(text_list)\n \n # detect sentiments\n sentiment_list = get_sentiment_list(text_list)\n \n # detect named entities and send to another firehose\n entity_list = send_entity(text_list, sentiment_list, recordId_list, title_list)\n\n # save text analysis result to payload\n j = 0\n for payload in payload_list:\n payload['@timestamp'] = str(int(time.time() * 1000))\n payload['key_phrase'] = key_phrase_list[j]\n payload['sentiment'] = sentiment_list[j]\n payload['entity'] = {\n 'text': entity_list[j][0],\n 'type': entity_list[j][1]\n }\n payload['doc_type'] = 'doc'\n \n output_record = {\n 'recordId': recordId_list[j],\n 'result': 'Ok',\n 'data': base64.b64encode(json.dumps(payload))\n }\n output.append(output_record)\n \n j += 1\n \n text_list = []\n payload_list = []\n recordId_list = []\n\n print('Successfully processed {} records.'.format(num_record))\n\n return {'records': output}\n \ndef send_entity(text_list, sentiment_list, recordId_list, title_list):\n response = client_comprehend.batch_detect_entities(TextList=text_list, LanguageCode='en')\n entity_list = []\n \n for result, sentiment, recordId, title in\\\n zip(response['ResultList'], sentiment_list, recordId_list, title_list):\n #entities = list(set((x['Text'], x['Type']) for x in result['Entities'] if x['Type'] != 'QUANTITY'))\n\n entity_text = []\n entity_type = []\n for entity in result['Entities']:\n payload = {\n '@timestamp': str(int(time.time() * 1000)),\n 'entity': {\n 'text': entity['Text'],\n 'type': entity['Type']\n \n },\n 'doc_type': 'entity',\n 'sentiment': sentiment,\n 'parent': recordId[:-6],\n 'title': title\n \n }\n client_firehose.put_record(DeliveryStreamName=ENTITY_STREAM_NAME, Record={'Data':json.dumps(payload)})\n entity_text.append(entity['Text'])\n entity_type.append(entity['Type'])\n\n entity_list.append((entity_text, entity_type))\n \n return entity_list\n \ndef get_key_phrase_list(text_list):\n response = client_comprehend.batch_detect_key_phrases(TextList=text_list, LanguageCode='en')\n key_phrase_list = []\n\n for result in response['ResultList']:\n key_phrases = list(set(x['Text'] for x in result['KeyPhrases']))\n key_phrase_list.append(key_phrases)\n return key_phrase_list\n \ndef get_sentiment_list(text_list):\n response = client_comprehend.batch_detect_sentiment(TextList=text_list, LanguageCode='en')\n sentiment_list = []\n\n for result in response['ResultList']:\n sentiment = result['Sentiment']\n sentiment_list.append(sentiment)\n return sentiment_list","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"507195460","text":"import argparse\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import IntegrityError\nfrom url_shortener.core import 
clean_word\n\nfrom url_shortener.models import URLWord\n\n\nclass Command(BaseCommand):\n help = 'Add words from a file to the database'\n\n def add_arguments(self, parser):\n parser.add_argument('file', type=argparse.FileType('r'))\n\n def handle(self, *args, **options):\n num_words_before = URLWord.objects.count()\n\n file = options['file']\n words = set(map(clean_word, file))\n if '' in words:\n words.remove('')\n try:\n self.stdout.write(\"Attempting bulk create...\")\n self.bulk_create(words)\n except IntegrityError:\n self.stdout.write(\"Bulk create failed.\")\n self.stdout.write(\"Attempting one by one...\")\n self.create(words)\n\n num_words_after = URLWord.objects.count()\n self.stdout.write(\"Done! {0} words are in the database. {1} new words added.\".format(\n num_words_after, num_words_after - num_words_before))\n\n def bulk_create(self, words):\n url_words = list(map(lambda w: URLWord(word=w), words))\n URLWord.objects.bulk_create(url_words)\n\n def create(self, words):\n for word in words:\n URLWord.objects.get_or_create(word=word)\n","sub_path":"url_shortener/management/commands/importwords.py","file_name":"importwords.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"348436300","text":"from queue import Queue\n\n\nclass Solution:\n \"\"\"\n TC: O(V + E)\n SC: O(3V + E)\n V -> indegree list, V -> Queue, V + E -> adjacency list\n where V -> no. of courses and E -> prerequisites\n \"\"\"\n\n def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n\n # if there are no prereqs then return True\n if len(prerequisites) == 0:\n return True\n\n indegree = [0 for i in range(numCourses)]\n adjList = {}\n queue = Queue()\n\n # hashmap for storing the prereqs(adjaceny list) and indegree list\n\n for x in prerequisites:\n fRom = x[0]\n to = x[1]\n print(to)\n indegree[to] += 1\n\n if not fRom in adjList.keys():\n adjList[fRom] = []\n\n adjList[fRom].append(to)\n\n # queue to store independent and adjacent nodes\n for i in range(numCourses):\n\n if indegree[i] == 0:\n queue.put(i)\n\n # perform BFS on the graph\n while not queue.empty():\n front = queue.get()\n\n if front in adjList.keys():\n for x in adjList[front]:\n indegree[x] -= 1\n\n if indegree[x] == 0:\n queue.put(x)\n\n for x in indegree:\n if x != 0:\n return False\n\n return True\n","sub_path":"courseScheduling.py","file_name":"courseScheduling.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"215274466","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nfrom detector.cal import DistortionCorrector\nimport cv2\n\n# test5.jpg\nimport numpy as np\n\n\nclass SlidingWindow(object):\n \n def __init__(self):\n pass\n\n def run(self, lane_map):\n \"\"\"\n # Args\n lane_map : array\n bird eye's view binary image\n \n # Returns\n out_img\n left_pixels\n right_pixels\n \"\"\"\n self._lane_map = lane_map\n \n # 1. Create an output image to draw on and visualize the result\n self._out_img = np.dstack((lane_map, lane_map, lane_map)).astype(np.uint8)\n \n # 2. 
Step through the windows one by one\n left_lane_inds, right_lane_inds, nonzerox, nonzeroy = self._run_sliding_window()\n \n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds] \n \n left_pixels = np.concatenate([leftx.reshape(-1,1), lefty.reshape(-1,1)], axis=1)\n right_pixels = np.concatenate([rightx.reshape(-1,1), righty.reshape(-1,1)], axis=1)\n return self._out_img, left_pixels, right_pixels\n\n def _get_start_window(self, nwindows):\n leftx_base, rightx_base = self._get_base(self._lane_map)\n # Set height of windows\n window_height = np.int(self._lane_map.shape[0]/nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = self._lane_map.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n \n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n return window_height, nonzerox, nonzeroy, leftx_current, rightx_current, left_lane_inds, right_lane_inds\n\n def _get_base(self, image):\n roi = image[image.shape[0]//2:,:]\n histogram = np.sum(roi, axis=0)\n\n midpoint = np.int(histogram.shape[0]/2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n return leftx_base, rightx_base\n\n def _run_sliding_window(self, nwindows=9, margin=150, minpix=10):\n \n window_height, nonzerox, nonzeroy, leftx_current, rightx_current, left_lane_inds, right_lane_inds = self._get_start_window(nwindows)\n lane_map = self._lane_map\n\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = lane_map.shape[0] - (window+1)*window_height\n win_y_high = lane_map.shape[0] - window*window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n # Draw the windows on the visualization image\n cv2.rectangle(self._out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high), (0,255,0), 2) \n cv2.rectangle(self._out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high), (0,255,0), 2) \n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n return left_lane_inds, right_lane_inds, nonzerox, nonzeroy\n\n\nclass LaneCurveFit(object):\n def __init__(self):\n pass\n\n def run(self, left_pixels, right_pixels):\n \"\"\"\n # Args\n lane_map : array\n bird eye's view binary image\n nwindows : int\n number of windows\n margin : int\n the width of the windows 
+/- margin\n minpix : int\n minimum number of pixels found to recenter window\n \"\"\"\n left_x = left_pixels[:, 0]\n left_y = left_pixels[:, 1]\n\n right_x = right_pixels[:, 0]\n right_y = right_pixels[:, 1]\n\n # Fit a second order polynomial to each\n self._left_fit = np.polyfit(left_y, left_x, 2)\n self._right_fit = np.polyfit(right_y, right_x, 2)\n \n\n def plot(self, out_img, left_pixels, right_pixels):\n # Generate x and y values for plotting\n ploty = np.linspace(0, out_img.shape[0]-1, out_img.shape[0] )\n left_fitx = self._left_fit[0]*ploty**2 + self._left_fit[1]*ploty + self._left_fit[2]\n right_fitx = self._right_fit[0]*ploty**2 + self._right_fit[1]*ploty + self._right_fit[2]\n \n out_img[left_pixels[:, 1], left_pixels[:, 0]] = [255, 0, 0]\n out_img[right_pixels[:, 1], right_pixels[:, 0]] = [0, 0, 255]\n plt.imshow(out_img)\n plt.plot(left_fitx, ploty, color='yellow')\n plt.plot(right_fitx, ploty, color='yellow')\n plt.xlim(0, 1280)\n plt.ylim(720, 0)\n plt.show()\n\n\nclass Curvature(object):\n\n def __init__(self, xm_per_pix=3.7/700, ym_per_pix=30/720):\n # meters per pixel in x, y dimension\n self._xm_per_pix = xm_per_pix \n self._ym_per_pix = ym_per_pix \n\n def calc(self, left_pixels, right_pixels):\n def _calc(y_eval, coef):\n curverad = ((1 + (2*coef[0]*y_eval + coef[1])**2)**1.5) / np.absolute(2*coef[0])\n return curverad\n\n left_meters = self._pixel_to_meters(left_pixels)\n right_meters = self._pixel_to_meters(right_pixels)\n\n left_fit = np.polyfit(left_meters[:, 1], left_meters[:, 0], 2)\n right_fit = np.polyfit(right_meters[:, 1], right_meters[:, 0], 2)\n\n # Calculate the new radii of curvature : Now our radius of curvature is in meters\n left_curverad = _calc(np.max(left_meters[:, 1]), left_fit)\n right_curverad = _calc(np.max(right_meters[:, 1]), right_fit)\n return left_curverad, right_curverad\n\n def _pixel_to_meters(self, pixels):\n xs_pixels = pixels[:, 0]\n ys_pixels = pixels[:, 1]\n \n xs_meters = xs_pixels * self._xm_per_pix\n ys_meters = ys_pixels * self._ym_per_pix\n \n meters = np.concatenate([xs_meters.reshape(-1,1), ys_meters.reshape(-1,1)], axis=1)\n return meters\n\n\nclass LaneMarker(object):\n def __init__(self, warper):\n self._warper = warper\n \n def run(self, image, left_fit, right_fit, plot=False):\n \"\"\"\n # Args\n image : distortion corrected image\n \"\"\"\n ploty, left_fitx, right_fitx = self._generate_pts(image.shape[0], left_fit, right_fit)\n\n color_warp = np.zeros_like(image).astype(np.uint8)\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n \n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n \n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = self._warper.backward(color_warp)\n \n # Combine the result with the original image\n result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)\n if plot:\n plt.imshow(result)\n plt.show()\n return result\n\n def _generate_pts(self, height, left_curve, right_curve):\n ys = np.linspace(0, height-1, height)\n left_xs = left_curve[0]*ys**2 + left_curve[1]*ys + left_curve[2]\n right_xs = right_curve[0]*ys**2 + right_curve[1]*ys + right_curve[2]\n return ys, left_xs, right_xs\n\nif __name__ == \"__main__\":\n\n # 1. 
Get bird eye's view lane map\n img = plt.imread('../../test_images/straight_lines1.jpg')\n img = plt.imread('../../test_images/test6.jpg')\n\n corrector = DistortionCorrector.from_pkl(\"..//..//dataset//distortion_corrector.pkl\")\n\n # lane_map_ipt = run_framework(img)\n from detector.lane.lane import LaneDetector\n from detector.lane.edge import CannyEdgeExtractor\n from detector.lane.mask import LaneImageMask\n from detector.lane.binary import SchannelBin\n _edge_detector = CannyEdgeExtractor(50, 200)\n _binary_extractor = SchannelBin((48, 255))\n _image_mask = LaneImageMask()\n detector = LaneDetector(_edge_detector, _binary_extractor, _image_mask)\n\n undist_img = corrector.run(img)\n lane_map = detector.run(undist_img)\n \n from detector.curve.warp import LaneWarper\n warper = LaneWarper()\n lane_map_ipt = warper.forward(lane_map)\n\n\n win = SlidingWindow()\n out_img, left_pixels, right_pixels = win.run(lane_map_ipt)\n fitter = LaneCurveFit()\n fitter.run(left_pixels, right_pixels)\n fitter.plot(out_img, left_pixels, right_pixels)\n \n curv = Curvature()\n l, r = curv.calc(left_pixels, right_pixels)\n print(l, 'm', r, 'm')\n \n marker = LaneMarker(warper)\n marker.run(undist_img, fitter._left_fit, fitter._right_fit)\n \n \n\n","sub_path":"4_lane_detect/detector/curve/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":10058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"96667265","text":"__author__ = 'andong'\n\nimport nltk\n\nwith open('../corpus/全唐诗.out', 'r', encoding='utf-8') as file:\n content = file.read()\ndist = nltk.FreqDist(content)\nlst = dist.most_common()\nfor l in lst:\n if 0x4e00 <= ord(l[0]) < 0x9fa6:\n print(l)\n\n","sub_path":"test/nltk_count.py","file_name":"nltk_count.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"16608261","text":"def foo(bbr, s):\n if s == sum(bbr):return True\n if not bbr:return False\n ans = 0\n for i in range(1, len(bbr)):\n if s == sum(bbr[:1]):\n ans += foo(bbr[i:], s)\n return ans\n\nn = int(input())\narr = [int(x) for x in input()]\nans = 0\nfor i in range(1, len(arr)):\n ans += foo(arr[i:], sum(arr[:i]))\n if ans:break\nprint([\"NO\",\"YES\"][ans])\n","sub_path":"CodeForces/CFR512_2_3.py","file_name":"CFR512_2_3.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"382882748","text":"from __future__ import unicode_literals\n\nimport ast\n\nimport re\n\nimport pip\n\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('erpapp/__init__.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError):\n long_description = \"Python rulez\"\n\nlinks = [] # for repo urls (dependency_links)\nrequires = [] # for package names\n\n# new versions of pip requires a session\nrequirements = pip.req.parse_requirements(\n 'requirements/prod.txt', session=pip.download.PipSession()\n)\n\nfor item in requirements:\n if getattr(item, 'url', None): # older pip has url\n links.append(str(item.url))\n if getattr(item, 'link', None): # newer pip has link\n links.append(str(item.link))\n if item.req:\n 
requires.append(str(item.req))  # always the package name\n\nsetup(\n    name='ERP-WebApp',\n    version=version,\n    url='https://github.com/jcass77/erp-conservation',\n    license='Proprietary',\n    author='John Cass',\n    author_email='john.cass@erpconservation.com',\n    description='Web application for ERP impact investment portfolio',\n    long_description=open('README.rst').read(),\n    packages=find_packages(exclude=['tests', 'tests.*']),\n    zip_safe=False,\n    include_package_data=True,\n    install_requires=requires,\n    dependency_links=links,\n    entry_points={\n        'console_scripts': [\n            'erpapp = erpapp.cli:cli',\n        ]\n    },\n    classifiers=[\n        'Environment :: Web Environment',\n        'Framework :: Flask',\n        'License :: Other/Proprietary License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python :: 2',\n        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n    ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"169958572","text":"import inspect\r\n\r\n\r\nclass Dummy:\r\n    @classmethod\r\n    def _get_param_names(cls):\r\n        init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\r\n        print(f\"object.__init__ = {object.__init__}\")\r\n        print(f\"init = {init}\")\r\n        if init is object.__init__:\r\n            print(\"same!\")\r\n\r\n        init_signature = inspect.signature(init)\r\n        print(\r\n            f\"init_signature.parameters.values() = {init_signature.parameters.values()}\"\r\n        )\r\n        print(f\"kind = {[p.kind for p in init_signature.parameters.values()]}\")\r\n        parameters = [\r\n            p\r\n            for p in init_signature.parameters.values()\r\n            if p.name != \"self\" and p.kind != p.VAR_KEYWORD\r\n        ]\r\n        print(f\"filtered parameters = {parameters}\")\r\n        print(f\"kind of filtered parameters = {[p.kind for p in parameters]}\")\r\n        print(f\"final parameters = {sorted([p.name for p in parameters])}\")\r\n        return sorted([p.name for p in parameters])\r\n\r\n    def get_params(self, deep=True):\r\n        out = dict()\r\n        params = self._get_param_names()\r\n        for key in params:\r\n            try:\r\n                value = getattr(self, key)\r\n            except AttributeError:\r\n                print(\r\n                    f\"WARNING: Only instance variables are returned whenever get_params is called: ignoring {key}\"\r\n                )\r\n                value = None\r\n            if deep and hasattr(value, \"get_params\"):\r\n                print(\"It has attribute and the method is deep!\")\r\n                deep_items = value.get_params().items()\r\n                out.update((key + \"__\" + k, val) for k, val in deep_items)\r\n            out[key] = value\r\n        return out\r\n\r\n\r\nclass DummyExt(Dummy):\r\n    def __init__(self, a, b, c, d, *args, **kwargs):\r\n        self.a = a\r\n        self.b = b\r\n        self.c = c\r\n        self.d = d\r\n        self.x = args\r\n        self.y = kwargs\r\n\r\n    def get_params(self):\r\n        return self.__dict__.copy()\r\n\r\n\r\nobj = DummyExt(a=1, b=2, c=3, d=4, e=5, f=6)\r\nprint(obj.get_params())\r\n","sub_path":"dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"64377085","text":"import sys\nimport pygame\n\npygame.init()\n\n# Set the window size\nsize = 600, 500\nw, h = size\n\nscreen = pygame.display.set_mode(size)\n\nBLACK = 0, 0, 0\nYELLOW = 255, 255, 0\nr = 25\n\ndef draw_ball(x, y):\n    center = (x, y)\n    pygame.draw.circle(screen, YELLOW, center, r)\n\nballs = [\n    {\"x\": 0, \"y\": 100, \"dx\": 5, \"dy\": 5},\n    {\"x\": 100, \"y\": 100, \"dx\": 10, \"dy\": 10}\n]\n\ndef paint_ball(ball):\n    draw_ball(ball['x'], ball['y'])\n    ball['x'] = ball['x'] + ball['dx']\n    ball['y'] = 
ball['y'] + ball['dy']\n\n    if ball['x'] < 0 or ball['x'] > w:\n        ball['dx'] = -ball['dx']\n\n    if ball['y'] < 0 or ball['y'] > h:\n        ball['dy'] = -ball['dy']\n\ndef paint():\n    screen.fill(BLACK)\n    for ball in balls:\n        paint_ball(ball)\n\ndef main():\n    while True:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                sys.exit()\n        paint()\n        pygame.display.flip()\n        pygame.time.wait(50)\n\nmain()\n","sub_path":"game3.py","file_name":"game3.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"167204664","text":"import numpy as np\nimport scipy.io as scio\nfrom sklearn.utils import shuffle\n\n\n# 读取mat文件中的数据,和mat中的尺寸保持一致\ndef read_mat(filename):\n    \"\"\"\n\n    :param filename: 文件名\n    :return: data数据,shape和mat文件一致\n\n    本项目中如无特别说明,x表示reference,y表示tracking error\n    \"\"\"\n    data = scio.loadmat(filename)  # 读取mat文件\n    if 'x' in data.keys():\n        data = data['x']\n        data = data * 1.0\n    elif 'y' in data.keys():\n        data = data['y']\n        data = data * 1.0\n    else:\n        raise Exception(\"Invalid mat file!\")\n    return data\n\n\n# 将mat中的数据读取后进行打乱\ndef read_data(file_path):\n    # 读取x,y的mat文件\n    dataX = read_mat('{}/xe.mat'.format(file_path))\n    dataY = read_mat('{}/ye.mat'.format(file_path))\n    print('data shape is {}'.format(dataX.shape))\n\n    # 利用shuffle()打乱样本顺序\n    dataX, dataY = shuffle(dataX, dataY)\n\n    # scio.savemat('matlab_work/data/train/shuffle_x.mat', {'sx': dataX})\n    # scio.savemat('matlab_work/data/train/shuffle_y.mat', {'sy': dataY})\n\n    return dataX, dataY\n\n\n# 对原始的reference和tracking error数据进行一定的预处理\n# Used in training\ndef process_xy_for_train(origin_X, origin_Y, config):\n    \"\"\"\n\n    :param origin_X: 原始reference轨迹,一般形状为一个行向量\n    :param origin_Y: 对应的tracking_error,一般形状为一个行向量\n    :param config: 配置信息\n    :return:处理后的用于训练的数据,形状为(,12)\n    \"\"\"\n    # 全部变成列向量,便于后续操作\n    if len(origin_X.shape) == 1:\n        origin_X = origin_X[:, np.newaxis]  # 增加维数(例如:N → N×1)\n    if len(origin_Y.shape) == 1:\n        origin_Y = origin_Y[:, np.newaxis]  # 增加维数(例如:N → N×1)\n\n    '''\n    进行差分运算,利用位移求得速度,加速度,急动度\n    example:\n    X:|--|--|--|--|--|--|--|--|--|--| 后一时刻减去前一时刻,可得到中间时刻的速度\n    V': |--|--|--|--|--|--|--|--|--| 再用后一时刻减去前一时刻,可以得到中间时刻的加速度\n    A: |--|--|--|--|--|--|--|--| 此时A第一项的时间戳是和x的第二项的时间戳对应的。为了让时间戳对应,利用V(t)=(X(t+1)-X(t-1))/2求速度\n    V: |--|--|--|--|--|--|--|--| 对于J的求法也一样,为了让时间戳可以对应,利用J(t)=(A(t+1)-A(t-1))/2求出\n    J: |--|--|--|--|--|--|\n    这样对于一段轨迹,除了前2个点和后2个点,其余所有点的X,V,A,J都是对应的,因此对进行掐头去尾的处理\n    '''\n    V_tmp = origin_X[1:] - origin_X[:-1]\n    A = V_tmp[1:] - V_tmp[:-1]\n    V = (origin_X[2:] - origin_X[:-2]) / 2\n    J = (A[2:] - A[:-2]) / 2\n\n    # 掐头去尾\n    X = origin_X[2:-2]\n    V = V[1:-1]\n    A = A[1:-1]\n\n    # 处理后的数据\n    processed_X = np.concatenate([X, V, A, J], axis=-1)  # 此时processed_X的shape为(N-4,4),N为原始reference的点数\n    processed_Y = origin_Y[2:-2]\n\n    # 对数据进行归一化操作,所有数据范围归一化至[-1,1]中\n    # 首先对processed_X的每一列进行归一化\n    for i in range(processed_X.shape[-1]):\n        processed_X[:, i] *= config['scales'][i]\n    # 再对processed_Y进行归一化\n    processed_Y *= config['scales'][-1]\n\n    '''\n    获得最终用于GRU进行训练的数据\n    time_step:预测步长,可以调整预测多个时刻后的轨迹(一般为1即可)\n    c_step:每组预测数据中,x的前后步长(一般为1即可,即训练输入选取x的t-1/t/t+1,如果为2,则是t-2/t-1/t/t+1/t+2)\n    training_data_num:训练数据的个数,一般掐头去尾即可\n    训练过程中,t0时刻训练集的输入为:\n    Input(t0)= (X(t0-1),V(t0-1),A(t0-1),J(t0-1),X(t0),V(t0),A(t0),J(t0),X(t0+1),V(t0+1),A(t0+1),J(t0+1))\n    '''\n    output_X, output_Y = [], []\n    _t = config['time_step']\n    train_data_num = (processed_Y.shape[0] - 2 * config['c_step']) // _t\n    for i in range(train_data_num):  # 遍历所有的训练数据\n        output_X.append(processed_X[i * _t:(i + 1) * _t 
+ 2 * config['c_step']].ravel()) # 确定input vector\n output_Y.append(processed_Y[i * _t + config['c_step']].ravel()) # 找到对应的output(即tracking error)\n\n # 将output_X,output_Y转化为ndarray对象\n output_X = np.asarray(output_X, dtype=np.float32)\n output_Y = np.asarray(output_Y, dtype=np.float32)\n\n return output_X, output_Y\n\n\n# 对原始的reference数据进行一定的预处理\n# Used in implementation\ndef process_x_for_implementation(origin_X, config):\n \"\"\"\n 类似于process_xy_for_train的实现过程\n :param origin_X: 原始reference轨迹,一般形状为一个行向量\n :param config: 配置信息\n :return: 处理后的用于implementation的数据,形状为(,12)\n \"\"\"\n\n # 全部变成列向量,便于后续操作\n origin_X = origin_X.reshape([-1, 1])\n\n V_tmp = origin_X[1:] - origin_X[:-1]\n A = V_tmp[1:] - V_tmp[:-1]\n V = (origin_X[2:] - origin_X[:-2]) / 2\n J = (A[2:] - A[:-2]) / 2\n\n # 掐头去尾\n X = origin_X[2:-2]\n V = V[1:-1]\n A = A[1:-1]\n\n # 处理后的数据\n processed_X = np.concatenate([X, V, A, J], axis=-1) # 此时processed_X的shape为(N-4,4),N为原始reference的点数\n for i in range(processed_X.shape[-1]):\n processed_X[:, i] *= config['scales'][i]\n\n output_X = []\n _t = config['time_step']\n implementation_data_num = (processed_X.shape[0] - 2 * config['c_step']) // _t\n for i in range(implementation_data_num): # 遍历所有的数据\n output_X.append(processed_X[i * _t:(i + 1) * _t + 2 * config['c_step']].ravel()) # 确定input vector\n\n # 将output_X转化为ndarray对象\n output_X = np.asarray(output_X, dtype=np.float32)\n\n if len(output_X.shape) == 2:\n output_X = output_X[np.newaxis, :, :]\n # print(output_X.shape)\n\n return output_X\n","sub_path":"get_dataset.py","file_name":"get_dataset.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"178388175","text":"import cImage\nimport random\n\n# Negativo de imagem\ndef main_negativo(imagem_ficheiro):\n \"\"\"Constrói e vizualiza o negativo de uma imagem.\"\"\"\n # Obtém imagem\n imagem = cImage.FileImage(imagem_ficheiro)\n # Fabrica o negativo\n imagem_nova = negativo_imagem(imagem)\n # Define janela\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n janela = cImage.ImageWin('Negativo',2*largura, altura)\n # vizualiza\n imagem.draw(janela)\n imagem_nova.setPosition(largura+1,0)\n imagem_nova.draw(janela)\n # Termina\n janela.exitOnClick()\n \ndef negativo_imagem(imagem):\n \"\"\" Negativo de uma imagem.\"\"\"\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n imagem_nova = cImage.EmptyImage(largura,altura)\n # percorre pixel a pixel\n for coluna in range(largura):\n for linha in range(altura):\n # transforma\n pixel_original = imagem.getPixel(coluna,linha)\n novo_pixel = negativo_pixel(pixel_original)\n imagem_nova.setPixel(coluna,linha,novo_pixel)\n return imagem_nova\n \n \ndef negativo_pixel(pixel):\n red = 255 - pixel.getRed()\n green = 255 - pixel.getGreen()\n blue = 255 - pixel.getBlue()\n novo_pixel = cImage.Pixel(red,green,blue)\n return novo_pixel \n\n# Cinzentos \n\ndef main_cinzento(imagem_ficheiro):\n \"\"\"Constrói e vizualiza a escala de cinzentos de uma imagem.\"\"\"\n # Obtém imagem\n imagem = cImage.FileImage(imagem_ficheiro)\n # Fabrica a escala de cinzentos\n imagem_nova = cinzento_imagem(imagem)\n # Define janela\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n janela = cImage.ImageWin('Cinzento',2*largura, altura)\n # vizualiza\n imagem.draw(janela)\n imagem_nova.setPosition(largura+1,0)\n imagem_nova.draw(janela)\n # Termina\n janela.exitOnClick()\n \ndef cinzento_imagem(imagem):\n \"\"\" Escala de cinzentos de uma imagem.\"\"\"\n 
largura = imagem.getWidth()\n altura = imagem.getHeight()\n imagem_nova = cImage.EmptyImage(largura,altura)\n # percorre pixel a pixel\n for coluna in range(largura):\n for linha in range(altura):\n # transforma\n pixel_original = imagem.getPixel(coluna,linha)\n novo_pixel = cinzento_pixel(pixel_original)\n imagem_nova.setPixel(coluna,linha,novo_pixel)\n return imagem_nova\n\ndef cinzento_pixel(pixel):\n \"\"\" Converte um pixel para escala de cinzentos.\"\"\"\n vermelho = pixel.getRed()\n verde = pixel.getGreen()\n azul = pixel.getBlue()\n int_media = (vermelho + verde + azul) // 3\n novo_pixel = cImage.Pixel(int_media,int_media, int_media)\n return novo_pixel\n\n# -------------------\n \ndef mostra_imagem(img_fich):\n # Carrega a imagem do disco\n imagem = cImage.FileImage(img_fich)\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n # Cria janela\n janela = cImage.ImageWin('Imagem', largura,altura)\n # Mostra imagem na janela\n imagem.draw(janela)\n # Termina\n janela.exitOnClick()\n\n\ndef cria_janela(nome,largura,altura):\n janela= cImage.ImageWin(nome,largura,altura)\n janela.exitOnClick()\n \ndef cria_janela_cor(nome,largura,altura,cor):\n janela= cImage.ImageWin(nome,largura,altura)\n janela.setBackground(cor)\n janela.exitOnClick()\n \ndef desenha_linha(p_1,p_2):\n \"\"\"Desenha uma linha entre dois pontos.\"\"\"\n x_1 = p_1[0]\n y_1 = p_1[1]\n x_2 = p_2[0]\n y_2 = p_2[1]\n janela = cImage.ImageWin('Linha', 3* abs(x_2 - x_1), 3*abs(y_2 - y_1))\n janela.setBackground((0,255,0))\n \n imagem = cImage.EmptyImage(abs(x_2 - x_1),abs(y_2 - y_1))\n print(imagem.getWidth(), imagem.getHeight())\n pix = cImage.Pixel(255,0,0)\n for x in range(0, imagem.getWidth()):\n for k in range(0, imagem.getHeight()):\n imagem.setPixel(x,k,cImage.Pixel(0,255,0))\n y = int(x * ((y_2 - y_1)/(x_2 - x_1)) + y_1 - x_1 * ((y_2 - y_1)/ (x_2 - x_1)))\n imagem.setPixel(x,y,pix)\n imagem.setPosition(janela.getWidth()//2 - imagem.getWidth()//2,janela.getHeight()//2 - imagem.getHeight()//2)\n imagem.draw(janela)\n janela.exitOnClick()\n \ndef quadrilatero(lado_1,lado_2,cor):\n \"\"\"\n Cria uma figura rectangular, colorida.\n \"\"\"\n imagem = cImage.EmptyImage(lado_1,lado_2)\n pixel = cImage.Pixel(cor[0],cor[1],cor[2])\n for coluna in range(lado_1): \n for linha in range(lado_2):\n imagem.setPixel(coluna,linha,pixel) \n return imagem\n\ndef cria_e_mostra(lado_1,lado_2, cor, pos,janela):\n imagem = quadrilatero(lado_1,lado_2,cor)\n imagem.setPosition(pos[0],pos[1])\n imagem.draw(janela)\n \ndef desenha_quad(lado_1, lado_2,cor):\n \"\"\" Desenha um quadrilátero colorido.\"\"\"\n janela = cImage.ImageWin('Quadrilátero', 3 * lado_1,3*lado_2)\n cria_e_mostra(lado_1,lado_2,cor,(20,20),janela)\n janela.exitOnClick()\n \ndef cria_imagem_vazia(largura,altura):\n imagem = cImage.EmptyImage(largura,altura)\n return imagem\n\ndef cria_imagem_vazia_cor(largura, altura, cor):\n imagem = cImage.EmptyImage(largura,altura)\n imagem.setSolidColor(cor)\n return imagem \n\ndef mostra_imagem_simples(imagem):\n # Dimensão\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n # Cria janela\n janela = cImage.ImageWin('Imagem', 2*largura,2*altura)\n # Mostra imagem na janela\n imagem.draw(janela)\n # Termina\n janela.exitOnClick() \n \ndef mostra_imagens(lista_imagens):\n janela = cImage.ImageWin('Imagem', 640,480)\n for img in lista_imagens:\n l = random.randint(0, img.getWidth()//3)\n a = random.randint(0, img.getHeight()//3)\n img.setPosition(l,a)\n img.draw(janela)\n janela.exitOnClick()\n \n \ndef 
desenha_linha(imagem):\n altura = imagem.getHeight()\n largura = imagem.getWidth()\n janela = cImage.ImageWin('Imagem',largura,altura)\n pix = cImage.Pixel(255,0,0)\n for col in range(largura):\n imagem.setPixel(col, altura//2,pix)\n imagem.draw(janela)\n janela.exitOnClick()\n \ndef cria_imagem_simples(largura, altura):\n pixel_vermelho = cImage.Pixel(255,0,0)\n pixel_azul = cImage.Pixel(0,0,255) \n \n imagem = cImage.EmptyImage(largura,altura)\n for coluna in range(largura):\n for linha in range(altura):\n if coluna % 10 == 0:\n imagem.setPixel(coluna,linha, pixel_vermelho)\n else:\n imagem.setPixel(coluna,linha, pixel_azul)\n imagem.setPosition(largura,altura) \n \n janela = cImage.ImageWin('Imagem Simples', 2*largura,2*altura)\n janela.setBackground('green')\n \n imagem.draw(janela)\n \n janela.exitOnClick()\n\ndef gera_tuplo():\n r = random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n return (r,g,b)\n\ndef gera_imagem(n,m):\n return [[ gera_tuplo() for coluna in range(m)] for linha in range(n)]\n\n\ndef distorcer(imagem, factor_x, factor_y):\n \"\"\"\n Distorce uma imagem de acordo com os factores indicados.\n Cada pixel vai darorigem a um rectângulo de dimensões\n factor_x X factor_y.\n \"\"\" \n # Cria imagens\n img = cImage.FileImage(imagem)\n nova_img = altera(img, factor_x,factor_y)\n # Cria janela\n largura = img.getWidth()\n altura = img.getHeight() \n janela = cImage.ImageWin('Distorce', factor_x * (largura+1) , factor_y * (altura+1))\n # Coloca imagens\n img.setPosition(0,0)\n nova_img.setPosition(largura + 1,0)\n img.draw(janela)\n nova_img.draw(janela)\n # Termina\n janela.exitOnClick() \n \n\ndef altera(imagem,factor_x, factor_y):\n \"\"\"\n Altera a imagem de acordo com os factores.\n Estes devem ser inteiros.\n \"\"\"\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n \n nova_imagem = cImage.EmptyImage(factor_x * largura, factor_y * altura)\n \n for coluna in range(largura):\n for linha in range(altura):\n pixel = imagem.getPixel(coluna, linha)\n\n for i_x in range(factor_x):\n for i_y in range(factor_y):\n nova_imagem.setPixel(factor_x * coluna + i_x, factor_y * linha + i_y, pixel)\n return nova_imagem\n\n\ndef transforma_imagem(imagem, funcao):\n \"\"\" Manipula uma imagem de acordo com uma função.\"\"\"\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n nova_imagem = cImage.EmptyImage(largura,altura)\n for coluna in range(largura):\n for linha in range(altura):\n pixel = imagem.getPixel(coluna,linha)\n novo_pixel = funcao(pixel)\n nova_imagem.setPixel(coluna,linha, novo_pixel)\n return nova_imagem\n\ndef main_funcao(imagem_ficheiro, funcao):\n \"\"\"Transforma uma imagem de acordo com a funcao.\"\"\"\n # Obtém imagem\n imagem = cImage.FileImage(imagem_ficheiro)\n # Transforma a imagem\n imagem_nova = transforma_imagem(imagem,funcao)\n # Define janela\n largura = imagem.getWidth()\n altura = imagem.getHeight()\n janela = cImage.ImageWin( funcao.__name__,2*largura, altura)\n # vizualiza\n imagem.draw(janela)\n imagem_nova.setPosition(largura+1,0)\n imagem_nova.draw(janela)\n # Termina\n janela.exitOnClick()\n\n\ndef preto_branco_pixel(pixel):\n pixel_aux = cinzento_pixel(pixel)\n if pixel_aux.getRed() < 128 :\n novo_pixel = cImage.Pixel(0,0,0)\n else:\n novo_pixel = cImage.Pixel(255,255,255) \n return novo_pixel\n \nif __name__ =='__main__':\n #mostra_imagem('/images/ants.jpg') \n #cria_janela('Janela Indiscreta', 320,240)\n #cria_janela_cor('Teste de Cor de Fundo', 320,240,'red')\n #cria_janela_cor('Teste de Cor de 
Fundo', 320,240,'#ff0000')\n #desenha_linha((50,50),(150,150))\n \"\"\"\n janela = cImage.ImageWin('Quadrilátero', 320,240)\n cria_e_mostra(50,100,(255,0,0),(20,20),janela)\n janela.exitOnClick()\n \n #desenha_quad(50,100,(255,0,0))\n #imagem = cria_imagem_vazia(320,240)\n largura = 320\n altura = 240\n imagem = cria_imagem_vazia_cor(largura,altura,(0,0,255))\n imagem.setPosition(largura//2,altura//2)\n mostra_imagem_simples(imagem)\n \"\"\"\n img_1 = cria_imagem_vazia_cor(100,100,(255,0,0))\n img_2 = cria_imagem_vazia_cor(50,50,(0,255,0))\n img_3 = cria_imagem_vazia_cor(150,150,(0,0,255))\n mostra_imagens([img_1, img_2, img_3])\n \"\"\"\n img = cria_imagem_vazia_cor(150,150,(0,0,255))\n desenha_linha(img)\n\n cria_imagem_simples(320,240)\n\n imagem = gera_imagem(2,3)\n \n fich = open('/Users/ernestojfcosta/imagem.txt','w')\n fich.write(str(imagem))\n fich.close()\n \"\"\"\n #distorcer('/images/calvin_leia_s.jpg', 1,1)\n #main_negativo('/images/calvin_leia_s.jpg')\n #main_cinzento('/images/calvin_leia_s.jpg')\n #main_funcao('/images/calvin_leia_s.jpg',cinzento_pixel)\n #main_funcao('/images/calvin_leia_s.jpg',preto_branco_pixel)\n \n\n \n \n \n \n ","sub_path":"visoes_2/imagens/para_livro.py","file_name":"para_livro.py","file_ext":"py","file_size_in_byte":10800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"279402723","text":"from cvxopt import matrix, spmatrix\nfrom .base import ModelBase\nfrom ..consts import *\nfrom ..utils.math import *\n\n\nclass Stagen(ModelBase):\n \"\"\"Static generator base class\"\"\"\n def __init__(self, system, name):\n super().__init__(system, name)\n self._group = 'StaticGen'\n self._data.update({'bus': None,\n 'busr': None,\n 'pg': 0.0,\n 'qg': 0.0,\n 'pmax': 1.0,\n 'pmin': 0.0,\n 'qmax': 0.0,\n 'qmin': 0.0,\n 'v0': 1.0,\n 'vmax': 1.4,\n 'vmin': 0.6,\n })\n self._units.update({'bus': 'na',\n 'busr': 'na',\n 'pg': 'pu',\n 'qg': 'pu',\n 'pmax': 'pu',\n 'pmin': 'pu',\n 'qmax': 'pu',\n 'v0': 'pu',\n 'vmax': 'pu',\n 'vmin': 'pu',\n })\n self._params.extend(['v0',\n 'pg',\n 'qg',\n 'pmax',\n 'pmin',\n 'qmax',\n 'qmin',\n 'vmax',\n 'vmin',\n ])\n self._descr.update({'bus': 'the idx of the installed bus',\n 'busr': 'the idx of remotely controlled bus',\n 'pg': 'active power set point',\n 'qg': 'reactive power set point',\n 'pmax': 'maximum active power output',\n 'pmin': 'minimum active power output',\n 'qmax': 'maximim reactive power output',\n 'qmin': 'minimum reactive power output',\n 'v0': 'voltage set point',\n 'vmax': 'maximum voltage voltage',\n 'vmin': 'minimum allowed voltage',\n })\n self._ac = {'bus': ['a', 'v']}\n self._powers = ['pg', 'qg', 'pmax', 'pmin', 'qmax', 'qmin']\n self._voltages = ['v0', 'vmax', 'vmin']\n self._service = ['Xs', 'Ra']\n self.calls.update({'gcall': True, 'gycall': True,\n 'init0': True, 'pflow': True,\n 'jac0': True, 'stagen': True,\n })\n\n\nclass PV(Stagen):\n \"\"\"Static PV generator for power flow\"\"\"\n def __init__(self, system, name):\n super().__init__(system, name)\n self._name = 'PV'\n self._algebs.extend(['q'])\n self._unamey = ['Q']\n self._fnamey = ['Q']\n self._service.extend(['qlim', 'above', 'below'])\n self._inst_meta()\n\n def init0(self, dae):\n \"\"\"Set initial voltage and reactive power for PQ. 
Overwrites Bus.voltage values\"\"\"\n dae.y[self.v] = self.v0\n dae.y[self.q] = mul(self.u, self.qg)\n\n def gcall(self, dae):\n if self.system.SPF.pv2pq and self.system.SPF.iter >= self.system.SPF.ipv2pq:\n d_min = dae.y[self.q] - self.qmin\n d_max = dae.y[self.q] - self.qmax\n idx_asc = sort_idx(d_min)\n idx_desc = sort_idx(d_max)\n\n nabove = nbelow = self.system.SPF.npv2pq\n nconv = min(self.system.SPF.npv2pq, self.n)\n\n for i in range(nconv-1, -1, -1):\n if d_min[idx_asc[i]] >= 0:\n nbelow -= 1\n if d_max[idx_desc[i]] <= 0:\n nabove -= 1\n\n self.below = idx_asc[0:nbelow] if nbelow else []\n self.above = idx_desc[0:nabove] if nabove else []\n self.qlim = list(set(self.q[self.below] + self.q[self.above]))\n\n dae.g -= spmatrix(mul(self.u, self.pg), self.a, [0] * self.n, (dae.m, 1), 'd')\n dae.g -= spmatrix(mul(self.u, dae.y[self.q]), self.v, [0] * self.n, (dae.m, 1), 'd')\n dae.g += spmatrix(mul(self.u, dae.y[self.v] - self.v0), self.q, [0] * self.n, (dae.m, 1), 'd')\n\n if self.qlim:\n dae.g[self.qlim] = 0\n\n def gycall(self, dae):\n if self.qlim:\n dae.algeb_windup(self.qlim)\n\n def jac0(self, dae):\n dae.set_jac('Gy0', 1e-6, self.v, self.v)\n dae.set_jac('Gy0', -self.u, self.v, self.q)\n dae.set_jac('Gy0', self.u, self.q, self.v)\n dae.set_jac('Gy0', self.u - 1 + 1e-6, self.q, self.q)\n\n def disable_gen(self, idx):\n \"\"\"Disable a PV element for TDS\"\"\"\n self.u[self.int[idx]] = 0\n self.system.DAE.factorize = True\n\n\nclass Slack(PV):\n \"\"\"Static slack generator\"\"\"\n def __init__(self, system, name):\n super().__init__(system, name)\n self._name = 'SW'\n self._algebs.extend(['p'])\n self._unamey.extend(['P'])\n self._fnamey.extend(['P'])\n self._service.extend(['a0'])\n self.calls.update({'gycall': False\n })\n self._inst_meta()\n\n def init0(self, dae):\n super().init0(dae)\n self.a0 = self.system.Bus.angle[self.a]\n dae.y[self.p] = mul(self.u, self.pg)\n\n def gcall(self, dae):\n dae.g[self.a] -= mul(self.u, dae.y[self.p])\n dae.g[self.v] -= mul(self.u, dae.y[self.q])\n dae.g[self.q] = mul(self.u, dae.y[self.v] - self.v0)\n dae.g[self.p] = mul(self.u, dae.y[self.a] - self.a0)\n\n def jac0(self, dae):\n super().jac0(dae)\n dae.set_jac('Gy0', -self.u, self.a, self.p)\n dae.set_jac('Gy0', self.u, self.p, self.a)\n dae.set_jac('Gy0', self.u - 1 + 1e-6, self.p, self.p)\n","sub_path":"andes/models/pv.py","file_name":"pv.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"157231321","text":"from turtle import *\nimport math\n\ndef PASEK(n):\n a = 700 / (6 + n * 10)\n przejscie_pasek_jeden(n)\n pd()\n diament(n)\n przejscie_pasek_dwa(n)\n pd()\n rządek(n)\n koniec(n)\n pd()\n diament(n)\n\ndef przejscie_pasek_jeden(n):\n a = 700 / (6 + n * 10)\n pu()\n setpos(-350,-100)\n setheading(0)\n fd(a)\n rt(90)\n fd(a)\n lt(135)\n\ndef przejscie_pasek_dwa(n):\n a = 700 / (6 + n * 10)\n setheading(45)\n pu()\n fd(2*a*math.sqrt(2))\n rt(135)\n fd(2*a)\n lt(90)\n fd(8*a)\n\ndef diament(n):\n a = 700/(6+n*10)\n fillcolor(\"yellowgreen\")\n begin_fill()\n fd(2*a*math.sqrt(2))\n lt(90)\n fd(2*a*math.sqrt(2))\n lt(90)\n fd(a*math.sqrt(2))\n lt(45)\n fd(2*a)\n lt(45)\n fd(a*math.sqrt(2))\n end_fill()\n\ndef portal(n):\n a = 700/(6+n*10)\n for i in range(2):\n lt(45)\n diament(n)\n lt(90)\n fd(2*a*math.sqrt(2))\n lt(90)\n fd(2*a*math.sqrt(2))\n rt(45)\n fd(2*a)\n lt(45)\n fd(a*math.sqrt(2))\n lt(90)\n diament(n)\n lt(90)\n fd(a*4*math.sqrt(2))\n lt(180)\n diament(n)\n rt(180)\n 
fd(a*math.sqrt(2))\n    lt(90)\n    diament(n)\n    lt(45)\n\ndef przejscie(n):\n    a = 700/(6+n*10)\n    setheading(0)\n    fd(10*a)\n\ndef rządek(n):\n    for i in range(n):\n        portal(n)\n        pu()\n        przejscie(n)\n        pd()\n\ndef koniec(n):\n    a = 700/(6+n*10)\n    rt(180)\n    pu()\n    fd(6*a)\n    rt(90)\n    fd(4*a)\n    lt(135)\nspeed(0)\nPASEK(2)\ndone()","sub_path":"konkursy/08_2009/pasek.py","file_name":"pasek.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"394107040","text":"'''\nCreated on 2016年3月20日\n\n@author: lei\n'''\nfrom project.forestToTree import *\nfrom project.allLib import *\n\n#测试用此例子,以后用预处理.log文件后的结果替换\nNtCalls = ['NtOpenFile', 'NtClose','NtOpenFile', 'NtSetInformationFile.FileBasicInformation', 'NtClose',\n           'NtOpenFile', 'NtQueryInformationFile.FileAttributeTagInformation', 'NtSetInformationFile.FileDispositionInformation', 'NtClose'\n           ]\n\n#先生成树\nbehaviorTree = forestToTree(NtCallsTree)\nnode = behaviorTree[0]\n\n#比较新读入节点和所有根节点,以及所有currentNodes的第一层左子树和左子树的所有第一层右子树,返回应创建新的API头还是继续遍历当前分支\n#def matchingAllChild(newNode, currentNodes):\n    \ndef treeMatching(allStraces, tree):\n    #因为可能有多个API同时执行,所以要用数组保存每个API执行至的当前节点\n    curMatchingIndex = 0 \n    curMatchingNodes = []\n    curMatchingNode = tree[0]\n    curMatchingNodes.append(curMatchingNode)\n    for call in allStraces: \n        if call == curMatchingNode.callName:\n            print(curMatchingNode.callName)\n            curMatchingNode = curMatchingNode.leftChild\n            if curMatchingNode.callName[:3] == 'Leaf' and curMatchingNode.leftChild == None and curMatchingNode.rightChild == None:\n                print(curMatchingNode.callName)\n            #else:\n\ntreeMatching(NtCalls, behaviorTree)","sub_path":"src/project/treeMatching.py","file_name":"treeMatching.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"649710222","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton, QSlider, QLineEdit\nfrom PyQt5.QtGui import QIcon\n#******************************************************************\nimport matplotlib\nmatplotlib.use(\"Qt5Agg\")\n#******************************************************************\nfrom PyQt5 import QtCore \nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport random\n\nimport numpy as np\n \nfrom matplotlib.widgets import Slider, Button, RadioButtons\nfrom matplotlib.lines import Line2D\n \n \nclass App(QMainWindow):\n \n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.left = 10\n\t\tself.top = 10\n\t\tself.title = 'PyQt5 matplotlib example - pythonspot.com'\n\t\tself.width = 800\n\t\tself.height = 600\n\t\tself.initUI()\n\t\t\n\n\tdef initUI(self):\n\t\tself.setWindowTitle(self.title)\n\t\tself.setGeometry(self.left, self.top, self.width, self.height)\n\n\t\tm = PlotCanvas(self, width=6, height=5)\n\t\tm.move(0,0)\n\n\t\tbutton = QPushButton('PyQt5 button', self)\n\t\tbutton.setToolTip('This is an example button')\n\t\tbutton.move(600,0)\n\t\tbutton.resize(140,100)\n\t\t\n\t\tslider = QSlider(self)\n\t\tslider.move(0,530)\n\t\tslider.resize(600,25)\n\t\tslider.setRange(0, 2047)\n\t\tslider.setPageStep(1)\n\t\tslider.setTickPosition(QSlider.TicksBothSides)\n\t\tslider.setTickInterval(100)\n\t\tslider.setOrientation(QtCore.Qt.Horizontal)\n\t\tslider.valueChanged[int].connect(self.SliderMoved)\n\t\t\n\t\tline = 
QLineEdit(self)\n\t\tline.move(600,730)\n\t\tline.resize(600,25)\n\t\t\n\t\tself.show()\n\n\n\tdef SliderMoved(self, valueOfSlider):\n\t\tm = PlotCanvas(self)\n\t\tvalueinit = valueOfSlider\n\t\tm.plot(valueOfSlider)\n\t\tpass\n\n\t\t\n\t\t#slider.\n\t\t\n\t\t\n \n \nclass PlotCanvas(FigureCanvas):\n \n\tdef __init__(self, parent=None, width=5, height=4, dpi=100):\n\t\tfig = Figure(figsize=(width, height), dpi=dpi)\n\t\tself.axes = fig.add_subplot(111)\n\n\t\tFigureCanvas.__init__(self, fig)\n\t\tself.setParent(parent)\n\n\t\tFigureCanvas.setSizePolicy(self,\n\t\t\t\tQSizePolicy.Expanding,\n\t\t\t\tQSizePolicy.Expanding)\n\t\tFigureCanvas.updateGeometry(self)\n\t\tself.plot(valueinit)\n \n\tdef plot(self, valinit):\n\t\t\n\t\t#valinit=1\n\t\tfov = 200\n\n\t\t#generate figure\n\t\tfig = plt.figure()\n\t\tfig.subplots_adjust(left=0.25, bottom=0.2)\n\n\t\t#creating slices list\n\t\ts=[]\n\t\tfor i in range(3):\n\t\t\ts.append([slice(valinit-1, valinit, None) if j == i else slice(None) for j in range(3)])\n\t\t\n\t\tprint (s)\n\t\timg_z = img[s[0]].squeeze()\n\t\t#img_x = cube[s[1]].squeeze()\n\t\t\n\t\t\n\t\tdata = img_z\n\t\t#ax = plt.subplot(111)\n\t\tax = self.figure.add_subplot(111)\n\t\tax.imshow(data, cmap=\"gray\")\n\t\t#ax.set_title('PyQt Matplotlib Example')\n\t\t#plt.draw()\n\t\tself.draw()\n\t\t#plt.show()\n\t\t\n\t\t\n\t\t#print (img_z)\n\t\t\n\t\t#plt.imshow(img_z,cmap='gray', vmin = 0, vmax = 255)\n\t\t\n\t\t\"\"\"\n\t\tDisplay a 3d ndarray with a slider to move along the third dimension.\n\t\t\"\"\"\n\t\t'''\n\t\t#check dim\n\t\t#if not cube.ndim == 3:\n\t\t#\traise ValueError(\"cube should be an ndarray with ndim == 3\")\n\n\t\tvalinit=1024\n\t\tfov = 200\n\n\t\t#generate figure\n\t\tfig = plt.figure()\n\t\tfig.subplots_adjust(left=0.25, bottom=0.2)\n\n\t\t#creating slices list\n\t\ts=[]\n\t\tfor i in range(3):\n\t\t\ts.append([slice(valinit-1, valinit, None) if j == i else slice(None) for j in range(3)])\n\t\timg_z = cube[s[0]].squeeze()\n\t\timg_x = cube[s[1]].squeeze()\n\n\t\t#display initial image\n\t\tax1 = plt.subplot(121)\n\t\tax2 = plt.subplot(122)\n\n\t\tax1.imshow(img_z, cmap='gray')\n\t\tax2.imshow(img_x, cmap='gray')\n\t\t'''\n\t\t\t\n\t\t'''\n\t\tdata = [random.random() for i in range(25)]\n\t\tax = self.figure.add_subplot(111)\n\t\tax.plot(data, 'r-')\n\t\tax.set_title('PyQt Matplotlib Example')\n\t\tself.draw()\n\t\t'''\n \nif __name__ == '__main__':\n\tcube = \"/mnt/DataDisk/Scripts/Pitch_Roll_BIC_Test/recon/tomo-2048x2048x2048_8bit.b\"\n\tfd = open(cube, 'rb')\n\twidth = 2048\n\theight = 2048\n\tslices = 2048\n\tglobal img\n\tglobal valueinit\n\tvalueinit=1024\n\timg = np.memmap(cube, dtype=np.uint8, shape=(slices, width, height))\n\tfd.close()\n\t\n\tapp = QApplication(sys.argv)\n\tex = App()\n\tsys.exit(app.exec_())\n","sub_path":"Tests/Test_pyqt_matplotlib.py","file_name":"Test_pyqt_matplotlib.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"51441597","text":"__token__ = 'INSERT BOT TOKEN HERE'\n__prefix__ = ':'\n__botserverid__ = '102817255661772800' #Specifies the serverid from which the server-/modlog should be taken\n__adminid__ = 'YOUR USERID i.e. 
102815825781596160'\n__adminrole__ = 'Administrator'\n__modrole__ = 'Moderators'\n__kawaiichannel__ = '207909155556687872' #OPTIONAL specifies a channel where the :kawaii command gets its pinned messages\n__botlogchannel__ = '165175306561388545' #Channel for the server-/modlog, should probably be a channel on the same server as __botserverid__\n__github__ = 'False' #OPTIONAL logs new commits of this bot into a specific channel, sorry hardcoded!\n__greetmsg__ = 'False' #HARDCODED Enable/Disable the greeting message at the entry channel of __botserverid__\n__selfassignrole__ = 'Blighttown' #OPTIONAL set to a role that users can assign to themselves\n","sub_path":"config.example.py","file_name":"config.example.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"424094919","text":"import cv2\nimport numpy as np\n\nface_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Rakesh\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python38\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml')\n\ndef face_extrator(img):\n    # Detect faces on a grayscale copy and return the (last) cropped face region.\n    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    faces = face_classifier.detectMultiScale(img_gray, 1.3, 6)\n\n    # detectMultiScale returns an empty sequence when no face is found;\n    # the original identity test `faces is ()` was incorrect.\n    if len(faces) == 0:\n        return None\n\n    for (x, y, w, h) in faces:\n        crop_face = img[y:y+h, x:x+w]\n\n    return crop_face\n\n\ncap = cv2.VideoCapture(0)\ncount = 0\n\nwhile True:\n    ret, frame = cap.read()\n    if not ret:\n        continue  # skip frames the camera failed to deliver\n    face_img = face_extrator(frame)  # call once instead of twice per frame\n    if face_img is not None:\n        count += 1\n        face = cv2.resize(face_img, (200, 200))\n        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n\n        file_name_path = 'face_samples\\\\user' + str(count) + '.jpg'\n        cv2.imwrite(file_name_path, face)\n\n        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 45), 2)\n\n        cv2.imshow('face cropped', face)\n    else:\n        print('face not found')\n\n    # 13 == Enter key; stop after 100 samples\n    if cv2.waitKey(1) == 13 or count == 100:\n        break\n\ncap.release()\ncv2.destroyAllWindows()\nprint('collecting samples complete')\n\n","sub_path":"facialrecognition.py","file_name":"facialrecognition.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"527602007","text":"# -*- coding: utf-8 -*-\n# @Time : 19-7-15\n# @Author : hay\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import HttpResponse\nfrom core.dictcode import dtype_ttype_dict, ttype_dtype_dict\nfrom home import models as home_models\nfrom jobs import models as jobs_models\nfrom core.tabledata import TableData\nfrom core.tablecols import tableCols\nfrom core.exporthelper import ExportHelper, UPLPAD_FILTER_EMAIL\n\n\ntabeldata = TableData()\n\n\nclass ExportViews(TemplateView):\n\n    def post(self, request, *args, **kwargs):\n        try:\n            filed_list = tableCols['department']['table_cols']\n\n            client_type = home_models.clientType.objects.filter(count=1, status=1).all()\n            client_type_code = {}\n            [client_type_code.update({r.id: r.name}) for r in client_type]\n\n            quertset = home_models.Department.objects.values(\n                'id', 'english_name', 'chinese_name', 'nickname', 'remark', 'create_time', 'DepartmentType__id',\n                'Countrys__remark', 'service', 'web_url', 'email_send_filed', 'send_num'\n            ).order_by('-id')\n\n            filter_dict = tabeldata.filter_dict(request)\n\n            params = request.POST.dict()\n            extend_filter_dict = {}\n            for k in params.keys():\n                if '__in' in k and not isinstance(params[k], list):\n                    extend_filter_dict[k] = params[k].rstrip(',').split(',')\n\n            filter_dict.update(**extend_filter_dict)\n\n            if 
params.get('client_type') and params.get('client_type') != '0':\n cid = ttype_dtype_dict.get(int(params.get('client_type')))\n filter_dict['DepartmentType_id__in'] = cid\n del filter_dict['client_type']\n\n if params.get('is_crawl') and params.get('is_crawl') != '0':\n filter_dict = {'status': 1}\n dfilter_dict = {'status': 1}\n if params.get('Areas_id') and params.get('Areas_id') != '0':\n filter_dict.update({'Countrys__Areas_id': params.get('Areas_id')})\n dfilter_dict.update({'Department__Countrys__Areas_id': params.get('Areas_id')})\n if params.get('Countrys_id') and params.get('Countrys_id') != '0':\n dfilter_dict.update({'Department__Countrys_id': params.get('Countrys_id')})\n filter_dict.update({'Countrys_id': params.get('Countrys_id')})\n\n depids = home_models.Jobs.objects.filter(**dfilter_dict).values('Department_id').all()\n dids = set()\n [dids.add(str(row['Department_id'])) for row in depids]\n if params.get('is_crawl') == '2':\n ndids = []\n for v in dids:\n try:\n ndids.append(int(v))\n except:\n pass\n\n not_depids = home_models.Department.objects.filter(\n **dfilter_dict\n ).exclude(id__in=ndids).values('id').all()\n\n dids = set()\n [dids.add(str(row['id'])) for row in not_depids]\n\n filter_dict['id__in'] = dids\n # del filter_dict['is_crawl']\n\n for k, v in filter_dict.items():\n if 'notin' in k:\n quertset = quertset.exclude(**{k.replace('not', ''): v})\n else:\n quertset = quertset.filter(**{k: v})\n\n currdata = quertset.filter(**filter_dict).all()\n for q in currdata:\n q['DepartmentType__name'] = client_type_code.get(\n dtype_ttype_dict.get(q.get('DepartmentType__id'))\n ).lstrip('精准-')\n\n last_job = home_models.Jobs.objects.filter(Department_id=q.get('id')).order_by('-create_time').first()\n if last_job:\n q['last_crawl_date'] = last_job.create_time\n try:\n q['last_crawl'] = last_job.to_user.chinese_name\n except:\n pass\n\n q['pi_client'] = jobs_models.Teachers.objects.filter(\n Jobs__Department_id=q['id'], is_lab_leader=1, status=1\n ).count()\n q['pi_old_customer_client'] = jobs_models.Teachers.objects.filter(\n Jobs__Department_id=q['id'], is_old_customer=1, status=1\n ).count()\n\n tQs = jobs_models.Teachers.objects.filter(Jobs__Department_id=q['id'], status=1).count()\n q['total_client'] = tQs\n\n file_name = ExportHelper().SaveOriginalTable(\n filed_list, currdata, '{}/department.xlsx'.format(UPLPAD_FILTER_EMAIL)\n )\n code = 200\n except Exception as e:\n code = 400\n file_name = str(e)\n\n return HttpResponse(TableData().json_return(True, data={'code': code, 'file_name': file_name}))","sub_path":"home/view/exportviews.py","file_name":"exportviews.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"156721679","text":"'''\nEvent Designators\n An event designator is a reference to a command line entry in\n the history list. Unless the reference is absolute, events are rela‐\n tive to the current position in the history list.\n\n ! Start a history substitution, except when followed by a blank,\n newline, = or (.\n !n Refer to command line n.\n !-n Refer to the current command minus n.\n !! Refer to the previous command. This is a synonym for `!-1'.\n !string\n Refer to the most recent command preceding the current position\n in the history list starting with string.\n !?string[?]\n Refer to the most recent command preceding the current position\n in the history list containing string. The trailing ? 
may\n be omitted if string is followed immediately by a newline.\n ^string1^string2^\n Quick substitution. Repeat the last command, replacing string1\n with string2. Equivalent to ``!!:s/string1/string2/''\n !# The entire command line typed so far.\n'''\n\n\ndef write_history_file(args, curpath):\n history_file = open(curpath + '/.intek-sh_history', 'a')\n history_file.write(args + '\\n')\n history_file.close()\n\n\ndef check_and_write_history_file(args, curpath):\n written = False\n if '!#' not in args and '^' not in args:\n write_history_file(args, curpath)\n written = True\n return written\n\n\ndef expand_history_file(_args, special_cases, curpath, history_lst):\n written = False\n if not _args.startswith('!') and _args not in special_cases and\\\n not _args.startswith(' '):\n if history_lst:\n \"\"\" if the current command is different from the previous\n one in history list \"\"\"\n if _args != history_lst[-1].strip('\\n'):\n written = check_and_write_history_file(_args, curpath)\n else:\n written = check_and_write_history_file(_args, curpath)\n return written\n\n\ndef read_history_file(curpath):\n try:\n history_file = open(curpath + '/.intek-sh_history', 'r')\n except FileNotFoundError:\n return None\n history_lst = history_file.readlines()\n history_file.close()\n return history_lst\n\n\ndef raw_print(no, lst, to):\n ''' to: from `to` to the end of the list\n no: number of the line'''\n for index, element in enumerate(lst[to:]):\n # justify columns\n element = element.strip('\\n')\n # right justify the numbers\n _order = str(no + index+1).rjust(len(str(len(lst))), ' ')\n # left justify the commands\n command = element.ljust(len(max(lst, key=len)), ' ')\n print(' ' * 4 + _order + ' ' + command)\n\n\ndef print_history(type_in, history_lst):\n if len(type_in) == 1:\n ''' command: history '''\n raw_print(0, history_lst, 0)\n return 0\n\n elif len(type_in) == 2:\n ''' command: history randomstring '''\n if type_in[-1].isdigit():\n # if randomstring is numeric\n num = int(type_in[-1])\n if num < len(history_lst):\n # if the number is less than length of the list\n raw_print((len(history_lst) - num),\n history_lst, (len(history_lst) - num))\n else:\n # if the number is less than length of the list\n raw_print(0, history_lst, 0)\n return 0\n else:\n # if randomstring is anything else\n print('intek-sh: history: {}: numeric argument required'.format(\n type_in[-1]))\n return 1\n\n elif len(type_in) > 2:\n ''' command: history randomstring randomstring ... '''\n print('intek-sh: history: too many arguments')\n return 148\n\n\n# replace args as cmd and print it\ndef print_args(args, cmd):\n args = cmd\n print(args)\n return args.strip('\\n'), True\n\n\ndef get_prefix(args):\n ''' get the prefix `n` for !n and !-n '''\n for word in args[1:]:\n if word.isdigit():\n prefix += word\n else:\n break\n return int(prefix)\n\n\ndef handle_special_case(exist, args):\n continue_flag = False\n if args.startswith('!'):\n # no matched event in history_lst\n if not exist:\n if sub_failed2:\n continue_flag = True\n else:\n print('intek-sh: ' + args + ': event not found')\n continue_flag = True\n else: # match event in history_lst but\n # command starts with ! and followed by a blank space\n if len(args) is 1 or args == '! 
':\n continue_flag = True\n # substitution errors\n elif args.startswith('^') and sub_failed:\n continue_flag = True\n # out of capability\n elif alert:\n continue_flag = True\n return continue_flag, args\n\n\ndef handle_emotion_prefix(args, history_lst):\n global sub_failed2\n exist = False\n sub_failed2 = False\n # command type: '!?'\n if args[1:].startswith('?'):\n args = args.strip('!?')\n # traverse through history_lst in reversed order\n for cmd in reversed(history_lst):\n # if cmd has args -> take the cmd and break the loop\n if args in cmd:\n args, exist = print_args(args, cmd.strip('\\n'))\n break\n\n # command type: '!!'\n elif args[1:].startswith('!'):\n temp = history_lst[len(history_lst) - 1].strip('\\n')\n new_args = args.replace('!!', temp)\n # command type: '!!:s/string1/string2/'\n if ':' in args[1:]:\n if 's/' in args[1:]:\n # command is !!:s/s1 -> pop s1 out of string\n if args.count('/') is 1:\n if args[5:] in temp:\n args, exist = print_args(args, temp.replace(\n args[5:], ''))\n else: # if p not in string -> raise error\n print('intek-sh: :' + args[3:] + ': substitu'\n 'tion failed')\n sub_failed2 = True\n # command has both s1 and s2 -> replace s1 with s2\n else:\n arg_lst = args[1:].strip('/').split('/')\n pos = new_args.find(':')\n new_args = new_args[:pos].replace(arg_lst[-2], arg_lst[-1])\n args, exist = print_args(args, new_args)\n else: # command doesn't follow the format\n print('intek-sh: ' + args[2:] + ': substitution failed')\n sub_failed2 = True\n else:\n args, exist = print_args(args, new_args)\n\n # command type: '!n'\n elif args[1].isdigit():\n prefix = get_prefix(args[1:])\n if (number-1) < len(history_lst):\n new_args = args.replace('!' + str(prefix), history_lst[number-1])\n args, exist = print_args(args, new_args.strip('\\n'))\n\n # command type: '!-n'\n elif args[1] is '-' and args[2].isdigit():\n prefix = get_prefix(args[2:])\n if number < len(history_lst):\n args, exist = print_args(args, args.replace('!-' + str(prefix),\n history_lst[len(\n history_lst) - number]).strip('\\n'))\n\n # command starts with '!string'\n elif args[1].isalpha():\n # command type: '!string randomstring'\n if ' ' in args:\n args_lst = args.split(' ')\n for cmd in reversed(history_lst):\n if cmd.startswith(args[1]):\n args_lst.pop(0)\n args_lst.insert(0, cmd.strip('\\n'))\n args, exist = print_args(args, ' '.join(args_lst))\n break\n else: # command type: '!string'\n for cmd in reversed(history_lst):\n if cmd.startswith(args[1:]):\n args, exist = print_args(args, cmd.strip('\\n'))\n break\n return args, exist\n\n\ndef handle_caret(args, history_lst):\n global sub_failed\n sub_failed = False\n exist = False\n new_args = ''\n temp = history_lst[len(history_lst) - 1].strip('\\n')\n # command is ^s -> pop s out of string\n if args.count('^') is 1:\n if args[1:] in temp:\n args, exist = print_args(args, temp.replace(args[1:], ''))\n else:\n print('intek-sh: :s' + args + ': substitution failed')\n sub_failed = True\n else: # command type: ^string1^string2 -> replace string1 with string2\n args_lst = args.strip('^').split('^')\n if args_lst[0] in temp:\n new_args = temp.replace(args_lst[0], args_lst[-1])\n args, exist = print_args(args, new_args)\n else:\n print('intek-sh: :s' + args + ': substitution failed')\n sub_failed = True\n return args, exist\n\n\ndef handle_command(args, history_lst):\n global sub_failed\n global alert\n alert = False\n exist = False\n if args.startswith('!'):\n if len(args) is 1 or args[1] is ' ' or args[1] is '=':\n return args, True\n elif args[1] is 
'(':\n return args[0], exist\n elif args[1] is '#':\n return args, True\n else:\n args, exist = handle_emotion_prefix(args, history_lst)\n # command type: '^string1^string2^'\n elif args.startswith('^'):\n args, exist = handle_caret(args, history_lst)\n elif '!#' in args:\n print('intek-sh: sorry this is out of my capability')\n alert = True\n return args, exist\n","sub_path":"history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":9573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"603322735","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef coor_axis_graph(x_range_list):\n fig = plt.figure(figsize=(10, 6))\n ax = fig.add_subplot(1, 1, 1) # figure로 만든 공간안에 add_subplot을 통해 1칸을 만들고 그것을 객체화\n # Move left and lower axes to (0,0) point.\n ax.spines['left'].set_position('zero') # spines는 matplotlib에 있다.\n ax.spines['bottom'].set_position('zero')\n\n # Eliminate upper and right axes. 우축과 상축 색 없애서 안보이게\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n # Show ticks in the left and lower axes only. 좌축과 하축에만 눈금표시\n ax.xaxis.set_tick_params(bottom=True, top=False)\n ax.yaxis.set_tick_params(left=True, right=False)\n\n list_x = [];\n list_y = [];\n list = [];\n for (y,x) in x_range_list:\n # list_x.append(x)\n # list_y.append(y(x))\n plt.plot(x , y(x))\n # list.append([x , y(x)])\n\n # plt.plot(list)\n plt.show()\n\ncoor_axis_graph([(lambda x: x + 1 , np.arange(-3, 4, 1)), (lambda x: x - 1, np.arange(-5, 6, 1))])","sub_path":"python_elastic/python_elastic/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"477000427","text":"import predictors\nimport pandas as pd\nimport numpy as np\nfrom constants import FIRST_DAY, NUM_DAYS\n\n\ndef get_accidents(df):\n \"\"\"\n Returns [day,node] nd.array where arr[i][j] indicates\n accidents in location i at j day\n Returns accident array\n \"\"\"\n\n # Categories for classifying the codes\n categorical = pd.Categorical(df[\"node\"])\n codes = categorical.codes\n df[\"datetime\"] = pd.to_datetime(df[\"datetime\"])\n num_days = (df[\"datetime\"].iloc[-1] - FIRST_DAY).days + 1\n num_nodes = len(categorical.categories)\n\n data_arr = np.zeros((num_nodes, num_days))\n for elem, i in zip(df.itertuples(), range(len(df))):\n data_arr[codes[i]][(elem.datetime - FIRST_DAY).days] += 1\n\n category_mapping = {}\n for node, idx in zip(categorical, categorical.codes):\n category_mapping[node] = idx\n\n return category_mapping, data_arr\n\n\ndef get_data():\n weather_df = pd.read_csv(\"../data/weather/processed/data.csv\")\n intersection_df = pd.read_csv(\"../data/intersection/processed/data.csv\")\n accident_df = pd.read_csv(\"../data/accident/processed/manhattan.csv\")\n accident_df = accident_df[\n accident_df[\"node\"].isin(list(intersection_df[\"nodes\"].unique()))\n ]\n accident_df[\"datetime\"] = pd.to_datetime(accident_df[\"datetime\"])\n accident_df = accident_df[accident_df[\"datetime\"] >= FIRST_DAY]\n categorical_mapping, accidents = get_accidents(accident_df)\n accidents = accidents[:, :NUM_DAYS]\n pred = predictors.get_predictors(\n weather_df, intersection_df, categorical_mapping\n )\n return accidents, pred\n","sub_path":"data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
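# --- Editorial illustration (not part of the original dataset) ---
# A minimal sketch of the binning technique used in get_accidents() in the
# record above: pandas.Categorical codes pick the row (one row per node) and
# the day offset from a reference date picks the column of a counts matrix.
# The toy DataFrame, the FIRST_DAY value, and the column names below are
# assumptions made for illustration only, not taken from the original project.
import numpy as np
import pandas as pd

FIRST_DAY = pd.Timestamp("2020-01-01")  # assumed reference date

df = pd.DataFrame({
    "node": ["a", "b", "a", "a"],
    "datetime": pd.to_datetime(
        ["2020-01-01", "2020-01-01", "2020-01-02", "2020-01-02"]),
})

categorical = pd.Categorical(df["node"])
num_days = (df["datetime"].max() - FIRST_DAY).days + 1
counts = np.zeros((len(categorical.categories), num_days))
for code, ts in zip(categorical.codes, df["datetime"]):
    counts[code][(ts - FIRST_DAY).days] += 1

print(counts)  # one row per node, one column per day: [[1. 2.] [1. 0.]]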
+{"seq_id":"594249154","text":"#! /usr/local/bin/python3\n\nimport os\nfrom sys import argv, exit\n\n# Both a directory path (argv[1]) and a prefix path (argv[2]) are required,\n# so the guard must check for at least three entries in argv.\nif len(argv) < 3:\n    print(f\"Error, you must provide a directory path and a prefix path ({len(argv) - 1} argument(s) given)\")\n    exit(1)\n\npath = argv[1]\nprefix_path = argv[2]\nif \"\\\\\" in prefix_path:\n    prefix_path = prefix_path.replace(\"\\\\\", \"/\")\n\nif prefix_path[-1] != \"/\":\n    prefix_path += \"/\" \n\n# os.listdir raises OSError (e.g. FileNotFoundError, PermissionError) on bad paths\nfiles = os.listdir(path)\n\nheader_files = list()\ncpp_files = list()\nfor f in files:\n    if f[-2:] == \".h\":\n        header_files.append(prefix_path + f)\n    elif f[-4:] == \".cpp\":\n        cpp_files.append(prefix_path + f)\n\nprint(\"HEADER FILES:\")\nfor f in header_files:\n    print('\"' + f + '\"')\n\nprint(\"\\n\\nCPP FILES:\")\nfor f in cpp_files:\n    print('\"' + f + '\"')\n\nprint(f\"\\n\\n.cpp + .h file count: {len(header_files) + len(cpp_files)}\")","sub_path":"Tools/ListSources.py","file_name":"ListSources.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"12389267","text":"# -*- coding: utf-8 -*-\n\nBOT_NAME = 'books'\n\nSPIDER_MODULES = ['books.spiders']\nNEWSPIDER_MODULE = 'books.spiders'\n\nLOG_LEVEL = 'INFO'\nLOG_FILE = 'output.log'\n\nROBOTSTXT_OBEY = True\n\nSAVE_CONTENT = 'books.jl'\nITEM_PIPELINES = {\n    'books.pipelines.ChanelPipeline': 300,\n}\n","sub_path":"books/books/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"578217831","text":"#!/usr/bin/python3\n\nwith open('02/input.txt', 'r') as f:\n    pos = [0, 0, 0]  # horpos, depth, aim\n    for line in f.readlines():\n        cmd, n = line.split()\n        n = int(n)\n        if cmd == 'forward':\n            pos[0] += n\n            pos[1] += n * pos[2]\n        elif cmd == 'down':\n            pos[2] += n\n        elif cmd == 'up':\n            pos[2] -= n\n        else:\n            print('BOOBOO')\n            break\n    print(pos)\n    print(pos[0] * pos[1])","sub_path":"2021/02/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"31308790","text":"l1 = [1, 2, 3, 4, 5]\nl2 = [9, 8, 7, 6, 5]\n\nonly_in_l1 = set(l1).difference(l2)\nonly_in_l2 = set(l2).difference(l1)\n\n# http://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset\nprint(\"These are in l1 but not in l2:\")\nfor i in only_in_l1:\n    print(i)\n\nprint(\"These are in l2 but not in l1:\")\nfor i in only_in_l2:\n    print(i)\n","sub_path":"compare_lists.py","file_name":"compare_lists.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"84531334","text":"from Experiment import Experiment\nimport math\n\n\"\"\" This class gathers several experiments of a single library and enables statistics gathering of the overall lib \"\"\"\nclass ExperimentsAnalyser:\n    def __init__(self, name, count):\n        self.name = name\n        self.experiments = []\n        self.count = count\n        self.load_experiments()\n\n    def load_experiments(self):\n        for i in range(self.count):\n            self.experiments.append(Experiment(self.name, \"results/%s-%d.csv\" % (self.name, i), \"blue\"))\n\n    def get_mean(self):\n        mean = 0\n\n        for exp in self.experiments:\n            selfmean = 0\n            for point in exp.power:\n                selfmean += float(point)\n            mean += (selfmean/len(exp.power))\n\n        return float(mean)/self.count\n\n    def get_standard_deviation(self):\n        deviation = 
0\n\n for exp in self.experiments:\n mean = 0\n distance = 0\n\n for point in exp.power:\n mean += point\n mean = float(mean)/len(exp.power)\n\n for point in exp.power:\n distance += (point - mean)**2\n\n deviation += math.sqrt(distance/len(exp.power))\n\n return float(deviation)/len(self.experiments)\n\n\n def get_markers(self):\n max = 0\n min = 500\n for exp in self.experiments:\n for point in exp.power:\n if point < min: min = point\n if point > max: max = point\n return {\"min\": min, \"max\": max}\n\n def get_quartiles(self):\n allvalues = []\n for exp in self.experiments:\n for point in exp.power:\n allvalues.append(point)\n allvalues.sort()\n\n size = len(allvalues)\n median = allvalues[int(round(size/2))-1]\n i = int(round(size/4))\n firstQ = allvalues[i-1]\n thirdQ = allvalues[i*3-1]\n\n return {\"firstQ\": firstQ, \"thirdQ\": thirdQ, \"median\": median}\n\n def print_results(self):\n print(\"Library tested: %s\" % self.name)\n print(\"Number of runs: %d\" % self.count)\n print(\"Average power withdrawn: %fW\" % self.get_mean())\n markers = self.get_markers()\n print(\"Min value: %fW, max value: %fW\" % (markers['min'], markers['max']))\n quartiles = self.get_quartiles()\n print(\"1st quartile: %fW, 3rd quartile: %fW, median: %fW\" % (quartiles[\"firstQ\"], quartiles[\"thirdQ\"],\n quartiles[\"median\"]))\n print(\"Average standard deviation: %fW\" % self.get_standard_deviation())\n print(\"------------------------------------------------\")\n","sub_path":"study/inria/classes/ExperimentsAnalyser.py","file_name":"ExperimentsAnalyser.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"312398757","text":"\"\"\"\nA helper for converting COCO to / from KW18 format.\n\nKW18 File Format\nhttps://docs.google.com/spreadsheets/d/1DFCwoTKnDv8qfy3raM7QXtir2Fjfj9j8-z8px5Bu0q8/edit#gid=10\n\nThe kw18.trk files are text files, space delimited; each row is one\nframe of one track and all rows have the same number of columns. The fields are:\n\n.. code ::\n\n 01) track_ID : identifies the track\n 02) num_frames: number of frames in the track\n 03) frame_id : frame number for this track sample\n 04) loc_x : X-coordinate of the track (image/ground coords)\n 05) loc_y : Y-coordinate of the track (image/ground coords)\n 06) vel_x : X-velocity of the object (image/ground coords)\n 07) vel_y : Y-velocity of the object (image/ground coords)\n 08) obj_loc_x : X-coordinate of the object (image coords)\n 09) obj_loc_y : Y-coordinate of the object (image coords)\n 10) bbox_min_x : minimum X-coordinate of bounding box (image coords)\n 11) bbox_min_y : minimum Y-coordinate of bounding box (image coords)\n 12) bbox_max_x : maximum X-coordinate of bounding box (image coords)\n 13) bbox_max_y    : maximum Y-coordinate of bounding box (image coords)\n 14) area : area of object (pixels)\n 15) world_loc_x : X-coordinate of object in world\n 16) world_loc_y : Y-coordinate of object in world\n 17) world_loc_z : Z-coordiante of object in world\n 18) timestamp : timestamp of frame (frames)\n For the location and velocity of object centroids, use fields 4-7.\n Bounding box is specified using coordinates of the top-left and bottom\n right corners. 
Fields 15-17 may be ignored.\n\n The kw19.trk and kw20.trk files, when present, add the following field(s):\n 19) object class: estimated class of the object, either 1 (person), 2\n (vehicle), or 3 (other).\n 20) Activity ID -- refer to activities.txt for index and list of activities.\n\"\"\"\nimport kwarray\nimport numpy as np\n\n\nclass KW18(kwarray.DataFrameArray):\n \"\"\"\n A DataFrame like object that stores KW18 column data\n\n Example:\n >>> import kwcoco\n >>> from kwcoco.kw18 import KW18\n >>> coco_dset = kwcoco.CocoDataset.demo('shapes')\n >>> kw18_dset = KW18.from_coco(coco_dset)\n >>> print(kw18_dset.pandas())\n \"\"\"\n\n # Define the ordering of the kw18 columns\n DEFAULT_COLUMNS = [\n 'track_id', # 1\n 'track_length', # 2\n 'frame_number', # 3\n 'tracking_plane_loc_x', 'tracking_plane_loc_y', # 4-5\n 'velocity_x', 'velocity_y', # 6-7\n 'image_loc_x', 'image_loc_y', # 8-9\n 'img_bbox_tl_x', 'img_bbox_tl_y', # 10-13\n 'img_bbox_br_x', 'img_bbox_br_y',\n 'area', # 14\n 'world_loc_x', 'world_loc_y', 'world_loc_z', # 15-17\n 'timestamp', # 18\n # kw18 can have more than 18 columns.\n 'confidence', # 19\n 'object_type_id', # 20\n 'activity_type_id', # 21\n ]\n\n def __init__(self, data):\n \"\"\"\n Args:\n data : the kw18 data frame.\n \"\"\"\n super().__init__(data)\n\n @classmethod\n def demo(KW18):\n import kwcoco\n coco_dset = kwcoco.CocoDataset.demo('shapes8')\n self = KW18.from_coco(coco_dset)\n return self\n\n @classmethod\n def from_coco(KW18, coco_dset):\n import kwimage\n raw = {col: None for col in KW18.DEFAULT_COLUMNS}\n anns = coco_dset.dataset['annotations']\n boxes = kwimage.Boxes(np.array([ann['bbox'] for ann in anns]), 'xywh')\n tlbr = boxes.to_ltrb()\n cxywh = tlbr.to_cxywh()\n tl_x, tl_y, br_x, br_y = tlbr.data.T\n\n cx = cxywh.data[:, 0]\n cy = cxywh.data[:, 1]\n\n # Create track ids if not given\n track_ids = np.array([ann.get('track_id', np.nan) for ann in anns])\n missing = np.isnan(track_ids)\n valid_track_ids = track_ids[~missing]\n if len(valid_track_ids) == 0:\n next_track_id = 1\n else:\n next_track_id = valid_track_ids.max() + 1\n num_need = np.sum(missing)\n new_track_ids = np.arange(next_track_id, next_track_id + num_need)\n track_ids[missing] = new_track_ids\n track_ids = track_ids.astype(int)\n\n scores = np.array([ann.get('score', -1) for ann in anns])\n image_ids = np.array([ann['image_id'] for ann in anns])\n cids = np.array([ann.get('category_id', -1) for ann in anns])\n\n num = len(anns)\n\n raw['track_id'] = track_ids\n raw['track_length'] = np.full(num, fill_value=-1)\n raw['frame_number'] = image_ids\n\n raw['tracking_plane_loc_x'] = cx\n raw['tracking_plane_loc_y'] = cy\n\n raw['velocity_x'] = np.full(num, fill_value=0)\n raw['velocity_y'] = np.full(num, fill_value=0)\n\n raw['image_loc_x'] = cx\n raw['image_loc_y'] = cy\n\n raw['img_bbox_tl_x'] = tl_x\n raw['img_bbox_tl_y'] = tl_y\n raw['img_bbox_br_x'] = br_x\n raw['img_bbox_br_y'] = br_y\n\n raw['area'] = boxes.area.ravel()\n\n raw['world_loc_x'] = np.full(num, fill_value=-1)\n raw['world_loc_y'] = np.full(num, fill_value=-1)\n raw['world_loc_z'] = np.full(num, fill_value=-1)\n\n raw['timestamp'] = np.full(num, fill_value=-1)\n\n raw['confidence'] = scores\n raw['object_type_id'] = cids\n\n raw = {k: v for k, v in raw.items() if v is not None}\n\n track_ids, groupxs = kwarray.group_indices(raw['track_id'])\n for groupx in groupxs:\n raw['track_length'][groupx] = len(groupx)\n\n self = KW18(raw)\n return self\n\n def to_coco(self, image_paths=None, video_name=None):\n \"\"\"\n 
Translates a kw18 files to a CocoDataset.\n\n Note:\n kw18 does not contain complete information, and as such\n the returned coco dataset may need to be augmented.\n\n Args:\n image_paths (Dict[int, str] | None):\n if specified, maps frame numbers to image file paths.\n\n video_name (str | None):\n if specified records the name of the video this kw18 belongs to\n\n TODO:\n - [X] allow kwargs to specify path to frames / videos\n\n Example:\n >>> from kwcoco.kw18 import KW18\n >>> import ubelt as ub\n >>> import kwimage\n >>> import kwcoco\n >>> # Prep test data - autogen a demo kw18 and write it to disk\n >>> dpath = ub.Path.appdir('kwcoco/kw18').ensuredir()\n >>> kw18_fpath = ub.Path(dpath) / 'test.kw18'\n >>> KW18.demo().dump(kw18_fpath)\n >>> #\n >>> # Load the kw18 file\n >>> self = KW18.load(kw18_fpath)\n >>> # Pretend that these image correspond to kw18 frame numbers\n >>> frame_names= kwcoco.CocoDataset.demo('shapes8').images().lookup('file_name')\n >>> frame_ids = sorted(set(self['frame_number']))\n >>> image_paths = dict(zip(frame_ids, frame_names))\n >>> #\n >>> # Convert the kw18 to kwcoco and specify paths to images\n >>> coco_dset = self.to_coco(image_paths=image_paths, video_name='dummy.mp4')\n >>> #\n >>> # Now we can draw images\n >>> canvas = coco_dset.draw_image(1)\n >>> # xdoctest: +REQUIRES(--draw)\n >>> kwimage.imwrite('foo.jpg', canvas)\n >>> # Draw all iamges\n >>> for gid in coco_dset.imgs.keys():\n >>> canvas = coco_dset.draw_image(gid)\n >>> fpath = dpath / 'gid_{}.jpg'.format(gid)\n >>> print('write fpath = {!r}'.format(fpath))\n >>> kwimage.imwrite(fpath, canvas)\n \"\"\"\n import kwcoco\n import ubelt as ub\n dset = kwcoco.CocoDataset()\n\n # kw18s don't have category names, so use ids as proxies\n unique_category_ids = sorted(set(self['object_type_id']))\n for cid in unique_category_ids:\n dset.ensure_category('class_{}'.format(cid), id=cid)\n\n unique_frame_idxs = ub.argunique(self['frame_number'])\n\n # kw18 files correspond to one video\n vidid = 1\n dset.add_video(id=vidid, name='unknown_kw18_video')\n\n # Index frames of the video\n for idx in unique_frame_idxs:\n frame_num = self['frame_number'][idx]\n timestamp = self['timestamp'][idx]\n if image_paths and frame_num in image_paths:\n file_name = image_paths[frame_num]\n else:\n file_name = ''.format(frame_num)\n dset.add_image(\n id=frame_num,\n file_name=file_name,\n video_id=vidid,\n frame_index=frame_num,\n timestamp=timestamp\n )\n\n for rx, row in self.iterrows():\n tl_x = row['img_bbox_tl_x']\n tl_y = row['img_bbox_tl_y']\n br_x = row['img_bbox_br_x']\n br_y = row['img_bbox_br_y']\n w = br_x - tl_x\n h = br_y - tl_y\n bbox = [tl_x, tl_y, w, h]\n\n world_loc = (row['world_loc_x'], row['world_loc_y'], row['world_loc_z'])\n velocity = (row['velocity_x'], row['velocity_y'])\n\n kw = {}\n if 'confidence' in row:\n kw['score'] = row['confidence']\n\n dset.add_annotation(\n id=rx,\n image_id=row['frame_number'],\n category_id=row['object_type_id'],\n track_id=row['track_id'],\n bbox=bbox,\n area=row['area'],\n velocity=velocity,\n world_loc=world_loc,\n **kw)\n return dset\n\n @classmethod\n def load(KW18, file):\n \"\"\"\n Example:\n >>> import kwcoco\n >>> from kwcoco.kw18 import KW18\n >>> coco_dset = kwcoco.CocoDataset.demo('shapes')\n >>> kw18_dset = KW18.from_coco(coco_dset)\n >>> print(kw18_dset.pandas())\n \"\"\"\n import pandas as pd\n try:\n EmptyDataError = pd.errors.EmptyDataError\n except Exception:\n EmptyDataError = pd.io.common.EmptyDataError\n\n try:\n df = pd.read_csv(\n file, sep=' +', 
comment='#', header=None, engine='python')\n except EmptyDataError:\n df = pd.DataFrame()\n renamer = dict(zip(df.columns, KW18.DEFAULT_COLUMNS))\n raw = df.rename(columns=renamer)\n raw = _ensure_kw18_column_order(raw)\n self = KW18(raw)\n return self\n\n @classmethod\n def loads(KW18, text):\n \"\"\"\n Example:\n >>> self = KW18.demo()\n >>> text = self.dumps()\n >>> self2 = KW18.loads(text)\n >>> empty = KW18.loads('')\n \"\"\"\n import io\n file = io.StringIO()\n file.write(text)\n file.seek(0)\n self = KW18.load(file)\n return self\n\n def dump(self, file):\n import os\n if isinstance(file, (str, os.PathLike)):\n with open(file, 'w') as fp:\n self.dump(fp)\n else:\n df = self.pandas()\n # Write column header\n file.write('#' + ' '.join(df.columns) + '\\n')\n df.to_csv(file, sep=' ', mode='a', index=False, header=False)\n\n def dumps(self):\n \"\"\"\n Example:\n >>> self = KW18.demo()\n >>> text = self.dumps()\n >>> print(text)\n \"\"\"\n import io\n file = io.StringIO()\n self.dump(file)\n file.seek(0)\n text = file.read()\n return text\n\n\ndef _ensure_kw18_column_order(df):\n \"\"\"\n Ensure expected kw18 columns exist and are in the correct order.\n\n Example:\n >>> import pandas as pd\n >>> df = pd.DataFrame(columns=KW18.DEFAULT_COLUMNS[0:18])\n >>> _ensure_kw18_column_order(df)\n >>> df = pd.DataFrame(columns=KW18.DEFAULT_COLUMNS[0:19])\n >>> _ensure_kw18_column_order(df)\n >>> df = pd.DataFrame(columns=KW18.DEFAULT_COLUMNS[0:18] + KW18.DEFAULT_COLUMNS[20:21])\n >>> assert np.all(_ensure_kw18_column_order(df).columns == df.columns)\n \"\"\"\n columns = list(KW18.DEFAULT_COLUMNS)\n\n # Columns after the 18th are optional\n # (note: the post 18th column spec not well defined in general)\n optional_columns = KW18.DEFAULT_COLUMNS[18:]\n for col in optional_columns[::-1]:\n if col not in df.columns:\n columns.remove(col)\n\n if len(df) == 0:\n # Ensure empty data frames have columns\n df = df.reindex(columns=columns)\n\n missing_cols = [c for c in columns if c not in df.columns]\n unknown_cols = [c for c in df.columns if c not in columns]\n\n if missing_cols:\n raise ValueError('missing_cols = {!r}'.format(missing_cols))\n\n if unknown_cols:\n raise ValueError('unknown_cols = {!r}'.format(unknown_cols))\n\n df = df.reindex(columns=columns)\n return df\n","sub_path":"kwcoco/kw18.py","file_name":"kw18.py","file_ext":"py","file_size_in_byte":13342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"461734602","text":"import random\nimport math\n\nimport pyglet\nfrom pyglet.gl import *\nfrom pyglet.window import key\n\nfrom vector import *\nfrom world import *\nfrom enums import *\n\nclass Meteor(WObject):\n # Meteor bass class\n def __init__(self, start_pos, start_deg, num_points, size, speed, max_health):\n WObject.__init__(self)\n self.init_pos(start_pos)\n self.vel = self.deg_to_vel(start_deg) * speed\n self.size = Vector2(size, size)\n self.num_points = num_points\n self.points = self.generate_points()\n self.turn_speed = random.uniform(-20, 20)\n\n self.max_health = max_health\n self.health = self.max_health\n\n self.draw_circle = False\n\n def bounding_circle(self):\n return BoundingCircle(self.pos, self.size.x / 2)\n\n def hit(self):\n self.health = self.health - 1\n if self.health == 0:\n self.remove = True\n\n def generate_points(self):\n interval = 360 / self.num_points\n points = []\n first = None\n for i in range(self.num_points):\n deg = i * interval\n length = random.uniform(0.7, 1) / 2\n p = self.deg_to_vel(deg) * length 
+ Vector2(0.5, 0.5)\n points.append(p)\n if i == 0:\n first = p\n else:\n points.append(p)\n points.append(first)\n return points\n\n def update(self, time, window):\n # update color (white -> yellow -> red)\n # bias to make 1 health completely red and full health completely white\n # max_health must be greater than 1\n h = float(self.health - 1) / (self.max_health - 1)\n if h > 0.5: # approach yellow\n self.color = [1, 1, (h - 0.5) / 0.5]\n else: # approach red\n self.color = [1, h / 0.5, 0]\n \n # rotate\n self.update_deg(self.deg + self.turn_speed * time)\n\n # update position\n # allow to dissappear off edge, but jump to the opposite edge once that happens\n pos = self.pos + self.vel * time\n (winx, winy) = window.get_size()\n if pos.x < 0 - self.size.x / 2:\n pos.x = pos.x + (1.5 * self.size.x + winx)\n if pos.x > winx + self.size.x:\n pos.x = pos.x - (1.5 * self.size.x + winx)\n if pos.y < 0 - self.size.y:\n pos.y = pos.y + (1.5 * self.size.y + winy)\n if pos.y > winy + self.size.y:\n pos.y = pos.y - (1.5 * self.size.y + winy)\n self.update_pos(pos)\n\n\nclass Meteor1(Meteor):\n # big meteor\n def __init__(self, start_pos, start_deg):\n Meteor.__init__(self, start_pos, start_deg, 18, 200, 40, 6)\n\n\nclass Meteor2(Meteor):\n # medium meteor\n def __init__(self, start_pos, start_deg):\n Meteor.__init__(self, start_pos, start_deg, 12, 100, 60, 4)\n\n\nclass Meteor3(Meteor):\n # small meteor\n def __init__(self, start_pos, start_deg):\n Meteor.__init__(self, start_pos, start_deg, 8, 40, 80, 2)\n\n\nclass Bullet(WObject):\n # gun projectile\n def __init__(self, start_pos, start_deg):\n WObject.__init__(self)\n self.init_pos(start_pos)\n self.vel = self.deg_to_vel(start_deg) * 500\n self.init_deg(start_deg)\n self.size = Vector2(5, 9)\n self.points = self.to_points([0, 0, 0.5, 1, 0.5, 1, 1, 0, 1, 0, 0, 0])\n\n def update(self, time, window):\n # update position and flag for removal if off screen\n self.update_pos(self.pos + self.vel * time)\n (winx, winy) = window.get_size()\n if self.pos.x < 0 or self.pos.x > winx or self.pos.y < 0 or self.pos.y > winy:\n self.remove = True\n\n def hit(self):\n self.remove = True\n\nclass Ship(WObject):\n # the players ship\n def __init__(self, start_pos):\n WObject.__init__(self)\n self.init_pos(start_pos)\n self.size = Vector2(10, 20)\n self.init_deg(0)\n self.vel = Vector2(0, 0)\n\n self.accel = 100\n self.turn_speed = 300\n self.turn_state = None\n self.thrust_state = None\n\n self.points = self.to_points([0.5, 1, 1, 0, \n 1, 0, 0.5, 0.2, \n 0.5, 0.2, 0, 0, \n 0, 0, 0.5, 1])\n\n def hit(self):\n self.remove = True\n\n def turn(self, direction, press):\n if press:\n self.turn_state = direction\n else:\n self.turn_state = None\n\n def thrust(self, direction, press):\n if press:\n self.thrust_state = direction\n else:\n self.thrust_state = None\n\n def update(self, time, window):\n # update velocity\n if self.thrust_state:\n added_vel = self.deg_to_vel(self.deg) * self.accel * time\n if self.thrust_state == THRUST.forward:\n self.vel = self.vel + added_vel\n elif self.thrust_state == THRUST.back:\n self.vel = self.vel - added_vel\n\n # update angle\n if self.turn_state:\n if self.turn_state == TURN.left:\n self.update_deg(self.deg + self.turn_speed * time)\n elif self.turn_state == TURN.right:\n self.update_deg(self.deg - self.turn_speed * time)\n self.update_deg(self.deg % 360)\n \n # update position based on velocity and keep within window\n pos = self.pos + self.vel * time\n (winx, winy) = window.get_size()\n pos.x = pos.x % winx\n pos.y = pos.y % 
winy\n self.update_pos(pos)\n\n\nclass Collider():\n # Helper class to aid with collision detection.\n # Register collision detection and handling methods for particular pairs of\n # objects (by class name), and you can then just call Collider.collide(obj1, obj2),\n # and Collider.handle(obj1, obj2) and this class will call the appropriate methods.\n def __init__(self):\n self.method_dict = dict()\n\n def register_methods(self, detector, handler, type1, type2):\n # Pass in a collision detection method, a collision handling method,\n # and two object class names as strings.\n # The methods are expected to accept two arguments (the two objects)\n # in the order which their types are submitted. Will raise an error if you register\n # a method for the same pair of objects\n if self._find_methods(type1, type2) != None or self._find_methods(type2, type1) != None:\n raise('Already registered methods for ' + type1 + ' and ' + type2)\n if not type1 in self.method_dict:\n self.method_dict[type1] = dict()\n self.method_dict[type1][type2] = [detector, handler]\n\n def collide(self, obj1, obj2):\n # Pass it any two world objects (in any order) and it will call the appropriate\n # collision detection method (based on their type) and return true if they collided.\n # If there is no method registered for that pair, it will return false.\n if obj1.remove or obj2.remove:\n return False\n type1 = self._type(obj1)\n type2 = self._type(obj2)\n methods1 = self._find_methods(type1, type2)\n methods2 = self._find_methods(type2, type1)\n if methods1 == None and methods2 == None:\n return False\n # only one of these will do something\n if methods1 != None:\n return methods1[0](obj1, obj2)\n elif methods2 != None:\n return methods2[0](obj2, obj1)\n return False\n\n def handle(self, obj1, obj2):\n # Pass it any two world objects (in any order) and it will call the appropriate\n # collision handling method (based on their type).\n # If there is no method registered for that pair, it will do nothing.\n if obj1.remove or obj2.remove:\n return\n type1 = self._type(obj1)\n type2 = self._type(obj2)\n methods1 = self._find_methods(type1, type2)\n methods2 = self._find_methods(type2, type1)\n if methods1 == None and methods2 == None:\n return\n # only one of these will do something\n if methods1 != None:\n methods1[1](obj1, obj2)\n elif methods2 != None:\n methods2[1](obj2, obj1)\n\n def _find_methods(self, type1, type2):\n if type1 in self.method_dict:\n if type2 in self.method_dict[type1]:\n return self.method_dict[type1][type2]\n return None\n\n def _type(self, obj):\n return obj.__class__.__name__\n","sub_path":"meteors/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":8464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"604130304","text":"# Three-Dimensional Object\n#\n# This is a container class for three-dimensional objects.\n#\n# Last Modified 2013.01.13 22:24\n#\n\nimport ThreeVector\n\nclass ThreeDimensionalObject:\n\t\n\tdef __init__(self, X1 = 0, X2 = 0, Y1 = 0, Y2 = 0, Z1 = 0, Z2 = 0):\n\t\t\n\t\tself.X1 = X1\n\t\tself.X2 = X2\n\t\t\n\t\tself.Y1 = Y1\n\t\tself.Y2 = Y2\n\t\t\n\t\tself.Z1 = Z1\n\t\tself.Z2 = Z2\n\t\t\n\tdef Contains(self, Point):\n\t\t\n\t\tIsContained = False\n\t\t\n\t\tif ((Point.X > self.X1) and (Point.X < self.X2) and (Point.Y > self.Y1) and (Point.Y < self.Y1) and (Point.Z > self.Z1) and (Point.Z < self.Z1)):\n\t\t\tIsContained = True\n\t\t\n\t\treturn IsContained\n\t\t\n","sub_path":"Code 
Archive/Analysis_37_2013-01-27/ThreeDimensionalObject.py","file_name":"ThreeDimensionalObject.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"278013254","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nclass Solution:\n    def canConvertString(self, s: str, t: str, k: int) -> bool:\n        cnt = 0\n        for i, j in zip(list(s), list(t)):\n            print(ord(j) - ord(i))\n            cnt += (ord(j) - ord(i))\n        print(cnt)\n        return cnt <= k\n\n\nif __name__ == '__main__':\n    sn = Solution()\n    s = \"input\"\n    t = \"ouput\"\n    k = 9\n\n    # s = \"aab\"\n    # t = \"bbb\"\n    # k = 27\n    print(sn.canConvertString(s, t, k))\n","sub_path":"贪心算法/1540.py","file_name":"1540.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"237757853","text":"import os\n\nimport requests\n\n\nAPI_KEY = 'trnsl.1.1.20161025T233221Z.47834a66fd7895d0.a95fd4bfde5c1794fa433453956bd261eae80152'\nURL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n\n\ndef translate_request(text, from_lang, to_lang):\n    \"\"\"\n    https://translate.yandex.net/api/v1.5/tr.json/translate ?\n    key=\n    & text=<text to translate>\n    & lang=<translation direction>\n    & [format=<text format>]\n    & [options=<translation options>]\n    & [callback=<callback function name>]\n    \"\"\"\n\n    params = {\n        'key': API_KEY,\n        'text': text,\n        'lang': '{}-{}'.format(from_lang, to_lang),\n    }\n\n    response = requests.get(URL, params=params)\n    json_ = response.json()\n    if response.status_code == 200:  # if the request to the service succeeded\n        return ''.join(json_['text'])\n    else:\n        raise RuntimeError('Error while executing the request: {}'.format(json_))\n\n\ndef translate(input_file_path, output_file_path, from_lang, to_lang):\n    text = read_file(input_file_path)\n    translated_text = translate_request(text, from_lang, to_lang)\n    write_file(translated_text, output_file_path)\n\n\ndef read_file(file_path):\n    with open(file_path, 'r', encoding='utf-8') as f:\n        text = f.read()\n    return text\n\n\ndef write_file(text, file_path):\n    with open(file_path, 'w', encoding='utf-8') as f:\n        f.write(text)\n\n\ndef main():\n    data = [\n        ['de', 'DE.txt', 'DE_RU.txt'],\n        ['es', 'ES.txt', 'ES_RU.txt'],\n        ['fr', 'FR.txt', 'FR_RU.txt']\n    ]\n\n    for item in data:\n        lang, input_file, output_file = item\n        translate(input_file, output_file, lang, 'ru')\n\n\nif __name__ == '__main__':\n    main()","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"241961014","text":"from sklearn.model_selection import cross_val_score\n\nfrom rocs_iris import pipe1, clf2, pipe3, clf_labels, X_train, y_train\nfrom majority_vote_classifier import MajorityVoteClassifier\n\n# Majority Rule (hard) Voting\n\nmv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])\n\nclf_labels += ['Majority voting']\nall_clf = [pipe1, clf2, pipe3, mv_clf]\n\n# 2020.01.07 change\nif __name__ == '__main__':\n    for clf, label in zip(all_clf, clf_labels):\n        scores = cross_val_score(estimator=clf,\n                                 X=X_train,\n                                 y=y_train,\n                                 cv=10,\n                                 scoring='roc_auc')\n        print(\"ROC AUC: %0.2f (+/- %0.2f) [%s]\"\n              % (scores.mean(), scores.std(), 
label))\n\n","sub_path":"ch07/rocs_majority_voting_iris.py","file_name":"rocs_majority_voting_iris.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"74110217","text":"import sys\nimport base64\nimport uuid\nimport cStringIO\n\nfrom django.http import Http404\nfrom django.utils.translation import ugettext as _\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import ListView, TemplateView, CreateView, UpdateView\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.shortcuts import redirect\nfrom django.db.models import Sum\nfrom django.contrib import messages\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom apps.stripe_payment.models import (customer_get, charge_create,\n customer_total_charged)\nfrom .forms import CampaignForm, CampaignEdit\nfrom .models import Ads, DailySpending, billing_exists, msg_no_billing\nfrom . import BuildDate\n\n\nclass AddCampaign(CreateView):\n form_class = CampaignForm\n template_name = 'ads/campaign.html'\n success_url = reverse_lazy('ads_list')\n\n def get(self, request, *args, **kwargs):\n if billing_exists(self.request.user) is None:\n messages.error(self.request, msg_no_billing)\n return redirect(reverse('ads_list'))\n\n return super(AddCampaign, self).get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if billing_exists(self.request.user) is None:\n messages.error(self.request, msg_no_billing)\n return redirect(reverse('ads_list'))\n\n request.POST = request.POST.copy()\n request.POST['user'] = request.user.id\n\n # process base64 decoded image\n format, imgstr = request.POST['image'].split(';base64,')\n ext = format.split('/')[-1]\n file_type = format.split(':')[-1]\n\n file_name = str(uuid.uuid4())[:12] # 12 characters\n file_name = '%s.%s' % (file_name, ext)\n\n file = cStringIO.StringIO(base64.b64decode(imgstr))\n image = InMemoryUploadedFile(file,\n field_name='image',\n name=file_name,\n content_type=file_type,\n size=sys.getsizeof(file),\n charset=None\n )\n request.FILES['image'] = image\n\n return super(AddCampaign, self).post(request, *args, **kwargs)\n\n\nclass EditCampaign(UpdateView):\n model = Ads\n context_object_name = 'ad'\n form_class = CampaignEdit\n success_url = reverse_lazy('ads_list')\n\n\nclass TogglePause(TemplateView):\n def render_to_response(self, context, **kwargs):\n ad = Ads.objects.get(id=context['pk'])\n if ad.is_active != 1:\n ad.is_active = 1\n messages.success(\n self.request, 'Campaign %s has been resumed' % ad.campaign)\n else:\n ad.is_active = 0\n messages.success(\n self.request, 'Campaign %s has been paused' % ad.campaign)\n ad.save()\n\n return redirect(reverse('ads_list'))\n\n\nclass AdsView(TemplateView):\n def render_to_response(self, context, **kwargs):\n try:\n ad = Ads.objects.get(id=context['pk'])\n except:\n raise Http404(_('Page your looking does not exists!'))\n\n Ads.charge_click(self.request, context['pk'])\n return redirect(ad.url)\n\n\nclass OnwerFilter(ListView):\n def get_queryset(self):\n queryset = super(OnwerFilter, self).get_queryset()\n return queryset.filter(user=self.request.user)\n\n\nclass AdsListView(ListView):\n model = Ads\n context_object_name = 'ads_list'\n template_name = 'ads/ads_list.html'\n\n def get_queryset(self):\n queryset = super(AdsListView, self).get_queryset()\n return queryset.filter(user=self.request.user)\n\n\nclass SpendListView(ListView):\n model = DailySpending\n 
context_object_name = 'spend_list'\n template_name = 'ads/daily-spending.html'\n\n def get_queryset(self):\n queryset = super(SpendListView, self).get_queryset()\n return queryset.filter(ads__user=self.request.user)\n\n\nclass BillingView(TemplateView):\n model = Ads\n context_object_name = 'ads'\n template_name = 'ads/billing.html'\n\n def get_context_data(self, **kwargs):\n context = super(BillingView, self).get_context_data(**kwargs)\n\n try:\n stripe_customer = customer_get(self.request.user)\n cards = stripe_customer['sources']['data']\n except:\n cards = {}\n context.update({'cards': cards})\n return context\n\n\nclass PaymentListView(TemplateView):\n template_name = 'ads/payment.html'\n\n\nclass PayView(TemplateView):\n\n def render_to_response(self, context):\n if billing_exists(self.request.user) is None:\n messages.error(self.request, msg_no_billing)\n return redirect(reverse('ads_list'))\n\n dt_object = BuildDate()\n date = dt_object.get_month()\n\n # payable = total_charge - total_paid\n charged_amount = customer_total_charged\n charged_amount = charged_amount / 100 # convert value in dollar\n\n amount = DailySpending.objects.filter(\n ads__user=self.request.user).aggregate(\n Sum('spend'))['spend__sum'] or 0\n\n difference = amount - charged_amount\n if difference > 0:\n amount = amount * 100\n\n try:\n charge_create(\n self.request.user,\n amount=int(amount),\n currency='usd',\n customer=self.request.user.customer.stripe_customer_id,\n description='Charge for %s' % date['start'].strftime('%m')\n )\n messages.success(self.request, 'You have successfully paid.')\n except Exception as e:\n messages.success(self.request, e.message)\n else:\n messages.success(\n self.request, 'Thanks for payment attempt.'\n ' It seems there is no due to charge')\n return HttpResponseRedirect(reverse('ads_list'))\n","sub_path":"apps/ads/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"185976097","text":"import numpy as np\r\nimport pandas as pd\r\ndf=pd.read_csv(\"studentdata.csv\",sep=';')\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import GridSearchCV, cross_val_score\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.feature_selection import SelectKBest, chi2\r\nfrom sklearn.svm import LinearSVC #support vector machine Classifier Model\r\n\"\"\" split data into Training and Testing Sets \"\"\"\r\ndef split_data(X,Y):\r\n return train_test_split(X,Y,test_size=0.2,random_state=0)\r\n\"\"\" Confusion Matrix \"\"\"\r\ndef confuse(y_true,y_pred):\r\n cm=confusion_matrix(y_true=y_true,y_pred=y_pred)\r\n #print(\"\\nConfusion Matrix: \\n\",cm)\r\n fpr(cm)\r\n ffr(cm)\r\n\"\"\" False Pass Rate \"\"\"\r\ndef fpr(confusion_matrix):\r\n fp=confusion_matrix[0][1]\r\n tf=confusion_matrix[0][0]\r\n rate=float(fp)/(fp+tf)\r\n print(\"False pass rate :\",rate)\r\n\"\"\" False fail rate \"\"\"\r\ndef ffr(confusion_matrix):\r\n ff=confusion_matrix[1][0]\r\n tp=confusion_matrix[1][1]\r\n rate=float(ff)/(ff+tp)\r\n print(\"False Fail Rate :\",rate)\r\n return rate\r\n\"\"\" Train Model and Print score \"\"\"\r\ndef train_and_score(X,y):\r\n X_train,X_test,y_train,y_test=split_data(X,y)\r\n clf=Pipeline([\r\n ('reduce dim', SelectKBest(chi2,k=2)),\r\n ('train',LinearSVC(C=100))])\r\n 
scores=cross_val_score(clf,X_train,y_train,cv=5,n_jobs=2)\r\n print(\"Mean Model Accuracy :\",np.array(scores).mean())\r\n clf.fit(X_train,y_train)\r\n confuse(y_test,clf.predict(X_test))\r\n print()\r\n \r\n \"\"\" Main Program \"\"\"\r\ndef main():\r\n print(\"\\nStudent Performance Prediction\")\r\n #for each feature, encode to categorical values\r\n class_le=LabelEncoder()\r\n for column in df:\r\n df[column]=class_le.fit_transform(df[column].values)\r\n for i,column in df.itercolumnss():\r\n if column[ETP_100]<=100 and column[ETP_100]>=50:\r\n print(\"Great\")\r\n ","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"206815135","text":"# coding: utf8\nfrom bearlibterminal import terminal as blt\nimport libtcodpy as libtcod\n\nimport constants\n\n\n# based on STI library for LOVE2D\n# this places 0,0 at the top of the screen in the middle\n# as opposed to other isometric calculations which might place 0,0 in lower left\ndef draw_iso(x,y):\n # we're offsetting so that we can see the lower-left corner of the map, otherwise it only shows the right half of it\n offset_x = constants.MAP_WIDTH * 4\n # isometric\n tile_x = (x - y) * constants.TILE_WIDTH / 2 + offset_x\n tile_y = (x + y) * constants.TILE_HEIGHT / 2\n return tile_x, tile_y\n\ndef draw_map(map_draw, fov_map):\n for x in range(0, constants.MAP_WIDTH):\n for y in range(0, constants.MAP_HEIGHT):\n\n is_visible = libtcod.map_is_in_fov(fov_map, x, y)\n\n if is_visible:\n tile_x, tile_y = draw_iso(x, y)\n blt.color(\"white\")\n map_draw[x][y].explored = True\n\n if map_draw[x][y].stairs == True:\n # draw stairs\n blt.put(tile_x, tile_y, \">\")\n else:\n if map_draw[x][y].block_path == True:\n # draw wall\n blt.put(tile_x, tile_y, \"#\")\n\n else:\n # draw floor\n blt.put(tile_x, tile_y, 0x3002)\n #we draw the dot for reference so that we know what on-screen position the tile_x, tile_y refers to\n blt.put(tile_x, tile_y, \".\")\n\n elif map_draw[x][y].explored:\n tile_x, tile_y = draw_iso(x, y)\n # shade the explored tiles\n blt.color(\"gray\")\n\n if map_draw[x][y].stairs == True:\n # draw stairs\n blt.put(tile_x, tile_y, \">\")\n else:\n if map_draw[x][y].block_path == True:\n # draw wall\n blt.put(tile_x, tile_y, \"#\")\n\n else:\n # draw floor\n blt.put(tile_x, tile_y, 0x3002)\n #we draw the dot for reference so that we know what on-screen position the tile_x, tile_y refers to\n blt.put(tile_x, tile_y, \".\")\n\n\ndef draw_messages(msg_history):\n if len(msg_history) <= constants.NUM_MESSAGES:\n to_draw = msg_history\n else:\n to_draw = msg_history[-constants.NUM_MESSAGES:]\n\n start_y = 45 - (constants.NUM_MESSAGES)\n\n i = 0\n for message, color in to_draw:\n string = \"[color=\" + str(color) + \"] \" + message\n blt.puts(2, start_y+i, string)\n\n i += 1\n\n# GUI\n# based on https://github.com/FirstAidKitten/Roguelike-Sandbox\ndef create_window(x, y, w, h, title=None):\n #test\n blt.composition(False)\n\n last_bg = blt.state(blt.TK_BKCOLOR)\n blt.bkcolor(blt.color_from_argb(200, 0, 0, 0))\n blt.clear_area(x - 2, y - 2, w + 2, h + 2)\n blt.bkcolor(last_bg)\n\n # upper border\n border = '┌' + '─' * (w) + '┐'\n blt.puts(x - 1, y - 1, border)\n # sides\n for i in range(h):\n blt.puts(x - 1, y + i, '│')\n blt.puts(x + w, y + i, '│')\n # lower border\n border = '└' + '─' * (w) + '┘'\n blt.puts(x - 1, y + h, border)\n\n if title is not None:\n leng = len(title)\n offset = (w + 2 - leng) // 2\n 
blt.puts(x + offset, y - 1, title)\n\n\ndef menu(header, options, width, title=None):\n global FOV_CALCULATE\n\n FOV_CALCULATE = True\n\n menu_x = int((120 - width) / 2)\n\n if len(options) > 26:\n raise ValueError('Cannot have a menu with more than 26 options.')\n\n header_height = 2\n\n menu_h = int(header_height + 1 + 26)\n menu_y = int((50 - menu_h) / 2)\n\n # create a window\n\n create_window(menu_x, menu_y, width, menu_h, title)\n\n\n blt.puts(menu_x, menu_y, header)\n\n # print all the options\n y = menu_y + header_height + 1\n letter_index = ord('a')\n for option_text in options:\n text = '(' + chr(letter_index) + ') ' + option_text\n blt.puts(menu_x, y, text)\n y += 1\n letter_index += 1\n\n blt.refresh()\n # present the root console to the player and wait for a key-press\n blt.set('input: filter = [keyboard]')\n while True:\n key = blt.read()\n if blt.check(blt.TK_CHAR):\n # convert the ASCII code to an index; if it corresponds to an option, return it\n key = blt.state(blt.TK_CHAR)\n index = key - ord('a')\n if 0 <= index < len(options):\n blt.set('input: filter = [keyboard, mouse+]')\n blt.composition(True)\n return index\n else:\n blt.set('input: filter = [keyboard, mouse+]')\n blt.composition(True)\n return None\n\n\ndef inventory_menu(header, player):\n # show a menu with each item of the inventory as an option\n if len(player.container.inventory) == 0:\n options = ['Inventory is empty.']\n else:\n options = [item.display_name() for item in player.container.inventory]\n\n index = menu(header, options, 50, 'INVENTORY')\n\n # if an item was chosen, return it\n if index is None or len(player.container.inventory) == 0:\n return None\n return player.container.inventory[index]","sub_path":"renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"266781511","text":"import torch\nfrom utils.RawDataProcessing import VectorProcessor\nfrom utils.DataProviderLight import DataProviderLight\nfrom utils.DatasetGeneration import DeterministicGenerator\nfrom preprocessing.Word2Vec import Word2Vec\nfrom classification.KNNClassification import KNNClassifier\nfrom classification.ClassificationTest import ClassificationTest\n# script for corresponding test case\n# most test cases should be able to be executed without any further changes, if data is available\n\nSAMPLE_SIZE = 10000\nFEATURES = [25, 50, 100, 200, 400]\nBATCH_SIZE = 32\nEPOCHS = 5\n# change device to \"cpu\" if cuda not available\nDEVICE = \"cuda\"\n\n# raw words and labels\nfile_words = open(\"../data/unique_equations.json\")\nfile_labels = open(\"../data/unique_labels.json\")\n# pre calculated weight matrix\nfile_weights = open(\"../data/weights_0.json\")\ndata_provider = DataProviderLight(file_words, file_labels, sample_size=SAMPLE_SIZE, file_weights=file_weights)\n\nfor num_features in FEATURES:\n # training the word2vec net\n word2vec = Word2Vec(data_provider, num_features, DEVICE)\n word2vec.train(EPOCHS, BATCH_SIZE)\n # extracting weights and injecting them into the data provider\n data_provider.weights = torch.tensor(word2vec.get_weights())\n # generate dataset\n processor = VectorProcessor(data_provider)\n generator = DeterministicGenerator(data_provider, processor)\n dataset = generator.generate_dataset()\n # train knn classifier\n classifier = KNNClassifier(data_provider, dataset, DEVICE)\n classifier.n_neighbours = 5\n classifier.train()\n # test the classifier\n test = 
ClassificationTest(dataset, classifier)\n print(str(num_features) + \" Features: \" + str(test.test()) + \"% precision\")","sub_path":"tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"116789499","text":"import mysql.connector\nimport numpy\nfrom scipy import stats\nimport random\nimport string\nimport datetime\n\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\",\n database=\"myML\"\n)\n\nmycursor = mydb.cursor()\n\n#Determine which gender has more users;\nmycursor.execute(\"SELECT * FROM persons WHERE Gender='male'\")\n\nrow=mycursor.fetchall()\nmale=mycursor.rowcount\n\nmycursor.execute(\"SELECT * FROM persons WHERE Gender='female'\")\nrow=mycursor.fetchall()\nfemale=mycursor.rowcount\n\nprint(male,female)\n\nif(male>female):\n print(\"there are more male users\")\nelif(female>male):\n print(\"there are more female users\")\nelse:\n print(\"they are equal\")\n\n#2) Determine how many users are under 30 years old;\nmycursor.execute(\"SELECT TIMESTAMPDIFF(YEAR, Date, CURDATE()) FROM persons WHERE TIMESTAMPDIFF(YEAR, Date, CURDATE())< 30\")\n\nrow=mycursor.fetchall()\n\nprint(mycursor.rowcount)\n#3) Determine the mean, mode, median and standard deviation of age;\nmycursor.execute(\"SELECT TIMESTAMPDIFF(YEAR, Date, CURDATE()) FROM persons \")\n\nrow=mycursor.fetchall()\n\nprint(mycursor.rowcount)\nmean = numpy.mean(row)\nmedian = numpy.median(row)\nmode = stats.mode(row)\nstd=numpy.std(row)\n\nprint(mean,median,mode,std)\n\n#4) Below what age are 70% of the users;\nx = numpy.percentile(row, 70)\n\nprint(x)\n#5) Below what age are 87% of the users;\nx = numpy.percentile(row, 87)\n\nprint(x)\n#6) Determine which nationality of user occurs most frequently\nmycursor.execute(\"SELECT Nationality,COUNT(*) FROM persons GROUP BY Nationality ORDER BY COUNT(*) DESC LIMIT 1\")\nrow=mycursor.fetchall()\nfor r in row:\n print(r)\n#7) Determine the arithmetic mean of the number of characters in the names;\n\n\nmycursor.execute(\"SELECT CHAR_LENGTH(Name)FROM persons\")\nrow=mycursor.fetchall()\nmean = numpy.mean(row)\nprint(mean)\n#8) On average, how many children do non-smoking users have;\nmycursor.execute(\"SELECT COALESCE(Children, 0) AS Children FROM persons WHERE Smoke='no'\")\nrow=mycursor.fetchall()\nmean = numpy.mean(row)\nprint(mean)\n#9) On average, how many children do non-smoking female users have;\nmycursor.execute(\"SELECT COALESCE(Children, 0) AS Children FROM persons WHERE Smoke='no' AND Gender='female'\")\nrow=mycursor.fetchall()\nmean = numpy.mean(row)\nprint(mean)\n#10) What is the average age of female users who smoke.\nmycursor.execute(\"SELECT TIMESTAMPDIFF(YEAR, Date, CURDATE()) As Age FROM persons WHERE Smoke='yes' AND Gender='female'\")\nrow=mycursor.fetchall()\nmean = numpy.mean(row)\nprint(mean)","sub_path":"Lab_8/Lab_8.py","file_name":"Lab_8.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"25362194","text":"from prg1 import To_do_list\n\nma=To_do_list(\"work\")\nwhile True:\n\ts=input(\"enter the choice: \\n \\\n\t\t1. Add to tasks\\n \\\n\t\t2. Mark Done \\n \\\n\t\t3. See tasks \\n \\\n\t\t4. 
Exit\")\n\n\tif s == 1:\n\t\tma.add()\t\n\n\telif s == 2:\n\t\tma.mark_done()\n\n\telif s == 3:\n\t\tma.see_tasks()\n\n\telse:\n\t\tbreak\n\t\n\t\n\t\n","sub_path":"cpyprg1.py","file_name":"cpyprg1.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"555731994","text":"import findspark\nfindspark.init(\"/opt/apache-spark\")\n\nfrom pyspark import SparkContext\nsc = SparkContext(\"local\")\n\nFILE_NAME = \"test-graph.txt\"\nFILE_NAME = \"web-Stanford.txt\"\n\nfile_content = sc.textFile(FILE_NAME) \\\n .filter(lambda x: not x.startswith(\"#\")) \\\n .map(lambda x: x.split())\n\nadj_rdd = file_content.flatMap(lambda x: [(x[0], {x[1]}), (x[1], {x[0]})]) \\\n .reduceByKey(set.union)\n\nadj = adj_rdd.collectAsMap()\n\ndef calc_cc(node, value):\n deg = len(adj[node])\n if deg > 1:\n return value/(deg*(deg-1))\n else:\n return 0\n\ncc = adj_rdd.flatMap(lambda x: [(x[0], v) for v in adj[x[0]]]) \\\n .map(\n lambda x: (x[0], len(adj[x[0]].intersection(adj[x[1]])))\n ).reduceByKey(lambda v1, v2: v1 + v2) \\\n .map(lambda x: (x[0], calc_cc(*x))) \\\n\nresult = cc.collect()\nresult[:5]\n","sub_path":"MapReduce/src/main/scala/example/cc.py","file_name":"cc.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"451622218","text":"import numpy as np\nfrom scipy.spatial.transform import Rotation\n\nclass SE3Control(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, quad_params):\n \"\"\"\n This is the constructor for the SE3Control object. You may instead\n initialize any parameters, control gain values, or private state here.\n\n For grading purposes the controller is always initialized with one input\n argument: the quadrotor's physical parameters. If you add any additional\n input arguments for testing purposes, you must provide good default\n values!\n\n Parameters:\n quad_params, dict with keys specified by crazyflie_params.py\n\n \"\"\"\n\n # Quadrotor physical parameters.\n self.mass = quad_params['mass'] # kg\n self.Ixx = quad_params['Ixx'] # kg*m^2\n self.Iyy = quad_params['Iyy'] # kg*m^2\n self.Izz = quad_params['Izz'] # kg*m^2\n self.arm_length = quad_params['arm_length'] # meters\n self.rotor_speed_min = quad_params['rotor_speed_min'] # rad/s\n self.rotor_speed_max = quad_params['rotor_speed_max'] # rad/s\n self.k_thrust = quad_params['k_thrust'] # N/(rad/s)**2\n self.k_drag = quad_params['k_drag'] # Nm/(rad/s)**2\n\n # You may define any additional constants you like including control gains.\n self.inertia = np.diag(np.array([self.Ixx, self.Iyy, self.Izz])) # kg*m^2\n self.g = 9.81 # m/s^2\n\n # STUDENT CODE HERE\n self.gamma = self.k_drag/self.k_thrust\n self.Kp = np.diag(np.array([8.5,8.5,8]))\n self.Kd = np.diag(np.array([5,5,5]))\n self.Kr = np.diag(np.array([2500,2500,400]))\n self.Kw = np.diag(np.array([60,60,50]))\n self.u2Matrix = np.array([[1,1,1,1],[0,self.arm_length,0,-self.arm_length],[-self.arm_length,0,self.arm_length,0],[self.gamma,-self.gamma,self.gamma,-self.gamma]])\n\n def update(self, t, state, flat_output):\n \"\"\"\n This function receives the current time, true state, and desired flat\n outputs. 
It returns the command inputs.\n\n Inputs:\n t, present time in seconds\n state, a dict describing the present state with keys\n x, position, m\n v, linear velocity, m/s\n q, quaternion [i,j,k,w]\n w, angular velocity, rad/s\n flat_output, a dict describing the present desired flat outputs with keys\n x, position, m\n x_dot, velocity, m/s\n x_ddot, acceleration, m/s**2\n x_dddot, jerk, m/s**3\n x_ddddot, snap, m/s**4\n yaw, yaw angle, rad\n yaw_dot, yaw rate, rad/s\n\n Outputs:\n control_input, a dict describing the present computed control inputs with keys\n cmd_motor_speeds, rad/s\n cmd_thrust, N (for debugging and laboratory; not used by simulator)\n cmd_moment, N*m (for debugging; not used by simulator)\n cmd_q, quaternion [i,j,k,w] (for laboratory; not used by simulator)\n \"\"\"\n cmd_motor_speeds = np.zeros((4,))\n cmd_thrust = 0\n cmd_moment = np.zeros((3,))\n cmd_q = np.zeros((4,))\n\n # STUDENT CODE HERE\n\n # extract the value of state\n x_state = state['x']\n v = state['v']\n q = state['q']\n w = state['w']\n\n # extract the value of desired output\n x_flat = flat_output['x']\n x_dot = flat_output['x_dot']\n x_ddot = flat_output['x_ddot']\n x_dddot = flat_output['x_dddot']\n x_ddddot = flat_output['x_ddddot']\n yaw = flat_output['yaw']\n yaw_dot = flat_output['yaw_dot']\n\n # computing F des\n # r trajectory\n r_T = x_flat.reshape(x_flat.shape[0],1)\n r_dT = x_dot.reshape(x_dot.shape[0],1)\n r_ddT = x_ddot.reshape(x_ddot.shape[0],1)\n # state estimate r\n r = x_state.reshape(x_state.shape[0],1)\n r_d = v.reshape(v.shape[0],1)\n # calculate rdes_ddot\n rdes_ddot = r_ddT-self.Kd@(r_d-r_dT)-self.Kp@(r-r_T)\n Fdes = self.mass*rdes_ddot+np.array([[0],[0],[self.mass*self.g]]) # column\n\n # compute u1\n R_matrix = Rotation.from_quat(q)\n R_matrix = R_matrix.as_dcm()\n b3 = R_matrix@np.array([[0],[0],[1]])\n b3_T = np.transpose(b3)\n u1 = b3_T@Fdes # a (1,1) array\n\n # determine Rdes\n b3_des = Fdes/np.linalg.norm(Fdes) # column\n b3_des_T = np.transpose(b3_des) # row, for computing\n cos_yaw = np.cos(np.array([yaw]))\n cos_yaw = cos_yaw[0]\n sin_yaw = np.sin(np.array([yaw]))\n sin_yaw = sin_yaw[0]\n a_yaw = np.array([[cos_yaw,sin_yaw,0]]) # row\n b2_des_T = np.cross(b3_des_T,a_yaw)/np.linalg.norm(np.cross(b3_des_T,a_yaw)) # row\n b1_des_T = np.cross(b2_des_T,b3_des_T)\n R_des_T = np.array([b1_des_T[0],b2_des_T[0],b3_des_T[0]]) # transpose of R\n R_des = np.transpose(R_des_T)\n\n # find eR\n eR_vee = 1/2*(R_des_T@R_matrix-R_matrix.T@R_des)\n eR = np.array([[eR_vee[2,1]],[eR_vee[0,2]],[eR_vee[1,0]]]) # column\n\n # find u2\n omega = w.reshape(w.shape[0],1)\n u2 = self.inertia@(-self.Kr@eR-self.Kw@omega) # column\n u = np.array([[u1[0,0]],[u2[0,0]],[u2[1,0]],[u2[2,0]]]) # take the scalar entry of u1\n\n # compute F1, F2, F3 and F4\n F = np.linalg.inv(self.u2Matrix)@u\n cmd_thrust = np.array([F[0,0],F[1,0],F[2,0],F[3,0]])\n\n cmd_motor_speeds=[]\n # compute motor speed (cmd_thrust is a 1-D array here, so index with a single subscript):\n for i in range(0,4):\n if cmd_thrust[i]>0:\n cmd_motor_speeds.append(np.sqrt(cmd_thrust[i]/self.k_thrust))\n else:\n cmd_motor_speeds.append(0)\n\n \n cmd_motor_speeds = np.array(cmd_motor_speeds)\n cmd_thrust = F[0,0]+F[1,0]+F[2,0]+F[3,0]\n\n # cmd_motor_speeds=np.array([0,0,0,0])\n # compute motor moment\n cmd_moment = np.array([u2[0,0],u2[1,0],u2[2,0]])\n\n # compute motor orientation\n cmd_q = Rotation.from_dcm(R_des)\n cmd_q = cmd_q.as_quat()\n\n control_input = {'cmd_motor_speeds':cmd_motor_speeds,\n 'cmd_thrust':cmd_thrust,\n 'cmd_moment':cmd_moment,\n 'cmd_q':cmd_q}\n return 
control_input","sub_path":"proj1_3/meam620-2020/proj1_3/code/se3_control.py","file_name":"se3_control.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"266939273","text":"from __future__ import division, absolute_import\n\n__copyright__ = \"Copyright (C) 2012 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom six.moves import range, zip\nimport sumpy.symbolic as sym # noqa\n\nfrom sumpy.symbolic import vector_xreplace\nfrom sumpy.expansion import (\n ExpansionBase, VolumeTaylorExpansion, LaplaceConformingVolumeTaylorExpansion,\n HelmholtzConformingVolumeTaylorExpansion)\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n__doc__ = \"\"\"\n\n.. autoclass:: VolumeTaylorMultipoleExpansion\n.. autoclass:: H2DMultipoleExpansion\n.. 
autoclass:: Y2DMultipoleExpansion\n\n\"\"\"\n\n\nclass MultipoleExpansionBase(ExpansionBase):\n pass\n\n\n# {{{ volume taylor\n\nclass VolumeTaylorMultipoleExpansionBase(MultipoleExpansionBase):\n \"\"\"\n Coefficients represent the terms in front of the kernel derivatives.\n \"\"\"\n\n def coefficients_from_source(self, avec, bvec, rscale):\n from sumpy.kernel import DirectionalSourceDerivative\n kernel = self.kernel\n\n from sumpy.tools import mi_power, mi_factorial\n\n if not self.use_rscale:\n rscale = 1\n\n if isinstance(kernel, DirectionalSourceDerivative):\n if kernel.get_base_kernel() is not kernel.inner_kernel:\n raise NotImplementedError(\"more than one source derivative \"\n \"not supported at present\")\n\n from sumpy.symbolic import make_sym_vector\n dir_vec = make_sym_vector(kernel.dir_vec_name, kernel.dim)\n\n coeff_identifiers = self.get_full_coefficient_identifiers()\n result = [0] * len(coeff_identifiers)\n\n for idim in range(kernel.dim):\n for i, mi in enumerate(coeff_identifiers):\n if mi[idim] == 0:\n continue\n\n derivative_mi = tuple(mi_i - 1 if iaxis == idim else mi_i\n for iaxis, mi_i in enumerate(mi))\n\n result[i] += (\n - mi_power(avec, derivative_mi) * mi[idim]\n * dir_vec[idim])\n for i, mi in enumerate(coeff_identifiers):\n result[i] /= (mi_factorial(mi) * rscale ** sum(mi))\n else:\n avec = [sym.UnevaluatedExpr(a * rscale**-1) for a in avec]\n\n result = [\n mi_power(avec, mi) / mi_factorial(mi)\n for mi in self.get_full_coefficient_identifiers()]\n return (\n self.derivative_wrangler.get_stored_mpole_coefficients_from_full(\n result, rscale))\n\n def get_scaled_multipole(self, expr, bvec, rscale, nderivatives,\n nderivatives_for_scaling=None):\n if nderivatives_for_scaling is None:\n nderivatives_for_scaling = nderivatives\n\n if self.kernel.has_efficient_scale_adjustment:\n return (\n self.kernel.adjust_for_kernel_scaling(\n vector_xreplace(\n expr,\n bvec, bvec * rscale**-1),\n rscale, nderivatives)\n / rscale ** (nderivatives - nderivatives_for_scaling))\n else:\n return (rscale**nderivatives_for_scaling * expr)\n\n def evaluate(self, coeffs, bvec, rscale):\n if not self.use_rscale:\n rscale = 1\n\n taker = self.get_kernel_derivative_taker(bvec)\n\n result = sym.Add(*tuple(\n coeff\n * self.get_scaled_multipole(taker.diff(mi), bvec, rscale, sum(mi))\n for coeff, mi in zip(coeffs, self.get_coefficient_identifiers())))\n\n return result\n\n def get_kernel_derivative_taker(self, bvec):\n return (self.derivative_wrangler.get_derivative_taker(\n self.kernel.get_expression(bvec), bvec))\n\n def translate_from(self, src_expansion, src_coeff_exprs, src_rscale,\n dvec, tgt_rscale):\n if not isinstance(src_expansion, type(self)):\n raise RuntimeError(\"do not know how to translate %s to \"\n \"Taylor multipole expansion\"\n % type(src_expansion).__name__)\n\n if not self.use_rscale:\n src_rscale = 1\n tgt_rscale = 1\n\n logger.info(\"building translation operator: %s(%d) -> %s(%d): start\"\n % (type(src_expansion).__name__,\n src_expansion.order,\n type(self).__name__,\n self.order))\n\n from sumpy.tools import mi_factorial\n\n src_mi_to_index = dict((mi, i) for i, mi in enumerate(\n src_expansion.get_coefficient_identifiers()))\n\n for i, mi in enumerate(src_expansion.get_coefficient_identifiers()):\n src_coeff_exprs[i] *= mi_factorial(mi)\n\n result = [0] * len(self.get_full_coefficient_identifiers())\n from pytools import generate_nonnegative_integer_tuples_below as gnitb\n\n for i, tgt_mi in enumerate(\n self.get_full_coefficient_identifiers()):\n\n 
tgt_mi_plus_one = tuple(mi_i + 1 for mi_i in tgt_mi)\n\n for src_mi in gnitb(tgt_mi_plus_one):\n try:\n src_index = src_mi_to_index[src_mi]\n except KeyError:\n # Omitted coefficients: not life-threatening\n continue\n\n contrib = src_coeff_exprs[src_index]\n\n for idim in range(self.dim):\n n = tgt_mi[idim]\n k = src_mi[idim]\n assert n >= k\n from sympy import binomial\n contrib *= (binomial(n, k)\n * sym.UnevaluatedExpr(dvec[idim]/tgt_rscale)**(n-k))\n\n result[i] += (\n contrib\n * sym.UnevaluatedExpr(src_rscale/tgt_rscale)**sum(src_mi))\n\n result[i] /= mi_factorial(tgt_mi)\n\n logger.info(\"building translation operator: done\")\n return (\n self.derivative_wrangler.get_stored_mpole_coefficients_from_full(\n result, tgt_rscale))\n\n\nclass VolumeTaylorMultipoleExpansion(\n VolumeTaylorExpansion,\n VolumeTaylorMultipoleExpansionBase):\n\n def __init__(self, kernel, order, use_rscale=None):\n VolumeTaylorMultipoleExpansionBase.__init__(self, kernel, order, use_rscale)\n VolumeTaylorExpansion.__init__(self, kernel, order, use_rscale)\n\n\nclass LaplaceConformingVolumeTaylorMultipoleExpansion(\n LaplaceConformingVolumeTaylorExpansion,\n VolumeTaylorMultipoleExpansionBase):\n\n def __init__(self, kernel, order, use_rscale=None):\n VolumeTaylorMultipoleExpansionBase.__init__(self, kernel, order, use_rscale)\n LaplaceConformingVolumeTaylorExpansion.__init__(\n self, kernel, order, use_rscale)\n\n\nclass HelmholtzConformingVolumeTaylorMultipoleExpansion(\n HelmholtzConformingVolumeTaylorExpansion,\n VolumeTaylorMultipoleExpansionBase):\n\n def __init__(self, kernel, order, use_rscale=None):\n VolumeTaylorMultipoleExpansionBase.__init__(self, kernel, order, use_rscale)\n HelmholtzConformingVolumeTaylorExpansion.__init__(\n self, kernel, order, use_rscale)\n\n# }}}\n\n\n# {{{ 2D Hankel-based expansions\n\nclass _HankelBased2DMultipoleExpansion(MultipoleExpansionBase):\n def get_storage_index(self, k):\n return self.order+k\n\n def get_coefficient_identifiers(self):\n return list(range(-self.order, self.order+1))\n\n def coefficients_from_source(self, avec, bvec, rscale):\n if not self.use_rscale:\n rscale = 1\n\n from sumpy.symbolic import sym_real_norm_2\n bessel_j = sym.Function(\"bessel_j\")\n avec_len = sym_real_norm_2(avec)\n\n arg_scale = self.get_bessel_arg_scaling()\n\n # The coordinates are negated since avec points from source to center.\n source_angle_rel_center = sym.atan2(-avec[1], -avec[0])\n return [\n self.kernel.postprocess_at_source(\n bessel_j(l, arg_scale * avec_len)\n / rscale ** abs(l)\n * sym.exp(sym.I * l * -source_angle_rel_center),\n avec)\n for l in self.get_coefficient_identifiers()]\n\n def evaluate(self, coeffs, bvec, rscale):\n if not self.use_rscale:\n rscale = 1\n\n from sumpy.symbolic import sym_real_norm_2\n hankel_1 = sym.Function(\"hankel_1\")\n bvec_len = sym_real_norm_2(bvec)\n target_angle_rel_center = sym.atan2(bvec[1], bvec[0])\n\n arg_scale = self.get_bessel_arg_scaling()\n\n return sum(coeffs[self.get_storage_index(l)]\n * self.kernel.postprocess_at_target(\n hankel_1(l, arg_scale * bvec_len)\n * rscale ** abs(l)\n * sym.exp(sym.I * l * target_angle_rel_center), bvec)\n for l in self.get_coefficient_identifiers())\n\n def translate_from(self, src_expansion, src_coeff_exprs, src_rscale,\n dvec, tgt_rscale):\n if not isinstance(src_expansion, type(self)):\n raise RuntimeError(\"do not know how to translate %s to %s\"\n % (type(src_expansion).__name__,\n type(self).__name__))\n\n if not self.use_rscale:\n src_rscale = 1\n tgt_rscale = 1\n\n from 
sumpy.symbolic import sym_real_norm_2\n dvec_len = sym_real_norm_2(dvec)\n bessel_j = sym.Function(\"bessel_j\")\n new_center_angle_rel_old_center = sym.atan2(dvec[1], dvec[0])\n\n arg_scale = self.get_bessel_arg_scaling()\n\n translated_coeffs = []\n for l in self.get_coefficient_identifiers():\n translated_coeffs.append(\n sum(src_coeff_exprs[src_expansion.get_storage_index(m)]\n * bessel_j(m - l, arg_scale * dvec_len)\n * src_rscale ** abs(m)\n / tgt_rscale ** abs(l)\n * sym.exp(sym.I * (m - l) * new_center_angle_rel_old_center)\n for m in src_expansion.get_coefficient_identifiers()))\n return translated_coeffs\n\n\nclass H2DMultipoleExpansion(_HankelBased2DMultipoleExpansion):\n def __init__(self, kernel, order, use_rscale=None):\n from sumpy.kernel import HelmholtzKernel\n assert (isinstance(kernel.get_base_kernel(), HelmholtzKernel)\n and kernel.dim == 2)\n\n super(H2DMultipoleExpansion, self).__init__(\n kernel, order, use_rscale=use_rscale)\n\n def get_bessel_arg_scaling(self):\n return sym.Symbol(self.kernel.get_base_kernel().helmholtz_k_name)\n\n\nclass Y2DMultipoleExpansion(_HankelBased2DMultipoleExpansion):\n def __init__(self, kernel, order, use_rscale=None):\n from sumpy.kernel import YukawaKernel\n assert (isinstance(kernel.get_base_kernel(), YukawaKernel)\n and kernel.dim == 2)\n\n super(Y2DMultipoleExpansion, self).__init__(\n kernel, order, use_rscale=use_rscale)\n\n def get_bessel_arg_scaling(self):\n return sym.I * sym.Symbol(self.kernel.get_base_kernel().yukawa_lambda_name)\n\n# }}}\n\n# vim: fdm=marker\n","sub_path":"sumpy/expansion/multipole.py","file_name":"multipole.py","file_ext":"py","file_size_in_byte":12225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"18772857","text":"import sqlite3\nimport numeral\n\n\ndef analyze_form(form, number):\n ordn, adj, zbirn = numeral.make_numeral(number)\n\n founds = []\n j = 0\n\n for l in [ordn, adj, zbirn]:\n\n element = l\n\n if type(element) == list:\n temp = element\n element = dict()\n element[0] = temp\n\n for key in element.keys():\n i = 0\n for item in element[key]:\n for word in item:\n if word == form:\n founds.append((j,key,i))\n i += 1\n j += 1\n\n return founds\n\n\ndef numeral_to_digits(num):\n\n num = num.split(' ')\n\n #print(num)\n\n conn = sqlite3.connect('test.db')\n\n cur = conn.execute('''select id, osnova_1, osnova_2, osnova_3, stem from numerals\n left join adj_stem on numerals.id == adj_stem.num''')\n\n all_list = cur.fetchall()\n\n conn.close()\n\n all_dict = dict()\n\n for item in all_list:\n all_dict[item[0]] = list()\n for element in item:\n if element is not None and type(element) != int:\n all_dict[item[0]].append(element)\n\n #print(all_dict)\n all_dict[1] = ['од', 'перш']\n\n\n match_list = list()\n\n i = 0\n\n for word in num:\n match = False\n match_list.append([])\n while len(word) > 1 and match is False:\n for key in reversed(sorted(all_dict.keys())):\n for item in all_dict[key]:\n if word == item:\n #match = True\n match_list[i].append(key)\n #print(key)\n word = word[:-1]\n i += 1\n\n #print(match_list)\n\n for item in match_list:\n item = set(item)\n\n #print(match_list)\n\n for item in match_list:\n if len(item) > 1:\n for digit in item:\n print('!!!!' 
+ ' ' + str(digit) + ' ' + num[match_list.index(item)])\n res = analyze_form(num[match_list.index(item)], str(digit))\n print(res)\n if res == []:\n item.remove(digit)\n #print(match_list)\n\n result = 0\n\n for item in match_list:\n if item[0] == 1000:\n result = result * item[0]\n else:\n result += item[0]\n\n return str(result)\n\n\nres = analyze_form(\"чотирмастами сімдесятьома п'ятьома\",'475')\nprint(res)","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"319385045","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 27 14:38:37 2018\r\n\r\n@author: Gab\r\n\"\"\"\r\n\r\nDNAsequence = 'ACGT' * 3 + 'TTATT' * 5\r\nprint (DNAsequence)\r\n\r\ni = 1 \r\n\r\nfor letter in DNAsequence: \r\n print(DNAsequence[i:])\r\n i = i + 1\r\n\r\nuniquesubstrings = set()\r\n \r\nrange(len(DNAsequence))\r\nfor i in range(len(DNAsequence)) : \r\n if i < len(DNAsequence)-2:\r\n kmer = DNAsequence [i:i+3]\r\n print(kmer)\r\n uniquesubstrings.add(kmer)\r\n \r\nprint(uniquesubstrings)","sub_path":"AssignmentDay1_2.py","file_name":"AssignmentDay1_2.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"329544565","text":"# Copyright: (c) Oskar Petersons 2013\n\n\"\"\"Frame for editing infeasible states in the conflict.\n\nLoaded by the a_Main_Window module, and implements all of its required\ninterfaces.\n\"\"\"\n\nfrom tkinter import Tk, StringVar, N, S, E, W, VERTICAL, HORIZONTAL\nfrom tkinter import ttk\nfrom frame_00_frameTemplate import FrameTemplate\nfrom widgets_f02_01_radioButtonEntry import RadiobuttonEntry\nfrom widgets_f02_02_infeasTreeview import TreeInfeas\nfrom widgets_f02_03_feasDisp import FeasDisp\nfrom data_01_conflictModel import ConflictModel\nimport data_03_gmcrUtilities as gmcrUtil\n\nNSEW = (N, S, E, W)\n\n\nclass InfeasInpFrame(FrameTemplate):\n \"\"\"Frame for input of infeasible states.\"\"\"\n\n # Label used for button to select frame in the main program.\n buttonLabel = 'Infeasible States'\n # Image used on button to select frame, when frame is active.\n activeIcon = 'icons/Infeasible_States_ON.gif'\n # Image used on button to select frame, when frame is inactive.\n inactiveIcon = 'icons/Infeasible_States_OFF.gif'\n # Help text to be displayed when screen is active.\n helpText = (\"Enter infeasible states using the box at left. Removing as \"\n \"infeasible state will remove all states that match the \"\n \"pattern from the conflict. Removing as mutually exclusive \"\n \"will remove all states where ANY TWO OR MORE of the specified\"\n \" options occur together.\")\n\n# ######################## INITIALIZATION ################################\n def __init__(self, master, conflict, *args):\n \"\"\"Initialize the Frame. 
Does not build widgets.\"\"\"\n FrameTemplate.__init__(self, master, conflict, self.buttonLabel,\n self.activeIcon, self.inactiveIcon,\n self.helpText)\n\n self.lastBuildDMs = None\n self.lastBuildOptions = None\n self.lastBuildInfeasibles = None\n\n# ############################ METHODS ###################################\n\n def hasRequiredData(self):\n \"\"\"Check that minimum data required to render the frame exists.\"\"\"\n if len(self.conflict.decisionMakers) < 1:\n return False\n if len(self.conflict.options) < 1:\n return False\n else:\n return True\n\n def dataChanged(self):\n \"\"\"Check if data has changed since the last build of the Frame.\"\"\"\n if self.lastBuildDMs != self.conflict.decisionMakers.export_rep():\n return True\n if self.lastBuildOptions != self.conflict.options.export_rep():\n return True\n if self.lastBuildInfeasibles != self.conflict.infeasibles.export_rep():\n return True\n else:\n return False\n\n def buildFrame(self):\n \"\"\"Contruct frame widgets and initialize data.\"\"\"\n if self.built:\n return\n\n # Ensure all required parts of the conflict model are properly set-up.\n self.conflict.reorderOptionsByDM()\n self.conflict.options.set_indexes()\n self.conflict.infeasibles.validate()\n self.conflict.recalculateFeasibleStates()\n\n self.lastBuildDMs = self.conflict.decisionMakers.export_rep()\n self.lastBuildOptions = self.conflict.options.export_rep()\n self.lastBuildInfeasibles = self.conflict.infeasibles.export_rep()\n\n # Define variables that will display in the infoFrame\n self.originalStatesText = StringVar(value='Original States: init')\n self.removedStatesText = StringVar(value='States Removed: init')\n self.feasStatesText = StringVar(value='States Remaining: init')\n\n # Define frame-specific variables\n self.warnText = StringVar(value='')\n\n # infoFrame: frame and label definitions (with master 'self.infoFrame')\n self.originalStatesLabel = ttk.Label(\n self.infoFrame, textvariable=self.originalStatesText)\n self.removedStatesLabel = ttk.Label(\n self.infoFrame, textvariable=self.removedStatesText)\n self.feasStatesLabel = ttk.Label(\n self.infoFrame, textvariable=self.feasStatesText)\n\n # helpFrame: frame and label definitions (with master 'self.helpFrame')\n self.helpLabel = ttk.Label(self.helpFrame, textvariable=self.helpVar,\n wraplength=150)\n\n # Define frame-specific input widgets (with 'self' as master)\n self.optsFrame = ttk.Frame(self)\n self.hSep = ttk.Separator(self, orient=VERTICAL)\n self.infeasFrame = ttk.Panedwindow(self, orient=HORIZONTAL)\n\n self.optsInp = RadiobuttonEntry(self.optsFrame, self.conflict)\n self.infeasDisp = TreeInfeas(self.infeasFrame, self.conflict)\n self.feasList = FeasDisp(self.infeasFrame, self.conflict)\n self.infeasFrame.add(self.infeasDisp)\n self.infeasFrame.add(self.feasList)\n\n # ######## preliminary gridding and option configuration\n\n # configuring the input frame\n self.grid(column=0, row=0, rowspan=5, sticky=NSEW)\n self.grid_remove()\n\n self.columnconfigure(2, weight=3)\n self.rowconfigure(0, weight=1)\n\n # configuring infoFrame & infoFrame widgets\n self.infoFrame.grid(column=2, row=0, sticky=NSEW, padx=3, pady=3)\n self.infoFrame.grid_remove()\n self.originalStatesLabel.grid(column=0, row=1, sticky=NSEW)\n self.removedStatesLabel.grid(column=0, row=2, sticky=NSEW)\n self.feasStatesLabel.grid(column=0, row=3, sticky=NSEW)\n\n # configuring helpFrame & helpFrame widgets\n self.helpFrame.grid(column=2, row=1, sticky=NSEW, padx=3, pady=3)\n self.helpFrame.grid_remove()\n 
self.helpLabel.grid(column=0, row=0, sticky=NSEW)\n\n # configuring frame-specific options\n self.optsFrame.columnconfigure(0, weight=1)\n self.optsFrame.rowconfigure(0, weight=1)\n self.optsFrame.grid(column=0, row=0, sticky=NSEW)\n\n self.infeasFrame.grid(column=2, row=0, sticky=NSEW)\n\n self.optsInp.grid(column=0, row=0, columnspan=2, sticky=NSEW)\n self.optsInp.bind('<>', self.addInfeas)\n self.optsInp.bind('<>', self.addMutEx)\n\n self.infeasDisp.bind('<>', self.selChg)\n self.infeasDisp.bind('<>', self.refresh)\n\n self.hSep.grid(column=1, row=0, rowspan=10, sticky=NSEW)\n\n self.refresh()\n\n self.built = True\n\n def refresh(self, *args):\n \"\"\"Refresh data in all active display widgets.\"\"\"\n self.infeasDisp.refreshView()\n self.feasList.refreshList()\n self.updateTotals()\n\n def updateTotals(self, event=None):\n \"\"\"Update data shown in the infobox.\"\"\"\n numO = len(self.conflict.options)\n numF = len(self.conflict.feasibles)\n self.originalStatesText.set('Original States: {}'.format(2**numO))\n self.feasStatesText.set('Feasible States: {}'.format(numF))\n self.removedStatesText.set('States Removed: {}'.format(2**numO - numF))\n\n def addInfeas(self, *args):\n \"\"\"Remove an infeasible state from the conflict.\"\"\"\n infeas = self.optsInp.getStates()\n self.conflict.infeasibles.append(infeas)\n self.conflict.recalculateFeasibleStates()\n self.refresh()\n\n def addMutEx(self, *args):\n \"\"\"Remove a set of Mutually Exclusive States from the conflict.\"\"\"\n mutEx = self.optsInp.getStates()\n mutEx = gmcrUtil.mutuallyExclusive(mutEx)\n for infeas in mutEx:\n self.conflict.infeasibles.append(list(infeas))\n self.refresh()\n\n def selChg(self, event):\n \"\"\"Triggered when the selection changes in the treeview.\"\"\"\n state = self.conflict.infeasibles[event.x].name\n self.optsInp.setStates(state)\n\n def enter(self):\n \"\"\"Run when entering the Infeasible States screen.\"\"\"\n if self.dataChanged():\n self.clearFrame()\n\n FrameTemplate.enter(self)\n\n self.optsInp.reloadOpts()\n\n# #############################################################################\n# ############### TESTING ###########\n# #############################################################################\n\n# Code in this section is only run when this module is run by itself. 
It serves\n# as a test of module functionality.\n\n\ndef main():\n \"\"\"Run screen in test window.\"\"\"\n root = Tk()\n root.columnconfigure(0, weight=1)\n root.rowconfigure(0, weight=1)\n\n cFrame = ttk.Frame(root)\n cFrame.columnconfigure(0, weight=1)\n cFrame.rowconfigure(1, weight=1)\n cFrame.grid(column=0, row=0, sticky=NSEW)\n\n hSep = ttk.Separator(cFrame, orient=VERTICAL)\n hSep.grid(column=1, row=0, rowspan=10, sticky=NSEW)\n\n g1 = ConflictModel('Prisoners.gmcr')\n\n testFrame = InfeasInpFrame(cFrame, g1)\n testFrame.enter()\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"frame_02_infeasibles.py","file_name":"frame_02_infeasibles.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"282492322","text":"from flask import Flask, render_template, request, g, session, redirect, send_from_directory\nimport urllib2\nimport re\nimport json\nimport UNSWData\n\napp = Flask(__name__)\napp.secret_key = 'B3Dvm1BJF1'\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\treturn render_template(\"index.html\")\n\n@app.route('/get', methods=['GET', 'POST'])\ndef get():\n\t# open a data stream\n\tparams = request.args\n\tds = UNSWData.DataStream(params)\n\terr = ds.validateParams()\n\tif err != None:\n\t\treturn json.dumps(err)\n\t# get request type\n\treq = params[\"type\"]\n\t# handle request\n\tif(req == \"facCodes\"):\n\t\t# return a list of all fac codes for\n\t\t# given semester and year\n\t\tfacCodes = list(map(lambda x: x[0], ds.getAllFaculties()))\n\t\treturn json.dumps(facCodes)\n\telif(req == \"courseCodeValidty\"):\n\t\t# Check if a given course code is valid\n\t\tif \"courseCode\" not in request.args:\n\t\t\treturn json.dumps({\"error\": True, \"error-msg\": \"Course code not specified\"})\n\t\targ = request.args[\"courseCode\"]\n\t\treturn json.dumps(ds.validateCourseCode(arg))\n\telse:\n\t\t# Unknown request type\n\t\treturn json.dumps({\"error\": True, \"error-msg\": \"Unknown Request Type\"})\n\n@app.route('/search', methods=['POST'])\ndef search():\n\t# open a data stream\n\tparams = request.get_json()\n\tds = UNSWData.DataStream(params)\n\terr = ds.validateParams()\n\tif err != None:\n\t\treturn json.dumps(err)\n\t# do the search\n\tfilters = request.get_json()\n\treturn json.dumps(ds.courseSearch(filters))\n\n@app.route('/')\ndef send_js(path):\n return send_from_directory('', path)\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)","sub_path":"prod-server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"81014568","text":"# ##################################1\nmy_int = 102030405000006070809\nmy_int = str(my_int)\nprint(my_int.count('0'))\n# ##################################2\nnum = 50760700033000000\nnew_n = str(num)\nprint(len(new_n) - len(new_n.rstrip('0')))\n# Решила двумя способами,но первый кажется лучше\nnum = 5076070003300\ncount = 0\nfor e in str(num)[::-1]:\n if e == '0':\n count += 1\n else:\n break\nprint(count)\n# ##################################3a\nmy_list_1 = [1, 2, 3, 4, 5, 6, 7, 8]\nmy_list_2 = [9, 10, 11, 12, 13, 14, 15, 16]\nmy_result = []\nfor symbol in my_list_1[1::2]:\n my_result.append(symbol)\nfor symbol in my_list_2[::2]:\n my_result.append(symbol)\nprint(my_result)\n# ##################################3b\nmy_list_1 = [1, 3, 2, 4, 5]\nmy_list_2 = [10, 20, 15, 25, 22]\nmy_result = []\nfor 
element in my_list_1:\n if not element % 2:\n my_result.append(element)\nfor element in my_list_2:\n if element % 2:\n my_result.append(element)\nprint(my_result)\n# ##################################4\nmy_list = [1, 2, 3, 4]\nnew_list = my_list[1:].copy()\nnew_list.append(my_list[0])\nprint(new_list)\n# ##################################5\nmy_list = [1, 9, 5, 2]\nmy_list.append(my_list.pop(0))\nprint(my_list)\n# ##################################6\nmy_str = \"40 больше чем 30 но меньше чем 100\"\nwords = my_str.split(' ')\nproduct = 0\nfor word in words:\n try:\n value = int(word)\n product += value\n except:\n pass\nprint(product)\n# #################################7\nmy_str = 'acdbe'\npip = '_'\nif len(my_str) % 2:\n my_str = my_str + pip\nmy_list = [my_str[i:i+2] for i in range(0, len(my_str), 2)]\nprint(my_list)\n# ################################8\nmy_str = \"My_lore str\"\nl_limit = \"o\"\nr_limit = \"t\"\nsub_str = (my_str[my_str.find(l_limit)+1: my_str.find(r_limit)])\nprint(sub_str)\n# ####################################9\nmy_str = \"Li worem ipsum\"\nl_limit = \"o\"\nr_limit = \"m\"\ni1 = my_str.find(l_limit)\ni2 = my_str.rfind(r_limit)\nsub_str = my_str[i1+1:i2]\nprint(sub_str)\n# ##################################10\nmy_list = [2, 4, 1, 5, 3, 9, 0, 7]\nresult = 0\nfor i in range(1, len(my_list) - 1):\n if my_list[i - 1] < my_list[i] > my_list[i + 1]:\n result += 1\nprint(result)","sub_path":"H_W5.py","file_name":"H_W5.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"454369514","text":"from thewarden.models import AccountInfo, Trades\nfrom flask_wtf import FlaskForm\nfrom flask_login import current_user\nfrom wtforms import (\n StringField,\n SubmitField,\n PasswordField,\n DateTimeField,\n TextAreaField,\n SelectField,\n BooleanField,\n ValidationError,\n)\nfrom wtforms.widgets import PasswordInput\nfrom wtforms.validators import DataRequired\n\n\nclass DojoForm(FlaskForm):\n dojo_onion = StringField(\"Dojo Onion Address\")\n dojo_token = StringField(\"Dojo Authentication Token\")\n dojo_apikey = PasswordField(\"Dojo API Key\", widget=PasswordInput(hide_value=False))\n submit = SubmitField(\"Update\")\n\n\nclass AddressForm(FlaskForm):\n access_methods = (\n (\"0\", \"\"),\n (\"1\", \"Dojo Only\"),\n (\"2\", \"OXT (over Tor)\"),\n (\"3\", \"Dojo then OXT\"),\n )\n\n address = StringField(\"Bitcoin Address\")\n last_check = DateTimeField(\"Last check on the Blockchain\")\n check_method = SelectField(\n \"Select Method to monitor balance\", choices=access_methods, default=\"\"\n )\n auto_check = BooleanField(\"Monitor\")\n account = StringField(\"Linked to Account\")\n hd_parent = StringField(\"Parent HD Address\")\n notes = TextAreaField(\"Notes\")\n submit = SubmitField(\"Submit\")\n\n def validate_address(self, address):\n add = address.data\n if add == \"\":\n raise ValidationError(\"Address cannot be empty\")\n if \"xpub\" in add.lower():\n raise ValidationError(\"XPUBs not accepted here\")\n if \"ypub\" in add.lower():\n raise ValidationError(\"YPUBs not accepted here\")\n\n def validate_check_method(self, check_method):\n if check_method.data == \"0\":\n raise ValidationError(\"Please choose one method\")\n\n def validate_account(self, account):\n # Only accept accounts already registered in trades or accountinfo\n found = False\n tradeaccounts = Trades.query.filter_by(user_id=current_user.username).group_by(\n Trades.trade_account\n )\n\n accounts = 
AccountInfo.query.filter_by(user_id=current_user.username).group_by(\n AccountInfo.account_longname\n )\n\n for item in tradeaccounts:\n if account.data.upper() in item.trade_account.upper():\n found = True\n for item in accounts:\n if account.data.upper() in item.account_longname.upper():\n found = True\n\n if not found:\n raise ValidationError(\n \"Choose an existing account. If account is not registered, include first.\"\n )\n\n\nclass Custody_Account(FlaskForm):\n account_longname = StringField(\"Account Name\", [DataRequired()])\n account_blockchain_id = StringField(\"HD Address (XPUB, YPUB, others)\")\n access_methods = (\n (\"0\", \"\"),\n (\"1\", \"Dojo Only\"),\n (\"2\", \"OXT (over Tor)\"),\n (\"3\", \"Dojo then OXT\"),\n )\n check_method = SelectField(\n \"Select Method to monitor balance\", choices=access_methods, default=\"\"\n )\n auto_check = BooleanField(\"Monitor\")\n notes = TextAreaField(\"Notes\")\n submit = SubmitField(\"Submit\")\n","sub_path":"thewarden/node/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"77446398","text":"#------------------------------------------------------------------------------\n# Image Classification Model Builder\n# Copyright (c) 2019, scpepper All rights reserved.\n#------------------------------------------------------------------------------\nimport numpy as np\n\n# Please change these values for your environment.\n# -> -> -> -> -> -> -> -> -> -> -> -> -> -> -> -> -> -> -> ->\n\n# Specify root Directory\ngdrive_base='D:/20.programs/github/ml-image-classification/general/learning/'\n\n# Specufy dataset name\ndataset_name='carp150x200'\n\n# Specufy number of classes\nnum_classes = 10\n\n# Specify class names\nlabels = np.array([\n '1_suzuki',\n '2_tanaka',\n '12_kuri',\n '14_ohsera',\n '19_nomura',\n '27_aizawa',\n '28_tokoda',\n '33_kikuchi',\n '55_matsuyama',\n '95_batista'])\n\n# Specufy number of files in each class\nnum_images = 600\n\n# Specufy image size\nheight, width, color = 200, 150, 3\n\n# Specufy Model Structure (CNN, VGG16, RESNET1 or RESNET2)\nmodel_opt=\"RESNET2\"\n\n# Specify the rate of validation\nvalidate_rate=0.2\n\n# Specify training epoches\nepochs=10\n\n# Specify training batch size\nbatch_size=16\n\n# <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <-\n","sub_path":"general/learning/config_carp150x200_class10.py","file_name":"config_carp150x200_class10.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"36716050","text":"assignments = []\n\ndef cross(A, B):\n \"Cross product of elements in A and elements in B.\"\n return [s + t for s in A for t in B]\n\ndef diagonals(A, B):\n assert len(A) == len(B), \"A and B must be strings of same length\"\n diag1 = []\n diag2 = []\n slen = len(A)\n for ii in range(0, len(A)):\n diag1.append(A[ii]+B[ii])\n diag2.append(A[ii]+B[slen-ii-1])\n return [diag1, diag2]\n\n\n\n\n\nrows = 'ABCDEFGHI'\ncols = '123456789'\nrow_units = [cross(r, cols) for r in rows]\nboxes = cross(rows, cols)\ncolumn_units = [cross(rows, c) for c in cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\ndiags = diagonals(rows, cols)\nunitlist = row_units + column_units + square_units + diags\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\n\ndef 
check_diagonal_constraint(values):\n for diag in diags:\n vdict = {values[bx]: bx for bx in diag}\n #print(\"Vdict : \", vdict)\n if len(vdict) != 9:\n return False\n return values\n\ndef precheck_diagonal_constraint(values):\n for diag in diags:\n if all(len(values[bx]) == 1 for bx in diag):\n vdict = {values[bx]: bx for bx in diag}\n if len(vdict) != 9:\n return False\n return values\n\ndef eliminate_in_diagonals(values):\n for diag in diags:\n solved_values = [box for box in diag if len(values[box]) == 1]\n ##print(\"Xyz: \", solved_values)\n for box in solved_values:\n for peer_box in diag:\n if (peer_box != box) and (len(values[peer_box]) != 1):\n assign_value(values, peer_box, values[peer_box].replace(values[box], ''))\n return values\n\ndef assign_value(values, box, value):\n \"\"\"\n Please use this function to update your values dictionary!\n Assigns a value to a given box. If it updates the board record it.\n \"\"\"\n\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values\n\ndef add_double_digit_box(double_digit_dict, ddigit_val, ddigit_box):\n if (ddigit_val in double_digit_dict) :\n double_digit_dict[ddigit_val].append(ddigit_box)\n else:\n double_digit_dict[ddigit_val] = [ddigit_box]\n\ndef naked_twins(values):\n \"\"\"Eliminate values using the naked twins strategy.\n Args:\n values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n Returns:\n the values dictionary with the naked twins eliminated from peers.\n \"\"\"\n for unit in unitlist:\n # Find all instances of naked twins\n # track all boxes with double digits, maintain them in a dict with ddigit:list_of_boxes\n double_digit_boxes = {}\n for box in unit:\n if (len(values[box]) == 2):\n add_double_digit_box(double_digit_boxes, values[box], box)\n # Eliminate the naked twins as possibilities for their peers\n #print(double_digit_boxes)\n for ddgt, box_list in double_digit_boxes.items():\n if (len(box_list) == 2):\n for box in unit:\n if (box not in box_list) and (len(values[box]) > 1):\n rval = values[box].replace(ddgt[0], '').replace(ddgt[1], '')\n ### print(\"attempting to replace value \", values[box], \" for box \", box, \" with \", rval)\n assign_value(values, box, rval)\n return values\n\n\ndef grid_values(grid):\n \"\"\"\n Convert grid into a dict of {square: char} with '123456789' for empties.\n Args:\n grid(string) - A grid in string form.\n Returns:\n A grid in dictionary form\n Keys: The boxes, e.g., 'A1'\n Values: The value in each box, e.g., '8'. 
If the box has no value, then the value will be '123456789'.\n \"\"\"\n assert len(grid) == 81, \"Input grid must be a string of length 81 (9x9)\"\n values = []\n all_digits = '123456789'\n for c in grid:\n if c == '.':\n values.append(all_digits)\n elif c in all_digits:\n values.append(c)\n assert len(values) == 81\n return dict(zip(boxes, values))\n\ndef display(values):\n \"\"\"\n Display the values as a 2-D grid.\n Args:\n values(dict): The sudoku in dictionary form\n \"\"\"\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-' * (width * 3)] * 3)\n for r in rows:\n print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return\n\n\n\ndef eliminate(values):\n \"\"\"Eliminate values from peers of each box with a single value.\n\n Go through all the boxes, and whenever there is a box with a single value,\n eliminate this value from the set of values of all its peers.\n\n Args:\n values: Sudoku in dictionary form.\n Returns:\n Resulting Sudoku in dictionary form after eliminating values.\n \"\"\"\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n assign_value(values, peer, values[peer].replace(digit, ''))\n return values\n\ndef only_choice(values):\n \"\"\"Finalize all values that are the only choice for a unit.\n\n Go through all the units, and whenever there is a unit with a value\n that only fits in one box, assign the value to this box.\n\n Input: Sudoku in dictionary form.\n Output: Resulting Sudoku in dictionary form after filling in only choices.\n \"\"\"\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n assign_value(values, dplaces[0], digit)\n return values\n\ndef reduce_puzzle(values):\n stalled = False\n while not stalled:\n # Check how many boxes have a determined value\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n\n # Your code here: Use the Eliminate Strategy\n values = eliminate(values)\n ###values = eliminate_in_diagonals(values) ## as diagonal units are added to unitlist, the eliminate above\n ## shall take care of this.\n ## Do the next level elimination using naked_twins\n values = naked_twins(values)\n\n # Your code here: Use the Only Choice Strategy\n values = only_choice(values)\n\n # Check how many boxes have a determined value, to compare\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n # If no new values were added, stop the loop.\n stalled = solved_values_before == solved_values_after\n # Sanity check, return False if there is a box with zero available values:\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values\n\ndef search(values):\n \"Using depth-first search and propagation, try all possible values.\"\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes):\n return check_diagonal_constraint(values) ## Solved only if diag contraint is also met!\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recurrence to solve each one of the resulting sudokus, and\n for value in values[s]:\n new_sudoku = values.copy()\n ##new_sudoku[s] = value\n 
assign_value(new_sudoku, s, value)\n ## after selectig a value for the box, see if this violates the diagonal constraint before next search-reduce..\n ## if it violates, continue to pick the next feasible selection\n if precheck_diagonal_constraint(new_sudoku):\n attempt = search(new_sudoku)\n if attempt:\n return attempt\n\ndef solve(grid):\n \"\"\"\n Find the solution to a Sudoku grid.\n Args:\n grid(string): a string representing a sudoku grid.\n Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n Returns:\n The dictionary representation of the final sudoku grid. False if no solution exists.\n \"\"\"\n return(search(grid_values(grid)))\n\n\nif __name__ == '__main__':\n diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n display(solve(diag_sudoku_grid))\n\n try:\n from visualize import visualize_assignments\n visualize_assignments(assignments)\n\n except SystemExit:\n pass\n except:\n print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"592734501","text":"# data_scraper_for_CAB420.py\n# Modified by Shaun Sewell on 28/5/19\n#\n# Modified version of my footywire.comau data scraper.\n# Original version availible from https://github.com/shaunsewell/AFL-Analysis\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nfrom progress.bar import Bar\nimport csv\n\n# Outline stats to gather\nstats = ['Disposals', 'Kicks', 'Handballs', 'Marks', \n 'Tackles', 'Hitouts', 'Clearances', 'Clangers',\n 'Frees For', 'Frees Against', 'Goals Kicked', \n 'Behinds Kicked', 'Rushed Behinds', 'Scoring Shots', \n 'Goal Assists', 'Inside 50s', 'Rebound 50s']\n\nadvanced_stats = ['Contested Possessions', 'Uncontested Possessions', \n 'Effective Disposals', 'Disposal Efficiency %','Contested Marks', \n 'Marks Inside 50','One Percenters',\n 'Bounces']\n\n# Need to remove spaces from team names\nconverted_names = {'Gold Coast' : 'Gold_Coast', 'North Melbourne' : 'North_Melbourne', \n 'Port Adelaide': 'Port_Adelaide', 'St Kilda': 'St_Kilda', \n 'West Coast': 'West_Coast', 'Western Bulldogs': 'Western_Bulldogs'}\n\nteams_to_numbers = {'Adelaide' : 1, 'Brisbane' : 2, 'Carlton' : 3, 'Collingwood' : 4, \n 'Essendon' : 5, 'Fremantle' : 6, 'Geelong' : 7, 'Gold_Coast' : 8,\n 'GWS' : 9, 'Hawthorn' : 10, 'Melbourne' : 11, 'North_Melbourne' : 12,\n 'Port_Adelaide' : 13, 'Richmond' : 14, 'St_Kilda' : 15, 'Sydney' : 16,\n 'West_Coast' : 17, 'Western_Bulldogs' : 18}\n\n#-----------------------------------------------------------------------------------------------------------------------\n# Convenience functions\n\ndef export_match_stats(list_of_matches, season, file_name):\n\n stat_list = []\n\n with Bar('Processing stats for export', max=len(list_of_matches), suffix='%(percent)d%% - %(eta)ds remaining') as stats_bar:\n for match in list_of_matches:\n # Home team\n home_stat_line = [season, match.round_number]\n home_team_number = 0\n away_team_number = 0\n for k in teams_to_numbers:\n if k == match.home_team:\n home_team_number = teams_to_numbers[k]\n elif k == match.away_team:\n away_team_number = teams_to_numbers[k]\n\n home_stat_line.append(home_team_number) # Team\n home_stat_line.append(away_team_number) # Opponent\n home_stat_line.append(1) # Home team flag\n for keys in 
match.home_team_stats:\n home_stat_line.append(match.home_team_stats[keys])\n stat_list.append(home_stat_line)\n \n # Away Team\n away_stat_line = [season, match.round_number]\n\n away_stat_line.append(away_team_number) # Team\n away_stat_line.append(home_team_number) # Opponent\n away_stat_line.append(0) # Home team flag\n for keys in match.away_team_stats:\n away_stat_line.append(match.away_team_stats[keys])\n stat_list.append(away_stat_line)\n stats_bar.next()\n stats_bar.finish()\n\n with Bar('Exporting', max=len(stat_list), suffix='%(percent)d%% - %(eta)ds remaining') as export_bar:\n with open(file_name, mode='w') as stats_file:\n stats_writer = csv.writer(stats_file, delimiter=',')\n for row in stat_list:\n stats_writer.writerow(row)\n export_bar.next()\n export_bar.finish()\n#-----------------------------------------------------------------------------------------------------------------------\n\nclass DataScraper:\n def __init__(self):\n # Original Header for footywire.com.au \n # Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\n # Removed the image references to reduce response data.\n self.headers = {\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\",\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\", \"Referer\":\"http://www.google.com.au\",\"Cache-Control\":\"max-age=0\"}\n self.base_URL = \"http://www.footywire.com/afl/footy/ft_match_statistics?mid=\"\n self.session_obj = requests.Session()\n \n def fix_title_string(self, soup):\n # Home Team defeated by Away Team at Venue Round Number Day, Date(dd,mm,yy)\n # St Kilda defeated by Melbourne at Marvel Stadium Round 1 Saturday, 25th March 2017\n # After spliting: \n # ['AFL', 'Match', 'Statistics', ':', 'St_Kilda', 'defeated_by', 'Melbourne', \n # 'at', 'Marvel', 'Stadium', 'Round', '1', 'Saturday,', '25th', 'March', '2017']\n\n seperators = [\"defeated by\", \"defeats\", \"defeat\", \"drew with\", \"drew\"]\n for s in seperators:\n title = soup.find(string=re.compile(s))\n if title != None:\n #replace defeated by with defeated_by to making traversing the array simpler\n if s == \"defeated by\":\n title = title.replace('defeated by', 'defeated_by')\n elif s == \"drew with\":\n title = title.replace('drew with', 'drew_with')\n\n #do the same for the multi word team names\n for key in converted_names:\n title = title.replace(key, converted_names[key])\n\n split_title = title.split(' ')\n return split_title\n \n def get_matches(self, start_match_id, end_match_id):\n Matches = []\n with Bar('Getting Matches', max=(end_match_id + 1) - start_match_id, suffix='%(percent)d%% - %(eta)ds remaining') as bar:\n for id in range(start_match_id, end_match_id + 1): \n try:\n match = self.get_match(id)\n Matches.append(match)\n except:\n print(\"Failed to get match : \" + str(id))\n pass\n \n bar.next()\n bar.finish()\n return Matches\n \n def get_match(self, match_id):\n response = self.session_obj.get(self.base_URL + str(match_id), headers=self.headers)\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n # returns a page title of the form: \n \n\n split_title = self.fix_title_string(soup) \n # index 0 - 3 is 'AFL' 'Match' 'Statistics' ':' and can be ignored\n # Set home and away teams\n home_team = split_title[4]\n away_team = split_title[6]\n \n # Set the venue and round number\n venue = \"\"\n round_number = \"\"\n if 
split_title[9] == 'Round':\n venue = split_title[8]\n round_number = split_title[10]\n elif split_title[10] == 'Round': \n venue = split_title[8] + \" \" + split_title[9]\n round_number = split_title[11]\n elif split_title[11] == 'Round':\n venue = split_title[8] + \" \" + split_title[9] + \" \" + split_title[10]\n round_number = split_title[12]\n else: # Must be a final\n if split_title[10] == 'Final':\n venue = split_title[8]\n round_number = split_title[9] + \" \" + split_title[10]\n elif split_title[11] == 'Final': \n venue = split_title[8] + \" \" + split_title[9]\n round_number = split_title[10] + \" \" + split_title[11]\n\n\n # Set the day and date of the match\n day = split_title[-4].replace(',','')\n date = split_title[-3] + ' ' + split_title[-2] + ' ' + split_title[-1]\n\n # Set the attendance\n attendance_string = soup.find(text=re.compile('Attendance:')).split(' ')\n attendance = attendance_string[-1]\n\n # Get the stats\n home_team_stats, away_team_stats = self.get_stats(soup)\n home_team_stats, away_team_stats = self.get_advanced_stats(match_id, home_team_stats, away_team_stats)\n home_team_stats, away_team_stats = self.get_match_winner(soup, home_team_stats, away_team_stats)\n return Match(match_id, home_team, away_team, venue, round_number, day, date, attendance, home_team_stats, away_team_stats)\n \n def get_stats(self, soup):\n home_stats = {}\n away_stats = {}\n\n for stat in stats:\n stat_row = soup.find_all('td', text=stat)[0].find_parent('tr')\n stat_elements = stat_row.find_all('td')\n\n if stat_elements != None:\n if stat_elements[0].text == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = stat_elements[0].text\n \n if stat_elements[2].text == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = stat_elements[2].text\n \n\n return home_stats, away_stats\n\n def get_advanced_stats(self, match_id, home_stats, away_stats):\n response = self.session_obj.get(self.base_URL + str(match_id) + \"&advv=Y\", headers=self.headers)\n advanced_soup = BeautifulSoup(response.text, features=\"html.parser\")\n\n for stat in advanced_stats:\n try:\n advanced_stat_row = advanced_soup.find_all('td', text=stat)[0].find_parent('tr')\n advanced_stat_elements = advanced_stat_row.find_all('td')\n except:\n break\n \n\n if advanced_stat_elements != None:\n #Remove any annoying % signs\n temp_home = advanced_stat_elements[0].text.replace('%','')\n temp_away = advanced_stat_elements[2].text.replace('%','')\n\n if temp_home == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = temp_home\n \n if temp_away == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = temp_away\n \n return home_stats, away_stats\n\n def get_match_winner(self, soup, home_stats, away_stats):\n end_result = soup.find_all('td', text='End of Game')[0].find_parent('tr')\n end_result_element = end_result.find_all('td')\n if 'Won' in end_result_element[0].text:\n home_stats['Winner'] = 1\n away_stats['Winner'] = 0\n elif 'Won' in end_result_element[2].text:\n home_stats['Winner'] = 0\n away_stats['Winner'] = 1\n else:\n home_stats['Winner'] = 0\n away_stats['Winner'] = 0\n \n return home_stats, away_stats\n\nclass Match:\n def __init__(self, match_id, home_team, away_team, venue, round_number, day, date, attendance, home_team_stats, away_team_stats):\n self.match_id = match_id\n self.home_team = home_team\n self.away_team = away_team\n self.venue = venue\n self.round_number = round_number\n self.day = day\n self.date = date\n self.attendance = attendance\n self.home_team_stats = 
home_team_stats\n self.away_team_stats = away_team_stats\n \nclass Player:\n def __init__(self, player_id, player_name, player_team, player_age, player_stats):\n self.player_id = player_id\n self.player_name = player_name\n self.player_team = player_team\n self.player_stats = player_stats\n \n#-----------------------------------------------------------------------------------------------------------------------\n# Gather the data. This takes a long time, 20 min per season approx. \n\nscraper = DataScraper()\n\nmatch_list_2012 = scraper.get_matches(5343, 5549) #5343 5549\nexport_match_stats(match_list_2012, 2012, \"AFLStats-2012.csv\")\nmatch_list_2013 = scraper.get_matches(5550, 5756)\nexport_match_stats(match_list_2013, 2013, \"AFLStats-2013.csv\")\nmatch_list_2014 = scraper.get_matches(5757, 5963)\nexport_match_stats(match_list_2014, 2014, \"AFLStats-2014.csv\")\nmatch_list_2015 = scraper.get_matches(5964, 6171)\nexport_match_stats(match_list_2015, 2015, \"AFLStats-2015.csv\")\nmatch_list_2016_1 = scraper.get_matches(6172 , 6369)\nmatch_list_2016_2 = scraper.get_matches(9298, 9306)\nmatch_list_2016 = match_list_2016_1 + match_list_2016_2\nexport_match_stats(match_list_2016, 2016, \"AFLStats-2016.csv\")\nmatch_list_2017 = scraper.get_matches(9307, 9513)\nexport_match_stats(match_list_2017, 2017, \"AFLStats-2017.csv\")\nmatch_list_2018 = scraper.get_matches(9514, 9720) \nexport_match_stats(match_list_2018, 2018, \"AFLStats-2018.csv\")\n\n# Layout of columns in export files\n# year, round, team, opponent, h/a flag, disposals, kicks, handballs, marks,\n# tackles, hitouts, clearances, clangers, frees for, frees against, goals kicked,\n# behinds kicked, rushed behinds, scoring shots, goal assists, inside 50s, rebound 50s,\n# contested possessions, uncontested possessions, effective disposals, disposal efficiency %,\n# contested marks, marks inside 50, one percenters, bounces\n#\n","sub_path":"data_scraper_for_CAB420.py","file_name":"data_scraper_for_CAB420.py","file_ext":"py","file_size_in_byte":13089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"393194783","text":"y, sr = librosa.load(librosa.util.example_audio_file(),\n offset=30, duration=2.0)\noenv = librosa.onset.onset_strength(y=y, sr=sr)\n# Detect events without backtracking\nonset_raw = librosa.onset.onset_detect(onset_envelope=oenv,\n backtrack=False)\n# Backtrack the events using the onset envelope\nonset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)\n# Backtrack the events using the RMS energy\nrmse = librosa.feature.rmse(S=np.abs(librosa.stft(y=y)))\nonset_bt_rmse = librosa.onset.onset_backtrack(onset_raw, rmse[0])\n\n# Plot the results\nimport matplotlib.pyplot as plt\nplt.figure()\nplt.subplot(2,1,1)\nplt.plot(oenv, label='Onset strength')\nplt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')\nplt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')\nplt.legend(frameon=True, framealpha=0.75)\nplt.subplot(2,1,2)\nplt.plot(rmse[0], label='RMSE')\nplt.vlines(onset_bt_rmse, 0, rmse.max(), label='Backtracked (RMSE)', color='r')\nplt.legend(frameon=True, framealpha=0.75)\n","sub_path":"Others/library using example code/librosa-onset-onset_backtrack-1.py","file_name":"librosa-onset-onset_backtrack-1.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"137116082","text":"\"\"\"The Lorenz attractor is a set of chaotic solutions of\na particular system of ordinary 
differential equations\"\"\"\nfrom vedo import *\n\np = (25.0, -10.0, -7.0) # starting point (initial condition)\ndt = 0.002\n\npts, vel = [], []\nfor t in np.arange(0, 20, dt):\n x, y, z = p\n dpdt = [-8/3 * x + y*z, -10*(y-z), -y*x + 28*y-z]\n p = p + np.array(dpdt) * dt\n pts.append(p)\n vel.append(mag(dpdt))\n\nline = Line(pts).lw(3).cmap(\"winter\", vel)\nline.add_scalarbar(\"speed\")\nline.add_shadow(\"x\", 3, alpha=0.2)\nline.add_shadow(\"z\", -25, alpha=0.2)\n\nplt = Plotter(axes=dict(xygrid=False))\nplt.show(__doc__, line, viewup=\"z\").close()\n","sub_path":"examples/simulations/lorenz.py","file_name":"lorenz.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"454872193","text":"#!/usr/bin/env python\nimport sys\n\n# suprathreshold simulation config\nrunnum = int(sys.argv[1])-1\nnmpa = 2.6 # NMDA-AMPA ratio\nl1, l2 = 90, 150\n\n# launch simulations\nexecfile('hoc/pyloop.py')\npyloop(ratio=nmpa, loc1=l1, loc2=l2, simiter=runnum, numbranches=8)\n","sub_path":"scripts/figureS5.py","file_name":"figureS5.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"610945733","text":"import unittest\nimport numpy\nimport os\nimport amulet_nbt.amulet_nbt_py as pynbt\n\ntry:\n import amulet_nbt.amulet_cy_nbt as cynbt\nexcept (ImportError, ModuleNotFoundError) as e:\n cynbt = None\n\n\nclass AbstractNBTTest:\n class MassiveNBTTests(unittest.TestCase):\n def _setUp(self, nbt_library):\n self.maxDiff = None\n self.nbt = nbt_library\n\n def test_api(self):\n test_ = self.nbt.NBTFile(self.nbt.TAG_Compound(), name=\"hello\")\n\n test = self.nbt.NBTFile(\n name=\"hello\"\n ) # fill with an empty compound if not defined\n\n # the nbt objects with no inputs\n test[\"emptyByte\"] = self.nbt.TAG_Byte()\n test[\"emptyShort\"] = self.nbt.TAG_Short()\n test[\"emptyInt\"] = self.nbt.TAG_Int()\n test[\"emptyLong\"] = self.nbt.TAG_Long()\n test[\"emptyFloat\"] = self.nbt.TAG_Float()\n test[\"emptyDouble\"] = self.nbt.TAG_Double()\n test[\"emptyByteArray\"] = self.nbt.TAG_Byte_Array()\n test[\"emptyString\"] = self.nbt.TAG_String()\n test[\"emptyList\"] = self.nbt.TAG_List()\n test[\"emptyCompound\"] = self.nbt.TAG_Compound()\n test[\"emptyIntArray\"] = self.nbt.TAG_Int_Array()\n test[\"emptyLongArray\"] = self.nbt.TAG_Long_Array()\n\n # the nbt objects with zero or empty inputs (pure python only)\n test[\"zeroByte\"] = self.nbt.TAG_Byte(0)\n test[\"zeroShort\"] = self.nbt.TAG_Short(0)\n test[\"zeroInt\"] = self.nbt.TAG_Int(0)\n test[\"zeroLong\"] = self.nbt.TAG_Long(0)\n test[\"zeroFloat\"] = self.nbt.TAG_Float(0)\n test[\"zeroDouble\"] = self.nbt.TAG_Double(0)\n test[\"zeroByteArray\"] = self.nbt.TAG_Byte_Array([])\n test[\"zeroString\"] = self.nbt.TAG_String(\"\")\n test[\"zeroList\"] = self.nbt.TAG_List([])\n test[\"zeroCompound\"] = self.nbt.TAG_Compound({})\n test[\"zeroIntArray\"] = self.nbt.TAG_Int_Array([])\n test[\"zeroLongArray\"] = self.nbt.TAG_Long_Array([])\n\n # empty inputs but numpy arrays for the array types\n test[\"zeroNumpyByteArray\"] = self.nbt.TAG_Byte_Array(numpy.array([]))\n test[\"zeroNumpyIntArray\"] = self.nbt.TAG_Int_Array(numpy.array([]))\n test[\"zeroNumpyLongArray\"] = self.nbt.TAG_Long_Array(numpy.array([]))\n\n # test the array types with some python data\n test[\"listByteArray\"] = self.nbt.TAG_Byte_Array(\n [i for i in range(-128, 127)]\n )\n test[\"listIntArray\"] = 
self.nbt.TAG_Int_Array([i for i in range(-400, 400)])\n test[\"listLongArray\"] = self.nbt.TAG_Long_Array(\n [i for i in range(-400, 400)]\n )\n\n # test the array types with numpy data of varying dtypes\n test[\"numpyDtypeTestByteArray\"] = self.nbt.TAG_Byte_Array(\n numpy.array([i for i in range(-128, 127)], dtype=int)\n )\n test[\"numpyDtypeuTestByteArray\"] = self.nbt.TAG_Byte_Array(\n numpy.array([i for i in range(-128, 127)], dtype=numpy.uint)\n )\n test[\"numpyDtypeTestIntArray\"] = self.nbt.TAG_Int_Array(\n numpy.array([i for i in range(-400, 400)], dtype=int)\n )\n test[\"numpyDtypeuTestIntArray\"] = self.nbt.TAG_Int_Array(\n numpy.array([i for i in range(-400, 400)], dtype=numpy.uint)\n )\n test[\"numpyDtypeTestLongArray\"] = self.nbt.TAG_Long_Array(\n numpy.array([i for i in range(-400, 400)], dtype=int)\n )\n test[\"numpyDtypeuTestLongArray\"] = self.nbt.TAG_Long_Array(\n numpy.array([i for i in range(-400, 400)], dtype=numpy.uint)\n )\n\n test[\"numpyDtypedTestByteArray\"] = self.nbt.TAG_Byte_Array(\n numpy.array([i for i in range(-128, 127)])\n )\n test[\"numpyDtypedTestIntArray\"] = self.nbt.TAG_Int_Array(\n numpy.array([i for i in range(-400, 400)])\n )\n test[\"numpyDtypedTestLongArray\"] = self.nbt.TAG_Long_Array(\n numpy.array([i for i in range(-400, 400)])\n )\n\n # test the extremes of the array types\n # byte array tested above\n test[\"numpyExtremeTestIntArray\"] = self.nbt.TAG_Int_Array(\n numpy.array([-(2 ** 31), (2 ** 31) - 1], dtype=int)\n )\n test[\"numpyExtremeTestLongArray\"] = self.nbt.TAG_Long_Array(\n numpy.array([-(2 ** 63), (2 ** 63) - 1], dtype=\"q\")\n )\n\n test[\"minByte\"] = self.nbt.TAG_Byte(-128)\n test[\"minShort\"] = self.nbt.TAG_Short(-(2 ** 15))\n test[\"minInt\"] = self.nbt.TAG_Int(-(2 ** 31))\n test[\"minLong\"] = self.nbt.TAG_Long(-(2 ** 63))\n\n test[\"maxByte\"] = self.nbt.TAG_Byte(127)\n test[\"maxShort\"] = self.nbt.TAG_Short(2 ** 15 - 1)\n test[\"maxInt\"] = self.nbt.TAG_Int(2 ** 31 - 1)\n test[\"maxLong\"] = self.nbt.TAG_Long(2 ** 63 - 1)\n\n # these should either overflow when setting or error when saving. 
Test each and if it errors just comment it out\n            # test['overflowByte'] = self.nbt.TAG_Byte(300)\n            # test['underflowByte'] = self.nbt.TAG_Byte(-300)\n            # test['overflowShort'] = self.nbt.TAG_Short(2**16)\n            # test['underflowShort'] = self.nbt.TAG_Short(-2**16)\n            # test['overflowInt'] = self.nbt.TAG_Int(2**32)\n            # test['underflowInt'] = self.nbt.TAG_Int(-2**32)\n            # test['overflowLong'] = self.nbt.TAG_Long(2**64)\n            # test['underflowLong'] = self.nbt.TAG_Long(-2**64)\n\n            # test['overflowByteArray'] = self.nbt.TAG_Byte_Array([-129, 128])\n            # test['overflowIntArray'] = self.nbt.TAG_Int_Array([-2**31-1, 2**31])\n            # test['overflowLongArray'] = self.nbt.TAG_Long_Array([-2**63-1, 2**63])\n\n            # test['overflowNumpyByteArray'] = self.nbt.TAG_Byte_Array(numpy.array([-129, 128]))\n            # test['overflowNumpyIntArray'] = self.nbt.TAG_Int_Array(numpy.array([-2**31-1, 2**31]))\n            # test['overflowNumpyLongArray'] = self.nbt.TAG_Long_Array(numpy.array([-2**63-1, 2**63]))\n\n            # save then load back up and check the data matches\n\n            os.makedirs("temp", exist_ok=True)\n            test.save_to(\n                os.path.join("temp", "massive_nbt_test_big_endian.nbt"),\n                compressed=False,\n            )\n            test.save_to(\n                os.path.join("temp", "massive_nbt_test_big_endian_compressed.nbt"),\n                compressed=True,\n            )\n            test.save_to(\n                os.path.join("temp", "massive_nbt_test_little_endian.nbt"),\n                compressed=False,\n                little_endian=True,\n            )\n\n            test_be = self.nbt.load(\n                os.path.join("temp", "massive_nbt_test_big_endian.nbt")\n            )\n            test_be_compressed = self.nbt.load(\n                os.path.join("temp", "massive_nbt_test_big_endian_compressed.nbt")\n            )\n            test_le = self.nbt.load(\n                os.path.join("temp", "massive_nbt_test_little_endian.nbt"),\n                little_endian=True,\n            )\n\n            assert test_be == test\n            assert test_be_compressed == test\n            assert test_le == test\n\n\n@unittest.skipUnless(cynbt, "Cythonized library not available")\nclass CythonMassiveNBTTest(AbstractNBTTest.MassiveNBTTests):\n    def setUp(self):\n        self._setUp(cynbt)\n\n\nclass PythonMassiveNBTTest(AbstractNBTTest.MassiveNBTTests):\n    def setUp(self):\n        self._setUp(pynbt)\n\n\nif __name__ == "__main__":\n    unittest.main()\n","sub_path":"tests/test_massive_nbt.py","file_name":"test_massive_nbt.py","file_ext":"py","file_size_in_byte":7764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"646027255","text":"from django.shortcuts import render, reverse, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom .models import Category, Product\nfrom .forms import ProductForm\n\n# Create your views here.\n\n\ndef index(request):\n    \"\"\" A view to return the index page \"\"\"\n\n    return render(request, 'shop/index.html')\n\n\ndef product_list(request, category_slug=None):\n    query = None\n    category = None\n    categories = None\n\n    categories = Category.objects.all()\n    products = Product.objects.filter(available=True)\n    if category_slug:\n        category = get_object_or_404(Category, slug=category_slug)\n        products = products.filter(category=category)\n\n    if request.GET:\n        if 'category' in request.GET:\n            categories = request.GET['category'].split(',')\n            products = products.filter(category__slug__in=categories)\n            categories = Category.objects.filter(slug__in=categories)\n\n        if 'q' in request.GET:\n            query = request.GET['q']\n            if not query:\n                messages.error(request, "You didn't enter any search criteria")\n                return redirect(reverse('index'))\n\n            queries = Q(name__icontains=query) | Q(description__icontains=query)\n            
products = products.filter(queries)\n\n return render(request,\n 'shop/product/list.html',\n {'category': category,\n 'categories': categories,\n 'products': products,\n 'search_term': query,\n 'current_categories': categories})\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n\n return render(request,\n 'shop/product/detail.html',\n {'product': product})\n\n\n","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"200088735","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom .models import *\nfrom loginapp.models import*\n\n# Create your views here.\n\ndef wallindex(request):\n logged_user = User.objects.get(id = request.session['user'])\n context = {\n 'user' : User.objects.get(id = request.session['user']),\n 'all_messages': Message.objects.all(),\n 'user_messages': logged_user.user_message.all()\n }\n return render (request, 'wallapp.html', context)\n\ndef post_message(request):\n user = User.objects.get(id = request.session['user'])\n message = request.POST['message']\n new_message = Message.objects.create(user = user, message = message)\n return redirect ('/wall')\n\ndef post_comment(request):\n user = User.objects.get(id = request.session['user'])\n message = Message.objects.get(id = request.POST['message_id'])\n comment = request.POST['comment']\n new_comment = Comment.objects.create(user = user, message = message, comment = comment)\n return redirect ('/wall')\n\ndef delete_comment(request, comment_id): \n comment = Comment.objects.get(id = comment_id)\n comment.delete()\n return redirect ('/wall')","sub_path":"wall_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"336546174","text":"from django.conf.urls import patterns, url\n\nfrom website import ajax\n\nurlpatterns = patterns('',\n url(r'^$', ajax.home),\n url(r'^sounds/$', ajax.sounds),\n url(r'^pictures/$', ajax.pictures),\n url(r'^shows/$', ajax.shows),\n url(r'^contact/$', ajax.contact),\n)\n","sub_path":"website/ajax-urls.py","file_name":"ajax-urls.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"299368168","text":"import datetime\n\nimport flask.scaffold\nflask.helpers._endpoint_from_view_func = flask.scaffold._endpoint_from_view_func\nimport flask_restful\nfrom flask import request, g\nfrom marshmallow import ValidationError\nfrom elasticsearch import NotFoundError\n\nfrom app import RestException, db, elastic_index, auth\nfrom app.model.resource import Resource\nfrom app.model.resource_category import ResourceCategory\nfrom app.model.admin_note import AdminNote\nfrom app.model.resource_change_log import ResourceChangeLog\nfrom app.schema.schema import ResourceSchema\nfrom app.model.event import Event\nfrom app.model.location import Location\nfrom app.model.role import Permission\nfrom app.model.user_favorite import UserFavorite\nfrom app.wrappers import requires_permission\n\n\nclass ResourceEndpoint(flask_restful.Resource):\n\n schema = ResourceSchema()\n\n def get(self, id):\n model = db.session.query(Resource).filter_by(id=id).first()\n if model is None: raise RestException(RestException.NOT_FOUND)\n return self.schema.dump(model)\n\n @auth.login_required\n 
@requires_permission(Permission.delete_resource)\n def delete(self, id):\n resource = db.session.query(Resource).filter_by(id=id).first()\n resource_id = resource.id\n resource_title = resource.title\n\n try:\n elastic_index.remove_document(resource, 'Resource')\n except NotFoundError:\n pass\n\n db.session.query(AdminNote).filter_by(resource_id=id).delete()\n db.session.query(Event).filter_by(id=id).delete()\n db.session.query(Location).filter_by(id=id).delete()\n db.session.query(ResourceCategory).filter_by(resource_id=id).delete()\n db.session.query(UserFavorite).filter_by(resource_id=id).delete()\n db.session.query(Resource).filter_by(id=id).delete()\n db.session.commit()\n self.log_update(resource_id=resource_id, resource_title=resource_title, change_type='delete')\n return None\n\n @auth.login_required\n @requires_permission(Permission.edit_resource)\n def put(self, id):\n request_data = request.get_json()\n instance = db.session.query(Resource).filter_by(id=id).first()\n try:\n updated = self.schema.load(request_data, instance=instance, session=db.session)\n except Exception as e:\n raise RestException(RestException.INVALID_OBJECT, details=e)\n updated.last_updated = datetime.datetime.utcnow()\n db.session.add(updated)\n db.session.commit()\n elastic_index.update_document(updated, 'Resource')\n self.log_update(resource_id=updated.id, resource_title=updated.title, change_type='edit')\n return self.schema.dump(updated)\n\n def log_update(self, resource_id, resource_title, change_type):\n log = ResourceChangeLog(resource_id=resource_id, resource_title=resource_title, user_id=g.user.id,\n user_email=g.user.email, type=change_type)\n db.session.add(log)\n db.session.commit()\n\n\nclass ResourceListEndpoint(flask_restful.Resource):\n\n resourcesSchema = ResourceSchema(many=True)\n resourceSchema = ResourceSchema()\n\n def get(self):\n resources = db.session.query(Resource).all()\n return self.resourcesSchema.dump(resources)\n\n @auth.login_required\n @requires_permission(Permission.create_resource)\n def post(self):\n request_data = request.get_json()\n try:\n load_result = self.resourceSchema.load(request_data)\n db.session.add(load_result)\n db.session.commit()\n elastic_index.add_document(load_result, 'Resource')\n self.log_update(resource_id=load_result.id, resource_title=load_result.title, change_type='create')\n return self.resourceSchema.dump(load_result)\n except ValidationError as err:\n raise RestException(RestException.INVALID_OBJECT,\n details=load_result.errors)\n\n def log_update(self, resource_id, resource_title, change_type):\n log = ResourceChangeLog(resource_id=resource_id, resource_title=resource_title, user_id=g.user.id,\n user_email=g.user.email, type=change_type)\n db.session.add(log)\n db.session.commit()\n\n\nclass EducationResourceListEndpoint(flask_restful.Resource):\n\n resourcesSchema = ResourceSchema(many=True)\n\n def get(self):\n resources = db.session.query(Resource)\\\n .filter_by(is_uva_education_content=True, is_draft=False)\\\n .order_by(Resource.last_updated.desc())\\\n .all()\n return self.resourcesSchema.dump(resources)\n\n\nclass Covid19ResourceListEndpoint(flask_restful.Resource):\n\n resourcesSchema = ResourceSchema(many=True)\n\n def get(self, category):\n resources = db.session.query(Resource)\\\n .filter(Resource.covid19_categories.any(category), Resource.is_draft == False)\\\n .order_by(Resource.last_updated.desc())\\\n .all()\n return 
self.resourcesSchema.dump(resources)\n","sub_path":"backend/app/resources/ResourceEndpoint.py","file_name":"ResourceEndpoint.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"401624049","text":"import networkx as nx\nimport csv\nimport numpy as np\nimport pydot\nimport functools\nimport graphviz as gv\nimport matplotlib.pyplot as plt\n\ngraph = pydot.Dot(graph_type='digraph', rankdir='LR', size = \"20, 10\", layout = \"dot\", outputorder = \"nodesfirst\", maxiter = \"10000\", nodesep = \".25\", ranksep = \".5\", overlap = \"scale\", splines = \"spline\", spline = \"true\")\ngraph.set_node_defaults(fontname = \"Adobe Garamond Pro\", fontsize=\"60\", penwidth=\"1\")\ngraph.set_edge_defaults(fontname = \"Adobe Garamond Pro\", arrowhead = \"normal\", arrowtail = \"inv\", dir=\"both\")\n\n# read_file = open('subreddit_resubmission_links.txt', 'r')\n# networkx_graph = nx.read_edgelist(read_file, delimiter='\\t', create_using=nx.DiGraph(), nodetype=str, data=(('weight',float),))\n# read_file.close()\n#\n# nodes_by_out_degree = nx.out_degree_centrality(networkx_graph)\n#\n# for node in reversed(sorted(nodes_by_out_degree, key=nodes_by_out_degree.get)):\n# \tgraph.add_node(pydot.Node(str(node)))\n\nread_file = open('subreddit_resubmission_links.txt', 'r')\n\nfile_reader = csv.reader(read_file, delimiter = '\\t')\nfor edge in file_reader:\n\tif int(edge[2]) > 10 and edge[0] != edge[1]:\n\t\tsrc = pydot.Node(str(edge[0]))\n\t\tdest = pydot.Node(str(edge[1]))\n\t\tweight = int(edge[2])\n\t\t# penwidth = .25+np.log(.25*weight)\n\t\tpenwidth = .25 + np.log10(.09*weight)\n\t\tcolor = '#000000'\n\t\tif weight < 50:\n\t\t\tcolor = \"#550000\"\n\t\telif 50 <= weight < 100:\n\t\t\tcolor = \"#660000\"\n\t\telif 100 <= weight < 200:\n\t\t\tcolor = \"#770000\"\n\t\telif 200 <= weight < 400:\n\t\t\tcolor = \"#880000\"\n\t\telif 400 <= weight < 800:\n\t\t\tcolor = \"#990000\"\n\t\telif 800 <= weight < 1600:\n\t\t\tcolor = \"#AA0000\"\n\t\telif 1600 <= weight < 3200:\n\t\t\tcolor = \"#BB0000\"\n\t\telif 3200 <= weight < 6400:\n\t\t\tcolor = \"#CC0000\"\n\t\telif 6400 <= weight:\n\t\t\tcolor = \"#DD0000\"\n\t\t# graph.add_edge(pydot.Edge(src, dest, label=\" \"+ str(weight), color = color, penwidth=str(penwidth)))\n\t\tgraph.add_edge(pydot.Edge(src, dest, color = color, penwidth=str(penwidth)))\nread_file.close()\n\n# graph.savefig('myimage.svg', format='svg', dpi=1200)\n\ngraph.write('graph1.svg', format='svg')","sub_path":"reddit_net_visualizer.py","file_name":"reddit_net_visualizer.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"609949232","text":"import ast\n\n# reads index and makes it a dict\nwith open('../query_dict.txt', \"r\", encoding='utf-8') as f1:\n queries_dict = ast.literal_eval(f1.read())\n f1.close()\n\nwith open('../given_resources/common_words', \"r\", encoding='utf-8') as c:\n common_words = []\n for ci in c.read().split():\n common_words.append(ci)\n\nqueries_dict_stopped = {}\n\nfor query_id in queries_dict.keys():\n new_query_words = []\n for query_word in queries_dict[query_id]:\n if query_word not in common_words:\n new_query_words.append(query_word)\n\n queries_dict_stopped[query_id] = new_query_words\n fw = open('../query_dict_stopped.txt', 'w', encoding='utf-8')\n fw.write(str(queries_dict_stopped))\n 
fw.close()\n","sub_path":"resources/source_code/query_dict_generator_stopped.py","file_name":"query_dict_generator_stopped.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"569923670","text":"from flask import Flask, Response, request\nfrom flask_cors import CORS\nimport json\nimport logging\n\nfrom application_services.imdb_artists_resource import IMDBArtistResource\nfrom application_services.UsersResource.user_service import UserResource\nfrom application_services.UsersResource.address_resource import AddressResource\nfrom database_services.RDBService import RDBService as RDBService\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\napp = Flask(__name__)\nCORS(app)\n\n# ------------------- routing functions -------------------\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/users', methods = ['POST'])\ndef create_user():\n user_data = request.get_json()\n msg, id = create_user_helper(user_data)\n return msg\n\n@app.route('/users', methods = ['GET'])\n@app.route('/users/', methods = ['GET'])\ndef get_users(user_id=None):\n res = UserResource.get_users(user_id)\n rsp = Response(json.dumps(res, default=str), status=200, content_type=\"application/json\")\n return rsp\n\n@app.route('/users/', methods = ['PUT'])\ndef update_user(user_id):\n user_data = request.get_json()\n msg = update_user_helper(user_id, user_data)\n return msg\n\n@app.route('/users/', methods = ['DELETE'])\ndef delete_user(user_id):\n res = UserResource.delete_user(user_id)\n return res\n\n@app.route('/users//address', methods = ['GET'])\ndef get_user_address(user_id):\n res = UserResource.get_users(user_id)\n rsp = get_addresses(res[0]['addressID'])\n return rsp\n\n@app.route('/users//address', methods = ['POST'])\ndef create_address_for_user(user_id):\n addr_data = request.get_json()\n msg, addr_id = AddressResource.create_address(addr_data)\n\n user_data = {'addressID': addr_id}\n UserResource.update_user(user_id, user_data)\n\n return \"Successfully created address for user!\"\n\n@app.route('/addresses', methods = ['POST'])\ndef create_address():\n data = request.get_json()\n msg, id = AddressResource.create_address(data)\n return msg\n\n@app.route('/addresses', methods = ['GET'])\n@app.route('/addresses/', methods = ['GET'])\ndef get_addresses(address_id=None):\n res = AddressResource.get_addresses(address_id)\n rsp = Response(json.dumps(res, default=str), status=200, content_type=\"application/json\")\n return rsp\n\n@app.route('/addresses/', methods = ['PUT'])\ndef update_address(address_id):\n data = request.get_json()\n res = AddressResource.update_address(address_id, data)\n return res\n\n@app.route('/addresses/', methods = ['DELETE'])\ndef delete_address(address_id):\n res = AddressResource.delete_address(address_id)\n return res\n\n@app.route('/addresses//users', methods = ['GET'])\ndef get_all_users_under_address(address_id):\n template = {'addressID': address_id}\n res = UserResource.get_by_template(template)\n rsp = Response(json.dumps(res, default=str), status=200, content_type=\"application/json\")\n return rsp\n\n@app.route('/addresses//users', methods = ['POST'])\ndef create_user_under_address(address_id):\n user_data = request.get_json()\n msg, user_id = create_user_helper(user_data)\n\n user_data = {'addressID': address_id}\n msg = update_user_helper(user_id, user_data)\n \n return \"Successfully created user under 
address!\"\n\n# ------------------- helper functions -------------------\n\ndef create_user_helper(user_data):\n msg, user_id = UserResource.create_user(user_data)\n return msg, user_id\n\ndef update_user_helper(user_id, user_data):\n msg = UserResource.update_user(user_id, user_data)\n return msg\n\n# ------------------- main function -------------------\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)\n\n\n\n\n\n\n\n# @app.route('/imdb/artists/')\n# def get_artists_by_prefix(prefix):\n# res = IMDBArtistResource.get_by_name_prefix(prefix)\n# rsp = Response(json.dumps(res), status=200, content_type=\"application/json\")\n# return rsp\n\n# @app.route('////')\n# def get_by_prefix(db_schema, table_name, column_name, prefix):\n# res = RDBService.get_by_prefix(db_schema, table_name, column_name, prefix)\n# rsp = Response(json.dumps(res, default=str), status=200, content_type=\"application/json\")\n# return rsp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"32378247","text":"from contextlib import contextmanager\nfrom fabric.colors import green, magenta, yellow\nfrom fabric.contrib.files import upload_template, exists\nfrom fabric.decorators import task\nfrom fabric.operations import local\n\nfrom fabric.api import *\n\nenv.hosts = ['54.173.21.90']\nenv.server_name = 'ec2-54-173-21-90.compute-1.amazonaws.com'\nenv.user = 'ubuntu'\nenv.key_filename = '~/.ssh/blog_analytics.pem'\nenv.virtualenv_name = 'blog_analytics'\nenv.project_name = 'rocketu_blog_analytics'\nenv.project_path = '/home/ubuntu/rocketu_blog_analytics_2'\nenv.github_https = 'https://github.com/brittkistner/rocketu_blog_analytics.git'\nenv.db_password = 'hello'\nenv.db_name = 'blog_analytics'\nenv.db_user = 'blog_analytics'\n# env.shell = \"/bin/bash -l -i -c\"\n\n@task\ndef hello():\n print(green(\"I'm alive!\"))\n\n@task\ndef create_file(file_name):\n local(\"touch ~/Desktop/{}.txt\".format(file_name))\n\n@task\ndef create_directory():\n local(\"mkdir ~/Desktop/my_directory\")\n\n@task\ndef create_folder_in_directory(folder_name, directory_path):\n local(\"mkdir {}/{}\".format(directory_path, folder_name))\n\n@task\ndef ubuntu_hello():\n with hide(\"stdout\"):\n output = run(\"lsb_release -a\")\n print(yellow(output))\n\ndef restart_app():\n sudo(\"service supervisor restart\")\n sudo(\"service nginx restart\")\n\n@contextmanager\ndef virtualenv():\n \"\"\"\n Runs commands within the project's virtualenv.\n \"\"\"\n if not exists(\"source ~/.virtualenvs/{}/bin/activate\".format(env.virtualenv_name)):\n run(\"mkdir source ~/.virtualenvs/{}/bin/activate\".format(env.virtualenv_name))\n with cd(env.virtualenv_name):\n with prefix(\"source ~/.virtualenvs/{}/bin/activate\".format(env.virtualenv_name)):\n yield\n\n@task\ndef deploy():\n # with prefix(\"source ~/.virtualenvs/blog_analytics/bin/activate\"):\n with virtualenv():\n with cd(env.project_path):\n run(\"git pull origin master\")\n run(\"pip install -r requirements.txt\")\n run(\"python manage.py migrate\")\n run(\"python manage.py collectstatic --noinput\")\n restart_app()\n\n@task\ndef setup_postgres():\n sudo(\"adduser {}\".format(env.db_name))\n sudo(\"apt-get install postgresql postgresql-contrib libpq-dev\")\n\n with settings(sudo_user='postgres'):\n sudo(\"createuser {}\".format(env.db_user))\n sudo(\"createdb {}\".format(env.db_name))\n alter_user_statement = \"ALTER USER {} WITH PASSWORD 
'{}';\".format(env.db_user, env.db_password)\n sudo('psql -c \"{}\"'.format(alter_user_statement))\n\n@task\ndef setup_nginx(): #project_name is what is being called with the .conf file, server name = ec2 url\n upload_template(\"./deploy/nginx.conf\",\n \"/etc/nginx/sites-enabled/{}.conf\".format(env.project_name),\n {'server_name': env.server_name},\n use_sudo=True,\n backup=False)\n\n restart_app()\n\n@task\ndef create_symlinks():\n run(\"rm /etc/nginx/sites-enabled/default\")\n restart_app()\n # run(\"ln -s /etc/nginx/sites-available/{}\".format(env.project_name),\n # \"/etc/nginx/sites-enabled/{}\".format(env.project_name)\n # )\n restart_app()\n #check that nginx is actually running\n\n@task\ndef setup_gunicorn(workers):\n # create directory\n upload_template(\"./deploy/gunicorn.conf\",\n \"{}/gunicorn.conf.py\".format(env.project_name),\n # pip install similar to the deploy setup with prefix\n {\"proc_name\": env.project_name, \"workers\": workers},\n use_sudo=True,\n backup=False)\n restart_app()\n\n@task\ndef setup_supervisor():\n # create directory\n upload_template(\"./deploy/supervisor.conf\",\n \"/etc/supervisor/conf.d/{}.conf\".format(env.project_name),\n {\"project_name\": env.project_name, \"virtual_env_name\": env.virtualenv_name},\n use_sudo=True,\n backup=False)\n restart_app()\n\n\n@task\ndef pip(packages):\n \"\"\"\n Installs one or more Python packages within the virtual environment.\n \"\"\"\n with virtualenv():\n return sudo(\"pip install %s\" % packages)\n\ndef create_local_settings():\n local_settings_path = run('mkdir {}/local_settings.py'.format(env.project_path))\n upload_template(\"./deploy/local_settings.conf\",\n local_settings_path,\n {\"db_name\": env.db_name,\n \"db_user\": env.db_user,\n \"db_password\": env.db_password,\n \"server_name\": env.server_name},\n use_sudo=False,\n backup=False)\n #check on sudo for this case\n\n#django manage.py commands, apt-get\n\n@task\ndef startup(workers):\n \"\"\"\n Runs commands to startup project.\n Installs and completes setup for postgres, nginx, gunicorn, and supervisor.\n \"\"\"\n sudo(\"apt-get update\")\n sudo(\"apt-get upgrade\")\n sudo(\"apt-get install nginx\")\n if not exists(env.project_path):\n run(\"mkdir {}\".format(env.project_path))\n sudo(\"apt-get install git\")\n if not exists(\"{}/.git\".format(env.project_path)):\n run(\"git clone {} {}\".format(env.github_https, env.project_path))\n # ensure ubuntu user owns the repo folder\n sudo(\"apt-get install python-pip python-dev build-essential\")\n sudo(\"pip install pip --upgrade\")\n # create virtualenv folder\n # with virtualenv():\n create_local_settings()\n setup_postgres()\n deploy()\n setup_nginx()\n setup_gunicorn(workers)\n setup_supervisor()\n #create_symlinks()\n #launch site\n\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"576737710","text":"import pygame\nfrom random import randint\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\ngreen = (0, 200, 0)\nred = (200, 0, 0)\n\npygame.init()\n\nsize = 800, 600\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Nick's Frisbee Golf Challenge\")\n\ndone = False\nclock = pygame.time.Clock()\n\n\ndef ball(x, y):\n pygame.draw.circle(screen, black, [x, y], 15)\n\ndef game_over():\n font = pygame.font.SysFont(None, 25)\n text = font.render(\"Game over\", True, red)\n screen.blit(text, (150, 250))\n\ndef obstacle(xloc, yloc, xsize, ysize):\n 
pygame.draw.rect(screen, green, [xloc, yloc, xsize, ysize])\n pygame.draw.rect(screen, green, [xloc, int(yloc + ysize + space), xsize, 500])\n\ndef Score(score_count):\n font = pygame.font.SysFont(None, 25)\n text = font.render(\"Score: \" + str(score_count), True, green)\n screen.blit(text, (0, 0))\n\nx = 50\ny = 20\ny_speed = 15\nground = 575\nxloc = 700\nyloc = 0\nxsize = 70\nysize = randint(0, 350)\nspace = 200\nobspeed = 2.5\nscore_count = 0\n\n\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n y_speed = -10\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n y_speed = 10\n\n screen.fill(white)\n obstacle(xloc, yloc, xsize, ysize)\n ball(x, y)\n Score(score_count)\n\n y += y_speed\n xloc -= obspeed\n\n if y > ground:\n game_over()\n y_speed = 0\n obspeed = 0\n\n if x + 20 > xloc and y - 20 < ysize and x - 15 < xsize + xloc:\n game_over()\n obspeed = 0\n y_speed = 0\n\n if xloc < -80:\n xloc = 700\n ysize = randint(0, 350)\n score_count += 1\n\n pygame.display.flip()\n clock.tick(60)\n\npygame.quit()\n\n","sub_path":"Frisbee Golf/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"443011087","text":"from opentrons import protocol_api\nimport threading\nimport math\nimport os\nimport json\nimport contextlib\n\n# metadata\nmetadata = {\n 'protocolName': 'Pooling - 2ml Tuberack to 2ml Tuberack',\n 'author': 'Nick ',\n 'source': 'Custom Protocol Request',\n 'apiLevel': '2.11'\n}\n\n\n# Definitions for deck light flashing\n@contextlib.contextmanager\ndef flashing_rail_lights(\n protocol: protocol_api.ProtocolContext, seconds_per_flash_cycle=1.0\n):\n \"\"\"Flash the rail lights on and off in the background.\n\n Source: https://github.com/Opentrons/opentrons/issues/7742\n\n Example usage:\n\n # While the robot is doing nothing for 2 minutes, flash lights quickly.\n with flashing_rail_lights(protocol, seconds_per_flash_cycle=0.25):\n protocol.delay(minutes=2)\n\n When the ``with`` block exits, the rail lights are restored to their\n original state.\n\n Exclusive control of the rail lights is assumed. 
For example, within the\n ``with`` block, you must not call `ProtocolContext.set_rail_lights`\n yourself, inspect `ProtocolContext.rail_lights_on`, or nest additional\n calls to `flashing_rail_lights`.\n \"\"\"\n original_light_status = protocol.rail_lights_on\n\n stop_flashing_event = threading.Event()\n\n def background_loop():\n while True:\n protocol.set_rail_lights(not protocol.rail_lights_on)\n # Wait until it's time to toggle the lights for the next flash or\n # we're told to stop flashing entirely, whichever comes first.\n got_stop_flashing_event = stop_flashing_event.wait(\n timeout=seconds_per_flash_cycle/2\n )\n if got_stop_flashing_event:\n break\n\n background_thread = threading.Thread(\n target=background_loop, name=\"Background thread for flashing rail \\\nlights\"\n )\n\n try:\n if not protocol.is_simulating():\n background_thread.start()\n yield\n\n finally:\n # The ``with`` block might be exiting normally, or it might be exiting\n # because something inside it raised an exception.\n #\n # This accounts for user-issued cancelations because currently\n # (2021-05-04), the Python Protocol API happens to implement user-\n # issued cancellations by raising an exception from internal API code.\n if not protocol.is_simulating():\n stop_flashing_event.set()\n background_thread.join()\n\n # This is questionable: it may issue a command to the API while the API\n # is in an inconsistent state after raising an exception.\n protocol.set_rail_lights(original_light_status)\n\n\ndef run(ctx):\n\n tip_track = True\n p300_mount = 'left'\n flash = True\n\n # load labware\n rack = ctx.load_labware('eurofins_96x2ml_tuberack', '2', 'tuberack')\n tips300 = [\n ctx.load_labware('opentrons_96_tiprack_300ul', slot)\n for slot in ['11']]\n\n # pipette\n p300 = ctx.load_instrument('p300_single_gen2', p300_mount,\n tip_racks=tips300)\n\n tip_log = {val: {} for val in ctx.loaded_instruments.values()}\n\n folder_path = '/data/tip_track'\n tip_file_path = folder_path + '/tip_log.json'\n if tip_track and not ctx.is_simulating():\n if os.path.isfile(tip_file_path):\n with open(tip_file_path) as json_file:\n tip_data = json.load(json_file)\n for pip in tip_log:\n if pip.name in tip_data:\n tip_log[pip]['count'] = tip_data[pip.name]\n else:\n tip_log[pip]['count'] = 0\n else:\n for pip in tip_log:\n tip_log[pip]['count'] = 0\n else:\n for pip in tip_log:\n tip_log[pip]['count'] = 0\n\n for pip in tip_log:\n if pip.type == 'multi':\n tip_log[pip]['tips'] = [tip for rack in pip.tip_racks\n for tip in rack.rows()[0]]\n else:\n tip_log[pip]['tips'] = [tip for rack in pip.tip_racks\n for tip in rack.wells()]\n tip_log[pip]['max'] = len(tip_log[pip]['tips'])\n\n def _pick_up(pip, loc=None):\n if tip_log[pip]['count'] == tip_log[pip]['max'] and not loc:\n if flash:\n if not ctx._hw_manager.hardware.is_simulator:\n with flashing_rail_lights(ctx, seconds_per_flash_cycle=1):\n ctx.pause('Replace ' + str(pip.max_volume) + 'µl \\\ntipracks before resuming.')\n pip.reset_tipracks()\n tip_log[pip]['count'] = 0\n if loc:\n pip.pick_up_tip(loc)\n else:\n pip.pick_up_tip(tip_log[pip]['tips'][tip_log[pip]['count']])\n tip_log[pip]['count'] += 1\n\n # check barcode scans (tube, plate)\n tuberack1_bar, tuberack2_bar = input_file.splitlines()[3].split(',')[:2]\n if not tuberack1_scan[:len(tuberack1_scan)-4] == tuberack1_bar.strip():\n raise Exception(f'Tuberack 1 scans do not match ({tuberack1_bar}, \\\n{tuberack1_scan})')\n if not tuberack2_scan[:len(tuberack2_scan)-4] == tuberack2_bar.strip():\n raise Exception(f'Tuberack 2 
scans do not match ({tuberack2_bar}, \\\n{tuberack2_scan})')\n\n    # parse\n    data = [\n        [val.strip() for val in line.split(',')]\n        for line in input_file.splitlines()[4:]\n        if line and line.split(',')[0].strip()]\n\n    tubes1_ordered = [\n        well for col in rack.columns()\n        for well in col[:8]]\n\n    tubes2_ordered = [\n        well for col in rack.columns()\n        for well in col[8:]]\n\n    prev_dest = None\n    for line in data:\n        tube1 = tubes1_ordered[int(line[0])-1]\n        tube2 = tubes2_ordered[int(line[1])-1]\n        if len(line) >= 3 and line[2]:\n            transfer_vol = float(line[2])\n        else:\n            transfer_vol = default_transfer_vol\n\n        # tip capacity 280 with 20 uL air gap\n        reps = math.ceil(transfer_vol / 280)\n\n        vol = transfer_vol / reps\n\n        # transfer\n        if tube2 != prev_dest:\n            if p300.has_tip:\n                p300.drop_tip()\n            _pick_up(p300)\n\n        for rep in range(reps):\n            p300.move_to(tube1.top())\n            p300.air_gap(20)\n            p300.aspirate(vol, tube1.bottom(0.5))\n            p300.dispense(vol+20, tube2.top(-5), rate=2)\n            ctx.delay(seconds=1)\n            p300.blow_out()\n\n        prev_dest = tube2\n    p300.drop_tip()\n\n    # track final used tip\n    if not ctx.is_simulating():\n        if not os.path.isdir(folder_path):\n            os.mkdir(folder_path)\n        tip_data = {pip.name: tip_log[pip]['count'] for pip in tip_log}\n        with open(tip_file_path, 'w') as outfile:\n            json.dump(tip_data, outfile)\n","sub_path":"protocol-supplements/121d15-2/templates/pooling_2ml_2ml.ot2.apiv2.py","file_name":"pooling_2ml_2ml.ot2.apiv2.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"148496850","text":"class Solution(object):\r\n    def longestPalindrome(self, s):\r\n        \"\"\"\r\n        Based on Manacher Algorithm.\r\n        :type s: str\r\n        :rtype: str\r\n        \"\"\"\r\n        # to deal with odd-length palindromes, we insert an \"#\" between each\r\n        # character. 
s = 'abcd' -> new_s = 'a#b#c#d'\r\n        new_s = '^' + '#'.join(\"^{}$\".format(s)) + '&'\r\n        P, center, right_bound, len_new_s= [], 0, 0, len(new_s)\r\n\r\n        for i in range(len_new_s):\r\n            P.append(right_bound > i and min(P[center*2-i], right_bound-i))\r\n            while i + 1 + P[i] < len_new_s and i - 1 - P[i] >= 0 and\\\r\n                new_s[i + 1 + P[i]] == new_s[i - 1 - P[i]]:\r\n                P[i] += 1\r\n\r\n            if i + P[i] > right_bound:\r\n                center, right_bound = i, i + P[i]\r\n\r\n        max_len, center= max((j, i) for i, j in enumerate(P))\r\n        return s[(center - max_len)// 2 - 1: (center + max_len) // 2 - 1]\r\n\r\ns = Solution()\r\nprint (\"expect:\", \"12123432121\", \"\\n   get:\",\r\n       s.longestPalindrome(\"21121234321212123\"))\r\nprint (\"expect:\", \"121\", \"\\n   get:\", s.longestPalindrome(\"121\"))\r\nprint (\"expect:\", \"$\", \"\\n   get:\", s.longestPalindrome(\"$\"))\r\n\r\n\r\n","sub_path":"Algorithms/005.LongestPalindromicSubstring.py","file_name":"005.LongestPalindromicSubstring.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"602411198","text":"import logging\nimport logging.config\n\nimport redis\nfrom contorller import Infx\nfrom contorller.Insert_info import docker_info\nimport time\n\n\nclass Main:\n    def run(self):\n        logging.config.fileConfig('D:\\pycharm_workspaces\\config_file\\\\logging.conf')\n        ip_file = 'D:\\pycharm_workspaces\\config_file\\\\ip.txt'\n        logger_main = logging.getLogger('main')\n        try:\n            # get the influxdb connection info\n            print(\"influxdb is connection...\")\n            inflxdb = Infx.Infx('192.168.152.129', 'pangziyang', 'pangziyang', 'docker', 8086).influx()\n            print(\"influxdb connection success\")\n            # get the redis connection pool\n            print(\"redis_pool is connection....\")\n            pool = redis.ConnectionPool(host='192.168.212.12', port=6379)\n            redis_pool = redis.Redis(connection_pool=pool)\n\n            dock = docker_info(inflxdb=inflxdb, Redis_pool=redis_pool)\n            print(\"redis_pool connection is success\")\n\n            ######### query the monitored IPs every 60 seconds #########\n            while True:\n                time.sleep(60)\n                with open(ip_file) as f:\n                    ip = f.readline()\n                    ip_list = ip.split(',')\n                    logger_main.info(\"monitor_ip: \" + ''.join(ip_list))\n                    for ip_v in ip_list:\n                        jsos = redis_pool.hgetall(ip_v)\n                        # write to the cpu table\n                        dock.cpu_value(jsos, inflxdb)\n                        # write to the mem table\n                        dock.memory_value(jsos, inflxdb)\n                        # write to the cpu_load table\n                        dock.load_value(jsos, inflxdb)\n                        # write to the network_incoming table\n                        dock.network_incoming_value(jsos, inflxdb)\n                        # write to the network_outgoing table\n                        dock.network_outgoing_value(jsos, inflxdb)\n                        # write to the disk_used table\n                        dock.disk_used_usage(jsos, inflxdb)\n                        # write to the disk_export table\n                        dock.disk_export_use(jsos, inflxdb)\n                        # write to the tcp_connections table\n                        dock.tcp_connections(jsos, inflxdb)\n        except Exception as f:\n            print(f)\n\n\nif __name__ == '__main__':\n    Go = Main()\n    Go.run()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"3303425","text":"import statistics as stat\nimport math\nimport matplotlib.pyplot as plt\n\nfrom analysis import indirect_uniformity_analysis, period, aperiodicity_interval_length\nfrom rng import random_sequence\nfrom reader import read_positive_integer_from_keyboard\n\nRANDOM_SEQUENCE_SIZE = 50000\nHISTOGRAM_BINS_COUNT = 20\n\n# For a = 0, b = 1\nUNIFORM_DISTRIBUTION_EXPECTED = 0.5 # 0.5 * (a + b)\nUNIFORM_DISTRIBUTION_VARIANCE = 1 / 12 # 1 / 12 * (a + b) ^ 2\nUNIFORM_DISTRIBUTION_STANDARD_DEVIATION = math.sqrt(UNIFORM_DISTRIBUTION_VARIANCE) # σ = √D\nREFERENCE_UNIFORMITY_EVALUATION = math.pi / 4\n\n\ndef main():\n    a = read_positive_integer_from_keyboard('a')\n    m = read_positive_integer_from_keyboard('m', lambda x: x > a, 'm must be a positive integer, greater than a')\n    r_0 = read_positive_integer_from_keyboard('R0')\n\n    rng = lambda size: random_sequence(a, m, r_0, size)\n    generated_sequence = tuple(rng(RANDOM_SEQUENCE_SIZE))\n\n    expected = stat.mean(generated_sequence)\n    variance = stat.variance(generated_sequence)\n    standard_deviation = stat.stdev(generated_sequence)\n    uniformity_evaluation = indirect_uniformity_analysis(generated_sequence)\n    generator_period = period(rng)\n    generator_aperiodicity_interval_length = \\\n        aperiodicity_interval_length(rng, generator_period) if generator_period is not None else None\n\n    show_difference('Expected', expected, UNIFORM_DISTRIBUTION_EXPECTED)\n    show_difference('Variance', variance, UNIFORM_DISTRIBUTION_VARIANCE)\n    show_difference('Standard Deviation', standard_deviation, UNIFORM_DISTRIBUTION_STANDARD_DEVIATION)\n    show_difference('Uniformity Evaluation (2K/N)', uniformity_evaluation, REFERENCE_UNIFORMITY_EVALUATION)\n    print(f'Period: {generator_period}')\n    print(f'Aperiodicity Interval Length: {generator_aperiodicity_interval_length}')\n\n    plot_frequency_histogram(generated_sequence)\n    plt.show()\n\n\ndef show_difference(name, actual_value, reference_value):\n    print(f'{name}: {actual_value:.5f} '\n          f'(reference = {reference_value:.5f}, '\n          f'Δ = {abs(reference_value - actual_value):.5f})')\n\n\ndef plot_frequency_histogram(sequence):\n    fig, ax = plt.subplots()\n    fig.canvas.set_window_title('Lehmer Random Number Generator')\n    fig.suptitle('RNG Frequency Histogram')\n    ax.set_xlabel('Generated value')\n    ax.set_ylabel('Frequency')\n    ax.hist(\n        sequence,\n        bins=HISTOGRAM_BINS_COUNT,\n        weights=([1 / RANDOM_SEQUENCE_SIZE] * RANDOM_SEQUENCE_SIZE),\n        edgecolor='black',\n        linewidth=0.5)\n\n\nif __name__ == '__main__':\n    
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"335399051","text":"\n\n##########################################################\n##########################################################\n# description: example that works with connection/disconnections of boxes\n#\n# autor: jeraman\n# date: 26/04/2010\n##########################################################\n##########################################################\n\n#imports Pyata library\nfrom Pd import *\nimport math\nimport datetime\n\n\n\n#planet class for rotate boxes\nclass Clock():\n def __init__(self, radius, c_x, c_y, center, inlet=0):\n self.radius = radius\n self.center = center\n self.inlet = inlet\n self.list = []\n self.draw()\n self.pointer = 0\n\n def increment(self):\n disconnect(self.list[self.pointer], 0, self.center, self.inlet)\n self.pointer = (self.pointer+1)%12\n connect(self.list[self.pointer], 0, self.center, self.inlet)\n\n def draw(self):\n q_boxes = 12\n total = 360\n slice_angle = total/q_boxes\n angle = -90 - slice_angle\n\n for i in range(0,q_boxes+1):\n angle += slice_angle\n rad_angle = math.radians(angle)\n x = self.radius * math.cos(rad_angle)\n y = self.radius * math.sin(rad_angle)\n x+=self.center.x\n y+=self.center.y\n x = int(x)\n y = int(y)\n n = Number(x, y)\n n.set(i)\n self.list.append(n)\n\n\n#mains method\nif __name__ == '__main__':\n #creates an instance of Pd\n pd = Pd()\n\n #initializes Pyata\n pd.init()\n\n #creates a center\n centro = Object(300, 300, \"outlet\")\n\n c1=Clock(100, 300, 300, centro)\n c2=Clock(300, 300, 300, centro)\n\n #varibles to stores the second\n s = 0\n\n #runs during 20 seconds\n for i in range (40):\n s = (s+1)%12\n c1.increment()\n if s==0:\n c2.increment()\n sleep(0.5)\n\n #finishes Pyata\n pd.quit()\n\n","sub_path":"examples/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"94290638","text":"import math\r\n\r\nx = (input(\"Enter X: \"))\r\nx = float(x.replace(',','.'))\r\n\r\nif -(math.pi) <= x <= math.pi:\r\n y = math.cos(3*x)\r\n print(\"По условиям первого уравнения: y = \", y)\r\nelif x < -(math.pi) or x > math.pi:\r\n y = x\r\n print(\"По условиям второго уравнения: y = \", y)\r\n","sub_path":"Lesson_3/задание_3_2.py","file_name":"задание_3_2.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"126350543","text":"\"\"\"\nPseudocode:\nFor each element of arr starting at 2nd position, \ncompare each value with all values to the left. \nIf that current element is lesser than the one on the left swap the values. \nKeep doing it until you reach an element of greater size, or the first element. 
\n\n\"\"\"\n\ndef insertion_sort(arr):\n for i in range(1, len(arr)):\n curr_value = arr[i]\n prev_index = i - 1\n while prev_index >= 0:\n if curr_value < arr[prev_index]:\n arr[prev_index+1] = arr[prev_index]\n arr[prev_index] = curr_value\n prev_index -= 1\n else:\n break\na1 = []\na2 = [4]\na3 = [12, 3, 7, 3, 5, -3]\n\ninsertion_sort(a1)\ninsertion_sort(a2)\ninsertion_sort(a3)\n\nprint(a1)\nprint(a2)\nprint(a3)","sub_path":"python/miscPracticeProblems/Fundamentals/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160750414","text":"from selenium import webdriver\nimport sys\n\ndef exception_handling():\n print(\"{} :: {} :: line: {}\".format(\\\n sys.exc_info()[0],\\\n sys.exc_info()[1],\\\n sys.exc_info()[2].tb_lineno))\n\ndef find_href_from_link_of_page_with_selenium(URL, search_query):\n \n # Convert string query for selenium usage\n search_query = \"\\'\" + search_query + \"\\'\"\n\n result = None\n\n # Run Selenium with Chrome\n driver = webdriver.Chrome()\n driver.get(URL)\n\n element = driver.find_element_by_xpath(\"//a[contains(@href, {})]\".format(search_query))\n result = element.get_property([\"href\"])\n\n # Close driver\n driver.close()\n\n return result","sub_path":"update_coins/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"383228949","text":"from .. import loader, utils\nimport os\ndef register(cb):\n\tcb(RipperMod())\nclass RipperMod(loader.Module):\n\t\"\"\"Ripper\"\"\"\n\tstrings = {'name': 'Ripper'}\n\tdef __init__(self):\n\t\tself.name = self.strings['name']\n\t\tself._me = None\n\t\tself._ratelimit = []\n\tasync def client_ready(self, client, db):\n\t\tself._db = db\n\t\tself._client = client\n\t\tself.me = await client.get_me()\n\tasync def ripcmd(self, message):\n\t\t\n\t\t\n\t\treply = await message.get_reply_message()\n\t\tif utils.get_args_raw(message):\n\t\t\tript = utils.get_args_raw(message)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treply.sender\n\t\t\t\tript = reply.sender.first_name\n\t\t\texcept:\n\t\t\t\tawait message.edit(\"\"\"\n⁠ _\n __| |__ \n |_R.I.P_|\n | | \n | | \n | | \n |_|\n\"\"\")\n\t\t\t\treturn\n\t\tawait message.edit(f\"\"\"\n⁠ _\n __| |__ \n |_R.I.P_|\n | | \n | | \n | | \n |_|\n\n{ript} я сделал тебе крест\n\"\"\")","sub_path":"Ripper.py","file_name":"Ripper.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"571884840","text":"# Given a single integer n, create an (n x n) 2D array with 1 on the border and 0 on the inside.\n#\n# Note: Make sure the array is of type int.\n#\n# Example:\n# Input 1:\n# 4\n# Output 1:\n# [[1 1 1 1]\n# [1 0 0 1]\n# [1 0 0 1]\n# [1 1 1 1]]\n# Input 2:\n# 2\n# Output 2:\n# [[1 1]\n# [1 1]]\n\nimport numpy as np\n\nn = int(input('Enter a number: '))\n\na = np.ones((n, n), dtype=int)\na[1:-1, 1:-1] = 0\nprint(a)\n","sub_path":"numpy_programs/array_one_border_inside_zero.py","file_name":"array_one_border_inside_zero.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"174233420","text":"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport pandas as pd\nfrom skimage.transform import resize\nimport build_model\n\n\nORIG_ROW = 420\nORIG_COL = 580\n\n\ndef 
run_len_encoding(img):\n \"\"\"Compress image using run-length encoding.\n\n Args:\n img: binary array of image\n\n Returns: string of encoded image\n\n \"\"\"\n position = 0\n pixel = 0\n count_one = 0\n previous = 0\n encoded_img = []\n for col in range(img.shape[1]):\n for row in range(img.shape[0]):\n position += 1\n pixel = img[row, col]\n if pixel == 1:\n if pixel != previous:\n encoded_img.append(str(position))\n count_one += 1\n elif pixel == 0 and pixel != previous:\n encoded_img.append(str(count_one))\n count_one = 0\n \n previous = pixel\n \n return \" \".join(encoded_img)\n\n\ndef predict_mask(model, imgs, fnames):\n \"\"\"Predict masks for test images.\n\n Args:\n model: best trained model.\n imgs: float ndarray of images\n fnames: list of names of the images\n\n Returns: DataFrame of image names and encoded mask predictions\n\n \"\"\"\n pred = pd.DataFrame([], columns=['img', 'pixels'])\n \n for idx, fname in enumerate(fnames):\n img = np.expand_dims(imgs[idx], axis=0)\n mask_pred = model.predict(img)\n mask_pred = resize(mask_pred[0,:,:,0], (ORIG_ROW, ORIG_COL))\n mask_pred = np.rint(mask_pred)\n print(fname)\n pred = pred.append(\n {'img':fname, \n 'pixels':run_len_encoding(mask_pred)}, ignore_index=True)\n \n return pred\n\n\ndef predict_masks(test_imgs_npy, best_weight_fname, \n pred_mask_fname=\"test_masks_pred.csv\"):\n # Load images\n test_imgs = np.load(test_imgs_npy) #\"test_imgs.npy\")\n \n # Load model and weight\n model = build_model.unet()\n model.load_weights(best_weight_fname) #\"best_weight.h5\")\n \n # Predict masks for test data\n test_fnames = os.listdir(os.path.join('..', '..', 'data', 'raw', 'test'))\n test_masks_pred = predict_mask(model, test_imgs, test_fnames)\n test_masks_pred.to_csv(pred_mask_fname, index=False)\n","sub_path":"src/models/predict_model.py","file_name":"predict_model.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"125188376","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n \nimport tweepy\nimport datetime\nfrom PIL import Image, ImageDraw, ImageFont\nimport xml.etree.ElementTree as ET\nimport requests\nimport os\n\nurl = \"http://www.drk7.jp/weather/xml/13.xml\"\nd = datetime.datetime.today()\ntoday = '{year}/{month}/{day}'.format(year=d.year, month=str(d.month).zfill(2), day=str(d.day).zfill(2))\ncontent = '今日{}時の天気予報です。\\n'.format(d.hour)\n\nresponse = requests.get(url)\nresponse.encoding = response.apparent_encoding\nfeed = response.text\n\ntree = ET.fromstring(feed)\nweather = tree.findall(\".//area[4]/info[@date='{date}']/weather\".format(date=today))\ncontent += '{}月{}日の東京の天気は{}\\n'.format(d.month, d.day, weather[0].text)\nchance = tree.findall(\".//area[4]/info[@date='{date}']/rainfallchance/period\".format(date=today))\ncontent += '降水確率は午前{am}%、午後{pm}%、夜{night}%\\n'.format(am=chance[1].text, pm=chance[2].text, night=chance[3].text)\ntemp = tree.findall(\".//area[4]/info[@date='{date}']/temperature/range\".format(date=today))\ncontent += '最低気温は{min}℃、最高気温は{max}℃\\n'.format(min=temp[1].text, max=temp[0].text)\ncontent += 'http://www.jma.go.jp/jp/yoho/319.html'\n\nimage = Image.new('RGB', (800, 450), (255, 255, 255))\n\ndraw = ImageDraw.Draw(image)\nfont_path = os.path.join(os.path.dirname(__file__), 'GenShinGothic-P-Regular.ttf')\nfont_50 = ImageFont.truetype(font_path, 50)\nfont_40 = ImageFont.truetype(font_path, 40)\nfont_30 = ImageFont.truetype(font_path, 30)\nfont_26 = ImageFont.truetype(font_path, 26)\nfont_20 = 
ImageFont.truetype(font_path, 20)\n\n\ndraw.text((30, 20), u'{}月{}日 東京の天気'.format(d.month, d.day), font=font_50, fill=(0,0,0))\n\nif weather[0].text.find(u'雪') > -1:\n emoji = Image.open(os.path.join(os.path.dirname(__file__), 'snowy.png'))\nelif weather[0].text.find(u'雨') > -1:\n emoji = Image.open(os.path.join(os.path.dirname(__file__), 'rainy.png'))\nelif weather[0].text.find(u'くもり') > -1:\n emoji = Image.open(os.path.join(os.path.dirname(__file__), 'clowdy.png'))\nelse:\n emoji = Image.open(os.path.join(os.path.dirname(__file__), 'sunny.png'))\n\nemoji = emoji.resize((150, 150))\nimage.paste(emoji, (60, 150))\n\nfill = 5-len(weather[0].text)/2\ndraw.text((0, 330), u'{}{}{}'.format(u' '*int(fill), weather[0].text, u' '*int(fill)), font=font_30, fill=(0,0,0))\n\ndraw.text((300, 130), u'最高 {}℃'.format(temp[0].text), font=font_40, fill=(200,50,50))\ndraw.text((300, 190), u'最低 {}℃'.format(temp[1].text), font=font_40, fill=(50,50,200))\n\nx = 300\ny = 280\nw = 480\nh = 120\n\ndraw.line((x,y,x+w,y), (150,150,150), 1)\ndraw.line((x,y+h,x+w,y+h), (150,150,150), 1)\ndraw.line((x,y,x,y+h), (150,150,150), 1)\ndraw.line((x+w,y,x+w,y+h), (150,150,150), 1)\ndraw.line((x,y+h/2,x+w,y+h/2), (150,150,150), 1)\ndraw.line((x+w*1/5,y,x+w*1/5,y+h), (150,150,150), 1)\ndraw.line((x+w*2/5,y,x+w*2/5,y+h), (150,150,150), 1)\ndraw.line((x+w*3/5,y,x+w*3/5,y+h), (150,150,150), 1)\ndraw.line((x+w*4/5,y,x+w*4/5,y+h), (150,150,150), 1)\n\ndraw.text((x+15+2, y+10), u'時間', font=font_30, fill=(50,50,50))\ndraw.text((x+w*1/5+15, y+12), u'00-06', font=font_26, fill=(50,50,50))\ndraw.text((x+w*2/5+15, y+12), u'06-12', font=font_26, fill=(50,50,50))\ndraw.text((x+w*3/5+15, y+12), u'12-18', font=font_26, fill=(50,50,50))\ndraw.text((x+w*4/5+15, y+12), u'18-24', font=font_26, fill=(50,50,50))\n\ndraw.text((x+8, y+h/2+15), u'降水確率', font=font_20, fill=(50,50,50))\ndraw.text((x+w*1/5+20, y+h/2+10), u'{}%'.format(chance[0].text.rjust(2)), font=font_30, fill=(50,50,50))\ndraw.text((x+w*2/5+20, y+h/2+10), u'{}%'.format(chance[1].text.rjust(2)), font=font_30, fill=(50,50,50))\ndraw.text((x+w*3/5+20, y+h/2+10), u'{}%'.format(chance[2].text.rjust(2)), font=font_30, fill=(50,50,50))\ndraw.text((x+w*4/5+20, y+h/2+10), u'{}%'.format(chance[3].text.rjust(2)), font=font_30, fill=(50,50,50))\n\nimage.save(os.path.join(os.path.dirname(__file__), 'weather.png'))\n\n#Authorization\nf = open(os.path.join(os.path.dirname(__file__), 'config.txt'))\ndata = f.read()\nf.close()\nlines = data.split('\\n')\n\nKEY = lines[0]\nSECRET = lines[1]\nATOKEN = lines[2]\nASECRET = lines[3]\n#What to tweet\nauth = tweepy.OAuthHandler(KEY, SECRET)\nauth.set_access_token(ATOKEN, ASECRET)\napi = tweepy.API(auth)\napi.update_with_media(os.path.join(os.path.dirname(__file__), 'weather.png'), status=content)\n","sub_path":"tenki.py","file_name":"tenki.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"635434580","text":"from telegram.ext import Updater, PicklePersistence\n\nfrom config import BOT_TOKEN\n\nupdater = Updater(\n BOT_TOKEN,\n persistence=PicklePersistence(filename=\"data\")\n)\ndp = updater.dispatcher\n\n\ndef main():\n from handlers import all_handlers\n\n for handler in all_handlers:\n if len(handler) == 2:\n if handler[0] == \"error\":\n dp.add_error_handler(\n handler[1]\n )\n else:\n dp.add_handler(\n handler[0],\n handler[1]\n )\n else:\n dp.add_handler(\n handler[0]\n )\n\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == \"__main__\":\n 
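main()\n\n# Added note: entries in handlers.all_handlers are dispatched above by tuple length -- a 2-tuple is\n# either (\"error\", callback) or (handler, group), anything else is treated as a bare handler in slot 0.\n# A minimal module satisfying that contract (hypothetical names, sketch only):\n#   from telegram.ext import CommandHandler\n#   def start(update, context):\n#       update.message.reply_text(\"hello\")\n#   all_handlers = [(CommandHandler(\"start\", start),)]\n\ndef _run():  # hypothetical alias for main, added for illustration\n    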
main()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"529779688","text":"import smbus2\nfrom time import sleep, clock, time\nfrom timeit import default_timer as timer \n\nfrom python.devices.I2c_slave import I2c_slave\nfrom python.logger import get_sub_logger \n\nlogger = get_sub_logger(__name__)\n\nclass Mh_z16_ndir_co2(I2c_slave):\n\n cmd_measure = [0xFF,0x01,0x9C,0x00,0x00,0x00,0x00,0x00,0x63]\n ppm = 0\n\n IOCONTROL = 0X0E << 3\n FCR = 0X02 << 3\n LCR = 0X03 << 3\n DLL = 0x00 << 3\n DLH = 0X01 << 3\n THR = 0X00 << 3\n RHR = 0x00 << 3\n TXLVL = 0X08 << 3\n RXLVL = 0X09 << 3\n\n\n def initialize(self):\n\n #TODO - Add code to check that the configuration matches this sensors\n # capabilities.\n\n start_time = timer()\n\n logger.info('initializing MH-Z16 Co2 Sensor at address {} (hex), {} (decimal)'.format(hex(self.i2c_addr), self.i2c_addr))\n\n try:\n self.write_register(self.IOCONTROL, 0x08)\n except IOError:\n pass\n\n trial = 10\n\n for i in range(trial):\n try:\n self.write_register(self.FCR, 0x07)\n self.write_register(self.LCR, 0x83)\n self.write_register(self.DLL, 0x60)\n self.write_register(self.DLH, 0x00)\n self.write_register(self.LCR, 0x03)\n\n logger.info('MH-Z16 Co2 Sensor initialized successfully in {:.3f} seconds'.format(timer() - start_time))\n return True\n # except IOError:\n except:\n logger.error('MH-Z16 Co2 Sensor failed to initialize after {:.3f} seconds'.format(timer() - start_time))\n return False \n\n logger.error('MH-Z16 Co2 Sensor failed to initialize after {} trials and {:.3f} seconds'.format(trial, timer() - start_time))\n return False\n\n def update_sensor_readings(self):\n try:\n ts = time()\n \n self.write_register(self.FCR, 0x07)\n self.send(self.cmd_measure)\n sleep(0.01)\n\n self.ppm = None\n self.parse(self.receive())\n if self.ppm:\n self.vals[self.attribute_value_indexes['co2']]['value'] = '{:+.1f}'.format(self.ppm)\n else:\n self.vals[self.attribute_value_indexes['co2']]['value'] = None\n\n self.vals[self.attribute_value_indexes['co2']]['ts'] = ts \n\n #- except IOError:\n except:\n logger.error('cannot read MH-Z16 Co2 sensor: {}, {}'.format(exc_info()[0], exc_info()[1]))\n # Blank the sensor readings\n self.vals[self.attribute_value_indexes['co2']]['value'] = None\n\n\n def parse(self, response):\n checksum = 0\n\n if len(response) < 9:\n return\n\n for i in range (0, 9):\n checksum += response[i]\n\n if response[0] == 0xFF:\n if response[1] == 0x9C:\n if checksum % 256 == 0xFF:\n self.ppm = (response[2]<<24) + (response[3]<<16) + (response[4]<<8) + response[5]\n\n def read_register(self, reg_addr):\n sleep(0.001)\n return self.bus.read_byte_data(self.i2c_addr, reg_addr)\n\n def write_register(self, reg_addr, val):\n sleep(0.001)\n self.bus.write_byte_data(self.i2c_addr, reg_addr, val)\n\n def send(self, command):\n if self.read_register(self.TXLVL) >= len(command):\n self.bus.write_i2c_block_data(self.i2c_addr, self.THR, command)\n\n def receive(self):\n n = 9\n buf = []\n start = clock()\n\n while n > 0:\n rx_level = self.read_register(self.RXLVL)\n\n if rx_level > n:\n rx_level = n\n\n buf.extend(self.bus.read_i2c_block_data(self.i2c_addr, self.RHR, rx_level))\n n = n - rx_level\n\n if clock() - start > 0.2:\n break\n\n return 
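buf\n\n    # Added sketch: time.clock() used above was removed in Python 3.8; this is the same timeout\n    # loop on time.monotonic() (assumes monotonic is imported from time). Note also that\n    # update_sensor_readings() calls exc_info() without importing it from sys, so its except\n    # branch would itself raise NameError.\n    def receive_monotonic(self):\n        n = 9\n        buf = []\n        start = monotonic()\n        while n > 0:\n            rx_level = self.read_register(self.RXLVL)\n            if rx_level > n:\n                rx_level = n\n            buf.extend(self.bus.read_i2c_block_data(self.i2c_addr, self.RHR, rx_level))\n            n = n - rx_level\n            if monotonic() - start > 0.2:\n                break\n        return 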
buf\n","sub_path":"python/devices/Mh_z16_ndir_co2.py","file_name":"Mh_z16_ndir_co2.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"423217979","text":"\"\"\"\nMIT License\n\nCopyright (c) 2021 TheHamkerCat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nfrom pyrogram import filters\nfrom pyrogram.types import (InlineKeyboardButton, InlineKeyboardMarkup,\n InputMediaPhoto, InputMediaVideo, Message)\n\nfrom wbb import app2\nfrom wbb.core.decorators.errors import capture_err\n\nfrom_list = [\"cursed_videos\", \"shitpost_status\", 972029825]\nRICE_CHANNEL = \"Decomposed\"\n\n\n@app2.on_message(\n filters.chat(from_list)\n & filters.video\n & ~filters.forwarded\n & ~filters.edited\n)\nasync def rice(_, message: Message):\n await message.copy(RICE_CHANNEL, caption=\"\")\n \n","sub_path":"wbb/modules/rice.py","file_name":"rice.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"25763859","text":"import numpy as np\nfrom datetime import datetime\nimport pandas as pd\nimport time\n\ndef load_poi2pos(poi2pos_file):\n poi2pos = {}\n f = open(poi2pos_file, 'r')\n lines = f.readlines()\n \n for i, line in enumerate(lines):\n tokens = line.strip().split('\\t')\n location = int(tokens[0])\n x = float(tokens[1])\n y = float(tokens[2])\n z = float(tokens[3])\n poi2pos[location] = np.array([x,y,z])\n \n return poi2pos\n\n# use this to load a preprocessed file\ndef treat_prepro(train, step):\n train_f = open(train, 'r')\n lines = train_f.readlines()\n \n # fsb1: outdated\n # Need to change depending on threshold\n #if step==1:\n # lines = train_f.readlines()#[:86445] #659 #[:309931]\n #elif step==2:\n # lines = train_f.readlines()#[:13505]#[:309931]\n #elif step==3:\n # lines = train_f.readlines()#[:30622]#[:309931]\n\n # update fsb1: ld's are computed only for positive samples\n \n train_user = []\n train_td = []\n train_ld = []\n train_loc = []\n train_dst = []\n\n user = 1\n user_td = []\n user_ld = []\n user_loc = []\n user_dst = []\n\n for i, line in enumerate(lines):\n tokens = line.strip().split('\\t')\n if len(tokens) < 3:\n if user_td: \n train_user.append(user)\n train_td.append(user_td)\n train_ld.append(user_ld)\n train_loc.append(user_loc)\n train_dst.append(user_dst)\n user = int(tokens[0])\n user_td = []\n user_ld = []\n user_loc = []\n user_dst = []\n continue\n td = np.array([float(t) for t in 
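tokens[0].split(',')])  # (added) duplicate parse; redone just below\n        # Added note: per line, tokens[0] and tokens[1] carry comma-separated time deltas and\n        # spatial deltas, tokens[2] the visited location ids, tokens[3] the destination label.\n        td = np.array([float(t) for t in 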
tokens[0].split(',')])\n ld = np.array([float(t) for t in tokens[1].split(',')])\n loc = np.array([int(t) for t in tokens[2].split(',')])\n dst = int(tokens[3])\n user_td.append(td)\n user_ld.append(ld)\n user_loc.append(loc)\n user_dst.append(dst)\n\n # process last line\n if user_td: \n train_user.append(user)\n train_td.append(user_td)\n train_ld.append(user_ld)\n train_loc.append(user_loc)\n train_dst.append(user_dst)\n\n return train_user, train_td, train_ld, train_loc, train_dst\n\n# WGS84 to R3:\ndef WGS84_to_R3(lati, longi):\n ''' transfer lat lon to 3d points on earth approxiation '''\n R = 6371. # average radius of the earth\n lat_r, lon_r = lati*np.pi/180., longi*np.pi/180.\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return (x, y, z)\n\n# Note this is called from preprocess using gowalla full.\ndef load_data(train, max_users=0):\n \n # maps to resolve ids (ids are continous, numbers are not)\n user2id = {} # map a user number to its id\n poi2id = {} # map a location number to its id\n poi2pos = {} # map a location number to lat long\n\n # arrays to store data\n train_user = []\n train_time = []\n train_coord = [] \n train_loc = []\n valid_user = []\n valid_time = []\n valid_coord = []\n valid_loc = []\n test_user = []\n test_time = []\n test_coord = []\n test_loc = []\n\n user_time = []\n user_coord = []\n user_loc = []\n visit_thr = 30 # only consider users with 30 checkins\n \n # organize the data by users\n # (assign each line (checkin) to the corresponding user)\n # (ids are continous, users are distinctive numbers)\n\n # collect all users with visit_thr checkins:\n train_f = open(train, 'r')\n lines = train_f.readlines()\n \n prev_user = int(lines[0].split('\\t')[0])\n visit_cnt = 0\n for i, line in enumerate(lines):\n tokens = line.strip().split('\\t')\n user = int(tokens[0])\n if user==prev_user:\n visit_cnt += 1\n else:\n if visit_cnt >= visit_thr:\n user2id[prev_user] = len(user2id)\n prev_user = user\n visit_cnt = 1\n if max_users > 0 and len(user2id) >= max_users:\n break # restrict to max users\n\n # we read the file again:\n # this time we collect the data for the interresting users \n\n train_f = open(train, 'r')\n lines = train_f.readlines()\n\n prev_user = int(lines[0].split('\\t')[0])\n for i, line in enumerate(lines):\n tokens = line.strip().split('\\t')\n user = int(tokens[0])\n if user2id.get(user) is None:\n continue\n user = user2id.get(user)\n\n time = (datetime.strptime(tokens[1], \"%Y-%m-%dT%H:%M:%SZ\")\\\n -datetime(2009,1,1)).total_seconds()/60 # minutes since 1.1.2009\n lati = float(tokens[2]) # WGS84? Latitude\n longi = float(tokens[3]) # WGS84? 
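Longitude in degrees\n        # Added note: WGS84_to_R3 projects (lat, lon) onto a sphere of radius 6371 km; the\n        # straight-line distance between two such points is the chord length, which slightly\n        # understates the great-circle distance.\n        # 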
Longitude\n coord = WGS84_to_R3(lati, longi)\n location = int(tokens[4]) # location nr\n if poi2id.get(location) is None: # get-or-set locations\n poi2id[location] = len(poi2id)\n poi2pos[poi2id.get(location)] = coord\n location = poi2id.get(location)\n\n if user == prev_user:\n # insert in front!\n user_time.insert(0, time)\n user_coord.insert(0, coord)\n user_loc.insert(0, location)\n else:\n # add each user once to train, once to validate and once to test:\n # 0%..70% to train\n # 70%..80% to validate\n # 80%..100% to test\n # ordered in time where training corresponds to the farthest in time.\n train_thr = int(len(user_time) * 0.7)\n valid_thr = int(len(user_time) * 0.8)\n train_user.append(user)\n train_time.append(user_time[:train_thr])\n train_coord.append(user_coord[:train_thr])\n train_loc.append(user_loc[:train_thr])\n valid_user.append(user)\n valid_time.append(user_time[train_thr:valid_thr])\n valid_coord.append(user_coord[train_thr:valid_thr])\n valid_loc.append(user_loc[train_thr:valid_thr])\n test_user.append(user)\n test_time.append(user_time[valid_thr:])\n test_coord.append(user_coord[valid_thr:])\n test_loc.append(user_loc[valid_thr:])\n\n prev_user = user\n user_time = [time]\n user_coord = [coord]\n user_loc = [location]\n\n # process also the latest user in the for loop\n if user2id.get(user) is not None:\n train_thr = int(len(user_time) * 0.7)\n valid_thr = int(len(user_time) * 0.8)\n train_user.append(user)\n train_time.append(user_time[:train_thr])\n train_coord.append(user_coord[:train_thr])\n train_loc.append(user_loc[:train_thr])\n valid_user.append(user)\n valid_time.append(user_time[train_thr:valid_thr])\n valid_coord.append(user_coord[train_thr:valid_thr])\n valid_loc.append(user_loc[train_thr:valid_thr])\n test_user.append(user)\n test_time.append(user_time[valid_thr:])\n test_coord.append(user_coord[valid_thr:])\n test_loc.append(user_loc[valid_thr:])\n\n return len(user2id), poi2id, poi2pos, train_user, train_time, train_coord, train_loc, valid_user, valid_time, valid_coord, valid_loc, test_user, test_time, test_coord, test_loc\n\ndef inner_iter(data, batch_size):\n data_size = len(data)\n num_batches = int(len(data)/batch_size)\n for batch_num in range(num_batches):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield data[start_index:end_index]\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":7497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"296686769","text":"import sys\n\ninput = sys.stdin\n\ndef solve(s):\n out = s[0]\n for c in s[1:]:\n if c 1:\n where = AND(*where_conditions)\n #\n sqlconn = dbconfig.new_conn_signups()\n return sqlconn.select(Newsletter, where=where, order_by=order_by)\n \n \n \ndef get_newsletters_by_token(token):\n sqlconn = dbconfig.new_conn_signups()\n return sqlconn.select_one(Newsletter, Newsletter.q.subs_token == token)","sub_path":"test/test/src/db/libnewsletter.py","file_name":"libnewsletter.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"626155859","text":"#!/usr/bin/env python3\nimport os\nimport logging\nimport inspect\nimport math\nimport struct\nimport csv\nfrom functools import partial\nfrom mysql import connector\nfrom abc import abstractmethod\n\n######################################################################\n\nclass Logger:\n\n def __init__(self, 
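path, name):  # (added) stub; the real __init__ follows and overrides it\n        # Added note: logging.getLogger(\"dt\") returns a process-wide singleton and handlers are\n        # attached unconditionally below, so constructing Logger twice doubles every log line;\n        # guarding with self.logger.hasHandlers() avoids that.\n        pass\n\n    def __init__(self, 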
path, name):\n log_file = path + name + \".log\"\n if (os.path.isfile(log_file)):\n os.remove(log_file)\n self.logger = logging.getLogger(\"dt\")\n self.logger.setLevel(logging.INFO)\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n fmt = logging.Formatter(fmt='[%(asctime)s] %(message)s',datefmt='%Y-%m-%d %H:%M:%S')\n fh.setFormatter(fmt)\n ch.setFormatter(fmt)\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n\n def log(self, msg):\n class_name = inspect.currentframe().f_back.f_locals['self'].__class__.__name__\n func_name = inspect.currentframe().f_back.f_code.co_name\n file_name = inspect.currentframe().f_back.f_code.co_filename\n line_number = inspect.currentframe().f_back.f_code.co_firstlineno\n self.logger.info((\" %s [%s::%s][%s:%i]\") % (msg, class_name, func_name, file_name, line_number))\n\n######################################################################\n\nclass Future:\n\n TDX_PATH = \"C:\\\\zd_zszq_new\\\\vipdoc\\\\ds\\\\\"\n PATH = os.path.abspath(\".\") + os.sep\n NAME = \"future\"\n DB_HOST = \"127.0.0.1\"\n DB_USR = \"root\"\n DB_PWD = \"!QAZ2wsx#EDC\"\n DB_NAME = \"at\"\n\n def __init__(self):\n self.logger = Logger(self.PATH, self.NAME)\n\n def execute(self):\n self.logger.log(\"Start\")\n self.update_future_codes()\n self.update_future_trades()\n self.logger.log(\"Stop\")\n\n def update_future_codes(self):\n self.logger.log(\"Start\")\n\n # Re-Init Database\n connect = connector.connect(host=self.DB_HOST,\n user=self.DB_USR,\n password=self.DB_PWD,\n database=self.DB_NAME)\n cursor = connect.cursor()\n\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS future_code(\n code VARCHAR(50) NOT NULL,\n name VARCHAR(50) NOT NULL,\n exchange VARCHAR(50) NOT NULL, PRIMARY KEY(code));\"\"\")\n connect.commit()\n\n #self.logger.log(\"Create future_code table\")\n\n codes = \"AG,银,上海\\n\" + \\\n \"AL,铝,上海\\n\" + \\\n \"AU,金,上海\\n\" + \\\n \"BU,沥青,上海\\n\" + \\\n \"CU,铜,上海\\n\" + \\\n \"FU,燃油,上海\\n\" + \\\n \"HC,轧板,上海\\n\" + \\\n \"NI,镍,上海\\n\" + \\\n \"PB,铅,上海\\n\" + \\\n \"RB,螺纹钢,上海\\n\" + \\\n \"RU,橡胶,上海\\n\" + \\\n \"SN,锡,上海\\n\" + \\\n \"WR,线材,上海\\n\" + \\\n \"ZN,锌,上海\\n\" + \\\n \"A,大豆一,大连\\n\" + \\\n \"B,大豆二,大连\\n\" + \\\n \"BB,胶合板,大连\\n\" + \\\n \"C,玉米,大连\\n\" + \\\n \"CS,玉米淀粉,大连\\n\" + \\\n \"FB,纤维板,大连\\n\" + \\\n \"I,铁矿石,大连\\n\" + \\\n \"J,焦炭,大连\\n\" + \\\n \"JD,鸡蛋,大连\\n\" + \\\n \"JM,焦煤,大连\\n\" + \\\n \"L,聚乙烯,大连\\n\" + \\\n \"M,豆粕,大连\\n\" + \\\n \"P,棕榈油,大连\\n\" + \\\n \"PP,聚丙烯,大连\\n\" + \\\n \"V,聚氯乙烯,大连\\n\" + \\\n \"Y,豆油,大连\\n\" + \\\n \"CF,棉花,郑州\\n\" + \\\n \"FG,玻璃,郑州\\n\" + \\\n \"JR,梗稻,郑州\\n\" + \\\n \"LR,晚籼稻,郑州\\n\" + \\\n \"MA,甲醇,郑州\\n\" + \\\n \"OI,菜籽油,郑州\\n\" + \\\n \"PM,普麦,郑州\\n\" + \\\n \"RI,早籼稻,郑州\\n\" + \\\n \"RM,菜籽粕,郑州\\n\" + \\\n \"RS,油菜籽,郑州\\n\" + \\\n \"SF,硅铁,郑州\\n\" + \\\n \"SM,锰硅,郑州\\n\" + \\\n \"SR,白糖,郑州\\n\" + \\\n \"TA,PTA,郑州\\n\" + \\\n \"WH,强麦,郑州\\n\" + \\\n \"ZC,动力煤,郑州\\n\" + \\\n \"IF,沪深300,中金\\n\" + \\\n \"IC,中证500,中金\\n\" + \\\n \"IH,上证50,中金\\n\" + \\\n \"T,国债10,中金\\n\" + \\\n \"TF,国债5,中金\"\n\n reader = csv.reader(codes.split('\\n'), delimiter=',')\n for row in reader:\n cursor.execute(\"INSERT INTO future_code VALUES(%s, %s, %s) ON DUPLICATE KEY UPDATE code=%s;\",\n (row[0].strip(), row[1], row[2], row[0]))\n #self.logger.log(\"Insert Code (\" + row[0].strip() + \", \" + row[1] + \")\")\n connect.commit()\n\n cursor.close()\n connect.close()\n\n self.logger.log(\"Stop\")\n\n def update_future_trades(self):\n self.logger.log('Start')\n\n w_connect = 
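None  # (added) placeholder; the real connection opens on the next statement\n        # Added note: DB credentials are hard-coded class attributes; in deployment they would\n        # normally come from the environment, e.g.\n        #   password=os.environ.get(\"AT_DB_PWD\")  # hypothetical variable name\n        w_connect = 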
connector.connect(host=self.DB_HOST,\n user=self.DB_USR,\n password=self.DB_PWD,\n database=self.DB_NAME)\n w_cursor = w_connect.cursor()\n w_cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS future_trade(\n code VARCHAR(50) NOT NULL,\n type VARCHAR(50) NOT NULL,\n datetime DATETIME NOT NULL,\n open DOUBLE NOT NULL,\n high DOUBLE NOT NULL,\n low DOUBLE NOT NULL,\n close DOUBLE NOT NULL,\n volume DOUBLE NOT NULL,\n oi DOUBLE NOT NULL,\n sp DOUBLE NOT NULL,\n PRIMARY KEY(code,datetime));\"\"\")\n\n w_cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS future_tick(\n instrument_id VARCHAR(50) NOT NULL,\n trading_day DATETIME NOT NULL,\n update_time DOUBLE NOT NULL,\n update_millisec DOUBLE NOT NULL,\n last_price DOUBLE NOT NULL,\n pre_settlement_price DOUBLE NOT NULL,\n pre_close_price DOUBLE NOT NULL,\n pre_open_interest DOUBLE NOT NULL,\n open_price DOUBLE NOT NULL,\n highest_price DOUBLE NOT NULL,\n lowest_price DOUBLE NOT NULL,\n close_price DOUBLE NOT NULL,\n settlement_price DOUBLE NOT NULL,\n average_price DOUBLE NOT NULL,\n volume DOUBLE NOT NULL,\n turnover DOUBLE NOT NULL,\n open_interest DOUBLE NOT NULL,\n bid_price1 DOUBLE NOT NULL,\n bid_volume1 DOUBLE NOT NULL,\n ask_price1 DOUBLE NOT NULL,\n ask_volume1 DOUBLE NOT NULL,\n pre_delta DOUBLE NOT NULL,\n curr_delta DOUBLE NOT NULL,\n action_day DATETIME NOT NULL,\n PRIMARY KEY(instrument_id,trading_day,update_time,update_millisec));\"\"\")\n w_connect.commit()\n w_cursor.close()\n w_connect.close()\n\n #self.logger.log(\"Create future_trade table\")\n\n r_connect = connector.connect(host=self.DB_HOST,\n user=self.DB_USR,\n password=self.DB_PWD,\n database=self.DB_NAME)\n r_cursor = r_connect.cursor()\n r_cursor.execute(\"SELECT code, name FROM at.future_code;\")\n results = []\n for (code, name) in r_cursor:\n results.append((code, name))\n r_cursor.close()\n r_connect.close()\n\n DAY_PATH = self.TDX_PATH + \"lday\" + os.sep\n LC5_PATH = self.TDX_PATH + \"fzline\" + os.sep\n LC1_PATH = self.TDX_PATH + \"minline\" + os.sep\n\n day_files = [day_file for day_file in os.listdir(DAY_PATH)\n if os.path.isfile(os.path.join(DAY_PATH, day_file))]\n lc5_files = [lc5_file for lc5_file in os.listdir(LC5_PATH) if\n os.path.isfile(os.path.join(LC5_PATH, lc5_file))]\n lc1_files = [lc1_file for lc1_file in os.listdir(LC1_PATH) if\n os.path.isfile(os.path.join(LC1_PATH, lc1_file))]\n\n for (code, name) in results:\n day_file = self._find_tdx_file(code, \"L8.day\", day_files)\n self._day_to_db(code, \"d\", DAY_PATH + day_file)\n #self.logger.log(\"Insert day trade (\" + code + \", \" + name + \")\")\n\n lc5_file = self._find_tdx_file(code, \"L8.lc5\", lc5_files)\n self._lc_to_db(code, \"5\", LC5_PATH + lc5_file)\n #self.logger.log(\"Insert 5m trade (\" + code + \", \" + name + \")\")\n\n lc1_file = self._find_tdx_file(code, \"L8.lc1\", lc1_files)\n self._lc_to_db(code, \"1\", LC1_PATH + lc1_file)\n #self.logger.log(\"Insert 1m trade (\" + code + \", \" + name + \")\")\n\n self.logger.log('Stop')\n\n def _find_tdx_file(self, code, suffix, files):\n for file in files:\n if file.endswith(\"#\" + code + suffix):\n return file\n\n # 通达信期货日数据格式:\n # 每32个字节为一个数据,每字段内低字节在前\n # 00 ~ 01 字节:日期,整型\n # 设其值为num则日期计算方法为:\n # year=floor(num/2048)+2004\n # month=floor(mod(num,2048)/100)\n # day=mod(mod(num,2048),100);\n # 02 ~ 03 字节:从0点开始至目前的分钟数,整型\n # 04 ~ 07 字节:开盘价,浮点型\n # 08 ~ 11 字节:最高价,浮点型\n # 12 ~ 15 字节:最低价,浮点型\n # 16 ~ 19 字节:收盘价,浮点型\n # 20 ~ 23 字节:成交量,整形\n # 24 ~ 27 字节:持仓量,整型\n # 28 ~ 31 字节:结算价,浮点型\n def _day_to_db(self, code, type, day_file):\n w_connect = 
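None  # (added) placeholder; reassigned below\n        # Added sketch of the date decode documented above (num is the little-endian uint16 in\n        # bytes 0..1 of a 32-byte record):\n        #   year = num // 2048 + 2004\n        #   month = (num % 2048) // 100\n        #   day = (num % 2048) % 100\n        w_connect = 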
connector.connect(host=self.DB_HOST,\n user=self.DB_USR,\n password=self.DB_PWD,\n database=self.DB_NAME)\n w_cursor = w_connect.cursor()\n\n with open(day_file, 'rb') as day:\n records = iter(partial(day.read, 32), b'')\n for r in records:\n data = struct.unpack(\" is this necessary? It should not be that slow. \n#2. Make sure that we can run 1 simulation round if we call the expand subclone function\n#3. Make sure that the data is written to files correctly, where:\n#\t-> Every simulation is stored in its own folder with an unique identifier. Here we obtain the underlying used data (no permutations)\n#\t-> The error is written to a new file (or will appending also work?)\n#4. After the simulations have run, we can combine the errors together and make the boxplots and confusion matrix plots.\n\ntaskId = int(sys.argv[1]) -1 #we use the task ID as mu value, \nuniqueID = sys.argv[2]\n#leaveOut = sys.argv[3]\n\n#2.\nstart_time = time.time()\nsimulator = Simulator()\nsimulator.initialize(taskId, uniqueID)\nsimulator.subclonalExpansion()\nprint(\"--- %s seconds for 1 simulation run with 10 permutations ---\" % (time.time() - start_time))\n#For running a simulation, we want to have a number of parameters that we can set beforehand. These are for example an increase in noise level, and also the mu range to select from.\n#THe noise level can be provided to the script (initialize). The mu range can also be defined there. The script should always select a random mu from this range!\n#we then provide the parameters to this script, such that we have 10 jobs where the mu is in a certain range, etc. For the noise levels we can du ","sub_path":"Simulations/runSimulation.py","file_name":"runSimulation.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"648809278","text":"\"\"\"Error generation and handling\"\"\"\n\nERROR_CODES = {\n 0: 'Missing parameter',\n 1: 'Error processing authentication credentials',\n 2: 'Verification of credentials failed',\n 3: 'Network error',\n 4: 'Application verification failed',\n}\n\n\ndef error_dict(error_code):\n \"\"\"Generates an Error dict suitable for storing in a key/value store.\"\"\"\n err = {'status': 'fail'}\n err['reason'] = {'code': error_code,\n 'description': ERROR_CODES[error_code]}\n return err\n","sub_path":"velruse/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"45934607","text":"import numpy as np\nimport pandas as pd\nimport sqlite3\n\n# READING IN DATA FROM CSV FILES\ndef read_data(path):\n\twith open(path) as f:\n\n\t\tdf = pd.read_csv(f)\n\treturn df\n\n\n# DATA PROCESSING\ndef process_zillow_data(df, cols_to_drop, locations):\n\t# Drop unnecessarily columns\n\tdf.drop(df.columns[cols_to_drop], axis=1, inplace=True)\n\t# Drop cities we don't care about\n\tdf = df[(df[\"City\"] + df[\"State\"]).isin(locations)]\n\t# Change data types\n\tdf[['City', 'State']] = df[['City', 'State']].astype('str', copy=False)\n\tfor c in df.columns[3:]:\n\t df[c] = df[c].astype('float64', copy=False)\n\t# print(df.columns)\n\t# print(df.describe())\n\t# print(df.dtypes)\n\t# fixing column names to match the airbnb data\n\tdf.columns = map(str.lower, df.columns)\n\tdf = df.rename(columns={'regionname':'zipcode'})\n\treturn df\n\n\n# INSERTING TABLES INTO DATABASE\ndef add_to_db(df, path_to_db, table_name):\n\tconn = 
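None  # (added) placeholder; the real connection opens on the next line\n\t# Added note: to_sql(if_exists=\"replace\") drops and recreates the table on every run; use\n\t# \"append\" instead to accumulate monthly snapshots.\n\tconn = 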
sqlite3.connect(path_to_db)\n\t# c = conn.cursor()\n\tdf.to_sql(table_name, conn, if_exists=\"replace\")\n\tconn.close()\n\n\ndef main():\n\tpath_hv = \"./data/zillow/Zip_Zhvi_AllHomes.csv\"\n\tpath_r = \"./data/zillow/Zip_Zri_AllHomesPlusMultifamily.csv\"\n\tpath_to_db = './data/housing.db'\n\tlocations = [\"BostonMA\", \"ChicagoIL\", \"San FranciscoCA\", \"New YorkNY\", \"NashvilleTN\", \"Los AngelesCA\", \"AustinTX\", \"SeattleWA\", \"DenverCO\", \"AshvilleNC\"]\n\n\tdf_hv = read_data(path_hv)\n\tprint(\"Finished reading data from {}\".format(path_hv))\n\tdf_r = read_data(path_r)\n\tprint(\"Finished reading data from {}\".format(path_r))\n\n\t'''\n\t\tDrop the following columns from df_hv\n\t\t0. Region ID\n\t\t4. Metro\n\t\t5. CoutryName\n\t\t6. SizeRank\n\t\t7. - 231. All data from months before 2015\n\t'''\n\tcols_to_drop_hv = [0] + list(range(4, 232))\n\t'''\n\t\tDrop the following columns\n\t\t0. Region ID\n\t\t4. Metro\n\t\t5. CoutryName\n\t\t6. SizeRank\n\t\t7. - 58. All data from months before 2015\n\t'''\n\tcols_to_drop_r = [0] + list(range(4, 59))\n\n\tdf_hv = process_zillow_data(df_hv, cols_to_drop_hv, locations)\n\tprint(\"Finished processing data from {}\".format(path_hv))\n\tdf_r = process_zillow_data(df_r, cols_to_drop_r, locations)\n\tprint(\"Finished processing data from {}\".format(path_r))\n\n\tadd_to_db(df_hv, path_to_db, \"zillow_zhvi\")\n\tprint(\"Finished adding to database from {}\".format(path_hv))\n\tadd_to_db(df_r, path_to_db, \"zillow_zri\")\n\tprint(\"Finished adding to database from {}\".format(path_r))\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"projects/project_data/Databnb/preprocess_zillow.py","file_name":"preprocess_zillow.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"645203458","text":"import json\n\nfrom django.db.models import Q\nfrom django.db.models.functions import Lower, Concat\nfrom django.forms import model_to_dict, modelformset_factory\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views import View\nfrom django.views.generic import CreateView, TemplateView, DeleteView, UpdateView\n\nfrom system.forms import MenuUpdateForm, RoleForm\nfrom system.models import Menu, Role, User\nfrom utils.Paginator import paginate\nfrom django.http import JsonResponse\nfrom django.views.generic import TemplateView, CreateView, ListView\n\n# Create your views here.\nfrom system.models import Menu\n\n\nclass RoleMainView(TemplateView):\n template_name = 'system/role.html'\n\n\nclass RoleCreateView(CreateView):\n model = Role\n fields = '__all__'\n\n def post(self, request, *args, **kwargs):\n code = 1\n msg = '添加失败'\n form = self.get_form()\n if form.is_valid():\n form.save()\n code = 0\n msg = '添加成功'\n print(form.is_valid())\n ret = dict(code=code, msg=msg)\n return JsonResponse(ret)\n\n\nclass RoleListView(View):\n def get(self, request):\n code = 1\n msg = '获取失败'\n total = 0\n result = []\n try:\n code = 0\n msg = '获取成功'\n fields = ['id', 'name', 'desc']\n data = Role.objects.values(*fields)\n total = len(data)\n result = paginate(request, data)\n except Exception as e:\n msg = str(e)\n ret = dict(code=code, msg=msg, count=total, data=result)\n return JsonResponse(ret)\n\n\nclass RoleDeleteView(DeleteView):\n def post(self, request):\n code, msg = 0, \"删除成功\"\n data = request.POST.get('ids', None)\n if data:\n data = json.loads(data)\n role = Role.objects.filter(id__in=data)\n role.delete()\n 
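# Added note: QuerySet.delete() returns a (total_deleted, per_model_counts) tuple; capturing\n        # it would let the response report how many rows were removed, e.g.\n        #   deleted, _ = Role.objects.filter(id__in=data).delete()\n        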
else:\n code, msg = 1, \"删除失败\"\n ret = dict(code=code, msg=msg)\n return JsonResponse(ret)\n\n\nclass RoleUpdateView(View):\n def post(self, request):\n code, msg = 0, '更新成功'\n try:\n role = Role.objects.get(id=request.POST.get('id', None))\n if role:\n form = RoleForm(request.POST)\n form.is_valid()\n form.instance.id = role.id\n form.instance.save()\n else:\n raise ValueError('id 为{}的角色不存在'.format(id))\n except ValueError as e:\n code, msg = 1, str(e)\n ret = dict(code=code, msg=msg)\n return JsonResponse(ret)\n\n\nclass RoleBindUserView(View):\n def get(self, request):\n code, msg = 0, '用户获取成功'\n data = None\n try:\n roleid = request.GET.get('roleid', None)\n inrole = request.GET.get('inrole', None)\n fields = ['id', 'username', 'nickname']\n if roleid is not None and inrole is not None:\n role = Role.objects.get(id=int(roleid))\n if inrole == 'false':\n data = list(User.objects.values(*fields).filter(~Q(role=role)).all())\n elif inrole == 'true':\n data = list(User.objects.values(*fields).filter(role=role).all())\n else:\n data = list(User.objects.values(*fields).all())\n except User.DoesNotExist as e:\n print(e)\n code, msg = 1, 'id 为{}的角色不存在'.format(roleid)\n ret = dict(code=code, msg=msg, data=data)\n return JsonResponse(ret)\n\n def post(self, request):\n code, msg = 0, '角色关联成功'\n try:\n ids = json.loads(request.POST.get('ids'))\n roleid = request.POST.get('roleid')\n if ids and roleid:\n role = Role.objects.get(id=int(roleid))\n user = User.objects.filter(id__in=ids).all()\n role.user_set.set(user)\n role.save()\n elif len(ids) <= 0 and roleid:\n role = Role.objects.get(id=int(roleid))\n role.user_set.set([])\n role.save()\n else:\n code = 1\n msg = '关联失败'\n except Role.DoesNotExist as e:\n print(e)\n code, msg = 1, '系统内部错误'\n ret = dict(code=code, msg=msg)\n return JsonResponse(ret)\n\n\nclass RoleBindMenuView(View):\n def get(self, request):\n try:\n role = Role.objects.get(id=int(request.GET.get('id')))\n menus = list(map(lambda x: x['id'], role.permissions.values('id')))\n result = to_tree(None, Menu, 1, menus=menus)\n except ValueError as e:\n result = []\n return JsonResponse(result, safe=False)\n\n def post(self, request):\n code = 0\n msg = '授权成功'\n ids = request.POST.get('ids', None)\n if ids:\n role = Role.objects.get(id=request.POST.get('id'))\n ids = json.loads(ids)\n menus = Menu.objects.filter(id__in=ids)\n role.permissions.clear()\n role.permissions.set(menus)\n role.save()\n\n else:\n code = 1\n msg = '参数错误'\n ret = dict(code=code, msg=msg)\n return JsonResponse(ret)\n\n\ndef to_tree(parent, model, level=1, fields=['id'], menus=[]):\n result = []\n for item in model.objects.filter(parent=parent):\n data = model_to_dict(item, fields=fields)\n data['label'] = item.name\n if level > 4:\n break\n elif level < 4:\n tmp = to_tree(item, model, level + 1, menus=menus)\n if len(tmp) == 0 and data['id'] in menus:\n data['checked'] = 'true'\n data['children'] = tmp\n else:\n tmp = []\n for item2 in model.objects.filter(parent=item).values(*fields, label=Lower('name')):\n print('run')\n if item2['id'] in menus:\n item2['checked'] = 'true'\n tmp.append(item2)\n if data['id'] in menus:\n data['checked'] = 'true'\n data['children'] = tmp\n result.append(data)\n return result\n","sub_path":"apps/system/views_role.py","file_name":"views_role.py","file_ext":"py","file_size_in_byte":6265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"122613218","text":"\n\"\"\"\nCreated on Thu Jun 11 13:32:41 2020\n\n@author: 
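aristizabal\n\nAdded summary: plots side-by-side temperature and salinity profiles from GOFS 3.1, RTOFS and\nCopernicus (CMEMS) against Argo float profiles found inside the given lon/lat box, and maps the\nfloats against the storm's forecast track and cone.\n\n@author: 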
aristizabal\n\"\"\"\ndef GOFS_RTOFS_vs_Argo_floats(lon_forec_track,lat_forec_track,lon_forec_cone,lat_forec_cone,lon_best_track,lat_best_track,lon_lim,lat_lim,folder_fig):\n #%% User input\n \n #GOFS3.1 output model location\n url_GOFS_ts = 'http://tds.hycom.org/thredds/dodsC/GLBy0.08/expt_93.0/ts3z'\n \n \n # RTOFS files\n folder_RTOFS = '/home/coolgroup/RTOFS/forecasts/domains/hurricanes/RTOFS_6hourly_North_Atlantic/'\n \n nc_files_RTOFS = ['rtofs_glo_3dz_f006_6hrly_hvr_US_east.nc',\\\n 'rtofs_glo_3dz_f012_6hrly_hvr_US_east.nc',\\\n 'rtofs_glo_3dz_f018_6hrly_hvr_US_east.nc',\\\n 'rtofs_glo_3dz_f024_6hrly_hvr_US_east.nc']\n \n # COPERNICUS MARINE ENVIRONMENT MONITORING SERVICE (CMEMS)\n url_cmems = 'http://nrt.cmems-du.eu/motu-web/Motu'\n service_id = 'GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS'\n product_id = 'global-analysis-forecast-phy-001-024'\n depth_min = '0.493'\n out_dir = '/home/aristizabal/crontab_jobs'\n \n # Bathymetry file\n #bath_file = '/Users/aristizabal/Desktop/MARACOOS_project/Maria_scripts/nc_files/GEBCO_2014_2D_-100.0_0.0_-60.0_45.0.nc' \n bath_file = '/home/aristizabal/bathymetry_files/GEBCO_2014_2D_-100.0_0.0_-10.0_50.0.nc'\n \n # Argo floats\n url_Argo = 'http://www.ifremer.fr/erddap'\n \n #%%\n \n from matplotlib import pyplot as plt\n import numpy as np\n import xarray as xr\n import netCDF4 \n from datetime import datetime, timedelta\n import cmocean\n import matplotlib.dates as mdates\n from erddapy import ERDDAP\n import pandas as pd\n import os\n \n # Do not produce figures on screen\n plt.switch_backend('agg')\n \n # Increase fontsize of labels globally\n plt.rc('xtick',labelsize=14)\n plt.rc('ytick',labelsize=14)\n plt.rc('legend',fontsize=14)\n \n #%% Reading bathymetry data\n \n ncbath = xr.open_dataset(bath_file)\n bath_lat = ncbath.variables['lat'][:]\n bath_lon = ncbath.variables['lon'][:]\n bath_elev = ncbath.variables['elevation'][:]\n \n oklatbath = np.logical_and(bath_lat >= lat_lim[0],bath_lat <= lat_lim[-1])\n oklonbath = np.logical_and(bath_lon >= lon_lim[0],bath_lon <= lon_lim[-1])\n \n bath_latsub = bath_lat[oklatbath]\n bath_lonsub = bath_lon[oklonbath]\n bath_elevs = bath_elev[oklatbath,:]\n bath_elevsub = bath_elevs[:,oklonbath] \n \n #%% Get time bounds for current day\n #ti = datetime.today()\n ti = datetime.today() - timedelta(1) - timedelta(hours=6)\n tini = datetime(ti.year,ti.month,ti.day)\n te = ti + timedelta(2)\n tend = datetime(te.year,te.month,te.day)\n\n #%% Look for Argo datasets \n \n e = ERDDAP(server = url_Argo)\n \n # Grab every dataset available\n #datasets = pd.read_csv(e.get_search_url(response='csv', search_for='all'))\n \n kw = {\n 'min_lon': lon_lim[0],\n 'max_lon': lon_lim[1],\n 'min_lat': lat_lim[0],\n 'max_lat': lat_lim[1],\n 'min_time': str(tini),\n 'max_time': str(tend),\n }\n \n search_url = e.get_search_url(response='csv', **kw)\n \n # Grab the results\n search = pd.read_csv(search_url)\n \n # Extract the IDs\n dataset = search['Dataset ID'].values\n \n msg = 'Found {} Datasets:\\n\\n{}'.format\n print(msg(len(dataset), '\\n'.join(dataset)))\n \n dataset_type = dataset[0]\n \n constraints = {\n 'time>=': str(tini),\n 'time<=': str(tend),\n 'latitude>=': lat_lim[0],\n 'latitude<=': lat_lim[1],\n 'longitude>=':lon_lim[0],\n 'longitude<=': lon_lim[1],\n }\n \n variables = [\n 'platform_number', \n 'time',\n 'pres',\n 'longitude',\n 'latitude', \n 'temp',\n 'psal',\n ]\n \n e = ERDDAP(\n server = url_Argo,\n protocol = 'tabledap',\n response = 'nc'\n )\n \n e.dataset_id = dataset_type\n e.constraints=constraints\n 
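# Added note: this ERDDAP tabledap request bounds time/lat/lon via constraints and selects the\n    # returned columns via variables; the .dropna() applied to the result silently drops any\n    # profile row missing one of those fields.\n    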
e.variables=variables\n \n print(e.get_download_url())\n \n df = e.to_pandas(\n parse_dates=True,\n skiprows=(1,) # units information can be dropped.\n ).dropna()\n \n argo_ids = np.asarray(df['platform_number'])\n argo_times = np.asarray(df['time (UTC)'])\n argo_press = np.asarray(df['pres (decibar)'])\n argo_lons = np.asarray(df['longitude (degrees_east)'])\n argo_lats = np.asarray(df['latitude (degrees_north)'])\n argo_temps = np.asarray(df['temp (degree_Celsius)'])\n argo_salts = np.asarray(df['psal (PSU)'])\n \n #%% GOGF 3.1\n \n try:\n GOFS_ts = xr.open_dataset(url_GOFS_ts,decode_times=False)\n \n lt_GOFS = np.asarray(GOFS_ts['lat'][:])\n ln_GOFS = np.asarray(GOFS_ts['lon'][:])\n tt = GOFS_ts['time']\n t_GOFS = netCDF4.num2date(tt[:],tt.units) \n depth_GOFS = np.asarray(GOFS_ts['depth'][:])\n except Exception as err:\n print(err)\n GOFS_ts = np.nan\n lt_GOFS = np.nan\n ln_GOFS = np.nan\n depth_GOFS = np.nan\n t_GOFS = ti\n \n #%% Map Argo floats\n \n lev = np.arange(-9000,9100,100)\n plt.figure()\n plt.contourf(bath_lonsub,bath_latsub,bath_elevsub,lev,cmap=cmocean.cm.topo) \n plt.plot(lon_forec_track,lat_forec_track,'.-',color='gold')\n plt.plot(lon_forec_cone,lat_forec_cone,'.-b',markersize=1)\n plt.plot(lon_best_track,lat_best_track,'or',markersize=3)\n \n argo_idd = np.unique(argo_ids)\n for i,id in enumerate(argo_idd): \n okind = np.where(argo_ids == id)[0]\n plt.plot(np.unique(argo_lons[okind]),np.unique(argo_lats[okind]),'s',color='darkorange',markersize=5,markeredgecolor='k')\n \n plt.title('Argo Floats ' + str(tini)[0:13]+'-'+str(tend)[0:13],fontsize=16)\n plt.axis('scaled')\n plt.xlim(lon_lim[0],lon_lim[1])\n plt.ylim(lat_lim[0],lat_lim[1])\n \n file = folder_fig + 'ARGO_lat_lon'\n #file = folder_fig + 'ARGO_lat_lon_' + str(np.unique(argo_times)[0])[0:10]\n plt.savefig(file,bbox_inches = 'tight',pad_inches = 0.1) \n\n #%% Figure argo float vs GOFS and vs RTOFS\n \n argo_idd = np.unique(argo_ids)\n \n for i,id in enumerate(argo_idd): \n print(id)\n okind = np.where(argo_ids == id)[0]\n argo_time = np.asarray([datetime.strptime(t,'%Y-%m-%dT%H:%M:%SZ') for t in argo_times[okind]])\n \n argo_lon = argo_lons[okind]\n argo_lat = argo_lats[okind]\n argo_pres = argo_press[okind]\n argo_temp = argo_temps[okind]\n argo_salt = argo_salts[okind]\n \n # GOFS\n print('Retrieving variables from GOFS')\n if isinstance(GOFS_ts,float): \n temp_GOFS = np.nan\n salt_GOFS = np.nan\n else:\n #oktt_GOFS = np.where(t_GOFS >= argo_time[0])[0][0]\n ttGOFS = np.asarray([datetime(t_GOFS[i].year,t_GOFS[i].month,t_GOFS[i].day,t_GOFS[i].hour) for i in np.arange(len(t_GOFS))])\n tstamp_GOFS = [mdates.date2num(ttGOFS[i]) for i in np.arange(len(ttGOFS))]\n oktt_GOFS = np.unique(np.round(np.interp(mdates.date2num(argo_time[0]),tstamp_GOFS,np.arange(len(tstamp_GOFS)))).astype(int))[0]\n oklat_GOFS = np.where(lt_GOFS >= argo_lat[0])[0][0]\n oklon_GOFS = np.where(ln_GOFS >= argo_lon[0]+360)[0][0]\n temp_GOFS = np.asarray(GOFS_ts['water_temp'][oktt_GOFS,:,oklat_GOFS,oklon_GOFS])\n salt_GOFS = np.asarray(GOFS_ts['salinity'][oktt_GOFS,:,oklat_GOFS,oklon_GOFS])\n \n # RTOFS \n #Time window\n year = int(argo_time[0].year)\n month = int(argo_time[0].month)\n day = int(argo_time[0].day)\n tini = datetime(year, month, day)\n tend = tini + timedelta(days=1)\n \n # Read RTOFS grid and time\n print('Retrieving coordinates from RTOFS')\n \n if tini.month < 10:\n if tini.day < 10:\n fol = 'rtofs.' + str(tini.year) + '0' + str(tini.month) + '0' + str(tini.day)\n else:\n fol = 'rtofs.' 
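+ ''  # (added) placeholder; the real value is assigned just below\n                # Added note: all four zero-padding branches reduce to\n                #   fol = 'rtofs.' + tini.strftime('%Y%m%d')\n                # since strftime pads month and day to two digits.\n                fol = 'rtofs.' 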
+ str(tini.year) + '0' + str(tini.month) + str(tini.day)\n else:\n if tini.day < 10:\n fol = 'rtofs.' + str(tini.year) + str(tini.month) + '0' + str(tini.day)\n else:\n fol = 'rtofs.' + str(tini.year) + str(tini.month) + str(tini.day)\n \n ncRTOFS = xr.open_dataset(folder_RTOFS + fol + '/' + nc_files_RTOFS[0])\n latRTOFS = np.asarray(ncRTOFS.Latitude[:])\n lonRTOFS = np.asarray(ncRTOFS.Longitude[:])\n depth_RTOFS = np.asarray(ncRTOFS.Depth[:])\n \n tRTOFS = []\n for t in np.arange(len(nc_files_RTOFS)):\n ncRTOFS = xr.open_dataset(folder_RTOFS + fol + '/' + nc_files_RTOFS[t])\n tRTOFS.append(np.asarray(ncRTOFS.MT[:])[0])\n \n tRTOFS = np.asarray([mdates.num2date(mdates.date2num(tRTOFS[t])) \\\n for t in np.arange(len(nc_files_RTOFS))])\n \n oktt_RTOFS = np.where(mdates.date2num(tRTOFS) >= mdates.date2num(argo_time[0]))[0][0]\n oklat_RTOFS = np.where(latRTOFS[:,0] >= argo_lat[0])[0][0]\n oklon_RTOFS = np.where(lonRTOFS[0,:] >= argo_lon[0])[0][0]\n \n nc_file = folder_RTOFS + fol + '/' + nc_files_RTOFS[oktt_RTOFS]\n ncRTOFS = xr.open_dataset(nc_file)\n #time_RTOFS = tRTOFS[oktt_RTOFS]\n temp_RTOFS = np.asarray(ncRTOFS.variables['temperature'][0,:,oklat_RTOFS,oklon_RTOFS])\n salt_RTOFS = np.asarray(ncRTOFS.variables['salinity'][0,:,oklat_RTOFS,oklon_RTOFS])\n #lon_RTOFS = lonRTOFS[0,oklon_RTOFS]\n #lat_RTOFS = latRTOFS[oklat_RTOFS,0]\n \n # Downloading and reading Copernicus output\n motuc = 'python -m motuclient --motu ' + url_cmems + \\\n ' --service-id ' + service_id + \\\n ' --product-id ' + product_id + \\\n ' --longitude-min ' + str(argo_lon[0]-2/12) + \\\n ' --longitude-max ' + str(argo_lon[0]+2/12) + \\\n ' --latitude-min ' + str(argo_lat[0]-2/12) + \\\n ' --latitude-max ' + str(argo_lat[0]+2/12) + \\\n ' --date-min ' + '\"' + str(tini-timedelta(0.5)) + '\"' + \\\n ' --date-max ' + '\"' + str(tend+timedelta(0.5)) + '\"' + \\\n ' --depth-min ' + depth_min + \\\n ' --depth-max ' + str(np.nanmax(argo_pres)+1000) + \\\n ' --variable ' + 'thetao' + ' ' + \\\n ' --variable ' + 'so' + ' ' + \\\n ' --out-dir ' + out_dir + \\\n ' --out-name ' + str(id) + '.nc' + ' ' + \\\n ' --user ' + 'maristizabalvar' + ' ' + \\\n ' --pwd ' + 'MariaCMEMS2018'\n \n os.system(motuc)\n # Check if file was downloaded\n\n COP_file = out_dir + '/' + str(id) + '.nc'\n # Check if file was downloaded\n resp = os.system('ls ' + out_dir +'/' + str(id) + '.nc')\n if resp == 0:\n COP = xr.open_dataset(COP_file)\n\n latCOP = np.asarray(COP.latitude[:])\n lonCOP = np.asarray(COP.longitude[:])\n depth_COP = np.asarray(COP.depth[:])\n tCOP = np.asarray(mdates.num2date(mdates.date2num(COP.time[:])))\n else:\n latCOP = np.empty(1)\n latCOP[:] = np.nan\n lonCOP = np.empty(1)\n lonCOP[:] = np.nan\n tCOP = np.empty(1)\n tCOP[:] = np.nan\n\n oktimeCOP = np.where(mdates.date2num(tCOP) >= mdates.date2num(tini))[0][0]\n oklonCOP = np.where(lonCOP >= argo_lon[0])[0][0]\n oklatCOP = np.where(latCOP >= argo_lat[0])[0][0]\n \n temp_COP = np.asarray(COP.variables['thetao'][oktimeCOP,:,oklatCOP,oklonCOP])\n salt_COP = np.asarray(COP.variables['so'][oktimeCOP,:,oklatCOP,oklonCOP])\n \n # Figure temp\n plt.figure(figsize=(5,6))\n plt.plot(argo_temp,-argo_pres,'.-',linewidth=2,label='ARGO Float id '+str(id))\n plt.plot(temp_GOFS,-depth_GOFS,'.-',linewidth=2,label='GOFS 3.1',color='red')\n plt.plot(temp_RTOFS,-depth_RTOFS,'.-',linewidth=2,label='RTOFS',color='g')\n plt.plot(temp_COP,-depth_COP,'.-',linewidth=2,label='Copernicus',color='darkorchid')\n plt.ylim([-1000,0])\n plt.title('Temperature Profile on '+ str(argo_time[0])[0:13] +\n '\\n 
[lon,lat] = [' \\\n + str(np.round(argo_lon[0],3)) +',' +\\\n str(np.round(argo_lat[0],3))+']',\\\n fontsize=16)\n plt.ylabel('Depth (m)',fontsize=14)\n plt.xlabel('$^oC$',fontsize=14)\n plt.legend(loc='lower right',fontsize=14)\n \n file = folder_fig + 'ARGO_vs_GOFS_RTOFS_COP_temp_' + str(id) \n plt.savefig(file,bbox_inches = 'tight',pad_inches = 0.1) \n \n # Figure salt\n plt.figure(figsize=(5,6))\n plt.plot(argo_salt,-argo_pres,'.-',linewidth=2,label='ARGO Float id '+str(id))\n plt.plot(salt_GOFS,-depth_GOFS,'.-',linewidth=2,label='GOFS 3.1',color='red')\n plt.plot(salt_RTOFS,-depth_RTOFS,'.-',linewidth=2,label='RTOFS',color='g')\n plt.plot(salt_COP,-depth_COP,'.-',linewidth=2,label='Copernicus',color='darkorchid')\n plt.ylim([-1000,0])\n plt.title('Salinity Profile on '+ str(argo_time[0])[0:13] +\n '\\n [lon,lat] = [' \\\n + str(np.round(argo_lon[0],3)) +',' +\\\n str(np.round(argo_lat[0],3))+']',\\\n fontsize=16)\n plt.ylabel('Depth (m)',fontsize=14)\n plt.legend(loc='lower right',fontsize=14)\n \n file = folder_fig + 'ARGO_vs_GOFS_RTOFS_COP_salt_' + str(id)\n plt.savefig(file,bbox_inches = 'tight',pad_inches = 0.1) \n\n\n\n","sub_path":"GOFS_RTOFS_oper_vs_Argo_floats_baffin.py","file_name":"GOFS_RTOFS_oper_vs_Argo_floats_baffin.py","file_ext":"py","file_size_in_byte":13514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"411412523","text":"import json\nimport random\n\nwith open('sample2.json', 'r') as f:\n data = json.load(f)\n\nwith open('users_dict.json', 'r') as f:\n users_dict = json.load(f)\n\nold_nodes = data['nodes']\nold_edges = data['links']\n\nnodes = []\nedges = []\ncounter = 0\n\ncolors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3']\n\nfor node in old_nodes:\n nodes.append({\n 'group': node['group'],\n 'id': str(node['name']),\n 'x': random.randint(0, 10000), # To be calculated in the API based on cluster, and filter options\n 'y': random.randint(0, 10000), # To be calculated in the API end based on cluster, and filter options\n 'color': colors[random.randint(0, 3)], # Pick from colors, based on cluster (just random right now)\n 'size': 0.1,\n 'label': 'screen name: %s | isis group: %s | following count: %s' % (users_dict[str(node['name'])]['screen_name'],\n users_dict[str(node['name'])]['isis'],\n users_dict[str(node['name'])]['following_count']),\n })\n\nfor edge in old_edges:\n counter += 1\n edges.append({\n 'source': str(edge['source']),\n 'target': str(edge['target']),\n 'id': str(counter),\n 'size': 0.01,\n 'type': 'curve'\n })\n\nnewData = {'nodes': nodes, 'edges': edges}\n\nwith open('sample3.json', 'w') as f:\n json.dump(newData, f)\n f.close()","sub_path":"analysis/sample_transform.py","file_name":"sample_transform.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"508013158","text":"# Inspired by r/dailyprogrammer\n# https://www.reddit.com/r/dailyprogrammer/comments/5bn0b7/20161107_challenge_291_easy_goldilocks_bear/\n\n\ndef determine_acceptable_seats(path):\n with open(path, 'r') as data:\n weight, max_temperature = map(int, data.readline().split(\" \"))\n line_index = 1\n for line in data:\n max_weight, temperature = map(int, line.split(\" \"))\n if (max_weight >= weight) and (max_temperature >= temperature):\n yield line_index\n line_index += 1\n\nif __name__ == \"__main__\":\n print(', '.join(str(x) for x in 
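determine_acceptable_seats(\"seats.txt\")))\n    # Added note: the generator yields 1-based line numbers of chairs whose max weight and\n    # temperature both accommodate the (weight, max_temperature) pair on the file's first line.\n    # The duplicate call below is intentionally unreachable:\n    if False:\n        print(', '.join(str(x) for x in 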
determine_acceptable_seats(\"seats.txt\")))\n","sub_path":"DailyProgrammer/Python/goldilocks/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"206644620","text":"import sys, retro\nfrom PyQt5.QtGui import QImage, QPixmap, QPainter, QPen, QBrush, QColor\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel\nimport numpy as np\n\nclass MyApp(QWidget):\n def __init__(self):\n super().__init__()\n # 게임 환경 생성\n self.env = retro.make(game='SuperMarioBros-Nes', state='Level1-1')\n # 새게임 시작\n self.env.reset()\n\n self.screen_size = 2\n\n # 키배열 : B, NULL, SELECT, START, Up, Down, Left, Right, A\n self.button = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n self.game_speed = 60\n\n # 화면 가져오기\n screen = self.env.get_screen()\n\n # 화면 크기\n self.width = screen.shape[0]\n self.height = screen.shape[1]\n\n # 창 크기 고정\n self.setFixedSize(self.width * self.screen_size + 600, self.height * self.screen_size)\n # 창 제목 설정\n self.setWindowTitle('GA-Mario')\n\n self.label_image = QLabel(self)\n self.label_image.setGeometry(0, 0, self.width * self.screen_size, self.height * self.screen_size)\n\n # 타이머 생성\n self.qtimer = QTimer(self)\n # 타이머에 실행할 함수 연결\n self.qtimer.timeout.connect(self.timer)\n # 1초(1000밀리초)마다 연결된 함수를 실행\n self.qtimer.start(1000//self.game_speed)\n\n # 창 띄우기\n self.show()\n\n\n def timer(self):\n self.env.step(np.array(self.button))\n\n # 화면 가져오기\n screen = self.env.get_screen()\n\n image = np.array(screen)\n qimage = QImage(image, image.shape[1], image.shape[0], QImage.Format_RGB888)\n pixmap = QPixmap(qimage)\n pixmap = pixmap.scaled(self.width * self.screen_size, self.height * self.screen_size, Qt.IgnoreAspectRatio)\n\n self.label_image.setPixmap(pixmap)\n\n self.update()\n\n\n\n def paintEvent(self,event):\n # 그리기 도구\n painter = QPainter()\n # 그리기 시작\n painter.begin(self)\n\n # 램\n ram = self.env.get_ram()\n\n ## 모든 화면의 타일 표시\n full_screen_tiles = ram[0x0500:0x069F + 1]\n\n full_screen_tile_count = full_screen_tiles.shape[0]\n\n full_screen_page1_tile = full_screen_tiles[:full_screen_tile_count // 2].reshape((13, 16))\n full_screen_page2_tile = full_screen_tiles[full_screen_tile_count // 2:].reshape((13, 16))\n\n full_screen_tiles = np.concatenate((full_screen_page1_tile, full_screen_page2_tile), axis=1).astype(np.int)\n\n ## 적의 타일 표시(풀스크린(모든화면 타일)에서 표시)\n\n # 0x000F-0x0013\tEnemy drawn? 
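Max 5 enemies at once.\n        # Added note: enemy level x below is page*256 + screen_x; the %512 folds it into the\n        # two-page (2 x 16 tile) buffer read from 0x0500-0x069F before the //16 tile conversion.\n        # 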
Max 5 enemies at once.\n # 0 - No\n # 1 - Yes (not so much drawn as \"active\" or something)\n enemy_drawn = ram[0x000F:0x0013 + 1]\n\n # 0x006E-0x0072\tEnemy horizontal position in level\n # 자신이 속한 화면 페이지 번호\n enemy_horizon_position = ram[0x006E:0x0072 + 1]\n\n # 0x0087-0x008B\tEnemy x position on screen\n # 자신이 속한 페이지 속 x 좌표\n enemy_screen_position_x = ram[0x0087:0x008B + 1]\n\n # 0x00CF-0x00D3\tEnemy y pos on screen\n enemy_position_y = ram[0x00CF:0x00D3 + 1]\n\n # 적 x 좌표\n enemy_position_x = (enemy_horizon_position * 256 + enemy_screen_position_x) % 512\n\n # 적 타일 좌표\n enemy_tile_position_x = enemy_position_x // 16\n enemy_tile_position_y = (enemy_position_y - 8) // 16 - 1\n\n for i in range(len(enemy_drawn)):\n if enemy_drawn[i] == 1:\n print(enemy_tile_position_x)\n full_screen_tiles[enemy_tile_position_y[i]][enemy_tile_position_x[i]] = -1\n\n for i in range(full_screen_tiles.shape[0]):\n for j in range(full_screen_tiles.shape[1]):\n if full_screen_tiles[i][j] == 0:\n painter.setBrush(QBrush(Qt.lightGray))\n painter.drawRect(self.width * self.screen_size + 16 * j, 0 + 16 * i, 16, 16)\n\n elif full_screen_tiles[i][j] == -1:\n painter.setBrush(QBrush(Qt.red))\n painter.drawRect(self.width * self.screen_size + 16 * j, 0 + 16 * i, 16, 16)\n\n else:\n painter.setBrush(QBrush(Qt.cyan))\n painter.drawRect(self.width * self.screen_size + 16 * j, 0 + 16 * i, 16, 16)\n\n ## 현재 화면의 타일 표시\n\n # 현재 화면이 속한 페이지 번호\n current_screen_page = ram[0x071A]\n\n # 페이지 속 현재 화면 위치\n screen_position = ram[0x071C]\n\n # 화면 오프셋\n screen_offset = (256 * current_screen_page + screen_position) % 512\n\n # 타일 화면 오프셋\n screen_tile_offset = screen_offset // 16\n\n # 현재 화면 추출\n screen_tiles = np.concatenate((full_screen_tiles, full_screen_tiles), axis=1)[:, screen_tile_offset:screen_tile_offset + 16]\n\n for i in range(screen_tiles.shape[0]):\n for j in range(screen_tiles.shape[1]):\n if screen_tiles[i][j] == 0:\n painter.setBrush(QBrush(Qt.lightGray))\n painter.drawRect(self.width * self.screen_size + 16 * j, 250 + 16 * i, 16, 16)\n\n elif screen_tiles[i][j] == -1:\n painter.setBrush(QBrush(Qt.red))\n painter.drawRect(self.width * self.screen_size + 16 * j, 250 + 16 * i, 16, 16)\n\n else:\n painter.setBrush(QBrush(Qt.cyan))\n painter.drawRect(self.width * self.screen_size + 16 * j, 250 + 16 * i, 16, 16)\n\n ## 플레이어의 타일 표시(현재 화면의 타일에 표시)\n\n # 0x03AD\tPlayer x pos within current screen offset\n # 현재 화면 속 플레이어 x 좌표\n player_position_x = ram[0x03AD]\n # 0x03B8\tPlayer y pos within current screen\n # 현재 화면 속 플레이어 y좌표\n player_position_y = ram[0x03B8]\n\n # 타일 좌표로 변환 (고해상도의 좌표를 타일에 표현하기 위해 16*16의 픽셀을 한 타일에 표현하기 위해서 나눠줌)\n player_tile_position_x = (player_position_x + 8) // 16\n player_tile_position_y = (player_position_y + 8) // 16 - 1\n\n painter.setBrush(QBrush(Qt.blue))\n painter.drawRect(self.width * self.screen_size + 16 * player_tile_position_x,250 + 16 * player_tile_position_y, 16, 16)\n\n\n\n\n\n\n\n\n def keyPressEvent(self, event):\n key = event.key()\n\n if key == Qt.Key_Up:\n self.button[4] = 1\n\n if key == Qt.Key_Down:\n self.button[5] = 1\n\n if key == Qt.Key_Left:\n self.button[6] = 1\n\n if key == Qt.Key_Right:\n self.button[7] = 1\n\n if key == Qt.Key_Z:\n self.button[8] = 1\n\n if key == Qt.Key_X:\n self.button[0] = 1\n\n if key == Qt.Key_N:\n self.button[2] = 1\n\n if key == Qt.Key_M:\n self.button[3] = 1\n\n # if key == Qt.Key_1:\n # self.screen_size -= 0.25\n # # 창 크기 고정\n # self.setFixedSize(self.width * self.screen_size, self.height * self.screen_size)\n # self.label_image.setGeometry(0, 0, 
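self.width * self.screen_size, self.height * self.screen_size)\n        # Added note: the raw key codes 46 and 44 handled further below are Qt.Key_Period and\n        # Qt.Key_Comma (speed up / slow down); the named constants would read better.\n        # self.label_image.setGeometry(0, 0, 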
self.width * self.screen_size, self.height * self.screen_size)\n # print(self.screen_size,\"size\")\n #\n # if key == Qt.Key_2:\n # self.screen_size += 0.25\n # # 창 크기 고정\n # self.setFixedSize(self.width * self.screen_size, self.height * self.screen_size)\n # self.label_image.setGeometry(0, 0, self.width * self.screen_size, self.height * self.screen_size)\n # print(self.screen_size, \"size\")\n\n if key == Qt.Key_R:\n self.env.reset()\n\n if key == 46:\n self.game_speed += 10\n if self.game_speed > 200:\n self.game_speed = 200\n self.qtimer.stop()\n self.qtimer.start(1000//self.game_speed)\n print(self.game_speed, \"speed\")\n\n if key == 44:\n self.game_speed -= 10\n if self.game_speed < 10:\n self.game_speed = 10\n self.qtimer.stop()\n self.qtimer.start(1000 // self.game_speed)\n print(self.game_speed, \"speed\")\n\n def keyReleaseEvent(self, event):\n key = event.key()\n\n if key == Qt.Key_Up:\n self.button[4] = 0\n\n if key == Qt.Key_Down:\n self.button[5] = 0\n\n if key == Qt.Key_Left:\n self.button[6] = 0\n\n if key == Qt.Key_Right:\n self.button[7] = 0\n\n if key == Qt.Key_Z:\n self.button[8] = 0\n\n if key == Qt.Key_X:\n self.button[0] = 0\n\n if key == Qt.Key_N:\n self.button[2] = 0\n\n if key == Qt.Key_M:\n self.button[3] = 0\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MyApp()\n sys.exit(app.exec_())","sub_path":"quest/Quest11(get_enemy_position).py","file_name":"Quest11(get_enemy_position).py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"399797534","text":"from .exc import *\n\nfrom typing import Generic, TypeVar, Union, Optional\nfrom threading import Lock, RLock, Condition\n\n\nT = TypeVar('T')\n\n\nclass Promise(Generic[T]):\n __value: T\n __error: Exception\n\n def __init__(self, lock: Union[Lock, RLock, None] = None):\n self.__cv = Condition(lock)\n self.__state = 0\n\n def set_result(self, value: T) -> None:\n with self.__cv:\n assert self.__state == 0\n self.__value = value\n self.__state = 1\n self.__cv.notify_all()\n\n def set_error(self, error: Exception) -> None:\n with self.__cv:\n assert self.__state == 0\n self.__error = error\n self.__state = 2\n self.__cv.notify_all()\n\n def get(self, timeout: Optional[float] = None) -> T:\n if self.__state == 0:\n with self.__cv:\n if not self.__cv.wait_for(lambda: self.__state != 0, timeout):\n raise TimeoutException()\n if self.__state != 1:\n raise self.__error\n return self.__value\n\n def cancel(self) -> None:\n self.set_error(CancelledException())\n\n @property\n def cancelled(self):\n return self.__state == 2 and isinstance(self.__error, CancelledException)\n\n @property\n def done(self):\n return self.__state != 0\n","sub_path":"concurrent/_promise.py","file_name":"_promise.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"191261121","text":"\"\"\"\nDjango settings for {{ project_name }} project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/\n\nglobal_settings.py, see\nhttps://github.com/django/django/blob/master/django/conf/global_settings.py\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nDJANGO_DIR = 
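os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# Added note: the {{ ... }} placeholders throughout this file mark it as a django-admin\n# startproject template; they are substituted when a new project is generated from it.\nDJANGO_DIR = 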
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nSITE_NAME = os.path.basename(BASE_DIR)\n\n####################\n# CORE #\n####################\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '{{ secret_key }}'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'django.contrib.humanize',\n\n 'storages',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Asia/Tokyo'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nAPPEND_SLASH = False\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'media'))\n\n# URL that handles the media served from MEDIA_ROOT.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'static'))\n\n# URL that handles the static files served from STATIC_ROOT.\n# Static files (CSS, JavaScript, Images)\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\n# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/\nSTATIC_URL = '/static/'\n\n# List of locations of the template source files, in search order.\nTEMPLATE_DIRS = (\n os.path.normpath(os.path.join(BASE_DIR, 'templates')),\n)\n\n###############\n# STATICFILES #\n###############\n\n# A list of locations of additional static files\nSTATICFILES_DIRS = (\n os.path.normpath(os.path.join(BASE_DIR, 'assets')),\n)\n\n###########\n# STORAGE #\n###########\n\n# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings\nSTATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')\nAWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME', '')\nAWS_AUTO_CREATE_BUCKET = True\nAWS_QUERYSTRING_AUTH = False\n\n# AWS cache settings, don't change unless you know what you're doing:\nAWS_EXPIREY 
= 60 * 60 * 24 * 7\nAWS_HEADERS = {\n 'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (\n AWS_EXPIREY, AWS_EXPIREY)\n}\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nSTATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME\n\nEKUBO_DOMAIN = os.environ.get('EKUBO_DOMAIN', '')\n","sub_path":"apps/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"413071664","text":"#xml parser\n#import urllib for connecting to html page and download it in xml\n_copyright_='Copyright (c) 2016 Thodoris Vasilopoulos'\nfrom __future__ import print_function\nimport xml.etree.ElementTree as ET\nimport sys\nimport plotly\nfrom plotly.graph_objs import Scatter, Layout\nimport argparse\nimport csv\nfrom itertools import izip\n\nprint ('plotly version:',plotly.__version__) # version >1.9.4 required\nprint ('version >1.9.4 required')\nduration=list()\nduration.append('Duration(sec)')\nimpact=list()\nimpact.append('Impact(Gbps)')\ndate=list()\nalert_id=list()\nalert_id.append('Alert_ID')\n################argparse_module####################\nparser=argparse.ArgumentParser(usage='python [script filename][xml filename] [-h,--help] [-p,--plot] -c [user_filename]')\nparser.add_argument('filename')\nparser.add_argument('-c','--csv',help=' -c [user_filename] :extract output in csv file')\nparser.add_argument('-p','--plot',help='create additional plot outputs', action='store_true')\nargs = parser.parse_args()\n#print(args.csv)\nxml_to_parse = args.filename\nfile = open(xml_to_parse)\ntree=ET.parse(file)\nroot=tree.getroot()\n#print root[0].text=number of alerts to parse\nx=int(root[0].text)\n#print type(x)\nprint (\"number of alerts:\", x)\nprint ('')\nprint ('alert_id, start(UNIX time), start, duration(sec), impact(Gbps)')\ncount=1\nwhile count<=x:\n #printing alarm id, date,impact Gbps using python 3 print format to avoid spaces\n #between prints\n #.format(str) for formatted/alligned output\n sys.stdout.write('{:>8}'.format(root[count].attrib['id']))\n alert_id.append(root[count].attrib['id'])\n sys.stdout.write(', ')\n sys.stdout.write('{:>13}'.format(root[count].find('duration').attrib['start']))\n sys.stdout.write(', ')\n sys.stdout.write('{:>21}'.format(root[count].find('duration').attrib['start_ascii']))\n date.append(root[count].find('duration').attrib['start_ascii'])\n sys.stdout.write(', ')\n sys.stdout.write('{:>5}'.format(root[count].find('duration').attrib['length']))\n duration.append(int(root[count].find('duration').attrib['length']))\n sys.stdout.write(', ')\n if root[count].find('impact')==None:\n sys.stdout.write('{:^12}'.format('NA'),)\n impact.append('NA')\n count=count+1\n print ('')\n continue\n else:\n #sys.stdout.write('impact(in bps)'+str(root[count].find('impact').attrib['bps']))\n sys.stdout.write('{:<6}'.format(str(float(root[count].find('impact').attrib['bps'])/1000000000)),)\n impact.append(float(root[count].find('impact').attrib['bps'])/1000000000)\n print ('')\n count=count+1\n#split date and time\ndate1=list()\ndate1.append('Date')\ntime=list()\ntime.append('Time')\nfor val in date:\n\tvalue=val.split('T')\n\tdate1.append(value[0])\n\ttime.append(value[1])\n#print ('time', time)\n#print ('date',date1)\n############create .csv output############\nif args.csv is not None:\n\trows=izip(alert_id,date1,time,duration,impact)\n\twriter = csv.writer(open(args.csv,'wb'))\n\tfor row in 
rows:\n\t\twriter.writerow(row)\n#Use plotly.offline.plot() to create a standalone HTML\n#that is saved locally and opened inside your web browser.\n#create scatter plot (mode='markers')\nif args.plot is True:\n\tplotly.offline.plot({\n\t\"data\": [\n\t\tScatter(x=impact, y=duration, mode='markers')\n\t],\n\t\"layout\": Layout(\n\t\ttitle=\"Duration of attacks in relation with impact load\",yaxis=dict(title='Duration(secs)'),xaxis=dict(title='Impact load (Gbps)')\n\t)\n\t},filename='graph1.html')\n#create line plot of impact load per event\n\tdate2=list()\n\timpact2=list()\n\tcount=0\n\t#print ('lenimpact---->',len(impact))\n\t#print ('lendate----->',len(date))\n#extracting dates with \"NA\" results in impact and collecting all the other values\n#for date and impact\n\tfor count in range(len(date)):\n\t\tif impact[count + 1] != 'NA': # impact[0] is a header row, so impact leads date by one\n\t\t\timpact2.append(impact[count + 1])\n\t\t\tdate2.append(date[count])\n\t#print (date2)\n#creating the line plot\n\tplotly.offline.plot({\n\t\"data\": [\n\t\tScatter(x=date2,y=impact2)\n\t],\n\t\"layout\": Layout(\n\t\ttitle=\"Impact load per event\",yaxis=dict(title='Impact load (Gbps)'),xaxis=dict(title='datetime')\n\t)\n\t},filename='graph2.html')\n","sub_path":"python_arbor_parser/arbor_xml_parser_v3.py","file_name":"arbor_xml_parser_v3.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"3791003","text":"import numpy as np\nimport sympy\nfrom sympy.abc import _clash1\nimport warnings\nfrom bioscrape.types import Model\nimport libsbml\nfrom collections import OrderedDict # Need this to remove duplicates from lists\n\ndef read_model_from_sbml(sbml_file):\n return import_sbml(sbml_file)\n\ndef import_sbml(sbml_file, bioscrape_model = None, input_printout = False, **kwargs):\n \"\"\"\n Convert SBML model (in the sbml_file) to bioscrape Model object. Note that events, compartments, non-standard function definitions,\n and some kinds of rules will be ignored.\n Adds mass action kinetics based reactions with the appropriate mass action propensity in bioscrape.\n Propensities with the correct annotation are added as compiled propensity types.\n All other propensities are added as general propensity.\n Bioscrape delays are imported if SBML reaction has appropriate annotation.\n All SBML rules (except Algebraic) are imported. Bioscrape rule settings are imported as annotation.\n Local parameters are renamed if there is a conflict since bioscrape does not have a local environment.\n \"\"\"\n # Attempt to import libsbml and read the SBML model.\n try:\n import libsbml\n except:\n raise ImportError('libsbml not found. See sbml.org for installation help!\\n' +\n 'If you are using anaconda you can run the following:\\n' +\n 'conda install -c SBMLTeam python-libsbml\\n\\n\\n')\n\n reader = libsbml.SBMLReader()\n doc = reader.readSBML(sbml_file)\n errors = doc.getNumErrors()\n if errors > 0:\n err_message = doc.getErrorLog().toString()\n print(err_message)\n raise SyntaxError(\"SBML File {0} cannot be read without errors\".format(sbml_file))\n\n model = doc.getModel()\n if model is None:\n raise ValueError(\"SBML File {0} not found. 
Model could not be read.\".format(sbml_file))\n if 'sbml_warnings' in kwargs:\n sbml_warnings = kwargs.get('sbml_warnings')\n elif 'bioscrape' in model.getId() or 'biocrnpyler' in model.getId():\n sbml_warnings = False\n else:\n sbml_warnings = True\n # Parse through model components one by one\n allspecies = import_sbml_species(model)\n allparams = import_sbml_parameters(model)\n allreactions, allspecies = import_sbml_reactions(model, allspecies, allparams, input_printout, sbml_warnings)\n allrules, allreactions = import_sbml_rules(model, allspecies, allparams, allreactions, input_printout)\n # Check and warn if there are any unrecognized components (function definitions, packages, etc.)\n if len(model.getListOfCompartments()) > 0 or len(model.getListOfUnitDefinitions()) > 0 or len(model.getListOfEvents()) > 0: \n if sbml_warnings:\n warnings.warn('Compartments, UnitDefintions, Events, and some other SBML model components are not recognized by bioscrape. ' + \n 'Refer to the bioscrape wiki for more information.')\n #If no Model is passed into the function, a Model is returned\n if bioscrape_model == None:\n bioscrape_model = Model()\n #If a Model is passed into the function, that Model is modified\n if isinstance(bioscrape_model, Model):\n for species in allspecies.keys():\n if input_printout:\n print(\"Adding Species:\", species)\n bioscrape_model._add_species(species)\n\n for (param, val) in allparams.items():\n bioscrape_model._add_param(param)\n bioscrape_model.set_parameter(param, val)\n if input_printout:\n print(\"Adding Parameter:\", param, \"=\", val)\n\n for rxn in allreactions:\n if len(rxn) == 4:\n reactants, products, propensity_type, propensity_param_dict = rxn\n delay_type, delay_reactants, delay_products, delay_param_dict = [None]*4\n elif len(rxn) == 8:\n reactants, products, propensity_type, propensity_param_dict, delay_type, delay_reactants, delay_products, delay_param_dict = rxn\n else:\n raise SyntaxError(f\"Reaction tuple must have length 4 or 8. Found: {rxn}\")\n bioscrape_model.create_reaction(reactants, products, propensity_type, propensity_param_dict, \n delay_type, delay_reactants, delay_products, delay_param_dict, \n input_printout = input_printout)\n\n for rule in allrules:\n if len(rule) == 2:\n rule_type, rule_attributes = rule\n bioscrape_model.create_rule(rule_type, rule_attributes, input_printout = input_printout)\n elif len(rule) == 3:\n rule_type, rule_attributes, rule_frequency = rule\n bioscrape_model.create_rule(rule_type, rule_attributes, rule_frequency = rule_frequency, input_printout = input_printout)\n bioscrape_model.set_species(allspecies)\n bioscrape_model.py_initialize()\n return bioscrape_model\n else:\n raise ValueError(\"bioscrape_model keyword must be a Bioscrape Model object or None (in which case a Model object is returned).\")\n\ndef import_sbml_species(sbml_model):\n \"\"\"Import species from SBML model\n\n Args:\n sbml_model ([Model]): libsbml Model object\n\n Returns:\n [dict]: Dictionary of species identifiers with their initial concentration\n \"\"\"\n allspecies = {}\n for s in sbml_model.getListOfSpecies():\n sid = s.getId()\n if sid == \"volume\" or sid == \"t\":\n warnings.warn(\"You have defined a species called \" + sid +\n \". 
This species is being ignored and treated as a keyword.\")\n continue\n allspecies[sid] = 0.0\n if np.isfinite(s.getInitialAmount()):\n allspecies[sid] = s.getInitialAmount()\n if np.isfinite(s.getInitialConcentration()) and allspecies[sid] == 0:\n allspecies[sid] = s.getInitialConcentration()\n return allspecies\n\ndef import_sbml_parameters(sbml_model):\n \"\"\"Import parameters from SBML model\n\n Args:\n sbml_model ([Model]): libsbml Model object\n Returns:\n [dict]: Dictionary of parameter identifiers with their initial values\n \"\"\"\n allparams = {}\n for p in sbml_model.getListOfParameters():\n pid = p.getId()\n allparams[pid] = 0.0\n if np.isfinite(p.getValue()):\n allparams[pid] = p.getValue()\n return allparams\n\ndef import_sbml_reactions(sbml_model, allspecies, allparams, input_printout, sbml_warnings = True):\n \"\"\"Import reactions from SBML model\n\n Args:\n sbml_model ([Model]): libsbml Model object\n allspecies ([dict]): Species identifiers with their initial conditions\n allparams ([dict]): Parameter identifiers with their values\n input_printout ([bool]): Debug help input print out (True or False)\n\n Returns:\n List : List of all reaction tuples.\n Dict : Updated dict of allspecies.\n \"\"\"\n allreactions = []\n # Now go through reactions one at a time to get stoich and rates, then append to reaction_list.\n for reaction in sbml_model.getListOfReactions():\n # get the propensity\n kl = reaction.getKineticLaw()\n # capture any local parameters\n # also must save renamed local parameters to rename annotations later\n renamed_params = {}\n for p in kl.getListOfParameters():\n pid = p.getId()\n if pid in allparams:\n # If local parameter ID already exists in allparams due to another local/global parameter with same ID\n oldid = pid\n newid = oldid + '_' + reaction.getId()\n # Rename the ID everywhere it's used (such as in the Kinetic Law)\n kl.renameSIdRefs(oldid, newid)\n p.setId(newid)\n renamed_params[oldid] = newid #save the oldid-->newid mapping\n # Rename its usages\n for element in reaction.getListOfAllElements():\n element.renameSIdRefs(oldid, newid)\n pid = newid\n allparams[pid] = 0.0\n if np.isfinite(p.getValue()):\n allparams[pid] = p.getValue()\n # get the formula as a string \n math_ast = kl.getMath()\n if math_ast is None:\n raise ValueError(\"Could not import the rate law for reaction from SBML.\")\n kl_formula = libsbml.formulaToL3String(math_ast)\n rate_string = kl_formula\n if reaction.getReversible() and sbml_warnings:\n warnings.warn('SBML model contains reversible reaction!\\n' +\n 'Please check rate expressions and ensure they are non-negative before doing '+\n 'stochastic simulations.')\n\n #Get Reactants and Products\n reactant_list = []\n product_list = []\n for reactant in reaction.getListOfReactants():\n reactantspecies = sbml_model.getSpecies(reactant.getSpecies())\n reactantspecies_id = reactantspecies.getId()\n if reactantspecies_id in allspecies:\n if np.isfinite(reactant.getStoichiometry()):\n for i in range(int(reactant.getStoichiometry())):\n reactant_list.append(reactantspecies_id)\n else:\n reactant_list.append(reactantspecies_id)\n else:\n warnings.warn(('Reactant in reaction {0} not found in the list of species in the SBML model.'\n + ' The species will be added with zero initial amount').format(reaction.getId()))\n allspecies[reactantspecies_id] = 0.0\n if np.isfinite(reactant.getStoichiometry()):\n for i in range(int(reactant.getStoichiometry())):\n reactant_list.append(reactantspecies_id)\n else:\n 
reactant_list.append(reactantspecies_id)\n for product in reaction.getListOfProducts():\n productspecies = sbml_model.getSpecies(product.getSpecies())\n productspecies_id = productspecies.getId()\n if productspecies_id in allspecies:\n if np.isfinite(product.getStoichiometry()):\n for i in range(int(product.getStoichiometry())):\n product_list.append(productspecies_id)\n else:\n product_list.append(productspecies_id)\n else:\n warnings.warn(('Product in reaction {0} not found in the list of species in the SBML model.' +\n ' The species will be added with zero initial amount').format(reaction.getId()))\n allspecies[productspecies_id] = 0.0\n if np.isfinite(product.getStoichiometry()):\n for i in range(int(product.getStoichiometry())):\n product_list.append(productspecies_id)\n else:\n product_list.append(productspecies_id)\n\n #Identify propensities based upon annotations\n annotation_string = reaction.getAnnotationString()\n if \"PropensityType\" in annotation_string:\n ind0 = annotation_string.find(\"<PropensityType>\")\n ind1 = annotation_string.find(\"</PropensityType>\")\n if ind0 == -1 or ind1 == -1:\n # Annotation could not be read\n if input_printout:\n print('Annotation could not be read properly, adding reaction with general propensity.')\n propensity_params = {}\n propensity_params['type'] = 'general'\n propensity_params['rate'] = rate_string\n else:\n # propensity_definition = {}\n annotation_list = annotation_string[ind0:ind1].split(\" \")\n key_vals = [(i.split(\"=\")[0], i.split(\"=\")[1]) for i in annotation_list if \"=\" in i]\n propensity_params = {}\n for (k, v) in key_vals:\n #Change the name of a parameter if it was renamed earlier\n if v in renamed_params:\n v = renamed_params[v]\n try:\n propensity_params[k] = float(v)\n except ValueError:\n propensity_params[k] = v\n if input_printout:\n print(\"Reaction found:\", reactant_list, \"->\", product_list)\n print(\"Annotated propensity found with params:\", propensity_params)\n # rxn = (reactant_list, product_list, propensity_params['type'], propensity_params)\n else: #No annotation found\n propensity_params = {}\n propensity_params['type'] = 'general'\n propensity_params['rate'] = rate_string\n # rxn = (reactant_list, product_list, propensity_params['type'], propensity_params)\n if input_printout:\n print(\"Reaction found:\", reactant_list, \"->\", product_list)\n print(\"Propensity found with general ratestring:\", rate_string)\n \n # Identify delays from annotations\n if \"DelayType\" in annotation_string:\n ind0 = annotation_string.find(\"<DelayType>\")\n ind1 = annotation_string.find(\"</DelayType>\")\n if ind0 == -1 or ind1 == -1:\n # Annotation could not be read\n if input_printout:\n print('Annotation could not be read properly, adding reaction without delays.')\n # Add reaction without delays\n delay_type, delay_reactants, delay_products, delay_params = [None]*4\n else:\n # propensity_definition = {}\n annotation_list = annotation_string[ind0:ind1].split(\" \")\n key_vals = [(i.split(\"=\")[0], i.split(\"=\")[1]) for i in annotation_list if \"=\" in i]\n delay_params = {}\n for (k, v) in key_vals:\n #Change the name of a parameter if it was renamed earlier\n if v in renamed_params:\n v = renamed_params[v]\n if k == 'type':\n delay_type = v\n if k == 'reactants':\n delay_reactants = v.split(',')\n if k == 'products':\n delay_products = v.split(',')\n if k == 'mean':\n delay_params[k] = v \n if k == 'std':\n delay_params[k] = v \n if k == 'k':\n delay_params[k] = v \n if k == 'theta':\n delay_params[k] = v \n if k == 'delay':\n delay_params[k] = v \n if input_printout:\n 
print(\"Annotated delay found with params:\", delay_params)\n else:\n delay_type, delay_reactants, delay_products, delay_params = [None]*4\n delay_dict = {'type':delay_type, 'reactants':delay_reactants, \n 'products':delay_products, 'parameters':delay_params}\n if input_printout:\n print(\"Propensity attributes (importing reaction {0} -> {1}) are:{2}\".format(reactant_list, product_list, propensity_params))\n print(\"Delay attributes (importing reaction {0} -> {1}) are: {2}\".format(reactant_list, product_list, delay_dict))\n rxn = (reactant_list, product_list, propensity_params['type'], propensity_params, \n delay_type, delay_reactants, delay_products, delay_params)\n allreactions.append(rxn)\n return allreactions, allspecies\n\ndef import_sbml_rules(sbml_model, allspecies, allparams, allreactions, input_printout):\n \"\"\"Import rules from SBML model.\n\n Args:\n sbml_model (Model): libsbml Model object\n allspecies ([dict]): Species identifiers with their initial conditions\n allparams ([dict]): Parameter identifiers with their values\n allreaction ([List]): List of all reaction tuples\n input_printout ([bool]): Debug help input print out (True or False)\n\n Returns:\n List : List of all rule tuples\n List : List of updated allreactions\n \"\"\"\n # Go through rules one at a time\n allrules = []\n rule_rxn = None\n rule_type = None\n #\"Rules must be a tuple: (rule_type (string), rule_attributes (dict), rule_frequency (optional))\")\n for rule in sbml_model.getListOfRules():\n rule_formula = libsbml.formulaToL3String(rule.getMath())\n rulevariable = rule.getVariable()\n if rulevariable in allspecies:\n rule_string = rulevariable + '=' + rule_formula\n elif rulevariable in allparams:\n rule_string = rulevariable + '=' + rule_formula\n else:\n warnings.warn('SBML: Attempting to assign something that is not a parameter or species %s'\n % rulevariable)\n continue\n if rule.getElementName() == 'algebraicRule':\n warnings.warn('Unsupported rule type: %s' % rule.getElementName())\n continue\n elif rule.getElementName() == 'assignmentRule':\n rule_type = 'assignment'\n elif rule.getElementName() == 'rateRule':\n rate_rule_formula = rule_formula\n propensity_params = {}\n propensity_params['type'] = 'general'\n propensity_params['rate'] = rule_formula \n rule_rxn = ([], [rulevariable], propensity_params['type'], propensity_params) # Create --> X type reaction to model rate rules.\n else:\n raise ValueError('Invalid SBML Rule type.')\n annotation_string = rule.getAnnotationString()\n rule_frequency = \"repeated\"\n if \"BioscrapeRule\" in annotation_string:\n ind0 = annotation_string.find(\"\")\n ind1 = annotation_string.find(\"\")\n if ind0 == -1 or ind1 == -1:\n # Annotation could not be read\n if input_printout:\n print('Annotation could not be read properly, adding Rule without annotation may miss key information.')\n # Add reaction without delays\n rule_frequency = \"repeated\"\n else:\n # propensity_definition = {}\n annotation_list = annotation_string[ind0:ind1].split(\" \")\n key_vals = [(i.split(\"=\")[0], i.split(\"=\")[1]) for i in annotation_list if \"=\" in i]\n for (k, v) in key_vals:\n #Change the name of a parameter if it was renamed earlier\n if k == \"rule_frequency\":\n rule_frequency = v\n if input_printout:\n print(\"Annotated rule found with rule_frequency:\", rule_frequency)\n if rule_type is not None:\n rule_dict = {}\n rule_dict['equation'] = rule_string\n rule_tuple = (rule_type, rule_dict, rule_frequency)\n allrules.append(rule_tuple)\n if rule_rxn is not None:\n 
allreactions.append(rule_rxn)\n return allrules, allreactions\n \n\n\n# Helpful utility functions start here\n\ndef _get_species_list_in_formula(formula, species):\n sympy_rate = sympy.sympify(formula, _clash1)\n nodes = [sympy_rate]\n index = 0\n while index < len(nodes):\n node = nodes[index]\n index += 1\n nodes.extend(node.args)\n species_return = []\n for node in nodes:\n if type(node) == sympy.Symbol:\n if node.name in species:\n species_return.append(node.name)\n return species_return\n\ndef create_sbml_model(compartment_id=\"default\", time_units='second', extent_units='mole', substance_units='mole',\n length_units='metre', area_units='square_metre', volume_units='litre', volume = 1e-6):\n # Create an empty SBML Document of Level 3 Version 2 of SBML\n document = libsbml.SBMLDocument(3, 2) \n model = document.createModel()\n model.setId('bioscrape_generated_model_' + str(np.random.randint(1e6)))\n # Define units for area (not used, but keeps COPASI from complaining)\n unitdef = model.createUnitDefinition()\n unitdef.setId('square_metre')\n unitdef.setName('square_metre')\n unit = unitdef.createUnit()\n unit.setKind(libsbml.UNIT_KIND_METRE)\n unit.setExponent(2)\n unit.setScale(0)\n unit.setMultiplier(1)\n\n # Set up required units and containers\n model.setTimeUnits(time_units) # set model-wide time units\n model.setExtentUnits(extent_units) # set model units of extent\n model.setSubstanceUnits(substance_units) # set model substance units\n model.setLengthUnits(length_units) # area units (never used?)\n model.setAreaUnits(area_units) # area units (never used?)\n model.setVolumeUnits(volume_units) # default volume unit\n\n # Define the default compartment\n compartment = model.createCompartment()\n compartment.setId(compartment_id)\n compartment.setName(compartment_id)\n compartment.setConstant(True) # keep compartment size constant\n compartment.setSpatialDimensions(3) # 3 dimensional compartment\n compartment.setVolume(volume) # 1 microliter\n\n # Returning document is enough. document.getModel() gives the model, and model.getCompartment(0) gives the compartment.\n return document, model\n\n\n# Creates an SBML id from a chemical_reaction_network.species object\ndef species_sbml_id(species_name, document=None):\n # Construct the species ID\n all_ids = []\n if document:\n all_ids = getAllIds(document.getListOfAllElements())\n\n trans = SetIdFromNames(all_ids)\n species_id = trans.getValidIdForName(species_name)\n return species_id\n\n\n# Helper function to add a species to the model\n# species must be chemical_reaction_network.species objects\ndef add_species(model, compartment, species, debug=False, initial_concentration=None):\n model = model # Get the model where we will store results\n\n # Construct the species name\n species_name = species\n\n # Construct the species ID\n species_id = species_sbml_id(species_name, model.getSBMLDocument())\n if species_name != species_id:\n raise ValueError('Species names used are invalid strings to write into an SBML file.' + \n 'Remove colons, semicolons, and other special characters.' + \n 'Duplicate species names are also not allowed.' 
+ \n 'Starting species names with numbers is also not allowed')\n\n if debug: print(\"Adding species\", species_name, species_id)\n sbml_species = model.createSpecies()\n sbml_species.setName(species_name)\n sbml_species.setId(species_id)\n sbml_species.setName(species_id)\n sbml_species.setCompartment(compartment.getId())\n sbml_species.setConstant(False)\n sbml_species.setBoundaryCondition(False)\n sbml_species.setHasOnlySubstanceUnits(False)\n if initial_concentration is None:\n initial_concentration = 0\n sbml_species.setInitialConcentration(initial_concentration)\n return sbml_species\n\n\n# Helper function to add a parameter to the model\ndef add_parameter(model, param_name, param_value, debug=False):\n # Check to see if this parameter is already present\n parameter = model.createParameter()\n # all_ids = getAllIds(model.getSBMLDocument().getListOfAllElements())\n # trans = SetIdFromNames(all_ids)\n # parameter.setId(trans.getValidIdForName(param_name))\n if debug: print(\"Adding parameter\", param_name)\n # param_name might be an invalid SBML identifier. But, if we change the name here\n # then we need to make sure the changes are propagated to KineticLaws etc. TODO.\n if param_name[0] == '_':\n param_name = param_name.replace('_','',1)\n parameter.setId(param_name)\n parameter.setName(param_name)\n # Set the value of the parameter\n parameter.setValue(float(param_value))\n parameter.setConstant(True) # Is set to False while creating rules (if some parameter is changed using Rules)\n\n return parameter\n\n# Helper function to add a rule to an sbml model\ndef add_rule(model, rule_id, rule_type, rule_variable, rule_formula, rule_frequency, **kwargs):\n # Create SBML equivalent of bioscrape rule:\n # Set constant attribute for parameter to False if rule is on a parameter.\n for param in model.getListOfParameters():\n if rule_variable == param.getId():\n param.setConstant(False)\n if rule_type == 'assignment' or rule_type == 'additive':\n # Simply create SBML assignment rule type. 
For additive rule type as well,\n # AssignmentRule type of SBML will work as $s_0$ is the artificial species that\n # exists in the bioscrape model.\n rule = model.createAssignmentRule()\n elif rule_type in ['ode', 'ODE', 'GeneralODERule']:\n rule = model.createRateRule()\n else:\n raise ValueError(f\"Rule Type {rule_type} not supported with SBML.\")\n rule.setId(rule_id)\n rule.setName(rule_id)\n rule.setVariable(rule_variable)\n rule.setFormula(rule_formula)\n rule_annotation_string = ('\\n<BioscrapeAnnotation>\\n<BioscrapeRule> rule_frequency='\n '' + rule_frequency + ' </BioscrapeRule>\\n</BioscrapeAnnotation>\\n'\n )\n rule.setAnnotation(rule_annotation_string)\n return rule\n\n\n# Helper function to add a reaction to an sbml model\n# propensity params is a dictionary of the parameters for non-massaction propensities.\n# propensity_params is a dictionary with keyword 'rate' for general propensity\ndef add_reaction(model, inputs_list, outputs_list,\n reaction_id, propensity_type, propensity_params,\n stochastic = False, delay_annotation_dict = None):\n\n # Create the reaction\n # We cast to an OrderedDict and back to remove duplicates.\n # We could cast to a regular dict instead, but only in Python 3.7 or higher.\n inputs = list(OrderedDict.fromkeys(inputs_list))\n #inputs.sort()\n input_coefs = [inputs_list.count(i) for i in inputs]\n outputs = list(OrderedDict.fromkeys(outputs_list))\n output_coefs = [outputs_list.count(o) for o in outputs]\n\n reaction = model.createReaction()\n reaction.setReversible(False)\n # reaction.setFast(False) # Deprecated in SBML\n all_ids = getAllIds(model.getSBMLDocument().getListOfAllElements())\n trans = SetIdFromNames(all_ids)\n reaction.setId(trans.getValidIdForName(reaction_id))\n reaction.setName(reaction.getId())\n ratestring = \"\" #Stores the string representing the rate function\n propensity_annotation_dict = {\"type\":propensity_type}\n allspecies = []\n for s in model.getListOfSpecies():\n sid = s.getId()\n allspecies.append(sid)\n\n allparams = {}\n for p in model.getListOfParameters():\n pid = p.getId()\n pid = '_' + pid \n allparams[pid] = p.getValue()\n ratelaw = reaction.createKineticLaw()\n #Create Local Propensity Parameters\n if propensity_type==\"massaction\":\n propensity_annotation_dict[\"k\"] = propensity_params['k']\n ratestring = propensity_params['k']\n\n #Hill Function Propensities\n elif propensity_type in [\"hillpositive\", \"hillnegative\", \"proportionalhillpositive\", \"proportionalhillnegative\"]:\n ratestring = propensity_params['k']\n propensity_annotation_dict[\"k\"] = propensity_params['k']\n propensity_annotation_dict[\"K\"] = propensity_params['K']\n propensity_annotation_dict[\"n\"] = propensity_params['n']\n\n elif propensity_type == \"general\":\n pass\n #propensity_annotation_dict[\"rate\"] = propensity_params['rate']\n else:\n raise ValueError(propensity_type+\" is not a supported propensity_type\")\n\n # Create the reactants\n reactants_list = []\n for i in range(len(inputs)):\n species = str(inputs[i]).replace(\"'\", \"\")\n stoichiometry = input_coefs[i]\n # Multiple species with same name should be an invalid bioscrape construct.\n species_id = getSpeciesByName(model,species).getId()\n reactant = reaction.createReactant()\n reactant.setSpecies(species_id) # ! 
TODO: add error checking\n reactants_list.append(species_id)\n reactant.setConstant(False)\n if stoichiometry is None or stoichiometry is np.nan:\n stoichiometry = 1.0\n reactant.setStoichiometry(stoichiometry)\n\n #Create Rate-strings for massaction propensities\n if propensity_type==\"massaction\" and stochastic:\n for i in range(stoichiometry):\n if i > 0:\n ratestring += f\" * ( {species_id} - {i} )\"\n else:\n ratestring += f\" * {species_id}\"\n\n elif propensity_type==\"massaction\" and not stochastic:\n if stoichiometry > 1:\n ratestring += f\" * {species_id}^{stoichiometry}\"\n else:\n ratestring += f\" * {species_id}\"\n\n # Create the products\n products_list = []\n for i in range(len(outputs)):\n species = str(outputs[i]).replace(\"'\", \"\")\n stoichiometry = output_coefs[i]\n product = reaction.createProduct()\n species_id = getSpeciesByName(model, species).getId()\n product.setSpecies(species_id)\n products_list.append(species_id)\n if stoichiometry is None or stoichiometry is np.nan:\n stoichiometry = 1.0\n product.setStoichiometry(stoichiometry)\n product.setConstant(False)\n\n\n #Create ratestring for non-massaction propensities\n if propensity_type == \"hillpositive\":\n if not (\"s1\" in propensity_params):\n raise ValueError(\"hillpositive propensities, p(s1; k, K, n) \"\n \"= k*s1^n/(s1^n + K), require the following key in the propensity_params dictionary:\"\n \"'s1':species (chemical_reaction_network.species)\")\n\n s = str(propensity_params['s1']).replace(\"'\", \"\")\n s_species_id = getSpeciesByName(model, s).getId()\n if s_species_id not in reactants_list and s_species_id not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(s_species_id)\n n = propensity_params['n']\n K = propensity_params['K']\n ratestring+=f\"*{s_species_id}^{n}/({s_species_id}^{n}+{K})\"\n\n propensity_annotation_dict[\"s1\"] = s_species_id\n\n elif propensity_type == \"hillnegative\":\n if not (\"s1\" in propensity_params):\n raise ValueError(\"hillnegative propensities, \"\n \"p(s1; k, K, n) = k*1/(s1^n + K), require the following key in the propensity_params dictionary:\"\n \"'s1':species (chemical_reaction_network.species)\")\n s = str(propensity_params['s1']).replace(\"'\", \"\")\n s_species_id = getSpeciesByName(model,s).getId()\n if s_species_id not in reactants_list and s_species_id not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(s_species_id)\n n = propensity_params['n']\n K = propensity_params['K']\n ratestring+=f\"/({s_species_id}^{n}+{K})\"\n propensity_annotation_dict[\"s1\"] = s_species_id\n\n elif propensity_type == \"proportionalhillpositive\":\n if not (\"s1\" in propensity_params and \"d\" in propensity_params):\n raise ValueError(\"proportionalhillpositive propensities, \"\n \"p(s1, d; k, K, n) = k*d*s1^n/(s1^n + K), require the following key in the propensity_params dictionary:\"\n \"'s1':species (chemical_reaction_network.species)\"\n \"'d':species (chemical_reaction_network.species), \")\n\n s = str(propensity_params['s1']).replace(\"'\", \"\")\n d = str(propensity_params['d']).replace(\"'\", \"\")\n s_species_id = getSpeciesByName(model,s).getId()\n if s_species_id not in reactants_list and s_species_id not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(s_species_id)\n d_species_id = getSpeciesByName(model,d).getId()\n if d_species_id not in reactants_list and d_species_id not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(d_species_id)\n n = 
propensity_params['n']\n K = propensity_params['K']\n\n ratestring+=f\"*{d_species_id}*{s_species_id}^{n}/({s_species_id}^{n} + {K})\"\n\n propensity_annotation_dict[\"s1\"] = s_species_id\n propensity_annotation_dict[\"d\"] = d_species_id\n\n elif propensity_type == \"proportionalhillnegative\":\n if not (\"s1\" in propensity_params and \"d\" in propensity_params):\n raise ValueError(\"proportionalhillnegative propensities, \"\n \"p(s1, d; k, K, n) = k*d/(s1^n + K), require the following key in the propensity_params dictionary:\"\n \"'s1':species (chemical_reaction_network.species)\"\n \"'d':species (chemical_reaction_network.species), \")\n\n s = str(propensity_params['s1']).replace(\"'\", \"\")\n d = str(propensity_params['d']).replace(\"'\", \"\")\n s_species_id = getSpeciesByName(model,s).getId()\n if s_species_id not in reactants_list and s_species_id not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(s_species_id)\n d_species_id = getSpeciesByName(model,d).getId()\n if d_species_id not in reactants_list and d_species_id not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(d_species_id)\n n = propensity_params['n']\n K = propensity_params['K']\n\n ratestring+=f\"*{d_species_id}/({s_species_id}^{n}+{K})\"\n\n propensity_annotation_dict[\"s1\"] = s_species_id\n propensity_annotation_dict[\"d\"] = d_species_id\n elif propensity_type == \"general\":\n ratestring = propensity_params['rate']\n species_list = _get_species_list_in_formula(ratestring, allspecies)\n for s in species_list:\n if s not in reactants_list and s not in products_list:\n modifier = reaction.createModifier()\n modifier.setSpecies(s)\n ratestring = str(ratestring).replace('**','^')\n # Set the ratelaw to the ratestring\n math_ast = libsbml.parseL3Formula(ratestring)\n flag = ratelaw.setMath(math_ast)\n if not flag == libsbml.LIBSBML_OPERATION_SUCCESS or math_ast is None:\n raise ValueError(\"Could not write the rate law for reaction to SBML. 
Check the reaction definition.\")\n\n #Add propensity annotation\n if propensity_type != \"general\":\n propensity_annotation_string = \"<PropensityType>\"\n for k in propensity_annotation_dict:\n propensity_annotation_string += \" \"+k + \"=\" + str(propensity_annotation_dict[k])\n propensity_annotation_string += \"</PropensityType>\"\n else:\n propensity_annotation_string = \"\"\n if delay_annotation_dict != None:\n delay_annotation_string = \"<DelayType>\"\n for k, val in delay_annotation_dict.items():\n if k == 'parameters':\n for param_key, param_value in val.items():\n delay_annotation_string += \" \"+str(param_key)+ \"=\" + str(param_value)\n elif k == 'reactants' or k == 'products':\n delay_annotation_string += \" \"+ k + \"=\"\n for v in val:\n delay_annotation_string += str(v) + \",\"\n if delay_annotation_string[-1] == \",\":\n delay_annotation_string = delay_annotation_string[:-1]\n else:\n delay_annotation_string += \" \"+k + \"=\" + str(val)\n delay_annotation_string += \"</DelayType>\"\n else:\n delay_annotation_string = \"\"\n annotation_string = \"<BioscrapeAnnotation>\\n\" + propensity_annotation_string + delay_annotation_string + \"\\n</BioscrapeAnnotation>\"\n ret = reaction.setAnnotation(annotation_string)\n if ret != libsbml.LIBSBML_OPERATION_SUCCESS:\n warnings.warn(\"Could not write Bioscrape annotation to SBML file.\")\n return reaction\n\n# # Returns a list of all ids from the given list of elements\n# #\ndef getAllIds(allElements):\n result = []\n if (allElements == None or allElements.getSize() == 0):\n return result\n\n for i in range(0, allElements.getSize()):\n current = allElements.get(i)\n if (current.isSetId() \\\n and current.getTypeCode() != libsbml.SBML_LOCAL_PARAMETER):\n result.append(current.getId())\n return result\n\n# Renames lists of SIds in an SBML Document\ndef renameSIds(document, oldSIds, newSIds, debug = False):\n '''\n Updates the SId from oldSId to newSId for any component of the Subsystem.\n Returns the SBMLDocument of the updated Subsystem\n '''\n\n #\n # @file renameSId.py\n # @brief Utility program, renaming a specific SId\n # while updating all references to it.\n # @author Frank T. Bergmann\n #\n # \n #\n\n try:\n import libsbml\n except:\n raise ImportError(\"libsbml not found. See sbml.org for installation help!\\n\" +\n 'If you are using anaconda you can run the following:\\n' +\n 'conda install -c SBMLTeam python-libsbml\\n\\n\\n')\n\n\n if len(oldSIds) != len(newSIds):\n raise ValueError(\"Length oldSIds != length newSIds\")\n\n for ind in range(len(oldSIds)):\n oldSId = oldSIds[ind]\n newSId = newSIds[ind]\n\n if oldSId == newSId:\n warnings.warn(\"The Ids are identical: \" +str(oldSId)+\". SId skipped.\")\n\n if not libsbml.SyntaxChecker.isValidInternalSId(newSId):\n warnings.warn(\"The new SId '{0}' does not represent a valid SId.\".format(newSId))\n\n\n element = document.getElementBySId(oldSId)\n\n if element == None:\n if debug:\n warnings.warn(\"Found no element with SId '{0}' in subsystem {1}\".format(oldSId,document.getModel().getId()))\n\n # update all references to this element\n allElements = document.getListOfAllElements()\n for i in range(allElements.getSize()):\n current = allElements.get(i)\n current.renameSIdRefs(oldSId, newSId)\n return document\n\n\n# !/usr/bin/env python\n##\n## @file setIdFromNames.py\n## @brief Utility program, renaming all SIds that also has\n## names specified. The new id will be derived from\n## the name, with all invalid characters removed.\n##\n## @author Frank T. 
Bergmann\n##\n##\n## \n##\n##\n\nimport sys\nimport os.path\nimport time\n\n\n# This class implements an identifier transformer, that means it can be used\n# to rename all sbase elements.\nclass SetIdFromNames(libsbml.IdentifierTransformer):\n def __init__(self, ids):\n # call the constructor of the base class\n libsbml.IdentifierTransformer.__init__(self)\n # remember existing ids ...\n self.existingIds = ids\n\n # The function actually doing the transforming. This function is called\n\n # once for each SBase element in the model.\n def transform(self, element):\n # return in case we don't have a valid element\n if (element == None \\\n or element.getTypeCode() == libsbml.SBML_LOCAL_PARAMETER):\n return libsbml.LIBSBML_OPERATION_SUCCESS\n\n # or if there is nothing to do\n if (element.isSetName() == False \\\n or element.getId() == element.getName()):\n return libsbml.LIBSBML_OPERATION_SUCCESS\n\n # find the new id\n newId = self.getValidIdForName(element.getName())\n\n # set it\n element.setId(newId)\n\n # remember it\n self.existingIds.append(newId)\n\n return libsbml.LIBSBML_OPERATION_SUCCESS\n\n def nameToSbmlId(self, name):\n IdStream = []\n count = 0\n end = len(name)\n\n if '0' <= name[count] and name[count] <= '9':\n IdStream.append('x_')\n if '*' in name:\n IdStream.append('xx')\n for count in range(0, end):\n if (('0' <= name[count] and name[count] <= '9') or\n ('a' <= name[count] and name[count] <= 'z') or\n ('A' <= name[count] and name[count] <= 'Z')):\n IdStream.append(name[count])\n else:\n IdStream.append('_')\n Id = ''.join(IdStream)\n if (Id[len(Id) - 1] != '_'):\n return Id\n\n return Id[:-1]\n\n #\n # Generates the id out of the name, and ensures it is unique.\n # It does so by appending numbers to the original name.\n #\n def getValidIdForName(self, name):\n baseString = self.nameToSbmlId(name)\n id = baseString\n count = 1\n while (self.existingIds.count(id) != 0):\n id = \"{0}_{1}\".format(baseString, count)\n count = count + 1\n return id\n\n # #\n\n\ndef getSpeciesByName(model, name, compartment=''):\n '''\n Returns a list of species in the Model with the given name\n compartment : (Optional) argument to specify the compartment name in which\n to look for the species.\n '''\n if type(name) is not str:\n raise ValueError('\"name\" must be a string.')\n species_found = []\n for species in model.getListOfSpecies():\n if species.getName() == name:\n if compartment != '':\n comp_elem = species.getCompartment()\n comp_name = model.getElementBySId(comp_elem).getName()\n if comp_name == compartment:\n species_found.append(species)\n else:\n continue\n else:\n species_found.append(species)\n\n if len(species_found) == 1:\n return species_found[0]\n elif not species_found:\n raise ValueError('The species ' + name + ' not found.')\n else:\n warnings.warn('Multiple species with name ' + name + ' found. 
Returning a list')\n return species_found","sub_path":"bioscrape/sbmlutil.py","file_name":"sbmlutil.py","file_ext":"py","file_size_in_byte":46000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"347511792","text":"import subprocess\nimport enum\nimport sys\nimport json\nimport os\nimport base64\nimport random\n\nconfig_file_path = sys.argv[1]\nconfig = json.loads(open(config_file_path).read())\n\n\ndef validate_config():\n assert config['test']['mode'] == 'testgen'\n\n\nvalidate_config()\n\n\nclass ErrorPolicy(enum.Enum):\n FAIL = \"fail\"\n ERROR = \"error\"\n\n\nclass Type(enum.Enum):\n PYTHON = 'python'\n CPLUSPLUS = 'cpp'\n BINARY = 'exe'\n\n\nclass Submission:\n type: Type\n path: str\n\n\ndef run(*, file, type: Type, argv=None, env=None, input=''):\n if env is None:\n env = dict()\n if argv is None:\n argv = []\n # print(\"run(file={}, argv={})\".format(file, argv))\n env0 = dict(os.environ)\n for (key, value) in env.items():\n env0[key] = value\n argv = [file] + argv\n if type == Type.PYTHON:\n argv = ['python'] + argv\n popen = subprocess.run(argv, env=env0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=input.encode())\n if popen.returncode != 0:\n print(\"child process crashed\")\n print(popen.stderr.decode())\n print(popen.stdout.decode())\n print('args:', popen.args)\n print('retcode: ', popen.returncode)\n exit(1)\n return {\n 'output': popen.stdout.decode(),\n 'errors': popen.stderr.decode()\n }\n\n\ndef get_test(test_id):\n options = {\n 'INVOKE_TEST_ID': str(test_id)\n }\n gen_config = config['test']['generator']\n r = run(file=gen_config['path'], type=Type(config['test']['generator']['type']), env=options)\n return r['output']\n\n\ndef get_output(solution: Submission, input):\n r = run(file=solution.path, type=solution.type, env={}, input=input)\n return r['output']\n\n\nclass Status(enum.Enum):\n OK = 'OK'\n WRONG_ANSWER = 'WA'\n RUNTIME_ERROR = 'RT'\n TIMELIMIT_EXCEEDED = 'TL'\n\n\ndef check_output(test: str, output: str) -> Status:\n input = '\\n'.join([\n base64.b64encode(test.encode()).decode(),\n base64.b64encode(output.encode()).decode()\n ])\n checker_config = config['test']['checker']\n r = run(file=checker_config['path'], type=Type(checker_config['type']), input=input)\n checker_output = r['output'].split()\n status = checker_output[-1]\n return Status(status)\n\n\ndef allocate_temp_file(tpl: str, should_open: bool = True):\n name = tpl.replace('#', str(random.randint(100, 999)))\n name = os.path.join(config['tmp'], name)\n f = None\n if should_open:\n f = open(name, 'w')\n return name, f\n\n\ndef build(path: str, type: Type) -> Submission:\n # print(\"build({}, {})\".format(path, type))\n if type == Type.PYTHON:\n s = Submission()\n s.path = path\n s.type = type\n return s\n if type == Type.CPLUSPLUS:\n gcc_path = config['build']['cpp']['path']\n bin_file = allocate_temp_file('cpp-exe-#.exe', False)\n argv = [path, '-o', bin_file[0]]\n run(file=gcc_path, type=Type.BINARY, argv=argv)\n s = Submission()\n s.path = bin_file[0]\n s.type = Type.BINARY\n return s\n\n\ndef main():\n num_tests = int(config['test']['num_tests'])\n built = [build(sol['path'], Type(sol['type'])) for sol in config['solutions']]\n for i in range(num_tests):\n test = get_test(i)\n for j in range(len(config['solutions'])):\n res = built[j]\n path = config['solutions'][j]['path']\n out = get_output(res, test)\n status = check_output(test, out)\n print(i, path, 
status)\n\n\nmain()\n","sub_path":"tools/invoke.py","file_name":"invoke.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"490742448","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/bee/Dev/piu/django/testSite/bee_django_coin/migrations/0005_auto_20181016_1619.py\n# Compiled at: 2018-10-16 04:19:04\nfrom __future__ import unicode_literals\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('bee_django_coin', '0004_auto_20180620_1845')]\n operations = [\n migrations.CreateModel(name=b'OtherCoinCount', fields=[\n (\n b'id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name=b'ID')),\n (\n b'other_type', models.CharField(choices=[(1, '班级剩余金币')], max_length=180, null=True)),\n (\n b'other_type_id', models.IntegerField()),\n (\n b'count', models.IntegerField(default=0)),\n (\n b'update_at', models.DateTimeField(auto_now=True))], options={b'ordering': [\n b'pk'], \n b'db_table': b'bee_django_coin_other_count'}),\n migrations.AddField(model_name=b'usercoinrecord', name=b'coin_content_id', field=models.IntegerField(null=True))]","sub_path":"pycfiles/bee-django-coin-0.1.48.tar/0005_auto_20181016_1619.py","file_name":"0005_auto_20181016_1619.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"158851163","text":"from utils import _Utils, OUTPUT_FOLDER\nfrom . import db\nfrom .image_model import ImageModel\nfrom .utils import SQLAlchemyDBConnection\n\n\"\"\"Table \"public.images\"\n\n Column | Type | Collation | Nullable | Default\n----------+------------------------+-----------+----------+---------\n filename | character varying(100) | | not null |\n styles | character varying[] | | not null |\nIndexes:\n \"images_pkey\" PRIMARY KEY, btree (filename)\n\nfilename: daa24daad3393865.jpg\nstyles: {cartoongan_hayao, cartoongan_hosoda}\n\"\"\"\n\nclass Database:\n @classmethod\n def sync_image_models(cls):\n with SQLAlchemyDBConnection(db) as session:\n files = []\n print(_Utils.file_from_storage(OUTPUT_FOLDER))\n for file in _Utils.file_from_storage(OUTPUT_FOLDER):\n [filename, style] = _Utils.filename_and_style(file)\n files.append([filename, style])\n exist_model = ImageModel.query.filter_by(filename=filename).first()\n if exist_model is None: # not in the db yet\n session.add(ImageModel(filename, style)) # add\n else:\n exist_model.styles.append(style) # already in the db: update by appending the style\n\n return files # [[filename, style]]\n","sub_path":"webserver/database/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"132207116","text":"import time\nimport socket\nimport re\nimport json\nimport sys\nfrom datetime import datetime\n\n# NOTE: stray module-level __init__ (apparently left over from a class); it is never called\ndef __init__(self, url, port=80):\n self.url = url\n self.port = port\n\ndef dtu_http_request(url, port=80):\n # create a socket object\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # connect the client\n client.connect((url, port)) \n\n # get current timestamp in millis\n current_milli_time = lambda: int(round(time.time() * 1000))\n\n try:\n # send data\n request = f\"GET /hometable.xml?_t={current_milli_time()} 
HTTP/1.1\\r\\nHost:%s\\r\\n\\r\\n\" % url\n client.send(request.encode())\n\n # receive some data\n response = client.recv(4096)\n\n # processing response\n json = array_to_json(response)\n\n # returning json\n return json\n except Exception:\n return None\n\ndef array_to_json(response):\n try:\n splited = str(response).split(';')\n data = {}\n data['current_power_total'] = {\"value\": splited[0].strip('b\\''), \"unit\": \"kW\" }\n data['full_power_total'] = {\"value\": splited[1], \"unit\": \"kWh\" }\n data['energy_today'] = {\"value\": splited[2], \"unit\": \"kWh\" }\n data['co2_saved'] = {\"value\": splited[3], \"unit\": \"kg\" }\n data['panels_online'] = splited[7]\n data['inverters'] = []\n if '01' in splited[5]:\n j = 11\n t = 0\n count = 0\n while t < int(splited[4]):\n panel_voltage = re.sub(r\"[^0-9\\.]\", \"\", splited[j+t*8+2])\n grid_voltage = re.sub(r\"[^0-9\\.]\", \"\", splited[j+t*8+3])\n grid_frequency = re.sub(r\"[^0-9\\.]\", \"\", splited[j+t*8+4])\n panel_power = re.sub(r\"[^0-9\\.]\", \"\", splited[j+t*8+5])\n energy_today = re.sub(r\"[^0-9\\.]\", \"\", splited[j+t*8+6])\n panel_temperature = re.sub(r\"[^0-9\\.]\", \"\", splited[j+t*8+7])\n if t == 0 or (splited[j+t*8+1][:12] != splited[j+(t-1)*8+1][:12]):\n data['inverters'].append({\n \"id\": splited[j+t*8+1][:12],\n \"channels\": [{\n \"channel\": splited[j+t*8+1][-1],\n \"panelvoltage\": {\"value\": float(panel_voltage), \"unit\": \"V\" },\n \"gridvoltage\": {\"value\": float(grid_voltage), \"unit\": \"V\" },\n \"gridfrequency\": {\"value\": float(grid_frequency), \"unit\": \"Hz\" },\n \"panelpower\": {\"value\": float(panel_power), \"unit\": \"W\" },\n \"energytoday\": {\"value\": float(energy_today), \"unit\": \"Wh\" },\n \"temperature\": {\"value\": float(panel_temperature), \"unit\": \"C\" },\n \"date\": splited[j+t*8+8]\n }]\n })\n count = count + 1\n else:\n data['inverters'][count - 1]['channels'].append({\n \"channel\": splited[j+t*8+1][-1],\n \"panelvoltage\": {\"value\": float(panel_voltage), \"unit\": \"V\" },\n \"gridvoltage\": {\"value\": float(grid_voltage), \"unit\": \"V\" },\n \"gridfrequency\": {\"value\": float(grid_frequency), \"unit\": \"Hz\" },\n \"panelpower\": {\"value\": float(panel_power), \"unit\": \"W\" },\n \"energytoday\": {\"value\": float(energy_today), \"unit\": \"Wh\" },\n \"temperature\": {\"value\": float(panel_temperature), \"unit\": \"C\" },\n \"date\": splited[j+t*8+8]\n })\n t = t+1\n jsonStr = json.dumps(data, indent=4)\n return jsonStr\n except Exception:\n return None\n","sub_path":"hoymilesdtumi/hoymilesdtumi.py","file_name":"hoymilesdtumi.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"366180709","text":"#project/__init__.py\nimport logging\nimport os\nfrom logging.handlers import RotatingFileHandler\nfrom logging.handlers import SMTPHandler\n\nfrom flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager\nfrom flask_migrate import Migrate\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail\n\nfrom config import Config\n\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\nlogin = LoginManager()\nlogin.login_view = 'auth.login'\n#login.login_message = _l('Please log in to access this page.')\nmail = Mail()\nbootstrap = Bootstrap()\nmoment = Moment()\n#babel = Babel()\n\n\ndef create_app(config_class=Config):\n application = Flask(__name__)\n application.config.from_object(config_class)\n\n 
from project import models\n\n db.init_app(application)\n migrate.init_app(application, db)\n login.init_app(application)\n mail.init_app(application)\n bootstrap.init_app(application)\n moment.init_app(application)\n #babel.init_app(app)\n\n from project.errors import bp as errors_bp\n application.register_blueprint(errors_bp)\n\n from project.main import bp as main_bp\n application.register_blueprint(main_bp)\n\n from project.auth import bp as auth_bp\n application.register_blueprint(auth_bp, url_prefix='/auth')\n\n if not application.debug and not application.testing:\n if application.config['MAIL_SERVER']:\n auth = None\n if application.config['MAIL_USERNAME'] or application.config['MAIL_PASSWORD']:\n auth = (application.config['MAIL_USERNAME'], application.config['MAIL_PASSWORD'])\n secure = None\n if application.config['MAIL_USE_TLS']:\n secure = ()\n mail_handler = SMTPHandler(\n mailhost=(application.config['MAIL_SERVER'], application.config['MAIL_PORT']),\n fromaddr='no-reply@' + application.config['MAIL_SERVER'],\n toaddrs=application.config['ADMINS'], subject='Microblog Failure',\n credentials=auth, secure=secure)\n mail_handler.setLevel(logging.ERROR)\n application.logger.addHandler(mail_handler)\n\n if not os.path.exists('logs'):\n os.mkdir('logs')\n file_handler = RotatingFileHandler('logs/project.log', maxBytes=10240,\n backupCount=10)\n file_handler.setFormatter(\n logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n file_handler.setLevel(logging.INFO)\n application.logger.addHandler(file_handler)\n\n application.logger.setLevel(logging.INFO)\n application.logger.info('Project startup')\n\n return application\n\n\nfrom project import models\n#from project.auth import routes","sub_path":"project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"483820624","text":"import FWCore.ParameterSet.Config as cms\n\nfrom L1Trigger.L1THGCal.hgcalVFEProducer_cfi import vfe_proc\n\ndef create_compression(process,\n exponent=vfe_proc.exponentBits,\n mantissa=vfe_proc.mantissaBits,\n rounding=vfe_proc.rounding,\n oot_coefficients=vfe_proc.oot_coefficients\n ):\n producer = process.hgcalVFEProducer.clone(\n ProcessorParameters = vfe_proc.clone(\n exponentBits = exponent,\n mantissaBits = mantissa,\n rounding = rounding,\n oot_coefficients = oot_coefficients\n )\n )\n return producer\n","sub_path":"L1Trigger/L1THGCalUtilities/python/vfe.py","file_name":"vfe.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"647254663","text":"class Student(object):\n def __init__(self, name):\n self.name = name\n def __str__(self):\n return 'student object (name: %s)' % self.name\n __repr__ = __str__\n\nprint(Student('Michael'))\n\n# a class can be made iterable, like list or tuple\nclass Fib(object):\n def __init__(self):\n self.a, self.b = 0, 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.a, self.b = self.b, self.a + self.b\n if self.a > 100:\n raise StopIteration\n return self.a\n\nfor n in Fib():\n print(n)\n\nclass Fib(object):\n def __getitem__(self, n):\n a, b = 1, 1\n for x in range(n):\n a, b = b, a + b\n return a\nf = Fib()\nprint(f[0],f[1],f[2],f[3])\n\n# a simple slicing implementation\nclass Fib():\n def __getitem__(self, n):\n if isinstance(n, int):\n a, b = 1, 1\n for x in range(n):\n a, b = b, a + b\n return a\n if isinstance(n, slice):\n start = 
n.start\n            stop = n.stop\n            if start is None:\n                start = 0\n            a, b = 1, 1\n            L = []\n            for x in range(stop):\n                if x >= start:\n                    L.append(a)\n                a, b = b, a + b\n            return L\n\nf = Fib()\nprint(f[0:5],f[0:10])\n\n# Called when a non-existent attribute is accessed\nclass Student(object):\n    def __init__(self):\n        self.name = 'Michael'\n\n    def __getattr__(self, attr):\n        if attr == 'score':\n            return 99\n        if attr == 'age':\n            return lambda: 25\n\ns = Student()\nprint(s.name,s.score,s.age())\n\nclass Student(object):\n    def __init__(self, name):\n        self.name = name\n\n    def __call__(self):\n        print('My name is %s.' % self.name)\n\ns = Student('michael')\ns()\n\n\nfrom enum import Enum\nMonth = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\n\nfor name, member in Month.__members__.items():\n    print(name, '=>', member, ',', member.value)\n\nfrom enum import Enum, unique\n\n@unique\nclass Weekday(Enum):\n    Sun = 0\n    Mon = 1\n    Tue = 2\n    Wed = 3\n    Thu = 4\n    Fri = 5\n    Sat = 6\n\nday1 = Weekday.Mon\nprint(day1,day1.value)\n","sub_path":"python/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"102813585","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef read_data(file_name):\n    return pd.read_csv(file_name)\n\n\ndef common_class(data):\n    recurrence = 0\n    noRecurrence = 0\n    for i,j in data.iterrows():\n        if data.iloc[i]['Class'] != \"?\":\n            if data.iloc[i]['Class'] == \"no-recurrence-events\":\n                noRecurrence += 1\n            else:\n                recurrence += 1\n    return {\"no-recurrence\": noRecurrence} if recurrence < noRecurrence else {\"recurrence\": recurrence}\n\n\ndef common_age_and_menopause_w_recurrence(data):\n    age = {}\n    menopause = {}\n    for i,j in data.iterrows():\n        if data.iloc[i]['Class'] != \"?\":\n            if data.iloc[i]['Class'] == \"recurrence-events\":\n                age[data.iloc[i]['age']] = age.get(data.iloc[i]['age'], 0) + 1\n                if data.iloc[i]['menopause'] != \"premeno\":\n                    menopause[data.iloc[i]['menopause']] = menopause.get(data.iloc[i]['menopause'], 0) + 1\n    \n    return (age, menopause, max(age, key=age.get), max(menopause, key=menopause.get))\n\n\ndef plot_recurrences(frequencies):\n    df4 = pd.DataFrame(frequencies, columns= [freq for freq in frequencies], index=[0])\n    df4.plot.bar(alpha=0.5)\n\n\nif __name__ == '__main__':\n    print(common_class(read_data(\"breast-cancer.data\")))\n    age, menopause, max_age, max_meno= common_age_and_menopause_w_recurrence(read_data(\"breast-cancer.data\"))\n    print(age, max_age)\n    print(menopause, max_meno)\n    plot_recurrences(age)\n","sub_path":"pandasExercise.py","file_name":"pandasExercise.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"350482701","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.test import TestCase\n\nfrom waddleadmin.helpers import PermissionHelper, PagePermissionHelper\nfrom wagtail.wagtailimages.models import Image\nfrom wagtail.tests.testapp.models import EventPage\nfrom wagtail.tests.utils import WagtailTestUtils\n\n\nclass UsersMixin(object):\n    fixtures = ['test.json']  # wagtail/tests/testapp/fixtures/test.json\n\n    def setUp(self):\n        super(UsersMixin, self).setUp()\n        # Create a custom permission\n        image_ct = 
ContentType.objects.get_for_model(Image)\n        exterminate_image_permission = Permission.objects.create(\n            content_type=image_ct, codename='exterminate_image'\n        )\n        moderators_group = Group.objects.get(pk=4)\n        moderators_group.permissions.add(exterminate_image_permission)\n\n    @staticmethod\n    def get_moderator():\n        user = get_user_model().objects._create_user(username='moderator', email='moderator@email.com', password='password', is_staff=True, is_superuser=False)\n        user.groups.add(Group.objects.get(pk=4))\n        return user\n\n    @staticmethod\n    def get_editor():\n        user = get_user_model().objects._create_user(username='editor', email='editor@email.com', password='password', is_staff=True, is_superuser=False)\n        user.groups.add(Group.objects.get(pk=5))\n        return user\n\n    @staticmethod\n    def get_non_editor():\n        user = get_user_model().objects._create_user(username='nobody', email='nobody@email.com', password='password', is_staff=False, is_superuser=False)\n        user.groups.add(Group.objects.get(pk=6))\n        return user\n\n\nclass TestPermissionHelper(UsersMixin, TestCase, WagtailTestUtils):\n\n    def setUp(self):\n        super(TestPermissionHelper, self).setUp()\n        self.helper = PermissionHelper(Image)\n\n    def test_user_can_create_images(self):\n        self.assertTrue(\n            self.helper.user_can(self.create_test_user(), 'create')\n        )\n        self.assertTrue(\n            self.helper.user_can(self.get_moderator(), 'create')\n        )\n        self.assertTrue(\n            self.helper.user_can(self.get_editor(), 'create')\n        )\n        self.assertFalse(\n            self.helper.user_can(self.get_non_editor(), 'create')\n        )\n\n    def test_user_can_edit_an_image(self):\n        image_obj = Image.objects.get(id=1)\n        self.assertTrue(\n            self.helper.user_can(self.create_test_user(), 'edit', image_obj)\n        )\n        self.assertTrue(\n            self.helper.user_can(self.get_moderator(), 'edit', image_obj)\n        )\n        self.assertTrue(\n            self.helper.user_can(self.get_editor(), 'edit', image_obj)\n        )\n        self.assertFalse(\n            self.helper.user_can(self.get_non_editor(), 'edit', image_obj)\n        )\n\n    def test_user_can_exterminate_images(self):\n        # If a permission exists, a superuser will automatically have it\n        self.assertTrue(\n            self.helper.user_can(self.create_test_user(), 'exterminate')\n        )\n        # A custom 'exterminate' permission has been created and assigned to the\n        # moderator group, so a moderator should be able to 'exterminate' an\n        # Image\n        self.assertTrue(\n            self.helper.user_can(self.get_moderator(), 'exterminate')\n        )\n        self.assertFalse(\n            self.helper.user_can(self.get_editor(), 'exterminate')\n        )\n        self.assertFalse(\n            self.helper.user_can(self.get_non_editor(), 'exterminate')\n        )\n\n    def test_user_cannot_transmogrify_images(self):\n        # No 'transmogrify' permission exists, so checking this should return\n        # False (and raise a warning)\n        user = self.create_test_user()\n        self.assertFalse(self.helper.user_can(user, 'transmogrify'))\n\n\nclass TestPagePermissionHelper(UsersMixin, TestCase, WagtailTestUtils):\n\n    def setUp(self):\n        super(TestPagePermissionHelper, self).setUp()\n        self.helper = PagePermissionHelper(EventPage)\n\n    def test_user_can_create_eventpages(self):\n        self.assertTrue(\n            self.helper.user_can(self.create_test_user(), 'create')\n        )\n        self.assertTrue(\n            self.helper.user_can(self.get_moderator(), 'create')\n        )\n        # Editors shouldn't be able to create an EventPage\n        self.assertFalse(\n            self.helper.user_can(self.get_editor(), 'create')\n        )\n        # Users with no page-related permissions at all certainly shouldn't\n        self.assertFalse(\n            self.helper.user_can(self.get_non_editor(), 'create')\n        )\n\n    def 
test_user_can_delete_an_eventpage(self):\n        christmas = EventPage.objects.get(id=4)\n        self.assertTrue(\n            self.helper.user_can(self.create_test_user(), 'delete', christmas)\n        )\n        self.assertTrue(\n            self.helper.user_can(self.get_moderator(), 'delete', christmas)\n        )\n        # Editors shouldn't be able to delete an EventPage\n        self.assertFalse(\n            self.helper.user_can(self.get_editor(), 'delete', christmas)\n        )\n        # Users with no page-related permissions at all certainly shouldn't\n        self.assertFalse(\n            self.helper.user_can(self.get_non_editor(), 'delete', christmas)\n        )\n\n    def test_user_cannot_transmogrify_an_eventpage(self):\n        user = self.create_test_user()\n        christmas = EventPage.objects.get(id=4)\n        # The below should result in PagePermissionHelper looking for a\n        # 'can_transmogrify' method on PagePermissionTester, but failing\n        # because there is no such method\n        self.assertFalse(self.helper.user_can(user, 'transmogrify', christmas))\n\n    def test_user_cannot_move_to_an_eventpage(self):\n        user = self.create_test_user()\n        christmas = EventPage.objects.get(id=4)\n        # The below should result in PagePermissionHelper attempting to call\n        # PagePermissionTester.can_move_to(), but failing, because\n        # can_move_to() requires an additional 'parent' argument\n        self.assertFalse(self.helper.user_can(user, 'move_to', christmas))\n","sub_path":"waddleadmin/tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"358739846","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @author: x.huang\n# @date:29/05/19\nimport time\n\nfrom pypay.gateways.wechat import WechatPay\n\n\nclass MpPayImpl(WechatPay):\n\n    @staticmethod\n    def get_trade_type():\n        return 'JSAPI'\n\n    def pay(self, config_biz: dict):\n        self.check_config('app_id')\n\n        _now = time.time()\n        prepay_id = self.pre_order(config_biz).get('prepay_id')\n\n        pay_dict = {\n            'appid': self.config.appid,\n            'partnerid': self.config.mch_id,\n            'prepayid': prepay_id,\n            'timestamp': _now,\n            'noncestr': self.gen_nonce_str(),\n            'package': f'prepay_id={prepay_id}'\n        }\n        pay_dict['paySign'] = self.gen_sign(pay_dict)\n\n        return pay_dict\n","sub_path":"pypay/gateways/wechat_impl/mp_pay_impl.py","file_name":"mp_pay_impl.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"579970540","text":"# ---------------------------------------------------------------\n#This reducer code will input a line of text and \n#    output each word with its aggregated count \n# ---------------------------------------------------------------\nimport sys\n\nlast_key = None \nrunning_total = 0\n\n# -----------------------------------\n# Read the input in a loop and aggregate counts per key\n# --------------------------------\nfor input_line in sys.stdin:\n    input_line = input_line.strip()\n    this_key, value = input_line.split(\"\\t\", 1) \n    value = int(value) \n    \n    if last_key == this_key: \n        running_total += value   # add value to running total\n\n    else:\n        if last_key: \n            print( \"{0}\\t{1}\".format(last_key, running_total) )\n        \n        running_total = value   #reset values\n        last_key = this_key\n\nif last_key == this_key:\n    print( \"{0}\\t{1}\".format(last_key, running_total)) \n","sub_path":"i03Hadoop Beginner's Guide/ch3understand_mapreduce/wordcount_reducer.py","file_name":"wordcount_reducer.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"522257016","text":"import torch\n\nclass EarlyStopping():\n    \"\"\"\n    Monitor the training process to stop training early if the model shows\n    evidence of beginning to overfit the validation dataset, and save the\n    best model. \n    \n    Note that patience here is measured in steps, rather than in epochs,\n    because the size of an epoch will not be consistent if the size of the \n    dataset changes.\n    \n    Inspired by:\n    https://github.com/Bjarten/early-stopping-pytorch\n    https://github.com/fastai/fastai/blob/master/courses/dl2/imdb_scripts/finetune_lm.py\n    \"\"\"\n    \n    def __init__(self, patience=100):\n        \"\"\"\n        Args:\n            patience: (int) if the validation loss fails to improve for this\n                number of consecutive batches, training will be stopped\n                (the model and the output file location are passed to\n                __call__ rather than to the constructor)\n        \"\"\"\n        self.patience = patience\n        self.counter = 0\n        self.best_loss = None\n        self.step_at_best = 0\n        self.stop = False\n        print(\"instantiated early stopping with patience=\" + \\\n            str(self.patience))\n    \n    def __call__(self, val_loss, model, output_file, step_idx):\n        # do nothing if early stopping is disabled\n        if self.patience > 0:\n            if self.best_loss is None:\n                self.best_loss = val_loss\n                self.step_at_best = step_idx\n                self.save_model(model, output_file)\n            elif val_loss >= self.best_loss:\n                # loss is not decreasing\n                self.counter += 1\n                if self.counter >= self.patience:\n                    self.stop = True\n                    print(\"stopping early with best loss \" + \\\n                        str(self.best_loss))\n            else: \n                # loss is decreasing\n                self.best_loss = val_loss\n                self.step_at_best = step_idx\n                # reset counter\n                self.counter = 0\n                self.save_model(model, output_file)\n    \n    def save_model(self, model, output_file):\n        torch.save(model.rnn.state_dict(), output_file)\n","sub_path":"early_stopping.py","file_name":"early_stopping.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"249159628","text":"nums = [x for x in input().split()]  # 9992 562 8933\n\ntext = input()\noutput = ''\nindexes = []\n\nfor num in nums:\n    index = 0\n    for i in num:\n        index += int(i)\n    indexes.append(index)\n\nfor idx in indexes:\n    while idx >= len(text):\n        idx = 1\n    output += text[idx]\n    text = text[:idx] + text[idx + 1:]\n\nprint(output)\n\n\n\n\n\n","sub_path":"05. List Advanced-more exercises/01. Messaging.py","file_name":"01. 
Messaging.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"307484067","text":"#!/usr/bin/env python\nimport requests\nimport sys\n\nip_addr = sys.argv[1]\n\nsess = requests.session()\nr = sess.get('https://talosintelligence.com/sb_api/query_lookup',\n headers = {'referer' : 'https://talosintelligence.com/reputation_center/lookup?search=%s' % ip_addr},\n params = {'query' : '/api/v2/details/ip/', 'query_entry' : ip_addr}\n )\nif r.status_code == 200 or r.status_code == 201:\n data = r.json()\n score = data['email_score_name']\n print(score)","sub_path":"mx-score-lookup.py","file_name":"mx-score-lookup.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"639597272","text":"import re\nfrom collections import defaultdict\nimport pandas as pd\nimport numpy as np\nimport csv\n\ntry:\n import log_constants as c\nexcept ImportError:\n import summarizer.performance_utils.log_constants as c\n\n\nclass MeasurementReader(object):\n \"\"\"docstring for MeasurementReader\"\"\"\n\n def __init__(self):\n self.info = {c.DATASET: '', c.TOPIC: '', c.ORACLE_TYPE: '', c.SUMMARY_LEN: '',\n c.ITERATIONS: '', c.VERSION: '', c.MAX_WEIGHT: ''}\n self.run_log_values = [\"k\", \"t\", 'r2', 'r1', 'r4', \"r\",\n \"upper bound\", \"summary length\", \"model_id\", \"break condition\"]\n self.iteration_log_values = ['iteration', 't', 'r2', 'r1', 'r4', 'constraints', 'concepts',\n 'sentences', 'accepts', 'rejects', 'entropy', 'total concepts', 'ranking entropy', \"k\"]\n\n self.run_log = {key: [] for key in self.run_log_values}\n self.iteration_log = defaultdict(lambda: defaultdict(list))\n\n self.aggregated_data = defaultdict(list)\n self.aggregated_iteration_data = defaultdict(list)\n self.agg_max_iteration = defaultdict(list)\n\n def read_run_log(self, file):\n with open(file) as f:\n text = f.read()\n\n # get run info for plot\n for att in self.info:\n match = re.search(\"{} (\\w+.?\\w+?)\".format(att), text)\n if match is None:\n continue\n self.info[att] = match.groups()[0]\n\n mlines = text.split('\\n')\n start = \"k | t | r2\"\n for line in mlines:\n if line.startswith(start):\n i0 = mlines.index(line) + 1\n break\n measurements = mlines[i0:-1]\n for i, m in enumerate(measurements):\n tokens = m.replace(\" \", '').split('|')\n for key, token in zip(self.run_log_values, tokens):\n # in case of double ks\n # if key is 'k':\n # if token in self.run_log[key]:\n # token = token + \"_\" + str(i)\n self.run_log[key].append(token)\n\n def read_iteration_log(self, file):\n with open(file) as f:\n text = f.read()\n measurements = text.split('k=')\n for i, m in enumerate(measurements[1:]):\n lines = m.splitlines()\n k = lines[0]\n # in case of double ks, which might happen in dynamic testing with rank ranges\n # if k in self.iteration_log:\n # k = k + \"_\" + str(i)\n for line in lines[2:]:\n tokens = line.replace(\" \", '').split('|')\n for i in range(0, len(tokens) - 1):\n try:\n entry = float(tokens[i])\n except ValueError as e:\n entry = tokens[i]\n self.iteration_log[k][self.iteration_log_values[i]].append(entry)\n\n def read_corpora_stats(self, folder):\n path = folder + self.info[c.DATASET]\n self.corpus_stats_df = self.csv_to_df(path, sep=\"|\")\n\n def aggregate_data(self):\n for key, value_list in self.run_log.items():\n for value in value_list:\n try:\n self.aggregated_data[key].append(float(value))\n except ValueError:\n 
self.aggregated_data[key].append(value)\n        # extra stuff\n        # for k in self.run_log['k']:\n        #     number_of_iterations = len(self.iteration_log[k]['t'])\n        #     avg_time_per_iteration = sum(\n        #         self.iteration_log[k]['t']) / number_of_iterations\n        #     self.aggregated_data['number_of_iterations'].append(\n        #         number_of_iterations)\n        #     self.aggregated_data['avg_time_per_iteration'].append(\n        #         avg_time_per_iteration)\n\n    def aggregate_iteration_data(self):\n        keys = self.iteration_log[self.run_log['k'][0]].keys()\n        for k in self.run_log['k']:\n            for key in keys:\n                for val in self.iteration_log[k][key]:\n                    self.aggregated_iteration_data[key].append(val)\n            # add ks\n            for key in keys:\n                for val in self.iteration_log[k][key]:\n                    self.aggregated_iteration_data['k'].append(k)\n                break\n\n        del self.iteration_log\n        self.iteration_log = defaultdict(lambda: defaultdict(list))\n\n        # corpus_size = self.get_corpus_stat(\"Corpus Size\")\n        # for i in range(0, len(self.iteration_log[k][key])):\n        #     self.aggregated_iteration_data['k'].append(float(k))\n        #     self.aggregated_iteration_data['length_constraint'].append(float(self.info[c.SUMMARY_LEN]))\n        #     self.aggregated_iteration_data['corpus_size'].append(int(corpus_size))\n\n    def get_corpus_stat(self, column):\n        df = self.corpus_stats_df\n        value = df.loc[df['Topic'] == self.topic_rid][column]\n        return value.values[0]\n\n    def csv_to_df(self, path, sep=','):\n        df = pd.read_csv(path, sep=sep)\n        return df\n\n    def write_to_csv(self, path):\n        df = pd.DataFrame(data=self.aggregated_iteration_data)\n        df.to_csv(path + \"iterations.csv\", index=False)\n\n    def get_number_of_iterations(self):\n        for k in sorted(self.iteration_log.keys()):\n            self.agg_max_iteration[k].append(max(self.iteration_log[k]['iteration']))\n\n    def maxit_to_csv(self, path):\n        d = {}\n        # TODO: Rotate columns to row\n        for k, v in self.agg_max_iteration.items():\n            d[k] = sum(v) / len(v)\n        with open(path + 'maxit.csv', 'w') as f:\n            w = csv.DictWriter(f, d.keys())\n            w.writeheader()\n            w.writerow(d)\n\n    def set_topic_rid(self, rid=None):\n        if rid is None:\n            rid = self.info[c.TOPIC]\n        try:\n            self.topic_rid = int(rid)\n        except ValueError:\n            self.topic_rid = rid\n\n    def clear_aggregate_data(self):\n        self.aggregated_data = defaultdict(list)\n        self.aggregated_iteration_data = defaultdict(list)\n\n    def add_max(self, label='r', att='r2', it=10, best_slice=1, max_weight=None):\n        if max_weight is None:\n            max_weight = float(self.info[c.MAX_WEIGHT])  # info values are parsed from the log as strings\n\n        values = []\n        # measured_k = self.run_log['k']\n        for k, la in zip(self.run_log['k'], self.run_log[label]):\n            if label == 'k':\n                if k in ['10', '20']:\n                    # # if k in ['10', '20', '50']:\n                    continue\n            if label == 'r':\n                la = float(la) * max_weight\n            try:\n                values.append((la, self.iteration_log[k][att][it - 1]))\n            except IndexError:\n                values.append((la, self.iteration_log[k][att][-1]))\n                # continue\n        if not values:\n            return None\n\n        # heck = [x for x in values if x == max(values, key=lambda x: x[1])]\n        # if len(heck) > 1:\n        #     print(heck)\n        #     exit()\n\n        try:\n            return_values = sorted(values, key=lambda x: (-float(x[1]), int(x[0])))\n        except ValueError:\n            return_values = sorted(values, key=lambda x: (-float(x[1]), x[0]))\n        # return_values = sorted(values, reverse=True, key=lambda x: x[1])\n        # exit()\n        if best_slice == 1:\n            return return_values[0]\n        else:\n            return return_values[:best_slice]\n\n    def get_k_attribute_pairs(self, att='entropy', it=1):\n        for k in self.run_log['k']:\n            try:\n                yield (k, self.iteration_log[k][att][it - 1])\n            except IndexError:\n                yield (k, self.iteration_log[k][att][-1])\n\n    def 
get_value_at(self, k, att='r2', it=0):\n        try:\n            val = float(self.iteration_log[k][att][it])\n        except IndexError:\n            val = float(self.iteration_log[k][att][-1])\n        return val\n","sub_path":"ukpsummarizer-be/summarizer/performance_utils/mreader.py","file_name":"mreader.py","file_ext":"py","file_size_in_byte":8032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"512342580","text":"QWERTY = list('''-=qwertyuiop[]sdfghjkl;'zxcvbn,./_+QWERTYUIOP{}SDFGHJKL:\"ZXCVBN<>?''')\nDVORAK = list('''[]',.pyfgcrl/=oeuidhtns-;qjkxbwvz{}\"<>PYFGCRL?+OEUIDHTNS_:QJKXBWVZ''')\ndvo2qwe = {}\nqwe2dvo = {}\nn = 0\ndvo2qwe = dict(zip(DVORAK, QWERTY))\nqwe2dvo = dict(zip(QWERTY, DVORAK))\n\nAHKcode = '#SingleInstance force\\n\\n'\ntry:\n\tfor q in qwe2dvo:\n\t\tnewline = \"${}::{}\\nReturn\\n\".format(q, qwe2dvo[q])\n\t\tAHKcode += newline \n\twith open('dvorak.ahk', 'w') as f:\n\t\tf.write(AHKcode)\nexcept Exception as e:\n\tprint(e)\n\ninput()","sub_path":"dvorak/generatedvor.py","file_name":"generatedvor.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"156228309","text":"# coding: utf-8\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom pymongo import MongoClient\n\nclient = MongoClient()\ndb = client.yelp_comparative_analytics\n\ncity = 'tempe'\n_type = 'restaurants'\ntable = 'yelp_review_patterns_las_vagas_restaurant'\ncity = raw_input('enter city ')\n\nquery = {\n    'city': city,\n    'type': _type\n}\n\nbusiness = [x['business_id'] for x in list(db.yelp_business_information_processed.find(query))]\nprint(len(business))\n\n# In[4]:\n\nquery = {\n    'business_id': {'$in': business}\n}\nwhat = {\n\n    'business_id': 1,\n    'review_id': 1,\n    'funny': 1,\n    'cool': 1,\n    'stars': 1,\n    'userful': 1,\n    'text': 1,\n    'useful': 1\n}\nraw = list(db.yelp_reviews.find(query))\nprint(\"[Info] Total elements \" + str(len(raw)))\n\nreviews_df = pd.DataFrame(raw)\nreviews_df = reviews_df.drop(\"_id\", axis=1)\n\n# In[5]:\n\nreviews_df['word_count'] = reviews_df.text.apply(lambda x: len(x.split(\" \")))\nprint(\"Word count done\")\n\n\n# In[6]:\n\ndef format_word_split(txt):\n    \"\"\"Turns a text document to a list of formatted words.\n    Get rid of possessives, special characters, multiple spaces, etc.\n    \"\"\"\n    tt = txt.lower().replace(\"  \", \" \").replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"~\", \"\").replace(\"!\",\n        \" \").replace('/',\n        \" \").replace(\n        \"'\", \"\").lstrip()\n\n    return tt\n\n\n# In[7]:\n\nprint(\"Correct the text of words done\")\nreviews_df['text'] = reviews_df.text.apply(lambda x: format_word_split(x))\n\n# In[8]:\n\nprint(\"Polarity of words done\")\n\n\n# reviews_df['polarity'] = reviews_df.text.apply(lambda x : 100*abs(TextBlob(x).sentiment.polarity))\n\n\n# In[9]:\n\ndef map_reviews(review_rating):\n    dict_ = {\n        3: 3,\n        2: 2,\n        1: 1,\n        4: 2,\n        5: 1\n    }\n    return dict_[review_rating]\n\n\nprint(\"Map reviews \")\nreviews_df['stars_inv'] = reviews_df.stars.apply(lambda x: map_reviews(x))\n\n\n# In[10]:\n\ndef scaling_(number_list, scaling_factor=10):\n    maxi = np.max(number_list)\n    mini = np.min(number_list)\n\n    ret = []\n    if mini < 0:\n        for elem in number_list:\n            ret.append(elem)\n        number_list = ret[:]\n\n    maxi = np.max(number_list)\n    mini = np.min(number_list)\n    if maxi == mini:\n        return number_list\n\n    scaled = [scaling_factor * (float(x) / maxi) for x in number_list]\n    return scaled\n\n\n# In[11]:\n\ndef scores_to_scale(score):\n    ret = 
[]\n    bins = [0] + list(np.histogram(score, bins=8)[1])\n    bins.append(np.max(score) + 10)\n\n    for elem in score:\n        pos = None\n        for i in range(len(bins) - 1):\n            if bins[i] <= elem <= bins[i + 1]:\n                pos = bins.index(bins[i])\n        ret.append(pos)\n    return ret\n\n\n# In[13]:\n\nprint(\"calculating score start\")\nret = []\nreviews_df_scored = None\nreview_grouped = reviews_df.groupby('business_id')\nfor _, group in review_grouped:\n    group = group.copy()\n\n    group['user_votes'] = group['useful'] + group['cool'] + group['funny']\n    group['sc_word_count'] = scaling_(group.word_count)\n    group['sc_user_score'] = scaling_(group.user_votes, 30)\n    group['sc_stars'] = scaling_(group.stars_inv)\n\n    group['score'] = group['user_votes'] + group['sc_word_count'] + group['sc_stars']\n    group['scaled_score'] = scores_to_scale(group.score)\n\n    ret.append(group)\n\n    if reviews_df_scored is None:\n        reviews_df_scored = group.copy()\n    else:\n        reviews_df_scored = pd.concat([group, reviews_df_scored])\nprint(\"calculating score done\")\n\nreviews_df_scored.sort_values('scaled_score', ascending=False).head(n=20)\n\n\ndef to_mongo_db(df, collection_name):\n    records = json.loads(df.T.to_json()).values()\n    db[collection_name].insert_many(records)\n\n\nto_mongo_db(reviews_df_scored, 'yelp_review_scored')\n","sub_path":"review_helpfulness.py","file_name":"review_helpfulness.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"136832969","text":"# 1.- Import the NUMPY package under the name np.\nimport numpy\n\n\n# 2. Print the NUMPY version and the configuration.\nnumpy_version = numpy.version\nprint(\"Version:\", numpy_version.version)\n\n\n# 3. Generate a 2x3x5 3-dimensional array with random values. Assign the array to variable \"a\"\n# Challenge: there are at least three easy ways that use numpy to generate random arrays. How many ways can you find?\na = numpy.random.random((2, 3, 5))\n\n\n# 4. Print a.\nprint(\"a: \", a)\n\n\n# 5. Create a 5x2x3 3-dimensional array with all values equaling 1.\n#    Assign the array to variable \"b\"\nb = numpy.ones((5, 3, 2))\n\n\n# 6. Print b.\nprint(\"b:\", b)\n\n\n# 7. Do a and b have the same size? How do you prove that in Python code?\nprint(\"Same Size A and B?\", a.size == b.size)\n\n\n# 8. Are you able to add a and b? Why or why not?\ntry:\n    numpy.add(a, b)\n\nexcept:\n    print(\"They are not the same shape\")\n\n\n# 9. Transpose b so that it has the same structure of a (i.e. become a 2x3x5 array). Assign the transposed array\n# to variable \"c\".\nc = numpy.transpose(b)\nprint(\"c:\", c)\n\n\n# 10. Try to add a and c. Now it should work. Assign the sum to variable \"d\". But why does it work now?\ntry:\n    d = numpy.add(a, c)\nexcept:\n    print(\"operands could not be broadcast together with shapes / they are not the same shape\")\n\nprint(\"a-shape: \", a.shape, \"c-shape\", c.shape)\n\n\n# 11. Print a and d. Notice the difference and relation of the two array in terms of the values? Explain.\nprint(\"a: \", a)\nprint(\"d: \", d)\n# Numpy performed an element-wise addition of the arrays, adding 1 to every value in the array\n\n\n# 12. Multiply a and c. Assign the result to e.\ne = numpy.multiply(a, c)\nprint(a, c)\nprint(\"e:\", e)\n\n\n# 13. Does e equal to a? Why or why not?\nprint(\"e equal to a?\", a == e)\n# Because each value was only multiplied by 1\n\n\n# 14. Identify the max, min, and mean values in d. 
Assign those values to variables \"d_max\", \"d_min\", and \"d_mean\"\nd_max = d.max()\nd_min = d.min()\nd_mean = d.mean()\nprint(\"Max:\", d_max, \"Min:\", d_min, \"Mean:\", d_mean)\n\n\n# 15. Now we want to label the values in d. First create an empty array \"f\" with the same shape (i.e. 2x3x5)\n# as d using `np.empty`.\nf = numpy.empty((2, 3, 5),dtype=object)\n\n\n\"\"\"\n#16. Populate the values in f. For each value in d, if it's larger than d_min but smaller than d_mean, assign 25 to the \ncorresponding value in f.\nIf a value in d is larger than d_mean but smaller than d_max, assign 75 to the corresponding value in f.\nIf a value equals to d_mean, assign 50 to the corresponding value in f.\nAssign 0 to the corresponding value(s) in f for d_min in d.\nAssign 100 to the corresponding value(s) in f for d_max in d.\nIn the end, f should have only the following values: 0, 25, 50, 75, and 100.\nNote: you don't have to use Numpy in this question.\n\"\"\"\nfor i in range(len(d)):\n    for j in range(len(d[0])):\n        for k in range(len(d[0, 0])):\n            if d[i, j, k] > d_min and d[i, j, k,] < d_mean:\n                f[i, j, k] = 25\n            elif d[i, j, k] > d_mean and d[i,j,k,] < d_max:\n                f[i, j, k] = 75\n            elif d[i, j, k] == d_mean:\n                f[i, j, k] = 50\n            elif d[i, j, k] == d_min:\n                f[i, j, k] = 0\n            elif d[i, j, k] == d_max:\n                f[i, j, k] = 100\nprint(\"F:\", f)\n\n\n\"\"\"\n#17. Print d and f. Do you have your expected f?\nFor instance, if your d is:\narray([[[1.85836099, 1.67064465, 1.62576044, 1.40243961, 1.88454931],\n        [1.75354326, 1.69403643, 1.36729252, 1.61415071, 1.12104981],\n        [1.72201435, 1.1862918 , 1.87078449, 1.7726778 , 1.88180042]],\n\n       [[1.44747908, 1.31673383, 1.02000951, 1.52218947, 1.97066381],\n        [1.79129243, 1.74983003, 1.96028037, 1.85166831, 1.65450881],\n        [1.18068344, 1.9587381 , 1.00656599, 1.93402165, 1.73514584]]])\n\nYour f should be:\narray([[[ 75.,  75.,  75.,  25.,  75.],\n        [ 75.,  75.,  25.,  25.,  25.],\n        [ 75.,  25.,  75.,  75.,  75.]],\n\n       [[ 25.,  25.,  25.,  25., 100.],\n        [ 75.,  75.,  75.,  75.,  75.],\n        [ 25.,  75.,   0.,  75.,  75.]]])\n\"\"\"\nprint(d)\nprint(f)\n\n\n\"\"\"\n#18. Bonus question: instead of using numbers (i.e. 0, 25, 50, 75, and 100), how to use string values \n(\"A=0\", \"B=25\", \"C=50\", \"D=75\", and \"E=100\") to label the array elements? 
You are expecting the result to be:\narray([[[ 'D', 'D', 'D', 'B', 'D'],\n        [ 'D', 'D', 'B', 'B', 'B'],\n        [ 'D', 'B', 'D', 'D', 'D']],\n\n       [[ 'B', 'B', 'B', 'B', 'E'],\n        [ 'D', 'D', 'D', 'D', 'D'],\n        [ 'B', 'D', 'A', 'D', 'D']]])\nAgain, you don't need Numpy in this question.\n\"\"\"\n'''\ng = numpy.chararray((2,3,5))\nfor i in range(len(d)):\n    for j in range(len(d[0])):\n        for k in range(len(d[0,0])):\n            if d[i,j,k] > d_min and d[i,j,k,] < d_mean:\n                g[i,j,k] = 'B'\n            elif d[i,j,k] > d_mean and d[i,j,k,] < d_max:\n                g[i,j,k] = 'D'\n            elif d[i,j,k] == d_mean:\n                g[i,j,k] = 'C'\n            elif d[i,j,k] == d_min:\n                g[i,j,k] = 'A'\n            elif d[i,j,k] == d_max:\n                g[i,j,k] = 'E'\nprint(g)\n'''\n\nfor i in range(len(d)):\n    for j in range(len(d[0])):\n        for k in range(len(d[0, 0])):\n            if d[i, j, k] > d_min and d[i, j, k,] < d_mean:\n                f[i, j, k] = \"B\"\n            elif d[i, j, k] > d_mean and d[i,j,k,] < d_max:\n                f[i, j, k] = \"D\"\n            elif d[i, j, k] == d_mean:\n                f[i, j, k] = \"C\"\n            elif d[i, j, k] == d_min:\n                f[i, j, k] = \"A\"\n            elif d[i, j, k] == d_max:\n                f[i, j, k] = \"E\"\nprint(\"F:\", f)\n","sub_path":"your-code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"575087294","text":"\n\"\"\"\nCall with `[size] [limit]` like: `10 5`\nThis should always take no more than `limit` seconds.\n\"\"\"\n\nimport asyncio\nimport aiocogs\nimport random\n\nloop = asyncio.get_event_loop()\n\nasync def execute(wait):\n\n    \"\"\"\n    Just wait for a bit.\n    \"\"\"\n\n    await asyncio.sleep(wait)\n\nasync def main(size, limit):\n\n    \"\"\"\n    Calculate random periods and pass them to execute.\n    Yield their tasks as they complete.\n    \"\"\"\n\n    # need this to be index-able for later\n    waits = tuple(random.randrange(1, limit) for index in range(size))\n\n    print('order', *waits)\n\n    coroutines = map(execute, waits)\n\n    # this too so we can track our waits\n    tasks = tuple(map(loop.create_task, coroutines))\n\n    async for task in aiocogs.ready(*tasks):\n\n        # sanity check\n        result = task.result()\n\n        index = tasks.index(task)\n\n        wait = waits[index]\n\n        print(f'finished {wait}')\n\nif __name__ == '__main__':\n\n    import sys\n\n    args = map(int, sys.argv[1:])\n\n    coroutine = main(*args)\n\n    try:\n\n        loop.run_until_complete(coroutine)\n\n    except KeyboardInterrupt:\n\n        pass\n","sub_path":"examples/ready.py","file_name":"ready.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"440138489","text":"#!/usr/bin/env /usr/bin/python2.7\n# -*- coding:utf-8 -*-\n\nimport os, sys, string, glob, socket, json\nimport syslog\nimport dhtreader\n\nplugin_name = list(os.path.split(sys.argv[0]))[1]\n\narg = sys.argv[1] if len(sys.argv) == 2 else None\nif arg == \"config\":\n\tprint(\"graph_title Humidity/temperature from DHT11\");\n\tprint(\"graph_vlabel %\");\n\tprint(\"graph_category environmental\");\n\tprint(\"h.label humidity\");\n\tprint(\"h.draw AREA\");\nelse:\n\tdhtreader.init()\n\tread_loop = True\n\twhile read_loop:\n\t\ttry:\n\t\t\tt, h = dhtreader.read(11, 25)\n\t\texcept Exception as e:\n\t\t\tsyslog.syslog(syslog.LOG_ERR, \"read exception: %s\" %(e));\n\t\telse:\n\t\t\tread_loop = False\n\tprint(\"h.value {0}\".format(h))\n\t\n\t\nsys.exit(0)\n","sub_path":"munin/DHT11.py","file_name":"DHT11.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"271751675","text":"import tensorflow as tf\n\n\ndef read_data(source_path, target_path):\n    with open(source_path, 'r', encoding='utf-8') as f:\n        source_data = f.read()\n    with open(target_path, 'r', encoding='utf-8') as f:\n        target_data = f.read()\n    return source_data.split('\\n'), target_data.split('\\n')\n\n\ndef construct_character_vocab(source_data_list, target_data_list):\n    \"\"\"Construct the source and target vocabularies\n\n    :param source_data_list: a list of source data, eg. returned by read_data\n    :param target_data_list: a list of target data, eg. returned by read_data\n    :return: source_vocab_char_to_int, source_vocab_int_to_char,\n             target_vocab_char_to_int, target_vocab_int_to_char\n    \"\"\"\n\n    def vocab(data):\n        \"\"\"\n        :param data: a list\n        :return: character_to_int, int_to_character\n        \"\"\"\n        special_chars = ['<PAD>', '<UNK>', '<GO>', '<EOS>']\n        # sorted_chars = special_chars + sorted(Counter([char for line in data for char in line]).keys())\n        sorted_chars = special_chars + sorted(list(set([char for line in data for char in line])))\n        int_to_character = {ids: ch for ids, ch in enumerate(sorted_chars)}\n        character_to_int = {ch: ids for ids, ch in enumerate(sorted_chars)}\n\n        return character_to_int, int_to_character\n\n    source_vocab_char_to_int, source_vocab_int_to_char = vocab(source_data_list)\n    target_vocab_char_to_int, target_vocab_int_to_char = vocab(target_data_list)\n\n    return source_vocab_char_to_int, source_vocab_int_to_char, \\\n           target_vocab_char_to_int, target_vocab_int_to_char\n\n\ndef source_target_to_int(source_data_list, target_data_list,\n                         source_vocab_char_to_int, target_vocab_char_to_int):\n    \"\"\"\n\n    :param source_data_list: a list of source data, eg. returned by read_data\n    :param target_data_list: a list of target data, eg. 
returned by read_data\n    :param source_vocab_char_to_int: vocabulary mapping source character to integer\n    :param target_vocab_char_to_int: vocabulary mapping target character to integer\n    :return: source_int, target_int\n    \"\"\"\n    source_int = [[source_vocab_char_to_int.get(char, source_vocab_char_to_int['<UNK>']) for char in line]\n                  for line in source_data_list]\n    target_int = [[target_vocab_char_to_int.get(char, target_vocab_char_to_int['<UNK>']) for char in line]\n                  + [target_vocab_char_to_int['<EOS>']] for line in target_data_list]\n\n    return source_int, target_int\n\n\ndef process_decoder_inputs(target_inputs_ids, batch_size, target_vocab_char_to_int, start_token='<GO>'):\n    \"\"\"Delete the last character of each example and add the start token to the front\n\n    :param target_inputs_ids: a list, shape (batch_size, time_steps)\n    :param batch_size: size of current batch\n    :param target_vocab_char_to_int: a dict, returned by construct_character_vocab\n    :param start_token: the start token to be added to the target inputs\n    :return: processed target_inputs_ids\n    \"\"\"\n    # delete the last character of each example\n    deleted = tf.strided_slice(target_inputs_ids, [0, 0], [batch_size, -1], [1, 1])\n    output = tf.concat([tf.fill([batch_size, 1], target_vocab_char_to_int['<GO>']), deleted],\n                       axis=1)\n    return output\n\n\ndef pad_batch(batch_input, pad_int):\n    \"\"\"Pad the batch_input to equivalent length\n    \"\"\"\n    max_length = max([len(line) for line in batch_input])\n    return [line + [pad_int]*(max_length-len(line)) for line in batch_input]\n\n\ndef get_batch(batch_size, source_int, target_int, source_pad_int, target_pad_int):\n    \"\"\"Define a generator to yield a batch of data\n\n    \"\"\"\n    times = len(source_int) // batch_size\n\n    for i in range(times):\n        start = i * batch_size\n        batch_source = source_int[start: start+batch_size]\n        batch_target = target_int[start: start+batch_size]\n\n        # Record the real lengths of target_int which will be used to compute losses\n        target_lengths = []\n        target_lengths.extend(len(line) for line in batch_target)\n        source_lengths = []\n        source_lengths.extend(len(line) for line in batch_source)\n\n        batch_pad_source_int = pad_batch(batch_source, source_pad_int)\n        batch_pad_target_int = pad_batch(batch_target, target_pad_int)\n\n        yield batch_pad_source_int, batch_pad_target_int, target_lengths, source_lengths\n\n\ndef get_inputs():\n    batch_source_input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='batch_source_input_ids')\n    batch_target_input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='batch_target_input_ids')\n    target_sequences_lengths = tf.placeholder(dtype=tf.int32, shape=[None, ], name='target_sequences_lengths')\n    max_target_sequences_length = tf.reduce_max(target_sequences_lengths)\n\n    return batch_source_input_ids, batch_target_input_ids, target_sequences_lengths, max_target_sequences_length\n\n\nif __name__ == '__main__':\n    source_data_list, target_data_list = read_data('./data/letters_source.txt', './data/letters_target.txt')\n    # print(source_data[: 5])\n    # print(target_data[: 5])\n    source_vocab_char_to_int, source_vocab_int_to_char, target_vocab_char_to_int, target_vocab_int_to_char = \\\n        construct_character_vocab(source_data_list, target_data_list)\n\n    source_int, target_int = source_target_to_int(source_data_list, target_data_list,\n                                                   source_vocab_char_to_int, target_vocab_char_to_int)\n\n    print(source_int[:5])\n    print(target_int[:5])\n\n    test_source_int = source_int[: 128]\n    test_target_int = target_int[: 128]\n    batch_generator = 
get_batch(128, test_source_int, test_target_int,\n                                     source_vocab_char_to_int['<PAD>'], target_vocab_char_to_int['<PAD>'])\n    for batch_pad_source_int, batch_pad_target_int, target_lengths, source_lengths in batch_generator:\n        print(batch_pad_source_int[0])\n        print(batch_pad_target_int[0])\n","sub_path":"seq2seq_character/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"58521478","text":"\n#https://www.hackerrank.com/challenges/predicting-house-prices\n\n'''\n2 7\n0.18 0.89 109.85\n1.0 0.26 155.72\n0.92 0.11 137.66\n0.07 0.37 76.17\n0.85 0.16 139.75\n0.99 0.41 162.6\n0.87 0.47 151.77\n4\n0.49 0.18\n0.57 0.83\n0.56 0.64\n0.76 0.18\n'''\n\nfrom sklearn.linear_model.base import LinearRegression\n\nclass inp_reader():\n    inp_features = list()\n    inp_prices = list()\n    features = list()\n    def get_inp_features(self): \n        return self.inp_features\n    def get_inp_prices(self): \n        return self.inp_prices\n    def get_features(self): \n        return self.features\n    \n    def read(self):\n        F, N = map(int, raw_input().split(' ')) \n        for _ in range(N):\n            inp_f = map(float, raw_input().strip().split())\n            self.inp_features.append(inp_f[:F:])\n            self.inp_prices.append(inp_f[F::])\n        questions = int(raw_input()) \n        for _ in range(questions):\n            self.features.append(map(float, raw_input().split()))\n    \nreader = inp_reader()\nreader.read()\ninp_features = reader.get_inp_features()\ninp_prices = reader.get_inp_prices()\nfeatures = reader.get_features()\n \nmodel = LinearRegression()\n\nmodel.fit(inp_features, inp_prices)\nprices=model.predict(features)\nfor el in prices:\n    print (el[0])","sub_path":"ai/predictiongHousePrices/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"386027331","text":"import django_tables2 as tables\n\nfrom utilities.tables import BaseTable, TagColumn, ToggleColumn\nfrom ipam.models import *\n\n__all__ = (\n    'ServiceTable',\n)\n\n\n#\n# Services\n#\n\nclass ServiceTable(BaseTable):\n    pk = ToggleColumn()\n    name = tables.Column(\n        linkify=True\n    )\n    parent = tables.Column(\n        linkify=True,\n        order_by=('device', 'virtual_machine')\n    )\n    ports = tables.TemplateColumn(\n        template_code='{{ record.port_list }}',\n        verbose_name='Ports'\n    )\n    tags = TagColumn(\n        url_name='ipam:service_list'\n    )\n\n    class Meta(BaseTable.Meta):\n        model = Service\n        fields = ('pk', 'id', 'name', 'parent', 'protocol', 'ports', 'ipaddresses', 'description', 'tags')\n        default_columns = ('pk', 'name', 'parent', 'protocol', 'ports', 'description')\n","sub_path":"netbox/ipam/tables/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"355752457","text":"\nfrom timemachines.skaters.tcn.tcninclusiontraining import using_tcntraining\n\nif using_tcntraining:\n    from timemachines.skaters.elo.eloensembles import elo_fastest_residual_precision_ensemble\n    from timemachines.skaters.tcn.tcntraining import train_tcn_surrogate\n\n    def test_tcn_training():\n        f = elo_fastest_residual_precision_ensemble\n        n_lags = 20\n        onnx_models = train_tcn_surrogate(f=f, k=1, n_real=1, n_samples=10, n_warm=50, n_tile=2,\n                                          n_input=n_lags, verbose=True, n_iterations=6, n_models=3)\n\n\nif __name__=='__main__':\n    assert using_tcntraining\n    
test_tcn_training()","sub_path":"tests/tcn/test_tcn_training.py","file_name":"test_tcn_training.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"432083930","text":"import requests\n\nfrom stobjects import *\nimport json\n\n\n__author__ = 'Dan'\n\n\nclass STInterface:\n    QUOTE_SUFFIX = \"quote\"\n    LOCATION_SUFFIX = \"location\"\n    CUSTOMER_SUFFIX = \"company\"\n    ATTACHMENT_SUFFIX = \"attachment\"\n    JOB_SUFFIX = \"job\"\n    DEFICIENCY_SUFFIX = \"deficiency\"\n    USER_SUFFIX = \"user\"\n    APPOINTMENT_SUFFIX = \"appointment\"\n    CLOCK_SUFFIX = \"clock\"\n    COMMENT_SUFFIX = \"comment\"\n\n    JOB_ENTITY_TYPE = \"3\"\n    QUOTE_ENTITY_TYPE = \"9\"\n    DEFICIENCY_ENTITY_TYPE = \"10\"\n    LOCATION_ENTITY_TYPE = \"11\"\n    CUSTOMER_ENTITY_TYPE = \"5\"\n\n    JSON_TRUE = \"1\"\n    JSON_FALSE = \"0\"\n\n    def __init__(self, username, password, authtoken=None):\n\n        # building an exe throws a hissy fit and can't find the .pem file for verification, so it's disabled for now\n        requests.packages.urllib3.disable_warnings()\n\n        self.base_url = \"https://api.servicetrade.com/api\"\n        self.username = username\n        self.password = password\n        self.r = None\n\n        self.authToken = authtoken\n        if self.authToken is not None:\n            # TODO don't assume the authtoken is valid, run a check first\n            self.authenticated = True\n        else:\n            self.authenticated = False\n\n    def authenticate(self):\n        payload = {\"username\": self.username, \"password\": self.password}\n\n        # building an exe throws a hissy fit and can't find the .pem file for verification, so it's disabled for now\n        # TODO fix requests verification\n        self.r = requests.post(self.base_url + \"/auth\", params=payload, verify=False)\n\n        try:\n            j = json.loads(self.r.text)\n            self.authToken = str(j['data']['authToken'])\n\n            authenticateduser = User(j['data']['user'])\n            print(\"Authentication successful, hello %s\" % authenticateduser.fullname)\n\n            self.authenticated = True\n        except:\n            print(\"Failed to authenticate\")\n            return\n\n    def getalllocations(self):\n        objectsuffix = self.LOCATION_SUFFIX\n        groupsuffix = \"locations\"\n        classname = Location\n        params = {\"limit\": \"5000\"}\n\n        return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n    def getalljobs(self):\n        objectsuffix = self.JOB_SUFFIX\n        groupsuffix = \"jobs\"\n        classname = Job\n        params = {\"status\": \"all\"}\n\n        return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n    def getjobsfromlocation(self, locationId):\n        objectsuffix = self.JOB_SUFFIX\n        groupsuffix = \"jobs\"\n        classname = Job\n        params = {\"locationId\": str(locationId)}\n\n        return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n    def getdeficienciesfromlocation(self, locationId):\n        objectsuffix = self.DEFICIENCY_SUFFIX\n        groupsuffix = \"deficiencies\"\n        classname = Deficiency\n        params = {\"locationId\": str(locationId)}\n\n        return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n    def getappointmentsfromjob(self, jobId):\n        objectsuffix = self.APPOINTMENT_SUFFIX\n        groupsuffix = \"appointments\"\n        classname = Appointment\n        params = {\"jobId\": str(jobId)}\n\n        return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n    def getallcustomers(self):\n        objectsuffix = self.CUSTOMER_SUFFIX\n        groupsuffix = \"companies\"\n        classname = Company\n\n        return self.getallobjects(objectsuffix, groupsuffix, classname)\n\n    def getlocationsfromcompany(self, companyid):\n        objectsuffix = self.LOCATION_SUFFIX\n        groupsuffix = 
\"locations\"\n classname = Location\n params = {\"companyId\": str(companyid), \"limit\": \"5000\"}\n\n return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n def getallobjects(self, objectsuffix, groupsuffix, classname, params=None):\n if self.authenticated:\n objects = []\n\n objectmap = self.execquery(objectsuffix, params)\n\n for jsondata in objectmap.get('data', {}).get(groupsuffix, []):\n objects.append(classname(jsondata))\n\n return objects\n\n else:\n Utils.log(\"error\", \"not authenticated\")\n return []\n\n def getsingleobject(self, objectsuffix, objectId, classname, params=None):\n if self.authenticated:\n url_suffix = \"%s/%s\" % (objectsuffix, str(objectId))\n objectmap = self.execquery(url_suffix, params)\n\n obj = None\n rawdata = objectmap.get('data')\n if rawdata is not None:\n obj = classname(rawdata)\n else:\n Utils.log(\"warning\", \"error getting a single object from query, query returned empty for id: \" + objectId)\n\n return obj\n\n else:\n Utils.log(\"error\", \"not authenticated\")\n return []\n\n def getjob(self, jobId):\n objectsuffix = self.JOB_SUFFIX\n classname = Job\n\n return self.getsingleobject(objectsuffix, jobId, classname)\n\n def getspecificjobs(self, jobidlist):\n objectsuffix = self.JOB_SUFFIX\n groupsuffix = \"jobs\"\n classname = Job\n params = {\"jobIds\": \",\".join(map(str, jobidlist)), \"status\": \"all\"}\n jobs = self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n return jobs\n\n def getallattachments(self, entity_id, entity_type):\n objectsuffix = self.ATTACHMENT_SUFFIX\n groupsuffix = \"attachments\"\n classname = Attachment\n params = {\"entityId\": entity_id, \"entityType\": entity_type}\n\n return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n def getallcomments(self, entity_id, entity_type):\n objectsuffix = self.COMMENT_SUFFIX\n groupsuffix = \"comments\"\n classname = Comment\n params = {\"entityId\": entity_id, \"entityType\": entity_type}\n\n return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n def getquotesfromlocation(self, companyid):\n objectsuffix = self.QUOTE_SUFFIX\n groupsuffix = \"quotes\"\n classname = Quote\n params = {\"locationId\": str(companyid)}\n\n return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n def getalltechnicians(self):\n objectsuffix = self.USER_SUFFIX\n groupsuffix = \"users\"\n classname = User\n params = {\"isTech\": self.JSON_TRUE}\n\n return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n def getclockevents(self, userId, startTime, endTime):\n objectsuffix = self.CLOCK_SUFFIX\n groupsuffix = \"events\"\n classname = ClockEvent\n params = {\"userId\": str(userId), \"startTime\": str(startTime), \"endTime\": str(endTime)}\n\n # TODO update each clock event with its job parents coordinates to get a distance in miles from job\n\n return self.getallobjects(objectsuffix, groupsuffix, classname, params)\n\n def executefullbackup(self, destinationdirectory):\n if self.authenticated:\n\n # create the base directory\n datadestinationdirectory = os.path.join(destinationdirectory, \"ST-Backup\")\n self.makedirs(datadestinationdirectory)\n\n # begin the recursive backup process\n # this will start on companies, and run down the servicetrade object structure pulling everything\n self.processcompanies(datadestinationdirectory)\n\n else:\n Utils.log(\"error\", \"not authenticated\")\n return []\n\n def execquery(self, urlsuffix, payload=None):\n cookie = {\"PHPSESSID\": self.authToken}\n 
queryurl = self.base_url + \"/\" + urlsuffix\n try:\n self.r = requests.get(queryurl, cookies=cookie, params=payload, verify=False)\n return json.loads(self.r.text)\n except:\n Utils.log(\"error\", \"failed to parse query results, Url: %s\" % queryurl)\n return {}\n\n def makedirs(self, directory):\n try:\n if not os.path.isdir(directory):\n os.makedirs(directory)\n except:\n Utils.log(\"io error\", \"failed to create directory in the supplied directory\")\n return\n\n def processattachments(self, basedir, entityId, entityType, recursionlevel=0):\n\n attachmentsdir = os.path.join(basedir, \"Attachments\")\n self.makedirs(attachmentsdir)\n\n jobattachments = self.getallattachments(entityId, entityType)\n\n attachmentindex = 0\n for attachment in jobattachments:\n attachmentindex += 1\n Utils.log(\"backup status\", \"%sDownloading attachment %d of %d\" %\n (\" \" * recursionlevel, attachmentindex, len(jobattachments)))\n try:\n attachment.savetodisk(attachmentsdir)\n except:\n Utils.log(\"error\", \"failed to save attachment %d of %d\" % (attachmentindex, len(jobattachments)))\n\n def processcomments(self, basedir, entityId, entityType, recursionlevel=0):\n\n commentsdir = os.path.join(basedir, \"Comments\")\n self.makedirs(commentsdir)\n\n comments = self.getallcomments(entityId, entityType)\n\n commentindex = 0\n for comment in comments:\n commentindex += 1\n Utils.log(\"backup status\", \"%sWorking on comment %d of %d\" %\n (\" \" * recursionlevel, commentindex, len(comments)))\n try:\n comment.savetodisk(commentsdir)\n except:\n Utils.log(\"error\", \"failed to save comment %d of %d\" % (commentindex, len(comments)))\n\n def processjobs(self, basedir, locationId, recursionlevel=0):\n\n locationjobs = self.getjobsfromlocation(locationId)\n\n jobfolder = os.path.join(basedir, \"Jobs\")\n self.makedirs(jobfolder)\n\n jobindex = 0\n for job in locationjobs:\n jobindex += 1\n Utils.log(\"backup status\",\n \"%sWorking on job %d of %d\" % (\" \" * recursionlevel, jobindex, len(locationjobs)))\n\n jobsubfolder = os.path.join(jobfolder, job.id)\n self.makedirs(jobsubfolder)\n job.savetodisk(jobsubfolder)\n\n self.processappointments(jobsubfolder, job.id, recursionlevel + 1)\n\n self.processattachments(jobsubfolder, job.id, self.JOB_ENTITY_TYPE, recursionlevel + 1)\n\n # process the comments for this location\n self.processcomments(jobsubfolder, job.id, self.JOB_ENTITY_TYPE, recursionlevel + 1)\n\n def processdeficiencies(self, basedir, locationId, recursionlevel=0):\n\n locationdeficiencies = self.getdeficienciesfromlocation(locationId)\n\n deficiencyfolder = os.path.join(basedir, \"Deficiencies\")\n self.makedirs(deficiencyfolder)\n\n deficiencyindex = 0\n for deficiency in locationdeficiencies:\n deficiencyindex += 1\n Utils.log(\"backup status\", \"%sWorking on deficiency %d of %d\" % (\" \" * recursionlevel, deficiencyindex, len(locationdeficiencies)))\n\n deficiencysubfolder = os.path.join(deficiencyfolder, deficiency.id)\n self.makedirs(deficiencysubfolder)\n deficiency.savetodisk(deficiencysubfolder)\n\n self.processattachments(deficiencysubfolder, deficiency.id, self.DEFICIENCY_ENTITY_TYPE, recursionlevel + 1)\n\n # process the comments for this location\n self.processcomments(deficiencysubfolder, deficiency.id, self.DEFICIENCY_ENTITY_TYPE, recursionlevel + 1)\n\n def processquotes(self, basedir, locationId, recursionlevel=0):\n\n quotefolder = os.path.join(basedir, \"Quotes\")\n self.makedirs(quotefolder)\n\n locationquotes = self.getquotesfromlocation(locationId)\n\n quoteindex = 0\n 
for quote in locationquotes:\n quoteindex += 1\n Utils.log(\"backup status\",\n \"%sWorking on quote %d of %d for this location\" % (\" \" * recursionlevel, quoteindex, len(locationquotes)))\n\n quotesubfolder = os.path.join(quotefolder, quote.id)\n self.makedirs(quotesubfolder)\n quote.savetodisk(quotesubfolder)\n\n self.processattachments(quotesubfolder, quote.id, self.QUOTE_ENTITY_TYPE, recursionlevel + 1)\n\n # process the comments for this location\n self.processcomments(quotesubfolder, quote.id, self.QUOTE_ENTITY_TYPE, recursionlevel + 1)\n\n def processappointments(self, basedir, jobId, recursionlevel=0):\n\n appointmentdir = os.path.join(basedir, \"Appointments\")\n self.makedirs(appointmentdir)\n\n jobappointments = self.getappointmentsfromjob(jobId)\n\n appointmentindex = 0\n for appointment in jobappointments:\n appointmentindex += 1\n Utils.log(\"backup status\", \"%sWorking on appointment %d of %d for this job\" % (\" \" * recursionlevel, appointmentindex, len(jobappointments)))\n\n self.makedirs(appointmentdir)\n appointment.savetodisk(appointmentdir)\n\n def processlocations(self, basedir, companyId, recursionlevel=0):\n\n locationdir = os.path.join(basedir, \"Locations\")\n self.makedirs(locationdir)\n\n companylocations = self.getlocationsfromcompany(companyId)\n\n locationindex = 0\n for location in companylocations:\n locationindex += 1\n Utils.log(\"backup status\", \"%sWorking on location %d of %d for this customer\" % (\n \" \" * recursionlevel, locationindex, len(companylocations)))\n locationsubfolder = os.path.join(locationdir, location.id)\n\n # create location folder and save the data there\n self.makedirs(locationsubfolder)\n location.savetodisk(locationsubfolder)\n\n # process the jobs for this location\n self.processjobs(locationsubfolder, location.id, recursionlevel + 1)\n\n # process the quotes for this location\n self.processquotes(locationsubfolder, location.id, recursionlevel + 1)\n\n # process the deficiencies for this location\n self.processdeficiencies(locationsubfolder, location.id)\n\n # process the attachments for this location\n self.processattachments(locationsubfolder, location.id, self.LOCATION_ENTITY_TYPE, recursionlevel + 1)\n\n # process the comments for this location\n self.processcomments(locationsubfolder, location.id, self.LOCATION_ENTITY_TYPE, recursionlevel + 1)\n\n def processcompanies(self, basedir, recursionlevel=0):\n\n companyfolder = os.path.join(basedir, \"Companies\")\n self.makedirs(companyfolder)\n\n customers = self.getallcustomers()\n\n companyindex = 0\n for customer in customers:\n customersubfolder = os.path.join(companyfolder, customer.id)\n self.makedirs(customersubfolder)\n\n companyindex += 1\n Utils.log(\"backup status\", \"Working on customer %d of %d\" % (companyindex, len(customers)))\n\n customer.savetodisk(customersubfolder)\n\n self.processlocations(customersubfolder, customer.id, recursionlevel + 1)\n\n self.processcomments(customersubfolder, customer.id, self.CUSTOMER_ENTITY_TYPE, recursionlevel + 1)\n\n self.processattachments(customersubfolder, customer.id, self.CUSTOMER_ENTITY_TYPE, recursionlevel + 1)\n\n","sub_path":"stinterface/stinterface.py","file_name":"stinterface.py","file_ext":"py","file_size_in_byte":15324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"151325276","text":"\"\"\"\nGeneric icon handling, especially of embedded SVG images served from a pool of icons.\n\"\"\"\n\nfrom . 
import html5\nfrom .network import HTTPRequest\nfrom flare.config import conf\n\n\n@html5.tag(\"flare-svg-icon\")\nclass SvgIcon(html5.svg.Svg):\n\t\"\"\"\n\tA raw, embedded SVG icon-component\n\t\"\"\"\n\n\tdef __init__(self, value=None, fallbackIcon=None, title=\"\"):\n\t\tsuper().__init__()\n\t\tself.value = value\n\t\tself.title = title\n\t\tself.fallbackIcon = fallbackIcon\n\n\t\tself[\"xmlns\"] = \"http://www.w3.org/2000/svg\"\n\t\tself[\"class\"] = [\"icon\"]  # mostly used\n\n\t\tif title:\n\t\t\tself[\"title\"] = title\n\n\t\tif value:\n\t\t\tself.getIcon()\n\n\tdef _setValue(self, value):\n\t\tself.value = value\n\t\tself.getIcon()\n\n\tdef _setTitle(self, val):\n\t\tself.title = val\n\n\tdef getIcon(self):\n\t\tif self.value and self.value.endswith(\".svg\"):\n\t\t\turl = self.value\n\t\telse:\n\t\t\turl = conf[\"basePathSvgs\"] + \"/%s.svg\" % self.value\n\n\t\tHTTPRequest(\"GET\", url, callbackSuccess=self.replaceSVG, callbackFailure=self.requestFallBack)\n\n\tdef replaceSVG(self, icondata):\n\t\tself.removeAllChildren()\n\n\t\tfor node in html5.fromHTML(icondata):\n\t\t\tif isinstance(node, html5.svg.Svg):\n\t\t\t\tself[\"viewbox\"] = node[\"viewbox\"]\n\t\t\t\tself[\"class\"] = node[\"class\"]\n\t\t\t\tself.appendChild(node._children)\n\t\t\t\tbreak\n\n\tdef requestFallBack(self, data, status):\n\t\turl = None\n\t\tif self.fallbackIcon:\n\t\t\turl = conf[\"basePathSvgs\"] + \"/%s.svg\" % self.fallbackIcon\n\t\telif self.title:\n\t\t\t#language=HTML\n\t\t\tself[\"viewbox\"] = \"-10 -10 20 20\"\n\t\t\tself.appendChild('''<text text-anchor=\"middle\" dominant-baseline=\"central\">%s</text>'''%self.title[0].upper())\n\t\telse:\n\t\t\turl = conf[\"basePathSvgs\"] + \"/icon-error.svg\"  # fallback\n\n\t\tif url:\n\t\t\tHTTPRequest(\"GET\", url, callbackSuccess=self.replaceSVG)\n\n\n@html5.tag(\"flare-icon\")\nclass Icon(html5.I):\n\t\"\"\"\n\tIcon component with first-letter fallback, normally shown as embedded SVG.\n\t\"\"\"\n\n\tdef __init__(self, value=None, fallbackIcon=None, title=\"\", classes=[]):\n\t\tsuper().__init__()\n\t\tself[\"class\"] = [\"i\"] + classes\n\t\tself.title = title\n\t\tself[\"title\"] = title\n\t\tself.fallbackIcon = fallbackIcon\n\t\tself.value = value\n\t\tif value:\n\t\t\tself[\"value\"] = value\n\n\tdef _setValue(self, value):\n\t\tif isinstance(value, dict):\n\t\t\tself.value = value.get(\"dest\", {}).get(\"downloadUrl\")\n\t\telse:\n\t\t\tself.value = value\n\t\t# sig= test is really ugly, we need a better solution\n\t\tif self.value and (\"sig=\" in self.value or any(\n\t\t\t\t[self.value.endswith(ext) for ext in [\".jpg\", \".png\", \".gif\", \".bmp\", \".webp\", \".heic\", \".jpeg\"]])):\n\t\t\t# language=HTML\n\t\t\tself.appendChild('<img [name]=\"image\">')\n\t\t\tself.image.onError = lambda e: self.onError(e)\n\t\t\tself.image.sinkEvent(\"onError\")\n\t\t\tself.image[\"src\"] = self.value\n\t\telse:\n\t\t\tif self.value and self.value.endswith(\".svg\"):\n\t\t\t\turl = self.value\n\t\t\telse:\n\t\t\t\turl = conf[\"basePathSvgs\"] + \"/%s.svg\" % self.value\n\t\t\tself.appendChild(SvgIcon(url, self.fallbackIcon, self.title))\n\n\tdef _setTitle(self, val):\n\t\tself.title = val\n\n\tdef onError(self, event):\n\t\tif self.fallbackIcon:\n\t\t\tself.removeChild(self.image)\n\t\t\tself.appendChild(SvgIcon(conf[\"basePathSvgs\"] + \"/%s.svg\" % self.fallbackIcon, title=self.title))\n\t\telif self.title:\n\t\t\tself.removeChild(self.image)\n\t\t\tself.appendChild(self.title[0].upper())\n\t\telse:\n\t\t\tself.removeChild(self.image)\n\t\t\tself.appendChild(SvgIcon(conf[\"basePathSvgs\"] + \"/icon-error.svg", 
title=self.title))\n\n\n@html5.tag(\"flare-badge-icon\")\nclass BadgeIcon(Icon):\n\t\"\"\"\n\tA badge icon is an icon-component with a little badge,\n\te.g. a number of new messages or items in the cart or so.\n\t\"\"\"\n\n\tdef __init__(self, title, value=None, fallbackIcon=None, badge=None):\n\t\t# pass the arguments in Icon's (value, fallbackIcon, title) order; passing (title, value, fallbackIcon) would wrongly bind the title as the icon value\n\t\tsuper().__init__(value, fallbackIcon, title)\n\t\tself.badge = badge\n\t\t# language=HTML\n\t\t# the <span> markup below was stripped during extraction; reconstructed -- [name] binds the node as self.badgeobject\n\t\tself.appendChild('<span [name]=\"badgeobject\" class=\"badge\">%s</span>' % self.badge)\n\n\tdef _setBadge(self, value):\n\t\tself.badgeobject.appendChild(value, replace=True)\n\n\tdef _getBadge(self):\n\t\treturn self.badge\n","sub_path":"flare/icons.py","file_name":"icons.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"233744735","text":"import numpy as np\nfrom petitRADTRANS import Radtrans\nimport matplotlib.pyplot as plt\n#atmosphere = Radtrans(line_species = ['CH4_main_iso'], \\\natmosphere = Radtrans(line_species = ['COej_HITEMP19'], \\\n # 'H2O_main_iso', 'CO_all_iso', \\\n# 'CH4_main_iso', 'CO2_main_iso', 'Na', 'K', \\\n# 'C2H2', 'HCN_main_iso', 'NH3', 'H2S_main_iso'], \\\n #rayleigh_species = ['H2', 'He'], \\\n #continuum_opacities = ['H2-H2', 'H2-He'], \\\n wlen_bords_micron = [2.3,2.4], \\\n mode = 'lbl')\n\npressures = np.logspace(-10, 2, 130)\nprint(len(pressures))\nwith open('Gl229/pressures.dat', 'w') as f: \n for a in range(0,len(pressures)): \n f.write('{:e}\\n'.format(pressures[a]))\n \natmosphere.setup_opa_structure(pressures)\n\n\n\nimport petitRADTRANS.nat_cst as nc\n\nR_pl = 1.838*nc.r_jup_mean\ngravity = 1e1**5.0\nP0 = 0.01\n\nkappa_IR = 0.01\ngamma = 0.4\nT_int = 200.\nT_equ = 1500.\ntemperature = nc.guillot_global(pressures, kappa_IR, gamma, gravity, T_int, T_equ)\nprint(len(temperature))\n\ndata = open('Gl229/MMR/eq.dat', 'r')\nline = data.read().split()\ndata.close()\n\nmolname = {}\nfor a in range(0,160):\n molname[a] = line[a+6]\n print(a, molname[a])\n\n \ndata = open('Gl229/MMR/eq.dat', 'r')\na = 0\nMMW_k = np.ones_like(temperature)\nMMR = [[0] * len(pressures) for j in range(160)]\nfor line in data:\n if line[0]=='#':\n continue\n lines = line.rstrip('\\n').split()\n temperature[a] = lines[1]\n MMW_k[a] = lines[2]\n for i in range(0,160):\n MMR[i][a] = lines[i+3]\n a = a + 1\ndata.close()\n\n\n\nabundances = {}\nabundances['H2'] = 7.1383555382E-001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['H2'][a] = MMR[1][a]\n\nabundances['He'] = 0.24 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['He'][a] = MMR[2][a]\n\nabundances['H2O_main_iso'] = 0.0 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['H2O_main_iso'][a] = MMR[59][a]\n\nabundances['COej_HITEMP19'] = 0.01 * np.ones_like(temperature)\n#for a in range(0,130):\n# abundances['CO_all_iso'][a] = MMR[26][a]\n\nabundances['CO2_main_iso'] = 0.00001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['CO2_main_iso'][a] = MMR[27][a]\n\nabundances['CH4_main_iso'] = 0.000001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['CH4_main_iso'][a] = MMR[19][a]\n\nabundances['Na'] = 0.00001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['Na'][a] = MMR[5][a]\n\nabundances['K'] = 0.000001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['K'][a] = MMR[3][a]\n\nabundances['C2H2'] = 0.000001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['C2H2'][a] = MMR[30][a]\n\nabundances['HCN_main_iso'] = 0.000001 * np.ones_like(temperature)\nfor a in range(0,130):\n 
abundances['HCN_main_iso'][a] = MMR[13][a]\n\nabundances['NH3'] = 0.000001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['NH3'][a] = MMR[61][a]\n\nabundances['H2S_main_iso'] = 0.000001 * np.ones_like(temperature)\nfor a in range(0,130):\n abundances['H2S_main_iso'][a] = MMR[92][a]\n\n \nMMW = 2.33 * np.ones_like(temperature)\nfor a in range(0,130):\n MMW[a] = MMW_k[a]\n\nplt.plot(temperature,pressures)\nplt.yscale(\"log\")\nplt.gca().invert_yaxis()\nplt.show()\nplt.clf()\n\natmosphere.calc_flux(temperature, abundances, gravity, MMW, contribution = True)\nprint(atmosphere.contr_em)\n\nplt.rcParams['figure.figsize'] = (10, 6)\n\nwlen_mu = nc.c/atmosphere.freq/1e-4\nX, Y = np.meshgrid(wlen_mu, pressures)\nplt.contourf(X,Y,atmosphere.contr_em,30,cmap=plt.cm.bone_r)\n\nplt.yscale('log')\nplt.xscale('log')\nplt.ylim([1e2,1e-10])\n#plt.xlim()\n\nplt.xlabel('Wavelength (microns)')\nplt.ylabel('P (bar)')\nplt.title('Emission contribution function')\nplt.gca().invert_yaxis()\nplt.xlim(2.3800,2.3900)\nplt.show()\nplt.clf()\n\n\nwith open('Gl229/Gl229B_spectrum_CO.dat', 'w') as f:\n for a in range(0,len(atmosphere.freq)): \n f.write('{:e} {:e}\\n'.format(nc.c/atmosphere.freq[a]/1e-4, atmosphere.flux[a]))\n\n\n\nimport pylab as plt\nplt.rcParams['figure.figsize'] = (10, 6)\n\nplt.plot(nc.c/atmosphere.freq/1e-4, atmosphere.flux/1e-6)\n\nplt.xlabel('Wavelength (microns)')\nplt.ylabel(r'Planet flux $F_\\nu$ (10$^{-6}$ erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$)')\n# save before show(): show() clears the current figure, so saving afterwards writes a blank PDF\nplt.savefig('Gl229/emission-HR.pdf',bbox_inches='tight')\nplt.show()\nplt.clf()\n\n\n\nplt.plot(nc.c/atmosphere.freq/1e-4, atmosphere.flux/1e-6)\n\nplt.xlim([2.3,2.3025])\nplt.xlabel('Wavelength (microns)')\nplt.ylabel(r'Planet flux $F_\\nu$ (10$^{-6}$ erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$)')\n# save before show() here as well, for the same reason\nplt.savefig('Gl229/emission-HR-zoom.pdf',bbox_inches='tight')\nplt.show()\nplt.clf()\n\n","sub_path":"examples/comparisons/co_exojax/Gl229_CO.py","file_name":"Gl229_CO.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"105003","text":"from datetime import datetime\nfrom time import sleep\nfrom colorama import init\nfrom util import *\n\n\ndef print_current_online(online_users):\n \"\"\"\n Print the users who are currently online\n \"\"\"\n online = len(online_users)\n print(Fore.LIGHTBLUE_EX + f\"Current online ({online})\", end='')\n if online != 0:\n print(\": \", end='')\n online = \"\"\n for online_user in online_users:\n online += online_user + ', '\n print(online[:-2], end='')\n print(Style.RESET_ALL)\n\n\n# Clear the screen\nclear_screen()\n\n# Initialize the library for colored console output\ninit()\n\n# Remember the time of the last received message\nlast_message_time = 0\n\n# Ask the server who is currently online\nresponse = requests.get(\n SERVER_ADDR + '/check_users'\n )\ndata = response.json()\nprint_current_online(data['online'])\n\nwhile True:\n # Request the message history\n # for messages that arrived after the last one received\n response = requests.get(\n SERVER_ADDR + '/history',\n params={'after': last_message_time}\n )\n\n data = response.json()\n\n # Print the new messages to the screen\n for message in data['messages']:\n # float -> datetime\n beauty_time = datetime.fromtimestamp(message['time'])\n beauty_time = beauty_time.strftime('%H:%M:%S')\n sender_name = message['username']\n print(Fore.CYAN + beauty_time + ' ' + Fore.GREEN + sender_name, end=': ')\n print(Fore.WHITE + 
message['text'])\n last_message_time = message['time']\n\n # Check the state of the users\n # so that we can print to the shared chat\n # if any user state changes have occurred\n response = requests.get(\n SERVER_ADDR + '/check_users'\n )\n\n data = response.json()\n\n # If someone has recently disconnected,\n # and this has not yet been printed to the shared chat,\n # print the name of the user who disconnected and the time of disconnection\n disconnected_users = data['disconnected']\n for disconnected_user in disconnected_users:\n beauty_time = datetime.now()\n beauty_time = beauty_time.strftime('%H:%M:%S')\n print(Fore.CYAN + beauty_time + \" \" +\n Fore.LIGHTRED_EX + disconnected_user + \" has left\" +\n Style.RESET_ALL)\n\n # If someone has recently connected,\n # and there has been no notification about it in the shared chat yet,\n # print the name and the connection time\n connected_users = data['connected']\n for connected_user in connected_users:\n beauty_time = datetime.now()\n beauty_time = beauty_time.strftime('%H:%M:%S')\n print(Fore.CYAN + beauty_time + \" \" +\n Fore.LIGHTRED_EX + connected_user + \" has joined\" +\n Style.RESET_ALL)\n\n # If any users connected or disconnected,\n # the list of users currently online needs to be refreshed\n if len(disconnected_users) != 0 or len(connected_users) != 0:\n print_current_online(data['online'])\n\n # Wait a second, then repeat all the steps above\n sleep(1)\n","sub_path":"reciever.py","file_name":"reciever.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"6887830","text":"class Solution(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n profit = 0\n min_price = float('inf')\n\n for price in prices:\n min_price = min(min_price, price)\n profit = max(profit, price - min_price)\n\n return profit\n\n\nprices = [7, 1, 5, 3, 6, 4]\n# Output: 5\n\nsol = Solution()\nprint(sol.maxProfit(prices))\n","sub_path":"Leetcode/121.Best_Time_to_Buy_and_Sell_Stock.py","file_name":"121.Best_Time_to_Buy_and_Sell_Stock.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"149787682","text":"#!/usr/bin/env python\n\nimport struct\n\nglobal_messages = {\n 0 : ()\n}\n\nclass FitFile(object):\n\n def __init__(self, filename):\n self.filename = filename\n self._open()\n\n def _open(self):\n with open(self.filename, 'rb') as self.file:\n self._read_file_header()\n self._read_data_records()\n self._read_crc()\n \n def _read_file_header(self):\n data = self.file.read(12)\n (header_size, protocol_version, profile_version, self.data_size, data_type) = struct.unpack('<BBHI4s', data)\n # NOTE: the text between the format string above and the print below was garbled in the source\n # (the '<...>' span was stripped); this is a plausible reconstruction.\n self.crc = struct.unpack('<H', self.file.read(2))[0] if header_size == 14 else 0\n print('protocol version: {}.{} profile version: {}.{} data size: {} crc: {}'.format(protocol_version >> 4, protocol_version & 0x0F,\n profile_version // 100, profile_version % 100, self.data_size, self.crc))\n\n def _read_data_records(self):\n amount_read = 0\n while amount_read < self.data_size:\n amount_read += self._read_data_record()\n\n def _read_data_record(self):\n data = self.file.read(1)\n amount_read = 1\n (record_header,) = struct.unpack('B', data)\n if record_header & 0x80:\n amount_read += self._read_compressed_timestamp_header(record_header)\n elif record_header & 0x40:\n amount_read += self._read_definition_message(record_header)\n else:\n amount_read += self._read_data_message(record_header)\n return amount_read\n \n def _read_compressed_timestamp_header(self, record_header):\n assert False, 'not implemented'\n\n def 
_read_definition_message(self, record_header):\n local_message_type = record_header & 0x0F\n data = self.file.read(2)\n amount_read = 2\n (architecture,) = struct.unpack('xB', data)\n endianness = '>' if architecture & 0x01 else '<'\n data = self.file.read(3)\n amount_read += 3\n (global_message_number, fields) = struct.unpack(endianness + 'HB', data)\n assert global_message_number in global_messages, 'Unknown global message {}'.format(global_message_number)\n for field in range(fields):\n amount_read += self._read_definition_field()\n return amount_read\n\n def _read_definition_field(self):\n data = self.file.read(3)\n (field_definition_number, size, base_type) = struct.unpack('BBB', data)\n endian_ability = base_type & 0x80 == 0x80\n base_type_number = base_type & 0x0F\n print('field definition number: {}\\n'\n 'size: {}\\n'\n 'base type number: {}'.format(field_definition_number, size, base_type_number))\n return 3\n\n def _read_data_message(self, record_header):\n local_message_type = record_header & 0x0F\n assert False, 'not implemented'\n\n def _read_crc(self):\n assert False, 'not implemented'\n\nif __name__ == \"__main__\":\n\n fit = FitFile('../Device.fit')\n \n","sub_path":"garmin-parse.py","file_name":"garmin-parse.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"396190077","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject, QTimer, Qt, QModelIndex, qInstallMessageHandler\nfrom init_server import initialize_server, save_status\nfrom database_management import report_management\nimport json, datetime, sys, os, time\n\nclass generate_report_ui(QMainWindow):\n\tdef __init__(\n\t\t\tself,\n\t\t\tdata_changed_flags, \n\t\t\ttask_queue,\n\t\t\tlog_queue,\n\t\t\tconfig,\n\t\t\tparent=None\n\t\t):\n\t\tsuper(generate_report_ui, self).__init__(parent)\n\t\t \n\t\tself.data_changed_flags = data_changed_flags\n\t\tself.task_queue = task_queue\n\t\tself.log_queue = log_queue\n\t\tself.config = config\n\n\t\tself.all_checked = False\n\t\tself.account_checked = False\n\t\tself.submission_checked = False\n\t\tself.client_checked = False\n\t\tself.judge_checked = False\n\t\tself.scoreboard_checked = False\n\t\tself.query_checked = False\n\t\tself.problems_checked = False\n\t\t\n\t\tself.setWindowTitle('Generate Report')\n\t\tself.width = 800\n\t\tself.height = 600\n\t\tself.setGeometry(600, 300, self.width, self.height)\n\t\tself.setFixedSize(self.width, self.height)\n\t\t\n\t\tself.progress = QProgressBar()\n\t\t\n\t\theading = QLabel('Select Reports')\n\t\theading.setObjectName('main_screen_heading')\n\n\t\tself.all_reports_checkbox = QCheckBox('All Reports')\n\t\tself.all_reports_checkbox.setObjectName('top_level_checkbox')\n\t\tself.all_reports_checkbox.stateChanged.connect(self.all_state_changed)\n\t\tself.all_reports_checkbox.setChecked(self.all_checked)\n\n\t\tself.account_report_checkbox = QCheckBox('Accounts Report')\n\t\tself.account_report_checkbox.setObjectName('sub_level_checkbox')\n\t\tself.account_report_checkbox.setChecked(self.account_checked)\n\t\tself.account_report_checkbox.stateChanged.connect(self.account_state_changed)\n\n\t\tself.submission_report_checkbox = QCheckBox('Submissions 
Report')\n\t\tself.submission_report_checkbox.setObjectName('sub_level_checkbox')\n\t\tself.submission_report_checkbox.setChecked(self.submission_checked)\n\t\tself.submission_report_checkbox.stateChanged.connect(self.submission_state_changed)\n\n\t\tself.client_report_checkbox = QCheckBox('Clients Report')\n\t\tself.client_report_checkbox.setObjectName('sub_level_checkbox')\n\t\tself.client_report_checkbox.setChecked(self.client_checked)\n\t\tself.client_report_checkbox.stateChanged.connect(self.client_state_changed)\n\n\t\tself.judge_report_checkbox = QCheckBox('Judge Report')\n\t\tself.judge_report_checkbox.setObjectName('sub_level_checkbox')\n\t\tself.judge_report_checkbox.setChecked(self.judge_checked)\n\t\tself.judge_report_checkbox.stateChanged.connect(self.judge_state_changed)\n\n\t\tself.scoreboard_report_checkbox = QCheckBox('Leaderboard Report')\n\t\tself.scoreboard_report_checkbox.setObjectName('sub_level_checkbox')\n\t\tself.scoreboard_report_checkbox.setChecked(self.scoreboard_checked)\n\t\tself.scoreboard_report_checkbox.stateChanged.connect(self.scoreboard_state_changed)\n\n\t\tself.query_report_checkbox = QCheckBox('Query Report')\n\t\tself.query_report_checkbox.setObjectName('sub_level_checkbox')\n\t\tself.query_report_checkbox.setChecked(self.query_checked)\n\t\tself.query_report_checkbox.stateChanged.connect(self.query_state_changed)\n\n\t\tself.problems_report_checkbox = QCheckBox('Problems Report')\n\t\tself.problems_report_checkbox.setObjectName('sub_level_checkbox')\t\t\n\t\tself.problems_report_checkbox.setChecked(self.problems_checked)\n\t\tself.problems_report_checkbox.stateChanged.connect(self.problems_state_changed)\n\n\t\tconfirm_button = QPushButton('Generate')\n\t\tconfirm_button.setFixedSize(150, 30)\n\t\tconfirm_button.clicked.connect(lambda:generate_report_ui.final_status(self))\n\t\tconfirm_button.setDefault(True)\n\t\tconfirm_button.setObjectName('interior_button')\n\t\t\n\t\tbutton_widget = QWidget()\n\t\tbutton_layout = QHBoxLayout(button_widget)\n\t\tbutton_layout.addWidget(confirm_button)\n\t\tbutton_layout.setAlignment(Qt.AlignCenter)\n\n\t\tmain_layout = QVBoxLayout()\n\t\tmain_layout.addWidget(heading)\n\t\tmain_layout.addStretch(1)\n\t\tmain_layout.addWidget(self.all_reports_checkbox)\n\t\tmain_layout.addWidget(self.account_report_checkbox)\n\t\tmain_layout.addWidget(self.submission_report_checkbox)\n\t\tmain_layout.addWidget(self.client_report_checkbox)\n\t\tmain_layout.addWidget(self.judge_report_checkbox)\n\t\tmain_layout.addWidget(self.scoreboard_report_checkbox)\n\t\tmain_layout.addWidget(self.query_report_checkbox)\n\t\tmain_layout.addWidget(self.problems_report_checkbox)\n\t\tmain_layout.addStretch(1)\n\t\tmain_layout.addWidget(self.progress)\n\t\tmain_layout.addStretch(1)\n\t\tmain_layout.addWidget(button_widget)\n\t\tmain_layout.addStretch(1)\n\t\tmain = QWidget()\n\t\tmain.setLayout(main_layout)\n\t\tmain.setObjectName('account_window')\n\t\tself.setCentralWidget(main)\n\n\tdef all_state_changed(self, state):\n\t\tif(state == Qt.Checked):\n\t\t\tself.account_report_checkbox.setChecked(True)\n\t\t\tself.submission_report_checkbox.setChecked(True)\n\t\t\tself.client_report_checkbox.setChecked(True)\n\t\t\tself.judge_report_checkbox.setChecked(True)\n\t\t\tself.scoreboard_report_checkbox.setChecked(True)\n\t\t\tself.query_report_checkbox.setChecked(True)\n\t\t\tself.problems_report_checkbox.setChecked(True)\n\t\t\tself.all_checked = True\n\t\t\tself.account_checked = True\n\t\t\tself.submission_checked = True\n\t\t\tself.client_checked = 
True\n\t\t\tself.judge_checked = True\n\t\t\tself.scoreboard_checked = True\n\t\t\tself.query_checked = True\n\t\t\tself.problems_checked = True\n\t\treturn\n\n\tdef account_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.account_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.account_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\tdef submission_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.submission_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.submission_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\tdef client_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.client_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.client_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\tdef judge_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.judge_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.judge_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\tdef scoreboard_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.scoreboard_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.scoreboard_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\tdef query_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.query_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.query_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\tdef problems_state_changed(self, state):\n\t\tif state == Qt.Checked:\n\t\t\tself.problems_checked = True\n\t\telse:\n\t\t\tself.all_checked = False\n\t\t\tself.problems_checked = False\n\t\t\tself.all_reports_checkbox.setChecked(False)\n\n\n\tdef log(self, text):\n\t\tself.log_queue.put(text)\n\n\tdef final_status(self):\n\t\ttry:\n\t\t\tif self.account_checked == True:\n\t\t\t\taccount_data = report_management.get_account_data()\n\t\t\t\tif account_data == 'NULL':\n\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No account report data found!')\n\t\t\t\telse:\n\t\t\t\t\twith open('./Reports/account_reports.txt', 'w+') as file:\n\t\t\t\t\t\tcurrent_date_time = datetime.datetime.now()\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\tstr(current_date_time) +\n\t\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write('Contest Teams:\\n')\n\t\t\t\t\t\tfile.write('\\tTeam\\t\\t Password\\t\\t Type\\n')\n\t\t\t\t\t\tfor accounts in account_data:\n\t\t\t\t\t\t\tusername = accounts[0]\n\t\t\t\t\t\t\tpassword = accounts[1]\n\t\t\t\t\t\t\taccount_type = accounts[2]\n\t\t\t\t\t\t\tfile.write('\\t' + username + '\\t\\t' + password + '\\t\\t' + account_type + '\\n')\n\t\t\t\t\t\tfile.write('\\n################################################' )\n\t\t\tself.progress.setValue(10)\n\t\t\ttime.sleep(0.2)\n\n\t\t\tif self.submission_checked == True:\n\t\t\t\tsubmission_data = report_management.get_all_submission_data()\n\t\t\t\tif submission_data == 'NULL':\n\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No submission report data found!')\n\t\t\t\telse:\n\t\t\t\t\twith open('./Reports/submission_reports.txt', 'w+') as file:\n\t\t\t\t\t\tcurrent_date_time = 
datetime.datetime.now()\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\tstr(current_date_time) +\n\t\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write('All Submissions:\\n')\n\t\t\t\t\t\tfile.write('\\tRun ID\\t\\tClient ID\\t\\tProblem Code\\t\\tLanguage\\t\\tTimestamp\\t\\tVerdict\\t\\tJudge\\n')\n\t\t\t\t\t\tfor submissions in submission_data:\n\t\t\t\t\t\t\trun_id = submissions[0]\n\t\t\t\t\t\t\tclient_id = submissions[1]\n\t\t\t\t\t\t\tproblem_code = submissions[2]\n\t\t\t\t\t\t\tlanguage = submissions[3]\n\t\t\t\t\t\t\ttimestamp = submissions[4]\n\t\t\t\t\t\t\tverdict = submissions[5]\n\t\t\t\t\t\t\tjudge = submissions[6]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t'\\t' + \n\t\t\t\t\t\t\t\tstr(run_id) + \n\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' + \n\t\t\t\t\t\t\t\tstr(client_id) + \n\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' + \n\t\t\t\t\t\t\t\tproblem_code +\n\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' + \n\t\t\t\t\t\t\t\tlanguage +\n\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' + \n\t\t\t\t\t\t\t\ttimestamp +\n\t\t\t\t\t\t\t\t'\\t\\t\\t' + \n\t\t\t\t\t\t\t\tverdict +\n\t\t\t\t\t\t\t\t'\\t\\t' + \n\t\t\t\t\t\t\t\tjudge +\n\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tproblem_codes = self.config['Problem Codes']\n\t\t\t\t\t\tfile.write('\\n\\nSubmission data grouped by problems:\\n')\n\t\t\t\t\t\tfor problem in problem_codes:\n\t\t\t\t\t\t\tfile.write('> Problem: ' + problem + '\\n')\n\t\t\t\t\t\t\tfile.write('\\tRun ID\\t\\t\\t Client ID \\t\\t\\t Language \\t\\t\\t Timestamp \\t\\t\\t Verdict\\n')\n\t\t\t\t\t\t\tgrouped_data = report_management.get_grouped_problem_sub_data(problem)\n\t\t\t\t\t\t\tif grouped_data == 'NULL':\n\t\t\t\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No submission report data found!')\n\t\t\t\t\t\t\telif len(grouped_data) == 0:\n\t\t\t\t\t\t\t\tfile.write('\\tNo Submissions for this problem.\\n')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfor problem_data in grouped_data:\n\t\t\t\t\t\t\t\t\trun_id = problem_data[0]\n\t\t\t\t\t\t\t\t\tclient_id = problem_data[1]\n\t\t\t\t\t\t\t\t\tlanguage = problem_data[2]\n\t\t\t\t\t\t\t\t\ttimestamp = problem_data[3]\n\t\t\t\t\t\t\t\t\tverdict = problem_data[4]\n\t\t\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t\t\t'\\t\\t' + \n\t\t\t\t\t\t\t\t\t\tstr(run_id) + \n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t\\t' + \n\t\t\t\t\t\t\t\t\t\tstr(client_id) +\n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' +\n\t\t\t\t\t\t\t\t\t\tlanguage +\n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' +\n\t\t\t\t\t\t\t\t\t\ttimestamp + \n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t ' +\n\t\t\t\t\t\t\t\t\t\tverdict +\n\t\t\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t\t)\n\n\t\t\tself.progress.setValue(30)\n\t\t\ttime.sleep(0.4)\n\n\t\t\tif self.client_checked == True:\n\t\t\t\tclient_data = report_management.get_all_client_data()\n\t\t\t\tif client_data == 'NULL':\n\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No client report data found!')\n\t\t\t\telse:\n\t\t\t\t\twith open('./Reports/client_reports.txt', 'w+') as 
file:\n\t\t\t\t\t\tcurrent_date_time = datetime.datetime.now()\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\tstr(current_date_time) +\n\t\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write('All Clients:\\n')\n\t\t\t\t\t\tfile.write('\\tClient ID\\t\\tTeam Name\\t\\t IP Address\\n')\n\t\t\t\t\t\tfor clients in client_data:\n\t\t\t\t\t\t\tclient_id = clients[0]\n\t\t\t\t\t\t\tteam_name = clients[1]\n\t\t\t\t\t\t\tip_address = clients[2]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t'\\t' + \n\t\t\t\t\t\t\t\tstr(client_id) + \n\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' + \n\t\t\t\t\t\t\t\tteam_name +\n\t\t\t\t\t\t\t\t'\\t\\t ' + \n\t\t\t\t\t\t\t\tip_address +\n\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t\t)\n\t\t\t\t\t\t\n\t\t\t\t\t\tfile.write('\\n\\nSubmission data grouped by clients:\\n')\n\t\t\t\t\t\tfor client in client_data:\n\t\t\t\t\t\t\tfile.write('> Client: ' + str(client[1]) + '\\n')\n\t\t\t\t\t\t\tfile.write('\\tRun ID\\t\\t\\t Problem Code \\t\\t\\t Language \\t\\t\\t Timestamp \\t\\t\\t Verdict\\n')\n\t\t\t\t\t\t\tgrouped_data = report_management.get_grouped_client_sub_data(client[0])\n\t\t\t\t\t\t\tif grouped_data == 'NULL':\n\t\t\t\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No client report data found for client: ', client[1])\n\t\t\t\t\t\t\telif len(grouped_data) == 0:\n\t\t\t\t\t\t\t\tfile.write('\\tNo Submissions by this client.\\n')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfor problem_data in grouped_data:\n\t\t\t\t\t\t\t\t\trun_id = problem_data[0]\n\t\t\t\t\t\t\t\t\tproblem_code = problem_data[1]\n\t\t\t\t\t\t\t\t\tlanguage = problem_data[2]\n\t\t\t\t\t\t\t\t\ttimestamp = problem_data[3]\n\t\t\t\t\t\t\t\t\tverdict = problem_data[4]\n\t\t\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t\t\t'\\t\\t' + \n\t\t\t\t\t\t\t\t\t\tstr(run_id) + \n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' + \n\t\t\t\t\t\t\t\t\t\tproblem_code +\n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t\\t' +\n\t\t\t\t\t\t\t\t\t\tlanguage +\n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' +\n\t\t\t\t\t\t\t\t\t\ttimestamp + \n\t\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' +\n\t\t\t\t\t\t\t\t\t\tverdict +\n\t\t\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t\t)\n\t\t\tself.progress.setValue(50)\n\t\t\ttime.sleep(0.6)\n\n\t\t\tif self.judge_checked == True:\n\t\t\t\tjudge_data = report_management.get_all_judge_data()\n\t\t\t\tjudge_data.append(('__ADMIN__', 'ADMIN', 'localhost'))\n\t\t\t\twith open('./Reports/judge_reports.txt', 'w+') as file:\n\t\t\t\t\tcurrent_date_time = datetime.datetime.now()\n\t\t\t\t\tfile.write(\n\t\t\t\t\t\tstr(current_date_time) +\n\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t'\\n\\n'\n\t\t\t\t\t)\n\t\t\t\t\tfile.write('All Judges:\\n')\n\t\t\t\t\tfile.write('\\tJudge ID\\t\\tTeam Name\\t\\t IP Address\\n')\n\t\t\t\t\tfor 
judge in judge_data:\n\t\t\t\t\t\tjudge_id = judge[0]\n\t\t\t\t\t\tjudge_name = judge[1]\n\t\t\t\t\t\tip_address = judge[2]\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t'\\t' + \n\t\t\t\t\t\t\tjudge_id + \n\t\t\t\t\t\t\t'\\t\\t' + \n\t\t\t\t\t\t\tjudge_name +\n\t\t\t\t\t\t\t'\\t\\t ' + \n\t\t\t\t\t\t\tip_address +\n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\n\t\t\t\t\tfile.write(\n\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t)\n\t\t\t\t\t\n\t\t\t\t\tfile.write('\\n\\nSubmission data grouped by judges:\\n')\n\t\t\t\t\t\n\t\t\t\t\tfor judge in judge_data:\n\t\t\t\t\t\tjudgement_count = report_management.get_judgement_count(judge[1])\n\t\t\t\t\t\tfile.write('\\n> Judge: ' + judge[1] + ' :::: Number of verdicts: ' + str(judgement_count) + '\\n')\n\n\t\t\t\t\t\tfile.write('\\tRun ID\\t\\t\\tClient ID \\t\\t\\t Problem Code \\t\\t Language \\t\\t\\t Timestamp \\t\\t\\t Verdict\\n')\n\t\t\t\t\t\tgrouped_data = report_management.get_grouped_judge_sub_data(judge[1])\n\t\t\t\t\t\tif grouped_data == 'NULL':\n\t\t\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No judge report data found for judge: ', judge[1])\n\t\t\t\t\t\telif len(grouped_data) == 0:\n\t\t\t\t\t\t\tfile.write('\\tNo Verdicts by this judge.\\n')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor problem_data in grouped_data:\n\t\t\t\t\t\t\t\trun_id = problem_data[0]\n\t\t\t\t\t\t\t\tclient_id = problem_data[1]\n\t\t\t\t\t\t\t\tproblem_code = problem_data[2]\n\t\t\t\t\t\t\t\tlanguage = problem_data[3]\n\t\t\t\t\t\t\t\ttimestamp = problem_data[4]\n\t\t\t\t\t\t\t\tverdict = problem_data[5]\n\t\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t\t'\\t\\t' + \n\t\t\t\t\t\t\t\t\tstr(run_id) + \n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' +\n\t\t\t\t\t\t\t\t\tstr(client_id) +\n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' + \n\t\t\t\t\t\t\t\t\tproblem_code +\n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t\\t' +\n\t\t\t\t\t\t\t\t\tlanguage +\n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' +\n\t\t\t\t\t\t\t\t\ttimestamp + \n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t' +\n\t\t\t\t\t\t\t\t\tverdict +\n\t\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\tfile.write(\n\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t)\n\n\t\t\tself.progress.setValue(70)\n\t\t\ttime.sleep(0.1)\n\n\t\t\tif self.scoreboard_checked == True:\n\t\t\t\twinner = report_management.get_winner()\n\t\t\t\tif winner == \"NULL\" or len(winner) == 0:\n\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No winner report data found!')\n\t\t\t\telse:\n\t\t\t\t\twinner_score = winner[0]\n\t\t\t\t\twinner_name = winner[1]\n\n\t\t\t\t\twith open('./Reports/scoreboard_reports.txt', 'w+') as file:\n\t\t\t\t\t\tcurrent_date_time = datetime.datetime.now()\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\tstr(current_date_time) +\n\t\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write('Contest Winner:\\n')\n\t\t\t\t\t\tfile.write('\\tTeam: ' + winner_name + '\\n\\tScore: ' + str(winner_score))\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tdata = 
report_management.get_scoreboard_data()\n\t\t\t\t\t\tif len(data) != 0 and data != \"NULL\":\n\t\t\t\t\t\t\tfile.write('\\nComplete Scoreboard: \\n')\n\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t\"\\t\\tTeam \t\\t\\t\\t\\t Score \t\\tProblems Solved\\t\\t\\t Total Time\\n\"\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tfor scoreboard_entry in data:\n\t\t\t\t\t\t\t\tteam = scoreboard_entry[0]\n\t\t\t\t\t\t\t\tscore = scoreboard_entry[1]\n\t\t\t\t\t\t\t\tproblems_solved = scoreboard_entry[2]\n\t\t\t\t\t\t\t\ttotal_time = scoreboard_entry[3]\n\n\t\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t\t'\\t\\t' + \n\t\t\t\t\t\t\t\t\tteam + \n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t ' +\n\t\t\t\t\t\t\t\t\tstr(score) +\n\t\t\t\t\t\t\t\t\t' \\t\\t\\t\\t ' + \n\t\t\t\t\t\t\t\t\tstr(problems_solved) +\n\t\t\t\t\t\t\t\t\t'\\t\\t\\t\\t\\t' +\n\t\t\t\t\t\t\t\t\ttotal_time +\n\t\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\t\t'\\n################################################' +\n\t\t\t\t\t\t\t\t'####################################################' \n\t\t\t\t\t\t\t)\n\t\t\tself.progress.setValue(80)\n\t\t\ttime.sleep(0.6)\n\n\t\t\tif self.problems_checked == True:\n\t\t\t\tproblem_data = report_management.get_problem_data()\n\t\t\t\tif problem_data == 'NULL':\n\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No problem report data found!')\n\t\t\t\telse:\n\t\t\t\t\twith open('./Reports/problem_reports.txt', 'w+') as file:\n\t\t\t\t\t\tcurrent_date_time = datetime.datetime.now()\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\tstr(current_date_time) +\n\t\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tparticipant_count = report_management.get_participant_count()\n\t\t\t\t\t\tpro_count = report_management.get_participant_pro_count()\n\t\t\t\t\t\tfile.write('Number of participants with at least one submission: ' + str(participant_count) + '\\n')\n\t\t\t\t\t\tfile.write('Number of participants with at least one AC submission: ' + str(pro_count) + '\\n')\n\n\t\t\t\t\t\tfile.write('Contest Problem Details:\\n')\n\t\t\t\t\t\tfor problem in problem_data:\n\t\t\t\t\t\t\tname = problem[0]\n\t\t\t\t\t\t\tcode = problem[1]\n\t\t\t\t\t\t\ttest_files = problem[2]\n\t\t\t\t\t\t\ttime_limit = problem[3]\n\n\t\t\t\t\t\t\tac_count = report_management.get_ac_count(code)\n\t\t\t\t\t\t\tsub_count = report_management.get_submission_count(code)\n\n\t\t\t\t\t\t\tfile.write(' > ' + name + '[ ' + code + ' ]\\n')\n\t\t\t\t\t\t\tfile.write('\\t Time Limit : ' + str(time_limit) + ' Seconds\\n')\n\t\t\t\t\t\t\tfile.write('\\t Number of Test files: ' + str(test_files) + '\\n')\n\t\t\t\t\t\t\tfile.write('\\t Total Submissions on this problem: ' + str(sub_count) + '\\n')\n\t\t\t\t\t\t\tfile.write('\\t AC Submissions on this problem: ' + str(ac_count) + '\\n')\n\t\t\t\t\t\tfile.write('\\n################################################' )\n\t\t\tself.progress.setValue(90)\n\t\t\ttime.sleep(0.4)\n\n\t\t\tif self.query_checked == True:\n\t\t\t\tquery_data = report_management.get_query_data()\n\t\t\t\tif query_data == 'NULL':\n\t\t\t\t\tprint('[ REPORTS ][ ERROR ] No query report data found!')\n\t\t\t\telse:\n\t\t\t\t\twith open('./Reports/query_reports.txt', 'w+') as file:\n\t\t\t\t\t\tcurrent_date_time = datetime.datetime.now()\n\t\t\t\t\t\tfile.write(\n\t\t\t\t\t\t\tstr(current_date_time) 
+\n\t\t\t\t\t\t\t'\\n################ ' +\n\t\t\t\t\t\t\t'BitsOJ '+ \n\t\t\t\t\t\t\t' ################' +\n\t\t\t\t\t\t\t'\\nContest: ' + \n\t\t\t\t\t\t\tself.config['Contest Name'] +\n\t\t\t\t\t\t\t' - ' +\n\t\t\t\t\t\t\tself.config['Contest Theme'] + \n\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfile.write('Queries and Announcements:\\n')\n\t\t\t\t\t\tfile.write('\\tClientID\\t\\t\\t\\t\\tQuery\\t\\t\\t\\t\\tResponse\\n')\n\t\t\t\t\t\tfor query in query_data:\n\t\t\t\t\t\t\tclient_id = query[0]\n\t\t\t\t\t\t\tquery_text = query[1]\n\t\t\t\t\t\t\tresponse = query[2]\n\t\t\t\t\t\t\tfile.write('\\t' + str(client_id) + '\\t\\t' + query_text + '\\t\\t' + response + '\\n')\n\t\t\t\t\t\tfile.write('\\n################################################' )\n\n\t\t\tself.progress.setValue(100)\n\t\t\tinfo_box = QMessageBox()\n\t\t\tinfo_box.setIcon(QMessageBox.Information)\n\t\t\tinfo_box.setWindowTitle('Done!')\n\t\t\tinfo_box.setText(\n\t\t\t\t'Reports generated in ./Reports/'\n\t\t\t)\n\t\t\tinfo_box.setStandardButtons(QMessageBox.Ok)\n\t\t\tinfo_box.exec_()\n\t\t\t\n\t\texcept Exception as error:\n\t\t\tprint('[ ERROR ] Could not generate reports: ' + str(error))\n\t\t\tself.log('[ ERROR ] Could not generate reports: ' + str(error))\n\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\t\t\n\t\t\tinfo_box = QMessageBox()\n\t\t\tinfo_box.setIcon(QMessageBox.Critical)\n\t\t\tinfo_box.setWindowTitle('Alert')\n\t\t\tinfo_box.setText(\n\t\t\t\t'Error while generating reports.\\n' +\n\t\t\t\t'Error Message: ' + str(error) +\n\t\t\t\t'\\nDetails\\nFile: ' + fname +\n\t\t\t\t'\\nLine No.: ' + str(exc_tb.tb_lineno)\n\t\t\t)\n\t\t\tinfo_box.setStandardButtons(QMessageBox.Ok)\n\t\t\tinfo_box.exec_()\n\t\tfinally:\n\t\t\tself.close()\n\n\tdef exit(self):\n\t\tself.close()","sub_path":"Server/Interface/generate_report_ui.py","file_name":"generate_report_ui.py","file_ext":"py","file_size_in_byte":20919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"168000596","text":"notas = []\nc = soma = 0\nwhile c < 4:\n c += 1\n n = float(input(f'{c}ª nota: '))\n soma += n\n notas.append(n)\nmed = soma / c\nprint('Suas notas foram: ', end='')\nfor c, i in enumerate(notas):\n print(i, end='')\n print(' | ' if c < 3 else ' ', end='')\nprint()\nprint('- ' * 5)\nif med < 5:\n print(f'Sua média é {med}, você está REPROVADO(A)')\nelif med < 7:\n print(f'Sua média é {med}, você está de RECUPERAÇÃO')\nelse:\n print(f'Sua média é {med}, você está APROVADO(A)')\n","sub_path":"Revisao Estruturas de Controle/ex040.py","file_name":"ex040.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"260563794","text":"'''\nThe functions in this file first load data from the data file, using loadData(). They then create data for new fields,\nusing extractData(), and then write the original data and the new data back to the original data file, using writeData().\n\n'''\n\nimport nltk\nimport io\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import SyllableTokenizer\nfrom nltk import word_tokenize\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nimport sys\n\n# The lists that data loaded from original document will be put into.\ncorpus = [] # List of sentences in human-readable form (i.e. 
not vectorized).\ndataExcludingDocumentAllRecordsList = [] # List of lists of data excluding the sentence for each record.\n\n# These lists store data for the new fields to be added to the data file.\nnumAdjectivesList = []\nnumVerbsList = []\nnumAdverbsList = []\nnumConjunctionsList = []\nnumNounsList = []\nnumPronounsList = []\nnumModalVerbsList = []\nnumPrepositionsList = []\nnumDeterminerList = []\nnumCommasList = []\nnumExclamationMarksList = []\nnumFullStopsList = []\nnumQuestionMarksList = []\naverageNumSyllableList = []\nnumInterjectionsList = []\navWordFrequencyList = [] # Average frequency of words in each document\navWordLengthList = []\nnumStopWordsList = [] # Number of stop words in each document\nnumProperNounsList = []\nnumMoreThanSevenCharsList = []\nnumLessThanFiveCharsList = []\nnumCapitalisedWordsList = []\nVADERSentimentScoreList = []\nnumOfWordsInTop35 = []\nnumExistentialTheresList = []\nfileName = \"original_formality_dataset.csv\"\n\n\n# Checks if the 'fileName' is the correct file name\ndef checkFileNameCorrect():\n global fileName\n print(\"The default file name is\", fileName, \"\\n\")\n print(\"If this is the name of the data file, press enter\")\n newFileName = input(\"Otherwise, please provide the correct name (including the file extension), then press enter \")\n if newFileName != \"\":\n fileName = newFileName\n print(\"\\nThank you. The file name has been changed to\", fileName)\n else:\n print(\"\\nThank you. You have confirmed that the file name is correct:\", fileName)\n\n\n# Checks if file present. Code for this module adapted from:\n# https://stackoverflow.com/questions/5627425/what-is-a-good-way-to-handle-exceptions-when-trying-to-read-a-file-in-python\ndef checkFilePresent():\n try:\n f = open(fileName, 'rb')\n f.close() # only a probe: we just need to know that the file opens, so release the handle\n except OSError:\n print(\"\\nFile not found:\", fileName)\n print(\"Please ensure that the data file is in the same folder as the program file and try again.\")\n print(\"Exiting program.\")\n sys.exit()\n\n\n# loadData() loads data for each record from the existing csv file.\ndef loadData():\n checkFileNameCorrect()\n checkFilePresent()\n with open(fileName, encoding='utf-8') as inputFile:\n firstLine = inputFile.readline()\n firstLineAsList = firstLine.split(\",\")\n\n # The sentence field is always the final field on the right. Therefore, the sentence index is the number of\n # fields up to and including the one immediately preceding the 'sentence' field.\n sentenceIndex = len(firstLineAsList)-1\n for line in inputFile:\n\n # Searches through the line for commas, character by character. 
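(The sentence field itself may contain commas, so splitting the whole line on every comma would corrupt records.) 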
Stops when 'sentenceIndex' number of commas\n # have been encountered.\n # The document is located to the right of the comma corresponding to index 'sentenceIndex'.\n # Everything to the left of that comma is data relating to the document.\n numCommas = 0\n for character in range(len(line)):\n if line[character] == \",\":\n numCommas = numCommas + 1\n if numCommas == sentenceIndex:\n dataExcludingSentence = line[:character]\n dataExcludingSentenceThisRecordList = dataExcludingSentence.split(\",\") # List containing attributes\n dataExcludingDocumentAllRecordsList.append(dataExcludingSentenceThisRecordList)\n\n # The rest of the current line is comprised of the document:\n documentToAdd = line[character + 1:]\n\n # The trailing \\n from readline() is deliberately kept on the document;\n # writeData() relies on it to terminate each output record.\n\n # Puts document into a list of Strings:\n corpus.append(documentToAdd)\n break\n inputFile.close()\n print(\"\\nNo of records uploaded: \", len(corpus))\n\n\n# Returns a list of most common words in the corpus, with rankDepth being the ranking when the words are ordered by\n# frequency. Returns a list of words up to and including that ranking.\n\ndef getTopWordsInCorpus(rankDepth):\n topWordsList = []\n # Creates a dictionary containing every word in the corpus as a key, and the frequency of occurrence as value.\n wordsInCorpus = {}\n for documents in corpus:\n wordList = documents.split(\" \")\n for words in wordList:\n\n # Removes punctuation. Code adapted from:\n # https://stackoverflow.com/questions/875968/how-to-remove-symbols-from-a-string-with-python\n words = re.sub(r'[^\\w]', '', words)\n words = words.lower() # makes lower case\n if words in wordsInCorpus.keys():\n wordsInCorpus[words] = wordsInCorpus[words] + 1\n else:\n wordsInCorpus[words] = 1\n\n # List of word counts and words in descending order by word count. 
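Each entry is a (word, count) tuple; sorting on x[1] with reverse=True puts the most frequent words first. 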
Line below adapted from:\n # https://careerkarma.com/blog/python-sort-a-dictionary-by-value/\n wordFreqList = sorted(wordsInCorpus.items(), key=lambda x: x[1], reverse=True)\n\n # Puts the dictionary's keys and values into separate lists\n frequencyKeyList = [] # What is the word?\n frequencyValList = [] # How frequently does the word appear in the corpus?\n for entry in wordFreqList:\n frequencyKeyList.append(entry[0])\n frequencyValList.append(entry[1])\n topValList = [] # List of word frequency values, containing with the top 'rankDepth' number of items\n currentRank = 1\n numInstancesThisVal = 1\n isFirstVal = True\n previousVal = 0\n\n # Finds finds the top 'rankDepth' greatest 'word frequency within corpus' values and appends them to topValList.\n for value in frequencyValList:\n if isFirstVal:\n previousVal = value\n isFirstVal = False\n topValList.append(value)\n continue\n if value == previousVal:\n numInstancesThisVal = numInstancesThisVal + 1\n if currentRank <= rankDepth:\n topValList.append(value)\n else:\n currentRank = currentRank + numInstancesThisVal\n previousVal = value\n numInstancesThisVal = 1\n\n # Add word to topWordsList when its ranking is <= rankDepth and it is different to the previous word.\n if currentRank <= rankDepth:\n topValList.append(value)\n\n # Appends the top 'rankDepth' words to topWordsList.\n for i in range(len(topValList)):\n topWordsList.append(frequencyKeyList[i])\n return topWordsList\n\n\n# Returns the number of words in the document which are in the 'topWordsInCorpus' list, which is a list of the most\n# frequently occurring words in the corpus of sentences.\ndef getNumTopWordsInDocument(cleansedWordsList, topWordsInCorpus):\n count = 0\n for words in cleansedWordsList:\n if words.lower() in topWordsInCorpus:\n count = count + 1\n return count\n\n\n# Calculates the number of each of various grammar types (nouns, verbs, etc) in the document, and appends the relevant\n# lists.\ndef extractGrammar(document):\n documentAsTokens = word_tokenize(document)\n grammarList = nltk.pos_tag(documentAsTokens)\n numAdjectives = numVerbs = numAdverbs = numConjunctions = numNouns = numPronouns = numModalVerbs = 0\n numPrepositions = numDeterminer = numInterjections = numProperNouns = numExistentialTheres = 0\n for entry in grammarList:\n\n # See https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html for the meanings of the\n # abbreviations below\n if entry[1] == \"JJ\" or entry[1] == \"JJR\" or entry[1] == \"JJS\":\n numAdjectives = numAdjectives + 1\n if entry[1] == \"VB\" or entry[1] == \"VBD\" or entry[1] == \"VBG\" or entry[1] == \"VBN\" or \\\n entry[1] == \"VBP\" or entry[1] == \"VBZ\":\n numVerbs = numVerbs + 1\n if entry[1] == \"RB\" or entry[1] == \"RBR\" or entry[1] == \"RBS\" or entry[1] == \"WRB\":\n numAdverbs = numAdverbs + 1\n if entry[1] == \"CC\":\n numConjunctions = numConjunctions + 1\n if entry[1] == \"NN\" or entry[1] == \"NNS\" or entry[1] == \"NNP\" or entry[1] == \"NNPS\":\n numNouns = numNouns + 1\n if entry[1] == \"PRP\" or entry[1] == \"PRP$\" or entry[1] == \"WP\":\n numPronouns = numPronouns + 1\n if entry[1] == \"MD\":\n numModalVerbs = numModalVerbs + 1\n if entry[1] == \"IN\" or entry[1] == \"TO\":\n numPrepositions = numPrepositions + 1\n if entry[1] == \"DT\" or entry[1] == \"WDT\":\n numDeterminer = numDeterminer + 1\n if entry[1] == \"UH\":\n numInterjections = numInterjections + 1\n if entry[1] == \"NNP\" or entry[1] == \"NNPS\":\n numProperNouns = numProperNouns + 1\n if entry[1] == \"EX\":\n 
numExistentialTheres = numExistentialTheres + 1\n numAdjectivesList.append(numAdjectives)\n numVerbsList.append(numVerbs)\n numAdverbsList.append(numAdverbs)\n numConjunctionsList.append(numConjunctions)\n numNounsList.append(numNouns)\n numPronounsList.append(numPronouns)\n numModalVerbsList.append(numModalVerbs)\n numPrepositionsList.append(numPrepositions)\n numDeterminerList.append(numDeterminer)\n numInterjectionsList.append(numInterjections)\n numProperNounsList.append(numProperNouns)\n numExistentialTheresList.append(numExistentialTheres)\n\n\n# Calculates how many instances there are of various types of punctuation in each sentence, and appends\n# the relevant lists.\ndef extractPunctuation(document):\n numCommas = numExclamationMarks = numFullStops = numQuestionMarks = 0\n for character in document:\n if character == \",\":\n numCommas = numCommas + 1\n if character == \"!\":\n numExclamationMarks = numExclamationMarks + 1\n if character == \".\":\n numFullStops = numFullStops + 1\n if character == \"?\":\n numQuestionMarks = numQuestionMarks + 1\n numCommasList.append(numCommas)\n numExclamationMarksList.append(numExclamationMarks)\n numFullStopsList.append(numFullStops)\n numQuestionMarksList.append(numQuestionMarks)\n\n\n# Calculates the average number of syllables per word in the document and appends the relevant list with\n# the figure.\n# Code for this function adapted from code at:\n# https://www.nltk.org/api/nltk.tokenize.html#nltk.tokenize.sonority_sequencing.SyllableTokenizer\ndef syllableCount(cleansedWordsList):\n wordCountThisDocument = 0\n totalSyllsThisDocument = 0\n SSP = SyllableTokenizer()\n for words in cleansedWordsList:\n numSyllThisWord = len(SSP.tokenize(words.lower()))\n if numSyllThisWord > 0 and words: # Run if number of syllables > 0 and list entry not null\n wordCountThisDocument = wordCountThisDocument + 1\n totalSyllsThisDocument = totalSyllsThisDocument + numSyllThisWord\n if wordCountThisDocument > 0:\n averageNumSyllables = totalSyllsThisDocument / wordCountThisDocument\n else:\n averageNumSyllables = \"N/A\"\n averageNumSyllableList.append(averageNumSyllables)\n\n\n# How frequently does each word appear in the sentence on average? 
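For example, \"the cat saw the cat\" has counts {the: 2, cat: 2, saw: 1}, giving an average frequency of 5/3. 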
Appends the answer to the relevant list.\ndef wordFreqThisDoc(cleansedWordsList):\n listOfFrequencies = []\n wordsProcessed = []\n for word in cleansedWordsList:\n wordCount = 0\n if word in wordsProcessed:\n continue\n else:\n wordsProcessed.append(word)\n for i in range(len(cleansedWordsList)):\n if cleansedWordsList[i] == word:\n wordCount = wordCount + 1\n if i == len(cleansedWordsList) - 1:\n listOfFrequencies.append(wordCount)\n averageWordFrequency = 0\n if len(listOfFrequencies) != 0:\n averageWordFrequency = sum(listOfFrequencies) / len(listOfFrequencies)\n avWordFrequencyList.append(averageWordFrequency)\n\n\n# Calculates the sentence's average word length, and appends the relevant list with the result.\ndef avWordLengthThisDoc(cleansedWordsList):\n characterCount = 0\n for words in cleansedWordsList:\n characterCount = characterCount + len(words)\n avWordLength = 0\n if len(cleansedWordsList) != 0:\n avWordLength = characterCount / len(cleansedWordsList)\n avWordLengthList.append(avWordLength)\n # Number of stop words in the document.\n stopWords = stopwords.words('english')\n stopWordCount = 0\n for word in cleansedWordsList:\n if word in stopWords:\n stopWordCount = stopWordCount + 1\n numStopWordsList.append(stopWordCount)\n\n\n# Returns the number of words greater than 'length' in length within the sentence.\ndef numWordsGreaterThanLength(cleansedWordsList, length):\n numWords = 0\n for word in cleansedWordsList:\n if len(word) > length:\n numWords = numWords + 1\n return numWords\n\n\n# Returns the number of words less than 'length' in length within the sentence.\ndef numWordsLessThanLength(cleansedWordsList, length):\n numWords = 0\n for word in cleansedWordsList:\n if len(word) < length:\n numWords = numWords + 1\n return numWords\n\n\n# Calculates the number of capitalised words within the document, and appends the figure to the relevant list.\ndef numCapitalisedWords(cleansedWordsList):\n numWords = 0\n for word in cleansedWordsList:\n if len(word) > 0:\n if word[0].isupper():\n numWords = numWords + 1\n numCapitalisedWordsList.append(numWords)\n\n\n'''\nVADER document sentiment analysis - Code adapted from code at:\nhttps://github.com/cjhutto/vaderSentiment#citation-information\n\nProduces a score between -1 and 1, where -1 is extreme negative sentiment and 1 is extreme positive sentiment.\n\nIt then appends the relevant list with the score. \n'''\n\n\ndef VADERScore(document):\n analyzer = SentimentIntensityAnalyzer()\n vs = analyzer.polarity_scores(document)\n compoundScore = vs['compound']\n VADERSentimentScoreList.append(compoundScore)\n\n# extractData() goes through each sentence in the corpus in turn using a for loop, and calls helper methods to\n# populate lists with new data for each sentence.\n\n\ndef extractData():\n print(\"Processing data. This can take a while. Please be patient.\")\n # Downloads for NLTK, which is a library that is used for labelling words as nouns, verbs, etc and counting\n # syllables\n nltk.download('cmudict')\n nltk.download('punkt')\n nltk.download('averaged_perceptron_tagger')\n nltk.download('stopwords')\n for document in corpus:\n extractGrammar(document) # Finds number of each of the grammar type and puts the figures into lists.\n extractPunctuation(document) # Gets number of various punctuation types and puts figures into lists.\n listOfWords = document.split(\" \") # Splits each sentence into a list of its words.\n\n # Creates list with punctuation removed from the words. 
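The pattern r'[^\\w]' deletes every non-word character, so a token like \"don't\" becomes \"dont\" before counting. 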
Code for this functionality adapted from code at:\n # https://stackoverflow.com/questions/875968/how-to-remove-symbols-from-a-string-with-python\n cleansedWordsList = []\n for words in listOfWords:\n words = re.sub(r'[^\\w]', '', words)\n cleansedWordsList.append(words)\n syllableCount(cleansedWordsList) # Average number of syllables per word\n wordFreqThisDoc(cleansedWordsList) # How frequently does each word appear in the sentence on average?\n avWordLengthThisDoc(cleansedWordsList) # Average word length. Total character length ex punctuation / num words\n numMoreThanSevenCharsList.append(numWordsGreaterThanLength(cleansedWordsList, 7)) # Num words with > 7 chars\n numLessThanFiveCharsList.append(numWordsLessThanLength(cleansedWordsList, 5)) # Num words with < 5 chars\n numCapitalisedWords(cleansedWordsList) # Number of capitalised words\n VADERScore(document) # The sentence's VADER sentiment score\n top35WordsList = getTopWordsInCorpus(35) # Top 35 most common words in corpus (recomputed on every pass; hoisting this above the loop would save a full corpus scan per sentence)\n numOfWordsInTop35.append(getNumTopWordsInDocument(cleansedWordsList, top35WordsList))\n\n\n# writeData() writes the original data and the newly created fields/accompanying data to the original data file.\n# The new data is stored after the end of original document-related data and before the sentences themselves (the\n# sentence field is the rightmost field).\ndef writeData():\n with open(fileName, encoding='utf-8') as inputFile:\n # Get column headers from existing data set and insert new field headers before the sentence field (which is the\n # furthest field on the right).\n existingHeaders = inputFile.readline()\n existingHeadersAsList = existingHeaders.split(\",\")\n\n # How many fields (and therefore commas) up to and including the one immediately preceding the 'sentence' field?\n sentenceIndex = len(existingHeadersAsList)-1\n\n # Additions to existing header.\n newHeaders = ['Number of adjectives', 'Number of verbs', 'Number of adverbs', 'Number of conjunctions',\n 'Number of nouns', 'Number of pronouns', 'Number of modal verbs', 'Number of prepositions',\n 'Number of determiners', 'Number of commas', 'Number of exclamation marks',\n 'Number of full stops', 'Number of question marks', 'Number of existential theres',\n 'Number of proper nouns', 'Number of capitalised words', 'Number of interjections',\n 'Average number of syllables per word', 'Average word length', 'Number of stop words',\n 'Number of words with > 7 characters', 'Number of words with < 5 characters',\n 'Average word frequency', 'Number of words in 35 most common words in corpus',\n 'VADER sentiment score']\n tempSentenceIndex = sentenceIndex\n for items in newHeaders:\n existingHeadersAsList.insert(tempSentenceIndex, items)\n tempSentenceIndex = tempSentenceIndex + 1\n sep = \",\"\n allHeaders = sep.join(existingHeadersAsList)\n inputFile.close()\n numberOfDocs = len(corpus)\n\n # Copy data to file. 
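Each record becomes one comma-separated line: the original fields, then the new feature columns, then the sentence. 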
NB Overwrites previous file, so back it up first.\n with io.open(fileName, 'w', encoding='utf8') as outputFile:\n outputFile.write(allHeaders)\n print(\"Number of records for new file \", numberOfDocs)\n for i in range(numberOfDocs):\n\n # References to ints are surrounded by [ and ] to prevent the error msg: TypeError: can only concatenate\n # list (not \"int\") to list\n listOfStrings = dataExcludingDocumentAllRecordsList[i] + [numAdjectivesList[i]] + [numVerbsList[i]] + \\\n [numAdverbsList[i]] + [numConjunctionsList[i]] + [numNounsList[i]] + \\\n [numPronounsList[i]] + [numModalVerbsList[i]] + [numPrepositionsList[i]] + \\\n [numDeterminerList[i]] + [numCommasList[i]] + [numExclamationMarksList[i]] + \\\n [numFullStopsList[i]] + [numQuestionMarksList[i]] + [numExistentialTheresList[i]] + \\\n [numProperNounsList[i]] + [numCapitalisedWordsList[i]] + [numInterjectionsList[i]] + \\\n [averageNumSyllableList[i]] + [avWordLengthList[i]] + [numStopWordsList[i]] + \\\n [numMoreThanSevenCharsList[i]] + [numLessThanFiveCharsList[i]] + \\\n [avWordFrequencyList[i]] + [numOfWordsInTop35[i]] + [VADERSentimentScoreList[i]] + \\\n [corpus[i]]\n allDataThisLine = ','.join(map(str, listOfStrings)) # This line's data as a comma-separated string\n outputFile.write(allDataThisLine)\n print(\"New data successfully written to file. Program complete.\")\n outputFile.close()\n\n\n# METHOD CALLS THAT EXECUTE WHENEVER THE PROGRAM IS RUN\nloadData()\nextractData()\nwriteData()","sub_path":"add-new-fields.py","file_name":"add-new-fields.py","file_ext":"py","file_size_in_byte":20504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"396360836","text":"#declare a main function\r\ndef main():\r\n #prompts the user to enter a word\r\n userInput = input(\"Enter a word: \")\r\n #prints the reversed word to the console\r\n print(\"The reversed word = \",reverse(userInput))\r\n \r\n#this is a function that reverses the user input text \r\ndef reverse(userInput):\r\n #initialze a string called reverseInput\r\n reverseInput = \"\"\r\n #beginning from the last letter up until the first,\r\n #append all the letters to reverseInput\r\n for count in range ( len(userInput)-1, -1, -1):\r\n if count == 0:\r\n reverseInput += userInput[count]\r\n else:\r\n reverseInput += userInput[count]+\",\"\r\n #return the reverse input to the user\r\n return reverseInput\r\n \r\n\r\nmain()\r\n\r\n","sub_path":"Extra-Credit-1/Part1_3.py","file_name":"Part1_3.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"634846272","text":"#!/usr/bin/env python\n\nfrom os.path import join, dirname\nfrom distutils.core import setup\n\npackages = [\n 'pygephi'\n]\n\nsetup(name='pygephi',\n version=\"1.0\",\n description='Python scripts that can be used to stream data to gephi',\n long_description=open(join(dirname(__file__), 'README'), 'r').read(),\n author='Andre Panisson',\n author_email='panisson@gmail.com',\n license='Apache 2.0',\n url='https://github.com/panisson/pygephi_graphstreaming',\n packages=packages,\n classifiers=[\n 'Development Status :: 6 - Mature',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Visualization'\n 
]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"238736516","text":"from functools import wraps\n\nfrom django.http import JsonResponse\n\n\ndef json_response(f):\n \"\"\"Return the response as json\"\"\"\n @wraps(f)\n def wrapped(*args, **kwargs):\n \"\"\"Wrapped function\"\"\"\n result = f(*args, **kwargs)\n response = JsonResponse(result)\n if type(result) == dict:\n if 'error' in result:\n try:\n response.status_code = int(result.get('error', {}).get('code'))\n except ValueError:\n result['error']['code'] = 500\n response = JsonResponse(result)\n response.status_code = 500\n return response\n return wrapped\n","sub_path":"utils/decorators/json_response.py","file_name":"json_response.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"180097789","text":"class Solution:\n def oddEvenJumps(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n def findJump(A):\n import bisect\n vlist = [] # list of values\n v2idx = {} # value => closest index\n\n n = len(A)\n jump = [-1] * n\n for i in range(n)[::-1]:\n if i == n - 1 or A[i] > vlist[-1]:\n pass # cannot jump from this point\n else:\n idx = bisect.bisect_left(vlist, A[i])\n # bisect.bisect_left guarantees that:\n # vlist[idx - 1] < A[i], vlist[idx] >= A[i]\n # So vlist[idx] is the smallest element that is no smaller than A[i]\n jump[i] = v2idx[vlist[idx]]\n\n if A[i] not in v2idx:\n bisect.insort_right(vlist, A[i]) # Insert A[i] into vlist and keep vlist ordered\n v2idx[A[i]] = i # Update v2idx\n return jump\n\n ojump = findJump(A)\n ejump = findJump([-e for e in A])\n\n opossible = [False] * len(A)\n epossible = [False] * len(A)\n ans = 0\n for i in range(len(A))[::-1]:\n if i == len(A) - 1:\n opossible[i] = epossible[i] = True\n else:\n opossible[i] = epossible[ojump[i]] if ojump[i] != -1 else False\n epossible[i] = opossible[ejump[i]] if ejump[i] != -1 else False\n\n if opossible[i]:\n ans += 1\n return ans\n","sub_path":"WeeklyContest/119/odd-even-jump.py","file_name":"odd-even-jump.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"595208747","text":"import json\nimport os\nimport timeit\nimport tkinter as tk\nimport tracemalloc\nfrom tkinter import *\nimport tkinter.messagebox as tkMsg\nfrom tkinter import filedialog as fd\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom func_timeout import func_timeout, FunctionTimedOut\n\nimport Automa\nimport ChiusuraSilenziosa\nimport Constant\nimport Diagnosticatore\nimport Link\nimport SalvaRiassunto\nimport Transizione\nimport SpazioComportamentale\nimport SpazioComportamentaleOsservazione\nimport Diagnosi\n\nclass CaricaRete():\n\n '''\n Questa finestra contiene l'operazione di osservazione lineare e di diagnosi effettuate su una rete OSSERVATA\n '''\n def OpReteOsservata(self, NomeRete, PercorsoSalvataggio, automi, links, transizioni, listaTransizioni,\n percorsoAutoma, percorsoLinks, percorsoTransizioni, statoComportamentale,\n arcoComportamentale):\n\n def abilitaConfermaOss(*_):\n if entryOsservazioneLineare.var.get() != \"\":\n buttonOsservazione['state'] = 'normal'\n else:\n buttonOsservazione['state'] = 'disabled'\n\n def controlloOsservazioneTime():\n listaTempi = timeit.repeat(stmt=lambda: 
controlloOsservazione(),\n setup='pass', number=1, repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/Oss.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n controlloOsservazione()\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ff2600'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaPic)), listaPic, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ff2600'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n controlloOsservazione()\n\n def controlloOsservazione():\n osservazione_lineare = entryOsservazioneLineare.get()\n osservazione_lineare = osservazione_lineare.split(',')\n for oss in osservazione_lineare:\n if not str(oss).isalnum() or str(oss) == \"\":\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"L'osservazione lineare contiene caratteri non ammessi\")\n return\n trovato = False\n for arco in arcoComportamentale:\n if str(oss) == getattr(arco, 'observability'):\n trovato = True\n break\n if not trovato:\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"L'osservazione lineare contiene elementi non presenti nello spazio\")\n return\n\n confermaOsservazione(osservazione_lineare)\n\n def confermaOsservazione(osservazioneLineare):\n\n def mostraGraficoOsservazioneLineare(path):\n labelEsitoOssTitle.config(text=\"Risultato dell'osservazione lineare inserita post potatura\")\n reteOssImg = tk.PhotoImage(file=path + \".gv.png\")\n labelEsitoOss.config(image=reteOssImg)\n labelEsitoOss.image = reteOssImg\n\n lista_stati_oss = []\n lista_link_oss = []\n stato_comportamentale_oss = []\n arco_comportamentale_oss = []\n try:\n func_timeout(Constant.TIMEOUT, func=SpazioComportamentaleOsservazione.creaSpazioComportamentaleOsservazioneLineare,\n args=(automi, transizioni, links, lista_stati_oss, lista_link_oss, listaTransizioni,\n stato_comportamentale_oss, arco_comportamentale_oss, osservazioneLineare))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n '''\n Controllo che la rete comportamentale prodotta dall'osservazione abbia stati terminali\n '''\n terminale = False\n for stato in stato_comportamentale_oss:\n if getattr(stato, 'finale'):\n terminale = True\n break\n if not terminale:\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"L'osservazione inserita non produce una rete con stati terminali, correggere \"\n \"l'input, per favore.\")\n return\n\n max_indice = len(osservazioneLineare)\n titoloOss = \"\".join(osservazioneLineare)\n\n # Disegno dello spazio comportamentale relativo ad un'osservazione lineare\n nome_oss = PercorsoSalvataggio + NomeRete + \"Oss\" + titoloOss\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleIndiceOsservazione(\n 
arco_comportamentale_oss, nome_oss)\n\n # Potatura Osservazione\n try:\n func_timeout(Constant.TIMEOUT, func=SpazioComportamentaleOsservazione.PotaturaOss,\n args=(stato_comportamentale_oss, arco_comportamentale_oss))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n\n # Ridenominazione Osservazione\n stato_comportamentale_ridenominato_oss = []\n arco_comportamentale_ridenominato_oss = arco_comportamentale_oss.copy()\n SpazioComportamentaleOsservazione.RidenominaOss(stato_comportamentale_oss,\n stato_comportamentale_ridenominato_oss,\n arco_comportamentale_ridenominato_oss, max_indice)\n # Costruzione e salvataggio sommario\n summary = SalvaRiassunto.PreparaSommario(automi, links, listaTransizioni,\n stato_comportamentale_ridenominato_oss, arco_comportamentale_ridenominato_oss, osservato=True)\n SalvaRiassunto.Salva(nome_oss, summary)\n if summary == -1:\n # Non accade più\n print(\"Il file esiste già\")\n\n # Disegno dello spazio comportamentale relativo ad un'osservazione lineare potato\n nome_pot_oss = PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss + \"Pot\"\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleIndiceOsservazione(\n arco_comportamentale_oss, nome_pot_oss)\n # Disegno dello spazio comportamentale relativo ad un'osservazione lineare potato e ridenominato\n nomeReteRidenominataOss = PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss + \"PotRid\"\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleRidenominatoOss(\n arco_comportamentale_ridenominato_oss, nomeReteRidenominataOss)\n\n # Riporto il disegno dello spazio comportamentale osservato potato nella finestra\n mostraGraficoOsservazioneLineare(PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss + \"Pot\")\n\n # Salvo la rete dello spazio comportamentale osservato\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.salvaSpazioComportamentaleOss(\n arco_comportamentale_oss, stato_comportamentale_oss, PercorsoSalvataggio + NomeRete.split('.')[0] +\\\n \"Oss\" + titoloOss, percorsoAutoma, percorsoLinks, percorsoTransizioni, osservazioneLineare)\n\n def OpDiagnosiTime():\n listaTempi = timeit.repeat(stmt=lambda: OpDiagnosi(), setup='pass', number=1, repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/Diagnosi.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n OpDiagnosi()\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#63b6ff'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaPic)), listaPic, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#63b6ff'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n OpDiagnosi()\n\n 
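# --- Illustrative sketch (not part of the original module): the *Time() helpers above all follow the same benchmarking pattern, timing a callable with timeit.repeat and then sampling its memory use with tracemalloc. This is a minimal, GUI-free version of that pattern; work() is a hypothetical stand-in for controlloOsservazione() / OpDiagnosi().\nimport timeit\nimport tracemalloc\n\ndef work():\n    return sum(i * i for i in range(10000))\n\ntempi = timeit.repeat(stmt=work, setup='pass', number=1, repeat=5)\nprint('avg time:', sum(tempi) / len(tempi))\n\ntracemalloc.start()\nwork()\ncurrent, peak = tracemalloc.get_traced_memory()  # returns (current, peak) in bytes\ntracemalloc.stop()\nprint('current bytes:', current, 'peak bytes:', peak)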
def OpDiagnosi():\n try:\n diagnosiText = func_timeout(Constant.TIMEOUT, func=Diagnosi.DiagnosiTotale, args=(NomeRete,\n PercorsoSalvataggio, automi, transizioni, listaTransizioni, links, percorsoAutoma,\n percorsoLinks, percorsoTransizioni, statoComportamentale, arcoComportamentale, False,\n []))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n Diagnosi.Salva(NomeRete, PercorsoSalvataggio, diagnosiText)\n labelDiagnosi.config(text=diagnosiText)\n\n def TutteChiusureTime(statoComportamentale, arcoComportamentale):\n listaTempi = timeit.repeat(stmt=lambda: TutteChiusure(statoComportamentale, arcoComportamentale),\n setup='pass', number=1, repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/SpazioChiusure.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n TutteChiusure(statoComportamentale, arcoComportamentale)\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#a229ff'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#a229ff'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n TutteChiusure(statoComportamentale, arcoComportamentale)\n\n def TutteChiusure(statoComportamentale, arcoComportamentale):\n try:\n listaChiusure = func_timeout(Constant.TIMEOUT, func=ChiusuraSilenziosa.ChiusuraSilenziosaPostOss,\n args=(statoComportamentale, arcoComportamentale))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n try:\n listaStatiUscita, listaRegexUscita = func_timeout(Constant.TIMEOUT,\n func=Diagnosi.DiagnosiSingolaPostOss, args=(\n NomeRete, PercorsoSalvataggio, automi, transizioni,\n listaTransizioni, links, percorsoAutoma, percorsoLinks, percorsoTransizioni,\n statoComportamentale, arcoComportamentale, True, listaChiusure))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n try:\n listaArchiChiusure = func_timeout(Constant.TIMEOUT,\n func=ChiusuraSilenziosa.SpazioChiusureSilenziosePostOss,\n args=(listaChiusure,\n arcoComportamentale, listaStatiUscita, listaRegexUscita))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n # Costruzione e salvataggio del sommario\n summary = SalvaRiassunto.PreparaSommarioSpazioChiusure(listaChiusure, listaArchiChiusure,\n osservata=True)\n SalvaRiassunto.Salva(PercorsoSalvataggio + \"SpazioChiusure\" + NomeRete, summary)\n if summary == -1:\n # Non accade più\n print(\"Il file esiste già\")\n\n ChiusuraSilenziosa.DisegnaSpazioChiusurePostOss(listaChiusure, listaStatiUscita, listaRegexUscita,\n listaArchiChiusure,\n PercorsoSalvataggio + 
\"SpazioChiusure\" + NomeRete)\n\n def OpChiusure():\n\n def ChiusuraSingolo(statoIniziale, statoComportamentale, arcoComportamentale):\n listaStati, listaArchi = ChiusuraSilenziosa.ChiusuraSilenziosaPostOssStato(\n statoIniziale=listaStatiSelezionabili[statoIniziale], statoComportamentale=statoComportamentale,\n arcoComportamentale=arcoComportamentale)\n '''\n PROMEMORIA: attenzione quando si fanno i nomi dei file, meglio fare solo stringhe con caratteri semplici\n perché altrimenti sbarella tutto, non apre più nulla e non si sa il perché.\n '''\n titolo = str(getattr(listaStatiSelezionabili[statoIniziale], 'listaStati')) +\\\n str(getattr(listaStatiSelezionabili[statoIniziale], 'listaLink'))\n titolo = \"\".join(titolo) + str(getattr(listaStatiSelezionabili[statoIniziale], 'indice'))\n titolo = titolo.replace('ε', 'e')\n titolo = \"ChiusuraSilenziosaDi\" + titolo\n titolo = PercorsoSalvataggio + titolo\n\n ChiusuraSilenziosa.DisegnaChiusuraSilenziosaPostOss(listaStati, listaArchi, titolo)\n\n diagnosiText = Diagnosi.DiagnosiTotale(NomeRete, PercorsoSalvataggio, automi=automi, transizioni=transizioni,\n listaTransizioni=listaTransizioni, links=links,\n percorsoAutoma=percorsoAutoma, percorsoLinks=percorsoLinks,\n percorsoTransizioni=percorsoTransizioni,\n statoComportamentale=listaStati, arcoComportamentale=listaArchi,\n chiusura=True, archiSpazio=arcoComportamentale)\n labelDiagnosiChiusura.config(text=diagnosiText)\n\n def TutteChiusure(statoComportamentale, arcoComportamentale):\n try:\n listaChiusure = func_timeout(Constant.TIMEOUT/3, func=ChiusuraSilenziosa.ChiusuraSilenziosaPostOss,\n args=(statoComportamentale, arcoComportamentale))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n try:\n listaStatiUscita, listaRegexUscita = func_timeout(Constant.TIMEOUT/3,\n func=Diagnosi.DiagnosiSingolaPostOss, args=(NomeRete, PercorsoSalvataggio, automi, transizioni,\n listaTransizioni, links, percorsoAutoma, percorsoLinks, percorsoTransizioni,\n statoComportamentale, arcoComportamentale, True, listaChiusure))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n try:\n listaArchiChiusure = func_timeout(Constant.TIMEOUT/3,\n func=ChiusuraSilenziosa.SpazioChiusureSilenziosePostOss, args=(listaChiusure,\n arcoComportamentale, listaStatiUscita, listaRegexUscita))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n # Costruzione e salvataggio del sommario\n summary = SalvaRiassunto.PreparaSommarioSpazioChiusure(listaChiusure, listaArchiChiusure,\n osservata=True)\n SalvaRiassunto.Salva(PercorsoSalvataggio + \"SpazioChiusure\" + NomeRete, summary)\n if summary == -1:\n # Non accade più\n print(\"Il file esiste già\")\n\n ChiusuraSilenziosa.DisegnaSpazioChiusurePostOss(listaChiusure, listaStatiUscita, listaRegexUscita,\n listaArchiChiusure, PercorsoSalvataggio + \"SpazioChiusure\" + NomeRete)\n\n listaStatiSelezionabili = []\n counter = 0\n for stato in statoComportamentale:\n\n if counter == 0:\n listaStatiSelezionabili.append(stato)\n counter += 1\n continue\n\n for arco in arcoComportamentale:\n\n if getattr(arco, 'observability') == 'ε':\n continue\n\n st = getattr(stato, 'listaStati') + getattr(stato, 'listaLink')\n st = \" \".join(st) + \" \" + str(getattr(stato, 'indice'))\n\n stDest_ogg = getattr(arco, 'statoDestinazione')\n stDest = getattr(stDest_ogg, 'listaStati') + getattr(stDest_ogg, 
'listaLink')\n stDest = \" \" .join(stDest) + \" \" + str(getattr(stDest_ogg, 'indice'))\n\n if st == stDest:\n listaStatiSelezionabili.append(stato)\n break\n\n labelSceltaStatoIniziale = tk.Label(finestraOpOss, text=\"Scegli lo stato di ingresso tra quelli disponibili\")\n labelSceltaStatoIniziale.pack()\n\n if len(listaStatiSelezionabili) != 0:\n for i in range(len(listaStatiSelezionabili)):\n st = getattr(listaStatiSelezionabili[i], 'listaStati') + getattr(listaStatiSelezionabili[i], 'listaLink')\n st = \" \".join(st) + \" \" + str(getattr(listaStatiSelezionabili[i], 'indice'))\n\n newButton = tk.Button(finestraOpOss, text=st, command=lambda j=i: ChiusuraSingolo(\n statoIniziale=j, statoComportamentale=statoComportamentale,\n arcoComportamentale=arcoComportamentale))\n newButton.pack()\n else:\n labelSceltaStatoIniziale.config(text = \"Non ci sono stati di ingresso adatti disponibili\")\n\n labelDiagnosiChiusura = tk.Label(finestraOpOss, text = \"\")\n labelDiagnosiChiusura.pack()\n\n labelSpazioChiusura = tk.Label(finestraOpOss, text=\"Crea Spazio Chiusure Silenziose\")\n labelSpazioChiusura.pack()\n\n buttonSpazioChiusura = tk.Button(finestraOpOss, text=\"Crea\", command=lambda : TutteChiusure(\n statoComportamentale=statoComportamentale, arcoComportamentale=arcoComportamentale\n ))\n buttonSpazioChiusura.pack()\n\n def CostruisciDiagnosticatoreTime(statoComportamentale, arcoComportamentale):\n listaTempi = timeit.repeat(stmt=lambda:CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale),\n setup='pass', number=1, repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/Diagnosticatore.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale)\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ffaa00'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaPic)), listaPic, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ffaa00'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale)\n\n def CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale):\n listaChiusure = ChiusuraSilenziosa.ChiusuraSilenziosaPostOss(statoComportamentale, arcoComportamentale)\n listaStatiUscita, listaRegexUscita = Diagnosi.DiagnosiSingolaPostOss(NomeRete, PercorsoSalvataggio,\n automi=automi, transizioni=transizioni, links=links, listaTransizioni=listaTransizioni,\n percorsoAutoma=percorsoAutoma, percorsoLinks=percorsoLinks, percorsoTransizioni=percorsoTransizioni,\n statoComportamentale=statoComportamentale, arcoComportamentale=arcoComportamentale, chiusura=True,\n listaChiusure=listaChiusure)\n listaArchiChiusure = 
ChiusuraSilenziosa.SpazioChiusureSilenziosePostOss(listaChiusure,\n arcoComportamentale, listaStatiUscita, listaRegexUscita)\n\n listaDiagnosiChiusura = Diagnosi.DiagnosiDiagnosticatore(NomeRete, PercorsoSalvataggio, automi=automi,\n transizioni=transizioni, links=links,listaTransizioni=listaTransizioni, percorsoAutoma=percorsoAutoma,\n percorsoLinks=percorsoLinks, percorsoTransizioni=percorsoTransizioni,\n statoComportamentale=statoComportamentale, arcoComportamentale=arcoComportamentale, chiusura=True,\n listaChiusure=listaChiusure, osservata=True)\n\n # Salva sommario Diagnosticatore\n summary = SalvaRiassunto.PreparaSommarioDiagnosticatore(listaChiusure, listaDiagnosiChiusura,\n listaArchiChiusure, listaStatiUscita, listaRegexUscita)\n SalvaRiassunto.Salva(PercorsoSalvataggio + \"Diagnosticatore\" + NomeRete, summary)\n if summary == -1:\n print(\"Il file esiste già\")\n\n Diagnosticatore.DisegnaDiagnosticatore(listaChiusure, listaStatiUscita, listaRegexUscita,\n listaArchiChiusure, listaDiagnosiChiusura, PercorsoSalvataggio + \"Diagnosticatore\" + NomeRete,\n osservato=True)\n Diagnosticatore.SalvaDiagnosticatore(percorsoAutoma, percorsoLinks, percorsoTransizioni, listaChiusure,\n listaStatiUscita, listaRegexUscita, listaArchiChiusure, listaDiagnosiChiusura,\n PercorsoSalvataggio + \"Diagnosticatore\" + NomeRete, osservato=True)\n\n finestraOpOss = tk.Toplevel(self)\n finestraOpOss.minsize(200, 200)\n finestraOpOss.title(\"Operazioni su Spazio Comportamentale Osservato\")\n finestraOpOss.resizable = False\n finestraOpOss.configure(background=\"white\")\n\n labelIstruzioni = tk.Label(finestraOpOss, background=\"white\",\n text=\"Scegliere l'operazione che si vuole eseguire sulla rete appena caricata. Cliccare sul bottone\\n\"\n \"relativo per aprire e proseguire con l'operazione.\")\n labelIstruzioni.pack()\n\n '''\n PARTE RELATIVA ALL'OSSERVAZIONE LINEARE\n '''\n entryOsservazioneLineare = tk.Entry(finestraOpOss, width=100)\n entryOsservazioneLineare.var = tk.StringVar()\n entryOsservazioneLineare['textvariable'] = entryOsservazioneLineare.var\n entryOsservazioneLineare.var.trace_add('write', abilitaConfermaOss)\n entryOsservazioneLineare.pack()\n\n buttonOsservazione = tk.Button(finestraOpOss, text=\"Conferma Osservazione Lineare\", state=DISABLED, command=\n controlloOsservazione)\n buttonOsservazione.pack()\n labelEsitoOssTitle = tk.Label(finestraOpOss, text=\"\")\n labelEsitoOssTitle.pack()\n labelEsitoOss = tk.Label(finestraOpOss, text=\"\")\n labelEsitoOss.pack()\n '''\n PARTE RELATIVA ALLA DIAGNOSI\n '''\n buttonDiagnosi = tk.Button(finestraOpOss, text=\"Diagnosi\", command=lambda : OpDiagnosi())\n buttonDiagnosi.pack()\n labelDiagnosi = tk.Label(finestraOpOss, text=\"\")\n labelDiagnosi.pack()\n '''\n PARTE RELATIVA ALLA SEZIONE CHIUSURE\n '''\n labelChiusura = tk.Label(finestraOpOss, text=\"Chiusura Silenziosa\")\n labelChiusura.pack()\n buttonChiusureSilenziose = tk.Button(finestraOpOss, text=\"Chiusure Silenziose\" , command=lambda :\n TutteChiusure(statoComportamentale, arcoComportamentale))\n buttonChiusureSilenziose.pack()\n '''\n PARTE RELATIVA ALLA COSTRUZIONE DI UN DIAGNOSTICATORE\n '''\n labelDiagnosticatore = tk.Label(finestraOpOss, text=\"Diagnosticatore\")\n labelDiagnosticatore.pack()\n buttonDiagnosticatore = tk.Button(finestraOpOss, text=\"Costruisci Diagnosticatore\", command=lambda :\n CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale))\n buttonDiagnosticatore.pack()\n\n def OpReteNonOsservata(self, NomeRete, PercorsoSalvataggio, automi, 
links, transizioni, listaTransizioni,\n percorsoAutoma, percorsoLinks, percorsoTransizioni, statoComportamentale,\n arcoComportamentale):\n\n def abilitaConferma(*_):\n if entryOsservazioneLineare.var.get() != \"\":\n buttonConferma['state'] = 'normal'\n else:\n buttonConferma['state'] = 'disabled'\n\n def controlloOsservazioneTime():\n listaTempi = timeit.repeat(stmt=lambda: controlloOsservazione(), setup='pass', number=1,\n repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/Oss.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n controlloOsservazione()\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ff2600'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ff2600'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n controlloOsservazione()\n\n def controlloOsservazione():\n osservazione_lineare = entryOsservazioneLineare.get()\n osservazione_lineare = osservazione_lineare.split(',')\n for oss in osservazione_lineare:\n if not str(oss).isalnum() or str(oss) == \"\":\n tkMsg.showerror(title=\"Osservazione errata\",\n message=\"L'osservazione lineare contiene caratteri non ammessi\")\n return\n trovato = False\n for arco in arcoComportamentale:\n if str(oss) == getattr(arco, 'observability'):\n trovato = True\n break\n if not trovato:\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"L'osservazione lineare contiene elementi non presenti nello spazio\")\n return\n\n confermaOsservazione(osservazione_lineare)\n\n def confermaOsservazione(osservazioneLineare):\n\n def mostraGraficoOsservazioneLineare(path):\n labelEsitoOssTitle.config(text=\"Risultato dell'osservazione lineare inserita post potatura\")\n reteOssImg = tk.PhotoImage(file=path + \".gv.png\")\n labelEsitoOss.config(image=reteOssImg)\n labelEsitoOss.image = reteOssImg\n\n lista_stati_oss = []\n lista_link_oss = []\n stato_comportamentale_oss = []\n arco_comportamentale_oss = []\n try:\n func_timeout(Constant.TIMEOUT, func=SpazioComportamentaleOsservazione.creaSpazioComportamentaleOsservazioneLineare,\n args=(automi, transizioni, links, lista_stati_oss, lista_link_oss, listaTransizioni,\n stato_comportamentale_oss, arco_comportamentale_oss, osservazioneLineare))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n '''\n Controllo che la rete comportamentale prodotta dall'osservazione abbia stati terminali\n '''\n terminale = False\n for stato in stato_comportamentale_oss:\n if getattr(stato, 'finale'):\n terminale = True\n break\n if not terminale:\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"L'osservazione inserita non produce una rete con stati terminali, correggere \"\n \"l'input, per 
favore.\")\n return\n\n max_indice = len(osservazioneLineare)\n titoloOss = \"\".join(osservazioneLineare)\n\n # Disegno dello spazio comportamentale relativo ad un'osservazione lineare\n nome_oss = PercorsoSalvataggio + NomeRete + \"Oss\" + titoloOss\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleIndiceOsservazione(\n arco_comportamentale_oss, nome_oss)\n\n # Potatura Osservazione\n try:\n func_timeout(Constant.TIMEOUT, func=SpazioComportamentaleOsservazione.PotaturaOss,\n args=(stato_comportamentale_oss, arco_comportamentale_oss))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n\n # Ridenominazione Osservazione\n stato_comportamentale_ridenominato_oss = []\n arco_comportamentale_ridenominato_oss = arco_comportamentale_oss.copy()\n SpazioComportamentaleOsservazione.RidenominaOss(stato_comportamentale_oss,\n stato_comportamentale_ridenominato_oss,\n arco_comportamentale_ridenominato_oss, max_indice)\n\n summary = SalvaRiassunto.PreparaSommario(automi, links, listaTransizioni,\n stato_comportamentale_ridenominato_oss, arco_comportamentale_ridenominato_oss, osservato=True)\n SalvaRiassunto.Salva(nome_oss, summary)\n if summary == -1:\n # Non accade più\n print(\"Il file esiste già\")\n\n # Disegno dello spazio comportamentale relativo ad un'osservazione lineare potato\n nome_pot_oss = PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss + \"Pot\"\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleIndiceOsservazione(\n arco_comportamentale_oss, nome_pot_oss)\n # Disegno dello spazio comportamentale relativo ad un'osservazione lineare potato e ridenominato\n nomeReteRidenominataOss = PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss + \"PotRid\"\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleRidenominatoOss(\n arco_comportamentale_ridenominato_oss, nomeReteRidenominataOss)\n\n # Riporto il disegno dello spazio comportamentale osservato potato nella finestra\n mostraGraficoOsservazioneLineare(PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss + \"Pot\")\n\n # Salvo la rete dello spazio comportamentale osservato\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.salvaSpazioComportamentaleOss(\n arco_comportamentale_oss, stato_comportamentale_oss,\n PercorsoSalvataggio + NomeRete.split('.')[0] + \"Oss\" + titoloOss, percorsoAutoma, percorsoLinks,\n percorsoTransizioni, osservazioneLineare)\n\n def TutteChiusureTime(statoComportamentale, arcoComportamentale):\n listaTempi = timeit.repeat(stmt=lambda: TutteChiusure(statoComportamentale, arcoComportamentale),\n setup='pass', number=1, repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/SpazioChiusure.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n TutteChiusure(statoComportamentale, arcoComportamentale)\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#a229ff'))\n plt.xlabel(\"Casi\")\n 
plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#a229ff'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n TutteChiusure(statoComportamentale, arcoComportamentale)\n\n def TutteChiusure(statoComportamentale, arcoComportamentale):\n\n def mostraGraficoSpazioChiusure(path):\n labelChiusuraTitle.config(text=\"Spazio delle chiusure\")\n chiusuraEsitoImg = tk.PhotoImage( file=path + \".gv.png\")\n labelDiagnosticatoreEsito.config(image=chiusuraEsitoImg)\n labelChiusuraEsito.image = chiusuraEsitoImg\n\n try:\n listaChiusure = func_timeout(Constant.TIMEOUT, func=ChiusuraSilenziosa.ChiusuraSilenziosa,\n args=(statoComportamentale, arcoComportamentale))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n try:\n listaStatiUscita, listaRegexUscita = func_timeout(Constant.TIMEOUT,\n func=Diagnosi.DiagnosiSingolaNonOss, args=(NomeRete, PercorsoSalvataggio, automi, transizioni,\n listaTransizioni, links, percorsoAutoma, percorsoLinks, percorsoTransizioni, statoComportamentale,\n arcoComportamentale, True, listaChiusure))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n try:\n listaArchiChiusure = func_timeout(Constant.TIMEOUT, func=ChiusuraSilenziosa.SpazioChiusureSilenziose,\n args=(listaChiusure, arcoComportamentale, listaStatiUscita, listaRegexUscita))\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n\n # Costruzione e salvataggio del sommario\n summary = SalvaRiassunto.PreparaSommarioSpazioChiusure(listaChiusure, listaArchiChiusure,\n osservata=False)\n SalvaRiassunto.Salva(PercorsoSalvataggio + \"SpazioChiusure\" + NomeRete, summary)\n if summary == -1:\n # Non accade più\n print(\"Il file esiste già\")\n\n ChiusuraSilenziosa.DisegnaSpazioChiusureSilenziose(listaChiusure, listaStatiUscita, listaRegexUscita,\n listaArchiChiusure, PercorsoSalvataggio+\"SpazioChiusure\"+NomeRete)\n\n #mostraGraficoSpazioChiusure(PercorsoSalvataggio + \"SpazioChiusure\" + NomeRete)\n\n def CostruisciDiagnosticatoreTime(statoComportamentale, arcoComportamentale):\n listaTempi = timeit.repeat(stmt=lambda:CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale),\n setup='pass', number=1, repeat=100)\n x = np.linspace(0, len(listaTempi), 100)\n plt.scatter(x, listaTempi)\n plt.xlabel(\"Esecuzioni\")\n plt.ylabel(\"Tempi\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/Diagnosticatore.png\", dpi=100)\n plt.show()\n avg = sum(listaTempi) / len(listaTempi)\n print(avg)\n\n listaAvg = []\n listaPic = []\n i = 0\n while i < 10:\n tracemalloc.start()\n CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale)\n i += 1\n first_item, second_item = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n listaAvg.append(first_item)\n listaPic.append(second_item)\n plt.bar(range(0, len(listaPic)), listaPic, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ffaa00'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Media - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + 
\"Memoria.png\", dpi=100)\n plt.show()\n plt.bar(range(0, len(listaPic)), listaPic, edgecolor='black', linewidth=1,\n color=matplotlib.colors.hex2color('#ffaa00'))\n plt.xlabel(\"Casi\")\n plt.ylabel(\"Occupazione Picchi - byte\")\n plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n plt.show()\n\n CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale)\n\n def CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale):\n\n def mostraGraficoDiagnosticatore(path):\n labelDiagnosticatoreTitle.config(text=\"Diagnosticatore\")\n diagnosticatoreEsitoImg = tk.PhotoImage(file=path + \".gv.png\")\n labelDiagnosticatoreEsito.config(image=diagnosticatoreEsitoImg)\n labelDiagnosticatoreEsito.image = diagnosticatoreEsitoImg\n\n listaChiusure = ChiusuraSilenziosa.ChiusuraSilenziosa(statoComportamentale, arcoComportamentale)\n listaStatiUscita, listaRegexUscita = Diagnosi.DiagnosiSingolaNonOss(NomeRete, PercorsoSalvataggio,\n automi=automi, transizioni=transizioni, links=links, listaTransizioni=listaTransizioni,\n percorsoAutoma=percorsoAutoma, percorsoLinks=percorsoLinks, percorsoTransizioni=percorsoTransizioni,\n statoComportamentale=statoComportamentale, arcoComportamentale=arcoComportamentale, chiusura=True,\n listaChiusure=listaChiusure)\n listaArchiChiusure = ChiusuraSilenziosa.SpazioChiusureSilenziose(listaChiusure, arcoComportamentale,\n listaStatiUscita, listaRegexUscita)\n\n listaDiagnosiChiusura = Diagnosi.DiagnosiDiagnosticatore(NomeRete, PercorsoSalvataggio, automi=automi,\n transizioni=transizioni, links=links, listaTransizioni=listaTransizioni,\n percorsoAutoma=percorsoAutoma, percorsoLinks=percorsoLinks, percorsoTransizioni=percorsoTransizioni,\n statoComportamentale=statoComportamentale, arcoComportamentale=arcoComportamentale, chiusura=True,\n listaChiusure=listaChiusure, osservata=False)\n\n # Salva sommario Diagnosticatore\n summary = SalvaRiassunto.PreparaSommarioDiagnosticatore(listaChiusure, listaDiagnosiChiusura,\n listaArchiChiusure, listaStatiUscita, listaRegexUscita)\n SalvaRiassunto.Salva(PercorsoSalvataggio + \"Diagnosticatore\" + NomeRete, summary)\n if summary == -1:\n print(\"Il file esiste già\")\n\n Diagnosticatore.DisegnaDiagnosticatore(listaChiusure, listaStatiUscita, listaRegexUscita,\n listaArchiChiusure, listaDiagnosiChiusura,PercorsoSalvataggio+\"Diagnosticatore\"+NomeRete,\n osservato=False)\n\n Diagnosticatore.SalvaDiagnosticatore(percorsoAutoma, percorsoLinks, percorsoTransizioni, listaChiusure,\n listaStatiUscita, listaRegexUscita, listaArchiChiusure, listaDiagnosiChiusura,\n PercorsoSalvataggio+\"Diagnosticatore\"+NomeRete, osservato=False)\n mostraGraficoDiagnosticatore(PercorsoSalvataggio + \"Diagnosticatore\" + NomeRete)\n\n finestraOp = tk.Toplevel(self)\n finestraOp.minsize(200, 200)\n finestraOp.title(\"Operazioni su Spazio Comportamentale non Osservato\")\n finestraOp.resizable = False\n finestraOp.configure(background=\"white\")\n '''\n PARTE RELATIVA ALLA SEZIONE OSSERVAZIONE LINEARE\n '''\n labelIstruzioniOss = tk.Label(finestraOp,\n text=\"Scrivere un'osservazione lineare nell'apposito campo di testo. Scrivere l'osservazione come\\n\"\n \"un elenco di eventi osservabili separati da solo una virgola. 
Non usare spazi od altri caratteri.\\n\"\n \"I grafi relativi alle osservazioni saranno salvati nella cartella dove si trova lo spazio comportamentale\")\n labelIstruzioniOss.pack()\n\n entryOsservazioneLineare = tk.Entry(finestraOp, width=100)\n entryOsservazioneLineare.var = tk.StringVar()\n entryOsservazioneLineare['textvariable'] = entryOsservazioneLineare.var\n entryOsservazioneLineare.var.trace_add('write', abilitaConferma)\n entryOsservazioneLineare.pack()\n\n buttonConferma = tk.Button(finestraOp, text=\"Conferma\", state=DISABLED, command=controlloOsservazione)\n buttonConferma.pack()\n labelEsitoOssTitle = tk.Label(finestraOp, text=\"\")\n labelEsitoOssTitle.pack()\n labelEsitoOss = tk.Label(finestraOp, text=\"\")\n labelEsitoOss.pack()\n '''\n RETE NON OSSERVATA NON PUO' ESEGUIRE DIAGNOSI\n '''\n '''\n PARTE RELATIVA ALLA SEZIONE CHIUSURE\n '''\n labelChiusura = tk.Label(finestraOp, text=\"Chiusura Silenziosa\")\n labelChiusura.pack()\n buttonChiusure = tk.Button(finestraOp, text=\"Crea Spazio Chiusure\", command=lambda : TutteChiusure(\n statoComportamentale=statoComportamentale, arcoComportamentale=arcoComportamentale))\n buttonChiusure.pack()\n labelChiusuraTitle = tk.Label(finestraOp, text=\"\")\n labelChiusuraTitle.pack()\n labelChiusuraEsito = tk.Label(finestraOp, text=\"\")\n labelChiusuraEsito.pack()\n '''\n PARTE RELATIVA ALLA COSTRUZIONE DI UN DIAGNOSTICATORE\n '''\n labelDiagnosticatore = tk.Label(finestraOp, text=\"Diagnosticatore\")\n labelDiagnosticatore.pack()\n buttonDiagnosticatore = tk.Button(finestraOp, text=\"Costruisci Diagnosticatore\", command=lambda :\n CostruisciDiagnosticatore(statoComportamentale, arcoComportamentale))\n buttonDiagnosticatore.pack()\n labelDiagnosticatoreTitle = tk.Label(finestraOp, text=\"\")\n labelDiagnosticatoreTitle.pack()\n labelDiagnosticatoreEsito = tk.Label(finestraOp, text=\"\")\n labelDiagnosticatoreEsito.pack()\n\n def OpDiagnosticatore(self, NomeRete, PercorsoSalvataggio, percorsoAutoma, percorsoLinks, percorsoTransizioni,\n osservata, listaChiusure, listaStatiUscita, listaRegexUscita, listaArchiInterChiusura,\n listaDiagnosiChiusure):\n\n def abilitaConferma(*_):\n if entryOsservazioneLineare.var.get() != \"\":\n buttonConferma['state'] = 'normal'\n else:\n buttonConferma['state'] = 'disabled'\n\n def controlloOsservazione():\n osservazione_lineare = entryOsservazioneLineare.get()\n osservazione_lineare = osservazione_lineare.split(',')\n for oss in osservazione_lineare:\n if not str(oss).isalnum() or str(oss) == \"\":\n tkMsg.showerror(title=\"Osservazione errata\",\n message=\"L'osservazione lineare contiene caratteri non ammessi\")\n return\n trovato = False\n for arco in listaArchiInterChiusura:\n if str(oss) == getattr(arco, 'observability'):\n trovato = True\n break\n if not trovato:\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"L'osservazione lineare contiene elementi non presenti nello spazio\")\n return\n confermaOsservazione(osservazione_lineare)\n\n def confermaOsservazione(osservazioneLineare):\n try:\n # listaTempi = timeit.repeat(stmt=lambda: Diagnosi.DiagnosiLineareOssDiagnosticatore(listaChiusure,\n # listaStatiUscita, listaRegexUscita, listaArchiInterChiusura, listaDiagnosiChiusure,\n # osservata, osservazioneLineare), setup='pass', number=1, repeat=100)\n # x = np.linspace(0, len(listaTempi), 100)\n # plt.scatter(x, listaTempi)\n # plt.xlabel(\"Esecuzioni\")\n # plt.ylabel(\"Tempi\")\n # plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/DiagnosiDiagnosticatore.png\", dpi=100)\n 
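# --- Illustrative sketch (not part of the original module): the pattern used by the live call below, and throughout this file, is to run a slow step under func_timeout and turn FunctionTimedOut into a user-facing error instead of a hang. slow_step() and the TIMEOUT constant are hypothetical stand-ins (the real code reads Constant.TIMEOUT).\nfrom func_timeout import func_timeout, FunctionTimedOut\n\nTIMEOUT = 2.0  # seconds\n\ndef slow_step(n):\n    return sum(range(n))\n\ntry:\n    result = func_timeout(TIMEOUT, func=slow_step, args=(10000000,))\n    print('done:', result)\nexcept FunctionTimedOut:\n    print('step exceeded', TIMEOUT, 's; the GUI shows an error dialog instead')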
# plt.show()\n # avg = sum(listaTempi) / len(listaTempi)\n # print(avg)\n #\n # listaAvg = []\n # listaPic = []\n # i = 0\n # while i < 10:\n # tracemalloc.start()\n # diagnosi = func_timeout(Constant.TIMEOUT, func=Diagnosi.DiagnosiLineareOssDiagnosticatore,\n # args=(listaChiusure, listaStatiUscita, listaRegexUscita, listaArchiInterChiusura,\n # listaDiagnosiChiusure, osservata, osservazioneLineare))\n # i += 1\n # first_item, second_item = tracemalloc.get_traced_memory()\n # tracemalloc.stop()\n # listaAvg.append(first_item)\n # listaPic.append(second_item)\n # plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n # color=matplotlib.colors.hex2color('#63b6ff'))\n # plt.xlabel(\"Casi\")\n # plt.ylabel(\"Occupazione Media - byte\")\n # plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Memoria.png\", dpi=100)\n # plt.show()\n # plt.bar(range(0, len(listaAvg)), listaAvg, edgecolor='black', linewidth=1,\n # color=matplotlib.colors.hex2color('#63b6ff'))\n # plt.xlabel(\"Casi\")\n # plt.ylabel(\"Occupazione Picchi - byte\")\n # plt.savefig(\"C:/Users/Alessandro/Desktop/ReteComportamentale/\" + NomeRete + \"Picchi.png\", dpi=100)\n # plt.show()\n\n diagnosi = func_timeout(Constant.TIMEOUT, func=Diagnosi.DiagnosiLineareOssDiagnosticatore,\n args=(listaChiusure, listaStatiUscita, listaRegexUscita, listaArchiInterChiusura,\n listaDiagnosiChiusure, osservata, osservazioneLineare))\n\n except FunctionTimedOut:\n tkMsg.showerror(title=Constant.titleTimeOut, message=Constant.messageTimeOut)\n return\n if diagnosi == -1: # Se torna -1 significa che l'osservazione non è stata vista tutta --> errore\n tkMsg.showerror(title=\"Errore osservazione\",\n message=\"Non è possibile eseguire la diagnosi relativa all'osservazione inserita.\")\n return\n Diagnosi.Salva(NomeRete, PercorsoSalvataggio, diagnosi)\n labelEsitoOss.config(text=diagnosi)\n\n finestraOpDiagnosticatore = tk.Toplevel(self)\n finestraOpDiagnosticatore.minsize(200, 200)\n finestraOpDiagnosticatore.title(\"Operazioni su Diagnosticatore\")\n finestraOpDiagnosticatore.resizable = False\n finestraOpDiagnosticatore.configure(background=\"white\")\n\n '''\n SEZIONE RELATIVA AD ESEGUIRE UN'OSSERVAZIONE LINEARE SU UN DIAGNOSTICATORE\n '''\n labelIstruzioniOss = tk.Label(finestraOpDiagnosticatore,\n text=\"Inserire un'osservazione lineare nell'apposito campo di testo. Scrivere l'osservazione come\\n\"\n \"un elenco di eventi osservabili separati da solo una virgola. 
Non usare spazi od altri caratteri.\\n\"\n \"I grafi relativi alle osservazioni saranno salvati nella cartella dove si trova il diagnosticatore\")\n labelIstruzioniOss.pack()\n\n entryOsservazioneLineare = tk.Entry(finestraOpDiagnosticatore, width=100)\n entryOsservazioneLineare.var = tk.StringVar()\n entryOsservazioneLineare['textvariable'] = entryOsservazioneLineare.var\n entryOsservazioneLineare.var.trace_add('write', abilitaConferma)\n entryOsservazioneLineare.pack()\n\n buttonConferma = tk.Button(finestraOpDiagnosticatore, text=\"Conferma\", state=DISABLED,\n command=controlloOsservazione)\n buttonConferma.pack()\n labelEsitoOss = tk.Label(finestraOpDiagnosticatore, text=\"\")\n labelEsitoOss.pack()\n\n def MostraReteCaricata(self, NomeRete, PercorsoSalvataggio, NomiAutomi):\n NomeRete = NomeRete.split('.')[0]\n finestraCaricamento = tk.Toplevel(self)\n finestraCaricamento.title(\"Dati della rete\")\n finestraCaricamento.configure(background=\"white\")\n\n frameRete = tk.Frame(finestraCaricamento)\n\n hScrollbar = tk.Scrollbar(frameRete, orient=HORIZONTAL)\n vScrollbar = tk.Scrollbar(frameRete, orient=VERTICAL)\n\n canvasRete = tk.Canvas(frameRete, yscrollcommand=vScrollbar.set, xscrollcommand=hScrollbar.set)\n vScrollbar.config(command=canvasRete.yview)\n hScrollbar.config(command=canvasRete.xview)\n\n frameRete.pack(fill=BOTH, expand=1)\n\n canvasRete = tk.Canvas(frameRete)\n canvasRete.pack(side=LEFT, fill=BOTH, expand=1)\n\n yScrollbarRete = tk.Scrollbar(frameRete, orient=VERTICAL, command=canvasRete.yview)\n yScrollbarRete.pack(side=RIGHT, fill=Y)\n xScrollbarRete = tk.Scrollbar(frameRete, orient=HORIZONTAL, command=canvasRete.xview)\n xScrollbarRete.pack(side=BOTTOM, fill=X)\n\n canvasRete.configure(yscrollcommand=yScrollbarRete.set)\n canvasRete.configure(xscrollcommand=xScrollbarRete.set)\n canvasRete.bind('', lambda e: canvasRete.configure(scrollregion=canvasRete.bbox(\"all\")))\n\n second_frame = tk.Frame(canvasRete)\n\n canvasRete.create_window((0, 0), window=second_frame, anchor=\"nw\")\n\n count = 1\n for automa in NomiAutomi:\n labelAutoma = tk.Label(second_frame, text=automa)\n labelAutoma.grid(row=count, column=0)\n\n automaImmagine = tk.PhotoImage(file=\"\" + PercorsoSalvataggio + automa + \".gv.png\")\n labelAutomaImg = tk.Label(second_frame, image=automaImmagine)\n labelAutomaImg.image = automaImmagine\n labelAutomaImg.grid(row=count, column=1)\n\n count += 1\n\n count += 1\n labelTopologia = tk.Label(second_frame, text=\"Topologia\")\n labelTopologia.grid(row=count, column=0)\n topologiaImg = tk.PhotoImage(file=\"\" + PercorsoSalvataggio + \"topologia.gv.png\")\n labelTopologiaImg = tk.Label(second_frame, image=topologiaImg)\n labelTopologiaImg.image = topologiaImg\n labelTopologiaImg.grid(row=count, column=1)\n\n count += 1\n labelSpazioComportamentalePotato = tk.Label(second_frame, text=\"Spazio Comportamentale Potato\")\n labelSpazioComportamentalePotato.grid(row=count, column=0)\n spzCompPotatoImg = tk.PhotoImage(file=\"\" + PercorsoSalvataggio + NomeRete + \"Potato.gv.png\")\n labelSpazioComportamentalePotatoImg = tk.Label(second_frame, image=spzCompPotatoImg)\n labelSpazioComportamentalePotatoImg.image = spzCompPotatoImg\n labelSpazioComportamentalePotatoImg.grid(row=count, column=1)\n\n count += 1\n labelSpzPotatoRidenominato = tk.Label(second_frame, text=\"Spazio Comportamentale Potato e Ridenominato\")\n labelSpzPotatoRidenominato.grid(row=count, column=0)\n spzCompPotRidenImg = tk.PhotoImage(file=\"\" + PercorsoSalvataggio + NomeRete + 
\"PotatoRidenominato.gv.png\")\n labelSpzPotRidenImg = tk.Label(second_frame, image=spzCompPotRidenImg)\n labelSpzPotRidenImg.image = spzCompPotRidenImg\n labelSpzPotRidenImg.grid(row=count, column=1)\n\n def MostraDiagnosticatoreCaricato(self, NomeFile, PercorsoSalvataggio, osservata):\n NomeFile = NomeFile.split('.')[0]\n NomeFile = NomeFile.replace('Diagnosticatore', '')\n finestraCaricamento = tk.Toplevel(self)\n finestraCaricamento.title(\"Dati del diagnosticatore\")\n finestraCaricamento.configure(background=\"white\")\n\n frameDiagnosticatore = tk.Frame(finestraCaricamento)\n\n hScrollbar = tk.Scrollbar(frameDiagnosticatore, orient=HORIZONTAL)\n vScrollbar = tk.Scrollbar(frameDiagnosticatore, orient=VERTICAL)\n\n canvasDiagnosticatore = tk.Canvas(frameDiagnosticatore, yscrollcommand=vScrollbar.set,\n xscrollcommand=hScrollbar.set)\n frameDiagnosticatore.pack(fill=BOTH, expand=1)\n\n canvasDiagnosticatore = tk.Canvas(frameDiagnosticatore)\n canvasDiagnosticatore.pack(side=LEFT, fill=BOTH, expand=1)\n\n yScrollbarDiagnosticatore = tk.Scrollbar(frameDiagnosticatore, orient=VERTICAL,\n command=canvasDiagnosticatore.yview)\n yScrollbarDiagnosticatore.pack(side=RIGHT, fill=Y)\n xScrollbarDiagnosticatore = tk.Scrollbar(frameDiagnosticatore, orient=HORIZONTAL,\n command=canvasDiagnosticatore.xview)\n xScrollbarDiagnosticatore.pack(side=BOTTOM, fill=X)\n\n canvasDiagnosticatore.configure(yscrollcommand=yScrollbarDiagnosticatore.set)\n canvasDiagnosticatore.configure(xscrollcommand=xScrollbarDiagnosticatore.set)\n canvasDiagnosticatore.bind('', lambda e: canvasDiagnosticatore.configure(scrollregion=canvasDiagnosticatore.bbox(\"all\")))\n\n second_frame = tk.Frame(canvasDiagnosticatore)\n\n canvasDiagnosticatore.create_window((0,0), window=second_frame, anchor=\"nw\")\n\n count = 1\n if os.path.exists(PercorsoSalvataggio + \"SpazioChiusure\" + NomeFile + \".gv.png\"):\n labelSpazioChiusure = tk.Label(second_frame, text=\"Spazio Chiusure\")\n labelSpazioChiusure.grid(row=count, column=0)\n spazioChiusureImg = tk.PhotoImage(file=\"\" + PercorsoSalvataggio + \"SpazioChiusure\" + NomeFile + \".gv.png\")\n labelSpazioChiusureImg = tk.Label(second_frame, image=spazioChiusureImg)\n labelSpazioChiusureImg.image = spazioChiusureImg\n labelSpazioChiusureImg.grid(row=count, column=1)\n count += 1\n\n labelDiagnosticatore = tk.Label(second_frame, text=\"Diagnosticatore\")\n labelDiagnosticatore.grid(row=count, column=0)\n diagnosticatoreImg = tk.PhotoImage(file=\"\" + PercorsoSalvataggio + \"Diagnosticatore\" + NomeFile + \".gv.png\")\n labelDiagnosticatoreImg = tk.Label(second_frame, image=diagnosticatoreImg)\n labelDiagnosticatoreImg.image = diagnosticatoreImg\n labelDiagnosticatoreImg.grid(row=count, column=1)\n count += 1\n\n def CaricaRete(self):\n\n # Metodo che apre la finestra per aprire una rete\n def apriFile(tipo):\n if tipo == 1:\n path = fd.askopenfilename()\n labelPathRete.config(text=path)\n if path != \"\":\n buttonConfermaRete['state'] = 'normal'\n else:\n buttonConfermaRete['state'] = 'disabled'\n elif tipo == 2:\n path = fd.askopenfilename()\n labelPathReteOss.config(text=path)\n if path != \"\":\n buttonConfermaReteOss['state'] = 'normal'\n else:\n buttonConfermaReteOss['state'] = 'disabled'\n elif tipo == 3:\n path = fd.askopenfilename()\n labelPathDiagnosticatore.config(text=path)\n if path != \"\":\n buttonConfermaDiagnosticatore['state'] = 'normal'\n else:\n buttonConfermaDiagnosticatore['state'] = 'disabled'\n\n # Metodo che importa la rete selezionata\n def importaFile(tipo):\n 
if tipo == 1:\n file = labelPathRete.cget(\"text\")\n elif tipo == 2:\n file = labelPathReteOss.cget(\"text\")\n elif tipo == 3:\n file = labelPathDiagnosticatore.cget(\"text\")\n percorso = file.split('/')\n nome_file = percorso.pop()\n # Faccio la stringa con solo il nome del file, cioè della rete\n nome_file = nome_file.split('.')[0]\n # Salvo la stringa con il solo percorso\n percorso = \"/\".join(percorso) + \"/\"\n with open(file) as json_file:\n dati = json.load(json_file)\n automi = []\n value = Automa.Automa.importaAutomiDaFile(automi, dati['file'][0])\n if value != 0:\n # Qualcosa è andato storto\n tkMsg.showerror(title=\"Errore\",\n message=\"Il file degli automi non è stato trovato al percorso indicato\")\n # Disegno degli automi\n nomiAutomi = []\n for automa in automi:\n nomiAutomi.append(automa.name)\n automa.disegnaAutoma(automa.edges, automa.final_states, percorso)\n # Importo dei link\n links = []\n value = Link.importaLinkDaFile(links, dati['file'][1])\n if value != 0:\n tkMsg.showerror(title=\"Errore\",\n message=\"Il file dei link non è stato trovato al percorso indicato\")\n # Disegno della topologia\n Link.Link.disegnaTopologia(links, percorso);\n # Importo delle transizioni\n lista_transizioni = []\n transizioni = []\n value = Transizione.importaTransizioniDaFile(lista_transizioni, transizioni, dati['file'][2])\n if value != 0:\n tkMsg.showerror(title=\"Errore\",\n message=\"Il file delle transizioni non è stato trovato al percorso indicato\")\n\n\n if tipo == 1: # Caso caricamento spazio comportamentale normale\n # Ricostruisco la lista degli stati dello spazio comportamentale\n stato_comportamentale = []\n # Disegno lo spazio comportamentale (essendo caricato è già potato)\n SpazioComportamentale.ArcoComportamentale.importaSpazioComportamentale(dati['arco'], nome_file,\n percorso)\n\n for stato in dati['stato']:\n stato_comportamentale.append(SpazioComportamentale.StatoComportamentale(stato['listaStati'],\n stato['listaLink'], stato['finale']))\n stato_comportamentale_ridenominato = []\n arco_comportamentale_ridenominato = []\n for arco in dati['arco']:\n pos_stato_partenza = 0\n for stato in stato_comportamentale:\n if (arco['stPart_st'] + arco['stPart_li']) == (stato.listaStati + stato.listaLink):\n break\n pos_stato_partenza += 1\n pos_stato_destinazione = 0\n for stato in stato_comportamentale:\n if (arco['stDest_st'] + arco['stDest_li']) == (stato.listaStati + stato.listaLink):\n break\n pos_stato_destinazione += 1\n\n arco_comportamentale_ridenominato.append(\n SpazioComportamentale.ArcoComportamentale(stato_comportamentale[pos_stato_partenza],\n stato_comportamentale[pos_stato_destinazione], arco['etichetta'], arco['observability'],\n arco['relevance']))\n\n # Costruisco lo spazio ridenominato\n SpazioComportamentale.Ridenomina(stato_comportamentale, stato_comportamentale_ridenominato,\n arco_comportamentale_ridenominato)\n\n # Disegnare spazio comportamentale ridenominato\n SpazioComportamentale.ArcoComportamentale.disegnaSpazioComportamentaleRidenominato(\n arco_comportamentale_ridenominato, percorso + nome_file + \"PotatoRidenominato\")\n # Apro la finestra con gli schemi della rete caricata\n CaricaRete.MostraReteCaricata(self, nome_file, percorso, nomiAutomi)\n # Apro la finestra per eseguire operazioni su una rete non osservata\n CaricaRete.OpReteNonOsservata(self, nome_file, percorso, automi=automi, transizioni=transizioni,\n listaTransizioni=lista_transizioni, links=links, percorsoAutoma=dati['file'][0],\n percorsoLinks=dati['file'][1], 
percorsoTransizioni=dati['file'][2],\n statoComportamentale=stato_comportamentale,\n arcoComportamentale=arco_comportamentale_ridenominato)\n elif tipo == 2: # caso spazio comportamentale osservato\n # Ricostruisco la lista degli stati dello spazio comportamentale\n stato_comportamentale = []\n # Disegno lo spazio comportamentale (essendo caricato è già potato)\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.importaSpazioComportamentaleOss(\n dati['arco'], nome_file, percorso)\n\n for stato in dati['stato']:\n stato_comportamentale.append(SpazioComportamentaleOsservazione.StatoComportamentaleIndiceOsservazione(\n stato['listaStati'], stato['listaLink'], stato['indiceOss'], stato['finale']\n ))\n for stato in dati['stato']:\n if stato['finale']:\n max_indice = stato['indiceOss']\n break\n\n stato_comportamentale_ridenominato = []\n arco_comportamentale_ridenominato = []\n for arco in dati['arco']:\n pos_stato_partenza = 0\n for stato in stato_comportamentale:\n if (arco['stPart_st'] + arco['stPart_li']) == (stato.listaStati + stato.listaLink)\\\n and (arco['stPart_in'] == stato.indice):\n break\n pos_stato_partenza += 1\n pos_stato_destinazione = 0\n for stato in stato_comportamentale:\n if (arco['stDest_st'] + arco['stDest_li']) == (stato.listaStati + stato.listaLink)\\\n and (arco['stDest_in'] == stato.indice):\n break\n pos_stato_destinazione += 1\n\n arco_comportamentale_ridenominato.append(\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione(\n stato_comportamentale[pos_stato_partenza], stato_comportamentale[pos_stato_destinazione],\n arco['etichetta'], arco['observability'], arco['relevance']))\n\n\n # Costruisco lo spazio ridenominato\n SpazioComportamentaleOsservazione.RidenominaOss(stato_comportamentale,\n stato_comportamentale_ridenominato, arco_comportamentale_ridenominato, max_indice)\n\n # Disegnare spazio comportamentale ridenominato\n SpazioComportamentaleOsservazione.ArcoComportamentaleOsservazione.disegnaSpazioComportamentaleRidenominatoOss(\n arco_comportamentale_ridenominato, percorso + nome_file + \"PotatoRidenominato\")\n # Apro la finestra con gli schemi della rete caricata\n CaricaRete.MostraReteCaricata(self, nome_file, percorso, nomiAutomi)\n # Apro la finestra con la scelta delle operazioni per rete osservata\n CaricaRete.OpReteOsservata(self, nome_file, percorso, automi=automi, transizioni=transizioni,\n listaTransizioni=lista_transizioni, links=links, percorsoAutoma=dati['file'][0],\n percorsoLinks=dati['file'][1], percorsoTransizioni=dati['file'][2],\n statoComportamentale=stato_comportamentale, arcoComportamentale=arco_comportamentale_ridenominato)\n elif tipo == 3: # caso caricamento diagnosticatore\n osservata = dati['osservata'][0]\n\n listaChiusure = []\n for chiusura in dati['chiusura']:\n nome = chiusura['nome']\n dati_chiusura = chiusura['datiChiusura']\n\n listaStati = []\n for statoChiusura in dati_chiusura['statoChiusura']:\n if osservata:\n stato = SpazioComportamentaleOsservazione.StatoComportamentaleIndiceOsservazione(\n statoChiusura['listaStati'], statoChiusura['listaLink'], statoChiusura['indice'],\n statoChiusura['finale'])\n else:\n stato = SpazioComportamentale.StatoComportamentale(statoChiusura['listaStati'],\n statoChiusura['listaLink'], statoChiusura['finale'])\n listaStati.append(stato)\n\n listaArchi = []\n for arcoChiusura in dati_chiusura['arcoChiusura']:\n if osservata:\n statoPartenza = SpazioComportamentaleOsservazione.StatoComportamentaleIndiceOsservazione(\n arcoChiusura['stPart_st'], 
arcoChiusura['stPart_li'], arcoChiusura['stPart_in'],\n arcoChiusura['stPart_fi'])\n statoDestinazione = SpazioComportamentaleOsservazione.StatoComportamentaleIndiceOsservazione(\n arcoChiusura['stDest_st'], arcoChiusura['stDest_li'], arcoChiusura['stDest_in'],\n arcoChiusura['stDest_fi'])\n else:\n statoPartenza = SpazioComportamentale.StatoComportamentale(arcoChiusura['stPart_st'],\n arcoChiusura['stPart_li'], arcoChiusura['stPart_fi'])\n statoDestinazione = SpazioComportamentale.StatoComportamentale(arcoChiusura['stDest_st'],\n arcoChiusura['stDest_li'], arcoChiusura['stDest_fi'])\n arco = ChiusuraSilenziosa.ArcoChiusuraSilenziosa(arcoChiusura['nomeChiusura'],\n statoPartenza, statoDestinazione, arcoChiusura['etichetta'],\n arcoChiusura['observability'], arcoChiusura['relevance'])\n listaArchi.append(arco)\n\n listaStatiUscita = []\n for statoUscita in dati_chiusura['statoUscita']:\n if osservata:\n stato = ChiusuraSilenziosa.StatoChiusuraSilenziosaOss(statoUscita['statoUscita_ch'],\n statoUscita['statoUscita_st'], statoUscita['statoUscita_li'],\n statoUscita['statoUscita_in'], statoUscita['statoUscita_fi'])\n else:\n stato = ChiusuraSilenziosa.StatoChiusuraSilenziosa(statoUscita['statoUscita_ch'],\n statoUscita['statoUscita_st'], statoUscita['statoUscita_li'],\n statoUscita['statoUscita_fi'])\n listaStatiUscita.append(stato)\n\n chiusura_ogg = ChiusuraSilenziosa.ChiusuraSilenziosaOgg(nome, listaStati, listaArchi,\n listaStatiUscita)\n listaChiusure.append(chiusura_ogg)\n\n listaArchiInterChiusura = []\n for arco in dati['arcoInterChiusura']:\n if osservata:\n statoPartenza = ChiusuraSilenziosa.StatoChiusuraSilenziosaOss(arco['stPart_ch'],\n arco['stPart_st'], arco['stPart_li'], arco['stPart_in'], arco['stPart_fi'])\n statoDestinazione = ChiusuraSilenziosa.StatoChiusuraSilenziosaOss(arco['stDest_ch'],\n arco['stDest_st'], arco['stDest_li'], arco['stDest_in'], arco['stDest_fi'])\n else:\n statoPartenza = ChiusuraSilenziosa.StatoChiusuraSilenziosa(arco['stPart_ch'],\n arco['stPart_st'], arco['stPart_li'], arco['stPart_fi'])\n statoDestinazione = ChiusuraSilenziosa.StatoChiusuraSilenziosa(arco['stDest_ch'],\n arco['stDest_st'], arco['stDest_li'], arco['stDest_fi'])\n\n listaArchiInterChiusura.append(ChiusuraSilenziosa.ArcoInterChiusuraSilenziosaOgg(arco['chPart'],\n arco['chDest'], statoPartenza, statoDestinazione, arco['etichetta'], arco['observability'],\n arco['relevance']))\n\n listaStatiUscita = []\n for statoUscita in dati['statoUscita']:\n if osservata:\n stato = ChiusuraSilenziosa.StatoChiusuraSilenziosaOss(statoUscita['chiusura'],\n statoUscita['listaStati'], statoUscita['listaLink'], statoUscita['indice'],\n statoUscita['finale'])\n else:\n stato = ChiusuraSilenziosa.StatoChiusuraSilenziosa(statoUscita['chiusura'],\n statoUscita['listaStati'], statoUscita['listaLink'], statoUscita['finale'])\n listaStatiUscita.append(stato)\n\n listaRegexUscita = []\n for regex in dati['regexUscita']:\n listaRegexUscita.append(regex['regex'])\n\n listaDiagnosiChiusure = []\n for diagnosiChiusura in dati['diagnosiChiusura']:\n listaDiagnosiChiusure.append(diagnosiChiusura['diagnosiChiusura'])\n\n Diagnosticatore.DisegnaDiagnosticatore(listaChiusure, listaStatiUscita, listaRegexUscita,\n listaArchiInterChiusura, listaDiagnosiChiusure,\n percorso+nome_file, osservato=osservata)\n\n CaricaRete.MostraDiagnosticatoreCaricato(self, nome_file, percorso, osservata)\n\n CaricaRete.OpDiagnosticatore(self, nome_file, percorso, automa, links, transizioni, osservata,\n listaChiusure, listaStatiUscita, 
listaRegexUscita, listaArchiInterChiusura, listaDiagnosiChiusure)\n\n\n finestraCaricamento = tk.Toplevel(self)\n finestraCaricamento.title(\"Pagina caricamento rete comportamentale\")\n finestraCaricamento.configure(background=\"white\")\n\n labelIstruzioni = tk.Label(finestraCaricamento,\n text=\"Premere sul bottone 'Carica Rete' per caricare una rete di uno spazio comportamentale.\\n\"\n \"Il file deve essere in formato txt e deve essere di una rete solo potata e che non ha\\n\"\n \"avuto altre operazioni svolte su di essa, come un'osservazione lineare. Se si desidera\\n\"\n \"caricare una rete che ha subito un'operazione come l'osservazione lineare, allora cliccare\\n\"\n \"sul bottone 'Carica Rete Oss.' per un corretto funzionamento del programma.\",\n background=\"white\").pack()\n '''\n SEZIONE: CARICA RETE NON OSSERVATA\n '''\n buttonApriRete = tk.Button(finestraCaricamento, text=\"Carica Rete\", command=lambda : apriFile(1))\n buttonApriRete.pack()\n labelPathRete = tk.Label(finestraCaricamento, text=\"\")\n labelPathRete.pack()\n buttonConfermaRete = tk.Button(finestraCaricamento, state='disabled', text=\"Conferma File Rete\",\n command=lambda : importaFile(1))\n buttonConfermaRete.pack()\n '''\n SEZIONE: CARICA RETE OSSERVATA\n '''\n buttonApriReteOss = tk.Button(finestraCaricamento, text=\"Carica Rete Oss.\", command=lambda : apriFile(2))\n buttonApriReteOss.pack()\n labelPathReteOss = tk.Label(finestraCaricamento, text=\"\")\n labelPathReteOss.pack()\n buttonConfermaReteOss = tk.Button(finestraCaricamento, state='disabled', text=\"Conferma File Rete Oss.\",\n command=lambda : importaFile(2))\n buttonConfermaReteOss.pack()\n '''\n SEZIONE: CARICA DIAGNOSTICATORE\n '''\n buttonApriDiagnosticatore = tk.Button(finestraCaricamento, text=\"Carica Diagnosticatore\", command=lambda :\n apriFile(3))\n buttonApriDiagnosticatore.pack()\n labelPathDiagnosticatore = tk.Label(finestraCaricamento, text=\"\")\n labelPathDiagnosticatore.pack()\n buttonConfermaDiagnosticatore = tk.Button(finestraCaricamento, state='disabled',\n text=\"Conferma Diagnosticatore\", command=lambda : importaFile(3))\n buttonConfermaDiagnosticatore.pack()\n\n\n tk.Button(finestraCaricamento, text=\"Indietro\", command=finestraCaricamento.destroy).pack()\n","sub_path":"ASD/CaricaRete.py","file_name":"CaricaRete.py","file_ext":"py","file_size_in_byte":80351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"413450687","text":"# 5- Fazer rotinas para figuras a serem utilizadas no desenho de uma casa\nimport turtle\n\n\ncaneta = turtle.Turtle()\n\n\n# Funcao para desenha quadrado(parede) e triangulo equilatero(telhado)\ndef poligono_regular(n_lados, comp_lado, turtle):\n angulo_total = 360 / n_lados\n angulo_parcial = 360 / n_lados\n while (angulo_total <= 361):\n turtle.forward(comp_lado)\n turtle.setheading(angulo_total)\n angulo_total = angulo_total + angulo_parcial\n\n\n# Funcao para desenha retangulo(porta)\ndef paralelogramo(angulo_1, angulo_2,base,altura,turtle):\n contador = 0\n angulo_total = 0\n while (contador < 4):\n if (contador % 2 == 0):\n angulo_total= angulo_total + angulo_2\n turtle.forward(base)\n turtle.setheading(angulo_total)\n contador = contador +1\n else:\n angulo_total= angulo_total + angulo_1\n turtle.forward(altura)\n turtle.setheading(angulo_total)\n contador= contador +1\n\n\n# Desenhando Casa\npoligono_regular(4, 100, 
caneta)\ncaneta.penup()\ncaneta.sety(100)\ncaneta.down()\npoligono_regular(3,100,caneta)\ncaneta.up()\ncaneta.sety(0)\ncaneta.setx(35)\ncaneta.down()\nparalelogramo(90,90,30,50,caneta)\ninput()","sub_path":"ComputacaoGraficaFabricio/Exercicios/01/Codigo/Exercicio01Item05.py","file_name":"Exercicio01Item05.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"226033267","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# File Name : for_example.py\n\n\ndef main():\n    \"\"\"\n    Control Statements chapter\n    Looping Statements section\n    The for statement\n    \"\"\"\n\n    # use for over a specific range of values\n    for nRead in range(1, 6):\n        print("%d: Hello World!" % nRead)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"fundamental/chapter_3/for_example.py","file_name":"for_example.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"398725177","text":"# -*- coding:utf-8 -*-\nimport sys\nimport itertools\nfrom pathlib import Path\n\nworkspace = './'\nsys.path.append(workspace)\n\n\nfrom model import mlp_model, rf_model, svm_model\n# from utils import get_signal_plot\nfrom utils.plots import *\n\nif __name__ == '__main__':\n    date = '0517'\n    if len(sys.argv) == 1:\n        feature_data = \"cache/E017/feature/bin_10_400kb.xlsx\"\n    else:\n        feature_data = sys.argv[1]\n\n# Generating all combinations of following features to be excluded\nnot_available = ['H3K4me3', 'H3K4me2', 'CTCF', 'H3K9ac']\ndata_list = [False]\nexclude = True\nfor i in range(1, 5):\n    data_list += [j for j in itertools.combinations(not_available, i)]\nprint(data_list)\n\n# MLP model\nprint(\"MLP...\")\nresult_folder_mlp = Path('results/{0}_corrected_feature_exclusion'.format(date))\nfor excluded_features in data_list:\n    print(excluded_features)\n    mlp_model.mlp_result(feature_data, result_folder_mlp, hist_list=excluded_features, exclude=exclude, date=date, input_type='xlsx')\nplot_roc_folder(result_folder_mlp, result_folder_mlp/'{0}_mlp_ROC_curve_excluded_features.png'.format(date), 'ROC comparison: MLP on original data')\n\n# RF model\nprint(\"RF...\")\nresult_folder_rf = Path('results/{0}_corrected_feature_exclusion'.format(date))\nfor excluded_features in data_list:\n    print(excluded_features)\n    rf_model.rf_result(feature_data, result_folder_rf, hist_list=excluded_features, exclude=exclude, date=date, input_type='xlsx')\nplot_roc_folder(result_folder_rf, result_folder_rf/'{0}_rf_ROC_curve_excluded_features.png'.format(date), 'ROC comparison: RF on original data')\n ","sub_path":"src/feature_exclusion.py","file_name":"feature_exclusion.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"472915559","text":"from math import gcd  # gcd lives in math since Python 3.5; fractions.gcd was removed in 3.9\n\ndef yakusuu(a):\n    # collect every divisor of a in ascending order\n    yakusuu = list()\n    for i in range(1,a+1):\n        if a % i == 0:\n            yakusuu.append(i)\n    return yakusuu\n\nA, B, K = map(int, input().split())\n\n# the K-th largest common divisor of A and B is a divisor of gcd(A, B)\nans = yakusuu(gcd(A,B))[-K]\nprint(ans)\n\n","sub_path":"atcoder/ABC/120/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"73646901","text":"import numpy as np\r\nimport os\r\nimport sys\r\nCUR_DIR = os.path.dirname(os.path.realpath(__file__))\r\nsys.path.append(os.path.join(CUR_DIR, \"../CGNet\"))\r\nimport geometries\r\nimport gzip\r\nimport 
logging\r\nimport sys\r\nimport random\r\nimport datetime\r\n\r\nIMAGE_SIZE = 28\r\nNUM_CHANNELS = 1\r\nPIXEL_DEPTH = 255\r\nNUM_LABELS = 10\r\nREAL_PART=0\r\nIMAG_PART=1\r\n \r\nCUR_DIR = os.path.dirname(os.path.realpath(__file__))\r\n\r\ndef extract_data(filename, num_images):\r\n    print('Extracting data', filename)\r\n    # pick gzip for .gz archives, plain binary reads otherwise\r\n    open_func = gzip.open if filename.endswith(\".gz\") else (lambda x:open(x,\"rb\"))\r\n    #with gzip.open(filename) as bytestream:\r\n    #with open(filename, 'rb') as bytestream:\r\n    with open_func(filename) as bytestream:\r\n        bytestream.read(16)\r\n        buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\r\n        data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\r\n        #data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\r\n        data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE)\r\n        return data / (PIXEL_DEPTH + 0.1)\r\n\r\ndef extract_labels(filename, num_images):\r\n    print('Extracting label', filename)\r\n    with open(filename, 'rb') as bytestream:\r\n        bytestream.read(8)\r\n        buf = bytestream.read(1 * num_images)\r\n        labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\r\n    return labels\r\n\r\ndef _get_input_from2D(img2D, idx=None, lmax=14, rotate_angles=None, \r\n                      complexFlag=True,\r\n                      separateParts=False,\r\n                      sphs=None):\r\n    coords, hs = geometries.get_coef_grid(img2D)\r\n    if rotate_angles is not None:\r\n        beta, alpha = geometries.rotate_coords(coords[:,0], coords[:,1], rotate_angles)\r\n    else:\r\n        beta = coords[:,0]\r\n        alpha = coords[:,1]\r\n    coefs, sphs = geometries.get_coef_C(hs, beta, alpha, lmax=lmax, chop_coeffs=False, complexFlag=complexFlag, sph=sphs)\r\n    if separateParts:\r\n        reals = coefs.real\r\n        imags = coefs.imag\r\n        f_0 = (reals, imags)\r\n        return f_0, sphs, hs, beta, alpha\r\n    else:\r\n        return coefs, sphs, hs, beta, alpha\r\n\r\ndef get_inputs_from2D(lmax, img2Ds, random_rotate=False):\r\n    logger = logging.getLogger(\"_datautils_logger\")\r\n    if random_rotate:\r\n        logger.info(\"random rotate..\")\r\n    d = len(img2Ds.shape)\r\n    if (d > 2):\r\n        sphs = None\r\n        batch_size = len(img2Ds)\r\n        fs = []\r\n        for i in range(batch_size):\r\n            angles = 2*np.pi*np.random.uniform(size=3) if random_rotate else None\r\n            if i % 10 == 0 and logger is not None:\r\n                #print(i)\r\n                logger.info(\"working on {}\".format(i))\r\n            coefs, sphs,_,_,_ = _get_input_from2D(img2Ds[i], idx=i, lmax=lmax, rotate_angles=angles, sphs=sphs if not random_rotate else None)\r\n            fs.append(coefs)\r\n        f_0 = np.stack(fs, 0)\r\n    else:\r\n        angles = 2*np.pi*np.random.uniform(size=3) if random_rotate else None\r\n        #angles = tuple([random.random()*2.*np.pi for i in range(3)]) if random_rotate else None\r\n        batch_size = 0\r\n        # single 2D image: pass it directly and use the lmax argument of this module-level function\r\n        f_0, _, _, _, _ = _get_input_from2D(img2Ds, lmax=lmax, rotate_angles=angles)\r\n    return f_0\r\n\r\n\r\ndef np_save_safe(file_path, data):\r\n    folder_path = os.path.dirname(file_path)\r\n    if not os.path.isdir(folder_path):\r\n        os.makedirs(folder_path)\r\n    print(\"Saving to {}\".format(file_path))\r\n    np.save(file_path, data)\r\n\r\n\r\ndef precomputing_coefs(data_folder, lmax, \r\n                       NUM_IMAGES_TO_USE=1000, \r\n                       data_file_name=\"train-images.idx3-ubyte\",\r\n                       st=None,\r\n                       ed=None,\r\n                       coeftype=\"train\"):\r\n    rotate = \"rotate\" in coeftype\r\n    coefs_folder = os.path.join(CUR_DIR, \"precomputed_coefs/\")\r\n    print(\"in datautils.precomputing_coefs: doing %s\"%coeftype)\r\n    if st is None or ed is None:\r\n        file_path, num_got = _find_old_file(coefs_folder,lmax,NUM_IMAGES_TO_USE,coeftype=coeftype)\r\n        if file_path is not None:\r\n            data = 
np.load(file_path)[0:NUM_IMAGES_TO_USE,0:((lmax+1)**2)]\r\n if (num_got < NUM_IMAGES_TO_USE):\r\n print(\"WARNING - did not read enough data! computing the rest...\")\r\n file_path = os.path.join(os.path.join(coefs_folder,coeftype),\"L_{}_N_{}.npy\".format(lmax, NUM_IMAGES_TO_USE))\r\n all_data = extract_data(os.path.join(data_folder, data_file_name), NUM_IMAGES_TO_USE)\r\n newly_read = get_inputs_from2D(lmax, all_data[num_got:NUM_IMAGES_TO_USE], random_rotate=rotate)\r\n data = newly_read if num_got == 0 else np.concatenate([data, newly_read],0)\r\n np_save_safe(file_path, data)\r\n else:\r\n file_path = os.path.join(coefs_folder, \"separate/{}/L_{}_st_{}_ed_{}.npy\".format(coeftype, lmax, st, ed))\r\n all_data = extract_data(os.path.join(data_folder, data_file_name), ed)[st:ed]\r\n\r\n data = get_inputs_from2D(lmax, all_data, random_rotate=rotate)\r\n np_save_safe(file_path, data)\r\n return data\r\n\r\n#def split_data(data_real, data_imag, ratios=[0.9,0.05,0.05]):\r\ndef split_data(data_to_split, ratios=[0.9,0.05,0.05]):\r\n s = float(ratios[0] + ratios[1] + ratios[2])\r\n r0 = ratios[0]/s\r\n r1 = r0 + ratios[1] / s\r\n n = len(data_to_split)\r\n data_train = data_to_split[0:int(r0*n)]\r\n data_valid = data_to_split[int(r0*n):int(r1*n)]\r\n data_test = data_to_split[int(r1*n):n]\r\n return data_train, data_valid, data_test\r\n\r\ndef _find_old_file(coefs_folder, lmax, NUM_IMAGES_TO_USE, coeftype=\"train\"):\r\n data_folder = os.path.join(coefs_folder, coeftype)\r\n if not os.path.isdir(data_folder):\r\n os.makedirs(data_folder)\r\n files = [x for x in os.listdir(data_folder) if x.endswith('.npy')]\r\n d = {}\r\n max_N = 0\r\n for f in files:\r\n _, L, _, N = f.split('.')[0].split(\"_\")\r\n L = int(L)\r\n N = int(N)\r\n if L >= lmax:\r\n if N >= NUM_IMAGES_TO_USE:\r\n return os.path.join(data_folder, f), N\r\n elif N > max_N:\r\n max_N = N\r\n d[N] = os.path.join(data_folder, f)\r\n return d.get(max_N), max_N\r\n\r\n#TODO: fix unzipping issue (now need to manually unzip)\r\ndef download_MNIST_and_precompute_all(n=60000, work_directory=os.path.join(CUR_DIR, \"temp\"), st=None, ed=None, istrain=True, rotate=False):\r\n import gzip\r\n import os\r\n import numpy\r\n import urllib\r\n if not os.path.isdir(work_directory):\r\n os.makedirs(work_directory)\r\n log_name = \"precompute.log\" if st is None or ed is None else \"pre_{}_{}_{}.log\".format(st,ed, \"train\" if istrain else \"test\")\r\n logging.basicConfig(filename=os.path.join(os.path.join(CUR_DIR,\"temp\"),log_name), level=logging.INFO)\r\n filenames = [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\", \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]\r\n def _download(filename, folder,SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'):\r\n file_path = os.path.join(folder, filename)\r\n new_filename = filename.replace(\".gz\", \"\").replace(\"-idx\", \".idx\")\r\n if not os.path.exists(file_path):\r\n file_path, _ = urllib.urlretrieve(SOURCE_URL + filename, file_path)\r\n if not os.path.exists(os.path.join(folder, new_filename)):\r\n import gzip\r\n with gzip.open(file_path, 'rb') as infile:\r\n with open(os.path.join(folder, new_filename), 'wb') as outfile:\r\n for line in infile:\r\n outfile.write(line)\r\n return new_filename\r\n for i in range(len(filenames)):\r\n filenames[i] = _download(filenames[i], work_directory)\r\n \r\n #filenames = [\"train-images.idx3-ubyte\", \"train-labels.idx1-ubyte\", \"t10k-images.idx3-ubyte\", \"t10k-labels.idx1-ubyte\"]\r\n\r\n coeftype = (\"train\" if istrain else 
\"test\") + (\"_rotate\" if rotate else \"\")\r\n if st is None or ed is None:\r\n precomputing_coefs(work_directory, 12, n, data_file_name=filenames[0 if istrain else 2], coeftype=coeftype)\r\n else:\r\n precomputing_coefs(work_directory, 12, st=st, ed=ed, data_file_name=filenames[0 if istrain else 2], coeftype=coeftype)\r\n return None\r\n\r\nif __name__ == \"__main__\":\r\n np.random.seed(1)\r\n print(CUR_DIR)\r\n if len(sys.argv) == 2:\r\n if sys.argv[1] == \"all\":\r\n download_MNIST_and_precompute_all(n=60000, istrain=True, rotate=False)\r\n download_MNIST_and_precompute_all(n=10000, istrain=False, rotate=False)\r\n download_MNIST_and_precompute_all(n=60000, istrain=True, rotate=True)\r\n download_MNIST_and_precompute_all(n=10000, istrain=False, rotate=True)\r\n elif len(sys.argv) == 4:\r\n st = int(sys.argv[1])\r\n ed = int(sys.argv[2])\r\n #0: train unrotate\r\n #1: test unrotate\r\n #2: train rotate\r\n #3: test rotate\r\n istrain = int(sys.argv[3])%2 == 0\r\n rotate = int(sys.argv[3])//2 == 1\r\n starttime = datetime.datetime.now()\r\n download_MNIST_and_precompute_all(st=st,ed=ed, istrain=istrain, rotate=rotate)\r\n print(\"start {}, now {}\".format(starttime,datetime.datetime.now()))","sub_path":"MNIST/datautils.py","file_name":"datautils.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"356083033","text":"import cdsapi\nimport os\nimport datetime\nfrom shutil import rmtree\nfrom dataset import dataset as ds\n\ndef get_grib_files(job):\n current_datetime = job.start_date_time\n interval_timedelta = datetime.timedelta(hours=job.interval)\n number = 1\n while current_datetime <= job.end_date_time:\n print(\"Looking up \" + str(current_datetime))\n make_request(number, job.job_id,\n str(current_datetime.year),\n str(current_datetime.month),\n str(current_datetime.day),\n \"%02d:00\" % current_datetime.hour,\n job.dataset)\n current_datetime += interval_timedelta\n number += 1\n\ndef make_request(number, job_id, year, month, day, time, dataset):\n\n filename = str(job_id) +\"/%09d\" % number\n if not os.path.exists('downloads/%s' % job_id):\n os.makedirs('downloads/%s' % job_id)\n\n c = cdsapi.Client()\n\n request_params = {\n 'year':year,\n 'month':month,\n 'day':day,\n 'time':time,\n }\n\n extra_params = ds[dataset].keys()\n for k in extra_params:\n request_params[k] = ds[dataset][k]\n\n print(request_params)\n\n r = c.retrieve(\n 'reanalysis-era5-single-levels', request_params)\n r.download('downloads/%s.grib' % filename)\n\ndef clean_up_temporary_files(job_id):\n rmtree(\"downloads/%s\" % job_id)\n","sub_path":"cdsapi_wrapper.py","file_name":"cdsapi_wrapper.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"24218843","text":"import numpy as np\nfrom sklearn.decomposition import PCA\nimport logging\n\ndef python_pca(X,n_components=False):\n\n M = np.mean(X.T,axis=1)\n C = X - M\n V = np.cov(C.T)\n values,vectors = np.linalg.eig(V)\n P = vectors.T.dot(C.T)[:n_components,:].T\n\n return(P)\n\ndef coil_pca(coil_ims,coil_dim=-1,n_components=4,give_explained_var=False,debug_level=logging.WARNING):\n '''Reduce the dimensionality of the coil dimension using PCA.\n\n coil_dim -- Coil axis, default is last axis.\n coil_ims -- Coil images.\n n_components -- How many principal components to keep.\n give_explained_var -- Return explained variance for real,imag decomposition\n debug_level -- 
Verbosity level to set logging module.\n\n give_explained_var=True will return (coil_ims_pca,expl_var). expl_var is a\n complex valued 1D vector representing:\n cumsum(pca_real.explained_variance_ratio_) +\n 1j*cumsum(pca_imag.explained_variance_ratio_)\n\n Thus, if you were so inclined, you could take a look and see how many\n components you'd need to explain the variance up to some percentage.\n '''\n\n # Every day I'm logging...\n logging.basicConfig(format='%(levelname)s: %(message)s',level=debug_level)\n logging.info('Starting coil_pca: initial size: %s' % str(coil_ims.shape))\n\n # Get data in form (n_samples,n_features)\n coil_ims = np.moveaxis(coil_ims,coil_dim,-1)\n n_features = coil_ims.shape[-1]\n im_shape = coil_ims.shape[:-1]\n coil_ims = np.reshape(coil_ims,(-1,n_features))\n logging.info('Number of features: %d' % n_features)\n\n # Do PCA on both real/imag parts\n logging.info('Performing PCA on real/imag parts...')\n pca_real = PCA(n_components=n_components)\n pca_imag = PCA(n_components=n_components)\n coil_ims_real = pca_real.fit_transform(coil_ims.real)\n coil_ims_imag = pca_imag.fit_transform(coil_ims.imag)\n\n coil_ims_pca = (coil_ims_real + 1j*coil_ims_imag).reshape((*im_shape,n_components))\n\n # Move coil dim back to where it was\n coil_ims_pca = np.moveaxis(coil_ims_pca,-1,coil_dim)\n\n logging.info('Resulting size: %s' % str(coil_ims_pca.shape))\n logging.info('Number of components: %d' % n_components)\n\n if give_explained_var:\n logging.info('Returning explained_variance_ratio for both real and imag PCA decompositions.')\n logging.info('Do mr_utils.view(expl_var.real) to see the plot for the real part.')\n expl_var = np.cumsum(pca_real.explained_variance_ratio_) + 1j*np.cumsum(pca_imag.explained_variance_ratio_)\n return(coil_ims_pca,expl_var)\n else:\n return(coil_ims_pca)\n\nif __name__ == '__main__':\n pass\n","sub_path":"mr_utils/coils/coil_combine/coil_pca.py","file_name":"coil_pca.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"357866491","text":"'''\nBenders' Decomposition:\n Branch and cut\n Multiple scenario generation\n Improved Integer cut generation\n Variable Neighbourhood Branching (After Root Nodes) Version 2\n Proximity search\n\nDu Bo\n'''\nimport model_rflp as mr\nfrom gurobipy import *\nimport time\n\n\ndef bra_cut(Time_Limit, p, cd, cdk, sk, a1, tl_total, tl_node, tl_pr_node, tl_pr_total, branch_step, stop_gap, pr_terminate, pr_step):\n convergence = []\n Heu_sol = []\n # Number of nodes\n ni = len(cd)\n nk = len(cdk)\n # weights of two stages\n a2 = 1 - a1\n try:\n def mycallback(model, where): # callback fuction: benders cut & integer cut\n if where == GRB.Callback.MIP:\n if TSRFLP.LB_terminate == 1 and TSRFLP.LB_branch == 0:\n if TSRFLP.vn_end == 1:\n if time.time() - pr_node_time >= tl_pr_node:\n model.terminate()\n if time.time() - pr_time >= tl_pr_total:\n model.terminate()\n TSRFLP.pr_end = 1\n else:\n if time.time() - LB_time >= tl_node:\n model.terminate()\n if time.time() - vn_time >= tl_total:\n model.terminate() #\n TSRFLP.vn_end = 1\n objbst = model.cbGet(GRB.Callback.MIP_OBJBST)\n objbnd = model.cbGet(GRB.Callback.MIP_OBJBND)\n if objbst < 1e10:\n if TSRFLP.LB_terminate == 1 and TSRFLP.vn_end == 0:\n # print('22222222222222222')\n if convergence != [] and objbst <= convergence[-1][0]:\n convergence.append(\n [convergence[-1][0], convergence[-1][1], time.time() - start_time])\n convergence.append(\n [objbst, TSRFLP.bestbound, 
time.time() - start_time])\n # if TSRFLP.vn_end == 1 and TSRFLP.pr_end == 0:\n # print('3333333333333333')\n # print(TSRFLP.a1*TSRFLP.value_L+TSRFLP.a2*TSRFLP.value_omega)\n # if convergence[-1][0] == 328.49999999999983:\n # aaa = 1\n # obj_now=TSRFLP.a1*TSRFLP.value_L+TSRFLP.a2*TSRFLP.value_omega\n # if obj_now <= convergence[-1][0]:\n # convergence.append([convergence[-1][0],convergence[-1][1],time.time()-start_time])\n # convergence.append([obj_now,TSRFLP.bestbound,time.time()-start_time])\n if TSRFLP.pr_end == 1 and TSRFLP.LB_branch == 1:\n # print('444444444444444444')\n if convergence != [] and objbnd >= TSRFLP.bestbound and objbst <= convergence[-1][0]:\n convergence.append(\n [convergence[-1][0], convergence[-1][1], time.time() - start_time])\n convergence.append(\n [objbst, objbnd, time.time() - start_time])\n if TSRFLP.LB_terminate == 0:\n # print('111111111111111')\n if convergence != []:\n convergence.append(\n [convergence[-1][0], convergence[-1][1], time.time() - start_time])\n convergence.append(\n [objbst, objbnd, time.time() - start_time])\n nodecnt = model.cbGet(GRB.Callback.MIP_NODCNT)\n if time.time() - start_time >= Time_Limit and nodecnt > 0: # Stop criteria\n model.terminate()\n if where == GRB.Callback.MIPSOL:\n nodecnt = model.cbGet(GRB.Callback.MIPSOL_NODCNT)\n objbst = model.cbGet(GRB.Callback.MIPSOL_OBJBST)\n objbnd = model.cbGet(GRB.Callback.MIPSOL_OBJBND)\n vals = model.cbGetSolution(model._vars)\n TSRFLP.value_y = vals[-3 - ni:-3]\n if TSRFLP.warm == 'over':\n # make sure y are binary\n TSRFLP.value_y = [round(x) for x in TSRFLP.value_y]\n TSRFLP.value_omega = vals[-1]\n TSRFLP.value_L = vals[-2] # for recording objval in prox\n if nodecnt > 200 and TSRFLP.LB_terminate == 0: # LB right after root node\n TSRFLP.LB_terminate = 1\n TSRFLP.bestbound = objbnd\n model.terminate()\n if TSRFLP.value_y not in TSRFLP.save_y and TSRFLP.value_y not in TSRFLP.save_y_int:\n TSRFLP.update_sub_dual(callback=1)\n TSRFLP.sub_dual.optimize()\n max_Lk = TSRFLP.worst_scenario()\n SP_Qk = [i.x for i in TSRFLP.sub_dual.getVars()\n [-TSRFLP.nk:]]\n save_sub = TSRFLP.get_subdual_vals()\n TSRFLP.save_max_Lk_DualLP.append(\n [TSRFLP.value_y, TSRFLP.max_Lk, SP_Qk, save_sub])\n TSRFLP.save_y.append(TSRFLP.value_y)\n if max_Lk[0] - TSRFLP.value_omega >= 1e-4: # ----benders cut----\n TSRFLP.update_multiple_scenario(SP_Qk)\n else: # ----integer cut----\n TSRFLP.update_sub(callback=1)\n TSRFLP.sub_model.optimize()\n TSRFLP.worst_scenario(1) # calculate max L3\n TSRFLP.save_max_Lk_SP.append(\n [TSRFLP.value_y, TSRFLP.max_Lk])\n TSRFLP.save_y_int.append(TSRFLP.value_y)\n TSRFLP.gap_calculation(1) # calculate int_gap\n if TSRFLP.int_gap >= 1e-4:\n TSRFLP.update_integer_cut()\n model.cbLazy(TSRFLP.omega >= TSRFLP.integer_cut)\n else:\n if TSRFLP.pr_end == 0 and TSRFLP.vn_end == 1:\n if objbst < pr_terminate: # terninate at hard incumbent\n TSRFLP.pr_end = 1\n# model.terminate()\n obj_now = TSRFLP.a1 * TSRFLP.value_L + TSRFLP.a2 * TSRFLP.value_omega\n if obj_now <= convergence[-1][0]:\n # print('333333333333333333333333333')\n convergence.append(\n [convergence[-1][0], convergence[-1][1], time.time() - start_time])\n convergence.append(\n [obj_now, TSRFLP.bestbound, time.time() - start_time])\n if obj_now <= TSRFLP.Branching_record[0]:\n TSRFLP.Branching_record = [\n obj_now, vals, time.time() - start_time]\n else:\n save_index = [(i, x.index(TSRFLP.value_y)) for i, x in enumerate(\n TSRFLP.save_max_Lk_DualLP) if TSRFLP.value_y in x]\n # ----benders cut----\n if 
TSRFLP.save_max_Lk_DualLP[save_index[0][0]][1][0] - TSRFLP.value_omega >= 1e-4:\n TSRFLP.update_multiple_scenario(TSRFLP.save_max_Lk_DualLP[save_index[0][0]][2],\n TSRFLP.save_max_Lk_DualLP[save_index[0][0]][3])\n else:\n save_index = [(i, x.index(TSRFLP.value_y)) for i, x in enumerate(\n TSRFLP.save_max_Lk_SP) if TSRFLP.value_y in x]\n if save_index != []:\n if TSRFLP.save_max_Lk_SP[save_index[0][0]][1][0] - TSRFLP.value_omega >= 1e-4:\n TSRFLP.update_integer_cut(\n 0, TSRFLP.save_max_Lk_SP[save_index[0][0]][1])\n model.cbLazy(TSRFLP.omega >=\n TSRFLP.integer_cut)\n else:\n TSRFLP.update_sub(callback=1)\n TSRFLP.sub_model.optimize()\n TSRFLP.worst_scenario(1) # calculate max L3\n TSRFLP.save_max_Lk_SP.append(\n [TSRFLP.value_y, TSRFLP.max_Lk])\n TSRFLP.save_y_int.append(TSRFLP.value_y)\n TSRFLP.gap_calculation(1) # calculate int_gap\n if TSRFLP.int_gap >= 1e-4:\n TSRFLP.update_integer_cut()\n model.cbLazy(TSRFLP.omega >=\n TSRFLP.integer_cut)\n else:\n if TSRFLP.pr_end == 0 and TSRFLP.vn_end == 1:\n if objbst < pr_terminate: # terninate at hard/soft incumbent\n TSRFLP.pr_end = 1\n# model.terminate()\n obj_now = TSRFLP.a1 * TSRFLP.value_L + TSRFLP.a2 * TSRFLP.value_omega\n if obj_now <= convergence[-1][0]:\n # print('333333333333333333333333333')\n convergence.append(\n [convergence[-1][0], convergence[-1][1], time.time() - start_time])\n convergence.append(\n [obj_now, TSRFLP.bestbound, time.time() - start_time])\n if obj_now <= TSRFLP.Branching_record[0]:\n TSRFLP.Branching_record = [\n obj_now, vals, time.time() - start_time]\n\n if where == GRB.Callback.MESSAGE: # Record lazy constraints\n if TSRFLP.LB_branch == 1:\n msg = model.cbGet(GRB.Callback.MSG_STRING)\n cutname = 'Lazy constraints'\n if cutname in msg:\n TSRFLP.num_cut += int(msg[20:-1])\n\n TSRFLP = mr.rflp(p, ni, nk, a1, a2, cd, cdk, sk) # instantiate class\n # setting algorithm environment\n TSRFLP.dual = 1\n TSRFLP.intSP = 1.0\n TSRFLP.lift = 0\n TSRFLP.zero_half = 0\n gap = 1\n # initailization\n TSRFLP.dual_sub(callback=1)\n TSRFLP.sub(callback=1)\n TSRFLP.warm_start(1) # 1:no warm start 0: warm start\n TSRFLP.params_tuneup()\n start_time = time.time() # set initail time\n TSRFLP.master_model._vars = TSRFLP.master_model.getVars()\n TSRFLP.master_model.Params.lazyConstraints = 1\n TSRFLP.master_model.optimize(mycallback) # terminate after root node\n if TSRFLP.master_model.status == 2:\n TSRFLP.LB_terminate = 1\n TSRFLP.master_model.addConstr(\n TSRFLP.a1 * TSRFLP.L + TSRFLP.a2 * TSRFLP.omega >= TSRFLP.bestbound)\n rootval = TSRFLP.master_model.objval\n TSRFLP.Branching_record = [1e6, []]\n TSRFLP.Branching_record, better_sol, convergence,_ = TSRFLP.record_best_sol(\n TSRFLP.Branching_record, start_time, convergence)\n TSRFLP.add_LB(TSRFLP.Branching_record, branch_step, 1)\n LB_cut = 2\n vn_time = time.time()\n LB_time = 0\n # Branching\n while TSRFLP.vn_end == 0 and tl_total != 0:\n LB_time = time.time() # time Limits for one neighbourhood\n TSRFLP.master_model.optimize(mycallback)\n if TSRFLP.master_model.status in [3,4,5]:\n if branch_step <= TSRFLP.p*2-2:\n if branch_step == 2:\n TSRFLP.reverse_LB(TSRFLP.Branching_record,branch_step)\n branch_step += 2\n TSRFLP.add_LB(TSRFLP.Branching_record,branch_step+2) #\n LB_cut += 1\n # print('*********************','++first reverse++',branch_step,TSRFLP.Branching_record[0],'*********************')\n else:\n TSRFLP.reverse_LB(TSRFLP.Branching_record,branch_step)\n branch_step += 2\n TSRFLP.add_LB(TSRFLP.Branching_record,branch_step+2) #\n # 
print('*********************','++reverse++',branch_step,'incumbent',TSRFLP.Branching_record[0],'*********************')\n else:\n TSRFLP.vn_end = 1\n break\n if TSRFLP.master_model.status in [2,11]:\n TSRFLP.Branching_record,better_sol,convergence,Reverse_record = TSRFLP.record_best_sol(\n TSRFLP.Branching_record,start_time,convergence)\n if better_sol == 1:\n TSRFLP.reverse_LB(Reverse_record,branch_step,better_sol=1)\n branch_step = 2\n TSRFLP.add_LB(TSRFLP.Branching_record,branch_step) #\n LB_cut += 1\n # print('*********************','++better_sol++',branch_step,'incumbent',TSRFLP.Branching_record[0],'last one',Reverse_record[0],'*********************')\n else:\n if branch_step == 2:\n TSRFLP.reverse_LB(TSRFLP.Branching_record,branch_step)\n branch_step += 2\n TSRFLP.add_LB(TSRFLP.Branching_record,branch_step+2) #\n LB_cut += 1\n # print('*********************','++first reverse++',branch_step,'incumbent',TSRFLP.Branching_record[0],'*********************')\n else:\n TSRFLP.reverse_LB(TSRFLP.Branching_record,branch_step)\n branch_step += 2\n TSRFLP.add_LB(TSRFLP.Branching_record,branch_step+2) #\n # print('*********************','++reverse++',branch_step,'incumbent',TSRFLP.Branching_record[0],'*********************')\n for n in range(LB_cut):\n TSRFLP.master_model.remove(\n TSRFLP.master_model.getConstrs()[-n - 1])\n if tl_total == 0:\n TSRFLP.vn_end = 1\n # Proximity search\n pr_time = time.time()\n pr_gap = 1\n while TSRFLP.pr_end == 0 and tl_pr_total != 0:\n pr_node_time = time.time()\n rhs, soft_rhs = TSRFLP.add_proximity(TSRFLP.Branching_record, pr_step)\n TSRFLP.master_model.optimize(mycallback)\n if TSRFLP.master_model.Status in [2, 11]: # optimal or interrupted\n if TSRFLP.master_model.ObjVal < 1e10: # optimal or feasible\n best_incumbent = []\n obj_now = TSRFLP.a1 * TSRFLP.L.x + TSRFLP.a2 * TSRFLP.omega.x\n if obj_now < TSRFLP.Branching_record[0]:\n Vars = TSRFLP.master_model.getVars()\n for n in Vars:\n best_incumbent.append(n.x)\n TSRFLP.Branching_record = [\n obj_now, best_incumbent, time.time() - start_time]\n if abs(soft_rhs - obj_now) < abs(rhs-obj_now) and TSRFLP.master_model.Status ==2:\n TSRFLP.bestbound = rhs\n convergence.append(\n [convergence[-1][0], convergence[-1][1], time.time() - start_time])\n convergence.append(\n [TSRFLP.Branching_record[0], TSRFLP.bestbound, time.time() - start_time])\n else: # cannot find feasible solution\n if TSRFLP.master_model.ObjBound > 1e5:\n TSRFLP.bestbound = rhs\n TSRFLP.pr_end = 1 # stop\n if TSRFLP.master_model.Status in [3, 4, 5]: # infeasible\n TSRFLP.bestbound = soft_rhs\n pr_gap = (\n TSRFLP.Branching_record[0] - TSRFLP.bestbound) / (1 + TSRFLP.Branching_record[0])\n if pr_gap <= stop_gap:\n TSRFLP.pr_end = 1\n # print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n # print('gap: ', pr_gap, ' UB= ',\n # TSRFLP.Branching_record[0], ' LB= ', TSRFLP.bestbound)\n TSRFLP.master_model.remove(TSRFLP.master_model.getConstrs()[-1])\n TSRFLP.master_model.remove(TSRFLP.master_model.getConstrs()[-2])\n TSRFLP.remove_proximity()\n if tl_pr_total == 0:\n TSRFLP.pr_end = 1\n for n in range(len(convergence)):\n if abs(TSRFLP.Branching_record[0] - convergence[n][0]) <= 1e-5:\n Heu_sol = [round(TSRFLP.Branching_record[0], 2),\n round(convergence[n][2], 2)]\n break\n TSRFLP.LB_branch = 1\n for x in TSRFLP.LB_cuts:\n TSRFLP.master_model.addConstr(TSRFLP.omega >= x)\n TSRFLP.master_model.update()\n for n in range(len(TSRFLP.LB_cuts)):\n TSRFLP.master_model.getConstrs()[-1-n].Lazy = 1\n TSRFLP.master_model.update()\n if 
TSRFLP.Branching_record[1] != []:\n TSRFLP.set_initial(TSRFLP.Branching_record[1])\n TSRFLP.master_model.addConstr(\n TSRFLP.a1 * TSRFLP.L + TSRFLP.a2 * TSRFLP.omega >= TSRFLP.bestbound)\n TSRFLP.master_model.optimize(mycallback) # final optimization\n except GurobiError as e:\n print('Error code ' + str(e.errno) + \": \" + str(e))\n except AttributeError:\n print('Encountered an attribute error')\n runtime = round((time.time() - start_time), 2)\n if TSRFLP.master_model.Status == 2:\n TSRFLP.opt = 1\n objval = round(TSRFLP.master_model.Objval, 2)\n gap = TSRFLP.master_model.MIPGap\n if abs(gap) <= 1e-5:\n gap = 0\n var_y = []\n if objval < 1e10: # prevent infeasible\n for j in range(TSRFLP.ni):\n y_name = ''.join(['y[', str(j), ']'])\n y_temp = TSRFLP.master_model.getVarByName(y_name)\n var_y.append(y_temp.x) #\n convergence = [*zip(*convergence)]\n gap = round(gap, 2)\n rootval = round(rootval, 2)\n Heu_sol.append(\n round((Heu_sol[0] - TSRFLP.master_model.Objval) / (1 + Heu_sol[0]), 2))\n return var_y, runtime, TSRFLP.num_cut, TSRFLP.opt, objval, gap, convergence, len(TSRFLP.LB_cuts), Heu_sol, rootval\n","sub_path":"Two-stage Recoverable FLP/Local Branching/TSRO_BC_SP_LB2.py","file_name":"TSRO_BC_SP_LB2.py","file_ext":"py","file_size_in_byte":18701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"452350429","text":"import math\r\n \r\nBULLET_SPEED=2\r\nstart_x = 0\r\nstart_y = 400\r\ndest_x = 600\r\ndest_y = 500\r\nx_diff = dest_x - start_x\r\ny_diff = dest_y - start_y\r\nangle = math.atan2(y_diff, x_diff)\r\nbulletcenter_x = start_x\r\nbulletcenter_y = start_y\r\n \r\nbulletchange_x = math.cos(angle) * BULLET_SPEED\r\nbulletchange_y = math.sin(angle) * BULLET_SPEED\r\n\r\nprint(bulletchange_x)\r\nprint(bulletchange_y)\r\n\r\n\r\n\r\n \r\n","sub_path":"data/class test.py","file_name":"class test.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"589504719","text":"from suds.transport import Reply\nfrom http.client import HTTPMessage\nimport unittest.mock as mock\nimport soap\nimport re\n\nfrom .http import HttpTransport\n\n\ntry:\n from lxml import etree\nexcept ImportError:\n try:\n # Python 2.5\n import xml.etree.cElementTree as etree\n except ImportError:\n try:\n # Python 2.5\n import xml.etree.ElementTree as etree\n except ImportError:\n try:\n # normal cElementTree install\n import cElementTree as etree\n except ImportError:\n try:\n # normal ElementTree install\n import elementtree.ElementTree as etree\n except ImportError:\n pass\n\n\nclass XMLAssertions(object):\n \"\"\"\n Unit test mixin to add XPath assertions on XML data.\n \"\"\"\n\n def assertNodeCount(self, xml_str, xpath, num):\n \"\"\"\n Assert that N number of the given node exist.\n\n :param xml_str: XML to test\n :param xpath: XPath query to run\n :param num: Number of nodes that the XPath query should return\n \"\"\"\n doc = etree.fromstring(xml_str)\n nodes = doc.xpath(xpath)\n self.assertEqual(num, len(nodes))\n\n def assertNodeText(self, xml_str, xpath, expected):\n \"\"\"\n Assert that each node returned by the XPath equals the given text.\n\n :param xml_str: XML to test\n :param xpath: XPath query to run\n :param expected: Expected string content\n \"\"\"\n doc = etree.fromstring(xml_str)\n nodes = doc.xpath(xpath)\n self.assertTrue(len(nodes) > 0)\n for node in nodes:\n self.assertEqual(expected, node.text)\n\n def assertNodeAttributes(self, xml_str, xpath, 
attributes):\n \"\"\"\n Assert that each node returned by the XPath has each of the given attributes and attribute values.\n\n :param xml_str: XML to test\n :param xpath: XPath query to run\n :param expected: Dictionary of attribute names and their expected values\n \"\"\"\n doc = etree.fromstring(xml_str)\n nodes = doc.xpath(xpath)\n self.assertTrue(len(nodes) > 0)\n for node in nodes:\n for attribute, value in attributes.items():\n self.assertTrue(attribute in node.attrib)\n self.assertEqual(value, node.attrib[attribute])\n\n\nclass SoapTest(XMLAssertions):\n \"\"\"\n Subclass of :class:`soap.tests.XMLAssertions ` that adds behavior useful for\n mocking and testing a SOAP API at the XML level.\n \"\"\"\n\n def setUp(self):\n \"\"\"Test Setup. Clears the :attr:`soap.clients ` cache.\"\"\"\n soap.clients = {}\n\n def _build_transport_with_reply(\n self, body, status=200, pattern=None, test_request=None\n ):\n \"\"\"\n Build a fake :class:`soap.http.HttpTransport ` that, when called, will\n reply with the given XML body and status code.\n\n :param body: XML response data as bytes.\n :param status: HTTP status code to return.\n :param pattern: Optional. Regexp pattern to match against the request URL. Useful if your\n test communicates with multiple SOAP APIs that need different mock responses.\n :param test_request: Optional. Function to call with a request object, before returning\n the response. Can use this to run assertions on the SOAP request XML.\n :return: :class:`soap.http.HttpTransport ` object\n :rtype: soap.http.HttpTransport\n \"\"\"\n headers = HTTPMessage()\n headers.add_header(\"Content-Type\", \"text/xml; charset=utf-8\")\n reply = Reply(status, headers, body)\n\n transport = HttpTransport()\n\n def surrogate(request, *args, **kwargs):\n if pattern and not re.search(pattern, request.url):\n return HttpTransport.send(transport, request, *args, **kwargs)\n if test_request:\n test_request(request)\n return reply\n\n transport.send = mock.MagicMock()\n transport.send.side_effect = surrogate\n return transport\n","sub_path":"src/soap/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"301898885","text":"import sys\nimport click\nimport os\nimport torch\nimport logging\nimport random\nimport numpy as np\nimport logging\nimport ray\nsys.path.append('../')\nfrom ray import tune\nfrom ray.tune import track\nfrom ray.tune.suggest.ax import AxSearch\nfrom ax.plot.contour import plot_contour\nfrom ax.plot.trace import optimization_trace_single_method\nfrom ax.service.ax_client import AxClient\nfrom sklearn.model_selection import TimeSeriesSplit, KFold, train_test_split\nfrom models.DeepSAD import DeepSAD\nfrom datasets.cicflow import CICFlowADDataset\nfrom utils.config import Config\nfrom datasets.main import load_dataset\n\n\n################################################################################\n# Settings\n################################################################################\n@click.command()\n@click.argument('dataset_name',\n type=click.Choice([\n 'mnist', 'fmnist', 'cifar10', 'arrhythmia', 'cardio',\n 'satellite', 'satimage-2', 'shuttle', 'thyroid', 'cicflow'\n ]))\n@click.argument('net_name',\n type=click.Choice([\n 'mnist_LeNet', 'fmnist_LeNet', 'cifar10_LeNet',\n 'arrhythmia_mlp', 'cardio_mlp', 'satellite_mlp',\n 'satimage-2_mlp', 'shuttle_mlp', 'cicflow_mlp',\n 'cicflow_tcn', 'thyroid_mlp'\n ]))\n@click.argument('xp_path', 
type=click.Path(exists=True))\n@click.argument('data_path', type=click.Path(exists=True))\n@click.option('--load_config',\n type=click.Path(exists=True),\n default=None,\n help='Config JSON-file path (default: None).')\n@click.option('--load_model',\n type=click.Path(exists=True),\n default=None,\n help='Model file path (default: None).')\n@click.option('--eta',\n type=float,\n default=1.0,\n help='Deep SAD hyperparameter eta (must be 0 < eta).')\n@click.option('--ratio_known_normal',\n type=float,\n default=0.0,\n help='Ratio of known (labeled) normal training examples.')\n@click.option('--ratio_known_outlier',\n type=float,\n default=0.0,\n help='Ratio of known (labeled) anomalous training examples.')\n@click.option(\n '--ratio_pollution',\n type=float,\n default=0.0,\n help=\n 'Pollution ratio of unlabeled training data with unknown (unlabeled) anomalies.'\n)\n@click.option('--device',\n type=str,\n default='cuda',\n help='Computation device to use (\"cpu\", \"cuda\", \"cuda:2\", etc.).'\n )\n@click.option('--seed',\n type=int,\n default=0,\n help='Set seed. If -1, use randomization.')\n@click.option(\n '--optimizer_name',\n type=click.Choice(['adam']),\n default='adam',\n help='Name of the optimizer to use for Deep SAD network training.')\n@click.option('--validation',\n type=click.Choice(['kfold', 'time_series', 'index']),\n default='index',\n help='Validation strategy.')\n@click.option(\n '--lr',\n type=float,\n default=0.001,\n help='Initial learning rate for Deep SAD network training. Default=0.001')\n@click.option('--n_epochs',\n type=int,\n default=50,\n help='Number of epochs to train.')\n@click.option(\n '--lr_milestone',\n type=int,\n default=0,\n multiple=True,\n help=\n 'Lr scheduler milestones at which lr is multiplied by 0.1. Can be multiple and must be increasing.'\n)\n@click.option('--batch_size',\n type=int,\n default=128,\n help='Batch size for mini-batch training.')\n@click.option(\n '--weight_decay',\n type=float,\n default=1e-6,\n help='Weight decay (L2 penalty) hyperparameter for Deep SAD objective.')\n@click.option('--pretrain',\n type=bool,\n default=True,\n help='Pretrain neural network parameters via autoencoder.')\n@click.option('--ae_optimizer_name',\n type=click.Choice(['adam']),\n default='adam',\n help='Name of the optimizer to use for autoencoder pretraining.')\n@click.option(\n '--ae_lr',\n type=float,\n default=0.001,\n help='Initial learning rate for autoencoder pretraining. Default=0.001')\n@click.option('--ae_n_epochs',\n type=int,\n default=100,\n help='Number of epochs to train autoencoder.')\n@click.option(\n '--ae_lr_milestone',\n type=int,\n default=0,\n multiple=True,\n help=\n 'Lr scheduler milestones at which lr is multiplied by 0.1. Can be multiple and must be increasing.'\n)\n@click.option('--ae_batch_size',\n type=int,\n default=128,\n help='Batch size for mini-batch autoencoder training.')\n@click.option(\n '--ae_weight_decay',\n type=float,\n default=1e-6,\n help='Weight decay (L2 penalty) hyperparameter for autoencoder objective.')\n@click.option(\n '--num_threads',\n type=int,\n default=0,\n help=\n 'Number of threads used for parallelizing CPU operations. 0 means that all resources are used.'\n)\n@click.option(\n '--n_jobs_dataloader',\n type=int,\n default=0,\n help=\n 'Number of workers for data loading. 
0 means that the data will be loaded in the main process.'\n)\n@click.option(\n '--normal_class',\n type=int,\n default=0,\n help=\n 'Specify the normal class of the dataset (all other classes are considered anomalous).'\n)\n@click.option(\n '--known_outlier_class',\n type=int,\n default=1,\n help=\n 'Specify the known outlier class of the dataset for semi-supervised anomaly detection.'\n)\n@click.option(\n '--n_known_outlier_classes',\n type=int,\n default=0,\n help='Number of known outlier classes.'\n 'If 0, no anomalies are known.'\n 'If 1, outlier class as specified in --known_outlier_class option.'\n 'If > 1, the specified number of outlier classes will be sampled at random.'\n)\ndef main(dataset_name, net_name, xp_path, data_path, load_config, load_model,\n eta, ratio_known_normal, ratio_known_outlier, ratio_pollution, device,\n seed, optimizer_name, validation, lr, n_epochs, lr_milestone,\n batch_size, weight_decay, pretrain, ae_optimizer_name, ae_lr,\n ae_n_epochs, ae_lr_milestone, ae_batch_size, ae_weight_decay,\n num_threads, n_jobs_dataloader, normal_class, known_outlier_class,\n n_known_outlier_classes):\n \"\"\"\n Deep SAD, a method for deep semi-supervised anomaly detection.\n\n :arg DATASET_NAME: Name of the dataset to load.\n :arg NET_NAME: Name of the neural network to use.\n :arg XP_PATH: Export path for logging the experiment.\n :arg DATA_PATH: Root path of data.\n \"\"\"\n\n ######################################################\n # GLOBAL CONFIG #\n ######################################################\n\n sys.path.append('../')\n\n xp_path = os.path.abspath(xp_path)\n data_path = os.path.abspath(data_path)\n # Get configuration\n cfg = Config(locals().copy())\n\n # Set up logging\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(tune.__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n log_file = xp_path + '/log.txt'\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Print paths\n logger.info('Log file is %s' % log_file)\n logger.info('Data path is %s' % data_path)\n logger.info('Export path is %s' % xp_path)\n\n # Print experimental setup\n logger.info('Dataset: %s' % dataset_name)\n logger.info('Normal class: %d' % normal_class)\n logger.info('Ratio of labeled normal train samples: %.2f' %\n ratio_known_normal)\n logger.info('Ratio of labeled anomalous samples: %.2f' %\n ratio_known_outlier)\n logger.info('Pollution ratio of unlabeled train data: %.2f' %\n ratio_pollution)\n if n_known_outlier_classes == 1:\n logger.info('Known anomaly class: %d' % known_outlier_class)\n else:\n logger.info('Number of known anomaly classes: %d' %\n n_known_outlier_classes)\n logger.info('Network: %s' % net_name)\n\n if cfg.settings['seed'] != -1:\n random.seed(cfg.settings['seed'])\n np.random.seed(cfg.settings['seed'])\n torch.manual_seed(cfg.settings['seed'])\n torch.cuda.manual_seed(cfg.settings['seed'])\n torch.backends.cudnn.deterministic = True\n logger.info('Set seed to %d.' 
% cfg.settings['seed'])\n\n ######################################################\n # EXP CONFIG #\n ######################################################\n\n # Init ray\n ray.init(address='auto')\n ax = AxClient(enforce_sequential_optimization=False)\n # Default device to 'cpu' if cuda is not available\n\n ax.create_experiment(\n name=\"cicflow_mlp_experiment\",\n parameters=[\n {\n \"name\": \"lr\",\n \"type\": \"range\",\n \"bounds\": [1e-6, 0.4],\n \"log_scale\": True\n },\n {\n \"name\": \"eta\",\n \"type\": \"range\",\n \"bounds\": [0.0, 1.5]\n },\n ],\n objective_name=\"mean_auc\",\n )\n\n def mlp_trainable(parameterization, reporter):\n return train_evaluate(parameterization,\n reporter,\n validation=validation,\n data_path=data_path,\n n_known_outlier_classes=n_known_outlier_classes,\n ratio_known_normal=ratio_known_normal,\n ratio_known_outlier=ratio_known_outlier,\n cfg=cfg,\n n_jobs_dataloader=n_jobs_dataloader,\n net_name=net_name,\n pretrain=pretrain,\n ratio_pollution=ratio_pollution)\n\n tune.run(\n mlp_trainable,\n name=\"SSAD MLP\",\n num_samples=30,\n resources_per_trial={'gpu': 1},\n search_alg=AxSearch(\n ax), # Note that the argument here is the `AxClient`.\n verbose=\n 2, # Set this level to 1 to see status updates and to 2 to also see trial results.\n # To use GPU, specify: resources_per_trial={\"gpu\": 1}.\n )\n\n best_parameters, values = ax.get_best_parameters()\n best_parameters\n\n\ndef train_evaluate(parameterization,\n reporter,\n validation,\n data_path,\n n_known_outlier_classes,\n ratio_known_normal,\n ratio_known_outlier,\n ratio_pollution,\n cfg,\n n_jobs_dataloader,\n net_name,\n pretrain,\n n_splits=5):\n\n sys.path.append('../')\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n period = np.array(\n ['2019-11-08', '2019-11-09', '2019-11-11', '2019-11-12', '2019-11-13'])\n # period = np.array(['2019-11-08','2019-11-09'])\n\n if (validation == 'kfold'):\n split = KFold(n_splits=n_splits)\n elif (validation == 'time_series'):\n split = TimeSeriesSplit(n_splits=n_splits)\n else:\n # Dummy object with split method that return indexes of train/test split 0.8/0.2. 
Similar to train_test_split without shuffle\n split = type(\n 'obj', (object, ), {\n 'split':\n lambda p: [([x for x in range(int(len(p) * 0.8))],\n [x for x in range(int(len(p) * 0.8), len(p))])]\n })\n\n test_aucs = []\n\n for train, test in (split.split(period)):\n\n dataset = CICFlowADDataset(\n root=os.path.abspath(data_path),\n n_known_outlier_classes=n_known_outlier_classes,\n ratio_known_normal=ratio_known_normal,\n ratio_known_outlier=ratio_known_outlier,\n train_dates=period[train],\n test_dates=period[test],\n ratio_pollution=ratio_pollution,\n shuffle=True)\n\n # Initialize DeepSAD model and set neural network phi\n\n model = DeepSAD(parameterization['eta']).set_network(net_name)\n\n if pretrain:\n\n model = model.pretrain(\n dataset,\n optimizer_name=cfg.settings['ae_optimizer_name'],\n lr=parameterization['lr'],\n n_epochs=cfg.settings['ae_n_epochs'],\n lr_milestones=cfg.settings['ae_lr_milestone'],\n batch_size=cfg.settings['ae_batch_size'],\n weight_decay=cfg.settings['ae_weight_decay'],\n device=device,\n n_jobs_dataloader=n_jobs_dataloader)\n\n # Save pretraining results\n # deepSAD.save_ae_results(export_json=xp_path + '/ae_results.json')\n\n # Train model on dataset\n\n model = model.train(dataset,\n optimizer_name=cfg.settings['ae_optimizer_name'],\n lr=parameterization['lr'],\n n_epochs=cfg.settings['n_epochs'],\n lr_milestones=cfg.settings['lr_milestone'],\n batch_size=cfg.settings['batch_size'],\n weight_decay=cfg.settings['weight_decay'],\n device=device,\n n_jobs_dataloader=n_jobs_dataloader,\n reporter=reporter)\n\n model.test(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)\n test_auc = model.results['auc_roc']\n\n test_aucs.append(test_auc)\n\n reporter(mean_auc=evaluate_aucs(test_aucs=test_aucs))\n\n\ndef evaluate_aucs(test_aucs):\n return sum(test_aucs) / len(test_aucs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/ssad_mlp_cicflow.py","file_name":"ssad_mlp_cicflow.py","file_ext":"py","file_size_in_byte":14184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"552453722","text":"import os\nimport yaml\nfrom pathlib import Path\nimport tarfile\nfrom jinja2 import Environment, BaseLoader, FileSystemLoader\nfrom catapult.exceptions import FileNotFoundException, HostIsEmptyException\nfrom yaml.parser import ParserError\nfrom catapult.exceptions import YamlParseException, EnvironmentParameterParserException\n\n\ndef get_hosts(servers):\n hosts = []\n\n for server in servers:\n if 'host' not in server:\n continue\n\n hosts.append(server['host'])\n\n return set(hosts)\n\n\ndef render_path(template_path, context):\n path, filename = os.path.split(template_path)\n\n return Environment(\n loader=FileSystemLoader(path or './')\n ).get_template(filename).render(context)\n\n\ndef render_string(template_string, context):\n template = Environment(loader=BaseLoader).from_string(template_string)\n return template.render(context)\n\n\ndef load_yaml(path):\n file_path = Path(path)\n\n if not file_path.exists():\n raise FileNotFoundException('Can not find yaml file by path \"{}\"'.format(str(file_path.absolute())))\n\n with open(str(file_path.absolute())) as stream:\n try:\n config = yaml.load(stream)\n\n return config\n except ParserError as err:\n raise YamlParseException(err.message)\n\n\ndef normalize_servers(servers):\n normalized = []\n\n for server in servers:\n if 'host' not in server:\n raise HostIsEmptyException('Host for server is empty')\n\n normalized.append(server)\n\n return 
normalized\n\n\ndef shared_render(template_path, dest, context):\n generated = render_path(template_path, {\n 'get': context.get\n })\n\n with open(dest, 'w') as open_file:\n open_file.write(generated)\n\n\ndef normalize_linked_paths(paths):\n if paths is None:\n return []\n\n paths = sorted(paths, key=len, reverse=True)\n\n normalized = []\n\n for path in paths:\n normalized.append(path.strip('/'))\n\n return normalized\n\n\ndef get_template_path(type, file_name):\n folder = os.path.abspath(os.path.dirname(__file__))\n\n return '{}/templates/{}/{}.j2'.format(folder, type, file_name)\n\n\ndef get_local_path(type, path):\n folder = os.path.abspath(os.getcwd())\n\n if type:\n return '{}/{}/{}'.format(folder, type, path)\n\n return '{}/{}'.format(folder, path)\n\n\ndef parse_parameters(parameters_list):\n parameters = {}\n\n for pare in parameters_list:\n stack = pare.split(':')\n\n if not stack[0]:\n raise EnvironmentParameterParserException('Environment parameter must contain a key')\n\n key = stack[0]\n\n value = ':'.join(stack[1:])\n\n if not value:\n value = None\n\n parameters[key] = value\n\n return parameters\n\n\ndef create_build(host, storage, logger):\n tar = tarfile.open('{}/{}.tar.gz'.format(storage.get('path.local.builds'), host), 'w:gz')\n\n tar.add(\n '{}/install.sh'.format(storage.get('path.local.release')),\n arcname='install.sh'\n )\n\n tar.add(\n storage.get('path.local.shared'),\n arcname='shared'\n )\n\n tar.add(\n storage.get('path.local.code'),\n arcname='code'\n )\n\n log_added = [] # only for logging\n for root, dirs, files in os.walk(\n '{}/{}'.format(storage.get('path.local.configs'), host),\n topdown=True):\n for file in files:\n tar.add(\n '{}/{}'.format(root, file),\n arcname='configs/{}'.format(file)\n )\n\n log_added.append(file)\n\n tar.close()\n\n if log_added:\n logger.success('added configs: ' + ', '.join(log_added), host)\n else:\n logger.warning('nothing config added', host)\n","sub_path":"catapult/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"209663581","text":"#!/usr/bin/env python3\n\n# INF8775 - Analyse et conception d'algorithmes\n# TP1 - Problème de la ligne d'horizon\n#\n# AUTEUR :\n# HAOUAS, Mohammed Najib - 29 janvier 2021\n#\n# RÉSUMÉ DES CHANGEMENTS :\n# 01/30/2021 - Disponibilité initiale.\n#\n# USAGE :\n# Ce script génère les exemplaires requis pour le TP1 portant sur le problème de la ligne d'horizon (Leetcode Hard).\n#\n# $ ./inst_gen.py [-h] -s NB_BATIMENTS [-n NB_EXEMPLAIRES]\n#\n# où :\n# * NB_BATIMENTS est la taille du problème et \n# * NB_EXEMPLAIRES est le nombre d'exemplaires différents requis (par défaut 1).\n#\n# Il est nécessaire de rendre ce script exécutable en utilisant chmod +x\n# Python 3.5 ou ultérieur recommandé pour lancer ce script.\n\nimport random\nimport argparse\n\n\nif __name__ == \"__main__\":\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--taille\", \\\n help=\"Représente le nombre de bâtiments à générer\", \\\n action='store', required=True, metavar='NB_BATIMENTS', type=int)\n parser.add_argument(\"-n\", \"--nb-exemplaires\", \\\n help=\"Représente le nombre d'exemplaires d'une même taille à générer\", \\\n action='store', required=False, metavar='NB_EXEMPLAIRES', type=int)\n\n args = parser.parse_args()\n if not args.nb_exemplaires:\n args.nb_exemplaires = 1\n\n # Parameters\n max_width = 50\n max_dist = 30\n max_height = 300\n 
+{"seq_id":"265544146","text":"import os\nimport json\nimport random\nimport matplotlib.pyplot as plt\n\nclass Dictionary:\n    \"\"\"The logic class of the dictionary.\"\"\"\n    def __init__(self, filename) -> None:\n        self.filename = filename\n        self.not_existing_words = [] # List of the words you tried that the dictionary does not contain\n        self.word_exist = False \n        if os.path.exists(filename):\n            with open(filename,\"r\") as f:\n                self.data = json.load(f)\n\n    def get_random_word(self): \n        \"\"\"Returns a random word from the already existing json file.\"\"\"\n        return random.choice(list(self.data.keys()))\n\n    def has_word(self,word:str)->bool:\n        \"\"\"Returns whether the entered word is already in the dictionary\"\"\"\n        return word in self.data.keys()\n\n    def add_word(self,word:str,definition:str)-> None:\n        \"\"\"Adds the given word and definition to the json file. \n        Raises an Exception if the word already exists in the dictionary.\"\"\"\n        if self.has_word(word):\n            raise Exception(\"A megadott szó már szerepel a szótárban!\")\n\n        if len(word) == 0 or len(definition) == 0:\n            raise Exception(\"Egyik mező sem maradhat üres!\")\n\n        self.data[word] = definition\n\n    def update_word(self, word:str, new_def:str) -> None:\n        if word in self.data.keys():\n            self.data[word] = new_def\n\n        else: raise Exception(\"Csak meglevo szavak definiciojat tudod modositani\")\n\n    def save_words(self):\n        with open(self.filename, \"w\") as f:\n            f.write(json.dumps(self.data)) \n    \n    \nclass Quiz:\n    \"\"\"The logic class of the QUIZ.\"\"\"\n    def __init__(self, dictionary:Dictionary)-> None:\n        self.dictionary = dictionary\n        self.random_word = None # the randomly selected word\n        self.the_good_answer = None # the actual meaning of the random word\n        self.answers = [] # holds 3 random definitions, plus the one belonging to the correct answer \n        self.good = 0 # stores the number of your correct answers, i.e. your points\n        self.questions = 0 # stores the number of questions asked\n        self.asked_words = [] # stores the words the quiz has asked, for plotting\n    \n    def result(self):\n        \"\"\"Shows your result after pressing the EXIT button: what percentage you achieved.\"\"\"\n        try:\n            return int((self.good/self.questions)*100)\n        except ZeroDivisionError: pass\n\n    def make_quiz(self)-> None:\n        \"\"\"Selects a random word and 4 answer options, of which only one is true.\n        Shuffles them so that they appear in random order. If the json file does not contain at least \n        4 words, we get an Exception, since we do not want to see duplicates.\"\"\"\n        self.random_word = self.dictionary.get_random_word()\n        self.asked_words.append(self.random_word)\n        self.the_good_answer = self.dictionary.data[self.random_word]\n        self.answers.append(self.the_good_answer)\n        if len(self.dictionary.data) < 4: raise Exception(\"Dict must have at least 4 elements before starting QUIZ.\")\n        while len(self.answers) != 4:\n            word = self.dictionary.get_random_word()\n            if self.dictionary.data[word] not in self.answers:\n                self.answers.append(self.dictionary.data[word])\n        random.shuffle(self.answers)\n\n\n    def reset(self)-> None:\n        \"\"\"Resets the score to zero, and the question counter starts over as well.\"\"\"\n        self.questions = 0\n        self.good = 0\n        self.asked_words = []\n\n    def make_plot(self,filename:str)-> None:\n        \"\"\"Creates a plot.\"\"\"\n        counts = dict()\n        for word in self.asked_words:\n            counts[word] = counts.get(word, 0) + 1\n        k = counts.keys()\n        v = counts.values()\n        fig, ax = plt.subplots()\n        ax.bar(k,v)\n        ax.set_title(\"Megmutatja, hogy egy szó hányszor fordult elő egy körben.\")\n        fig.savefig(filename)\n\n\nif __name__==\"__main__\":\n    dictionary = Dictionary(\"dictionary_of_words.json\")\n    print(\"\\nA random word: \",dictionary.get_random_word())\n    dictionary.add_word(\"foo\",\"bar\")\n    ","sub_path":"dict_logic.py","file_name":"dict_logic.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160951371","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 4 11:04:55 2015\n\n@author: Tim Ha\n@date: 11.04.15\n@assn: test1\n\"\"\"\n# -*- coding: utf-8 -*-\nimport codecs\n\nmovieIDs = []\nmovieTitles = []\nmovieGenres = []\n\nmoviesFile = codecs.open('movies.txt', 'r', encoding='UTF-8')\nreadMovies = moviesFile.readlines()\nfor mLine in readMovies:\n    mData = mLine.split('::')\n    movieIDs.append(mData[0])\n    movieTitles.append(mData[1])\n    movieGenres.append(mData[2])\nmoviesFile.close()\n\nratingsUserID = []\nratingsMovieID = []\nratings = []\n\nratingsFile = open('ratings.txt', 'r')\nreadRatings = ratingsFile.readlines()\n#for rLine in readRatings:\n#    rData = rLine.split('::')\n#    print(rData)\nratingsFile.close()\n    \nusersFile = open('users.txt', 'r')\nreadUsers = usersFile.readlines()\n# for uLine in readUsers:\n#     uData = uLine.split('::')\n#     print(uData)\nusersFile.close()\n\ngender = [' ']\nfor uLine in readUsers:\n\tuData = uLine.split('::')\n\tgender.append(uData[1])\n\nnumRatings = 1000209\nnumMRat = 0\nnumFRat = 0\nmTotalRating = 0\nfTotalRating = 0\n\nfor rLine in readRatings:\n\trData = rLine.split('::')\n\tuserID = int(rData[0])\n\tuserRating = int(rData[2])\n\tsex = gender[userID]\n\tif sex == 'M':\n\t\tnumMRat += 1\n\t\tmTotalRating += userRating\n\telse:\n\t\tnumFRat += 1\n\t\tfTotalRating += userRating\n\navgMaleRating = mTotalRating / numMRat\navgFemaleRating = fTotalRating / numFRat\npercentMaleRating = (numMRat / numRatings) * 100\n\nprint('Male: ' + str(avgMaleRating))\nprint('Female: ' + str(avgFemaleRating))\nprint('Total number of male ratings: ' + str(numMRat))\nprint('Total number of female ratings: ' + str(numFRat))\nprint('Percent male ratings: ' + str(percentMaleRating))\n\n\n\n\n\n\n\n","sub_path":"project/movielens/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
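Note: the gender pass in test1.py above keeps two pairs of running totals by hand. The same aggregation generalizes to any number of groups with a pair of dicts; the sketch below is an illustrative addition, not part of the original assignment:

def average_by_group(pairs):
    # pairs: iterable of (group_key, numeric_rating) tuples.
    totals, counts = {}, {}
    for key, rating in pairs:
        totals[key] = totals.get(key, 0) + rating
        counts[key] = counts.get(key, 0) + 1
    return {key: totals[key] / counts[key] for key in totals}

# average_by_group([('M', 4), ('F', 5), ('M', 3)])  ->  {'M': 3.5, 'F': 5.0}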
+{"seq_id":"134637227","text":"import os\n\n\ndef to_head( projectpath ):\n pathlayers = os.path.join( projectpath, 'layers/' ).replace('\\\\', '/')\n # \\PassOptionsToPackage{table, usenames, dvipsnames}{xcolor}\n return r\"\"\"\n\\documentclass[border=8pt, multi, tikz, usenames, dvipsnames]{standalone} \n\\usepackage{import}\n\\subimport{\"\"\"+ pathlayers + r\"\"\"}{init}\n\\usetikzlibrary{positioning}\n\\usetikzlibrary{3d} %for including external image \n\"\"\"\n\ndef to_cor():\n return r\"\"\"\n\\def\\ConvColor{rgb:yellow,5;red,2.5;white,5}\n\\def\\ConvReluColor{rgb:yellow,5;red,5;white,5}\n\\def\\PoolColor{rgb:red,0.49;green,0.98;blue,1.0}\n\\def\\UnpoolColor{rgb:blue,2;green,1;black,0.3}\n\\def\\FcColor{rgb:red,1.0;green,0.55;blue,0}\n\\def\\FcReluColor{rgb:blue,5;red,5;white,4}\n\\def\\BatchNormColor{rgb:red,0.91;green,0.41;blue,0.17}\n\\def\\SoftmaxColor{rgb:magenta,5;black,7}\n\\def\\Relu6Color{rgb:red,0.19;green,0.55;blue,0.91}\n\\def\\SAColor{rgb:red,0.24;green,0.82;blue,0.44}\n\"\"\"\n# ConvColor: {rgb:yellow,5;red,2.5;white,5}\n# SoftmaxColor: {rgb}{rgb:red,0.0; green,0.53; blue,0.74}\ndef to_begin():\n return r\"\"\"\n\\newcommand{\\copymidarrow}{\\tikz \\draw[-Stealth,line width=0.8mm,draw={rgb:blue,4;red,1;green,1;black,3}] (-0.3,0) -- ++(0.3,0);}\n\n\\begin{document}\n\\begin{tikzpicture}\n\\tikzstyle{connection}=[ultra thick,every node/.style={sloped,allow upside down},draw={rgb:blue,4;red,1;green,1;black,3},opacity=0.7]\n\\tikzstyle{copyconnection}=[ultra thick,every node/.style={sloped,allow upside down},draw={rgb:blue,4;red,1;green,1;black,3},opacity=0.7]\n\"\"\"\n\n# layers definition\n\ndef to_input( pathfile, to='(-3,0,0)', width=8, height=8, name=\"temp\" ):\n return r\"\"\"\n\\node[canvas is zy plane at x=0] (\"\"\" + name + \"\"\") at \"\"\"+ to +\"\"\" {\\includegraphics[width=\"\"\"+ str(width)+\"cm\"+\"\"\",height=\"\"\"+ str(height)+\"cm\"+\"\"\"]{\"\"\"+ pathfile +\"\"\"}};\n\"\"\"\n\ndef to_FC(name, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=40, depth=40, opacity=1.0, caption=\" \"):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\FcColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n# Conv\ndef to_Conv(name, s_filter=256, n_filter=64, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=40, depth=40, opacity=1.0, caption=\" \"):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\ConvColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n# xlabel={{\"\"\"+ str(n_filer) +\"\"\", }},\n# zlabel=\"\"\"+ str(s_filer) +\"\"\",\n\n\ndef SelfAttention(name, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=40, depth=40, opacity=1.0, caption=\" \"):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\SAColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n# Conv,Conv,relu\n# Bottleneck\ndef to_ConvConvRelu( name, s_filer=256, n_filer=(64,64), offset=\"(0,0,0)\", 
to=\"(0,0,0)\", width=(2,2), height=40, depth=40, caption=\" \" ):\n return r\"\"\"\n\\pic[shift={ \"\"\"+ offset +\"\"\" }] at \"\"\"+ to +\"\"\" \n {RightBandedBox={\n name=\"\"\"+ name +\"\"\",\n caption=\"\"\"+ caption +\"\"\",\n xlabel={{ \"\"\"+ str(n_filer[0]) +\"\"\", \"\"\"+ str(n_filer[1]) +\"\"\" }},\n zlabel=\"\"\"+ str(s_filer) +\"\"\",\n fill=\\ConvColor,\n bandfill=\\ConvReluColor,\n height=\"\"\"+ str(height) +\"\"\",\n width={ \"\"\"+ str(width[0]) +\"\"\" , \"\"\"+ str(width[1]) +\"\"\" },\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\ndef to_ConvRelu6( name, s_filer=256, n_filer=64, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=40, depth=40, caption=\" \" ):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\ConvColor,\n bandfill=\\ConvReluColor,\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n\ndef to_BatchNorm(name, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=32, depth=32, opacity=0.5, caption=\" \"):\n return r\"\"\"\n\\pic[shift={ \"\"\"+ offset +\"\"\" }] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\"+name+\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\BatchNormColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n# Pool\ndef to_Pool(name, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=32, depth=32, opacity=0.5, caption=\" \"):\n return r\"\"\"\n\\pic[shift={ \"\"\"+ offset +\"\"\" }] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\"+name+\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\PoolColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n\n# unpool4, \ndef to_UnPool(name, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=32, depth=32, opacity=0.5, caption=\" \"):\n return r\"\"\"\n\\pic[shift={ \"\"\"+ offset +\"\"\" }] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\"+ name +r\"\"\",\n caption=\"\"\"+ caption +r\"\"\",\n fill=\\UnpoolColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n\n\ndef to_ConvRes( name, s_filer=256, n_filer=64, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=6, height=40, depth=40, opacity=0.2, caption=\" \" ):\n return r\"\"\"\n\\pic[shift={ \"\"\"+ offset +\"\"\" }] at \"\"\"+ to +\"\"\" \n {RightBandedBox={\n name=\"\"\"+ name + \"\"\",\n caption=\"\"\"+ caption + \"\"\",\n xlabel={{ \"\"\"+ str(n_filer) + \"\"\", }},\n zlabel=\"\"\"+ str(s_filer) +r\"\"\",\n fill={rgb:white,1;black,3},\n bandfill={rgb:white,1;black,2},\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n\n# ConvSoftMax\ndef to_ConvSoftMax( name, s_filer=40, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1, height=40, depth=40, caption=\" \" ):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +\"\"\",\n zlabel=\"\"\"+ str(s_filer) +\"\"\",\n fill=\\SoftmaxColor,\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n 
};\n\"\"\"\n\n# SoftMax\ndef to_SoftMax( name, s_filer=10, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1.5, height=3, depth=25, opacity=1.0, caption=\" \" ):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +\"\"\",\n xlabel={{\" \",\"dummy\"}},\n zlabel=\"\"\"+ str(s_filer) +\"\"\",\n fill=\\SoftmaxColor,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n\ndef to_Relu6( name, offset=\"(0,0,0)\", to=\"(0,0,0)\", width=1.5, height=3, depth=25, opacity=1.0, caption=\" \" ):\n return r\"\"\"\n\\pic[shift={\"\"\"+ offset +\"\"\"}] at \"\"\"+ to +\"\"\" \n {Box={\n name=\"\"\" + name +\"\"\",\n caption=\"\"\"+ caption +\"\"\",\n xlabel={{\" \",\"dummy\"}},\n fill=\\Relu6Color,\n opacity=\"\"\"+ str(opacity) +\"\"\",\n height=\"\"\"+ str(height) +\"\"\",\n width=\"\"\"+ str(width) +\"\"\",\n depth=\"\"\"+ str(depth) +\"\"\"\n }\n };\n\"\"\"\n\n\ndef to_connection( of, to):\n return r\"\"\"\n\\draw [connection] (\"\"\"+of+\"\"\"-east) -- node {\\copymidarrow} (\"\"\"+to+\"\"\"-west);\n\"\"\"\n\ndef to_skip( of, to, pos=1.25):\n return r\"\"\"\n\\path (\"\"\"+ of +\"\"\"-east) -- (\"\"\"+ of +\"\"\"-near) coordinate[pos=\"\"\"+ str(pos) +\"\"\"] (\"\"\"+ of +\"\"\"-near) ;\n\\path (\"\"\"+ to +\"\"\"-west) -- (\"\"\"+ to +\"\"\"-near) coordinate[pos=\"\"\"+ str(pos) +\"\"\"] (\"\"\"+ to +\"\"\"-near) ;\n\\draw [copyconnection] (\"\"\"+of+\"\"\"-east)\n-- node {\\copymidarrow}(\"\"\"+of+\"\"\"-near)\n-- node {\\copymidarrow}(\"\"\"+to+\"\"\"-near)\n-- node {\\copymidarrow} (\"\"\"+to+\"\"\"-west);\n\"\"\"\n\n\ndef to_end():\n return r\"\"\"\n\\end{tikzpicture}\n\\end{document}\n\"\"\"\n\n\ndef to_generate( arch, pathname=\"file.tex\" ):\n with open(pathname, \"w\") as f: \n for c in arch:\n # print(c)\n f.write(c)\n \n\n\n","sub_path":"pycore/tikzeng.py","file_name":"tikzeng.py","file_ext":"py","file_size_in_byte":8638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"654066859","text":"import time\r\nimport datetime\r\nimport Adafruit_LED_Backpack\r\nimport adafruit_bmp280\r\nimport busio\r\nimport board\r\n\r\nDELAY = 2\r\n\r\n# Create display instance on default I2C address (0x70) and bus number.\r\ndisplay = Adafruit_LED_Backpack.SevenSegment.SevenSegment()\r\n\r\n# Alternatively, create a display with a specific I2C address and/or bus.\r\n# display = Adafruit_LED_Backpack.SevenSegment.SevenSegment(address=0x74, busnum=1)\r\n\r\n# Initialize the display. 
Must be called once before using the display.\r\ndisplay.begin()\r\ndisplay.set_brightness(15)\r\n\r\n# Set up and initialize the temp sensor\r\ni2c = busio.I2C(board.SCL, board.SDA)\r\nbmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(i2c)\r\n\r\nwhile True:\r\n\r\n # Display temperature\r\n\r\n # Figure out the reading\r\n temp_c = bmp280.temperature\r\n temp_f = 9.0 / 5.0 * temp_c + 32\r\n\r\n # Make a string to display\r\n my_str = \"{:4.1f}\".format(temp_f)\r\n\r\n # Display the string\r\n display.clear()\r\n display.set_colon(False)\r\n display.print_number_str(my_str)\r\n display.write_display()\r\n\r\n # Pause\r\n time.sleep(DELAY)\r\n\r\n # Get the current time\r\n currentDT = datetime.datetime.now()\r\n hour = currentDT.hour\r\n minute = currentDT.minute\r\n if hour > 12:\r\n hour -= 12\r\n\r\n # Make a string to display\r\n my_str = \"{:02}{:02}\".format(hour, minute)\r\n\r\n # Display the string\r\n display.clear()\r\n display.print_number_str(my_str)\r\n display.set_colon(True)\r\n display.write_display()\r\n\r\n # Pause\r\n time.sleep(DELAY)\r\n","sub_path":"i2c_time_temp.py","file_name":"i2c_time_temp.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"480206162","text":"import asyncio\nfrom .redis import Redis\nfrom notbot.context import Module, Context\nfrom notbot.services.config_service import get_config_service\nfrom contextlib import asynccontextmanager\n\nMODULE_NAME = \"redis_connection\"\nCONFIG_KEY = \"REDIS\"\n\n\nclass RedisConnection(Module):\n def __init__(self, context: Context):\n self.connection: Redis = None\n self.redis_config = None\n self.config_service = get_config_service(context)\n\n def get_connection(self) -> Redis:\n if not self.connection:\n raise Exception(\"Redis connection has not been initialized yet\")\n return self.connection\n\n def start(self):\n self.redis_config = self.config_service.get_config(CONFIG_KEY)\n self.connection: Redis = Redis()\n loop = asyncio.get_event_loop()\n\n loop.run_until_complete(\n self.connection.connect_pool(\n self.redis_config[\"host\"],\n self.redis_config[\"port\"],\n pw=self.redis_config.get(\"password\", None),\n )\n )\n\n async def multi(self):\n await self.connection.connection_pool.execute(\"MULTI\")\n\n async def exec(self):\n await self.connection.connection_pool.execute(\"EXEC\")\n\n async def discard(self):\n await self.connection.connection_pool.execute(\"DISCARD\")\n\n @asynccontextmanager\n async def with_transaction(self):\n pool = self.connection.connection_pool\n await self.multi()\n try:\n yield pool\n except Exception as error:\n await self.discard()\n raise\n\n await self.exec()\n\n def get_name(self):\n return MODULE_NAME\n\n\ndef get_redis_connection(context: Context) -> RedisConnection:\n return context.get_or_register_module(MODULE_NAME, lambda: RedisConnection(context))\n\n","sub_path":"notbot/db/redis/redis_connection.py","file_name":"redis_connection.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"339546426","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QFont, QPixmap\nfrom PyQt5.QtCore import QTimer\nfrom random import randint\n\ncomputerScore = 0\nplayerScore = 0\n\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Rock Paper Scissors Game\")\n self.setMinimumSize(655, 408)\n self.setMaximumSize(655, 408)\n 
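        # NOTE (illustrative sketch, not part of the original file): the long
        # elif chain in stop() further below enumerates every rock/paper/
        # scissors pairing. With this file's encoding (1=rock, 2=paper,
        # 3=scissors) the outcome can also be computed arithmetically:
        #
        # def rps_winner(com, player):
        #     if com == player:
        #         return 'draw'
        #     # the player wins exactly when player is one step ahead mod 3
        #     return 'player' if (player - com) % 3 == 1 else 'computer'
        #
        # e.g. rps_winner(1, 2) == 'player' (paper beats rock)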
self.UI_DESIGN()\n\n def UI_DESIGN(self):\n #######################################start button#####################################\n start = QPushButton('Start', self)\n start.setGeometry(200, 240, 121, 51)\n font = QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n start.setFont(font)\n start.setObjectName(\"start\")\n start.clicked.connect(self.start)\n #######################################stop button######################################\n stop = QPushButton('Stop', self)\n stop.setGeometry(340, 240, 111, 51)\n font = QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n stop.setFont(font)\n stop.setObjectName(\"stop\")\n stop.clicked.connect(self.stop)\n ######################################computer score label#######################################\n self.computer_score = QLabel('computer score : ', self)\n self.computer_score.setGeometry(100, 30, 181, 31)\n font = QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.computer_score.setFont(font)\n self.computer_score.setObjectName(\"computer_score\")\n ######################################player score###################################################\n self.player_score = QLabel('player score : ', self)\n self.player_score.setGeometry(390, 30, 181, 31)\n font = QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.player_score.setFont(font)\n self.player_score.setObjectName(\"player_score\")\n #######################################image computer#############################################\n self.imageComputer = QLabel(self)\n self.imageComputer.setGeometry(130, 90, 101, 101)\n self.imageComputer.setText(\"\")\n self.imageComputer.setPixmap(QPixmap(\"images/rock_com.png\"))\n self.imageComputer.setScaledContents(True)\n self.imageComputer.setObjectName(\"imagecomputer\")\n #######################################image game###################################################\n self.imagegame = QLabel(self)\n self.imagegame.setGeometry(270, 90, 111, 101)\n self.imagegame.setText(\"\")\n self.imagegame.setPixmap(QPixmap(\"images/game.png\"))\n self.imagegame.setScaledContents(True)\n self.imagegame.setObjectName(\"imagegame\")\n #########################################image player################################################\n self.imagePlayer = QLabel(self)\n self.imagePlayer.setGeometry(420, 90, 101, 101)\n self.imagePlayer.setText(\"\")\n self.imagePlayer.setPixmap(QPixmap(\"images/rock_player.png\"))\n self.imagePlayer.setScaledContents(True)\n self.imagePlayer.setObjectName(\"label_3\")\n ########################################Timer###############################################\n self.timer = QTimer(self)\n self.timer.setInterval(80)\n self.timer.timeout.connect(self.playGame)\n\n self.show()\n\n ###########################################Start function###################################################\n def start(self):\n self.timer.start()\n\n ############################################Start Game function############################################\n\n def playGame(self):\n self.rndcom = randint(1, 3)\n self.rndplayer = randint(1, 3)\n\n if self.rndcom == 1:\n self.imageComputer.setPixmap(QPixmap(\"images/rock_com.png\"))\n elif self.rndcom == 2:\n self.imageComputer.setPixmap(QPixmap(\"images/paper_com.png\"))\n else:\n self.imageComputer.setPixmap(QPixmap(\"images/scissors_com.png\"))\n\n if 
self.rndplayer == 1:\n self.imagePlayer.setPixmap(QPixmap(\"images/rock_player.png\"))\n\n elif self.rndplayer == 2:\n self.imagePlayer.setPixmap(QPixmap(\"images/paper_player.png\"))\n else:\n self.imagePlayer.setPixmap(QPixmap(\"images/scissors_player.png\"))\n\n def stop(self):\n global computerScore\n global playerScore\n self.timer.stop()\n\n if self.rndcom == 1 and self.rndplayer == 1:\n Messbox = QMessageBox.information(self, 'Result', 'Draw in the match')\n\n elif self.rndcom == 1 and self.rndplayer == 2:\n Messbox = QMessageBox.information(self, 'Information', 'Player win')\n playerScore += 1\n self.player_score.setText(f'player score: {playerScore}')\n\n elif self.rndcom == 1 and self.rndplayer == 3:\n Messbox = QMessageBox.information(self, 'Information', 'Computer win')\n computerScore += 1\n self.computer_score.setText(f'computer score: {computerScore}')\n\n elif self.rndcom == 2 and self.rndplayer == 1:\n Messbox = QMessageBox.information(self, 'Information', 'Computer win')\n computerScore += 1\n self.computer_score.setText(f'computer score: {computerScore}')\n\n elif self.rndcom == 2 and self.rndplayer == 2:\n Messbox = QMessageBox.information(self, 'Information', 'Draw in the match')\n\n elif self.rndcom == 2 and self.rndplayer == 3:\n Messbox = QMessageBox.information(self, 'Information', 'Player win')\n playerScore += 1\n self.player_score.setText(f'player score: {playerScore}')\n\n elif self.rndcom == 3 and self.rndplayer == 1:\n Messbox = QMessageBox.information(self, 'Information', 'Player win')\n playerScore += 1\n self.player_score.setText(f'player score: {playerScore}')\n\n elif self.rndcom == 3 and self.rndplayer == 2:\n Messbox = QMessageBox.information(self, 'Information', 'Computer win')\n computerScore += 1\n self.computer_score.setText(f'computer score: {computerScore}')\n\n elif self.rndcom == 3 and self.rndplayer == 3:\n Messbox = QMessageBox.information(self, 'Information', 'Draw in the match')\n\n if computerScore == 3 or playerScore == 3:\n Messbox = QMessageBox.information(self, 'Result', 'Game over')\n\n if computerScore > playerScore:\n mbox = QMessageBox.information(self, 'Result', \"Computer win game\")\n\n else:\n mbox = QMessageBox.information(self, 'Result', \"Player win game\")\n\n sys.exit()\n\n\ndef main():\n App = QApplication(sys.argv)\n window = Window()\n window.start()\n sys.exit(App.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"RPS_Game.py","file_name":"RPS_Game.py","file_ext":"py","file_size_in_byte":7184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"553061471","text":"import datetime\nimport requests\nimport webbrowser\n\nfrom . import auth\nfrom . 
import resources\nfrom .utils import log, tr\n\nfrom qgis.PyQt.QtCore import QSettings\nfrom qgis.core import QgsRasterLayer, QgsProject, Qgis\n\n\nclass TilesManager:\n\n tiles = {\n \"dark-matter\": {\n \"label\": tr(\"Dark Matter\"),\n \"resource\": resources.tiles_preview_darkmatter,\n },\n \"positron\": {\n \"label\": tr(\"Positron\"),\n \"resource\": resources.tiles_preview_positron,\n },\n \"klokantech-basic\": {\n \"label\": tr(\"Klokantech-Basic\"),\n \"resource\": resources.tiles_preview_klokantech,\n },\n \"osm-bright\": {\n \"label\": tr(\"OSM Bright\"),\n \"resource\": resources.tiles_preview_osm,\n },\n }\n\n def __init__(self, main):\n self.main = main\n\n def _get_url(self, identifier):\n app_id, _ = auth.get_app_id_and_api_key()\n disable_https = QSettings().value(\n \"traveltime_platform/disable_https\", False, type=bool\n )\n return \"https://tiles.traveltimeplatform.com/styles/{identifier}/{{z}}/{{x}}/{{y}}.png?key={app_id}&client=QGIS\".format(\n app_id=app_id, identifier=identifier, verify=not disable_https\n )\n\n def add_tiles_to_browser(self):\n # We test access to tiles with API\n test_url = self._get_url(list(self.tiles.keys())[0])\n response = requests.get(test_url.format(z=12, x=2048, y=1361))\n has_tiles = response.ok\n\n if not has_tiles:\n self.main.iface.messageBar().pushMessage(\n \"Info\",\n tr(\n \"TravelTime also offers some background maps for their users. Click here to request access !\"\n ),\n level=Qgis.Info,\n )\n else:\n\n for identifier, tile in self.tiles.items():\n url = self._get_url(identifier)\n label = \"TravelTime - \" + tile[\"label\"]\n\n settings_base = \"qgis/connections-xyz/\" + label\n\n QSettings().setValue(settings_base + \"/url\", url)\n QSettings().setValue(settings_base + \"/zmax\", 20)\n QSettings().setValue(settings_base + \"/zmin\", 0)\n\n # Update GUI\n self.main.iface.reloadConnections()\n","sub_path":"travel_time_platform_plugin/tiles.py","file_name":"tiles.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"362335286","text":"import numpy as np\nimport tensorflow as tf\nimport keras\n\n#Prepare for the FizzBuzz dataset\ndef FizzBuzz(n_start, n_end):\n\tx = []\n\ty = []\n\tfor i in range(n_start, n_end+1):\n\t\tx.append([])\n\t\tx_str = str(bin(i))[2:]\n\t\t# print(i, ':', x_str)\n\n\t\tfor j in range(10):\n\t\t\tif j < len(x_str):\n\t\t\t\tx[i-n_start].append(float(x_str[len(x_str)-j-1]))\n\t\t\telse:\n\t\t\t\tx[i-n_start].append(float(0.0))\n\n\t\t# print(x[i-n_start])\n\n\t\tif i % 15 == 0:\n\t\t\ty.append([0, 0, 0, 1])\n\t\telif i % 5 == 0:\n\t\t\ty.append([0, 0, 1, 0])\n\t\telif i % 3 == 0:\n\t\t\ty.append([0, 1, 0, 0])\n\t\telse:\n\t\t\ty.append([1, 0, 0, 0])\n\n\tx = np.array(x)\n\ty = np.array(y)\n\tprint(x.shape, y.shape)\n\treturn x, y\n# for i in range(15):\n# \tprint(x[i], y[i])\n\nx_train, y_train = FizzBuzz(101, 1000)\nx_test, y_test = FizzBuzz(1, 100)\n\nw_init = tf.random_normal_initializer(mean=0.0, stddev=0.1, dtype=tf.float32)\nb_init = tf.constant_initializer(value=0.0, dtype=tf.float32)\n\n## Build the training model\nmodel = keras.Sequential()\nmodel.add(keras.layers.Dense(100, input_dim=10, activation='relu', kernel_initializer=w_init, bias_initializer=b_init))\n# model.add(keras.layers.Dense(100, input_dim=10, activation='relu'))\n\nmodel.add(keras.layers.Dense(4, activation='softmax', kernel_initializer=w_init, bias_initializer=b_init))\n# model.add(keras.layers.Dense(4, 
activation='softmax'))\n\n\nmodel.compile(loss='categorical_crossentropy',\n\t\t\t  # optimizer='adam',\n\t\t\t  optimizer=keras.optimizers.Adam(lr=1e-3),\n\t\t\t  metrics=['accuracy'],\n\t\t\t  )\n\nmodel.fit(x_train, y_train, epochs=100, batch_size=20)\n\nresult = model.evaluate(x_train, y_train, batch_size=1000)\nresult2 = model.evaluate(x_test, y_test, batch_size=1000)\n\n\nprint('Acc:', round(result[1], 2))\nprint('Acc2:', round(result2[1], 2))\n\n\npred = model.predict(x_test[0:20])\nfor i in range(pred.shape[0]):\n\tfor j in range(pred.shape[1]):\n\t\tpred[i][j] = round(pred[i][j], 2)\nprint(x_test[0:20])\nprint(pred)\n\n\n","sub_path":"LHY-ML/demo-2-FizzBuzz-keras.py","file_name":"demo-2-FizzBuzz-keras.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"377317281","text":"# Read a file and reflow it to screen\n\nimport textwrap\nimport os\n\n# Set up textwrappers and printers:\n\nnarrow = textwrap.TextWrapper(width = 40)\n\ndef line_printer(l):\n    \"\"\"Prints a list of lines, line by line.\"\"\"\n    for line in l:\n        print(line)\n\n\n# Open file. Read it as a single string so TextWrapper can reflow it;\n# readlines() would hand wrap() a list and raise an AttributeError.\n\nwith open('example_file.fly', 'r') as reader:\n    x = reader.read()\n\n# reflow\nl = narrow.wrap(x)\n\n#output\nline_printer(l)\n","sub_path":"Stream_Reader/flowtest.py","file_name":"flowtest.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"498550513","text":"# Chris Chueh\r\n# 3/24/2014\r\n# Creates a Music Player\r\n\r\nimport pygame\r\nfrom tkinter import *\r\n\r\n\r\nclass DoublyLinkedListNode:\r\n    def __init__(self, title, artist, file, album, Next, Previous):\r\n        #Construct a new Linked List Node\r\n        self.title = title\r\n        self.artist = artist\r\n        self.file = file\r\n        self.album = album\r\n        self.next = Next\r\n        self.previous = Previous\r\n        return\r\n    \r\n\r\nclass DoublyLinkedList:\r\n    def __init__(self):\r\n        #Construct a new LinkedList. The first node and last node are the same. 
Size is 0 self.firstNode = LinkedListNode(None, None)\r\n self.firstNode = DoublyLinkedListNode(None, None, None, None, None, None)\r\n self.lastNode = self.firstNode\r\n self.size = 0\r\n return\r\n\r\n def addToFront(self, title, artist, file, album):\r\n #Add a node to the front of the list\r\n node = DoublyLinkedListNode(title, artist, file, album, None, None)\r\n node.title = title;\r\n node.artist = artist;\r\n node.file = file;\r\n node.album = album;\r\n if self.firstNode.title == None:\r\n self.firstNode = node\r\n self.lastNode = node\r\n else:\r\n self.firstNode.previous = node\r\n node.next = self.firstNode\r\n self.firstNode = node\r\n\r\n self.size += 1\r\n \r\n def addToRear(self, title, artist, file, album):\r\n #Add a node to the back of the list\r\n node = DoublyLinkedListNode(title, artist, file, album, None, None)\r\n node.title = title;\r\n node.artist = artist;\r\n node.file = file;\r\n node.album = album;\r\n if self.firstNode.title == None:\r\n self.firstNode = node\r\n self.lastNode = node\r\n else:\r\n self.lastNode.next = node\r\n node.previous = self.lastNode\r\n self.lastNode = node\r\n\r\n self.size += 1\r\n\r\n return\r\n\r\n def removeFromFront(self):\r\n #Remove a node from the front of the list\r\n\r\n if self.size == 0:\r\n frontData = None\r\n else:\r\n currentNode = self.firstNode\r\n frontData = currentNode.title\r\n\r\n #This is the case where we have only one node in the list\r\n if currentNode.next == None:\r\n self.firstNode = DoublyLinkedListNode(None, None, None, None, None, None)\r\n self.lastNode = self.firstNode\r\n self.size = self.size - 1\r\n else:\r\n\r\n #Here there are more than one nodes in the list\r\n nextNode = currentNode.next\r\n nextNode.previous = None\r\n self.firstNode = nextNode\r\n self.size = self.size - 1\r\n\r\n return frontData\r\n\r\n def removeFromRear(self):\r\n #Remove a node from the rear of the list\r\n\r\n if self.size == 0:\r\n rearData = None\r\n else:\r\n currentNode = self.lastNode\r\n rearData = currentNode.title\r\n\r\n # This is the case where we have only one node in the list\r\n if currentNode.previous == None:\r\n self.firstNode = DoublyLinkedListNode(None, None, None, None, None, None)\r\n self.lastNode = self.firstNode\r\n self.size = self.size - 1\r\n else:\r\n\r\n # Here there are more than one nodes in the list\r\n previousNode = currentNode.previous\r\n previousNode.next = None\r\n self.lastNode = previousNode\r\n self.size = self.size - 1\r\n\r\n return rearData \r\n\r\n#Plays the song\r\ndef playsong():\r\n pygame.mixer.music.play()\r\n\r\n#Pauses the music that is playing or unpauses the paused music\r\ndef pausesong():\r\n global paused\r\n if paused:\r\n pygame.mixer.music.unpause()\r\n paused = False\r\n else:\r\n pygame.mixer.music.pause()\r\n paused = True\r\n\r\n#Stops the music. If replayed, it restarts from the beginning. 
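# NOTE (illustrative sketch, not part of the original file): nextsong and
# previoussong below wrap around at the ends of the doubly linked list,
# which makes the playlist behave as if it were circular:
#
# def circular_next(node, playlist):
#     # follow the 'next' pointer, wrapping back to the head at the tail
#     return node.next if node.next is not None else playlist.firstNode
#
# Repeated calls cycle through every song and never fall off the list.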
\r\ndef stopsong():\r\n pygame.mixer.music.stop()\r\n\r\n#Moves to the next song\r\ndef nextsong():\r\n global currentsong\r\n if currentsong.next != None:\r\n currentsong = currentsong.next\r\n else:\r\n currentsong = songslist.firstNode\r\n update()\r\n \r\n#Moves to the previous song\r\ndef previoussong():\r\n global currentsong\r\n if currentsong.previous != None:\r\n currentsong = currentsong.previous\r\n else:\r\n currentsong = songslist.lastNode\r\n update()\r\n\r\n#Adds a new song\r\ndef addsong():\r\n a.delete(0, END)\r\n b.delete(0, END)\r\n c.delete(0, END)\r\n d.delete(0, END)\r\n frame.grid(row = 2, column = 1)\r\n \r\n#Adds the title, artist, album, and file type that the user has entered\r\ndef addfinal():\r\n global a, b, c, d\r\n songslist.addToRear(a.get(), b.get(), c.get(), d.get())\r\n frame.grid_forget()\r\n\r\n# Removes the current song displayed\r\ndef deletesong():\r\n global currentsong\r\n if currentsong == songslist.firstNode:\r\n songslist.removeFromFront()\r\n currentsong = songslist.firstNode\r\n elif currentsong == songslist.lastNode:\r\n songslist.removeFromRear()\r\n currentsong = songslist.lastNode\r\n else:\r\n currentsong = currentsong.next\r\n currentsong.previous.previous.next = currentsong\r\n currentsong.previous = currentsong.previous.previous\r\n update()\r\n\r\n\r\n# Clears search entry box \r\ndef clearsearch():\r\n searchtext.delete(0, END)\r\n frame2.grid(row = 3, column = 1)\r\n\r\n# Looks for information based on user input. \r\ndef search():\r\n count = 0\r\n global currentsong\r\n temp = currentsong\r\n textresults.insert(END, '')\r\n stext = searchtext.get()\r\n for i in range(songslist.size):\r\n if stext.lower() in currentsong.title.lower() or stext.lower() in currentsong.artist.lower() or stext.lower() in currentsong.album.lower():\r\n string = 'Title: ' + currentsong.title + '\\nAlbum: ' + currentsong.album + 'Artist: ' + currentsong.artist + '\\n\\n'\r\n textresults.insert(END, string)\r\n count += 1\r\n if currentsong == songslist.lastNode:\r\n currentsong = songslist.firstNode\r\n else:\r\n currentsong = currentsong.next\r\n frame2.grid_forget()\r\n if count > 0:\r\n frame1.grid(row = 4, column = 1)\r\n \r\n else:\r\n messagebox.showinfo('Oh no!', 'No Results Found!')\r\n textresults.delete(1.0, END)\r\n currentsong = temp\r\n update()\r\n\r\n#Removes search box from view\r\ndef donewithsearch():\r\n frame1.grid_forget()\r\n textresults.delete(1.0, END)\r\n return\r\n\r\n#Updates current song information\r\ndef update():\r\n global currentsong\r\n if currentsong.title != None and currentsong.title != '':\r\n pygame.mixer.music.load(currentsong.file)\r\n titlelabel['text'] = currentsong.title\r\n artistlabel['text'] = currentsong.artist\r\n albumlabel['text'] = currentsong.album\r\n else:\r\n titlelabel['text'] = ''\r\n artistlabel['text'] = ''\r\n albumlabel['text'] = ''\r\n\r\n# Imports songs from the text file and initializes pygame. 
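# NOTE (assumption inferred from the split(',') below): each line of
# songlist.txt is expected to hold four comma-separated fields, e.g.
#
#   My Song,Some Artist,songs/my_song.mp3,Some Album
#
# A line with fewer than four fields raises an IndexError when indexed for
# addToRear, and the fourth field keeps its trailing newline as written.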
\r\npaused = False\r\nsonglist = open('songlist.txt', 'r')\r\nsongs = songlist.readlines()\r\nsongslist = DoublyLinkedList()\r\nfor i in range(len(songs)):\r\n songgroup = (songs[i].split(','))\r\n songslist.addToRear(songgroup[0], songgroup[1], songgroup[2], songgroup[3]) \r\ncurrentsong = songslist.firstNode\r\npygame.mixer.init()\r\npygame.mixer.music.load(currentsong.file)\r\n\r\nroot =Tk()\r\nframe1 = Frame(root)\r\nframe1.grid(row = 1, column = 1)\r\nroot.title('ITunez')\r\n\r\n# Creates labels the song information\r\nLabel(frame1, text = 'Title:', height = 2).grid(row = 1, column = 1)\r\ntitlelabel = Label(frame1, text = currentsong.title, height = 2, width = 20)\r\ntitlelabel.grid(row = 1, column = 2, columnspan = 2)\r\nLabel(frame1, text = 'Artist:', height = 2).grid(row = 2, column = 1)\r\nartistlabel = Label(frame1, text = currentsong.artist, height = 2, width = 20)\r\nartistlabel.grid(row = 2, column = 2, columnspan = 2)\r\nLabel(frame1, text = 'Album:', height = 2).grid(row = 3, column = 1)\r\nalbumlabel = Label(frame1, text = currentsong.album, height = 2, width = 20)\r\nalbumlabel.grid(row = 3, column = 2, columnspan = 2)\r\n\r\n# Creates buttons for user manipulation\r\nButton(frame1, text='Play', command = playsong, width = 20).grid(row = 4, column = 2)\r\nButton(frame1, text='Pause', command = pausesong, width = 20).grid(row = 5, column = 2)\r\nButton(frame1, text='Stop', command = stopsong, width = 20).grid(row = 6, column = 2)\r\nButton(frame1, text='Next', command = nextsong, width = 15).grid(row = 5, column = 4)\r\nButton(frame1, text='Previous', command = previoussong, width = 15).grid(row =5, column = 1)\r\n\r\n# Creates buttons to allow users to add or delete songs\r\nButton(frame1, text='Add', command = addsong, width = 15).grid(row = 6, column = 1, sticky = W)\r\nButton(frame1, text='Delete', command = deletesong, width = 15).grid(row = 6, column = 4, sticky = E)\r\n\r\n# Creates buttons for finding songs.\r\nButton(frame1, text='Search', command = clearsearch, width = 15).grid(row = 7, column = 0, columnspan = 2)\r\n\r\n\r\n# Creates entries for adding songs.\r\nframe = Frame(root)\r\nLabel(frame, text = 'Title:').grid(row = 1, column = 1)\r\na =Entry(frame, width = 50)\r\na.grid(row = 1, column = 2)\r\nLabel(frame, text = 'Artist:').grid(row = 2, column = 1)\r\nb = Entry(frame, width = 50)\r\nb.grid(row = 2, column = 2)\r\nLabel(frame, text = 'File Type:').grid(row = 3, column = 1)\r\nc = Entry(frame, width = 50)\r\nc.grid(row = 3, column = 2)\r\nLabel(frame, text = 'Album Name:').grid(row = 4, column = 1)\r\nd = Entry(frame, width = 50)\r\nd.grid(row = 4, column = 2)\r\nButton(frame, text = 'Done', command = addfinal).grid(row = 5, column = 1, columnspan = 2)\r\nframe2 = Frame(root)\r\nsearchtext = Entry(frame1, width = 40)\r\nsearchtext.grid(row = 7, column = 2)\r\nButton(frame1, text = 'Go', command = search, width = 15).grid(row = 7, column = 4)\r\n\r\n# Creates search results.\r\nframe1 = Frame(root)\r\nyscrollbar = Scrollbar(frame1)\r\nyscrollbar.grid(row = 0, column = 1, sticky = N+S)\r\ntextresults = Text(frame1, height = 5, width = 44, wrap = WORD, yscrollcommand=yscrollbar.set)\r\ntextresults.grid(row = 0, column = 0, sticky = N+S+E+W)\r\nyscrollbar.config(command = textresults.yview)\r\nButton(frame1, text = 'Ok', command = donewithsearch).grid(row = 2, column = 
1)\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n","sub_path":"src/Chueh.py","file_name":"Chueh.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"334371115","text":"import cv2\n\nfrom CrystalMatch.dls_util.shape import Rectangle, Point\nfrom CrystalMatch.dls_util.imaging import Image, Color\n\n\nclass Overlayer:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def create_overlay_image(image1, image2, offset, rect_color=Color.black()):\n        \"\"\" For the two images, A and B, where the position of B is offset from that of A, overlay\n        image B onto image A at the appropriate position. The overlaid area will be a blending of the\n        two images. A rectangle will be drawn around the area.\n        \"\"\"\n        # Make a copy of A, the background image\n        background = image1.copy()\n\n        # Get overlapping regions of images\n        overlap_a, overlap_b = Overlayer.get_overlap_regions(image1, image2, offset)\n        if overlap_a is None or overlap_b is None:\n            return background\n\n        # Blend the two overlapping regions\n        perc_a, perc_b = 0.5, 0.5\n        blended = cv2.addWeighted(overlap_a.raw(), perc_a, overlap_b.raw(), perc_b, 0)\n        background.paste(Image(blended), Point(max(offset.x, 0), max(offset.y, 0)))\n        background = background.to_channels(3)\n\n        # Define the rectangle that will be pasted to the background image\n        w, h = image2.size()\n        rect = Rectangle.from_corner(offset, w, h)\n        background.draw_rectangle(rect, color=rect_color)\n\n        return background\n\n    @staticmethod\n    def get_overlap_regions(image1, image2, offset):\n        \"\"\" For the two images, A and B, where the position of B is offset from that of A,\n        return two new images that are the overlapping segments of the original images.\n\n        As a simple example, if image B is smaller than A and it is completely contained\n        within the borders of the image A, then we will simply return the whole of image B,\n        and the section of image A that it overlaps. 
e.g., if A is 100x100 pixels, B is\n 14x14 pixels, and the offset is (x=20, y=30), then the returned section of A will\n be (20:34, 30:44).\n\n If image B only partially overlaps image A, only the overlapping sections of each\n are returned.\n \"\"\"\n rect_a = image1.bounds()\n rect_b = image2.bounds().offset(offset)\n overlap_a_rect = rect_a.intersection(rect_b)\n overlap_a = image1.crop(overlap_a_rect)\n\n rect_a = image1.bounds().offset(-offset)\n rect_b = image2.bounds()\n overlap_b_rect = rect_a.intersection(rect_b)\n overlap_b = image2.crop(overlap_b_rect)\n\n return overlap_a, overlap_b\n","sub_path":"CrystalMatch/dls_imagematch/crystal/align/overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"314425430","text":"# =========================================================================\n# Copyright (C) 2016 Yunify, Inc.\n# -------------------------------------------------------------------------\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this work except in compliance with the License.\n# You may obtain a copy of the License in the LICENSE file, or at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\n\nimport sys\n\nfrom .base import BaseCommand\n\nfrom ..constants import HTTP_OK_CREATED\n\n\nclass MbCommand(BaseCommand):\n\n command = \"mb\"\n usage = \"%(prog)s [-c -z ]\"\n\n @classmethod\n def add_extra_arguments(cls, parser):\n parser.add_argument(\"bucket\", help=\"Name of the bucket to be created\")\n\n parser.add_argument(\n \"-z\",\n \"--zone\",\n dest=\"zone\",\n help=\"In which zone to create the bucket\")\n return parser\n\n @classmethod\n def send_request(cls, options):\n bucket, prefix = cls.validate_qs_path(options.bucket)\n if prefix != \"\":\n print(\"Error: Invalid bucket name\")\n sys.exit(-1)\n zone = \"\"\n if options.zone:\n zone = options.zone\n current_bucket = cls.client.Bucket(bucket, zone)\n resp = current_bucket.put()\n if resp.status_code == HTTP_OK_CREATED:\n print(\"Bucket <%s> created\" % bucket)\n else:\n print(resp.content)\n","sub_path":"qingstor/qsctl/commands/mb.py","file_name":"mb.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"620115467","text":"import random\n\n\nclass Character:\n \"\"\"\n name = name of character\n hp = health points\n atk = attack points\n score = initial score, defaults to zero\n \"\"\"\n\n def __init__(self, name, hp, atk, score=0):\n self.name = name\n self.hp = hp\n self.atk = atk\n self.score = score\n\n def attack(self):\n \"\"\"\n Returns a damage amount to enemy\n \"\"\"\n rand = random.randint(0, 20)\n # serves as a 20 side dice roll for random/chance\n if rand > 7:\n return int(self.atk * 2)\n return 0\n\n def take_damage(self, damage):\n \"\"\"\n Affects current user/player. If player/user hp is\n less or equal to zero (ie. 
if a enemy attacks 20 points and the user only has 10 hp,\n it results in -10 hp) they are killed\n \"\"\"\n self.hp -= damage\n print(\"{} has lost {} health\".format(self.name, damage))\n return self.hp\n\n def is_dead(self):\n \"\"\"\n Returns boolean saying when character is dead.\n \"\"\"\n return self.hp <= 0\n\n def add_points(self, amount): # calculate function for adding game points\n self.score += amount\n return self.score # used to display the final game points score\n\n\nclass Player(Character):\n\n def pick_weapon(self):\n user_pick = input(\"Pick a weapon (1, 2, 3)\")\n if user_pick == \"1\":\n self.atk += 20\n print(\"You have chosen the Dwarven-forged battle axe Jarnbjorn, Wrecker of Worlds as your weapon. Your attack power has increased by 20 points\")\n if user_pick == \"2\":\n self.atk += 15\n print(\"You have donned Megingjord, the belt of strength. Your attack power has increased by 15 points\")\n if user_pick == \"3\":\n self.atk += 50\n print(\"You have chosen Mjolnir, imbued with the might of Thor and created using the core of a star. Your attack power has increased by 50\")\n else:\n print(\"Pick a weapon (1, 2, 3)\")\n\n\nclass Enemy(Character):\n def __init__(self, name, hp, atk, score=0):\n super().__init__(name, hp, atk, score)\n # self.name = name\n # self.hp = hp\n # self.ap = ap\n # self.score = score\n\n\nclass Battle:\n\n def fight(self, user, enemy):\n \"\"\"\n Takes two instances of Character class a user and an enemy combatants.\n Returns the winner.\n \"\"\"\n if enemy.is_dead():\n print(\"The enemy has died\")\n return user\n elif user.is_dead():\n print(\"You have died\")\n return enemy\n\n while not enemy.is_dead() and not user.is_dead():\n enemy.take_damage(user.attack())\n if enemy.is_dead():\n print(\"The enemy has been vanquished\")\n return user\n user.take_damage(enemy.attack())\n if user.is_dead():\n print(\"You have died\")\n return enemy\n\n\nclass Game:\n\n def play(self):\n choice = \"\"\n while choice not in ['quit', 'exit', 'Quit']:\n enemies_template = [\n {'name': 'Destroyer_Armor', \"hp\": 5, \"atk\": 5},\n {'name': 'Amora', 'hp': 10, \"atk\": 10},\n {'name': 'Absorbing_Man', \"hp\": 15, \"atk\": 15},\n ]\n random_index = random.randint(0, len(enemies_template))\n enemy_attributes = enemies_template[random_index]\n enemy = Enemy(**enemy_attributes)\n print(enemy)\n # print(enemy_attributes['name'])\n\n# def battle(self):\n# pass\n# # # next, after y/n, we need to do the reaction outside the loop\n# # if fighting == True:\n\n# #create a list of dictionaries = enemies\n#\n\n\n# # create list of enemies\n# self.enemy_list = []\n# # need to assign point value for each enemy\n\n\n# for x in Enemies_template:\n# self.enemy_list.append(Enemies(name=x[\"name\"], points = x[\"points\"]))\n# #gives the ability to define enemies\n\n\n# counting_points = 0 #calculate game points\n\n# counting_points += enemy.get_points() - damage.get_points()\n# print(self.player.add_points(counting_points)) #player we declared to add game points\n\n\n# else:\n# running = False\n\n# c = Controller()\n# c.game()\n# #activate game\n\n\nuser = Player(\"test_user1\", 30, 100)\nenemy = Enemy(\"enemy\", 10, 200)\nuser.pick_weapon()\nbattle = Battle()\nbattle.fight(user, enemy)\n# enemy = Destroyer_Armor()\n\n\n# # Battle happens here. 
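# NOTE (worked example, not part of the original file): attack() rolls
# randint(0, 20), i.e. 21 equally likely outcomes, and deals atk * 2 damage
# only when the roll exceeds 7 (13 outcomes out of 21). Expected damage per
# attack is therefore (13 / 21) * 2 * atk:
#
#   13 / 21 * 2 * 100  # for test_user1 above (atk=100, before weapon bonus)
#   # -> 123.8095...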
Battle uses while loop\n# while :\n# enemy_killed = enemy.take_damage(user.atk())\n# user_killed = user.take_damage(enemy.atk())\n# # attack is ongoing until some condition is met\n# # Battle ends, either enemy or user is killed\n# while enemy_killed:\n","sub_path":"characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"112840920","text":"# -*- coding: utf-8 -*-\n\"\"\"Interface between Generator REST-API and testcases.\"\"\"\nfrom .rest_api_base import RestApiBase\n\n\nclass Generator(RestApiBase):\n \"\"\"Generator interface.\"\"\"\n\n def __init__(self, addr, demo):\n \"Init Generator class by using base class\"\n super(Generator, self).__init__(addr, demo)\n self.rf_url = f'http://{self.addr}/api/generator/v1/setRf'\n self.testmodel_url = f'http://{self.addr}/api/generator/v1/setSignal'\n\n def preset_instrument(self):\n \"\"\"Send set RF command to adjust RF level REST-API.\"\"\"\n return self.rest_api_put(\n f'http://{self.addr}/api/generator/v0/preset', payload=None)\n\n def set_testmodel(self,\n group_data,\n test_param,\n carrier_id,\n frequency,\n level,\n rb_offset):\n \"\"\"Send set testmodel command to REST-API.\"\"\"\n return self.rest_api_post(\n self.testmodel_url,\n payload={'signals': [\n self._generator_api_payload(group_data,\n test_param,\n carrier_id,\n frequency,\n level,\n rb_offset)]})\n\n def set_interfering_signal(self, test_param, frequency, level):\n \"\"\"Send set interferer command to REST-API.\"\"\"\n # used when 2nd generator for interfering signal\n return self.rest_api_post(\n self.testmodel_url,\n payload={'signals': [self._interferer_api_payload(test_param,\n frequency,\n level,\n port=1)]})\n\n def change_interfering_arb_file(self, test_param, frequency, level, port):\n \"\"\"Send set interferer command to REST-API.\"\"\"\n # used when 2nd generator for interfering signal\n return self.rest_api_put(\n self.testmodel_url,\n payload={'signals': [self._interferer_api_payload(test_param,\n frequency,\n level,\n port)]})\n\n def set_awgn(self, group_data, test_param,\n carrier_id, ws_frequency, ws_level,\n if_frequency, if_level):\n \"\"\"Sends AWGN State ON command to REST-API.\"\"\"\n return self.rest_api_put(\n f'http://{self.addr}/api/generator/v1/setAwgn',\n payload={'signals': [self._generator_api_payload(group_data,\n test_param,\n carrier_id,\n ws_frequency,\n ws_level),\n self._interferer_api_payload(test_param,\n if_frequency,\n if_level,\n port=1,\n bandwidth=group_data['dutConfig'][carrier_id]['carrier']\n ['carrier']['uplinkBandwidthMHz'])]})\n\n def set_wanted_and_interfering_signal(self,\n group_data,\n test_param,\n carrier_id,\n ws_frequency,\n ws_level,\n rb_offset,\n if_frequency,\n if_level):\n \"\"\"Send set testmodel and interferer command to REST-API.\"\"\"\n # used when modulated interfering signal comes from SMW port 2\n return self.rest_api_post(\n self.testmodel_url,\n payload={'signals': [self._generator_api_payload(group_data,\n test_param,\n carrier_id,\n ws_frequency,\n ws_level,\n rb_offset),\n self._interferer_api_payload(test_param,\n if_frequency,\n if_level,\n port=2)]})\n\n def set_rf(self, group_data, test_param, carrier_id, frequency, level):\n \"\"\"Send set RF command to adjust RF level REST-API.\"\"\"\n return self.rest_api_put(\n self.rf_url,\n payload=self._generator_api_payload(group_data,\n test_param,\n carrier_id,\n frequency,\n level))\n\n def set_interferer_rf(self, 
test_param, frequency, level, port):\n \"\"\"Send set interferer RF command to REST-API.\"\"\"\n return self.rest_api_put(\n self.rf_url,\n payload=self._interferer_api_payload(test_param,\n frequency,\n level,\n port))\n\n def set_rf_off(self, group_data, test_param, carrier_id, frequency, level):\n \"\"\"Send set RF command to REST-API.\"\"\"\n return self.rest_api_put(\n self.rf_url,\n payload=self._generator_api_payload(group_data,\n test_param,\n carrier_id,\n frequency,\n level,\n generator_output_state=False))\n\n def set_interferer_rf_off(self, test_param, frequency, level, port):\n \"\"\"Send set interferer RF command to REST-API.\"\"\"\n return self.rest_api_put(\n self.rf_url,\n payload=self._interferer_api_payload(test_param,\n frequency,\n level,\n port,\n generator_output_state=False))\n\n def _generator_api_payload(self, group_data, test_param, carrier_id, frequency, level, rb_offset=0,\n generator_output_state=True, direction='uplink'):\n \"\"\"Private method. DO NOT USE THIS OUTSIDE\"\"\"\n arb_file = \"\"\n arb_offset_correction = 0\n for i in group_data['dutConfig'][carrier_id]['testmodels'][0]['devices']['generator']:\n if i['name'] == 'smw' and i['rbOffset'] == rb_offset:\n arb_file = i['fileName']\n arb_offset_correction = i.get('basebandRmsOffset', 0)\n\n return {\n \"arbFile\": arb_file,\n \"level\": level,\n \"frequencyMHz\": frequency,\n \"bandwidthMHz\": group_data['dutConfig'][carrier_id]['carrier']['carrier'][f'{direction}BandwidthMHz'],\n \"generatorLevelOffset\": test_param.get('wantedSignalCalibration', 0),\n \"generatorCalibrationFile\": \"\",\n \"basebandRmsOffset\": arb_offset_correction,\n \"port\": 1,\n \"generatorOutputState\": generator_output_state\n }\n\n def _interferer_api_payload(self, test_param, frequency, level, port, bandwidth=0,\n generator_output_state=True):\n \"\"\"Private method. 
DO NOT USE THIS OUTSIDE\"\"\"\n # basebandRmsOffset hardcoded to 0 because all interfering arb files dl_ul_0_10\n return {\n \"arbFile\": test_param.get('interfererArbFile', None),\n \"level\": level,\n \"frequencyMHz\": frequency,\n \"bandwidthMHz\": bandwidth,\n \"generatorLevelOffset\": test_param['interfererCalibration'],\n \"generatorCalibrationFile\": \"\",\n \"basebandRmsOffset\": 0,\n \"port\": port,\n \"generatorOutputState\": generator_output_state,\n }\n","sub_path":"OTA_test_sequence/T-GATE-5G-SEQUENCER/T-GATE-5G-SEQUENCER/sequencer/hw/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"603655958","text":"'''\r\nWrite a program that asks user to enter a word and prints out whether the word contains any vowels if so, how many?\r\n'''\r\n\r\n\r\n\r\nstr=input('enter a string')\r\nvowels = 0\r\nfor i in str:\r\n if i in 'aeiou':\r\n vowels = vowels+1\r\nprint('no.of vowels are: ')\r\nprint(vowels)\r\n","sub_path":"17 STRING PROGRAMMING Q2-MdHaseebHussain.py","file_name":"17 STRING PROGRAMMING Q2-MdHaseebHussain.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"382008353","text":"import functools\n\nimport tensorflow as tf\n\nimport dl_papers.common.layers as dl_layers\n\n# -----------------------------------------------------------------------------\n\n\ndef wide_resnet_cifar(\n inputs,\n num_classes,\n depth,\n width_factor,\n dropout_rate=0,\n scalar_gate=False,\n data_format='channels_last',\n training=False,\n):\n assert (depth - 4) % 3 == 0, \"impossible network depth\"\n\n conv2d = functools.partial(\n dl_layers.resnet.conv2d,\n data_format=data_format,\n )\n\n residual_group = functools.partial(\n dl_layers.resnet.residual_group,\n num_layers=(depth - 4) / 3,\n dropout_rate=dropout_rate,\n scalar_gate=scalar_gate,\n data_format=data_format,\n training=training,\n )\n\n batch_normalization = functools.partial(\n dl_layers.batch_normalization,\n axis=dl_layers.get_channel_axis(data_format),\n training=training,\n )\n\n global_avg_pooling2d = functools.partial(\n tf.reduce_mean,\n axis=dl_layers.get_spatial_axes(data_format),\n )\n\n net = inputs\n\n net = conv2d(net, 16, 3, name='pre_conv')\n\n net = residual_group(\n net,\n filters=16 * width_factor,\n strides=1,\n name='group_1',\n )\n net = residual_group(\n net,\n filters=32 * width_factor,\n strides=2,\n name='group_2',\n )\n net = residual_group(\n net,\n filters=64 * width_factor,\n strides=2,\n name='group_3',\n )\n\n net = batch_normalization(net, name='post_bn')\n net = tf.nn.relu(net, name='post_relu')\n net = global_avg_pooling2d(net, name='post_pool')\n\n net = tf.layers.dense(net, num_classes, name='output')\n\n return net\n\n\n# -----------------------------------------------------------------------------\n\nwide_resnet_cifar10 = functools.partial(\n wide_resnet_cifar,\n num_classes=10,\n depth=16,\n width_factor=4,\n)\n\nwide_gated_resnet_cifar10 = functools.partial(\n wide_resnet_cifar10,\n scalar_gate=True,\n)\n","sub_path":"dl_papers/wide_resnet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"149337316","text":"# Programa para carga de datos en BBDD SQLite\n\nimport sqlite3\nfrom tkinter import *\nfrom tkinter import messagebox\n\nfrom Clases 
import Paises\n\n\ncolor=\"#f2d05e\"\n\n\n# - - - - - - - functions - - - - - - - - \n\ndef grabarPais(cod,pais,cuit):\n    \n\n    # - - - input validation -----\n    f=True\n\n    \n    if len(cod)!=2:\n        messagebox.showerror(\"ERROR\", \"Cantidad del <<>>\\n deben ser 2 caracteres\")\n        return\n    \n    cod=cod.upper()\n    \n    if pais==\"\":\n        f=False\n        \n    if len(cuit)!=11:\n        messagebox.showerror(\"ERROR\", \"Cantidad del <<>>\\n deben ser 11 caracteres\")\n        return\n    \n    if not f:\n        messagebox.showwarning(\"ERROR\", \"Faltó completar campos...\")\n        return\n    \n\n    dato_pais=Paises.Paises()\n    dato_pais.setear_pais(cod,pais,cuit)\n    dato_pais.guardar_pais()\n    \n    \n    limpiaDatos()\n\ndef limpiaDatos():\n    cod.set(\"\")\n    pais.set(\"\")\n    cuit.set(\"\")\n    \n    codigoEntry.focus()\n    \n\n\n# - - - - - - - - - -  main program - - - - - - - \n\n\nraiz=Tk()\nraiz.title(\"Carga de Paises\")\nraiz.iconbitmap(\"images/logo.ico\")\nraiz.resizable(0,0)\n\nframe=Frame(raiz)\nframe.config(bg=color, width=\"650\", height=\"350\")\nframe.pack(fill=\"both\", expand=\"False\")\n\ncod = StringVar()\npais = StringVar()\ncuit = StringVar()\n \n# - - - - - Labels - - - - - -\ncodigoLbl=Label(frame,text=\"Código País: \")\ncodigoLbl.config(bg=color)\ncodigoLbl.grid(row=0,column=0,sticky=\"e\",padx=5, pady=5)\n\npaisLbl=Label(frame,text=\"País: \")\npaisLbl.config(bg=color)\npaisLbl.grid(row=1,column=0,sticky=\"e\",padx=5, pady=5)\n\ncuitLbl=Label(frame,text=\"CUIT del país: \")\ncuitLbl.config(bg=color)\ncuitLbl.grid(row=2,column=0,sticky=\"e\",padx=5, pady=5)\n\n# - - - - - Entries - - - - - - \ncodigoEntry=Entry(frame,textvariable=cod,width=2)\ncodigoEntry.grid(row=0,column=1,sticky=\"w\",padx=5, pady=5,ipady=5)\ncodigoEntry.config(font=\"Arial 15\")\n\npaisEntry=Entry(frame,textvariable=pais)\npaisEntry.grid(row=1,column=1,sticky=\"w\",padx=5, pady=5)\npaisEntry.config(font=\"Arial 15\")\n\ncuitEntry=Entry(frame,textvariable=cuit,width=11)\ncuitEntry.grid(row=2,column=1,sticky=\"w\",padx=5, pady=5)\ncuitEntry.config(font=\"Arial 15\")\n\nguardarBtn=Button(frame,text=\"Guardar\", command=lambda:grabarPais(cod.get(),pais.get(),cuit.get()))\nguardarBtn.grid(row=3,column=0,columnspan=2,ipady=5)\nguardarBtn.config(width=\"60\")\n\nlimpiarBtn=Button(frame,text=\"Limpiar\", command=lambda:limpiaDatos())\nlimpiarBtn.grid(row=4,column=0,columnspan=2,ipady=5)\nlimpiarBtn.config(width=\"60\")\n\n\nsalirBtn=Button(frame,text=\"Salir\",command=raiz.destroy)\nsalirBtn.grid(row=5,column=0,columnspan=2,ipady=5)\nsalirBtn.config(width=\"60\")\n\ncodigoEntry.focus()\n \nraiz.mainloop()\n","sub_path":"view_paises.py","file_name":"view_paises.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"421028297","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom articles.items import CnnSportArticles\n\nclass SkynewsarabiaHealthSpider(scrapy.Spider):\n    name = 'skynewsarabia_health'\n    allowed_domains = ['skynewsarabia.com']\n    counter = 0 # max = 3012\n    custom_settings = {\n        'FEED_EXPORT_FIELDS': [\"article_content\", \"tags\"],\n    }\n    technology = \"https://www.skynewsarabia.com/technology/\"\n    api_link = 
\"https://api.skynewsarabia.com//rest/v2/topic/774401/loadMore.json?contentType=ARTICLE&idsToExclude=1296439,1296416,1296294,1296303,1296241,1296210,1296065,1295674,1295402,1295127,1294320,1249425,1037210,986638,903289,883400&offset={}&pageSize=12\"\n start_urls = [api_link.format(counter)]\n\n def parse(self, response):\n articles = json.loads(response.text)\n\n # to get the link for each article we need to combine both the id and the urlFriendlySuffix in one link\n for article in range(0, len(articles[\"contentItems\"])):\n article_id = articles[\"contentItems\"][article][\"id\"]\n article_url = articles[\"contentItems\"][article][\"urlFriendlySuffix\"]\n relative_link = article_id + \"-\" + article_url\n full_link = self.technology + relative_link\n yield scrapy.Request(url=full_link, callback=self.parse_details)\n\n self.counter += 12\n if self.counter <= 3012:\n next_page = self.api_link.format(self.counter)\n yield response.follow(url=next_page, callback=self.parse)\n\n\n def parse_details(self, response):\n list_content = []\n middle_east = CnnSportArticles()\n middle_east[\"title\"] = response.css(\"div.sna_content_head_cont h1.sna_content_heading::text\").extract_first() \\\n .strip()\n for i in response.css(\"div.article-body div#firstBodyDiv > p:nth-child(n+1)\"):\n list_content.append(\"\".join(i.xpath('descendant-or-self::text()').extract()))\n middle_east[\"article_content\"] = list_content\n middle_east[\"tags\"] = response.css(\"div.article-tags.noprint div a h2::text\").extract()\n if middle_east[\"article_content\"] and len(middle_east[\"tags\"]) > 1: # we need more than 2 tags at least!\n yield middle_east\n\n","sub_path":"articles/articles/spiders/skynewsarabia_health.py","file_name":"skynewsarabia_health.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"233605079","text":"'''\n모든 parquet에 대해서 preprocessing 적용시키기\n'''\nimport sys, os\nsys.path.append('..')\n\nfrom tqdm import tqdm\n\nfrom utils.preprocessing import *\nimport core.config as conf\n\nclass Dataset:\n def __init__(self, training_flag=True):\n self.all_features_to_idx = dict(zip(conf.raw_features, range(len(conf.raw_features))))\n\n # save trian datas\n # self.load_data_all(path=conf.raw_lzo_path, save=True, save_dir='/hdd/preprocessing/train/')\n \n def load_data_all(self, path=conf.raw_lzo_path, save=False, save_dir='.'):\n file_list = os.listdir(path)\n file_list = sorted(file_list)\n\n for file_name in tqdm(file_list):\n if not os.path.exists(save_dir+file_name+'.parquet'):\n client.restart()\n df = dask_cudf.read_csv(f'{path}/{file_name}', sep='\\x01', header=None, names=conf.raw_features+conf.labels)\n df = df.repartition(npartitions=conf.n_partitions)\n df = self.lzo_to_dataframe(df)\n df = df.set_index('id', drop=True)\n df, = dask.persist(df)\n _ = wait(df)\n\n df = self.preprocess(df)\n\n if save:\n save_parquet(df, save_dir+file_name+'.parquet')\n \n del df\n\n def preprocess(self, df):\n df.columns = conf.raw_features + conf.labels\n df = df.drop('text_tokens', axis=1)\n\n df, = dask.persist(df)\n _ = wait(df)\n\n features = ['creator_id', 'engager_id', 'tweet_id', 'tweet_type', 'language', 'creator_follower_count', 'creator_following_count', 'domains', 'media', 'tweet_timestamp']\n df = feature_extraction(df, features=features, labels=conf.labels)\n\n target = 'like' ########### engagement \n df = df.compute().to_pandas() # to pandas\n for c in ([\n ['engager_id'],\n 
['engager_id','tweet_type','language'],\n            ['creator_id'],\n            ['domains','media','tweet_type','language']\n        ]):\n            fname = 'TE_'+'_'.join(c)+'_'+target\n            print( fname )\n            df[fname] = tartget_encoding( df, c, target, 20, 0 )\n        df = cudf.from_pandas(df)\n        df = dask_cudf.from_cudf(df, npartitions=conf.n_partitions).reset_index().drop('index', axis=1)\n\n        return df\n\n    def lzo_to_dataframe(self, df):\n        df['id'] = 1\n        df['id'] = df['id'].cumsum()\n        df['id'] = df['id'].astype('int32')\n\n        df['reply_timestamp'] = df['reply_timestamp'].fillna(0)\n        df['retweet_timestamp'] = df['retweet_timestamp'].fillna(0)\n        df['retweet_with_comment_timestamp'] = df['retweet_with_comment_timestamp'].fillna(0)\n        df['like_timestamp'] = df['like_timestamp'].fillna(0)\n\n        df['reply_timestamp'] = df['reply_timestamp'].astype('int32')\n        df['retweet_timestamp'] = df['retweet_timestamp'].astype('int32')\n        df['retweet_with_comment_timestamp'] = df['retweet_with_comment_timestamp'].astype('int32')\n        df['like_timestamp'] = df['like_timestamp'].astype('int32')\n\n        df['tweet_timestamp'] = df['tweet_timestamp'].astype( np.int32 )\n        df['creator_follower_count'] = df['creator_follower_count'].astype( np.int32 )\n        df['creator_following_count'] = df['creator_following_count'].astype( np.int32 )\n        df['creator_account_creation']= df['creator_account_creation'].astype( np.int32 )\n        df['engager_follower_count'] = df['engager_follower_count'].astype( np.int32 )\n        df['engager_following_count'] = df['engager_following_count'].astype( np.int32 )\n        df['engager_account_creation']= df['engager_account_creation'].astype( np.int32 )\n\n        df, = dask.persist(df)\n        _ = wait(df)\n\n        return df\n\n    def parse_input_line(self, line):\n        features = line.split(\"\\x01\")\n        tweet_id = features[self.all_features_to_idx['tweet_id']]\n        user_id = features[self.all_features_to_idx['engaging_user_id']]\n        input_feats = features[self.all_features_to_idx['text_tokens']]\n\n        return tweet_id, user_id, input_feats\n","sub_path":"utils/gpu/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"51567838","text":"import argparse\nimport logging\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-v', '--verbose', action='count', default=0)\nargs = parser.parse_args()\n\nlevels = [logging.WARNING, logging.INFO, logging.DEBUG]\nlevel = levels[min(len(levels)-1,args.verbose)] # capped to number of levels\n\nlogging.basicConfig(level=level,\n                    format=\"%(asctime)s %(levelname)s %(message)s\")\n\nlogging.debug(\"a debug message\")\nlogging.info(\"an info message\")\nlogging.warning(\"a warning message\")","sub_path":"logging2.py","file_name":"logging2.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"638768646","text":"import sys\nfrom PyQt4 import QtGui\n\napp = QtGui.QApplication(sys.argv)\n\nwindow = QtGui.QWidget()\nwindow.setGeometry(50, 50, 500, 300) # x, y position (of the window, not the frame around it) and width and height\nwindow.setWindowTitle(\"MyFirstApp\") # Set title\n\nwindow.show()\napp.exec_()\n\n","sub_path":"first_program.py","file_name":"first_program.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"526185636","text":"# coding: utf-8\n\n# # Reading in the data\n#\n# Here the data are read in; the change types are ignored. 
Parameters:\n#\n# - `train_file`, the training data\n# - `test_file`, the test data\n# - `embeddings`, the embeddings\n#\n# Notes (Bodo): construct some edge cases and look at how well this works.\n#\n\n# In[33]:\n\n\ntrain_file = 'changes0910DETAILSDIST2REVERSED.dat'\ntest_file = 'changes1011DETAILSDIST2REVERSED.dat'\nembedding_file = './res/embedding_2009_transH.vec.json'\nembeddings_in_json = 'ent_embeddings'\nids = 'uri2id.txt'\n\nimport matplotlib.pyplot as plt\nfrom sklearn import model_selection\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV\nimport sklearn\nimport numpy as np\nimport json\n\nid_file = open(ids)\nid_dict = {}\n\nfor line in id_file:\n    spl = str.split(line)\n    id_dict[spl[0]] = int(spl[1])\n\ndata = json.load(open(embedding_file))[embeddings_in_json]\n\n\ndef get_vec_from_uri(uri):\n    return data[id_dict[uri]]\n\n\ndef check_vec_in_dict(uri):\n    return uri in id_dict\n\n\nf_train = open(train_file, \"r\")\nf_test = open(test_file, \"r\")\n\ntrain_values_embeddings = []\ntrain_labels = []\ntest_values_embeddings = []\ntest_labels = []\ntrain_values_embeddings_changes = []\ntest_values_embeddings_changes = []\ntrain_values_changes = []\ntest_values_changes = []\n\n\ndef embedding_to_manipulate(actualChangeDetails, embedd_vec):\n    embedd_vec_ret = np.array([0, 0, 0, 0, 0])\n    changeDetails = [item.strip() for item in actualChangeDetails]\n    if \"Deleted\" in changeDetails:\n        embedd_vec_ret[0] = embedd_vec[0] + 1\n    if \"Added\" in changeDetails:\n        embedd_vec_ret[0] = embedd_vec[0] + 2\n    if \"Superclass\" in changeDetails:\n        embedd_vec_ret[0] = embedd_vec[0] + 4\n    if \"Annotation\" in changeDetails:\n        embedd_vec_ret[0] = embedd_vec[0] + 8\n    if \"Renamed\" in changeDetails:\n        embedd_vec_ret[0] = embedd_vec[0] + 16\n    return embedd_vec_ret\n\n\nfor line in f_train:\n    tokens = line.split(\" \")\n    item = tokens[0]\n    label = tokens[1]\n    changeDetails = tokens[4:]\n    if (check_vec_in_dict(item)):\n        vec = get_vec_from_uri(item)\n        train_labels.append(int(label))\n        train_values_embeddings.append(vec)\n        emb_manipulated = embedding_to_manipulate(changeDetails,vec)\n        train_values_changes.append(emb_manipulated)\n        resVec = np.append(vec, emb_manipulated)\n        train_values_embeddings_changes.append(resVec)\n\nfor line in f_test:\n    tokens = line.split(\" \")\n    item = tokens[0]\n    label = tokens[1]\n    changeDetails = tokens[4:]\n    if (check_vec_in_dict(item)):\n        vec = get_vec_from_uri(item)\n        test_labels.append(int(label))\n        test_values_embeddings.append(vec)\n        emb_manipulated = embedding_to_manipulate(changeDetails,vec)\n        test_values_changes.append(emb_manipulated)\n        resVec = np.append(vec, emb_manipulated)\n        test_values_embeddings_changes.append(resVec)\n\n\n# In[34]:\n\n\ninput_data = train_values_changes + test_values_changes\nplot_labels = [\"Deleted\", \"Added\", \"Superclass\", \"Annotation\", 
\"Renamed\"]\ndiag_data = []\nfor i in range(0, len(plot_labels)):\n complete = [vec[i] for vec in input_data]\n diag_data.append(np.sum(complete) / len(input_data))\n\nfig, axes = plt.subplots()\n\naxes.plot(plot_labels, diag_data, 'o')\naxes.set_title('Rel.Häufigkeit der Veränderungstypen')\nplt.tight_layout()\nplt.show()\n\n# In[35]:\n\n\ndeleted_ind = []\nadded_ind = []\nsuperclass_ind = []\nannotation_ind = []\nrenamed_ind = []\n\nall_ind = [deleted_ind, added_ind, superclass_ind, annotation_ind, renamed_ind]\n\nprint(type(train_labels), type(test_labels))\ninput_data = test_values_changes\n\nall_labels = test_labels\n\nfor ind_list_index in range(len(all_ind)):\n for case_index in range(len(all_labels)):\n if input_data[case_index][ind_list_index] > 0:\n all_ind[ind_list_index].append(all_labels[case_index])\ndiag_data = []\nfor label, case_labels in zip(plot_labels, all_ind):\n if (len(case_labels) > 0):\n avg = np.sum(case_labels) / len(case_labels)\n else:\n avg = 0\n diag_data.append(avg)\n print(label, avg)\noverall_avg = np.sum(all_labels) / len(all_labels)\nprint('Overall: ', overall_avg)\n\nfig, axes = plt.subplots()\naxes.plot(plot_labels, diag_data, 'o')\naxes.set_title(\"Relative Häufigkeit der Klasse NOCHANGE nach Veränderungstyp\")\naxes.set_ylabel('Häufigkeit')\naxes.axhline(y=overall_avg)\nplt.show()\n\n# In[36]:\n\n\nfrom sklearn.decomposition import PCA\nfrom mpl_toolkits.mplot3d import Axes3D\n\npca = PCA(n_components=2)\nreduced_data = pca.fit_transform(train_values_embeddings)\n\nxs_reduced = [v[0] for v in reduced_data]\nys_reduced = [v[1] for v in reduced_data]\n\nnochange_indices = [i for i in range(len(train_labels)) if train_labels[i] > 0]\nchange_indices = [i for i in range(len(train_labels)) if train_labels[i] == 0]\n\nxs_reduced_nochange = [xs_reduced[i] for i in nochange_indices]\nys_reduced_nochange = [ys_reduced[i] for i in nochange_indices]\n\nxs_reduced_change = [xs_reduced[i] for i in change_indices]\nys_reduced_change = [ys_reduced[i] for i in change_indices]\n\nprint(change_indices)\nprint(len(xs_reduced_change))\nprint(len(ys_reduced_change))\n\nfig, axes = plt.subplots()\naxes.scatter(xs_reduced_nochange, ys_reduced_nochange, label='mapping statement unchanged')\naxes.scatter(xs_reduced_change, ys_reduced_change, label='mapping statement changed')\naxes.legend()\nplt.show()\n\n\n# # Durchführen des Trainings und der Evaluation\n#\n# Params:\n#\n# - `models`, die Modelle, die evaluiert werden\n\n# In[24]:\n\n\ndef evaluate_data(train_values, train_labels, test_values, test_labels, graph_label):\n names = [\" \"]\n recs = []\n precs = []\n roc_aucs = []\n accs = []\n models = []\n models.append(('MLP 250', MLPClassifier(hidden_layer_sizes=(250,))))\n models.append(('SVM - linear', SVC(kernel=\"linear\")))\n models.append(('SVM - rbf', SVC()))\n models.append(('RandomForest', RandomForestClassifier()))\n models.append(('CART', DecisionTreeClassifier()))\n models.append(('KNN', KNeighborsClassifier()))\n models.append(('NB', GaussianNB()))\n models.append(('LR', LogisticRegression()))\n\n models = reversed(models)\n for name, model in models:\n model.fit(train_values, train_labels)\n result = model.predict(test_values)\n f1 = sklearn.metrics.f1_score(test_labels, result)\n acc = sklearn.metrics.accuracy_score(test_labels, result)\n prec = sklearn.metrics.precision_score(test_labels, result)\n rec = sklearn.metrics.recall_score(test_labels, result)\n roc_auc = sklearn.metrics.roc_auc_score(test_labels, result)\n msg = \"%s: f1:%f acc:%f prec:%f rec:%f 
roc_auc:%f\" % (name, f1, acc, prec, rec, roc_auc)\n print(msg)\n names.append(name)\n recs.append(rec)\n precs.append(prec)\n roc_aucs.append(roc_auc)\n accs.append(acc)\n\n fig, axes = plt.subplots()\n for l in range(len(precs)):\n axes.axvline(x=l, ls='dashed', color='grey', alpha=0.5)\n axes.axhline(y=0.64)\n axes.plot(precs, 'o', label='Precision', ms=7)\n axes.plot(recs, '^', label='Recall', ms=7)\n axes.plot(accs, 's', label='accuracy', ms=7)\n axes.legend()\n axes.set_title(graph_label)\n axes.set_xticklabels(names, rotation=90)\n # plt.xticks(rotation=90)\n\n plt.tight_layout()\n plt.show()\n\n\n# In[25]:\n\n\ntrain_values = train_values_changes\ntest_values = test_values_changes\n\nevaluate_data(train_values, train_labels, test_values, test_labels, 'only changes')\n\n# In[26]:\n\n\ntrain_values = train_values_embeddings_changes\ntest_values = test_values_embeddings_changes\n\nevaluate_data(train_values, train_labels, test_values, test_labels, 'embeddings and changes')\n\n# In[61]:\n\n\ntrain_values = train_values_embeddings\ntest_values = test_values_embeddings\n\nevaluate_data(train_values, train_labels, test_values, test_labels, 'embeddings only')\n\n\n","sub_path":"TransHManipulate.py","file_name":"TransHManipulate.py","file_ext":"py","file_size_in_byte":8537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"351879357","text":"import os\nimport config\nimport cgi\nimport logging\nimport time\nimport webapp2\nimport jinja2\nimport itertools\n# import MySQLdb\nfrom google.appengine.api import rdbms\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext import db\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom controllers import home, design, utilities\nfrom config import *\nfrom array import *\n\ntemplate_path = os.path.join(os.path.dirname(__file__), '../templates')\n\njinja2_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(template_path)\n )\n \nclass OperateProcess(webapp.RequestHandler):\n '''\n Initially displays the O&M page for Process Step Selection\n Uses: operateprocess.html and sub_selector\n '''\n def get(self):\n \n authenticateUser = str(users.get_current_user()) \n featureList = database.gaeSessionNavBuilder()\n processmenu = database.gaeSessionProcessMenu()\n ddb_active_case = database.gaeSessionActiveCase()\n \n conn = config.get_connection()\n cursor = conn.cursor() \n '''\n cursor.execute(\"SELECT case_id, case_nm FROM proc_case WHERE status = 1 AND emp_id =%s\", (authenticateUser))\n ddb_active_case = cursor.fetchall()\n '''\n cursor.execute(\"SELECT * FROM capability.vw_proc_run_sum WHERE proc_step_conf is null AND emp_id = %s\", (authenticateUser))\n openoperations = cursor.fetchall()\n \n conn.close()\n tabindex = 2\n\n template_values = {'ddb_active_case': ddb_active_case, 'processmenu': processmenu, 'authenticateUser': authenticateUser, \n 'openoperations': openoperations, 'featureList': featureList, 'tabindex': tabindex}\n template = jinja2_env.get_template('operateprocess.html')\n self.response.out.write(template.render(template_values))\n \nclass CreateInstance(webapp.RequestHandler):\n '''\nThis object supports selection of PROCESS STEP, creation of the process case grouping, loading it into\nthe proc_run table and then pulls those entries back out to the display.\nInstances are built off the concatenation of timefunction (secs since 1/01/1970 and the username@ function in config to\nEnsure uniqueness. 
If a process fails, it should be marked as a non-conformance and attempted again.\nDisplay: operateprocess.html\nTODO: Instances should load with status value set to initialised, then it should move to submitted or pending.\n'''\n \n def post(self): # post to DB\n authenticateUser = str(users.get_current_user())\n idGenerator = config.IDGenerator() # generates a unique key\n case_key = str(idGenerator) + authenticateUser\n now = config.UTCTime()\n featureList = database.gaeSessionNavBuilder()\n processmenu = database.gaeSessionProcessMenu()\n ddb_active_case = database.gaeSessionActiveCase()\n \n idGenerator = config.IDGenerator() # generates a unique key\n case_key = str(idGenerator) + authenticateUser\n client = memcache.Client()\n client.set('case_key', case_key, 6000) \n now = config.UTCTime()\n \n conn = config.get_connection()\n cursor = conn.cursor()\n\n #create an unique instance key\n cursor.execute('INSERT INTO instance (case_id, proc_step_id, instance_key) '\n 'VALUES (%s, %s, %s)',\n (\n self.request.get('case_id'),\n self.request.get('proc_step_id'),\n (case_key)\n ))\n \n conn.commit()\n \n cursor.execute(\"SELECT proc_case.case_id, proc_case.emp_id, instance.instance_key, proc_req.proc_req_id, process_step.proc_step_id, process.proc_id \"\n \"FROM proc_case \"\n \"INNER JOIN instance on (proc_case.case_id = instance.case_id) \"\n \"INNER JOIN process_step on (instance.proc_step_id = process_step.proc_step_id) \"\n \"INNER JOIN proc_req on (process_step.proc_step_id = proc_req.proc_step_id) \"\n \"INNER JOIN process on (process_step.proc_id = process.proc_id)\"\n \"WHERE instance.instance_key = %s\", (case_key))\n caseMake = cursor.fetchall()\n\n\n for row in caseMake:\n t = (row)\n cursor.execute(\"INSERT INTO proc_run (case_id, emp_id, instance_key, proc_req_id, proc_step_id, proc_id) VALUES (%s, %s, %s, %s, %s, %s) \", t)\n conn.commit()\n\n cursor.execute(\"SELECT proc_run.proc_run_id, proc_run.case_id, proc_run.emp_id, proc_run.instance_key, proc_run.proc_req_id, proc_run.proc_step_id, \"\n \"process.proc_id, proc_case.case_nm, process.proc_nm, process_step.proc_step_nm, process_step.proc_step_sop, proc_run.proc_output_conf, \"\n \"proc_req.proc_req_seq, proc_req.proc_req_nm, proc_req.proc_req_desc, process_step.proc_model_link \"\n \"FROM proc_run \"\n \"INNER JOIN proc_case on (proc_run.case_id = proc_case.case_id) \"\n \"INNER JOIN process on (proc_run.proc_id = process.proc_id) \"\n \"INNER JOIN process_step on (proc_run.proc_step_id = process_step.proc_step_id) \"\n \"INNER JOIN proc_req on (proc_run.proc_req_id = proc_req.proc_req_id) \"\n \"INNER JOIN instance on (proc_run.instance_key = instance.instance_key) \"\n \"WHERE instance.instance_key = %s\", (case_key))\n \n tabindex = 3 \n case = cursor.fetchall()\n \n cursor.execute(\"SELECT * FROM capability.vw_proc_run_sum WHERE proc_step_conf is null AND emp_id = %s\", (authenticateUser))\n openoperations = cursor.fetchall()\n \n conn.close()\n\n template_values = {'authenticateUser': authenticateUser, 'case': case, 'case_key': case_key, 'processmenu': processmenu, 'featureList': featureList,\n 'ddb_active_case': ddb_active_case, 'ddb_active_case': ddb_active_case, 'tabindex': tabindex, 'openoperations': openoperations }\n template = jinja2_env.get_template('operateprocess.html')\n self.response.out.write(template.render(template_values))\n self.response.out.write(case_key)\n \nclass PostProcessRun(webapp.RequestHandler): \n '''\n This process posts the submission of each conforming or non-conforming requirement in 
sequence to the database. \n    ToDo: Remove the proc_run.proc_run_output IS NULL statement and instead display all the entries until the entire requirement \n    has been submitted. When no requirements exist to be fulfilled, then ask if the operator wants to exit or run another process. \n    '''\n    def post(self): \n        now = config.UTCTime()\n        authenticateUser = str(users.get_current_user())\n        featureList = database.gaeSessionNavBuilder()\n        client = memcache.Client()\n        case_key = client.get('case_key')\n        proc_output_conf = self.request.get('proc_output_conf')\n        proc_notes = self.request.get('proc_notes')\n        proc_conseq = self.request.get('proc_conseq')\n        proc_innovation = self.request.get('proc_innovation')\n        proc_run_id = self.request.get('proc_run_id')\n        proc_run_status = self.request.get('proc_run_status')\n        \n        conn = config.get_connection()\n        cursor = conn.cursor()\n        \n        cursor.execute(\"UPDATE proc_run SET \"\n                       \"proc_run_start_tm =%s, proc_output_conf = %s, proc_notes = %s, proc_conseq = %s, proc_innovation = %s, proc_run_status = %s \"\n                       \"WHERE proc_run_id = %s\",\n                       (now, proc_output_conf, proc_notes, proc_conseq, proc_innovation, proc_run_status, proc_run_id ))\n\n        conn.commit()\n        \n        cursor.execute(\"SELECT proc_run.proc_run_id, proc_run.case_id, proc_run.emp_id, proc_run.instance_key, proc_run.proc_req_id, proc_run.proc_step_id, \"\n                       \"process.proc_id, proc_case.case_nm, process.proc_nm, process_step.proc_step_nm, process_step.proc_step_sop, proc_run.proc_output_conf, \"\n                       \"proc_req.proc_req_seq, proc_req.proc_req_nm, proc_req.proc_req_desc \"\n                       \"FROM proc_run \"\n                       \"INNER JOIN proc_case on (proc_run.case_id = proc_case.case_id) \"\n                       \"INNER JOIN process on (proc_run.proc_id = process.proc_id) \"\n                       \"INNER JOIN process_step on (proc_run.proc_step_id = process_step.proc_step_id) \"\n                       \"INNER JOIN proc_req on (proc_run.proc_req_id = proc_req.proc_req_id)\"\n                       \"WHERE proc_run.proc_output_conf IS NULL AND proc_run.instance_key = %s\", (case_key)) #rename this -- bad name!!\n        \n        casecount = cursor.rowcount\n        case = cursor.fetchall() \n        \n        cursor.execute(\"SELECT * FROM capability.vw_proc_run_sum WHERE proc_step_conf is null AND emp_id = %s\", (authenticateUser))\n        openoperations = cursor.fetchall()\n        \n        cursor.execute(\"SELECT case_id, case_nm FROM proc_case WHERE status = 1 AND emp_id =%s\", (authenticateUser))\n        ddb_active_case = cursor.fetchall()\n\n        cursor.execute(\"SELECT DISTINCT proc_id, proc_nm, proc_step_id, proc_step_seq, proc_step_nm \"\n                       \"FROM vw_processes \"\n                       \"WHERE proc_step_status = 'active' OR proc_step_owner = %s \"\n                       \"ORDER BY proc_id, proc_step_seq\", (authenticateUser))\n        processmenu = cursor.fetchall()\n\n        conn.close()\n        \n        if casecount > 0:\n            tabindex = 3\n            template_values = {'processmenu': processmenu, 'authenticateUser': authenticateUser, 'case': case, 'case_key': case_key, \n                               'openoperations': openoperations, 'ddb_active_case': ddb_active_case, 'featureList': featureList,\n                               'tabindex': tabindex, 'casecount':casecount}\n            template = jinja2_env.get_template('operateprocess.html')\n            self.response.out.write(template.render(template_values))\n        else:\n            self.redirect(\"/AssessPerformance\")\n        \n\nclass AssessPerformance(webapp.RequestHandler):\n    '''\n    This displays the completed process step so that the process operator can assess their behaviour against the \n    performance standard.\n    '''\n    def get(self):\n        \n        authenticateUser = str(users.get_current_user()) \n        featureList = database.gaeSessionNavBuilder()\n        client = memcache.Client()\n        case_key = 
client.get('case_key')\n tabindex = 4\n conn = config.get_connection()\n cursor = conn.cursor() \n \n cursor.execute(\"SELECT proc_run.proc_run_id, proc_run.emp_id, proc_run.instance_key, proc_case.case_nm, process.proc_nm, process_step.proc_step_nm, \"\n \"proc_run.proc_output_conf, proc_req.proc_req_seq, proc_req.proc_req_nm, proc_req.proc_req_desc, proc_run.proc_notes, \"\n \"proc_run.proc_conseq, proc_run.proc_innovation \"\n \"FROM proc_run \"\n \"INNER JOIN proc_case on (proc_run.case_id = proc_case.case_id) \"\n \"INNER JOIN process on (proc_run.proc_id = process.proc_id) \"\n \"INNER JOIN process_step on (proc_run.proc_step_id = process_step.proc_step_id) \"\n \"INNER JOIN proc_req on (proc_run.proc_req_id = proc_req.proc_req_id) \"\n \"WHERE proc_run.instance_key = %s\", (case_key)) #rename this -- bad name!!\n \n assessinstance = cursor.fetchall() \n \n conn.close()\n\n template_values = {'authenticateUser': authenticateUser, 'featureList': featureList, 'tabindex': tabindex,\n 'assessinstance': assessinstance, 'case_key': case_key }\n template = jinja2_env.get_template('operateprocess.html')\n self.response.out.write(template.render(template_values))\n \nclass PostProcessAssessment(webapp.RequestHandler): \n '''\n This handler loads the completed process step on to the Assessment page and then sets it up so that he user can assess\n their behaviour and submit it. The key is case_key (name should be changed) as stored in memcache. Plan to use JS to load the \n tickboxes \n '''\n def post(self): \n now = config.UTCTime()\n authenticateUser = str(users.get_current_user())\n featureList = database.gaeSessionNavBuilder()\n processmenu = database.gaeSessionProcessMenu()\n ddb_active_case = database.gaeSessionActiveCase()\n \n perf_stnd_1 = self.request.get('perf_stnd_1')\n perf_stnd_2 = self.request.get('perf_stnd_2')\n perf_stnd_3 = self.request.get('perf_stnd_3')\n perf_stnd_notes_1 = self.request.get('perf_stnd_notes_1')\n perf_stnd_notes_2 = self.request.get('perf_stnd_notes_2')\n perf_stnd_notes_3 = self.request.get('perf_stnd_notes_3')\n client = memcache.Client()\n case_key = client.get('case_key')\n \n if perf_stnd_1 is '':\n perf_stnd_1 = 0\n else:\n perf_stnd_1 = 1\n if perf_stnd_2 is '':\n perf_stnd_2 = 0\n else:\n perf_stnd_2 = 1\n if perf_stnd_3 is '':\n perf_stnd_3 = 0\n else:\n perf_stnd_3 = 1\n \n if case_key is None:\n pass # query for last entry for expired memcache\n else:\n pass \n \n conn = config.get_connection()\n cursor = conn.cursor()\n \n #perf_stnd_1 =%s, perf_stnd_2 = %s, perf_stnd_3 = %s, // perf_stnd_1, perf_stnd_2, perf_stnd_3, //perf_stnd_notes_ts\n cursor.execute(\"UPDATE instance SET \"\n \"perf_stnd_1 = %s, perf_stnd_2 = %s,perf_stnd_3 = %s, perf_stnd_notes_1 = %s, perf_stnd_notes_2 = %s, perf_stnd_notes_3 = %s, perf_stnd_notes_ts = %s \"\n \"WHERE instance_key = %s \",\n (perf_stnd_1, perf_stnd_2, perf_stnd_3, perf_stnd_notes_1, perf_stnd_notes_2, perf_stnd_notes_3, now, case_key ))\n\n conn.commit()\n conn.close() \n\n tabindex = 2\n \n template_values = {'processmenu': processmenu, 'authenticateUser': authenticateUser, 'ddb_active_case': ddb_active_case, 'featureList': featureList,\n 'tabindex': tabindex, 'case_key': case_key}\n template = jinja2_env.get_template('operateprocess.html')\n self.response.out.write(template.render(template_values))\n \nclass CreateCase(webapp.RequestHandler):\n '''\n This object creates a user case against which process run can be associated. Cases are associated with specific users. \n Renders to operateprocess.html. 
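New cases are created with status = 1 (active), and the refreshed\n    active-case list is cached in memcache for 120 seconds. 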
\n    '''\n    def post(self):\n        authenticateUser = str(users.get_current_user()) \n        featureList = database.gaeSessionNavBuilder()\n        processmenu = database.gaeSessionProcessMenu()\n\n        conn = config.get_connection()\n        cursor = conn.cursor() \n        \n        cursor.execute('INSERT INTO proc_case (case_nm, emp_id, status) ' # status = 1 = ACTIVE\n                       'VALUES (%s, %s, 1)',\n                       (\n                        self.request.get('case_nm'),\n                        (authenticateUser),\n                        )) \n        \n        conn.commit() \n        \n        cursor.execute(\"SELECT case_id, case_nm FROM proc_case WHERE status = 1 AND emp_id =%s\", (authenticateUser))\n        ddb_active_case = cursor.fetchall()\n        \n        client = memcache.Client() \n        client.set('ddb_active_case', ddb_active_case, 120) \n        \n        cursor.execute(\"SELECT * FROM capability.vw_proc_run_sum WHERE proc_step_conf is null AND emp_id = %s\", (authenticateUser))\n        openoperations = cursor.fetchall() \n        \n        conn.close()\n        \n        tabindex = 2\n\n        template_values = {'ddb_active_case': ddb_active_case, 'processmenu': processmenu, 'openoperations': openoperations, \n                           'authenticateUser': authenticateUser, 'tabindex': tabindex, 'featureList': featureList }\n        template = jinja2_env.get_template('operateprocess.html')\n        self.response.out.write(template.render(template_values))\n","sub_path":"CapabilityApp/controllers/operate.py","file_name":"operate.py","file_ext":"py","file_size_in_byte":16378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"610601351","text":"from flask import Flask, render_template, request\nimport random\nimport string\nimport json\nimport datetime\n\n# import time\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef main():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/LogString\", methods=[\"POST\", \"GET\"])\ndef LogString():\n    nrow = int(request.form[\"nrow\"])\n    dest = request.form[\"dest\"]\n    format_row = request.form[\"format\"]\n    id_session = \"\".join(\n        random.choice(string.ascii_uppercase + string.digits) for _ in range(10)\n    )\n    for i in range(nrow):\n        data = {}\n        data[\"ID_Sessione\"] = id_session\n        data[\"Timestamp\"] = str(datetime.datetime.now())\n        data[\"Username\"] = (\n            \"\".join(random.choice(string.ascii_lowercase) for _ in range(10))\n            + \"@\"\n            + \"\".join(random.choice(string.ascii_lowercase) for _ in range(5))\n            + \".com\"\n        )\n        data[\"Sorgente\"] = \".\".join([str(random.randint(0, 255)) for x in range(4)])\n        data[\"Servizio\"] = random.choice([\"HTTPS\", \"HTTP\", \"FTP\", \"SFTP\"])\n        data[\"Tipo_Evento\"] = None\n        data[\"Profilo_Utenza\"] = \"\".join(\n            random.choice(string.ascii_uppercase + string.digits) for _ in range(10)\n        )\n        data[\"OCPLOGDEST\"] = dest\n        if format_row == \"JSON\":\n            print(json.dumps(data))\n        else:\n            row = \"<%s>\" % data[\"OCPLOGDEST\"]  # destination tag leads the plain-text row\n            for d in data:\n                if d != \"OCPLOGDEST\":\n                    row += \"[%s]\" % data[d]\n            print(row)\n\n    return \"Logged! 
Rows id: %s\" % id_session\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8080)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"577572512","text":"from datetime import datetime, timedelta\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom social_auth.signals import pre_update\nfrom auth import InstagramBackend\nfrom utils import expire_view_cache\n\nLICENSES = (\n ('CC0', 'Creative Commons Public Domain'),\n ('CC-BY', 'Creative Commons Attribution'),\n ('CC-BY-SA', 'Creative Commons Attribution-ShareAlike'),\n ('CC-BY-NC', 'Creative Commons Attribution-NonCommercial'),\n ('CC-BY-ND', 'Creative Commons Attribution-NoDerivs'),\n ('CC-BY-NC-SA', 'Creative Commons Attribution-NonCommercial-ShareAlike'),\n ('CC-BY-NC-ND', 'Creative Commons Attribution-NonCommercial-NoDerivs'),\n)\n\nLICENSE_URL_MAP = {\n 'CC0': 'http://creativecommons.org/publicdomain/zero/1.0/',\n 'CC-BY': 'http://creativecommons.org/licenses/by/3.0/',\n 'CC-BY-SA': 'http://creativecommons.org/licenses/by-sa/3.0/',\n 'CC-BY-NC': 'http://creativecommons.org/licenses/by-nc/3.0/',\n 'CC-BY-ND': 'http://creativecommons.org/licenses/by-nd/3.0/',\n 'CC-BY-NC-SA': 'http://creativecommons.org/licenses/by-nc-sa/3.0/',\n 'CC-BY-NC-ND': 'http://creativecommons.org/licenses/by-nc-nd/3.0/',\n}\n\n\nclass InstagramInfo(models.Model):\n user = models.ForeignKey(User)\n\n instagram_username = models.CharField(\n _(\"Instagram account\"), null=True, blank=True, max_length=250)\n instagram_id = models.IntegerField()\n full_name = models.CharField(null=True, max_length=250)\n avatar_url = models.URLField(null=True)\n website = models.URLField(null=True)\n\n license = models.CharField(choices=LICENSES, max_length=25,\n default='CC-BY', blank=False)\n start_date = models.DateTimeField(null=True)\n end_date = models.DateTimeField(null=True)\n\n last_used_in_api = models.DateTimeField(null=True)\n\n def license_full_name(self):\n for abbrv, full in LICENSES:\n if self.license == abbrv:\n return full\n\n def license_url(self):\n return LICENSE_URL_MAP[self.license]\n\n\ndef invalidate_index(sender, instance, created, **kws):\n if created:\n expire_view_cache(\"index\")\n\n\npost_save.connect(invalidate_index, sender=InstagramInfo)\n\n\nclass InstagramPhoto(models.Model):\n \"\"\"\n A CC-licensed instagram photo that's associated with an InstagramInfo\n record.\n\n Mirrors the JSON response from Instagram.\n \"\"\"\n license_info = models.ForeignKey(InstagramInfo)\n\n caption = models.TextField(null=True, blank=True)\n created_time = models.DateTimeField(null=True, blank=True)\n filter = models.CharField(max_length=250, null=True, blank=True)\n photo_id = models.CharField(max_length=250, null=True, blank=True)\n image_low_resolution = models.URLField(null=True, blank=True)\n image_standard_resolution = models.URLField(null=True, blank=True)\n image_thumbnail = models.URLField(null=True, blank=True)\n link = models.URLField(null=True, blank=True)\n # XXX TODO make this a M2M field\n tags = models.TextField(null=True, blank=True)\n # XXX TODO make this GeoDjango aware\n location = models.TextField(null=True, blank=True)\n\n def get_absolute_url(self):\n return reverse('instagram-photo', kwargs={\n 'username': 
self.license_info.instagram_username,\n 'photo_id': self.id\n })\n\n\ndef instagram_user_init(sender, user, response, details, **kwargs):\n if InstagramInfo.objects.filter(user=user, end_date__gte=datetime.now()):\n info = InstagramInfo.objects.filter(user=user).order_by('-end_date')[0]\n # Partially-filled-out form from before\n elif InstagramInfo.objects.filter(user=user, start_date=None):\n info = InstagramInfo.objects.filter(user=user, start_date=None)[0]\n else:\n # Create a new instance because their previous one expired\n info = InstagramInfo(user=user)\n info.instagram_username = details['username']\n info.instagram_id = details['user_id']\n info.website = details.get('website', None)\n info.avatar_url = details['avatar_url']\n # Full name stored as first_name by InstagramBackend\n info.full_name = details['first_name'].strip() or details['username']\n info.save()\n return True\n\npre_update.connect(instagram_user_init, sender=InstagramBackend)\n","sub_path":"cc/profiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"522457238","text":"from tkinter import *\n\nglobal balance\nbalance = 0\nbank_window = Tk()\nframe = Frame(bank_window, width=300, height=300)\nframe.pack()\nphoto = PhotoImage(file=\"emilfinans.png\")\nphoto_label = Label(frame, image=photo)\nphoto_label.pack()\n\ndef frame_click(event):\n print(\"Please click a button instead\")\ndef balance_click(event):\n print(balance)\ndef deposit_click(event):\n global balance\n try:\n deposit = float(input(\"How much would you like to deposit? \"))\n except ValueError:\n print(\"You must enter a number\\nTry again\")\n return 0\n else:\n balance += deposit\ndef withdraw_click(event):\n global balance\n try:\n withdraw = float(input(\"How much would you like to withdraw? \"))\n except ValueError:\n print(\"You must enter a number\\nTry again\")\n return 0\n else:\n if withdraw > balance:\n print (\"Your balance is too low\")\n else:\n balance -= withdraw\ndef loan_click(event):\n global balance\n try:\n loan = float(input(\"How much would you like to loan? 
\"))\n except ValueError:\n print(\"You must enter a number\\nTry again\")\n return 0\n else:\n balance += loan\n print(\"You have been granted\", loan, \"as a loan\\nYou will have to pay back\", loan*1.03)\n\nbalance_button = Button(bank_window,text=\"Check balance\")\ndeposit_button = Button(bank_window,text=\"Deposit\")\nwithdraw_button = Button(bank_window,text=\"Withdraw\")\nloan_button = Button(bank_window,text=\"Loan\")\n\nframe.bind(\"\", frame_click)\nbalance_button.bind(\"\", balance_click)\ndeposit_button.bind(\"\", deposit_click)\nwithdraw_button.bind(\"\", withdraw_click)\nloan_button.bind(\"\", loan_click)\nbalance_button.pack()\ndeposit_button.pack()\nwithdraw_button.pack()\nloan_button.pack()\nbank_window.mainloop()\n","sub_path":"BankGUI/bankgui.py","file_name":"bankgui.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"639222225","text":"#need to import app variable\nfrom app import app\n#if you want to render html\nfrom flask import render_template\nfrom app.tables import show_table\nfrom app.graphs import plotPoints\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route('/graphs')\ndef graphs():\n x=[1,2,3,4]\n y=[2.2,3.4,2.6,4.8]\n xlabel='x label'\n ylabel='y label'\n title='flask graph'\n my_graph=plotPoints(x,y,xlabel,ylabel,title)\n return render_template('graphs.html',file=my_graph)\n\n@app.route('/tables')\ndef tables():\n table = show_table()\n return render_template('tables.html',table=table)\n\n# @app.route('/parse')\n# def parse():\n# url='http://'\n# data=getData(url)\n# return render_template('parse.html',data=data)\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"97023911","text":"class Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n \n if not p:\n return not s\n\n first_is_match = bool(s) and (p[0] == s[0] or p[0] == '.')\n\n if len(p) >= 2 and p[1] == \"*\":\n return self.isMatch(s, p[2:]) or (first_is_match and self.isMatch(s[1:], p))\n else:\n return first_is_match and self.isMatch(s[1:], p[1:])","sub_path":"alice/LC010.py","file_name":"LC010.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"34926175","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\nfrom qqbot import QQBotSlot as qqbotslot, RunBot\nfrom middware import handle, get_key_value, qq_ai\nfrom middware import get_all_key_value\n\n\n@qqbotslot\ndef onQQMessage(bot, contact, member, content):\n if bot.isMe(contact, member):\n return\n \n g = str(contact.nick)\n if '@ME' in content:\n bot.SendTo(contact, '@{} '.format(member.name) + getms(content, g))\n else:\n mession = getallms(content, g)\n if mession != '':\n bot.SendTo(contact, mession)\n\n\ndef getms(content, g):\n # 处理请求\n content = content.replace('[@ME]', '').strip()\n\n # 注册中间件\n mw = [\n handle, get_key_value,\n ]\n\n result = ''\n for func in mw:\n result = func(content, group=g)\n if not (result is ''):\n break\n if result is '' or result is None:\n result = qq_ai(content, group=g)\n return result\n\n\ndef getallms(content, g):\n # 处理请求\n content = content.strip()\n # 注册中间件\n mw = [\n get_all_key_value,\n ]\n\n result = ''\n for func in mw:\n result = func(content, 
group=g)\n        if result != '':\n            break\n    return result\n\n\nif __name__ == '__main__':\n    RunBot()","sub_path":"mainbot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"93764347","text":"# Accepted\n#\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def split(self, head):\n        second = head.next\n        last = head\n        while head and head.next:\n            last = second\n            second = second.next\n            head = head.next.next\n        last.next = None\n        return second\n\n    def reverse(self, head):\n        new_root = None\n        while head:\n            next = head.next\n            head.next = new_root\n            new_root = head\n            head = next\n        return new_root\n\n    def merge(self, first, second):\n        while first and second:\n            next = first.next\n            next2 = second.next\n            first.next = second\n            second.next = next\n            first = next\n            second = next2\n\n    # @param head, a ListNode\n    # @return nothing\n    def reorderList(self, head):\n        if not head: return\n        if not head.next: return\n\n        first = head\n        second = self.split(head)\n        reversed = self.reverse(second)\n        self.merge(first, reversed)\n\n","sub_path":"leetcode/reorder-list.py","file_name":"reorder-list.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"374302201","text":"# -*- coding: utf-8 -*-\n\"\"\"\nParse 實價登錄 (Taiwan real-price registration) open data.\n\"\"\"\nimport io\nimport html\nimport logging\nimport pprint\nfrom xml.etree import ElementTree\nfrom datetime_utils import roc_date_to_datetime\n__INT_FIELDS = [\n    u\"建物現況格局-廳\", u\"建物現況格局-房\", u\"建物現況格局-衛\", u\"總價元\",\n    u\"單價元平方公尺\", u\"車位總價元\"]\n__FLOAT_FIELDS = [\n    u\"土地移轉總面積平方公尺\",\n    u\"建物移轉總面積平方公尺\",\n    u\"車位移轉總面積平方公尺\"]\n__ROC_DATE_FIELDS = [u\"交易年月日\", u\"建築完成年月\"]\n\n\ndef _convert_type(doc):\n    for tag_name in __ROC_DATE_FIELDS:\n        if tag_name in doc and doc[tag_name]:\n            doc[tag_name] = roc_date_to_datetime(doc[tag_name])\n    for tag_name in __INT_FIELDS:\n        if tag_name in doc and doc[tag_name]:\n            doc[tag_name] = int(doc[tag_name])\n    for tag_name in __FLOAT_FIELDS:\n        if tag_name in doc and doc[tag_name]:\n            doc[tag_name] = float(doc[tag_name])\n    return doc\n\n\nclass BaseXMLParser(object):\n    \"\"\"\n    Parse housing opendata xml.\n    \"\"\"\n    def __init__(self, xml_path):\n        \"\"\"\n        Args:\n            xml_path -- The full path of FalV.xml. 
If not specified, use default\n                one.\n        \"\"\"\n        self.xml_path = xml_path\n\n    def __read_xml(self):\n        ret = \"\"\n        with io.open(self.xml_path, encoding=\"utf-8\") as _fp:\n            ret = _fp.read()\n        # undo double-escaped XML entities before the final unescape\n        ret = ret.replace(\"&amp;\", \"&\")\n        ret = ret.replace(\"&lt;\", \"<\")\n        ret = ret.replace(\"&gt;\", \">\")\n        return html.unescape(ret)\n\n    def parse(self):\n        e_lvr_land = ElementTree.fromstring(self.__read_xml())\n        for transaction_element in e_lvr_land:\n            doc = {_.tag: _.text for _ in transaction_element}\n            try:\n                doc = _convert_type(doc)\n            except TypeError:\n                pprint.pprint(doc)\n                raise\n            yield doc\n","sub_path":"code/housing_sales_opendata/base_xml_parser.py","file_name":"base_xml_parser.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"20871166","text":"# coding: utf-8\n\"\"\"\nCode to find temperature and density of the narrow line region of an \nAGN given its lines ratio of the ions O[III] or N[II] with the ions \nO[II] or S[II].\n\nUsage:\n\n    If the program runs from the main program (this code), the user will\n    be asked first of all for the pair of ions from (O[III] or N[II]) and\n    (S[II] or O[II]). Then, the user will be asked for the value of the lines \n    ratio for the selected two ions.\n\n    Output:\n        Related to the narrow lines region:\n        Temperature in [K] and Density in [particles cm^-3]\n    \n    Otherwise, if the program is called as a library, then the procedure \n    calling is like\n\n    >>> import fivel_class as f\n    >>> values = f.agn(J1=J1, J2=J2, ion1=ion1, ion2=ion2)\n    >>> Temperature, Density = values.fivel()\n    \n    where J1 stands for the lines ratio of the ion1 (either O[III] or \n    N[II]) and J2 for the lines ratio of the ion2 (S[II] or O[II])\n\n\nAuthors: Angel Daniel Martinez Cifuentes\n         Julián Hernández\n         Juan Camilo Torres Rojas\n\t\t\nDate: 11 March, 2019\n\n\"\"\"\n\n\n\n# Calling libraries\n\nimport numpy as np\n\n\n\n# Start of class\n\nclass agn:\n\t\n\t\n\t\"\"\"\n\t Definition of AGN class. The objects generated by this class are going\n\t to have an \"agn\" class. 
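A typical call (using the NGC 3227 example\n\t values from the __main__ block below) is\n\t\n\t     >>> T, Ne = agn(J1=122.72, J2=1.36, ion1='OIII', ion2='SII').fivel()\n\t\n\t 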
This class returns a tuple when the fivel() \n\t definition inside the class is called.\n\t\"\"\"\n\t\n\t\n\t# Definition of __init__ variables inside the function (Python)\n\t\n\tdef __init__(self, J1=122.72, J2=1.36, ion1=\"OIII\",ion2=\"SII\",show=True):\n\t\tself.J1 = J1\n\t\tself.J2 = J2\n\t\tself.ion1 = ion1\n\t\tself.ion2 = ion2\n\t\tself.show = show\n\n\t\t\n\t\t\n\t# Define physical constants in MKS\n\t\n\th = 6.62607004e-34 # Planck constant\n\tc = 2.99792458e8 # Light speed\n\tk = 1.38064852e-23 # Boltzmann constant\n\tT = 1000 # Arbitrary temperature value\n\t\n\t\n\n\t# Dictionary of the constants for the ions (taken from [Astrophysics of gaseous\n\t# nebulae and Active Galactic Nuclei, Osterbrock, 2006])\n\t\n\tvalues = {'OIII':{'Ods':0.58, 'Lds':4363,'Ads':1.6, 'g':3.00,\n\t\t\t\t\t 'Ops':0.29, 'Lps':2321, 'Aps':2.3e-1,\n\t\t\t\t\t 'Odp1':2.29, 'Ldp1':4959, 'Adp1':6.8e-3,\n\t\t\t\t\t 'Odp2':2.29, 'Ldp2':5007, 'Adp2':2.0e-2},\n\t\t\t 'NII' :{'Ods':0.83, 'Lds':5755,'Ads':1.0, 'g':3.86,\n\t\t\t\t\t 'Ops':0.29, 'Lps':3063, 'Aps':3.3e-2,\n\t\t\t\t\t 'Odp1':2.64, 'Ldp1':6548, 'Adp1':9.8e-4,\n\t\t\t\t\t 'Odp2':2.64, 'Ldp2':6583, 'Adp2':3.0e-3},\n\t\t\t 'SII' :{'Oab':2.76, 'Oag':4.14, 'Obg':7.47,\n\t\t\t\t\t 'Aab':8.8e-4, 'Aag':2.6e-4, 'Lab':6731,\n\t\t\t\t\t 'Lag':6716},\n\t\t\t 'OII' :{'Oab':0.536, 'Oag':0.804, 'Obg':1.17,\n\t\t\t\t\t 'Aab':1.6e-4, 'Aag':3.6e-5, 'Lab':3726,\n\t\t\t\t\t 'Lag':3729}}\n\n\t\n\t# Define Exponential function\n\t\n\tdef E(self,val,T):\n\t\t\n\t\th = 6.62607004e-34 # Planck constant\n\t\tc = 2.99792458e8 # Light speed\n\t\tk = 1.38064852e-23 # Boltzmann constant\n\t\ty = np.exp(-h*c*10**10/(k*T*val))\n\t\t\n\t\treturn y\n\n\t\n\t\n\t# Define lines ratio function\n\n\tdef ratio(self,J, Ne=10, T=1000, ion='OIII'):\n\t\t\n\n\t\t\"\"\"\n\t\t ### Lines ratio function\n\t\n\t\t Function containing the lines ratio for a selected ion\n\t\t (O[III] or N[II] and S[II] or O[II]).\n\t\t \n\t\t If the selected ion is either O[III] or N[II], then the function\n\t\t returns (given an initial density), the temperature via calculating\n\t\t a sign change (name pending). For computational speed, the temperature\n\t\t value goes between 100 and 1e5 Kelvin. 
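The grid is stepped in 0.1 K\n\t\t increments (see the np.arange call below). 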
This range can be changed.\n\t\t \n\t\t Otherwise, if the selected ion is S[II] or O[II], then the \n\t\t function returns the density given a preliminary temperature\n\t\t\"\"\"\n\t\t\n\t\t\n\t\t# Constant values for the available ions\n\t\t\n\t\tgd, gs = 5, 1 # Statistical weights\n\t\tgb, gg = 4, 6 # Statistical weights\n\t\tV = 8.6e-6 \n\t\tdic = self.values[ion]\n\t\t\n\t\t\n\t\t# Conditional selecting the available ions\n\t\t\n\t\t###########################################################\n\t\t\n\t\t\n\t\t# Conditional with OIII and NII ions\n\t\t\n\t\tif ion=='OIII' or ion=='NII':\n\t\t\t\n\t\t\tg = dic['g']\n\t\t\tOds, Lds, Ads = dic['Ods'], dic['Lds'], dic['Ads']\n\t\t\tOps, Lps, Aps = dic['Ops'], dic['Lps'], dic['Aps']\n\t\t\tOpd1, Lpd1, Adp1 = dic['Odp1'], dic['Ldp1'], dic['Adp1']\n\t\t\tOpd2, Lpd2, Adp2 = dic['Odp2'], dic['Ldp2'], dic['Adp2']\n\t\t\t\n\t\t\tf1 = gd*Adp2*Lds/(gs*Ads*Lpd2)\n\t\t\tf2 = gs*(Ads+Aps)/(g*V*Ops)\n\t\t\tf3 = gs*Ads/(g*V*Opd2)\n\t\t\tf4 = gd*Adp2/(g*V*Opd2)\n\t\t\t\n\t\t\tsol = []\n\n\t\t\tdef funct(T,J):\n\t\t\t\tfac = f1*self.E(-Lds,T)\n\t\t\t\ty1 = fac*(Ne/(T**0.5) + f2*(1+(f3/f2)*self.E(Lds,T)))# +\\\n\t\t\t\t\t#Ne/(g*T**0.5)*(Ods/Opd2)*self.E(Lds,T))\n\t\t\t\ty2 = J*(Ne/(T**0.5)+f4)# + \\\n\t\t\t\t#Ne*Ods/(g*Opd2)*self.E(Lds,T)*self.E(Lpd2,T)*self.E(-Lps,T))\n\t\t\t\t\n\t\t\t\treturn y1, y2\n\t\t\t\n\t\t\t# Numpy search for the intersection between the \n\t\t\t# functions y1 and y2 returned by funct()\n\t\t\t\n\t\t\tTe = np.arange(100,1e5,0.1)\n\t\t\tAA = funct(Te,J)[0]\n\t\t\tBB = funct(Te,J)[1]\n\t\t\tidx = np.argwhere(np.diff(np.sign(AA - BB))).flatten()\n\t\t\t\n\t\t\tsol = Te[idx]\n\t\t\t\n\t\t\treturn sol[0]\n\t\t\n\t\t###########################################################\n\t\t\n\t\t\n\t\t# Conditional with the SII and OII ions\n\t\t\n\t\telif ion=='SII' or ion=='OII':\n\t\t\t\n\t\t\tval = self.values[ion]\n\t\t\tC = V/(T**0.5)\n\t\t\tOab, Oag, Obg = val['Oab'], val['Oag'], val['Obg']\n\t\t\tAab, Aag = val['Aab'], val['Aag']\n\t\t\tLab, Lag = val['Lab'], val['Lag']\n\n\t\t\tff1 = 1+(Obg/Oab)+(Obg/Oag)\n\t\t\tff2 = gb/Oab\n\t\t\tff3 = gg/Oag\n\t\t\t\n\t\t\tup = (ff3/ff1)*Aag*Aab*(gg-J*gb)\n\t\t\tdown = C*(J*gb*Aab - gb*Aag)\n\t\t\t\n\t\t\tsol = up/down\n\t\t\t\n\t\t\t# Return of the Density\n\t\t\t\n\t\t\treturn sol\n\t\n\t\n\t\n\t\n\t# Fivel function to iterate\n\t\n\t#def fivel(J1,J2,ion1='OIII',ion2='SII',show=True):\n\tdef fivel(self):\n\t\t\n\t\t\n\t\t\"\"\"\n\t\t ### Iteration Function\n\t\t\n\t\t Function to iterate and find both the temperature and density from\n\t\t the above function ratio().\n\t\t \n\t\t This can be done in two ways. The first one is selecting the ion1 as\n\t\t either OIII or NII, and the second one selecting SII or OII for the \n\t\t ion1. \n\t\t \n\t\t If the first method is selected, the input density (to find the\n\t\t temperature for 2p2-like ions) is 1x10^4 particles per cm3, which is\n\t\t totally arbitrary and does not interfere with the result. If the selected \n\t\t density is a few orders of magnitude above the resulting one, then the \n\t\t iteration will converge in one more step, while if the value of the\n\t\t density is similar to the calculated one, the iteration will converge in\n\t\t fewer steps.\n\t\t \n\t\t If the second method is selected, then a similar process to the one\n\t\t described above is done, just for this case the input parameter is\n\t\t a temperature of 10000 kelvin. 
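The roles of (J1, ion1) and\n\t\t (J2, ion2) are simply swapped in this case. 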
\n\t\t \n\t\t The number of iterations, independent of the initial values for Density\n\t\t and Temperature, will be less than 4.\n\t\t \n\t\t The final return of this function is the temperature and density as \n\t\t a Python tuple in Kelvin and particles per cm3 respectively.\n\t\t\"\"\"\n\t\t\n\t\t\n\t\t###########################################################\n\t\t\n\t\tif self.ion1=='OIII' or self.ion1=='NII': #2p2\n\t\t\tX = 1e4 #value of density\n\t\t\tT = self.ratio(J=self.J1,Ne=X,ion=self.ion1)\n\t\t\t\n\t\t\tprint(\"\\n Begin of iteration\\n\")\n\t\t\t\n\t\t\t#Begin of \"iteration\"\n\t\t\tfor i in range(0,4):\n\t\t\t\tRES = T\n\t\t\t\tNe = self.ratio(J=self.J2,T=RES,ion=self.ion2)\n\t\t\t\tT = self.ratio(J=self.J1,Ne=Ne,ion=self.ion1)\n\t\t\t\tif self.show:\n\t\t\t\t\tprint(\" Iteration {}\\n  Ne {}\\n  T {}\\n\".format(i+1,Ne,RES))\n\t\t\tprint(\" Done!\\n\")\n\t\t\t\n\t\t###########################################################\n\t\t\n\t\telif self.ion1=='SII' or self.ion1=='OII': #2p3\n\t\t\t# here ion1/J1 are the density-sensitive pair and ion2/J2 the temperature-sensitive one\n\t\t\tX = 1e4 #value of Temperature\n\t\t\tNe = self.ratio(J=self.J1,T=X,ion=self.ion1)\n\t\t\tprint(\"\\n Begin of iteration\\n\")\n\t\t\t\n\t\t\t#Begin of \"iteration\"\n\t\t\tfor i in range(0,4):\n\t\t\t\tRES = Ne\n\t\t\t\tT = self.ratio(J=self.J2,Ne=RES,ion=self.ion2)\n\t\t\t\tNe = self.ratio(J=self.J1,T=T,ion=self.ion1)\n\t\t\t\tif self.show:\n\t\t\t\t\tprint(\" Iteration {}\\n  Ne {}\\n  T {}\\n\".format(i+1,RES,T))\n\t\t\tprint(\" Done!\\n\")\n\t\t\t\n\t\t#Return Temperature and Density \n\t\treturn T, Ne\n\n\n\n\nif __name__=='__main__':\n\n\t\n\tprint(\"---------------------------------\\n\")\n\tprint(\"  PROGRAM FIVEL - PYTHON-TEST\\n\")\n\ti1 = float(input(\" Type the number of the ion1\\n 1: O[III]\\n 2: N[II]\\n\"))\n\tif i1 == 1:\n\t\tion1 = 'OIII'\n\telif i1 == 2:\n\t\tion1 = 'NII'\n\telse: \n\t\tprint(\"Number not defined\")\n\t\tprint(\"Exiting\")\n\t\texit()\n\n\ti2 = float(input(\" Type the number of the ion2\\n 1: S[II]\\n 2: O[II]\\n\"))\n\tif i2 == 1:\n\t\tion2 = 'SII'\n\telif i2 == 2:\n\t\tion2 = 'OII'\n\telse: \n\t\tprint(\"Number not defined\")\n\t\tprint(\"Exiting\")\n\t\texit()\n\t\n\tprint(\" Input the lines ratio for the {} ion: \".format(ion1))\n\tJ1 = float(input())\n\tprint(\" Input the lines ratio for the {} ion: \".format(ion2))\n\tJ2 = float(input())\n\t\n\tA = agn(J1=J1,J2=J2,ion1=ion1,ion2=ion2,show=True)\n\tT, Ne = A.fivel()\n\tprint(\" Temperature {}\\n Density {}\".format(T,Ne))\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\texit()\n\t\n\t# Some values for other AGN\n\t\n\t### NGC 3227\n\tA = agn(J1=122.72,J2=1.36,ion1='OIII',ion2='SII')\n\tT, Ne = A.fivel()\n\tprint(\"Temperature {}\\nDensity {}\".format(T,Ne))\n\n\n\t### NGC 1068\n\t\n\tA = agn(J1=15.628,J2=1.228,ion1='OIII',ion2='SII')\n\tT, Ne = A.fivel()\n\tprint(\"Temperature {}\\nDensity {}\".format(T,Ne))\n\t\n\t### NGC 5548\n\tA = agn(J1=65.865,J2=1.523,ion1='OIII',ion2='SII')\n\tT, Ne = A.fivel()\n\tprint(\"Temperature {}\\nDensity {}\".format(T,Ne))\n\n","sub_path":"fivel.py","file_name":"fivel.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"382071187","text":"\"\"\"add project name\n\nRevision ID: 56ca4ea31125\nRevises: 68bffb8d281b\nCreate Date: 2021-03-10 14:30:05.133602\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '56ca4ea31125'\ndown_revision = '68bffb8d281b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands 
auto generated by Alembic - please adjust! ###\n op.add_column('proposals', sa.Column('project_name', sa.String(length=100), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('proposals', 'project_name')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/56ca4ea31125_add_project_name.py","file_name":"56ca4ea31125_add_project_name.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"321895218","text":"from typing import Any, Dict, Generator\n\nfrom ...call_builder.base.base_call_builder import BaseCallBuilder as _BaseCallBuilder\nfrom ...client.base_sync_client import BaseSyncClient\nfrom ...client.response import Response\nfrom ...exceptions import NotPageableError, raise_request_exception\nfrom ...type_checked import type_checked\nfrom ...utils import urljoin_with_query\n\n__all__ = [\"BaseCallBuilder\"]\n\n\n@type_checked\nclass BaseCallBuilder(_BaseCallBuilder):\n \"\"\"Creates a new :class:`BaseCallBuilder` pointed to server defined by horizon_url.\n\n This is an **abstract** class. Do not create this object directly, use :class:`stellar_sdk.Server` class.\n\n :param client: The client instance used to send request.\n :param horizon_url: Horizon server URL.\n \"\"\"\n\n def __init__(self, client: BaseSyncClient, **kwargs) -> None:\n super().__init__(**kwargs)\n self.client: BaseSyncClient = client\n\n def call(self) -> Dict[str, Any]:\n \"\"\"Triggers a HTTP request using this builder's current configuration.\n\n :return: If it is called synchronous, the response will be returned. If\n it is called asynchronously, it will return Coroutine.\n :raises:\n | :exc:`ConnectionError `: if you have not successfully\n connected to the server.\n | :exc:`NotFoundError `: if status_code == 404\n | :exc:`BadRequestError `: if 400 <= status_code < 500\n and status_code != 404\n | :exc:`BadResponseError `: if 500 <= status_code < 600\n | :exc:`UnknownRequestError `: if an unknown error occurs,\n please submit an issue\n \"\"\"\n url = urljoin_with_query(self.horizon_url, self.endpoint)\n return self._call(url, self.params)\n\n def _call(self, url: str, params: dict = None) -> Dict[str, Any]:\n raw_resp = self.client.get(url, params)\n assert isinstance(raw_resp, Response)\n raise_request_exception(raw_resp)\n resp = raw_resp.json()\n self._check_pageable(resp)\n return resp\n\n def stream(\n self,\n ) -> Generator[Dict[str, Any], None, None]:\n \"\"\"Creates an EventSource that listens for incoming messages from the server.\n\n See `Horizon Response Format `__\n\n See `MDN EventSource `__\n\n :return: an EventSource.\n\n :raise: :exc:`StreamClientError ` - Failed to fetch stream resource.\n \"\"\"\n url = urljoin_with_query(self.horizon_url, self.endpoint)\n return self.client.stream(url, self.params)\n\n def next(self) -> Dict[str, Any]:\n if self.next_href is None:\n raise NotPageableError(\"The next page does not exist.\")\n return self._call(self.next_href, None)\n\n def prev(self) -> Dict[str, Any]:\n if self.prev_href is None:\n raise NotPageableError(\"The prev page does not exist.\")\n return self._call(self.prev_href, None)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n return (\n self.params == other.params\n and self.endpoint == other.endpoint\n and self.horizon_url == other.horizon_url\n and self.client == 
other.client\n )\n\n def __str__(self):\n return (\n f\"\"\n )\n","sub_path":"stellar_sdk/call_builder/call_builder_sync/base_call_builder.py","file_name":"base_call_builder.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"308436429","text":"'''\n插入排序\n主要思想:将A[j]插入到已经排好序的A[1...j-1]中\nƟ(n^2)\n'''\n\ndef insertion_sort(A):\n\tfor i in range(1, len(A)):\n\t\tkey = A[i]\t#要插入的数值key\n\t\twhile i > 0 and A[i - 1] > key:\n\t\t\tA[i] = A[i - 1]\t#当while条件为真时,向后移位\n\t\t\ti -= 1\n\t\tA[i] = key\t#寻找到空位后,插入数值key\n\treturn A\n","sub_path":"sort/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160781571","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nfrom random import shuffle\nfrom random import randint\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport sys\n\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\n# In[3]:\n\n\n# retorna todos os elementos de uma coluna de uma matriz\ndef getColumn(matrix, i):\n return [row[i] for row in matrix]\n\n\n# In[4]:\n\n\n# diminui o raio de vizinhanca de acordo com um fator, mas nao deixa chegar a 0\ndef decreaseNeighbourhoodRadius(factor, radius):\n radius = radius - factor\n if radius == 0 :\n radius = 1\n return radius\n\n\n# In[4]:\n\n\n# diminui o learning rate\ndef decreaseLearningRate(learning_rate, factor):\n return learning_rate - factor\n\n\n# In[5]:\n\n\n# inicializa os pesos com valores aleatorios\n# numero de dimensoes, numero de neuronios, valores minimo e maximos\ndef initWeigths(dimensions, neuronsNumer, minValue, maxValue):\n w = [[0 for i in range(dimensions)] for j in range(neuronsNumer)]\n for i in range(0, len(w)):\n for j in range(0, len(w[i])):\n w[i][j] = np.random.uniform(minValue, maxValue)\n\n return w\n\n\n# In[6]:\n\n\n# distancia euclidiana entre o neuronio e o dado\ndef dist(weigth, dataPoint):\n soma = 0\n for i in range(0, len(weigth)):\n soma = soma + np.sum((weigth[i]-dataPoint[i])**2)\n return np.sqrt(soma)\n\n\n# In[7]:\n\n\n# atualiza o peso de um determinado neuronio\ndef updateWeight(w, i, datapoint, radius_step, learning_rate):\n if(w[i]):\n for j in range(len(w[i])):\n #print(\"\\t\" + str(learningRate * (1/radius) * (datapoint[j] - w[i][j])))\n w[i][j] = w[i][j] + learning_rate * (1/radius_step) * (datapoint[j] - w[i][j])\n\n\n# In[23]:\n\n\n# atualiza os pesos do neuronio e seus vizinhos\ndef updateWeigths(w, winner, datapoint, m, radius_step, learning_rate):\n # Ex topologia 2x4:\n # 1-2-3-4\n # | | | |\n # 5-6-7-8\n #\n\n #Se esta dentro do raio de atualizacao\n if radius_step >= 0:\n # atualiza o peso do no vencedor\n #print('Atualizando neuronio',winner)\n updateWeight(w, winner, datapoint, 1, learning_rate)\n\n # vizinho da direita. verifica se existe e tambem esta dentro da mesma linha da dimensao (ex. 
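# --- aside: quick property check for the insertion_sort record above (a
# hypothetical test harness, not part of the original file): inserting A[i]
# into the already-sorted prefix A[0..i-1] must agree with sorted().
import random

def insertion_sort(A):             # restated from the record, comments in English
    for i in range(1, len(A)):
        key = A[i]                 # value to insert
        while i > 0 and A[i - 1] > key:
            A[i] = A[i - 1]        # shift larger elements one slot right
            i -= 1
        A[i] = key                 # drop the key into the gap it left
    return A

sample = [random.randint(0, 99) for _ in range(50)]
assert insertion_sort(sample[:]) == sorted(sample)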
4 nao eh vizinho do 5)\n if (winner + radius_step < len(w) and (winner + radius_step) % m[1] >= winner % m[1]):\n #updateWeight(winner + 1, datapoint, radius_step)\n updateWeigths(w, winner + radius_step, datapoint, m, radius_step - 1, learning_rate) #atualiza para os outros vizinhos do raio\n # vizinho da esquerda, mesmo raciocinio\n if (winner - radius_step >= 0 and (winner - radius_step) % m[1] <= winner % m[1]):\n #updateWeight(winner - 1, datapoint, radius_step)\n updateWeigths(w, winner - radius_step, datapoint, m, radius_step - 1, learning_rate) #atualiza para os outros vizinhos do raio\n # vizinho de baixo. verifica se existe\n if (winner + (radius_step*m[1]) < len(w)):\n #updateWeight(winner + m[1], datapoint, radius_step)\n updateWeigths(w, winner + (radius_step*m[1]), datapoint, m, radius_step - 1, learning_rate) #atualiza para os outros vizinhos do raio\n # vizinho de cima, mesmo raciocinio\n if (winner - (radius_step*m[1]) >= 0):\n #updateWeight(winner - m[1], datapoint, radius_step)\n updateWeigths(w, winner - (radius_step*m[1]), datapoint, m, radius_step - 1, learning_rate) #atualiza para os outros vizinhos do raio\n\n\n# In[34]:\n\n\n# verifica se um neuronio eh vizinho de outro\ndef isNeighboor(first, second, m):\n if (first == second) : return 0\n if(first > second):\n x = first\n first = second\n second = x\n\n if(abs(first - second) == 1):\n if(second % m[1] != 0):\n return 1\n else:\n return 0\n else:\n if(abs(first - second) == m[1]):\n return 1\n else:\n return 0\n\n\n# In[9]:\n\n\n# retorna o erro de quantizacao em um conjunto de dados\ndef quantizationError(dataSet, w):\n distSum = 0\n for i in range(0, len(dataSet)):\n #distancia do neuronio vencedor\n winnerDist = sys.float_info.max\n\n #indice do neuronio vencedor\n winner = 0\n\n #calcula as distancias entre todos os neuronios e salva a menor\n for neuronIndex in range(0, len(w)):\n d = dist(w[neuronIndex], dataSet[i])\n if(d < winnerDist):\n winner = neuronIndex\n winnerDist = d\n\n distSum += winnerDist\n return distSum/len(dataSet)\n\n\n# In[36]:\n\n\n# retorna o erro topologico em um conjunto de dados\ndef topologicalError(dataSet, m, w):\n distSum = 0\n for i in range(0, len(dataSet)):\n #distancia do neuronio vencedor\n winnerDist = sys.float_info.max\n secondWinnerDist = sys.float_info.max\n\n #indice do neuronio vencedor\n winner = 0\n secondWinner = 0\n\n #calcula as distancias entre todos os neuronios e salva a menor\n for neuronIndex in range(0, len(w)):\n d = dist(w[neuronIndex], dataSet[i])\n if(d < winnerDist):\n winner = neuronIndex\n winnerDist = d\n else:\n if(d < secondWinnerDist):\n secondWinner = neuronIndex\n secondWinnerDist = d\n\n distSum += isNeighboor(winner, secondWinner, m)\n\n return distSum/len(dataSet)\n\n\n# In[11]:\n\n\ndef readSpiral():\n dataSet = []\n maxWeight = 35\n fileObject = open(\"data/clustering/spiral.txt\", \"r\")\n for line in fileObject:\n sline = line.split(\"\\t\")\n dataSet.append([float(sline[0]), float(sline[1])])\n #labels.append(int(sline[2]))\n return dataSet\n\ndef readT48():\n dataSet = []\n maxWeight = 650\n fileObject = open(\"data/clustering/t4.8k.txt\", \"r\")\n for line in fileObject:\n sline = line.split(\" \")\n dataSet.append([float(sline[0]), float(sline[1])])\n return dataSet\n\n\n\ndef getWinnersColors(w, dataSet):\n winners = []\n #colors = []\n #print(matplotlib.colors.cnames.items()['name'])\n\n #colors = matplotlib.cm.rainbow(np.linspace(0, 1, len(dataSet)))\n for i in range(0, len(dataSet)):\n #distancia do neuronio vencedor\n winnerDist 
= sys.float_info.max\n\n #indice do neuronio vencedor\n winner = 0\n\n #calcula as distancias entre todos os neuronios e salva a menor\n for neuronIndex in range(0, len(w)):\n d = dist(w[neuronIndex], dataSet[i])\n if(d < winnerDist):\n winner = neuronIndex\n winnerDist = d\n winners.append(winner)\n\n return winners\n\n\n# In[12]:\n\n# metodo inicial\ndef som(path, id_img, dataSet, radius, m, minWeight, maxWeight, learning_rate, learning_rate_decrease_factor):\n # dimensions, neuronsNumber, minValue, maxValue\n # Spiral => 1 ~ 35\n # T48 => 1 ~ 650\n w = initWeigths(2, m[0] * m[1], minWeight, maxWeight)\n\n #print(\"Initial Weights:\")\n #print(w)\n\n #print(dataSet)\n\n plt.figure(1)\n plt.subplot(211)\n plt.plot(getColumn(dataSet,0), getColumn(dataSet,1), \"sb\", getColumn(w,0), getColumn(w,1), \"or\")\n\n count = 0\n # enquanto a taxa de aprendizado eh maior que zero\n while(learning_rate > 0):\n count += 1\n #print('Epoca:',count)\n # para cada item dos dados, calcula o neuronio vencedor\n shuffle(dataSet)\n for i in range(0, len(dataSet)):\n #print('Dado:',i)\n #distancia do neuronio vencedor\n winnerDist = sys.float_info.max\n\n #indice do neuronio vencedor\n winner = 0\n\n #calcula as distancias entre todos os neuronios e salva a menor\n for neuronIndex in range(0, len(w)):\n d = dist(w[neuronIndex], dataSet[i])\n if(d < winnerDist):\n winner = neuronIndex\n winnerDist = d\n #print('Neuronio vencedor:',winner)\n #atualiza os pesos do vencedor e seus vizinhos\n updateWeigths(w, winner, dataSet[i], m, radius, learning_rate)\n\n learning_rate = decreaseLearningRate(learning_rate, learning_rate_decrease_factor)\n radius = decreaseNeighbourhoodRadius(1, radius)\n\n\n #print(\"Final Weights:\")\n #print(w)\n\n plt.subplot(212)\n plt.plot(getColumn(dataSet,0), getColumn(dataSet,1), \"sb\", getColumn(w,0), getColumn(w,1), \"or\")\n plt.savefig(path + \"/neurons_position_\" + id_img + \".png\")\n plt.close()\n\n if len(dataSet[0]) == 2:\n df = pd.DataFrame(dataSet, columns=list('xy'))\n else:\n df = pd.DataFrame(dataSet, columns=list('xyz'))\n df['w'] = getWinnersColors(w, dataSet)\n sns_plot = sns.pairplot(x_vars=['x'], y_vars=['y'], data=df, hue=\"w\", size=12)\n sns_plot.savefig(path + \"/cluster_result_\" + id_img + \".png\")\n\n uMatrix(path, id_img, w, m)\n\n return [quantizationError(dataSet, w),topologicalError(dataSet, m, w)]\n\ndef mediaAoRedorUMatrix(u, i, j):\n soma = 0\n count = 0\n #print(str(i) + \" - \" + str(j))\n if i - 1 >= 0:\n soma += u[i-1][j]\n count += 1\n if i + 1 < len(u):\n soma += u[i+1][j]\n count += 1\n if j - 1 >= 0:\n soma += u[i][j-1]\n count += 1\n if j + 1 < len(u[i]):\n soma += u[i][j+1]\n count += 1\n #print(\" -> \" + str(soma) + \" / \" + str(count) + \" \" + str(soma/count))\n return soma/count\n\ndef uMatrix(path, id_img, w, m):\n u = [[0 for i in range((2 * m[1]) - 1)] for j in range((2 * m[0]) - 1)]\n\n k = 0\n l = 0\n for i in range(0, len(u)):\n for j in range(0, len(u[i])):\n #print(str(i) + \",\" + str(j))\n #linha par e coluna impar\n if i % 2 == 0 and j % 2 != 0:\n # print(\" -> \" + str(k) + \",\" + str(k+1))\n u[i][j] = -dist(w[k], w[k+1])\n k = k + 1\n #linha impar e coluna par\n elif i % 2 != 0 and j % 2 == 0:\n # print(\" -> \" + str(l) + \",\" + str(l + (m[1])))\n u[i][j] = -dist(w[l], w[l + (m[1])])\n l = l + 1\n\n for i in range(0, len(u)):\n for j in range(0, len(u[i])):\n if (i % 2 == 0 and j % 2 == 0) or (i % 2 != 0 and j % 2 != 0) or (i != 0 and j != 0) or (i != 0 and j != len(u[0])-1) or (i != len(u)-1 and j != 0) or (i != 
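# --- aside: the modulo tests in isNeighboor() are easier to trust when checked
# against an explicit (row, col) decomposition. A hypothetical cross-check,
# assuming the same row-major layout w[row * m[1] + col] used above.
def is_neighbour_rc(first, second, m):
    r1, c1 = divmod(first, m[1])
    r2, c2 = divmod(second, m[1])
    return int(abs(r1 - r2) + abs(c1 - c2) == 1)    # 4-connected lattice

def isNeighboor_ref(first, second, m):              # condensed restatement
    if first == second:
        return 0
    lo, hi = min(first, second), max(first, second)
    if hi - lo == 1:
        return 1 if hi % m[1] != 0 else 0           # same row, no wrap-around
    return 1 if hi - lo == m[1] else 0              # directly above/below

m = [3, 4]                                          # 3 rows x 4 columns
for a in range(12):
    for b in range(12):
        assert is_neighbour_rc(a, b, m) == isNeighboor_ref(a, b, m)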
len(u)-1 and j != len(u[0])-1):\n u[i][j] = mediaAoRedorUMatrix(u, i, j)\n\n u[0][0] = mediaAoRedorUMatrix(u, 0, 0)\n u[0][len(u[0])-1] = mediaAoRedorUMatrix(u, 0, len(u[0])-1)\n u[len(u)-1][0] = mediaAoRedorUMatrix(u, len(u)-1, 0)\n u[len(u)-1][len(u[0])-1] = mediaAoRedorUMatrix(u, len(u)-1, len(u[0])-1)\n\n plt.clf()\n plt.contourf(u, cmap = "rainbow")\n plt.colorbar()\n plt.savefig(path + "/u_matrix_" + id_img + ".png")\n\n# In[1]:\n\n\ndef main():\n #values to be tested\n data_path = 'data/clustering/'\n extensao = '.txt'\n datasets_min_max = {'spiral': {'min': -10, 'max': 50},\n 't4.8k': {'min': 10, 'max': 600}}\n #topologies = [{'min': 1,'max': 1},{'min': 10,'max': 5}]\n topologiesX = [20]\n topologiesY = [20]\n radius_values = [2]\n learning_rates = [0.9]\n learning_rate_decrease_factors = [0.01]\n\n columns = ['dataset','radius','learning_rate','topology', 'learning_rate_decrease_factor', 'quantization_error', 'topological_error']\n tests = pd.DataFrame(columns=columns)\n path_results = "results_clustering_UMatrix"\n\n '''for filename in datasets_min_max:\n if filename == 'spiral':\n dataset = pd.read_csv(data_path+filename+extensao, sep='\\t', header=None)\n else:\n dataset = pd.read_csv(data_path+filename+extensao, sep=' ', header=None)\n dataset.head(5)\n #for topology_y in range(topologies[0]['min'],topologies[1]['max']+1):\n # for topology_x in range(topologies[1]['min'],topologies[0]['max']+1):\n for topology_y in topologiesY:\n for topology_x in topologiesX:\n m = [topology_x, topology_y]\n for radius in radius_values:\n for learning_rate in learning_rates:\n for learning_rate_decrease_factor in learning_rate_decrease_factors:\n combination = [filename, "rd:" + str(radius), "lr:" + str(learning_rate), str(topology_x) + 'x' + str(topology_y), "lrdf:" + str(learning_rate_decrease_factor)]\n img_str = '_'.join(combination)\n print(img_str)\n result = som(path_results,img_str, dataset.values.tolist(), radius, m, datasets_min_max[filename]['min'], datasets_min_max[filename]['max'], learning_rate, learning_rate_decrease_factor)\n combination.append(result[0])\n combination.append(result[1])\n tests = tests.append(pd.DataFrame([combination], columns=columns), ignore_index=True)\n # SOM calls\n print(tests)\n tests.to_csv(path_or_buf=path_results+"/test_results.txt", sep='\\t', index=False, header=True)\n\n '''\n spiral = readSpiral()\n tk = readT48()\n\n plt.figure(1)\n plt.subplot(211)\n plt.plot(getColumn(spiral,0), getColumn(spiral,1), "sb")\n\n plt.subplot(212)\n plt.plot(getColumn(tk,0), getColumn(tk,1), "sb")\n plt.savefig("dataFigs.png")\n plt.close()\n\n # Network topology: the number of neurons in each dimension, used to\n # compute the neighbours. The product of the two numbers must equal the total neuron count\n #m = [2, 5]\n #dataSet=[[10, 10], [1, 1], [9,9], [2, 2], [10, 9], [2, 1], [9, 10], [1, 2]]\n #som("teste0", readSpiral(), 2, m, 1, 1, 0.9)'''\n\nif __name__ == "__main__":\n main()\n\n#Spiral: txt\n#H. Chang and D.Y. Yeung, Robust path-based spectral clustering. Pattern Recognition, 2008. 41(1): p. 191-203.\n#t4.8k: G. Karypis, E.H. Han, V. Kumar, CHAMELEON: A hierarchical 765 clustering algorithm using dynamic modeling, IEEE Trans. 
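# --- aside: one SOM update step in miniature (a hypothetical, numpy-flavoured
# restatement of the winner search + weight pull inside som() above; no plotting):
import numpy as np

rng = np.random.default_rng(2)
w = rng.uniform(0, 35, size=(8, 2))                # 8 neurons, 2-D weights
point = np.array([10.0, 12.0])
winner = int(np.argmin(np.linalg.norm(w - point, axis=1)))
lr = 0.9
w[winner] += lr * (point - w[winner])              # pull the winner toward the data
print(winner, w[winner])                           # neighbours would follow with lr/r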
on Computers, 32 (8), 68-75, 1999.\n","sub_path":"som.py","file_name":"som.py","file_ext":"py","file_size_in_byte":14223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"198126335","text":"import random\nimport itertools\n\n\nNUMBERS = (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)\n\ndef merge(arr1, arr2):\n container = []\n i = j = 0\n \n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n container.append(arr1[i])\n i += 1\n else:\n container.append(arr2[j])\n j += 1\n \n container.extend(arr1[i:])\n container.extend(arr2[j:])\n return container\n\n\ndef mergesort(arr):\n if len(arr) <= 1:\n return arr\n \n middle = len(arr) // 2\n left, right = arr[:middle], arr[middle:]\n sorted_left = mergesort(left)\n sorted_right = mergesort(right)\n return merge(sorted_left, sorted_right)\n\n\nif __name__ == \"__main__\":\n unsorted_arr = random.sample(NUMBERS, len(NUMBERS))\n print(unsorted_arr)\n sorted_arr = mergesort(unsorted_arr)\n print(sorted_arr)\n","sub_path":"mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"547978031","text":"# task_worker #\n\nimport time, queue, random\nfrom multiprocessing.managers import BaseManager\n\nclass Queuemanager(BaseManager):\n pass\n\n# 这个Queuemanager只能从网络上获取Queue,所以注册时只提供名字\nQueuemanager.register('get_task_queue')\nQueuemanager.register('get_result_queue')\n# 连接到服务器,也就是运行task_master的主机\nserver_add = '127.0.0.1'\nprint('Connect server %s' % server_add)\nm = Queuemanager(address=(server_add, 54321), authkey=b'abc')\n# 开始连接\nm.connect()\n# 获取task_queue和result_queue这两个对象\ntask = m.get_task_queue()\nresult = m.get_result_queue()\n# 从task队列中取任务,并把结果写入result队列\nfor i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d' % (n, n))\n r = '%d * %d = %d' % (n, n, n*n)\n time.sleep(1)\n result.put(r)\n except queue.Empty:\n print('task queue is empty')\n\nprint('处理结束')\n","sub_path":"python_Demo/Other/分布式进程的实现/task_worker.py","file_name":"task_worker.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"478353136","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\n\nfrom openpyxl import Workbook\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom .spiders.tfunc import SQL\n\nBase = declarative_base()\n\n\nclass Wuyou(Base):\n __tablename__ = 'wuyou'\n id = Column(Integer, primary_key=True, autoincrement=True)\n position = Column(String(50))\n company_name = Column(String(50))\n location = Column(String(50))\n link = Column(String(100))\n money = Column(String(10))\n date = Column(String(10))\n\n\nclass QianchengwuyouPipeline(object):\n def process_item(self, item, spider):\n data_list = []\n for i in item:\n data_list.extend(item[i])\n\n if len(data_list) == 6:\n print(data_list)\n sql = SQL('root', '123456', '3306', 'homework', 'utf8', Base)\n w = Wuyou(position=data_list[0], company_name=data_list[1], location=data_list[2], money=data_list[3],\n date=data_list[4], link=data_list[5])\n sql.write(w)\n\n return 
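# --- aside: the same kind of spot check for the mergesort record above; merge()
# must preserve sortedness and multiset equality (hypothetical test harness,
# condensed from the record):
import random

def merge(a, b):
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i]); i += 1
        else:
            out.append(b[j]); j += 1
    out.extend(a[i:]); out.extend(b[j:])            # drain whichever half remains
    return out

def mergesort(arr):
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    return merge(mergesort(arr[:mid]), mergesort(arr[mid:]))

data = [random.randint(-50, 50) for _ in range(200)]
assert mergesort(data) == sorted(data)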
item\n","sub_path":"qianchengwuyou/qianchengwuyou/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"558729336","text":"def gen_combinations(iterable, r):\n\n if r > len(iterable):\n return\n \n def generator(iterable, index = 0, length = 0):\n \n indices = range(index, len(iterable))\n if length == r:\n yield []\n else:\n for ind in indices:\n for other in generator(iterable, ind + 1, length + 1):\n yield [iterable[ind]] + other\n \n return generator(iterable)\n\nif __name__ == \"__main__\":\n combin1 = []\n # for comb in gen_combinations(list(range(50)), 8):\n # combin1.append(tuple(comb))\n\n combin2 =[]\n import time\n start = time.time()\n for comb in combinations(list(range(30)), 10):\n # combin2.append(comb)\n print(comb)\n print(time.time() - start)\n\n # assert(combin1==combin2)\n\n\n ","sub_path":"combiations.py","file_name":"combiations.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"505793528","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport scraper\nfrom db import Dbinterface\nfrom db.models import Publicacao_Original\n\nimport argparse\nimport calendar\nimport os\n\n\n##\n# Utils\n\ndef get_dates(year, month):\n num_days = calendar.monthrange(year, month)[1]\n return ['-'.join([str(year), str(month), str(day)]) for day in range(1, num_days+1)]\n\n\n##\n# Command line arguments\n\nparser = argparse.ArgumentParser()\nparser.add_argument('year', type=int, help='Year to scrap')\nparser.add_argument('month', type=int, help='Month to scrap')\n\nyear = parser.parse_args().year\nmonth = parser.parse_args().month\n\n\n##\n# Scrap routine\n\nprint('starting scraping routine')\n\npublicacoes = []\ndates = get_dates(year, month)\nfor date in dates:\n print('scraping {}'.format(date))\n publicacoes += scraper.scrap(date)\n\n\n##\n# Persist results\n\nprint('persisting on database')\n\ndbi = Dbinterface(os.environ['DIARIOBOT_DATABASE_CONNECTIONSTRING'])\nwith dbi.opensession() as session:\n\n for publicacao in publicacoes:\n entry = Publicacao_Original(**publicacao)\n session.add(entry)\n\n session.commit()\n","sub_path":"ondemand.py","file_name":"ondemand.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"29717720","text":"from distutils.version import LooseVersion\n\nfrom demisto_sdk.commands.common.errors import Errors\nfrom demisto_sdk.commands.common.hook_validations.content_entity_validator import \\\n ContentEntityValidator\n\n\nclass ReputationValidator(ContentEntityValidator):\n \"\"\"ReputationValidator is designed to validate the correctness of the file structure we enter to content repo.\n \"\"\"\n\n def is_valid_file(self, validate_rn=True):\n \"\"\"Check whether the reputation file is valid or not\n \"\"\"\n\n is_reputation_valid = all([\n super().is_valid_file(validate_rn),\n self.is_valid_version(),\n self.is_valid_expiration()\n ])\n\n # check only on added files\n if not self.old_file:\n is_reputation_valid = all([\n is_reputation_valid,\n self.is_id_equals_details()\n ])\n\n return is_reputation_valid\n\n def is_valid_version(self):\n # type: () -> bool\n \"\"\"Validate that the reputations file as version of -1.\"\"\"\n is_valid = True\n\n internal_version = self.current_file.get('version')\n if internal_version != 
self.DEFAULT_VERSION:\n object_id = self.current_file.get('id')\n error_message, error_code = Errors.wrong_version_reputations(object_id, self.DEFAULT_VERSION)\n\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n is_valid = False\n\n return is_valid\n\n def is_valid_expiration(self):\n # type: () -> bool\n \"\"\"Validate that the expiration field of a 5.5 reputation file is numeric.\"\"\"\n from_version = self.current_file.get(\"fromVersion\", \"0.0.0\")\n if LooseVersion(from_version) >= LooseVersion(\"5.5.0\"):\n expiration = self.current_file.get('expiration', \"\")\n if not isinstance(expiration, int) or expiration < 0:\n error_message, error_code = Errors.reputation_expiration_should_be_numeric()\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n return False\n\n return True\n\n def is_id_equals_details(self):\n # type: () -> bool\n \"\"\"Validate that the id equal details.\"\"\"\n is_valid = True\n\n id_ = self.current_file.get('id', None)\n details = self.current_file.get('details', None)\n if not id_ or not details or id_ != details:\n error_message, error_code = Errors.reputation_id_and_details_not_equal()\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n is_valid = False\n\n return is_valid\n","sub_path":"demisto_sdk/commands/common/hook_validations/reputation.py","file_name":"reputation.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"245501408","text":"import requests\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nfrom time import sleep\r\nprint('This program is designed to build a graph of the friendship relations between users of social network VK.\\n'\r\n 'ATTENTION!!! If you choose a depth of search greater than 1, be ready to face a lack of RAM.\\n'\r\n 'If you have a very fast computer, please, input nonzero delay. Otherwise antiDoS system may reject you.\\n')\r\n\r\n\r\ndef get_friends_dict(user_id=1):\r\n return requests.get(HOST + 'friends.get', params={'user_id': user_id, 'fields': 'first_name', 'v': VERSION}).json()\r\n\r\n\r\ndef add_account_to_graph(graph, id):\r\n try:\r\n for friend in get_friends_dict(id)['response']['items']:\r\n graph.add_edge(id, friend['id'])\r\n except KeyError:\r\n pass\r\n\r\n\r\ndef graph_builder(graph, id, depth=1, delay=float(0), inscription='', enrich_inscription=True):\r\n add_account_to_graph(graph, id)\r\n sleep(delay)\r\n if enrich_inscription:\r\n inscription = str(len(graph.nodes()))\r\n if depth == 1:\r\n return\r\n iterator = 1\r\n for friend_id in graph.nodes():\r\n iteration = str(iterator) + '/' + inscription\r\n print('Iteration:', iteration)\r\n graph_builder(graph, friend_id, depth - 1, delay, iteration, False)\r\n iterator += 1\r\n\r\n\r\nHOST = 'https://api.vk.com/method/'\r\nVERSION = '5.62'\r\n\r\nprint('Please, input ID of some VK user:')\r\nmy_id = int(input())\r\nprint('Please, input a depth of search:')\r\nmy_depth = int(input())\r\nif my_depth < 1:\r\n my_depth = 1\r\n print('Invalid value. 
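# --- aside: the fromVersion gate above relies on LooseVersion ordering; a tiny
# demonstration of the comparisons involved. Note that distutils is deprecated
# in recent Pythons -- packaging.version.Version is the usual replacement,
# shown only as a commented alternative since it is a third-party install.
from distutils.version import LooseVersion

assert LooseVersion("5.5.0") >= LooseVersion("5.5.0")
assert LooseVersion("6.0.0") >= LooseVersion("5.5.0")
assert not (LooseVersion("5.4.9") >= LooseVersion("5.5.0"))
assert LooseVersion("0.0.0") < LooseVersion("5.5.0")   # the default fromVersion
# from packaging.version import Version   # equivalent: Version("6.0") >= Version("5.5")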
The depth was changed to 1.')\r\nprint('Please, input a time delay in seconds:')\r\nmy_delay = float(input())\r\n\r\nmy_graph = nx.Graph()\r\ngraph_builder(my_graph, my_id, my_depth, my_delay)\r\n\r\nprint('Calculating space configuration of vertexes…')\r\npositions = nx.spring_layout(my_graph)\r\nedges = [element for element in my_graph.edges(data=True)]\r\n\r\nprint('Drawing a graph…')\r\nnx.draw_networkx_nodes(my_graph, positions, node_shape='s', node_size=500, node_color='y')\r\nnx.draw_networkx_labels(my_graph, positions, font_size=4, font_family='sans-serif', font_color='r')\r\nnx.draw_networkx_edges(my_graph, positions, edgelist=edges, width=1)\r\n\r\nplt.axis('off')\r\nplt.savefig('friends_graph.png')\r\nplt.show()\r\n","sub_path":"vk_friends_graph.py","file_name":"vk_friends_graph.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"242219310","text":"import random\n\ndef randomizedPartition(A, i, j):\n k = random.randint(i, j)\n t = A[k]\n A[k] = A[j]\n p, q = i, j\n while p != q:\n while p != q and A[p] <= t:\n p += 1\n A[q] = A[p]\n while p != q and A[q] > t:\n q -= 1\n A[p] = A[q]\n A[q] = t\n return q\n\ndef randomizedSelect(A, i, j, k): # return the kth smallest elem. of A[i...j]\n if i == j:\n return A[i]\n q = randomizedPartition(A, i, j)\n n = q-i+1\n if k == n:\n return A[q]\n elif k < n:\n return randomizedSelect(A, i, q-1, k)\n else:\n return randomizedSelect(A, q+1, j, k-n)\n\ndef median(A):\n return randomizedSelect(A, 0, len(A)-1, int(len(A)/2)) if 1 == len(A) % 2 else 0.5 * (randomizedSelect(A, 0, len(A)-1, int(len(A)/2)) + randomizedSelect(A, 0, len(A)-1, int(len(A)/2)+1))\n\ndef weightedMedian(A, W):\n A_prime = []\n for a, w in zip(A, W):\n A_prime += [a]*w\n return median(A_prime)\n \n\n\n","sub_path":"RandomizedSelection.py","file_name":"RandomizedSelection.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"141867091","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('extuser', '0023_auto_20161110_0955'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='extuser',\n name='push_subscr',\n field=models.BooleanField(verbose_name='Подписка на рассылки', default=True),\n ),\n ]\n","sub_path":"extuser/migrations/0024_extuser_push_subscr.py","file_name":"0024_extuser_push_subscr.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"536727869","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 12 13:14:31 2018\r\n\r\n@author: tlgreiner\r\n\"\"\"\r\nimport numpy as np\r\n\r\n# R2 score\r\ndef R2_metric(y ,y_):\r\n \r\n # extract the shape (dimensions) of the model \r\n shape = y.shape\r\n n = shape[0] \r\n \r\n # compute the mean and store it as a vector\r\n y_m = np.mean(y_)\r\n y_mu = y_m*np.ones([n,1])\r\n \r\n A = np.dot(np.transpose((y - y_)),(y-y_))\r\n B = np.dot(np.transpose((y - y_mu)),(y-y_mu))\r\n \r\n # compute the R2 score\r\n R2 = 1 - A/B\r\n \r\n return R2\r\n\r\n\r\ndef MSE_metric(y, y_):\r\n \r\n # extract the shape (dimensions) of the model \r\n shape = y.shape\r\n n = shape[0] \r\n \r\n # compute the MSE score\r\n Err = np.dot(np.transpose((y - y_)),(y - y_))/n\r\n Err = np.squeeze(Err)\r\n \r\n return 
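# --- aside: a cross-check for the RandomizedSelection record above against
# statistics.median (hypothetical harness with a self-contained quickselect).
# One caveat: the record's odd-length index int(len(A)/2) selects the
# (n//2)-th smallest, which looks one short of the true median; the sketch
# below uses rank n//2 + 1 instead.
import random, statistics

def nth_smallest(a, k):             # k is 1-based, like randomizedSelect
    a = list(a)
    while True:
        pivot = random.choice(a)
        lo = [x for x in a if x < pivot]
        eq = [x for x in a if x == pivot]
        if k <= len(lo):
            a = lo
        elif k <= len(lo) + len(eq):
            return pivot
        else:
            k -= len(lo) + len(eq)
            a = [x for x in a if x > pivot]

def median(a):
    n = len(a)
    if n % 2:
        return nth_smallest(a, n // 2 + 1)
    return 0.5 * (nth_smallest(a, n // 2) + nth_smallest(a, n // 2 + 1))

data = [random.randint(0, 1000) for _ in range(101)]
assert median(data) == statistics.median(data)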
Err\r\n\r\ndef ord_least_square(x,y):\r\n \r\n # exctract the shape of the model\r\n shape = x.shape\r\n n = shape[0]\r\n \r\n # include bias within the data\r\n x0 = np.ones([n,1])\r\n \r\n X = np.concatenate((x0,x),axis=1)\r\n\r\n Xt = np.transpose(X)\r\n \r\n Hat = np.dot(np.linalg.inv(np.dot(Xt,X)),Xt)\r\n beta = np.dot(Hat,y)\r\n y_ = np.dot(X,beta)\r\n \r\n return y_\r\n\r\n\r\ndef ridge_least_square(x, y, lamb = 0.01):\r\n \r\n # exctract the shape of the model\r\n shape = x.shape\r\n n = shape[0]\r\n p = shape[1] \r\n \r\n # include bias within the data\r\n x0 = np.ones([n,1])\r\n \r\n X = np.concatenate((x0,x),axis=1)\r\n \r\n I = np.eye(p+1)\r\n \r\n I[0,0] = 0\r\n\r\n Xt = np.transpose(X)\r\n \r\n Hat = np.dot(np.linalg.inv(np.dot(Xt,X) + lamb*I),Xt)\r\n beta = np.dot(Hat,y)\r\n y_ = np.dot(X,beta)\r\n \r\n return y_\r\n\r\ndef least_square_w_basis_exp(x,y,basis=0):\r\n \r\n # exctract the shape of the model\r\n shape = x.shape\r\n n = shape[0]\r\n \r\n # include bias within the data\r\n x0 = np.ones([n,1])\r\n \r\n # basis expansion on x, with basis = 0 for straight line fit\r\n \r\n X = np.zeros([n,basis+2])\r\n X [:,0] = x0[:,0]\r\n \r\n \r\n for i in range(basis+1):\r\n X[:,(i+1)] = x[:,0]**(i+1)\r\n \r\n Xt = np.transpose(X)\r\n \r\n Hat = np.dot(np.linalg.inv(np.dot(Xt,X)),Xt)\r\n beta = np.dot(Hat,y)\r\n y_ = np.dot(X,beta)\r\n \r\n return y_\r\n","sub_path":"Tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"474439601","text":"# -*- coding:utf-8 -*-\n'''\nCreated on Jul 29, 2014\n\n@author: xchliu\n\n@module:mega_service.mega_client.setting\n\n'''\n#meta data\nversion='mega-client 0.1'\nTCP_HOST='' # default 0.0.0.0\nTCP_PORT=1105\nMEGA_HOST='mega-server.d.chinabank.com.cn'\nMEGA_HOST='localhost'\n\n\nKEEPALIVE=300\n\n#all the script invoked by worker should be in the directory\nSCRIPT_DIR='/home/mysql/admin/mega_client/script/'\n\n#only used for client . \nCLIENT_DIR='/home/mysql/'\n\nDEAFULT_LOG_DEBUG=True\nLOG_FILE_NAME='/var/log/mega/mega_client.log'\nDAEMON_PID='/var/run/mega_client.pid'\nSERVICE_PID='/var/run/mega_client_srv.pid'\nDAEMON_LOG=LOG_FILE_NAME\n\nDEFAULT_TARGET='cmd'\n\n","sub_path":"src/mega_client/mega-1.0/mega_client/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"491783442","text":"# this function gets the reduced variables for a given equation e.g. 
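# --- aside: the normal-equation solvers above can be sanity-checked against
# numpy's least-squares routine (hypothetical check; same bias-column trick):
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 3))
y = x @ np.array([[1.5], [-2.0], [0.5]]) + 0.7 + 0.01 * rng.normal(size=(50, 1))

X = np.concatenate((np.ones((50, 1)), x), axis=1)
beta_normal = np.linalg.inv(X.T @ X) @ X.T @ y           # what ord_least_square does
beta_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
assert np.allclose(beta_normal, beta_lstsq)

lamb, I = 0.01, np.eye(4)
I[0, 0] = 0                                              # do not penalize the bias
beta_ridge = np.linalg.inv(X.T @ X + lamb * I) @ X.T @ y
print(np.abs(beta_ridge - beta_normal).max())            # small shrinkage effect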
u/v, m2/m1 etc.\n\nimport pandas as pd\nimport sympy as sp\nfrom sympy.parsing.sympy_parser import parse_expr\n\ndef replace_variables(input_file,filename,formula):\n # clean weird things in the formula such as Pi -> pi not to be seen as a variable\n formula = str(formula)\n formula = formula.replace("Pi", "pi")\n formula = parse_expr(formula)\n\n dimRed_file = pd.read_excel(input_file)\n dimRed_formula = dimRed_file["Formula"]\n dimRed_filename = dimRed_file["Filename"]\n var1 = dimRed_file["var1"]\n var2 = dimRed_file["var2"]\n var3 = dimRed_file["var3"]\n var4 = dimRed_file["var4"]\n var5 = dimRed_file["var5"]\n var6 = dimRed_file["var6"]\n\n # get the real dimensional reduced variables: the six columns share the\n # same handling, so loop over them -- drop a single leading space if\n # present and keep the value when it is non-empty\n vars = []\n for i in range(len(dimRed_filename)):\n if dimRed_filename[i] == filename:\n for column in (var1, var2, var3, var4, var5, var6):\n if pd.isnull(column[i])==0:\n value = column[i]\n if value[0]!=" ":\n if value!='':\n vars = vars + [value]\n else:\n if value[1:]!='':\n vars = vars + [value[1:]]\n break\n\n\n # get the discovered symbols and replace them with the original ones\n discovered_symbols = sorted(formula.free_symbols, key = lambda symbol: symbol.name)\n for i in range(len(vars)):\n formula = formula.subs(discovered_symbols[i], "("+vars[i]+")")\n\n return formula\n\n","sub_path":"theories/feynman/S_replace_variables.py","file_name":"S_replace_variables.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"145373590","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport cv2\nfrom keras.models import load_model\nimport onnx\nimport onnxruntime as ort\nfrom onnx_tf.backend import prepare\nimport numpy as np\n\n\n# In[4]:\n\n\ndef area_of(left_top, right_bottom):\n \"\"\"\n Compute the areas of rectangles given two corners.\n Args:\n left_top (N, 2): left top corner.\n right_bottom (N, 2): right bottom corner.\n Returns:\n area (N): return the area.\n \"\"\"\n hw = np.clip(right_bottom - left_top, 0.0, None)\n return hw[..., 0] * hw[..., 1]\n\ndef iou_of(boxes0, boxes1, eps=1e-5):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Args:\n boxes0 (N, 4): ground truth boxes.\n boxes1 (N or 1, 4): predicted boxes.\n eps: a small number to avoid 0 as denominator.\n Returns:\n iou (N): IoU values.\n \"\"\"\n overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])\n overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])\n\n overlap_area = area_of(overlap_left_top, overlap_right_bottom)\n area0 = area_of(boxes0[..., :2], boxes0[..., 2:])\n area1 = area_of(boxes1[..., :2], boxes1[..., 2:])\n return overlap_area / (area0 + area1 - overlap_area + eps)\n\ndef hard_nms(box_scores, iou_threshold, top_k=-1, 
candidate_size=200):\n \"\"\"\n Perform hard non-maximum-supression to filter out boxes with iou greater\n than threshold\n Args:\n box_scores (N, 5): boxes in corner-form and probabilities.\n iou_threshold: intersection over union threshold.\n top_k: keep top_k results. If k <= 0, keep all the results.\n candidate_size: only consider the candidates with the highest scores.\n Returns:\n picked: a list of indexes of the kept boxes\n \"\"\"\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n indexes = np.argsort(scores)\n indexes = indexes[-candidate_size:]\n while len(indexes) > 0:\n current = indexes[-1]\n picked.append(current)\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n indexes = indexes[:-1]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n np.expand_dims(current_box, axis=0),\n )\n indexes = indexes[iou <= iou_threshold]\n\n return box_scores[picked, :]\n\ndef predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.5, top_k=-1):\n \"\"\"\n Select boxes that contain human faces\n Args:\n width: original image width\n height: original image height\n confidences (N, 2): confidence array\n boxes (N, 4): boxes array in corner-form\n iou_threshold: intersection over union threshold.\n top_k: keep top_k results. If k <= 0, keep all the results.\n Returns:\n boxes (k, 4): an array of boxes kept\n labels (k): an array of labels for each boxes kept\n probs (k): an array of probabilities for each boxes being in corresponding labels\n \"\"\"\n boxes = boxes[0]\n confidences = confidences[0]\n picked_box_probs = []\n picked_labels = []\n for class_index in range(1, confidences.shape[1]):\n probs = confidences[:, class_index]\n mask = probs > prob_threshold\n probs = probs[mask]\n if probs.shape[0] == 0:\n continue\n subset_boxes = boxes[mask, :]\n box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)\n box_probs = hard_nms(box_probs,\n iou_threshold=iou_threshold,\n top_k=top_k,\n )\n picked_box_probs.append(box_probs)\n picked_labels.extend([class_index] * box_probs.shape[0])\n if not picked_box_probs:\n return np.array([]), np.array([]), np.array([])\n picked_box_probs = np.concatenate(picked_box_probs)\n picked_box_probs[:, 0] *= width\n picked_box_probs[:, 1] *= height\n picked_box_probs[:, 2] *= width\n picked_box_probs[:, 3] *= height\n return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]\n\n\n# In[22]:\n\n\ncnt = 1\ncoords = []\nimage_list = []\ndef detect_face(image):\n global cnt\n image = cv2.resize(image,(640,480))\n img = image.copy()\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_mean = np.array([127, 127, 127])\n img = (img - img_mean) / 128\n img = np.transpose(img, [2, 0, 1])\n img = np.expand_dims(img, axis=0)\n img = img.astype(np.float32)\n onnx_model = onnx.load('Copy of ultra_light_640.onnx')\n predictor = prepare(onnx_model)\n ort_session = ort.InferenceSession('Copy of ultra_light_640.onnx')\n input_name = ort_session.get_inputs()[0].name\n confidences, boxes = ort_session.run(None, {input_name: img})\n h,w,_ = image.shape\n boxes, labels, probs = predict(w, h, confidences, boxes, 0.7)\n cnt = 0\n for i in range(boxes.shape[0]):\n if probs[cnt]>0.99:\n box = boxes[i, :]\n x1, y1, x2, y2 = box\n crop_image = image[y1:y2,x1:x2]\n if crop_image.shape[0]<=0 or crop_image.shape[1]<=0 or len(crop_image)==0:\n continue\n coords.append((x1, y1, x2, y2))\n image_list.append(crop_image)\n cnt = cnt + 1\n return 
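# --- aside: hard_nms() behaviour on toy data (hypothetical demo; IoU and the
# suppression loop are restated inline so this snippet runs on its own, but it
# mirrors the functions defined in the record above):
import numpy as np

def iou_xyxy(a, b):
    lt = np.maximum(a[:2], b[:2]); rb = np.minimum(a[2:], b[2:])
    wh = np.clip(rb - lt, 0.0, None)
    inter = wh[0] * wh[1]
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / (area(a) + area(b) - inter + 1e-5)

def nms(box_scores, iou_threshold=0.5):
    order = np.argsort(box_scores[:, -1])           # ascending, pop from the end
    keep = []
    while len(order) > 0:
        cur = order[-1]; keep.append(cur); order = order[:-1]
        order = [i for i in order
                 if iou_xyxy(box_scores[cur, :4], box_scores[i, :4]) <= iou_threshold]
    return box_scores[keep]

boxes = np.array([[0, 0, 10, 10, 0.9],      # kept (highest score)
                  [1, 1, 11, 11, 0.8],      # suppressed: IoU ~0.68 with the first
                  [50, 50, 60, 60, 0.7]])   # kept: disjoint
print(nms(boxes)[:, -1])                    # -> [0.9 0.7]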
coords, image_list\n\n\n# In[12]:\n\n\nmodel = load_model('vgg_mask.h5')\n\n\n# In[23]:\n\n\ndef mask_detection(image):\n img = cv2.imread(image)\n original_img = img.copy()\n box,img_list = detect_face(img)\n for images,b in zip(img_list,box):\n images = cv2.cvtColor(images,cv2.COLOR_RGB2BGR)\n images = cv2.resize(images,(224,224))\n images = images/255.0\n pred = model.predict(images[np.newaxis,...])\n flag = 0\n if pred[0][0] < pred[0][1]:\n flag = 1\n if flag == 0:\n x1, y1, x2, y2 = b\n original_img = cv2.resize(original_img,(640,480))\n cv2.rectangle(original_img, (x1, y1-5), (x2, y2+5), (0,255,0), 2)\n else:\n x1, y1, x2, y2 = b\n original_img = cv2.resize(original_img,(640,480))\n cv2.rectangle(original_img, (x1, y1-5), (x2, y2+5), (0,0,255), 2)\n return original_img\n\n","sub_path":"Detection-of-Person-With-or-Without-Mask-master/Classfication_of_Person_with_or_without_Wearing_Mask.py","file_name":"Classfication_of_Person_with_or_without_Wearing_Mask.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"221004789","text":"import psycopg2\r\nimport sys\r\nimport random\r\nimport name\r\nimport time\r\n\r\n\r\n#Define our connection string\r\nconn_string = \"host='localhost' dbname='CSE190' user='\" + name.getName() + \"' password='test'\"\r\n \r\n# print the connection string we will use to connect\r\nprint (\"Connecting to database \", conn_string, \"\")\r\n \r\n# get a connection, if a connect cannot be made an exception will be raised here\r\nconn = psycopg2.connect(conn_string)\r\n \r\n# conn.cursor will return a cursor object, you can use this cursor to perform queries\r\ncursor = conn.cursor() \r\nprint (\"Connected!\\n\")\r\n \r\n#######################################################################################\r\n\r\n# Disabling the trigger temporarily. Comment if not neeeded\r\n#cursor.execute(\"ALTER TABLE posts DISABLE TRIGGER inc_post\")\r\n#conn.commit()\r\n\r\n########################################################################################\r\n\r\nnumPosts = 10000\r\nnumMember = 1000\r\nnumTopic = 10000\r\n\r\nrandom.seed(0xFE4432)\r\n\r\nstartTime = time.time()\r\n \r\nfor i in range(0, numPosts): \r\n randTopic = random.randrange(1, numTopic)\r\n randPoster = random.randrange(0, numMember) # we use the id not name.\r\n cursor.execute(\"INSERT INTO posts VALUES ('\" + str(i) + \"', '\" + str(randPoster) + \"', 'Random Title', 'This is text body', NULL, '\" + str(randTopic) + \"')\" )\r\n \r\n###############################################################################################\r\nconn.commit()\r\n\r\nendTime = time.time()\r\ntotalTime = endTime - startTime\r\n\r\nprint(\"Time taken: \" + str(totalTime))\r\n\r\n# Re-enabling the trigger temporarily. 
Comment if not needed\r\n#cursor.execute(\"ALTER TABLE posts ENABLE TRIGGER inc_post\")\r\n#conn.commit()\r\n\r\n########################################################################################\r\n\r\ncursor.close()\r\nconn.close()\r\n","sub_path":"CSE190/backup_pre_final/src/script_table_filling_posts.py","file_name":"script_table_filling_posts.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"339642032","text":"# coding: utf-8\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nt = np.arange(0.0, 2.0, 0.01)\ns = 2 + np.sin(3 * np.pi * t)\n\nfig, ax = plt.subplots()\nax.plot(t, s)\n\nax.set(xlabel='time (s)', ylabel='voltage (mV)',\n title='matplotlib test')\nax.grid()\n\nplt.show()\n","sub_path":"jt/t/plot-test-1.py","file_name":"plot-test-1.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"237677887","text":"#! /usr/bin/env python3\n\nimport json\n\ndef main(inpath=\"output/extract_around.json\", outpath=\"output/stats-phrases_around.json\"):\n with open(inpath, 'r') as infile:\n data = json.load(infile)\n \n stats = {}\n for company_name, company in data.items():\n company_articles = len(company.values())\n company_phrases = 0\n no_phrases = 0\n one_phrase = 0\n more_phrases = 0\n for article in company.values():\n article_phrases = 0\n for paragraph in article[\"paragraphs\"]:\n for sentence in paragraph:\n if \"company\" in sentence[\"phrases\"]:\n #article_phrases += len(sentence[\"phrases\"][\"company\"])\n article_phrases += 1\n company_phrases += article_phrases\n if article_phrases == 0:\n no_phrases += 1\n elif article_phrases == 1:\n one_phrase += 1\n elif article_phrases > 1:\n more_phrases += 1\n stats[company_name] = {\n \"articles\" : company_articles,\n \"phrases\" : company_phrases,\n \"articles_with_0_phrases\" : no_phrases,\n \"articles_with_1_phrase\" : one_phrase,\n \"articles_with_2+_phrases\" : more_phrases\n }\n\n with open(outpath, 'w') as outfile: \n json.dump(stats, outfile, indent=4) \n\nif __name__ == \"__main__\":\n main()","sub_path":"taurin/phrases_to_stats.py","file_name":"phrases_to_stats.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160960797","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom .models import Movies\nfrom .forms import MovieForm\n\n\n# Create your views here.\ndef index(request):\n movie=Movies.objects.all()\n context={\n 'movie_list':movie\n }\n return render(request,'index.html',context)\ndef details(request,movie_id):\n movie=Movies.objects.get(id=movie_id)\n return render(request,'details.html',{'movie':movie})\n\ndef add_movie(request):\n if request.method == 'POST':\n name=request.POST.get('name')\n decs = request.POST.get('decs')\n year = request.POST.get('year')\n img = request.FILES['img']\n movie=Movies(name=name,decs=decs,year=year,img=img)\n movie.save()\n return render(request,'add.html')\n\ndef update(request,id):\n movie=Movies.objects.get(id=id)\n form=MovieForm(request.POST or None,request.FILES,instance=movie)\n if form.is_valid():\n form.save()\n return redirect('/')\n return render(request,'edit.html',{'form':form,'movie':movie})\n\ndef delete(request,id):\n if request.method =='POST':\n movie=Movies.objects.get(id=id)\n movie.delete()\n return 
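# --- aside: the INSERT in the script above is assembled by string
# concatenation; psycopg2's parameter binding is the safer idiom (no quoting
# bugs, no SQL injection). A hypothetical rewrite of the filling loop -- the
# posts table and its column order are assumed from the script; the commented
# calls need a live connection to run.
import random

numPosts, numMember, numTopic = 10, 1000, 10000
rows = [(i, random.randrange(0, numMember), 'Random Title',
         'This is text body', None, random.randrange(1, numTopic))
        for i in range(numPosts)]
insert_sql = "INSERT INTO posts VALUES (%s, %s, %s, %s, %s, %s)"
# cursor.executemany(insert_sql, rows)   # psycopg2 adapts None -> NULL, str -> quoted
# conn.commit()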
redirect('/')\n return render(request,'delete.html')","sub_path":"mymovie/movieproject/movieproject/movieapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"285283858","text":"import sys\nimport pandas as pd\nimport numpy as np\nimport csv\n#np.set_printoptions(threshold=np.inf)\n# from google.colab import drive \n# !gdown --id '1wNKsrdAxQ29G15kgpBy_asjTcZRRgmsCZRm' --output data.zip\n# !unzip data.zip\n# data = pd.read_csv('gdrive/My Drive/hw1-regression/train.csv', header = None, encoding = 'big5')\nnum = 9\ndata = pd.read_csv('./train.csv', encoding = 'big5')\ndata = data.iloc[:, 3:]\ndata[data == 'NR'] = 0\nraw_data = data.to_numpy()\nmonth_data = {}\nfor month in range(12):\n sample = np.empty([18, 480])\n for day in range(20):\n \n sample[:, day * 24 : (day + 1) * 24] = raw_data[18 * (20 * month + day) : 18 * (20 * month + day + 1), :]\n sample[0]=np.zeros(480);\n sample[8]=np.zeros(480);\n #sample[2]=np.zeros(480);\n sample[12]=np.zeros(480);\n sample[17]=np.zeros(480);\n\n month_data[month] = sample\n #print(np.shape(sample))\n #print(\"sample = \",sample)\n\nx = np.empty([12 * 471, 18 * num], dtype = float)\ny = np.empty([12 * 471, 1], dtype = float)\nfor month in range(12):\n for day in range(20):\n for hour in range(24):\n if day == 19 and hour > 14:\n continue\n x[month * 471 + day * 24 + hour, :] = month_data[month][:,day * 24 + hour : day * 24 + hour + num].reshape(1, -1) #vector dim:18*9 (9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9)\n y[month * 471 + day * 24 + hour, 0] = month_data[month][9, day * 24 + hour + num] #value\n#print(x)\n#print(y)\nimport math\nx_validation = x[math.floor(len(x) * 0.8): , :]\ny_validation = y[math.floor(len(y) * 0.8): , :]\nx = x[: math.floor(len(x) * 0.8), :]\ny = y[: math.floor(len(y) * 0.8), :]\n\nmean_x = np.mean(x, axis = 0) #18 * 9 \nstd_x = np.std(x, axis = 0) #18 * 9 \nprint(std_x)\nmean_x_v = np.mean(x_validation, axis = 0) #18 * 9 \nstd_x_v = np.std(x_validation, axis = 0) #18 * 9 \nfor i in range(len(x)): #12 * 471\n for j in range(len(x[0])): #18 * 9 \n if std_x[j] != 0:\n x[i][j] = (x[i][j] - mean_x[j]) / std_x[j]\nfor i in range(len(x_validation)):#12 * 471\n for j in range(len(x_validation[0])): #18 * 9 \n if std_x_v[j] != 0:\n x_validation[i][j] = (x_validation[i][j] - mean_x_v[j]) / std_x_v[j]\ndim = 18 * num + 1\nw = np.zeros([dim, 1])\nx = np.concatenate((np.ones([math.floor(12 * 471*0.8), 1]), x), axis = 1).astype(float)\nx_validation = np.concatenate((np.ones([1131, 1]), x_validation), axis = 1).astype(float)\nlearning_rate = 0.5401\niter_time = 10000\nadagrad = np.zeros([dim, 1])\neps = 0.0000000001\ntrain_loss=[]\ndev_loss=[]\nfor t in range(iter_time):\n t_loss = np.sqrt(np.sum(np.power(np.dot(x, w) - y, 2))/471/12)#rmse\n d_loss = np.sqrt(np.sum(np.power(np.dot(x_validation, w) - y_validation, 2))/1131)\n train_loss.append(t_loss)\n dev_loss.append(d_loss)\n if(t%100==0):\n print(str(t) + \":\" + str(t_loss))\n print(str(t) + \":\" + str(d_loss))\n gradient = 2 * np.dot(x.transpose(), np.dot(x, w) - y) #dim*1\n adagrad += gradient ** 2\n w = w - learning_rate * gradient / np.sqrt(adagrad + eps)\n\nnp.save('weight_best.npy', w)\nnp.save('std_x_best.npy',std_x)\nnp.save('mean_x_best.npy',mean_x)\ntestdata = pd.read_csv(sys.argv[1], header = None, encoding = 'big5')\ntest_data = testdata.iloc[:, 2:]\ntest_data[test_data == 'NR'] = 0\ntest_data = test_data.to_numpy()\ntest_x = np.empty([240, 18*9], dtype 
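# --- aside: the training loop above is plain gradient descent with Adagrad
# scaling; stripped to its core it fits in a dozen lines (hypothetical toy
# problem, one feature plus bias, same eps/adagrad bookkeeping):
import numpy as np

rng = np.random.default_rng(1)
X = np.concatenate((np.ones((200, 1)), rng.normal(size=(200, 1))), axis=1)
y = X @ np.array([[2.0], [-3.0]]) + 0.05 * rng.normal(size=(200, 1))

w = np.zeros((2, 1)); adagrad = np.zeros((2, 1)); lr, eps = 0.5, 1e-10
for t in range(500):
    grad = 2 * X.T @ (X @ w - y)
    adagrad += grad ** 2                       # per-weight accumulated squared grads
    w -= lr * grad / np.sqrt(adagrad + eps)    # effective step shrinks over time
print(w.ravel())                               # approaches [2, -3]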
= float)\ntest_y = np.empty([240*9,1], dtype = float)\n\nfor i in range(240):\n test_x[i, :] = test_data[18 * i: 18* (i + 1), :].reshape(1, -1)\n #print(test_y[:,0])\n\nfor i in range(len(test_x)):\n for j in range(len(test_x[0])):\n if std_x[j] != 0:\n test_x[i][j] = (test_x[i][j] - mean_x[j]) / std_x[j]\ntest_x = np.concatenate((np.ones([240, 1]), test_x), axis = 1).astype(float)\nw = np.load('weight.npy')\nans_y = np.dot(test_x, w)\n\n#print(np.sqrt(np.sum(np.power((ans_y - y_validation), 2))/1131))#rmse\n\nwith open(sys.argv[2], mode='w', newline='') as submit_file:\n csv_writer = csv.writer(submit_file)\n header = ['id', 'value']\n #print(header)\n csv_writer.writerow(header)\n for i in range(240):\n row = ['id_' + str(i), ans_y[i][0]]\n csv_writer.writerow(row)\n #print(row)","sub_path":"hw1_best.py","file_name":"hw1_best.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"246394255","text":"import json\nfrom sys import argv\nimport numpy as np\nimport math\n\ndef main(infile, outfile):\n\tnp.set_printoptions(precision=12)\n\tdist = 4*math.sqrt(pow(1.0/111000.0,2))\n\tdata = json.load(open(infile))\n\tdata2 = { \"type\": \"FeatureCollection\", \"crs\": { \"type\": \"name\", \"properties\": { \"name\": \"urn:ogc:def:crs:OGC:1.3:CRS84\" } }, \"features\": []}\n\tfor feature in data['features']:\n\t\taFeature = {}\n\t\tbFeature = {}\n\t\tcoords = feature['geometry']['coordinates']\n\t\taFeature['properties'] = bFeature['properties'] = feature['properties']\n\t\taFeature['type'] = bFeature['type'] = feature['type']\n\t\taFeature['geometry'] = { \"type\": \"LineString\", \"coordinates\": []}\n\t\tbFeature['geometry'] = { \"type\": \"LineString\", \"coordinates\": []}\n\t\t''' \n\t\tSee that we don't go above limit when setting second point\n\t\t*** Remember also that coordinates is an array of arrays of tuple-lists ***\n\t\t'''\n\t\tif feature['properties']['fclass'] in [\"primary\", \"secondary\", \"tertiary\", \"residential\"]: # not sure if we should add \"service\"\n\t\t\tlineString = coords\n\t\t\tlineStringA = []\n\t\t\tlineStringB = []\n\t\t\ttempLeftLine = [0.0, 0.0]\n\t\t\ttempRightLine = [0.0, 0.0]\n\t\t\t# lineString is an list of coordinates\n\t\t\twhile len(lineString) > 1:\n\t\t\t\t# coords is an element (itself a list of two doubles) of the line\n\t\t\t\tpoint1 = np.array(lineString[0])\n\t\t\t\tpoint2 = np.array(lineString[1])\n\t\t\t\tif (np.array_equal(point1, point2)):\n\t\t\t\t\tif (len(lineString)>2):\n\t\t\t\t\t\tdel lineString[1]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\t\t\t\ttempLeftLine[0], tempLeftLine[1], tempRightLine[0], tempRightLine[1] = computeFourOrthoganalPointsDistanceAway(point1, point2, dist)\n\t\t\t\t# Currently the format is not lat long it's long lat and that's just messed up so we convert it here\n\t\t\t\tlineStringA.append(tempLeftLine[0])\n\t\t\t\tlineStringB.append(tempRightLine[0])\n\t\t\t\tdel lineString[0]\n\t\t\tlineStringA.append(tempLeftLine[1]) # add the final point\n\t\t\tlineStringB.append(tempRightLine[1]) # add the final point\n\t\t\taFeature['geometry'] = {\"type\": \"LineString\", \"coordinates\": lineStringA[:]}\n\t\t\tbFeature['geometry'] = {\"type\": \"LineString\", \"coordinates\": lineStringB[:]}\n\t\t\tdata2['features'].append(aFeature)\n\t\t\tdata2['features'].append(bFeature)\n\twith open(outfile, 'w') as outfileobj:\n\t\tjson.dump(data2, outfileobj)\n\n\ndef computeFourOrthoganalPointsDistanceAway(point1, 
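# --- aside: the determinant trick in the function above builds a vector
# perpendicular to the segment p1->p2 with length d; easy to verify
# numerically (hypothetical check):
import numpy as np

p1, p2, d = np.array([0.0, 0.0]), np.array([3.0, 4.0]), 2.0
det = (d / np.linalg.norm(p1 - p2)) * np.array([p1[1] - p2[1], p2[0] - p1[0]])
assert np.isclose(np.dot(det, p2 - p1), 0.0)     # perpendicular to the segment
assert np.isclose(np.linalg.norm(det), d)        # offset magnitude is exactly d
print(p1 + det, p1 - det)                        # the two side-shifted endpoints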
point2, d):\n\t'''\n\tcoords is a 2D numpy array for calculations\n\treturns two tuples of two coordinates (list of two doubles)\n\t'''\n\tdet = (d/np.linalg.norm(point1-point2))*(np.array([point1[1]-point2[1], point2[0]-point1[0]]))\n\t# Vectorized code\n\treturn (point1+det).tolist(), (point2+det).tolist(),(point1-det).tolist(), (point2-det).tolist()\n\nif __name__ == '__main__':\n\tinfile = argv[1]\n\toutfile = argv[2]\n\tmain(infile, outfile)\n","sub_path":"ConvertStreetMapToTwoStreets.py","file_name":"ConvertStreetMapToTwoStreets.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"132254705","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/cclarke/Development/django-wagtail-feeds/wagtail_feeds/__init__.py\n# Compiled at: 2018-05-08 12:59:10\n# Size of source mod 2**32: 116 bytes\n__author__ = 'Christopher Clarke'\n__email__ = 'cclarke@chrisdev.com'\n__version__ = '0.1.0'","sub_path":"pycfiles/django-wagtail-feeds-0.1.0.macosx-10.12-x86_64.tar/__init__.cpython-36.py","file_name":"__init__.cpython-36.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"642291354","text":"#!/usr/bin/env python\n\nimport os \nimport re\n\nresult = []\nmyrootdir = \"/Users/ikegarrow/src/PayrollDB\"\ndir_compile = re.compile(\".*[Ss]tore.*\")\nfile_compile = re.compile(\".*\\.sql\")\n\n# Output File: seperator between path and .sql\ndelim = \"/\"\n\n# Print/store results\noutfile = open(\"payroll-db-stored-procs.csv\", \"w\")\nfor dirpath, dirnames, filenames in os.walk(myrootdir):\n for dirname in dirnames:\n\n # Stored proc folders found\n if dir_compile.match(dirname):\n \n current_path = os.path.join(dirpath, dirname)\n # print(\"current path: {}\".format(current_path))\n for fdirpath, fdirnames, ffilenames in os.walk(current_path):\n # print(\"****************************************\")\n # print(\"dirpath: {}\".format(fdirpath))\n # print(\"dirnames: {}\".format(fdirnames))\n # print(\"filenames: {}\".format(ffilenames))\n # print(\"****************************************\")\n\n for filenm in ffilenames:\n if file_compile.match(filenm):\n outfile.write(\"{}{}{}\\n\".format(fdirpath, delim, filenm))\n\noutfile.close()\n","sub_path":"payroll-db-stored-procs/get_stored_procs_from_repo.py","file_name":"get_stored_procs_from_repo.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"601164829","text":"#!/usr/bin/env/python\n\nfrom ciscoconfparse import CiscoConfParse\nfrom netmiko import ConnectHandler\nimport yaml\n\n\ndevices = yaml.load(open('/home/dkhorn/.netmiko.yml'))\nnet_conncet = ConnectHandler(**devices.get('cisco4'))\noutput = net_conncet.send_command('show run')\n\n\nparcer = CiscoConfParse(output.splitlines())\n\nint_w_ip = parcer.find_objects_w_child(parentspec = r\"^interface\", childspec = r\"^\\s+ip address\")\n\nfor elem in int_w_ip:\n print('Interface Line: ' + elem.text)\n print ('IP Address Line: ' + elem.re_search_children(r'ip address')[0].text)\n","sub_path":"Class_3/task6.py","file_name":"task6.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"350797973","text":"from 
datetime import datetime\n\nfrom PySide import QtCore\nfrom PySide.QtGui import QGraphicsGridLayout, QFrame, QWidgetAction, QMenu\nfrom pyqtgraph import PlotItem, InfiniteLine, LabelItem, PlotDataItem\nfrom Logger import logger\nfrom inspector.ui.SetShiftFrame import Ui_ShiftFrame\nfrom inspector.widget.AxisItems import OffsetAxisItem, UnitsLabelAxisItem\nfrom inspector.widget.PlotDataItems import CloneablePlotDataItem, ShiftedPlotDataItem\nfrom inspector.widget.RegionItems import ShadowRegionItem\nfrom inspector.widget.ViewBoxs import RigidViewBox\n\n\nclass DownSamplePlotItem(PlotItem):\n\n    def plot(self, *args, **kargs):\n        clear = kargs.get('clear', False)\n        params = kargs.get('params', None)\n\n        if clear:\n            self.clear()\n\n        item = PlotDataItem(*args, **kargs)\n\n        if params is None:\n            params = {}\n        self.addItem(item, params=params)\n\n        return item\n\n    def addItem(self, item, *args, **kargs):\n        if isinstance(item, PlotDataItem):\n            plotDataCount = 0\n            for it in self.items:\n                if isinstance(it, PlotDataItem):\n                    plotDataCount += 1\n            if plotDataCount == 0:\n                self.vb.setXRange(item.xData[0], item.xData[-1])\n        self.items.append(item)\n        vbargs = {}\n        if 'ignoreBounds' in kargs:\n            vbargs['ignoreBounds'] = kargs['ignoreBounds']\n        self.vb.addItem(item, *args, **vbargs)\n        name = None\n        if hasattr(item, 'implements') and item.implements('plotData'):\n            name = item.name()\n            self.dataItems.append(item)\n            # self.plotChanged()\n\n        params = kargs.get('params', {})\n        self.itemMeta[item] = params\n        # item.setMeta(params)\n        self.curves.append(item)\n        # self.addItem(c)\n\n        if hasattr(item, 'setLogMode'):\n            item.setLogMode(self.ctrl.logXCheck.isChecked(), self.ctrl.logYCheck.isChecked())\n\n        if isinstance(item, PlotDataItem):\n            ## configure curve for this plot\n            (alpha, auto) = self.alphaState()\n            item.setAlpha(alpha, auto)\n            item.setFftMode(self.ctrl.fftCheck.isChecked())\n            ds, auto, method = self.downsampleMode()\n            item.setDownsampling(ds, True, method)\n            item.setClipToView(True)\n            item.setPointMode(self.pointMode())\n\n            ## Hide older plots if needed\n            self.updateDecimation()\n\n            ## Add to average if needed\n            self.updateParamList()\n            if self.ctrl.averageGroup.isChecked() and 'skipAverage' not in kargs:\n                self.addAvgCurve(item)\n\n            # c.connect(c, QtCore.SIGNAL('plotChanged'), self.plotChanged)\n            # item.sigPlotChanged.connect(self.plotChanged)\n            # self.plotChanged()\n        # name = kargs.get('name', getattr(item, 'opts', {}).get('name', None))\n        if name is not None and hasattr(self, 'legend') and self.legend is not None:\n            self.legend.addItem(item, name=name)\n\n\nclass OffsetPlotItem(DownSamplePlotItem):\n    \"\"\"\n    Supports displaying data with an x offset.\n    E.g. x-axis data [-2,-1,0,1,2] can be displayed as [1,2,3,4,5] with offset=2\n\n    ====================================== ===================================\n    **Signals:**\n    signalMouseLeftClick(x, [y])           mouse click event, emits the clicked coordinates\n    signalSelectRegionChanged(x0, length)  region selection\n    signalShadowRegionEnter(min, max)      emitted when Enter is pressed after a region was selected\n    ====================================== ====================================\n    \"\"\"\n\n    signalMouseLeftClick = QtCore.Signal(int, list)\n    signalSelectRegionChanged = QtCore.Signal(int, int)\n    signalShadowRegionEnter = QtCore.Signal(int, int)\n\n    def __init__(self, offset=0.0, **kwargs):\n        \"\"\"\n        ====================== ===========================\n        **Arguments:**\n        offset:                curve offset\n        ====================== ===========================\n        \"\"\"\n        axisItems = {'bottom': OffsetAxisItem(offset, 'bottom'), 'left': UnitsLabelAxisItem('left')}\n        super().__init__(axisItems=axisItems, viewBox=RigidViewBox(), **kwargs)\n        
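# swap the default single title label for a grid layout that can hold one label per curve\n        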
self.layout.removeItem(self.titleLabel)\n        self.titleLabel.setParent(None)\n        self.tLayout = QGraphicsGridLayout()\n        self.layout.addItem(self.tLayout, 0, 1)\n        self.offset = offset\n        self.curves_ = []\n        self.titles = {}\n        self.showTitle_ = True\n        self.vb.signalMouseLeftClick.connect(self.mouseLeftClick)\n        self.vb.signalMouseLeftDrag.connect(self.selectedRegionMove)\n        # indicator line shown on double click\n        self.vLine = InfiniteLine(angle=90, movable=False, pen='#f0f0f0')\n        self.vLine.setVisible(False)\n        self.vLine.setZValue(10)\n        self.addItem(self.vLine, ignoreBounds=True)\n        self.hLines = []\n        # selected region\n        self.shadowRegion = ShadowRegionItem()\n        self.addItem(self.shadowRegion)\n\n    def addItem(self, item, *args, **kwargs):\n        \"\"\"\n        Override addItem so that the x data of an added item is shifted by offset\n        \"\"\"\n        if isinstance(item, PlotDataItem):\n            data_x, data_y = item.xData, item.yData\n            if self.offset != 0:\n                s = datetime.now()\n                logger.debug('offset copy data %s', s)\n                data_x_ = data_x - self.offset\n                logger.debug('offset copy data <<< %s', datetime.now() - s)\n                s = datetime.now()\n                logger.debug('curve set data %s', datetime.now() - s)\n                item.setData(data_x_, data_y)\n                logger.debug('curve set data <<< %s', datetime.now() - s)\n            self.curves_.append(item)\n            self.addTitle(item)\n        super().addItem(item, *args, **kwargs)\n\n    def removeItem(self, item):\n        if isinstance(item, PlotDataItem):\n            self.curves_.remove(item)\n            self.delTitle(item)\n        super().removeItem(item)\n\n    def setXOffset(self, offset):\n        \"\"\"\n        Set the offset\n\n        ====================== ========================\n        **Arguments:**\n        offset:                the new offset\n        originalDataX:         the original X data\n        ====================== ========================\n        \"\"\"\n        # if originalDataX is not None:\n        #     for item in self.curves_:\n        #         _, data_y = item.getData()\n        #         data_x = [i - offset for i in originalDataX]\n        #         item.setData(data_x, data_y)\n        if len(self.curves_) > 0:\n            for curve in self.curves_:\n                data_x, data_y = curve.xData, curve.yData\n                if self.offset == offset:\n                    continue\n                data_x_ = [i + self.offset - offset for i in data_x]\n                curve.setData(data_x_, data_y)\n        self.offset = offset\n        self.getAxis('bottom').offset = offset\n\n    def setScales(self, xScale=1.0, yScale=1.0):\n        self.getAxis('bottom').setScale(xScale)\n        self.getAxis('left').setScale(yScale)\n\n    def setLabel(self, axis, text=None, units=None, unitPrefix=None, scale=None, **args):\n        axis_ = self.getAxis(axis)\n        axis_.setLabel(text, units, unitPrefix)\n        axis_.setScale(scale)\n        self.showAxis(axis)\n\n    def setTitle(self, title=None, **args):\n        pass\n\n    def showTitle(self, show):\n        self.showTitle_ = show\n        self.updateTitles()\n\n    def updateTitles(self):\n        \"\"\"\n        Update the curve titles\n\n        When there are >= 5 titles, only the titles of the first and the last curve are shown\n\n        # TODO the title ordering here is wrong; the curve index should be passed in and this fixed later\n\n        **internal method**\n        \"\"\"\n        if self.showTitle_:\n            for i in range(self.tLayout.rowCount()):\n                for j in range(self.tLayout.columnCount()):\n                    old = self.tLayout.itemAt(i, j)\n                    if old is not None:\n                        self.tLayout.removeItem(old)\n                        old.setParentItem(None)\n                        old.setVisible(False)\n            labels = list(self.titles.values())\n            labels.sort(key=lambda l: l.text)\n            if len(labels) > 5:\n                self.tLayout.addItem(LabelItem(labels[0].text + ' ... 
' + labels[-1].text), 0, 0)\n            else:\n                for i, label in enumerate(labels):\n                    label.setVisible(True)\n                    self.tLayout.addItem(label, 0, i)\n        else:\n            for i in range(self.tLayout.rowCount()):\n                for j in range(self.tLayout.columnCount()):\n                    old = self.tLayout.itemAt(i, j)\n                    if old is not None:\n                        old.setVisible(False)\n\n    def addTitle(self, curve):\n        \"\"\"\n        Add a curve title\n\n        **internal method**\n\n        ====================== =========================\n        **Arguments:**\n        curve                  the curve to add a title for\n        ====================== =========================\n        \"\"\"\n        self.titles[curve] = LabelItem(curve.name(), color=curve.opts['pen'])\n        self.updateTitles()\n\n    def delTitle(self, curve):\n        \"\"\"\n        Remove a curve title\n\n        **internal method**\n\n        ====================== =========================\n        **Arguments:**\n        curve                  the curve whose title should be removed\n        ====================== =========================\n        \"\"\"\n        self.titles.pop(curve)\n        self.updateTitles()\n\n    def mouseLeftClick(self, x, y):\n        x_ = int(x + self.offset)\n        ys = []\n        for curve in self.curves_:\n            curve = curve # type:CloneablePlotDataItem\n            y_ = curve.yData[x_]\n            ys.append(y_)\n        self.signalMouseLeftClick.emit(x_, ys)\n        self.setLinePosition(None)\n        self.shadowRegion.setRegion(0, 0)\n        self.shadowRegion.setVisible(False)\n\n    def selectedRegionMove(self, x0, y0, x1, y1):\n        self.shadowRegion.setVisible(True)\n        self.shadowRegion.setRegion(left=round(x0), right=round(x1))\n        mn, mx = self.shadowRegion.getRegion()\n        self.signalSelectRegionChanged.emit(mn + self.offset, mx - mn)\n\n    def mouseDoubleClickEvent(self, ev):\n        if ev.button() == QtCore.Qt.LeftButton:\n            mousePoint = self.vb.mapSceneToView(ev.scenePos())\n            self.setLinePosition(mousePoint.x())\n            ev.accept()\n\n    def setLinePosition(self, v):\n        for hLine in self.hLines:\n            self.removeItem(hLine)\n        self.hLines.clear()\n        if v is None:\n            self.vLine.setVisible(False)\n        else:\n            v_ = int(v)\n            self.vLine.setPos(v_)\n            self.vLine.setVisible(True)\n            hs = []\n            for curve in self.curves_:\n                h = curve.yData[int(v + self.offset)]\n                hs.append(h)\n                hLine = InfiniteLine(angle=0, movable=False, pen='#f0f0f0')\n                self.hLines.append(hLine)\n            for i, hLine in enumerate(self.hLines):\n                self.addItem(hLine)\n                hLine.setPos(hs[i])\n\n    def clear(self):\n        for item in self.items[:]:\n            if isinstance(item, PlotDataItem):\n                self.removeItem(item)\n\n    def keyPressEvent(self, ev):\n        if (ev.key() == QtCore.Qt.Key_Enter or ev.key() == QtCore.Qt.Key_Return) and self.shadowRegion.isVisible():\n            mn, mx = self.shadowRegion.getRegion()\n            self.signalShadowRegionEnter.emit(mn + self.offset, mx + self.offset)\n            self.shadowRegion.setRegion(0, 0)\n            self.shadowRegion.setVisible(False)\n\n\nclass ShiftedPlotItem(OffsetPlotItem):\n\n    def __init__(self, offset=0.0, **kwargs):\n        super().__init__(offset, **kwargs)\n        self.shiftFrame = ShiftFrame(plotItem=self)\n        setShift = QMenu('Shift')\n        act = QWidgetAction(self)\n        act.setDefaultWidget(self.shiftFrame)\n        setShift.addAction(act)\n        self.ctrlMenu.addMenu(setShift)\n\n    def setShift(self, index, shift):\n        item = self.keyItems.get(index) # type:ShiftedPlotDataItem\n        if item is None:\n            return\n        item.setShift(shift)\n\n    def getContextMenus(self, event):\n        self.keyItems = {}\n        self.shiftFrame.clear()\n        for item in self.items:\n            if isinstance(item, ShiftedPlotDataItem):\n                self.keyItems[item.index] = item\n                self.shiftFrame.addItem(item.index, item.shift)\n        return super().getContextMenus(event)\n\n\nclass ShiftFrame(QFrame, Ui_ShiftFrame):\n\n    def __init__(self, *args, plotItem = None, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.setupUi(self)\n        
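# connect the UI controls: picking an index shows its stored shift, the button applies the spin box value\n        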
self.comboBox.currentIndexChanged[int].connect(self.setSpinBoxValue)\n        self.pushButton.clicked.connect(self.setShift)\n        self.plotItem = plotItem # type: ShiftedPlotItem\n\n    def clear(self):\n        self.comboBox.clear()\n\n    def addItem(self, item, shift=0):\n        self.comboBox.addItem(str(item), shift)\n\n    def setSpinBoxValue(self, index):\n        if index < 0:\n            return\n        self.spinBox.setValue(self.comboBox.itemData(index))\n\n    def setShift(self):\n        self.plotItem.setShift(int(self.comboBox.currentText()), self.spinBox.value())\n        self.comboBox.setItemData(self.comboBox.currentIndex(), self.spinBox.value())\n\n","sub_path":"inspector/widget/PlotItems.py","file_name":"PlotItems.py","file_ext":"py","file_size_in_byte":13434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"558163770","text":"# A program that takes three floating point numbers as command line arguments:\n# a starting radius, a radius increment and an ending radius.\n# The program should then print a table of corresponding sphere\n# surface areas and volume\n\nimport sys\nimport math\n\n\ndef main():\n    start_r = float(sys.argv[1])\n    inc_r = float(sys.argv[2])\n    end_r = float(sys.argv[3])\n\n    h1 = \"Radius (m)\"\n    h4 = \"-\" * len(h1)\n    h2 = \"Area (m^2)\"\n    h5 = \"-\" * len(h2)\n    h3 = \"Volume (m^3)\"\n    h6 = \"-\" * len(h3)\n\n    print(\"{:>s} {:>15s} {:>15s}\".format(h1, h2, h3))\n    print(\"{:>s} {:>15s} {:>15s}\".format(h4, h5, h6))\n\n    def sphere_area(r):\n        return 4 * math.pi * r ** 2\n\n    def sphere_volume(r):\n        return float(4.0 / 3.0) * math.pi * r ** 3\n\n    # compare with a tolerance: exact float equality may never be reached\n    while start_r <= end_r + inc_r / 2:\n        area = sphere_area(start_r)\n        volume = sphere_volume(start_r)\n        print(\"{:>10.1f} {:=15.2f} {:=15.2f}\".format(start_r, area, volume))\n        start_r += inc_r\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"week-01/sphere_12.py","file_name":"sphere_12.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"218215983","text":"########################################\n## Big Data Analytics\n## assignment 1 - part II (a), at Stony Brook University\n## Fall 2017\n\n## by Shubham Kumar Jain (SBU ID: 111482623)\n\nfrom pyspark import SparkContext, SparkConf\nfrom random import random\n\n\n\ndef punctuation(x):\n    \n    punctuation=(\",\", \"'\",\"\\r\",\"\\t\", \"\\n\", \".\", \"!\", \"?\", \"(\", \")\", \":\", \"\\\"\", \"/\", \"\\\\\", \"<\", \">\", \"#\", \"$\", \"-\")\n    \n    for p in punctuation:\n        x = x.replace(p,' ')\n    return x\n\n\n# Function for wordcount\ndef wordcount(data):\n    \n    filteredtext=[]\n    \n    for (k,v) in data:\n        filteredtext.append(v)\n    \n    rdd=sc.parallelize(filteredtext)\n    \n    rdd=rdd.flatMap(lambda x: [punctuation(x)])\n    \n    rdd1=rdd.flatMap(lambda line: line.lower().split(\" \")).map(lambda word: (word, 1))\n    \n    \n    print(\"\\n\\n*****************\\n Word Count\\n*****************\\n\")\n    print(\"\\nAfter Map Task Completion: \")\n    print(rdd1.collect())\n\n    rdd2 = rdd1.reduceByKey(lambda v1,v2:v1 +v2)\n\n    wordcount = rdd2.sortByKey(True)\n    \n    print(\"\\nAfter Reduce Task Completion: \")\n    print(wordcount.collect())\n    \n\n    \n#Function for set difference \ndef setdifference(data):\n    \n    filteredtext=[]\n    \n    for (k,v) in data:\n        filteredtext.append(v)\n    \n    rdd3=sc.parallelize(filteredtext)\n    \n    \n    def function(x):\n        if(isinstance(x,str)):\n            return x.lower()\n        else:\n            return x\n    \n    \n    rdd4=rdd3.flatMap(lambda x: function(x)).map(lambda word: (word, 1))\n    \n    \n    print(\"\\n\\n*****************\\n Set 
Difference\\n*****************\\n\")\n print(\"\\nAfter Map Task Completion: \")\n print(rdd4.collect())\n \n rdd5 = rdd4.reduceByKey(lambda v1,v2: v1+v2)\n\n setdiff = rdd5.filter(lambda x: x[1]==1 and x[0] in data[0][1]).map(lambda x: x[0])\n \n \n print(\"\\nAfter Reduce Task Completion: \")\n print(setdiff.collect())\n\n\nif __name__ == \"__main__\":\n\n \n conf= SparkConf()\n conf.setAppName(\"Different Implementations\")\n sc = SparkContext(conf=conf)\n \n #WordCount Implementation Below\n \n data = [(1, \"The horse raced past the barn fell\"),\n (2, \"The complex houses married and single soldiers and their families\"),\n (3, \"There is nothing either good or bad, but thinking makes it so\"),\n (4, \"I burn, I pine, I perish\"),\n (5, \"Come what come may, time and the hour runs through the roughest day\"),\n (6, \"Be a yardstick of quality.\"),\n (7, \"A horse is the projection of peoples' dreams about themselves - strong, powerful, beautiful\"),\n (8, \"I believe that at the end of the century the use of words and general educated opinion will have altered so much that one will be able to speak of machines thinking without expecting to be contradicted.\"),\n\t\t\t(9, \"The car raced past the finish line just in time.\"),\n\t\t\t(10, \"Car engines purred and the tires burned.\")]\n \n wordcount(data)\n \n #Set Difference Implementation Below\n \n data1 = [('R', ['apple', 'orange', 'pear', 'blueberry']),\n\t\t\t ('S', ['pear', 'orange', 'strawberry', 'fig', 'tangerine'])]\n\t\n data2 = [('R', [x for x in range(50) if random() > 0.5]),\n\t \t\t ('S', [x for x in range(50) if random() > 0.75])]\n \n \n setdifference(data1)\n setdifference(data2)\n \n \n \n \n \n","sub_path":"Part-I/a1p2a_jain.py","file_name":"a1p2a_jain.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"629156854","text":"#Redirect to actual FM using Blueprints\n#Temporary access granted\n#MANYcode destructed\n##--------------------------FROM: Folder_Search.py--------------------------\n##----------------------TO: Arrival_Or_Departure.py-------------------------\n##-----------------------------Importing Section----------------------------\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom PIL import ImageTk, Image\nimport os\nimport time\nimport tkinter as tk\nimport openpyxl\nimport threading\nimport bags_decode\n\n##-----------------------------Code Section----------------------------------\n\ndef main():\n \n global RowID\n global access\n f = open(\"RowID.txt\",\"r\")\n RowID = int(f.read())\n f.close()\n global bag_count\n\n #Accessing Excel\n loadingExcel = openpyxl.load_workbook('PassengerDataStore.xlsx')\n\n #Accessing Sheet in that Excel\n airlinesSheet=loadingExcel.get_sheet_by_name('Form Responses 1')\n for x in range(1,50):\n parameter = airlinesSheet.cell(row=x, column=2).value\n if parameter == RowID:\n bag_count = airlinesSheet.cell(row=x, column=12).value\n access = x\n break\n\n loadingExcel.save('PassengerDataStore.xlsx')\n\n bag_count = int(bag_count) - 1\n\n if (bag_count == 0):\n messagebox.showinfo(\"Successful verification\",\"Have a nice day !!\\nThank you for the co operation.\")\n return None\n elif (bag_count > 0 and bag_count < 5): \n bags_decode_QR(bag_count)\n\ndef bags_decode_QR(bag_count):\n y = 0\n cipher = []\n final = []\n plain = []\n new = []\n key = 3\n transposition = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\n start_bag_decode = time.time()\n x 
= int(bag_count)\n    while (x > 0):\n        print(x)\n        messagebox.showwarning(\"Verification process\",\"You have \" + str(x) + \" more bag(s) to be scanned.\\n\" + str(int(bag_count) - x) + \" bag(s) verified successfully.\")\n        #call decode QR\n        #write decoded value to a text file\n        #os.system(\"QR.exe\")\n        os.system(r'Next_decode.exe')\n        f = open(\"Bag_check.txt\",\"r\")\n        bag_check = f.read()\n        f.close()\n        #Decrypting transposition cipher\n        final = bag_check\n        for a in final:\n            new.append(a)\n        j = 0\n        for m in range(0,4):\n            for n in range(0,4):\n                transposition[m][n] = new[j]\n                j = j+1\n        print(transposition)\n        cipher = []\n        for m in range(0,4):\n            for n in range(0,4):\n                cipher.append(transposition[n][m])\n        cipher = cipher[0:11]\n        print(cipher)\n\n        for a in cipher:\n            print(a)\n            diff = int(a) - key\n            if (diff < 0):\n                diff = abs(diff)\n                diff = 10 - diff\n                plain.append(diff)\n            else:\n                plain.append(diff)\n\n        plain = ''.join(str(e) for e in plain)\n        print(plain)\n        bag_check = str(plain)\n\n        ID = \"\"\n        for i in range(4,11):\n            ID += bag_check[i]\n\n        print(RowID)\n        print(ID)\n        if (str(RowID) != str(ID)):\n            print(x)\n            messagebox.showwarning(\"Verification process\",\"Security breach.\\nThe bag does not belong to this user !!\")\n        else:\n            x = x - 1\n            if (x == 0):\n                messagebox.showinfo(\"Successful verification\",\"Have a nice day !!\\nThank you for the cooperation.\")\n                return None\n    end_bag_decode = time.time()\n    bag_decode_time = end_bag_decode - start_bag_decode\n    end_session = time.time()\n\n    f = open(\"start_session.txt\",\"r\")\n    start_session = float(f.read())\n    f.close()\n\n    session_time = end_session - start_session\n\n    #Accessing Excel\n    loadingExcel = openpyxl.load_workbook('PassengerDataStore.xlsx')\n\n    #Accessing Sheet in that Excel\n    airlinesSheet=loadingExcel.get_sheet_by_name('Form Responses 1')\n\n    format_end_time = time.gmtime(end_session)\n    format_end_time = time.asctime(format_end_time)\n    airlinesSheet.cell(row=access, column=19).value = format_end_time\n    airlinesSheet.cell(row=access, column=20).value = session_time\n    loadingExcel.save('PassengerDataStore.xlsx')\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Code/bags_decode.py","file_name":"bags_decode.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"534871341","text":"# # Part 2:\n\ndef print_grid(n):\n\n    \"\"\" Write a function that draws a grid like the following:\n\n    + - + - +\n    |   |   |\n    + - + - +\n    |   |   |\n    + - + - +\n\n    Where the input will be the size of the grid \"\"\"\n\n    m = int(n / 2)\n    for i in range (0, 2):\n        print ('+' + ('-' * m) + '+' + ('-' * m) + '+')\n        for i in range(0, m):\n            print ('|' + (' ' * m) + '|' + (' ' * m) + '|')\n    print ('+' + ('-' * m) + '+' + ('-' * m) + '+')\n\n\n\nwhile True:\n\n    size = input('Choose a size for the grid or Exit to stop: ')\n\n    if size.lower() == 'exit':\n        break\n    elif size == '':\n        size = input('Choose a size for the grid or Exit to stop: ')\n    else:\n        print_grid(int(size))\n\n\n\n\n\n\n# Part 3:\n\ndef print_grid2(number, size):\n\n    \"\"\" Write a function that draws a grid like the following:\n\n    + - + - + - +\n    |   |   |   |\n    + - + - + - +\n    |   |   |   |\n    + - + - + - +\n    |   |   |   |\n    + - + - + - +\n\n\n    Where the first input will be the number of grids and the second the size for each grid \"\"\"\n\n    for i in range (0, number):\n        print (('+' + ('-' * 
size)) * number + '+')\n        for i in range(0, size):\n            print (('|' + (' ' * size)) * number + '|')\n    print(('+' + ('-' * size)) * number + '+')\n\n\n\nwhile True:\n\n    number_size = input('Choose number of grids and its size or Exit to stop: ')\n    number_size = ' '.join(number_size.split())\n\n    if number_size.lower() == 'exit':\n        break\n    elif number_size == '':\n        number_size = input('Choose number of grids and its size or Exit to stop: ')\n    else:\n        if ',' in number_size:\n            number_size = number_size.strip().replace(',', ' ')\n\n            print ('size number', number_size)\n\n        number_size = ' '.join(number_size.split())\n        number, size = number_size.split(' ')\n        print_grid2(int(number), int(size))","sub_path":"students/lolaguerrero/session02/print_grid.py","file_name":"print_grid.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"333663412","text":"# today we will try to play the role of a detective\n# we have a set of people who drive the same car make as the killer\n# a set of people who live close to the crime scenes\n# and a set of people whose workplace is also close to the crime scenes\n\n# names are usually not unique values, but let's suppose these were social security numbers\nshevrole_owner = {'sam', 'edit', 'semen', 'petr'}\n\nwork_near = {'konstantin', 'vladislav', 'sam', 'petr', 'edit'}\n\nlive_near = {'john', 'vladislav', 'olga', 'mike', 'grant', 'covid', 'bilbo'}\n\n#print(type(shevrole_owner))\n\na = shevrole_owner & work_near & live_near\nb = shevrole_owner & work_near\nc = shevrole_owner & live_near\nd = work_near & live_near\n\n#quest = input(\"If you want to define those who live and work near places of crime, type 1: \")\n#print(list(live_near) + list(work_near))\n# asd = work_near | live_near\n# print(asd)\nprint((work_near | live_near) & shevrole_owner)","sub_path":"HW_3_detective_story.py","file_name":"HW_3_detective_story.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"652604022","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2018/3/7 14:51\n# @Author  : Yajun Yin\n# @Note    :\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nHIDDEN_SIZE = 30  # number of hidden units (LSTM state size)\nNUM_LAYERS = 2\n\nTIMESTEPS = 10  # length of train series\nTRAINING_STEPS = 10000\nBATCH_SIZE = 100\n\nTRAINING_EXAMPLES = 10000\nTESTING_EXAMPLES = 1000\nSAMPLE_GAP = 0.01\n\n\ndef generate_data(seq):\n    X = []\n    y = []\n    for i in range(len(seq) - TIMESTEPS):\n        X.append([seq[i:i + TIMESTEPS]])\n        y.append([seq[i + TIMESTEPS]])\n    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)\n\n\ndef lstm_model(X, y, is_training):\n    cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)])\n    outputs, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n    # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n    # Here, 'outputs' is a tensor of shape [BATCH_SIZE, TIMESTEP, HIDDEN_SIZE]\n    output = outputs[:, -1, :]\n\n    # add a fully connected layer\n    predictions = tf.contrib.layers.fully_connected(output, 1, activation_fn=None)\n\n    if not is_training:\n        return predictions, None, None\n\n    loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)\n\n    train_op = tf.contrib.layers.optimize_loss(loss=loss, global_step=tf.train.get_global_step(), optimizer=\"Adagrad\",\n                                               learning_rate=0.1)\n    return predictions, loss, train_op\n\n\ndef train(sess, train_X, train_y):\n    # tensor -> dataset\n    ds = 
tf.contrib.data.Dataset.from_tensor_slices((train_X, train_y))\n ds = ds.repeat()\n ds = ds.shuffle(10000).batch(BATCH_SIZE)\n X, y = ds.make_one_shot_iterator().get_next()\n\n with tf.variable_scope(\"model\"):\n predictions, loss, train_op = lstm_model(X, y, True)\n\n sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\n for i in range(TRAINING_STEPS):\n _, l = sess.run([train_op, loss])\n if i % 100 == 0:\n print(\"Train step:\" + str(i) + \", loss:\" + str(l))\n print(\"Training is finished!\")\n\n\ndef run_eval(sess, test_X, test_y):\n ds = tf.contrib.data.Dataset.from_tensor_slices((test_X, test_y))\n ds = ds.batch(1)\n X, y = ds.make_one_shot_iterator().get_next()\n\n with tf.variable_scope(\"model\", reuse=True):\n # Only prediction is needed, so y can be arbitrary\n prediction, _, _ = lstm_model(X, [0.0], False)\n predictions = []\n labels = []\n for i in range(TESTING_EXAMPLES):\n p, l = sess.run([prediction, y])\n predictions.append(p)\n labels.append(l)\n\n # calculate rmse\n predictions = np.array(predictions).squeeze()\n labels = np.array(labels).squeeze()\n rmse = np.sqrt(((predictions - labels) ** 2).mean(axis=0))\n print(\"Mean square error is : %f\" % rmse)\n\n plt.figure()\n # plt.plot(predictions, label=\"predictions\")\n # plt.plot(labels, '*', label=\"real_sin\")\n # make fig more clear\n plt.plot(predictions[0:len(predictions):10], label=\"predictions\")\n plt.plot(labels[0:len(labels):10], '*', label=\"real_sin\")\n plt.legend()\n plt.show()\n\n\ntest_start = (TRAINING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP\ntest_end = test_start + (TESTING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP\ntrain_X, train_y = generate_data(np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES + TIMESTEPS, dtype=np.float32)))\ntest_X, test_y = generate_data(\n np.sin(np.linspace(test_start, test_end, TESTING_EXAMPLES + TIMESTEPS, dtype=np.float32)))\n\nwith tf.Session() as sess:\n train(sess, train_X, train_y)\n run_eval(sess, test_X, test_y)\n","sub_path":"Practice-In-Tensorflow/lstm_sin.py","file_name":"lstm_sin.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"517665819","text":"import typing\n\nimport dask.array as da\nimport numpy as np\nimport pytest\nfrom scipy.spatial.distance import ( # type: ignore\n correlation,\n euclidean,\n pdist,\n squareform,\n)\n\nfrom sgkit.distance.api import pairwise_distance\nfrom sgkit.typing import ArrayLike\n\n\ndef get_vectors(\n array_type: str = \"da\",\n dtype: str = \"i8\",\n size: typing.Tuple[int, int] = (100, 100),\n chunk: typing.Tuple[int, int] = (20, 10),\n) -> ArrayLike:\n if array_type == \"da\":\n rs = da.random.RandomState(0)\n x = rs.randint(0, 3, size=size).astype(dtype).rechunk(chunk)\n else:\n x = np.random.rand(size[0], size[1]).astype(dtype)\n return x\n\n\ndef create_distance_matrix(\n x: ArrayLike, metric_func: typing.Callable[[ArrayLike, ArrayLike], np.float64]\n) -> ArrayLike:\n \"\"\"\n Parameters\n ----------\n x\n [array-like, shape: (M, N)]\n An array like two dimensional matrix. The rows are the\n vectors used for comparison, i.e. 
for pairwise distance.\n metric_func\n metric function for the distance metric.\n\n Returns\n -------\n A two dimensional distance matrix.\n\n \"\"\"\n m = x.shape[0]\n distance_matrix = np.zeros((m, m), dtype=np.float64)\n for i in range(x.shape[0]):\n for j in range(x.shape[0]):\n k = np.stack([x[i], x[j]])\n k = k[:, k.min(axis=0) >= 0]\n vi, vj = k[0], k[1]\n try:\n distance_matrix[i][j] = metric_func(vi, vj)\n except RuntimeWarning:\n # unable to calculate distance metric which\n # which means array contains only one element or\n # not possible to calculate distance metric\n distance_matrix[i][j] = np.nan\n return distance_matrix\n\n\n@pytest.mark.parametrize(\n \"size, chunk\",\n [\n ((100, 100), (20, 10)),\n ((100, 100), (25, 10)),\n ((100, 100), (50, 10)),\n ],\n)\ndef test_distance_correlation(\n size: typing.Tuple[int, int], chunk: typing.Tuple[int, int]\n) -> None:\n x = get_vectors(size=size, chunk=chunk)\n distance_matrix = pairwise_distance(x, metric=\"correlation\")\n distance_array = pdist(x, metric=\"correlation\")\n expected_matrix = squareform(distance_array)\n np.testing.assert_almost_equal(distance_matrix, expected_matrix)\n\n\n@pytest.mark.parametrize(\n \"size, chunk\",\n [\n ((100, 100), (20, 10)),\n ((100, 100), (25, 10)),\n ((100, 100), (50, 10)),\n ],\n)\ndef test_distance_euclidean(\n size: typing.Tuple[int, int], chunk: typing.Tuple[int, int]\n) -> None:\n x = get_vectors(size=size, chunk=chunk)\n distance_matrix = pairwise_distance(x, metric=\"euclidean\")\n expected_matrix = squareform(pdist(x))\n np.testing.assert_almost_equal(distance_matrix, expected_matrix)\n\n\ndef test_distance_ndarray() -> None:\n x = get_vectors(array_type=\"np\")\n distance_matrix = pairwise_distance(x, metric=\"euclidean\")\n expected_matrix = squareform(pdist(x))\n np.testing.assert_almost_equal(distance_matrix, expected_matrix)\n\n\n@pytest.mark.parametrize(\n \"metric, metric_func, dtype\",\n [\n (\"euclidean\", euclidean, \"f8\"),\n (\"euclidean\", euclidean, \"i8\"),\n (\"correlation\", correlation, \"f8\"),\n (\"correlation\", correlation, \"i8\"),\n ],\n)\ndef test_missing_values(\n metric: str,\n metric_func: typing.Callable[[ArrayLike, ArrayLike], np.float64],\n dtype: str,\n) -> None:\n x = get_vectors(array_type=\"np\", dtype=dtype)\n\n ri_times = np.random.randint(5, 20)\n m, n = x.shape\n for i in range(ri_times):\n if dtype == \"f8\":\n x[np.random.randint(0, m)][np.random.randint(0, m)] = np.nan\n x[np.random.randint(0, m)][np.random.randint(0, m)] = np.random.randint(\n -100, -1\n )\n\n distance_matrix = pairwise_distance(x, metric=metric)\n expected_matrix = create_distance_matrix(x, metric_func)\n np.testing.assert_almost_equal(distance_matrix, expected_matrix)\n\n\n@pytest.mark.parametrize(\n \"dtype, expected\",\n [\n (\"i8\", \"float64\"),\n (\"f4\", \"float32\"),\n (\"f8\", \"float64\"),\n ],\n)\ndef test_data_types(dtype, expected):\n x = get_vectors(dtype=dtype)\n distance_matrix = pairwise_distance(x)\n assert distance_matrix.dtype.name == expected\n\n\ndef test_undefined_metric() -> None:\n x = get_vectors(array_type=\"np\")\n with pytest.raises(NotImplementedError):\n pairwise_distance(x, metric=\"not-implemented-metric\")\n","sub_path":"sgkit/tests/test_distance.py","file_name":"test_distance.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"91030805","text":"from hearthstone.enums import BnetGameType\nfrom sqlalchemy import Date, Integer, String\nfrom 
sqlalchemy.sql import bindparam, text\n\nfrom hsredshift.analytics.filters import Region, TimeRange\nfrom hsredshift.analytics.library.base import BaseRedshiftQuery, QueryDisplayVisual\n\n\nclass SingleAccountLoNarrowEscapes(BaseRedshiftQuery):\n\tname = \"single_account_lo_narrow_escapes\"\n\tenabled = False\n\tis_personalized = True\n\tis_premium = False\n\tcache_warming_enabled = False\n\trequired_parameters = [\"account_lo\", \"Region\"]\n\tavailable_parameters = [\"account_lo\", \"Region\", \"GameType\", \"TimeRange\", \"RankRange\"]\n\tdisplay_visual = QueryDisplayVisual.TABLE\n\tstmt = text(\"\"\"\n\t\t\tSELECT\n\t\t\t\tmax(g.game_date) AS game_date,\n\t\t\t\tf_enum_name('CardClass', max(p.player_class)) AS player_class,\n\t\t\t\tf_replay_turn(g.shortid, min(es.turn)) AS replay_url\n\t\t\tFROM player p\n\t\t\tJOIN game g ON g.id = p.game_id\n\t\t\tJOIN entity_state es ON es.game_id = p.game_id AND es.controller = p.player_id\n\t\t\tWHERE p.game_date BETWEEN :start_date AND :end_date\n\t\t\tAND g.game_date BETWEEN :start_date AND :end_date\n\t\t\tAND es.game_date BETWEEN :start_date AND :end_date\n\t\t\tAND p.options_visible\n\t\t\tAND (es.health + nvl(es.armor,0)) - es.damage = 1\n\t\t\tAND es.cardtype = f_enum_val('CardType.HERO')\n\t\t\tAND es.controller_final_state = 4\n\t\t\tAND p.final_state = 4\n\t\t\tAND p.region BETWEEN :min_region AND :max_region\n\t\t\tAND p.rank BETWEEN (CASE WHEN :game_type = 2 or :game_type = 30 THEN :min_rank ELSE -1 END) AND :max_rank\n\t\t\tAND p.account_lo = :account_lo\n\t\t\tAND p.game_type = :game_type\n\t\t\tGROUP BY g.shortid\n\t\t\tORDER BY max(g.game_date) DESC;\n\t\"\"\").bindparams(\n\t\tbindparam(\"start_date\", type_=Date),\n\t\tbindparam(\"end_date\", type_=Date),\n\t\tbindparam(\"game_type\", value=BnetGameType.BGT_RANKED_STANDARD.value, type_=Integer),\n\t\tbindparam(\"min_rank\", value=0, type_=Integer),\n\t\tbindparam(\"max_rank\", value=20, type_=Integer),\n\t\tbindparam(\"min_region\", type_=Integer),\n\t\tbindparam(\"max_region\", type_=Integer),\n\t\tbindparam(\"account_lo\", type_=Integer)\n\t).columns(\n\t\tgame_date=Date,\n\t\tplayer_class=String,\n\t\treplay_url=String,\n\t)\n\n\tdef to_chart_series(self, params, result_set):\n\t\tresult = {\n\t\t\t\"metadata\": {\n\t\t\t},\n\t\t\t\"data\": {\n\t\t\t\t\"ALL\": [],\n\t\t\t}\n\t\t}\n\n\t\tfor row in result_set:\n\t\t\tresult[\"data\"][\"ALL\"].append(dict(\n\t\t\t\tgame_date=row[\"game_date\"],\n\t\t\t\tplayer_class=row[\"player_class\"],\n\t\t\t\treplay_url=row[\"replay_url\"]\n\t\t\t))\n\n\t\treturn result\n\n\tdef get_supported_filter_members(self, filter):\n\t\tresult = super(SingleAccountLoNarrowEscapes, self).get_supported_filter_members(filter)\n\t\tif filter == Region:\n\t\t\treturn [Region.REGION_EU, Region.REGION_CN, Region.REGION_KR, Region.REGION_US]\n\t\telif filter == TimeRange:\n\t\t\treturn [TimeRange.ALL_TIME]\n\t\telse:\n\t\t\treturn result\n\n\tdef get_default_value_for_filter(self, filter):\n\t\tif filter == Region:\n\t\t\traise ValueError(\"There is no default value for the Region param. 
It must be supplied.\")\n\t\telse:\n\t\t\treturn filter.get_default_member()\n\n\tdef example_parameters(self):\n\t\treturn dict(\n\t\t\tRegion=\"REGION_US\",\n\t\t\taccount_lo=23226729\n\t\t)\n\n\nif __name__ == \"__main__\":\n\tfrom hsredshift.utils.runners import LocalQueryRunner\n\trunner = LocalQueryRunner(locals())\n\trunner.run()\n","sub_path":"ideas/single_account_lo_narrow_escapes.py","file_name":"single_account_lo_narrow_escapes.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"472071279","text":"# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to for full license details.\n\nfrom typing import Any, Callable, List, Optional, Tuple\n\nfrom PIL import Image, ImageDraw\n\nfrom doctr.io.image import tensor_from_pil\nfrom doctr.utils.fonts import get_font\n\nfrom ..datasets import AbstractDataset\n\n\ndef synthesize_char_img(char: str, size: int = 32, font_family: Optional[str] = None) -> Image:\n \"\"\"Generate a synthetic character image with black background and white text\n\n Args:\n char: the character to render as an image\n size: the size of the rendered image\n font_family: the font family (has to be installed on your system)\n\n Returns:\n PIL image of the character\n \"\"\"\n\n if len(char) != 1:\n raise AssertionError('expected a single character input')\n\n img = Image.new('RGB', (size, size), color=(0, 0, 0))\n d = ImageDraw.Draw(img)\n\n # Draw the character\n font = get_font(font_family, size)\n d.text((4, 0), char, font=font, fill=(255, 255, 255))\n\n return img\n\n\nclass _CharacterGenerator(AbstractDataset):\n\n def __init__(\n self,\n vocab: str,\n num_samples: int,\n cache_samples: bool = False,\n sample_transforms: Optional[Callable[[Any], Any]] = None,\n font_family: Optional[str] = None,\n ) -> None:\n self.sample_transforms = sample_transforms\n self.vocab = vocab\n self._num_samples = num_samples\n self.font_family = font_family\n\n self._data: List[Image.Image] = []\n if cache_samples:\n self._data = [synthesize_char_img(char, font_family=self.font_family) for char in self.vocab]\n\n def __len__(self) -> int:\n return self._num_samples\n\n def _read_sample(self, index: int) -> Tuple[Any, int]:\n target = index % len(self.vocab)\n # Samples are already cached\n if len(self._data) > 0:\n pil_img = self._data[target].copy()\n else:\n pil_img = synthesize_char_img(self.vocab[target], font_family=self.font_family)\n img = tensor_from_pil(pil_img)\n\n return img, target\n","sub_path":"doctr/datasets/classification/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"472123010","text":"from Globals import *\nfrom DoneDialog import *\nfrom gi.repository import Pango\n\nclass QuestBox(Gtk.Builder):\n def __init__(self):\n Gtk.Builder.__init__(self)\n UI_FILE = path.join(di, '..', \"gui\", \"QuestBox.glade\")\n self.add_from_file(UI_FILE)\n\n self.current_qfilter = None\n self._alist = []\n\n self.qentery = self.get_object(\"qentry\")\n self.scombo = self.get_object(\"combobox1\")\n self.abutton = self.get_object(\"add\")\n self.tdone = self.get_object(\"ToggleDone\")\n self.ttodo = self.get_object(\"ToggleToDo\")\n\n renderer_text = Gtk.CellRendererText()\n renderer_text.props.is_expanded = True\n self._alist.append(renderer_text)\n self.scombo.pack_start(renderer_text, True)\n 
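# render the skill name (column 0 of skills_store) as the combo box text\n        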
self.scombo.add_attribute(renderer_text, \"text\", 0)\n        self.scombo.set_model(skills_store)\n\n        self.abutton.connect(\"clicked\", self.on_add)\n        self.qentery.connect(\"changed\", self.on_entery_chaged)\n\n        self.qfilter = quests_store.filter_new()\n        self.qfilter.set_visible_func(self.qfilter_func)\n        self.qview = Gtk.TreeView(model = self.qfilter)\n\n        self.ttodo.connect(\"toggled\", self.on_ttodo)\n        self.tdone.connect(\"toggled\", self.on_tdone)\n\n        select = self.qview.get_selection()\n        select.connect(\"changed\", self.on_quest)\n\n        cell = Gtk.CellRendererToggle()\n        cell.set_padding(100, 5)\n        cells.append(cell)\n        cell.connect(\"toggled\", self.on_cell_toggled)\n        column = Gtk.TreeViewColumn(\"Done\",cell, active = 0)\n        self.qview.append_column(column)\n\n        cell = Gtk.CellRendererText()\n        cell.set_padding(100, 5)\n        cell.set_property(\"editable\", True)\n        cell.connect(\"edited\", self.on_cell_entery)\n        self._alist.append(cell)\n        cells.append(cell)\n        column = Gtk.TreeViewColumn(\"Quest\", cell, text = 1)\n        self.qview.append_column(column)\n        self.qview.props.activate_on_single_click = True\n\n        cell = Gtk.CellRendererCombo()\n        cell.set_padding(100, 5)\n        cells.append(cell)\n        cell.set_property(\"editable\", True)\n        cell.set_property(\"model\", skills_store)\n        cell.set_property(\"text-column\", 0)\n        cell.set_property(\"has-entry\", False)\n        self._alist.append(cell)\n        cell.connect(\"edited\", self.on_cell_combo)\n        column = Gtk.TreeViewColumn(\"Skill\", cell, text = 2)\n        self.qview.append_column(column)\n\n        self.get_object(\"scrolledwindow1\").add(self.qview)\n        self.ttodo.set_active(True)\n        self.tdone.set_active(True)\n        self.qview.set_headers_visible(False)\n\n    def qfilter_func(self, model, iter, data):\n        if self.current_qfilter is None or self.current_qfilter == \"None\":\n            return True\n        else:\n            return model[iter][0] == self.current_qfilter\n\n    def on_tdone(self, widget):\n        if widget.get_active():\n            if self.ttodo.get_active():\n                self.current_qfilter = None\n\n            else:\n                self.current_qfilter = True\n        else:\n            self.current_qfilter = None\n\n        self.qfilter.refilter()\n\n    def on_ttodo(self, widget):\n        if widget.get_active():\n            if self.tdone.get_active():\n                self.current_qfilter = None\n\n            else:\n                self.current_qfilter = False\n        else:\n            self.current_qfilter = None\n\n        self.qfilter.refilter()\n\n    def set_attributes(self, attributes):\n        self.qentery.set_attributes(attributes)\n\n        for c in self._alist:\n            c.props.attributes = attributes\n\n    def get(self):\n        return self.get_object(\"box1\")\n\n    def on_add(self, button):\n        n = self.qentery.get_text()\n\n        tree_iter = self.scombo.get_active_iter()\n\n        if tree_iter != None:\n            model = self.scombo.get_model()\n            s = model[tree_iter][0]\n            quests_store.append([False, n, s])\n            self.qentery.set_text(\"\")\n            self.scombo.set_title(\"\")\n\n        else:\n            self.skill_warn(window[0])\n\n    def skill_warn(self, parent):\n        dialog = Gtk.MessageDialog(parent, 0, Gtk.MessageType.WARNING,\n            Gtk.ButtonsType.OK, \"Skill WARNING!\")\n\n        dialog.format_secondary_text(\"You must choose a skill for the quest.\")\n        response = dialog.run()\n\n        if response == Gtk.ResponseType.OK:\n            dialog.close()\n\n    def on_cell_toggled(self, widget, path):\n        dialog = DoneDialog(window[0])\n        response = dialog.run()\n\n        if response == Gtk.ResponseType.CANCEL:\n            dialog.close()\n\n        else:\n            time = dialog.get_time()[1]\n\n            if time == 0:\n                self.time_warn(window[0])\n\n            else:\n                quests_store[path][0] = not quests_store[path][0]\n                update_skill(quests_store[path][2], time)\n                dialog.close()\n\n    def time_warn(self, parent):\n        
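# modal warning shown when a quest is marked done without any time entered\n        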
dialog = Gtk.MessageDialog(parent, 0, Gtk.MessageType.WARNING,\n            Gtk.ButtonsType.OK, \"Time WARNING!\")\n\n        dialog.format_secondary_text(\"You must set a time for the quest.\")\n        response = dialog.run()\n\n        if response == Gtk.ResponseType.OK:\n            dialog.close()\n\n    def on_cell_entery(self, widget, path, text):\n        quests_store[path][1] = text\n\n    def on_cell_combo(self, widget, path, iter):\n        if iter != None:\n            model = widget.get_model()\n            s = model[iter][0]\n\n            quests_store[path][2] = s\n\n    def on_entery_chaged(self, widget):\n        if widget.get_text() == \"\":\n            self.abutton.set_sensitive(False)\n\n        else:\n            self.abutton.set_sensitive(True)\n\n    def on_quest(self, selection):\n        qmodel[0], qtreeiter[0] = selection.get_selected()\n\n        if qtreeiter != None:\n            quest = qmodel[0][qtreeiter[0]]\n            print(\"set quest\", quest[1])\n\n            qlabel.set_text(quest[1])\n            timer_cover.set_time(0, pomodoro, 0)\n","sub_path":"src/QuestBox.py","file_name":"QuestBox.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"436607690","text":"# coding:utf-8\r\n\r\nimport copy\r\n\r\n\r\nclass BasicMap(object):\r\n    '''The Map class describes the current state of the whole sea surface.\r\n    In this simple character interface \"~\" stands for open sea, E for an enemy ship and F for a friendly ship.'''\r\n\r\n    def __init__(self, width, height):\r\n        super(BasicMap, self).__init__()\r\n        self.width, self.height = width, height\r\n        self.targetX = 0.0\r\n        self.targetY = 0.0\r\n        self.friendlyShips = []\r\n        self.enemyShips = []\r\n        self.ships = []\r\n\r\n    def setTarget(self, x, y):\r\n        self.targetX, self.targetY = x, y\r\n\r\n    def targetCoordinate(self):\r\n        return self.targetX, self.targetY\r\n\r\n    def addShip(self, ship):\r\n        if(ship.isEnemy):\r\n            self.enemyShips.append(ship)\r\n        else:\r\n            self.friendlyShips.append(ship)\r\n        self.ships.append(ship)\r\n\r\n    def __str__(self):\r\n        _str_ = \"\"\r\n\r\n        matrix = [['~' for i in range(self.width)] for j in range(self.height)]\r\n        matrix[self.targetY][self.targetX] = 'T'\r\n        for ship in self.ships:\r\n            shipX, shipY = ship.coordinate()\r\n            if(ship.isEnemy):\r\n                matrix[shipY][shipX] = 'E'\r\n            else:\r\n                matrix[shipY][shipX] = 'F'\r\n\r\n        for line in matrix:\r\n            for one in line:\r\n                _str_ += one + ' '\r\n            _str_ += '\\n'\r\n\r\n        return _str_\r\n\r\n\r\nclass LargeMap(BasicMap):\r\n    \"\"\"A larger map that uses a big matrix to describe a pseudo-continuous 2D plane.\r\n    After every USV move, LargeMap corrects the USV's position by dropping the fractional\r\n    part and snapping it to the nearest matrix point.\"\"\"\r\n\r\n    def __init__(self, width, height):\r\n        super(LargeMap, self).__init__(width, height)\r\n","sub_path":"Map.py","file_name":"Map.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"13903320","text":"#Guess a number game\r\n\r\nimport random\r\n\r\nguesses = 6\r\nnumber = random.randint(1, 100)\r\nwin = False\r\n\r\nwhile guesses > 0:\r\n    guess = int(input(\"Guess:\"))\r\n\r\n    guesses -= 1\r\n\r\n    if guess > number:\r\n        print(\"Your guess number is too high\", guesses, \"Remaining\")\r\n    elif guess < number:\r\n        print(\"Your guess number is too low\", guesses, \"Remaining\")\r\n    else:\r\n        print(\"Congrats, you guessed the correct number\")\r\n        win = True\r\n        guesses = 0\r\n\r\nif win == False:\r\n    print(\"Sorry, the number is\", number)\r\n","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"487998354","text":"def read_file(path):\n    '''\n    Takes a path to an off file. 
Reads the data in and returns a tuple\n    containing a list of points and a list of faces\n    '''\n    ret_vertices = []\n    ret_faces = []\n    with open(path) as file:\n        identifier = file.readline().strip()\n        if identifier != \"OFF\":\n            return -1\n        numbers = file.readline()\n        numbers = numbers.split()\n        vertices = int(numbers[0])\n        faces = int(numbers[1])\n        for i in range(vertices):\n            line = file.readline()\n            line = line.split()\n            line = [float(i) for i in line]\n            ret_vertices.append((line[0], line[1], line[2]))\n        for i in range(faces):\n            # the first number on a face line is the vertex count;\n            # the remaining entries are the vertex indices themselves\n            line = file.readline()\n            line = line.split()\n            line = [int(i) for i in line]\n            ret_faces.append(tuple(line[1:1 + line[0]]))\n    return (ret_vertices, ret_faces)\n\n\ndef off_print(data):\n    '''\n    Takes a tuple of lists of tuples representing an off file and nicely prints it\n    '''\n    for l in data:\n        for points in l:\n            print('x: {0}, y: {1}, z: {2}'.format(points[0], points[1], points[2]))\n\n\ndef main():\n    test = read_file(\"chair_0890.off\")\n    if test == -1:\n        print(\"Error\")\n        return -1\n    off_print(test)\n\n\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"off_reader.py","file_name":"off_reader.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"130730977","text":"# -*- coding: utf-8 -*-\nfrom functools import partial\n\nimport dask.bag as db\n\nfrom kartothek.core import naming\nfrom kartothek.core.factory import _ensure_factory\nfrom kartothek.core.utils import _check_callable\nfrom kartothek.core.uuid import gen_uuid\nfrom kartothek.io.eager import read_dataset_as_metapartitions\nfrom kartothek.io_components.docs import default_docs\nfrom kartothek.io_components.index import update_indices_from_partitions\nfrom kartothek.io_components.metapartition import (\n    MetaPartition,\n    parse_input_to_metapartition,\n)\nfrom kartothek.io_components.utils import normalize_args\nfrom kartothek.io_components.write import (\n    raise_if_dataset_exists,\n    store_dataset_from_partitions,\n)\n\n\ndef _store_dataset_from_partitions_flat(mpss, *args, **kwargs):\n    return store_dataset_from_partitions(\n        [mp for sublist in mpss for mp in sublist], *args, **kwargs\n    )\n\n\n@default_docs\n@normalize_args\ndef store_bag_as_dataset(\n    bag,\n    store,\n    dataset_uuid=None,\n    metadata=None,\n    df_serializer=None,\n    overwrite=False,\n    metadata_merger=None,\n    metadata_version=naming.DEFAULT_METADATA_VERSION,\n    partition_on=None,\n    metadata_storage_format=naming.DEFAULT_METADATA_STORAGE_FORMAT,\n    secondary_indices=None,\n):\n    \"\"\"\n    Transform and store a dask.bag of dictionaries containing\n    dataframes to a kartothek dataset in store.\n\n    This is the dask.bag-equivalent of\n    :func:`store_delayed_as_dataset`. 
See there\n for more detailed documentation on the different possible input types.\n\n Parameters\n ----------\n bag: dask.bag\n A dask bag containing dictionaries of dataframes or dataframes.\n\n Returns\n -------\n A dask.bag.Item dataset object.\n \"\"\"\n _check_callable(store)\n if dataset_uuid is None:\n dataset_uuid = gen_uuid()\n\n if not overwrite:\n raise_if_dataset_exists(dataset_uuid=dataset_uuid, store=store)\n\n input_to_mps = partial(\n parse_input_to_metapartition, metadata_version=metadata_version\n )\n mps = bag.map(input_to_mps)\n\n if partition_on:\n mps = mps.map(MetaPartition.partition_on, partition_on=partition_on)\n\n if secondary_indices:\n mps = mps.map(MetaPartition.build_indices, columns=secondary_indices)\n\n mps = mps.map(\n MetaPartition.store_dataframes,\n store=store,\n df_serializer=df_serializer,\n dataset_uuid=dataset_uuid,\n )\n\n aggregate = partial(\n _store_dataset_from_partitions_flat,\n dataset_uuid=dataset_uuid,\n store=store,\n dataset_metadata=metadata,\n metadata_merger=metadata_merger,\n metadata_storage_format=metadata_storage_format,\n )\n\n return mps.reduction(perpartition=list, aggregate=aggregate, split_every=False)\n\n\n@default_docs\ndef build_dataset_indices__bag(\n store, dataset_uuid, columns, partition_size=None, factory=None\n):\n \"\"\"\n Function which builds a :class:`~kartothek.core.index.ExplicitSecondaryIndex`.\n\n This function loads the dataset, computes the requested indices and writes\n the indices to the dataset. The dataset partitions itself are not mutated.\n\n Parameters\n ----------\n partition_size: Optional[int]\n Dask bag partition size. Use a larger numbers to decrease scheduler load and overhead, use smaller numbers for a\n fine-grained scheduling and better resilience against worker errors.\n\n Returns\n -------\n A dask.delayed computation object.\n \"\"\"\n ds_factory = _ensure_factory(\n dataset_uuid=dataset_uuid,\n store=store,\n factory=factory,\n load_dataset_metadata=False,\n )\n\n mps = read_dataset_as_metapartitions(factory=ds_factory)\n\n return (\n db.from_sequence(seq=mps, partition_size=partition_size)\n .map(MetaPartition.build_indices, columns=columns)\n .map(MetaPartition.remove_dataframes)\n .reduction(list, list, split_every=False, out_type=db.Bag)\n .flatten()\n .map_partitions(list)\n .map_partitions(\n update_indices_from_partitions, dataset_metadata_factory=ds_factory\n )\n )\n","sub_path":"kartothek/io/dask/bag.py","file_name":"bag.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"57882755","text":"from urllib import request\nsrc = request.urlopen('http://python.org/').read()\n\n\n\nfrom collections import OrderedDict\nod = OrderedDict()\nod['a'] = 'A'\nod['c'] = 'C'\nod['b'] = 'B'\nod\n\nd = {}\nd['a'] = 'A'\nd['c'] = 'C'\nd['b'] = 'B'\nd\n\n\n\nanimals = [\n('猫', '三毛'),\n('犬', 'コーギー'),\n('猫', 'シャム'),\n('犬', 'ダックス'),\n('犬', '黒ラブ')\n]\nd = {}\nfor k, v in animals:\n if k not in d:\n d[k] = [v]\n else:\n d[k].append(v)\nd\n\nd = {}\nfor k, v in animals:\n d.setdefault(k, []).append(v)\nd\n\nfrom collections import defaultdict\ndd = defaultdict(list)\nfor k, v in animals:\n dd[k].append(v)\ndd\n\n\n\nimport datetime\nd1 = datetime.date(2016, 6, 28)\nd2 = datetime.date(2015, 6, 28)\ntd = d1 - d2\nprint(td)\n\nimport datetime\nd1 = datetime.date(2016, 4, 14)\ntd = datetime.timedelta(days=100)\nd2 = d1 + td\nprint(d2)\n\nimport datetime\ntd = 
datetime.timedelta(days=5)\nprint(td*2)\nprint(td/3)\n\nimport datetime\nd1 = datetime.date(2016, 6, 28)\nd2 = datetime.date(2016, 6, 28)\nd1 > d2\nd1 == d2\n\nimport calendar\nprint(calendar.month(2199, 12))\nprint(calendar.monthcalendar(2199, 12))\n\n\n\nimport re\nfrom urllib import request\nurl = \"https://www.python.org/news/\"\nsrc = request.urlopen(url).read()\nsrc = src.decode(\"utf-8\")\n\npat = re.compile(r'href=\"(/download/releases/.+?)\"')\n\nfor match in pat.finditer(src):\n print(match.group(1))\n\n\n\nfrom urllib import request\nfrom urllib import parse\nurl = 'http://dname.com/somefile.zip'\nfilename = parse.urlparse(url)[2].split('/')[-1]\n\nfilename\nrequest.urlretrieve(url, filename)\n","sub_path":"textbook_chapter11.py","file_name":"textbook_chapter11.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"547987182","text":"from argparse import Namespace\nimport logging\nfrom redis import StrictRedis\n\nfrom rq_retry_scheduler import Scheduler\nfrom rq_retry_scheduler.cli import scheduler\n\n\ndef test_get_redis():\n args = Namespace(\n host='localhost', port=6379, db=15, password=None, url=None)\n redis = scheduler.get_redis(args)\n assert isinstance(redis, StrictRedis)\n\n args = Namespace(\n url='redis://localhost/15')\n\n redis = scheduler.get_redis(args)\n assert isinstance(redis, StrictRedis)\n\n\ndef test_setup_logging():\n args = Namespace()\n\n logger = scheduler.setup_logging(args)\n\n assert logger.getEffectiveLevel() == logging.INFO\n assert len(logger.handlers) > 0\n\n\ndef test_main(mock):\n args = Namespace(url='redis://localhost/15', interval=5, burst=False)\n\n mock.patch.object(scheduler, 'get_arguments', return_value=args)\n init = mock.spy(Scheduler, '__init__')\n run = mock.patch.object(Scheduler, 'run')\n\n scheduler.main()\n\n run.assert_called_with(False)\n\n assert init.call_args[1]['interval'] == args.interval\n\n\ndef test_get_arguments():\n fake_arguments = []\n args = scheduler.get_arguments(fake_arguments)\n assert args.host == 'localhost'\n assert args.port == 6379\n assert args.url is None\n assert args.interval == 10.0\n assert args.burst is False\n\n\ndef test_burst_flag():\n fake_arguments = ['-b']\n args = scheduler.get_arguments(fake_arguments)\n assert args.burst is True\n","sub_path":"tests/test_cli_main.py","file_name":"test_cli_main.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"2527465","text":"from .BasicGlobal import RegistrationName\r\nfrom .RegistrationAddress import RegistrationAddress\r\nfrom .TaxScheme import TaxScheme\r\nfrom .util import Xmleable, default_document\r\n\r\n\r\nclass CompanyID(Xmleable):\r\n def __init__(self, id_document, document_type=6):\r\n self.id_document = id_document\r\n self.document_type = document_type\r\n self.schemeName = \"SUNAT:Identificador de Documento de Identidad\"\r\n self.schemeAgencyName = \"PE:SUNAT\"\r\n self.schemeURI = \"urn:pe:gob:sunat:cpe:see:gem:catalogos:catalogo06\"\r\n\r\n def generate_doc(self):\r\n self.doc = default_document.createElement(\"cbc:CompanyID\")\r\n self.doc.setAttribute(\"schemeID\", self.document_type)\r\n self.doc.setAttribute(\"schemeName\", self.schemeName)\r\n self.doc.setAttribute(\"schemeAgencyName\", self.schemeAgencyName)\r\n self.doc.setAttribute(\"schemeURI\", self.schemeURI)\r\n text = default_document.createTextNode(self.id_document)\r\n 
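# set the identity document number as the element's text content\r\n        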
self.doc.appendChild(text)\r\n\r\n\r\nclass PartyTaxScheme(Xmleable):\r\n def __init__(self, registration_name=None, company_id=None,\r\n registration_address=None, tax_scheme=None):\r\n self.registration_name = registration_name\r\n self.company_id = company_id\r\n self.registration_address = registration_address\r\n self.tax_scheme = tax_scheme\r\n\r\n def fix_values(self):\r\n if type(self.registration_name) == str:\r\n self.registration_name = RegistrationName(self.registration_name)\r\n if type(self.company_id) == str:\r\n self.company_id = CompanyID(id_document=self.company_id)\r\n if type(self.registration_address) == str:\r\n self.registration_address = RegistrationAddress(\r\n address_type_code=self.registration_address)\r\n # if self.tax_scheme is None:\r\n # self.tax_scheme = TaxScheme.TaxScheme(\"-\")\r\n\r\n def validate(self, errs, obs):\r\n assert self.registration_name is None or type(\r\n self.registration_name) == RegistrationName\r\n assert self.company_id is None or type(self.company_id) == CompanyID\r\n assert self.registration_address is None or type(\r\n self.registration_address) == RegistrationAddress\r\n assert self.tax_scheme is None or type(self.tax_scheme) == TaxScheme\r\n\r\n def generate_doc(self):\r\n self.doc = default_document.createElement(\"cac:PartyTaxScheme\")\r\n self.doc.appendChild(self.registration_name.get_document())\r\n self.doc.appendChild(self.company_id.get_document())\r\n if self.registration_address:\r\n self.doc.appendChild(self.registration_address.get_document())\r\n if self.tax_scheme:\r\n self.doc.appendChild(self.tax_scheme.get_document())\r\n","sub_path":"addons/gestionit_pe_fe/models/account/api_facturacion/efact21/PartyTaxScheme.py","file_name":"PartyTaxScheme.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"444882267","text":"from utils import *\nimport linear_regression.feature_normalization as norm\n\n\ndef compute_hypothesis(theta_vector, x: list):\n \"\"\"\n Compute the h(x)\n :param theta_vector: [Theta0 Theta1 ...] 
vector containing weights\n    :param x: Input values from the dataset\n    :return: Predicted y-values based on the given weights\n    \"\"\"\n    assert len(x) == len(theta_vector)  # Assert that dimensions match\n\n    h_of_x = []\n    dataset_size = len(x[0])\n\n    # For all the elements in the data set\n    # Compute the predicted value using information from all dimensions\n\n    for i in range(dataset_size):\n        h = 0\n\n        for dimension_index in range(len(theta_vector)):\n            h += x[dimension_index][i] * theta_vector[dimension_index]\n\n        h_of_x.append(h)\n\n    return h_of_x\n\n\ndef compute_error(h_x, y):\n    \"\"\"\n    Squared error function\n    :param h_x: predicted values for the input data set (erroneous values)\n    :param y: true values for the input data set\n    :return: total squared difference (accumulated error term)\n    \"\"\"\n    assert len(h_x) == len(y)\n\n    m = len(h_x)\n\n    error = 0\n    for index in range(m):\n        error += (h_x[index] - y[index]) ** 2\n\n    return (1 / (2 * m)) * error\n\n\ndef gradient_descent(alpha, x, y, weights, h_of_x):\n    \"\"\"\n    Computes one step of gradient descent\n    :param alpha: learning rate\n    :param x: independent variable values\n    :param y: dependent variable values\n    :param weights: current weights of the model\n    :param h_of_x: predicted values by the model\n    :return: new weights after applying one step of gradient descent\n    \"\"\"\n    assert len(x) == len(weights)\n    assert len(y) == len(h_of_x)\n\n    new_weights = []\n\n    for dimension_index in range(len(x)):\n        x_of_dimension = x[dimension_index]\n        m = len(x_of_dimension)\n\n        # reset the accumulator for every dimension so one weight's gradient\n        # does not leak into the next weight's update\n        error_differential = 0\n        for i in range(m):\n            error_differential += (h_of_x[i] - y[i]) * x_of_dimension[i]\n\n        total_differential_error = alpha * (1 / m) * error_differential\n\n        new_weights.append(weights[dimension_index] - total_differential_error)\n\n    return new_weights\n\n\nITER_COUNT = 5000  # Magic number of iterations\nALPHA = 0.022  # Arbitrary learning rate\n\n\ndef single_variable():\n    points = read_dataset(\"ex1data1.csv\")\n    x, y = point_list_to_axes(points)\n    x = [[1 for _ in x], x]  # Add the y-intercept feature\n    # plot_data(x_axis, y_axis,'go')\n\n    weights = [0, 0]\n    h = compute_hypothesis(weights, x)\n    error = compute_error(h, y)\n\n    import math\n    assert math.floor(error) == 32  # Given in the pdf\n    error_list = []\n\n    for i in range(ITER_COUNT):\n        weights = gradient_descent(ALPHA, x, y, weights, h)\n\n        # Update\n        h = compute_hypothesis(weights, x)\n\n        error_list.append(error)\n        previous_error = error\n        error = compute_error(h, y)\n        # print(\"Current error is:\", error)\n        # print(\"Weights:\", weights)\n\n        if previous_error == error:\n            print(\"Error is now stable, Done! 
iteration [\", i, \"]\")\n break\n\n x = x[1] # Remove the array of 1 that was added for the y-intercept\n plot_multiple([x, h, 'b-', x, y, 'go'])\n\n iter_axis = [i for i in range(len(error_list))]\n plot_data(iter_axis, error_list, 'r-')\n\n\ndef multi_variable():\n points = read_dataset(\"ex1data2.csv\")\n points = point_list_to_axes(points)\n y = points.pop()\n x = points\n\n x_norm, mean_arr, std_arr = norm.normalize_dataset(x)\n\n print(x_norm)\n print(mean_arr)\n print(std_arr)\n\n\ndef main():\n multi_variable()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"linear_regression/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"397322600","text":"def _preprocess_symbolic_input(x, data_format, mode):\n 'Preprocesses a tensor encoding a batch of images.\\n\\n # Arguments\\n x: Input tensor, 3D or 4D.\\n data_format: Data format of the image tensor.\\n mode: One of \"caffe\", \"tf\" or \"torch\".\\n - caffe: will convert the images from RGB to BGR,\\n then will zero-center each color channel with\\n respect to the ImageNet dataset,\\n without scaling.\\n - tf: will scale pixels between -1 and 1,\\n sample-wise.\\n - torch: will scale pixels between 0 and 1 and then\\n will normalize each channel with respect to the\\n ImageNet dataset.\\n\\n # Returns\\n Preprocessed tensor.\\n '\n global _IMAGENET_MEAN\n if (mode == 'tf'):\n x /= 127.5\n x -= 1.0\n return x\n if (mode == 'torch'):\n x /= 255.0\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n else:\n if (data_format == 'channels_first'):\n if (K.ndim(x) == 3):\n x = x[::(- 1), ...]\n else:\n x = x[:, ::(- 1), ...]\n else:\n x = x[..., ::(- 1)]\n mean = [103.939, 116.779, 123.68]\n std = None\n if (_IMAGENET_MEAN is None):\n _IMAGENET_MEAN = K.constant((- np.array(mean)))\n if (K.dtype(x) != K.dtype(_IMAGENET_MEAN)):\n x = K.bias_add(x, K.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)\n else:\n x = K.bias_add(x, _IMAGENET_MEAN, data_format)\n if (std is not None):\n x /= std\n return x","sub_path":"Data Set/bug-fixing-5/fa6de5a45e6d80216b4b57aeaf7d750f5d088747-<_preprocess_symbolic_input>-fix.py","file_name":"fa6de5a45e6d80216b4b57aeaf7d750f5d088747-<_preprocess_symbolic_input>-fix.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"613557082","text":"from folder_defs import FoldersDefs\r\nimport pickle\r\nimport os\r\n\r\n\r\n# ####################################################### CONS ####################################################### #\r\n\r\ninput_lookup_file_name = \"links_pois_all.p\"\r\noutput_lookup_file_name = \"pois_links_all.p\"\r\n\r\n\r\n# ####################################################### DEFS ####################################################### #\r\n\r\ndef update_poislinksall_file_from_linkspoisall_file(inp_filename, out_filename):\r\n \"\"\"\r\n\r\n :param inp_filename:\r\n :param out_filename:\r\n :return:\r\n \"\"\"\r\n\r\n linkspois_file_path = os.path.join(FoldersDefs.bin_ancillary_folder_path, inp_filename)\r\n poislinks_file_path = os.path.join(FoldersDefs.bin_ancillary_folder_path, out_filename)\r\n\r\n # read source\r\n with(open(linkspois_file_path, \"r\")) as linkspois_file:\r\n linkspois_dict = pickle.load(linkspois_file)\r\n\r\n # prepare written dictionary\r\n poislinks_dict = {}\r\n for cur_link_id in linkspois_dict.keys():\r\n cur_poises = 
linkspois_dict[cur_link_id].values()\r\n for cur_pois in cur_poises:\r\n cur_pois_id = cur_pois['id']\r\n poislinks_dict[cur_pois_id] = cur_link_id\r\n\r\n # write destination\r\n with(open(poislinks_file_path, \"w+\")) as poislinks_file:\r\n pickle.dump(poislinks_dict, poislinks_file)\r\n\r\n print(\"Wrote file '{0}'.\".format(poislinks_file_path))\r\n\r\n# ####################################################### CALL ####################################################### #\r\n\r\nupdate_poislinksall_file_from_linkspoisall_file(input_lookup_file_name, output_lookup_file_name)\r\n","sub_path":"backend/server_crons/fs_ancillarybin_update_poislinksall.py","file_name":"fs_ancillarybin_update_poislinksall.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"283265336","text":"from tempfile import NamedTemporaryFile\nfrom subprocess import Popen, PIPE\nfrom DataImport.mongo_import import mongo_import_record\nfrom bson.objectid import ObjectId\nfrom Bio.Blast import NCBIXML\nfrom settings import MONGODB as db\nimport os\n\ndef blast_all(query_fasta, blast_db):\n \"\"\" Take all records of \"type\":\"CDS\" from a collection and blast against a database\n :param query_fasta: open fasta file object\n :param blast_db: string - path to blast database\n :return: temporary xml file with blast results\n \"\"\"\n tmp_results = NamedTemporaryFile()\n\n print(Popen(\n ['blastn',\n '-query', query_fasta.name,\n '-db', blast_db,\n '-out', tmp_results.name,\n '-outfmt', '5'],\n stdout=PIPE # xml output\n ).wait()) # waits for return code before proceeding\n\n return tmp_results\n\n\ndef parse_blast_results_xml(results_file):\n \"\"\" Parse and insert results of BLAST\n\n :param results_file: blast results in xml format\n \"\"\"\n counter = 0\n results_file.seek(0)\n for blast_record in NCBIXML.parse(results_file):\n counter += 1\n query_id = blast_record.query\n\n for alignment in blast_record.alignments:\n hit_id = alignment.hit_def\n\n if query_id != hit_id:\n hsp = alignment.hsps[0] # there may be multiple hsps - first one is typically best match\n perc_identity = float(hsp.identities) / float(hsp.align_length)\n\n if not check_blast_pair(query_id, hit_id):\n mongo_import_record(\n {\n \"type\": \"blast_result\",\n \"query\": query_id,\n \"subject\": hit_id,\n \"perc_identity\": perc_identity,\n \"length\": hsp.align_length,\n \"bit_score\": hsp.bits,\n \"e-value\": hsp.expect\n },\n \"blast_results\"\n )\n if counter % 500 == 0:\n print(\"---> {} blast records imported\".format(counter))\n\n\ndef check_blast_pair(query, subject):\n \"\"\" Check database for the presence of a blast result for given pair of record '_id's\n\n Since blasting seq1 vs seq2 should return the same results as seq2 vs seq1 and we don't want to duplicate data, this\n checks in order to determine whether to import or not. 
Can also be used to get database entry for any gene pair when\n you know the \"_id\" value of each.\n\n :param query:\n :param subject:\n :rtype: MongoDB record (dict) or None\n \"\"\"\n collection = db[\"blast_results\"]\n query_id, subject_id = ObjectId(query), ObjectId(subject)\n\n # since we don't know order of insert, check both\n pair = {\"type\": \"blast_result\", \"query\": query_id, \"subject\": subject_id}\n reciprocal = {\"type\": \"blast_result\", \"query\": subject_id, \"subject\": query_id}\n\n blast_pair = collection.find_one({\"$or\": [pair, reciprocal]}) # will evaluate None if no pair is found\n\n return blast_pair\n","sub_path":"src/FindHGT/run_blast.py","file_name":"run_blast.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"202767294","text":"\nimport pyblish.api\n\n\nclass PublishSucceed(pyblish.api.ContextPlugin):\n\n label = \"Publish Succeed\"\n order = pyblish.api.IntegratorOrder + 0.499999\n\n def process(self, context):\n if not all(result[\"success\"] for result in context.data[\"results\"]):\n self.log.warning(\"Atomicity not held, aborting.\")\n return\n\n for instance in context:\n if not instance.data.get(\"publish\", True):\n continue\n\n versioner = instance.data[\"versioner\"]\n versioner.set_succeeded()\n","sub_path":"plugins/global/publish/publish_succeed.py","file_name":"publish_succeed.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"5129408","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n flag = True\n cnt = 0\n if root:\n level = [root]\n else:\n return 0\n while flag:\n temp_level=[]\n for i in level:\n if i.left:\n temp_level.append(i.left)\n if i.right:\n temp_level.append(i.right)\n cnt+=1\n if len(temp_level) == 0:\n flag = False\n else:\n level = temp_level\n return cnt\n","sub_path":"Nail the Interview/104. Maximum Depth of Binary Tree.py","file_name":"104. 
Maximum Depth of Binary Tree.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"316551265","text":"__author__ = 'v.koryakov'\n#-*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom datetime import datetime\n\ndriver = webdriver.Firefox()\ndriver.get(\"http://booking.uz.gov.ua/\")\n\n#choose ukr language\nWebDriverWait(driver, 20).until(lambda d: d.find_element_by_xpath(\"//*[@id='langs']//li[1]/a\"))\nto_ukr = driver.find_element_by_xpath(\"//*[@id='langs']//li[1]/a\")\nto_ukr.click()\n\n#choose station from Kyiv\nWebDriverWait(driver, 10).until(lambda d: d.find_element_by_name(\"station_from\"))\nsource_from = driver.find_element_by_name(\"station_from\")\nsource_from.send_keys(u\"Київ\")\nWebDriverWait(driver, 20).until(lambda d: d.find_element_by_xpath(\"//*[@id='stations_from']/div[1]\"))\nkyiv = driver.find_element_by_xpath(\"//*[@id='stations_from']/div[1]\")\nkyiv.click()\n\n#choose station to Moscow\nWebDriverWait(driver, 10).until(lambda d: d.find_element_by_name(\"station_till\"))\nsource_to = driver.find_element_by_name(\"station_till\")\nsource_to.send_keys(u\"Москва\")\nWebDriverWait(driver, 20).until(lambda d: d.find_element_by_xpath(\"//*[@id='stations_till']/div[5]\"))\nmoscow = driver.find_element_by_xpath(\"//*[@id='stations_till']/div[5]\")\nmoscow.click()\n\n#search trains\nsearch_trains = driver.find_element_by_name(\"search\")\nsearch_trains.click()\n\n#select 006 train\nWebDriverWait(driver, 20).until(lambda d: d.find_element_by_xpath(\"*//a[contains(text(),'006 К')]\"))\ntrain_006 = driver.find_element_by_xpath(\"*//a[contains(text(),'006 К')]/../../td[@class='td6']/div[3]/button\")\ntrain_006.click()\n\n#choose first free upper place\nWebDriverWait(driver, 20).until(lambda d: d.find_element_by_xpath(\"*//div[@id='ts_chs_scheme'] //a[@class='upper free']\"))\nupper_free = driver.find_element_by_xpath(\"*//div[@id='ts_chs_scheme'] //a[@class='upper free']\")\nupper_free.click()\n\n#get a price\nWebDriverWait(driver, 20).until(lambda d: d.find_element_by_id(\"ts_chs_tbl\"))\nprice = driver.find_element_by_xpath(\"*//div[@id='ts_chs_tbl']/button/b\").text\n\n#write price into file with current date\noutput = str(datetime.today()) + u\" Поезд 006 К, Киев-Москва, цена билета в плацкарте: \"\nreport_file = open(\"report.txt\", \"a+\")\nreport_file.write(output.encode('utf-8') + price.encode('utf-8') + \"\\n\")\nreport_file.close\n\ndriver.close()\n","sub_path":"test_checking_prices.py","file_name":"test_checking_prices.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"486343044","text":"import usocket\nimport ujson\ntry:\n import ussl\n SUPPORT_SSL = True\nexcept ImportError:\n ussl = None\n SUPPORT_SSL = False\n\nSUPPORT_TIMEOUT = hasattr(usocket.socket, 'settimeout')\nCONTENT_TYPE_JSON = 'application/json'\n\n# Read all http headers from a socket\ndef parse_headers(sock):\n headers = {}\n line = b\"\"\n while line != b'\\r\\n':\n line = sock.readline()\n if line.strip():\n name, value = line.strip().split(b': ')\n headers[name] = value.strip()\n return headers\n\nclass Response(object):\n def __init__(self, status_code, raw, resp_headers):\n self.status_code = status_code\n self.raw = raw\n self._content = False\n self.encoding = 'utf-8'\n self.headers = resp_headers\n\n @property\n def content(self):\n if 
self._content is False:\n self._content = self.raw.read()\n self.raw.close()\n self.raw = None\n\n return self._content\n\n @property\n def text(self):\n content = self.content\n\n return str(content, self.encoding) if content else ''\n\n def close(self):\n if self.raw is not None:\n self._content = None\n self.raw.close()\n self.raw = None\n\n def multipart(self):\n if b'Content-Type' in self.headers and 'multipart/x-mixed-replace' in self.headers[b'Content-Type']:\n boundary = str(self.headers[b'Content-Type'],'utf-8').split('boundary=')[1]\n #print(boundary)\n block = b\"\"\n while boundary not in block:\n block += self.raw.read(1)\n block += self.raw.read(2)\n while True:\n headers = parse_headers(self.raw)\n block = b\"\"\n while boundary not in block:\n block += self.raw.read(1)\n block += self.raw.read(2)\n\n r = Response(self.status_code, None, headers)\n r._content = block\n yield r\n\n def json(self):\n return ujson.loads(self.text)\n\n def raise_for_status(self):\n if 400 <= self.status_code < 500:\n raise OSError('Client error: %s' % self.status_code)\n if 500 <= self.status_code < 600:\n raise OSError('Server error: %s' % self.status_code)\n\n\n# Adapted from upip\ndef request(method, url, json=None, timeout=None, headers=None, follow_redirect=True):\n urlparts = url.split('/', 3)\n proto = urlparts[0]\n host = urlparts[2]\n urlpath = '' if len(urlparts) < 4 else urlparts[3]\n\n if proto == 'http:':\n port = 80\n elif proto == 'https:':\n port = 443\n else:\n raise OSError('Unsupported protocol: %s' % proto[:-1])\n\n if ':' in host:\n host, port = host.split(':')\n port = int(port)\n\n if json is not None:\n content = ujson.dumps(json)\n content_type = CONTENT_TYPE_JSON\n else:\n content = None\n\n ai = usocket.getaddrinfo(host, port)\n addr = ai[0][4]\n\n sock = usocket.socket()\n\n if timeout is not None:\n assert SUPPORT_TIMEOUT, 'Socket does not support timeout'\n sock.settimeout(timeout)\n\n sock.connect(addr)\n\n if proto == 'https:':\n assert SUPPORT_SSL, 'HTTPS not supported: could not find ussl'\n sock = ussl.wrap_socket(sock)\n\n sock.write('%s /%s HTTP/1.0\\r\\nHost: %s\\r\\n' % (method, urlpath, host))\n\n if headers is not None:\n for header in headers.items():\n sock.write('%s: %s\\r\\n' % header)\n\n if content is not None:\n sock.write('content-length: %s\\r\\n' % len(content))\n sock.write('content-type: %s\\r\\n' % content_type)\n sock.write('\\r\\n')\n sock.write(content)\n else:\n sock.write('\\r\\n')\n\n l = sock.readline()\n protover, status, msg = l.split(None, 2)\n\n # Collect headers\n headers = parse_headers(sock)\n\n # Handle redirects\n if int(status) in [301, 301] and b'Location' in headers:\n if 'http' not in headers[b'Location']:\n # relative redirect\n redirect = proto+\"://\"+host+\"/\"+str(headers[b'Location'], 'utf-8')\n else:\n redirect = str(headers[b'Location'], 'utf-8')\n return request(method, redirect, json, timeout, headers, follow_redirect)\n\n return Response(int(status), sock, headers)\n\n\ndef get(url, **kwargs):\n return request('GET', url, **kwargs)\n\n\ndef post(url, **kwargs):\n return request('POST', url, **kwargs)\n","sub_path":"http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"530617662","text":"from core.tempRecommendation import TempRecommendation\nfrom dao.tempRecommendationDAO import TempRecommendationDAO\nfrom typing import List\nimport pprint\nimport constant_paths\n\n\n# Returns 
multiple temp recommendation entry against an user\n# from the following example, dictionary input\n# {\n# \"last_combination\": 524287,\n# \"recommendations\": [\n# {\n# \"priority\": 1,\n# \"combination_num\":16,\n# \"common_movie_length\":11,\n# \"common_genres\":[5]\n# },\n# {\n# \"priority\": 2,\n# \"combination_num\":10,\n# \"common_movie_length\":8,\n# \"common_genres\":[4]\n# },\n# {\n# \"priority\": 3,\n# \"combination_num\":6,\n# \"common_movie_length\":4,\n# \"common_genres\":[3]\n# }\n# ]\n# }\ndef get_temp_recommendation_by_user(user_id: int) -> dict:\n dao = TempRecommendationDAO(constant_paths.CONFIG_FILE_PATH)\n temp_reco_list = dao.getTempRecommendationByUserID(user_id)\n # print(\"temp_reco_list: \", temp_reco_list)\n last_combination = None\n result = {}\n recommendations = []\n for temp_reco in temp_reco_list:\n last_combination = temp_reco.getLastCombination()\n priority = temp_reco.getPriority()\n combination_num = temp_reco.getCombinationNum()\n common_movie_length = temp_reco.getCommonMovieLength()\n genres = temp_reco.getGenres()\n if len(genres) == 0:\n common_genres = []\n else:\n common_genres = [int(x) for x in genres.split(\",\")]\n reco_dict = {\"priority\": priority, \"combination_num\": combination_num,\n \"common_movie_length\": common_movie_length, \"common_genres\": common_genres.copy()}\n recommendations.append(reco_dict.copy())\n\n if last_combination is not None:\n result[\"recommendations\"] = recommendations.copy()\n result[\"last_combination\"] = last_combination\n\n return result\n\n\ndef get_all_user_id():\n dao = TempRecommendationDAO(constant_paths.CONFIG_FILE_PATH)\n return dao.getAllUserID()\n\n\nif __name__ == \"__main__\":\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(get_temp_recommendation_by_user(1))\n","sub_path":"get_data/get_temp_recommendation.py","file_name":"get_temp_recommendation.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"11752681","text":"# -*- coding: utf-8 -*-\n\nimport urllib\nimport re\nimport logging\nimport requests\n\nfrom s2sproxy.util.attribute_module import AttributeModule, NoUserData\n\n# Module level logger.\nlogger = logging.getLogger(__name__)\n\nclass FailedRequestError(Exception):\n pass\n\nclass COmanageAttributeModule(AttributeModule):\n def __init__(self, co_manage_url, co_manage_auth, co_id, \n idp_attribute_name, vo_identifier_type,\n email_re_object = re.compile('official|delivery'),\n membership_attribute_saml_name = \"urn:oid:1.3.6.1.4.1.5923.1.5.1.1\"\n ):\n \"\"\"\n Initialize the COmanage attribute module.\n :param co_manage_url: url to the COmanage server (without trailing slash)\n :param co_manage_auth: HTTP Basic Auth credentials for the COmanage server\n :param co_id: the \"collaborative organization ID\" of the CO\n :param idp_attribute_name: which attribute asserted by the IdP to use as\n user identifier, e.g. 
\"eduPersonPrincipalName\"\n :param vo_identifier_type: name of VO identifier to return\n :param email_re_object: regular expression object to match email type against\n :param membership_attribute_saml_name: SAML name for group membership attribute\n \"\"\"\n self.co_manage_url = co_manage_url\n self.co_manage_auth = co_manage_auth\n self.co_id = co_id\n self.idp_attribute_name = idp_attribute_name\n self.vo_identifier_type = vo_identifier_type\n self.email_re_object = email_re_object\n self.membership_attribute_saml_name = membership_attribute_saml_name\n\n def get_attributes(self, idp_attributes):\n \"\"\"Will not fail on missing user data, returns what it gets from\n COmanage.\"\"\"\n try:\n # Initialize attributes we will return to empty list.\n attributes = {}\n\n # Retrieve the identifier sent by the IdP.\n user_id = self._get_user_id(idp_attributes)\n logger.debug(\"get_attributes: user_id is %s\" % user_id)\n\n # Use the identifier to query COmanage Registry for OrgPerson ID.\n org_id = self._get_org_id(user_id)\n logger.debug(\"get_attributes: org_id is %s\" % org_id)\n\n # The identifier from IdP may not map to an OrgPerson so only\n # proceed if COmanage Registry returns an OrgPerson ID.\n if org_id:\n # Use the OrgPerson ID to obtain the CoPerson ID.\n person_id = self._get_person_id(org_id)\n logger.debug(\"get_attributes: person_id is %s\" % person_id)\n\n attributes.update(self._get_name_info(person_id))\n attributes.update(self._get_email_address(person_id))\n attributes.update(self._get_vo_info(person_id))\n attributes.update(self._get_group_info(person_id))\n\n except FailedRequestError as e:\n raise NoUserData(\n \"Failed to fetch user attributes: {}\".format(e))\n\n logger.debug(\"get_attributes: returning attributes %s\" % attributes)\n return attributes\n\n def _get_user_id(self, attributes):\n \"\"\"Get the user id from the attributes from the IdP.\"\"\"\n try:\n return attributes[self.idp_attribute_name][0]\n except KeyError:\n raise NoUserData(\n \"Necessary attribute '{}' not returned by IdP.\".format(\n self.idp_attribute_name))\n\n def _get_org_id(self, user_id):\n \"\"\"Get the 'organizational identity' to which the user identifier is\n linked.\"\"\"\n params = {\"coid\": self.co_id, \"search_identifier\": user_id}\n json = self._make_request(\"/org_identities.json\", params)\n logger.debug(\"_get_org_id: json is %s\" % json)\n org_identities_list = json[\"OrgIdentities\"]\n if org_identities_list:\n return json[\"OrgIdentities\"][0][\"Id\"]\n else:\n return None\n\n def _get_person_id(self, org_id):\n \"\"\"Get the 'CoPersonId'.\"\"\"\n json = self._make_request(\"/co_org_identity_links.json\",\n {\"orgidentityid\": org_id})\n logger.debug(\"_get_person_id: json is %s\" % json)\n\n # Search all links to find the COPerson linked with this COId\n for link in json[\"CoOrgIdentityLinks\"]:\n person_id = link[\"CoPersonId\"]\n if self._person_id_in_co(person_id):\n return person_id\n\n def _person_id_in_co(self, person_id):\n \"\"\"Verify that a COPerson is linked with this COId.\"\"\"\n json = self._make_request(\n \"/co_people/{person_id}.json\".format(person_id=person_id))\n logger.debug(\"_person_id_in_co: json is %s\" % json)\n\n return json[\"CoPeople\"][0][\"CoId\"] == self.co_id\n\n def _get_name_info(self, person_id):\n \"\"\"Get the users name information.\"\"\"\n json = self._make_request(\"/names.json\", {\"copersonid\": person_id})\n logger.debug(\"_get_name_info: json is %s\" % json)\n\n if 'Names' in json:\n for entry in json[\"Names\"]:\n if 
entry[\"PrimaryName\"]:\n gn = entry[\"Given\"]\n sn = entry[\"Family\"]\n return {\n \"givenName\": gn,\n \"sn\": sn,\n \"displayName\": \"{gn} {sn}\".format(gn=gn, sn=sn)\n }\n\n return {}\n\n def _get_email_address(self, person_id):\n \"\"\"Get the users email address.\"\"\"\n json = self._make_request(\"/email_addresses.json\",\n {\"copersonid\": person_id})\n logger.debug(\"_get_email_address: json is %s\" % json)\n\n if 'EmailAddresses' in json:\n for entry in json[\"EmailAddresses\"]:\n m = self.email_re_object.search(entry[\"Type\"])\n if m:\n return {\"mail\": entry[\"Mail\"]}\n\n return {}\n\n def _get_vo_info(self, person_id):\n \"\"\"Get the VO identifier\"\"\"\n json = self._make_request(\"/identifiers.json\",\n {\"copersonid\": person_id})\n logger.debug(\"_get_vo_info: json is %s\" % json)\n\n if 'Identifiers' in json:\n for entry in json[\"Identifiers\"]:\n if entry[\"Type\"] == self.vo_identifier_type:\n return {\"uid\": entry[\"Identifier\"]}\n\n return {}\n\n def _get_group_info(self, person_id):\n \"\"\"Get all groups the user is a member of.\"\"\"\n json = self._make_request(\"/co_groups.json\", {\"copersonid\": person_id})\n logger.debug(\"_get_group_info: json is %s\" % json)\n\n if 'CoGroups' in json:\n return {self.membership_attribute_saml_name : [entry[\"Name\"] for entry in json[\"CoGroups\"]]}\n \n return {}\n\n def _make_request(self, path, parameters=None):\n \"\"\"Make request to the COmanage server.\"\"\"\n url = \"{base_url}{path}\".format(base_url=self.co_manage_url,\n path=path)\n if parameters:\n url = \"{url}?{params}\".format(url=url,\n params=urllib.parse.urlencode(\n parameters))\n\n try:\n resp = requests.get(url, auth=self.co_manage_auth)\n except requests.RequestException as e:\n raise FailedRequestError(str(e))\n\n if resp.status_code == 200:\n return resp.json()\n elif resp.status_code == 204: # status code 204 is No Content\n return {}\n else:\n raise FailedRequestError(\n \"{status}: {text}\".format(status=resp.status_code,\n text=resp.text))\n","sub_path":"src/s2sproxy_module/comanage.py","file_name":"comanage.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"412780314","text":"import subprocess\nfrom matplotlib import pyplot as plt\n\n\ndef single_run(args, log_file):\n\n cmdline = ['python3', 'Connect4.py'] + args\n out = []\n losses = [] # tuples (round, loss)\n\n losses = [] # tuples (round, loss)\n print(cmdline)\n with subprocess.Popen(cmdline, stdout=subprocess.PIPE, universal_newlines=True) as p:\n for line in p.stdout:\n print(line, end='')\n out.append(line)\n\n if 'loss' in line and 'ROUND' in line:\n line = line.strip('\\n')\n line_list = line.split(' ')\n val = line_list[line_list.index('loss') + 1]\n round = line_list[line_list.index('\\tROUND') + 1]\n if line_list[line_list.index('\\tROUND') - 1] == '1':\n losses.append((round, val))\n\n if p.stderr:\n for line in p.stderr:\n print(line, end='')\n\n games, wins1, wins2 = get_final_result(out)\n msg1 = ', '.join(args)\n msg = msg1 + '\\n' + 'Player 1 wins {}, Player 2 wins {}, out of {} games.'.format(wins1, wins2, games) + '\\n'\n final_log(msg, log_file)\n plot_loss(losses)\n\n\ndef plot_loss(loss_list):\n x = [int(l[0]) for l in loss_list]\n y = [float(l[1]) for l in loss_list]\n plt.figure()\n plt.scatter(x, y)\n plt.xticks(x)\n plt.savefig('plots/{}_loss.png'.format(log_file))\n\n\ndef get_final_result(output):\n output.reverse()\n num_games, player1_wins, player2_wins = 0, 
0, 0\n\n for i, line in enumerate(output):\n if 'Player 1 wins' in line:\n if player1_wins == 0:\n l = line.split()\n if l[-1] == '':\n player1_wins = l[-2]\n else:\n player1_wins = l[-1]\n\n if 'Player 2 wins' in line:\n if player2_wins == 0:\n l = line.split()\n if l[-1] == '':\n player2_wins = l[-2]\n else:\n player2_wins = l[-1]\n\n if 'STATUS' in line:\n if num_games == 0:\n l = line.split()\n if l[-1] == '':\n l = l[:-1]\n num_games = l[-2]\n return num_games, player1_wins, player2_wins\n\n\ndef final_log(content, path):\n with open('session_logs/{}'.format(path), 'a') as f:\n f.write(content)\n print(\"Logged to {}\".format(path))\n\n\nif __name__ == '__main__':\n\n test_rounds = 1000\n log_file = 'seventh_Cnet11'\n\n final_log('Starting log... topology net #{}'.format(11), log_file)\n\n # train against self\n train_num = 'first_train'\n rounds = 1000\n first_model_save = 'SmartSmart_CNet_{}'.format(log_file)\n smart1_args = 'save_to={}1'.format(first_model_save)\n smart2_args = 'epsilon=0.85, epsilon_decay=0.1, save_to={}2'.format(first_model_save)\n final_log(\"Training against self, {} rounds with args:\\nPlayer1: {}\\nPlayer2:{}\".format(rounds, smart1_args,\n smart2_args), log_file)\n train1_args = ['-D={}'.format(rounds), '-A=SmartPolicy({});SmartPolicy({})'.format(smart1_args, smart2_args), '-bi=RandomBoard']\n single_run(train1_args, log_file)\n\n # test against random\n final_log(\"Testing against self, {} rounds\".format(test_rounds), log_file)\n smart1_args = 'load_from=models/{}1'.format(first_model_save)\n test1_args = ['-D={}'.format(test_rounds), '-A=SmartPolicy({});RandomAgent()'.format(smart1_args), '-bi=RandomBoard',\n '-t=test', '-l=logs/SmartRandomTest2.log']\n single_run(test1_args, log_file)\n\n # train some more\n train_num = 'second_train'\n rounds = 10000\n second_model_save = 'SmartSmart_CNet_{}_2'.format(log_file)\n smart1_args = 'epsilon=0.05, epsilon_decay=0.001, save_to={}1, load_from=models/{}1'.format(second_model_save,\n first_model_save)\n smart2_args = 'save_to={}2, load_From=models/{}2'.format(second_model_save, first_model_save)\n train2_args = ['-D={}'.format(rounds), '-A=SmartPolicy({});SmartPolicy({})'.format(smart1_args, smart2_args), '-bi=RandomBoard']\n final_log(\"Training against self, {} rounds with args:\\nPlayer1: {}\\nPlayer2:{}\".format(rounds, smart1_args,\n smart2_args), log_file)\n\n single_run(train2_args, log_file)\n\n # test again against random\n final_log(\"Testing against self, {} rounds\".format(test_rounds), log_file)\n smart1_args = 'load_from=models/{}1'.format(second_model_save)\n test1_args = ['-D={}'.format(test_rounds), '-A=SmartPolicy({});RandomAgent()'.format(smart1_args), '-bi=RandomBoard',\n '-t=test', '-l=logs/SmartRandomTest3.log']\n single_run(test1_args, log_file)\n\n # test against minmax\n final_log(\"Testing against self, {} rounds\".format(test_rounds), log_file)\n smart1_args = 'load_from=models/{}1'.format(second_model_save)\n test2_args = ['-D={}'.format(test_rounds), '-A=SmartPolicy({});MinmaxAgent(depth=1)'.format(smart1_args), '-bi=RandomBoard',\n '-t=test', '-l=logs/MinmaxDepth2.log']\n single_run(test2_args, log_file)\n","sub_path":"runScript.py","file_name":"runScript.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"364828726","text":"\nimport numpy as np\nfrom rdkit import Chem\nfrom mordred import Calculator, descriptors\n\n\nclass PartitionCoefficient:\n\n \"\"\"\n Kd is species-specific, even 
compartment specific too\n Kaw - Kaw_n can be generated from EPI Suite\n Kow - Kow_n can be generated from EPI Suite\n Koc\n Kd\n \"\"\"\n\n def __init__(self, chem_type):\n '''\n :param chem_type: organic acid or organic base or metal\n :param species_type: neutral or ionic (cation or anion)\n '''\n self.chem_type = chem_type\n\n pass\n\n\n\n def Kd_n(self, Koc_n, foc, solidP):\n '''\n It can be applied to sediment and soil\n :param Koc_n: neutral Koc\n :param foc: organic carbon concentent\n :param solidP: solid density (kg-soil/m3-soil)\n :return:\n '''\n Kd_n = Koc_n * foc * solidP/1000.0\n return Kd_n\n\n\n def Kd_water_j(self, Koc_j, foc, solidP):\n '''\n if chem_type is 'metal', the Kd_j values should be generated by user through WHAM or MINTEQ\n :return:\n '''\n Kd_water_j_val = Koc_j * foc * solidP/1000.0\n return Kd_water_j_val\n\n\n def Kd_soil_j(self, smiles, foc, CEC_soil, Koc_acid, Kow_n, pKa, Fr_n, Fr_i, density_solid, metal_str,\n pH, total_metal, SOM):\n \"\"\"\n This is used for Kd values in soil.\n If the user doesn't input the values, we will calcuate them. Otherwise, use the user provided values.\n Kd_soil_n is handled in the load data function through Koc values\n :param smiles:\n :param foc: organic carbon fraction, value is between 0 to 1\n :param CEC_soil: in unit mol/kg\n :param Koc_acid: from a search function, if Koc_acid not empty, use Koc_acid, otherwise use the function results\n :return:\n \"\"\"\n Kd_j = 0.0\n if self.chem_type == 'organic base':\n Kd_j = self.Kd_organic_base(smiles, CEC_soil, foc)\n\n elif self.chem_type == 'organic acid':\n if not Koc_acid:\n Koc_j_val = self.Koc_j(Kow_n, pKa, Fr_n, Fr_i)\n Kd_j = self.Kd_general_j(foc, Koc_j_val, density_solid)\n\n elif self.chem_type == 'metal':\n Kd_j = self.Kd_metal_j(metal_str, pH, total_metal, SOM)\n\n return Kd_j\n\n\n def Koc_j(self, Kow_n, pKa, Fr_n, Fr_i):\n '''\n for chem_type is organic base or organic acid, this equation is used for organic acid if not exp data avalable\n :param Kow_n: octanol-water partition coefficient of neutral species\n :param Fr_n: fraction of neutral species in water phase\n :param Fr_i: fraction of ionic species in water phase\n :param pKa: dissociation acid constant\n\n :return:\n\n # the equation is applicable for pKa (0,12), pKb (2,12)\n # logKow_n (-2.18, 8.5) for acides, (-1.66, 7.03) for bases\n\n Source: A multimedia activity model for ionizable compounds - validation study with 2,4-D, aniline and trimethoprim\n Antonio Franco and Stefan Trapp, 2010\n https://www.ncbi.nlm.nih.gov/pubmed/20821507\n\n '''\n\n Kow_i = 10.0 ** (np.log10(Kow_n) - 3.5)\n Kow_apparent = Fr_n * Kow_n + Fr_i * Kow_i\n\n Koc_val = 0.0\n if self.chem_type == 'organic base':\n Koc_val = 10.0 ** (0.11 * np.log(Kow_n) + 1.54)\n\n elif self.chem_type == 'organic acid':\n f = Kow_apparent / (Kow_apparent + 1)\n Koc_val = (pKa ** 0.65) * (f ** 0.14)\n\n return Koc_val\n\n\n def Kd_general_j(self, foc, Koc, density_solid):\n '''\n :param foc: organic carbon content of the dry matter (kg/kg)\n :param Koc: solid-water partition coefficient (L/kg)\n :param density_solid: kg/L, kg/m3\n\n :return: dimensionless solid-water sorption coefficient (L/L)\n '''\n\n Kd_val = Koc * foc * density_solid / 1000.0\n\n return Kd_val\n\n\n def Kd_organic_base(self, smiles, CEC_soil, foc):\n '''\n Source: Development and evaluation of a new sorption model for organic cations in soil:\n contributions from organic matter and clay minerals\n https://pubs.acs.org/doi/10.1021/es4031886\n '''\n\n RDKit_dic = 
self.cal_descriptor(smiles)\n Vx = RDKit_dic['VMcGowan'] / 100.0\n NAi = RDKit_dic['NAi']\n # reference sorption coefficient for the clay fraction in soils\n K_cec_clay = 10.0 ** (1.22 * Vx - 0.22 * NAi + 1.09)\n # ion-exchange-based sorption coefficient to Pahokee peat\n D_oc_ie = 10.0 ** (1.53 * Vx + 0.32 * NAi - 0.27)\n Kd_j = K_cec_clay * (CEC_soil - 3.4 * foc) + foc * D_oc_ie\n\n return Kd_j\n\n\n # Load RDKit data to the exposure table in JSON\n def cal_descriptor(self, smiles):\n # call rdkit mordred\n # McGowan's Volume and number of hydrogens bound by the charged nitrogen\n RDKIT_KEYS = [\"VMcGowan\"]\n mols = [Chem.MolFromSmiles(smi) for smi in [smiles]]\n calc = Calculator(descriptors, ignore_3D=True)\n df = calc.pandas(mols, nproc=1)\n mol = Chem.MolFromSmiles(smiles)\n NH0 = Chem.Fragments.fr_NH0(mol) # number of Tertiary amines\n NH1 = Chem.Fragments.fr_NH1(mol) # number of Secondary amines\n NH2 = Chem.Fragments.fr_NH2(mol) # number of Primary amines\n\n NAi = 0\n if NH2 != 0:\n NAi = 3\n elif NH1 != 0:\n NAi = 2\n elif NH0 != 0:\n NAi = 1\n RDKit_dic = {}\n RDKit_dic['NAi'] = NAi\n for key in RDKIT_KEYS:\n RDKit_dic[key] = float(df[key][0])\n return RDKit_dic\n\n\n def Kd_metal_j(self, metal_str, pH, total_metal, SOM):\n '''\n :param metal_str:\n :return:\n\n Source: Solid-solution partitioning of metals in contaminated soils: dependence on pH, total metal burden,\n and organic matter, 2000, ES&T\n https://pubs.acs.org/doi/abs/10.1021/es9907764\n '''\n Kd_j = 0.0\n\n # TODO: what about other metals?\n\n if metal_str == 'Cd':\n Kd_j = 10 ** (0.48 * pH + 0.82 * np.log10(SOM) - 0.65)\n\n elif metal_str == 'Cu':\n Kd_j = 10 ** (0.21 * pH + 0.51 * np.log10(SOM) + 1.75)\n\n elif metal_str == 'Ni':\n Kd_j = 10 ** (1.02 * pH + 0.80 * np.log10(SOM) - 4.16)\n\n elif metal_str == 'Pb':\n Kd_j = 10 ** (0.37 * pH + 0.44 * np.log10(total_metal) + 1.19)\n\n elif metal_str == 'Zn':\n Kd_j = 10 ** (0.60 * pH + 0.21 * np.log10(total_metal) - 1.34)\n\n return Kd_j\n\n","sub_path":"aquivalence/partition_coef.py","file_name":"partition_coef.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"436762782","text":"class Student:\r\n Name = None\r\n HW = 0\r\n Lab = 0\r\n Total = 0\r\n\r\ndef makeStudent (Name:str, HW: int, Lab: int, Total: int) ->Student:\r\n s = Student ()\r\n s.Name = Name\r\n s.Total = Total\r\n s.Lab = Lab\r\n s.HW = HW\r\n return S\r\n\r\ndef get_Lab_Scores (file_name: str, student_list:list) ->list:########\r\n while True:\r\n try:\r\n openFile = open (file_name, 'r')\r\n break\r\n except FileNotFoundError as e:\r\n print (e)\r\n file_name= input (\"Input File Name:\")\r\n Lab = openFile.readlines ()\r\n s = Student()\r\n s.Name = Lab[0].split()[0]\r\n s.Lab = int(Lab[0].split()[1])\r\n student_list.append (s)\r\n for line in Lab [1:]:\r\n each = line.strip().split()\r\n count = 0\r\n for student1 in student_list:\r\n if student1.Name == each[0]:\r\n student1.Lab += int(each[1])\r\n count =1\r\n if count == 0:\r\n student2 = Student()\r\n student2.Name = each [0]\r\n student2.Lab = int (each[1])\r\n student_list.append (student2)\r\n return student_list\r\n\r\ndef get_HW_Scores (file_name2: str, student_list:list) ->list:\r\n while True:\r\n try:\r\n openFile = open (file_name2, 'r')\r\n break\r\n except FileNotFoundError as e:\r\n print (e)\r\n file_name2= input (\"Input File Name:\")\r\n HW = openFile.readlines ()\r\n for line in HW:\r\n each = line.strip().split()\r\n count = 
0\r\n for student1 in student_list:\r\n if student1.Name == each[0]:\r\n student1.HW += float(each[1])\r\n count =1\r\n if count == 0:\r\n student2 = Student()\r\n student2.Name = each [0]\r\n student2.HW = float(each[1])\r\n student_list.append (student2)\r\n return student_list\r\n\r\ndef overall_percent(student_list: list, assignments: int) -> list:\r\n for student in student_list:\r\n student.Total = float(((student.HW / assignments) * .5) + ((student.Lab / assignments) * .5))\r\n return student_list\r\n\r\ndef output_A_grade (student_list:list, file_name:str, assignments:int):\r\n file = open (\"A.txt\", \"w\")\r\n for student in student_list:\r\n if (student.Total) >= 90:\r\n file.write (student.Name + \"\\n\" + \"Overall_Percent: \" + str(student.Total) + \"% \\n\")\r\n file.close()\r\n\r\ndef output_B_grade (student_list:list, file_name: str, assignments:int):\r\n file = open (\"B.txt\", \"w\")\r\n for student in student_list:\r\n if 90 <(Student.Total) >= 80:\r\n file.write (student.Name + \"\\n\" + \"Overall_Percent:\" + str(student.Total)+ \"% \\n\")\r\n file.close()\r\n\r\ndef output_C_grade (student_list:list, file_name: str, assignments:int):\r\n file = open (\"C.txt\", \"w\")\r\n for student in student_list:\r\n if 80 <(Student.Total) >= 70:\r\n file.write (student.Name + \"\\n\" + \"Overall_Percent:\" + str(student.Total) + \"% \\n\")\r\n file.close()\r\n\r\ndef output_D_grade (student_list:list, file_name: str, assignments:int):\r\n file = open (\"D.txt\", \"w\")\r\n for student in student_list:\r\n if 70 <(Student.Total) >= 60:\r\n file.write (student.Name + \"\\n\" + \"Overall_Percent:\" + str(student.Total) + \"% \\n\")\r\n file.close()\r\n\r\ndef output_F_grade (student_list:list, file_name: str, assignments:int):\r\n file = open (\"F.txt\", \"w\")\r\n for student in student_list:\r\n if (Student.Total) < 60:\r\n file.write (student.Name + \"\\n\" + \"Overall_Percent:\" + str(student.Total) + \"% \\n\")\r\n file.close()\r\n\r\ndef main ():\r\n student_list = []\r\n assignments = int (input(\"How many submitted assignments:\"))\r\n student_list = get_Lab_Scores (\"lab.txt\", student_list)\r\n student_list = get_HW_Scores (\"hw.txt\",student_list)\r\n student_list = overall_percent(student_list, assignments)\r\n output_A_grade (student_list, \"A.txt\", assignments)\r\n output_B_grade (student_list, \"B.txt\", assignments)\r\n output_C_grade (student_list, \"C.txt\", assignments)\r\n output_D_grade (student_list, \"D.txt\", assignments)\r\n output_F_grade (student_list, \"F.txt\", assignments)\r\n\r\nmain () \r\n \r\n","sub_path":"Lab 5 prob 3.py","file_name":"Lab 5 prob 3.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"648335782","text":"\"\"\"Optimization config and helpers.\"\"\"\nfrom torch import optim\nimport transformers\n\nfrom . 
import config\n\n\ndef get(cfg, model_parameters):\n if cfg.optimizer == 'adam':\n return optim.Adam(\n params=model_parameters,\n lr=cfg.lr,\n betas=(cfg.beta1, cfg.beta2),\n eps=cfg.eps,\n weight_decay=cfg.weight_decay)\n if cfg.optimizer == 'adamw':\n return transformers.AdamW(\n params=model_parameters,\n lr=cfg.lr,\n betas=(cfg.beta1, cfg.beta2),\n eps=cfg.eps,\n weight_decay=cfg.weight_decay,\n correct_bias=cfg.correct_bias)\n else:\n raise ValueError(f'Unexpected optimizer: {cfg.optimizer}')\n\n\nclass OptimizerConfig(config.Config):\n \"\"\"Base class for optimization config.\"\"\"\n\n def __init__(self, optimizer, lr, weight_decay):\n super().__init__()\n self.optimizer = optimizer\n self.lr = lr\n self.weight_decay = weight_decay\n\n\nclass AdamConfig(OptimizerConfig):\n \"\"\"Config for Adam optimization.\"\"\"\n\n def __init__(self, lr, weight_decay=0., beta1=0.9, beta2=0.999, eps=1e-08):\n super().__init__(\n optimizer='adam',\n lr=lr,\n weight_decay=weight_decay)\n self.beta1 = beta1\n self.beta2 = beta2\n self.eps = eps\n\n\nclass AdamWConfig(OptimizerConfig):\n \"\"\"Config for AdamW.\"\"\"\n\n def __init__(self, lr, weight_decay=0., beta1=0.9, beta2=0.999,\n eps=1e-08, correct_bias=True):\n super().__init__(\n optimizer='adamw',\n lr=lr,\n weight_decay=weight_decay)\n self.beta1 = beta1\n self.beta2 = beta2\n self.eps = eps\n self.correct_bias = correct_bias\n","sub_path":"pan20/util/pytorch/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"191046832","text":"# Preparation:\n# Emulator: Used for build id and verification. Download the adt, and create x86 and arm emulators (use host GPU)\n# Webdriver: Used for build id and verification. 
pip install selenium, and download chromedriver\n# Apk tool: Download it from http://connortumbleson.com/apktool/test_versions\n# Download:\n# Install Chrome\n# Set its download directory as /workspace/server/chromium/android-chrome-todo/download\n# Open it with google-chrome --user-data-dir=/workspace/tool/arm/chrome-profile\n# Open it with google-chrome --user-data-dir=/workspace/tool/x86/chrome-profile\n# Install extension SwitchySharp\n# Install extension at share/python/apk-downloader\n# Login extension with: webperf0@gmail.com and 32761AAE6636D2A3(arm)/376FCD341892D871(x86) as device id.\n\nfrom selenium import webdriver\nimport urllib2\nfrom util import *\n\ndir_root = ''\nvers = []\nver_types = []\ntarget_archs = []\nrun_act = 0\n\nACT_DOWNLOAD = 1 << 0\nACT_FILE = 1 << 1\nACT_DIR = 1 << 2\nACT_CHECK = 1 << 3\nACT_ALL = ACT_DOWNLOAD | ACT_FILE | ACT_DIR | ACT_CHECK\n\ncmd_common = python_chromium + ' --repo-type chrome-android --target-os android --target-module chrome'\n\ndevices_id = []\n\n\ndef parse_arg():\n global args, args_dict\n parser = argparse.ArgumentParser(description='Script about chrome for android',\n formatter_class=argparse.RawTextHelpFormatter,\n epilog='''\nexamples:\n python %(prog)s --ver 36.0.1985.81 --ver-type stable --target-arch x86\n''')\n parser.add_argument('--run', dest='run', help='run', action='store_true')\n parser.add_argument('--run-act', dest='run_act', help='run act', type=int, default=ACT_ALL)\n parser.add_argument('--check', dest='check', help='check if there is new apk', action='store_true')\n parser.add_argument('--download', dest='download', help='download apk from google play', action='store_true')\n parser.add_argument('--download_type', dest='download_type', help='version type to download', default='all')\n parser.add_argument('--backup', dest='backup', help='backup', action='store_true')\n parser.add_argument('--backup-ver', dest='backup_ver', help='backup versions less than the designated')\n parser.add_argument('--ver', dest='ver', help='version', default='all')\n parser.add_argument('--ver-type', dest='ver_type', help='ver type', default='all')\n parser.add_argument('--target-arch', dest='target_arch', help='target arch', default='all')\n parser.add_argument('--analyze', dest='analyze', help='analyze test tombstone', action='store_true')\n parser.add_argument('--analyze-type', dest='analyze_type', help='type to analyze', choices=['tombstone', 'anr'], default='tombstone')\n add_argument_common(parser)\n\n args = parser.parse_args()\n args_dict = vars(args)\n\n if len(sys.argv) <= 1:\n parser.print_help()\n quit()\n\n\ndef setup():\n global vers, ver_types, target_archs, run_act\n global dir_root, log, timestamp\n\n (timestamp, dir_root, log) = setup_common(args, _teardown)\n dir_root = dir_project + '/chrome-android'\n\n if args.ver_type == 'all':\n ver_types = ['stable', 'beta']\n else:\n ver_types = args.ver_type.split(',')\n\n if args.target_arch == 'all':\n target_archs = ['x86', 'arm']\n else:\n target_archs = args.target_arch.split(',')\n\n for target_arch in target_archs:\n dir_temp = dir_server_chromium + '/android-%s-chrome/archive' % target_arch\n if not os.path.exists(dir_temp):\n os.makedirs(dir_temp)\n\n ensure_dir(dir_server_chrome_android_todo)\n run_act = args.run_act\n\n\ndef run(force=False, act=ACT_ALL):\n if not args.run and not force:\n return\n\n if act & ACT_DOWNLOAD:\n download(force=True)\n\n if act & ACT_FILE:\n _handle_todo_file()\n\n if act & ACT_DIR:\n _handle_todo_dir()\n\n if act & ACT_CHECK:\n 
check(force=True)\n\n\ndef download(force=False):\n if not args.download and not force:\n return\n\n dir_download = dir_server_chrome_android_todo + '/download'\n ensure_dir(dir_download)\n execute('rm -rf %s/*' % dir_download)\n\n dir_trash = dir_server_chrome_android_todo + '/trash'\n ensure_dir(dir_trash)\n\n # download the apk\n for target_arch in target_arch_chrome_android:\n options = webdriver.ChromeOptions()\n options.add_experimental_option('excludeSwitches', ['user-data-dir', 'ignore-certificate-errors', 'disable-default-apps'])\n options.add_argument('user-data-dir=%s' % (dir_tool + '/' + target_arch + '/chrome-profile'))\n driver = webdriver.Chrome(executable_path=dir_tool + '/chromedriver', chrome_options=options, service_args=['--verbose', '--log-path=%s/chromedriver-%s.log' % (dir_share_ignore_log, timestamp)])\n\n if args.download_type == 'all' or args.download_type == 'stable':\n driver.get('https://play.google.com/store/apps/details?id=' + chromium_android_info['chrome_stable'][CHROMIUM_ANDROID_INFO_INDEX_PKG])\n time.sleep(3)\n if args.download_type == 'all' or args.download_type == 'beta':\n driver.get('https://play.google.com/store/apps/details?id=' + chromium_android_info['chrome_beta'][CHROMIUM_ANDROID_INFO_INDEX_PKG])\n\n finished = False\n while not finished:\n finished = True\n files = os.listdir(dir_download)\n if not files:\n finished = False\n else:\n for f in files:\n if re.search('crdownload', f):\n finished = False\n break\n\n if not finished:\n time.sleep(3)\n\n driver.quit()\n\n execute('mv %s/* %s' % (dir_download, dir_server_chrome_android_todo), dryrun=False)\n\n\ndef check(force=False):\n if not args.check and not force:\n return\n\n info('Begin to check..')\n content = ''\n subject = ''\n\n # get all the combos\n url = 'http://www.hiapphere.org/app-chrome_beta'\n try:\n u = urllib2.urlopen(url)\n except BadStatusLine:\n warning('Failed to open ' + url)\n return\n\n html = u.read()\n pattern = re.compile('Version(\\d+\\.\\d+\\.\\d+\\.\\d+)')\n vers_all = pattern.findall(html)\n combos_all = []\n for ver in vers_all:\n if ver_cmp(ver, '33.0.1750.132') < 0:\n continue\n for target_arch in target_arch_chrome_android:\n combos_all.append((target_arch, ver))\n\n # get all combos done\n combos_done = []\n for target_arch in target_arch_chrome_android:\n dirs_done = os.listdir(dir_server_chromium + '/android-%s-chrome' % target_arch)\n dirs_done += os.listdir(dir_server_chromium + '/android-%s-chrome/archive' % target_arch)\n combos_done += _get_combos(dirs_done, target_arch)\n\n # get all combos todo\n combos_todo = []\n for target_arch in target_arch_chrome_android:\n dirs_todo = os.listdir(dir_server_chrome_android_todo + '/%s' % target_arch)\n combos_todo += _get_combos(dirs_todo, target_arch)\n\n combos_new = sorted(list_diff(combos_all, list_union(combos_done, combos_todo)))\n\n if len(combos_new):\n subject += ' download required'\n content += 'The following combos need to be downloaded: ' + ','.join(str(i) for i in combos_new) + '
'\n    else:\n        subject += ' download clean'\n\n    if len(combos_todo):\n        subject += ' build required'\n        content += 'The following combos need to be built: ' + ','.join(str(i) for i in combos_todo) + '
'\n else:\n subject += ' build clean'\n\n info(content)\n if host_name == 'wp-03':\n to = ['yang.gu@intel.com', 'zhiqiangx.yu@intel.com']\n send_mail('webperf@intel.com', to, 'Chrome for Android -' + subject, content, type='html')\n\n\ndef backup():\n if not args.backup:\n return\n\n for target_arch in target_arch_chrome_android:\n dirs = os.listdir(dir_server_chromium + '/android-%s-chrome' % target_arch)\n for dir_temp in dirs:\n if dir_temp == 'archive':\n continue\n\n info_temp = dir_temp.split('-')\n ver_temp = info_temp[0]\n ver_type_temp = info_temp[1]\n\n dir_chrome = 'chromium/android-%s-chrome/%s-%s' % (target_arch, ver_temp, ver_type_temp)\n execute('smbclient %s -N -c \"prompt; recurse; mkdir %s;\"' % (path_server_backup, dir_chrome))\n backup_dir(dir_server + '/' + dir_chrome)\n if os.path.exists('Chrome.apk'):\n backup_smb(path_server_backup, dir_chrome, 'Chrome.apk')\n backup_smb(path_server_backup, dir_chrome, 'Chromium.apk')\n backup_smb(path_server_backup, dir_chrome, 'README')\n else:\n backup_smb(path_server_backup, dir_chrome, 'Null.apk')\n restore_dir()\n\n\ndef backup_ver():\n if not args.backup_ver:\n return\n\n dirs = os.listdir('.')\n for dir_ver in dirs:\n if re.match('\\d+\\.\\d+\\.\\d+\\.\\d+', dir_ver) and ver_cmp(dir_ver, args.backup_ver) <= 0:\n execute('tar zcf %s.tar.gz %s' % (dir_ver, dir_ver))\n backup_smb(path_server_backup, 'chromium', '%s.tar.gz' % dir_ver)\n\n\ndef analyze():\n if not args.analyze:\n return\n\n _setup_device()\n lines = analyze_file(device_id=devices_id[0], type=args.analyze_type)\n dirs_symbol = []\n pattern = re.compile('libchrome\\.(.*)\\.so')\n for line in lines:\n match = pattern.search(line)\n if match:\n ver_part = match.group(1)\n break\n\n for dir_temp in [dir_server_chromium + '/android-x86-chrome', dir_server_chromium + '/android-chrome-todo/x86']:\n dirs = os.listdir(dir_temp)\n for d in dirs:\n if re.search(ver_part, d):\n dirs_symbol.append(dir_temp + '/' + d)\n break\n\n get_symbol(lines, dirs_symbol)\n\n\ndef _setup_device():\n global devices_id, devices_product, devices_type, devices_arch, devices_mode\n\n if devices_id:\n return\n\n (devices_id, devices_product, devices_type, devices_arch, devices_mode) = setup_device()\n\n\ndef _get_combos(dirs_check, target_arch):\n combos = []\n pattern = re.compile('(\\d+\\.\\d+\\.\\d+\\.\\d+)-(stable|beta)')\n\n for dir_check in dirs_check:\n match = pattern.search(dir_check)\n if not match:\n continue\n ver_temp = match.group(1)\n\n combos.append((target_arch, ver_temp))\n\n return combos\n\n\ndef _handle_todo_file():\n backup_dir(dir_server_chrome_android_todo)\n todos = os.listdir('.')\n for todo in todos:\n if os.path.isfile(todo):\n cmd = cmd_common + ' --dir-root ' + dir_server_chrome_android_todo\n cmd += ' --chrome-android-apk \"' + todo + '\"'\n cmd += ' --buildid'\n execute(cmd, interactive=True)\n\n restore_dir()\n\n\ndef _handle_todo_dir():\n backup_dir(dir_server_chrome_android_todo)\n todos = os.listdir('.')\n for todo in todos:\n if os.path.isdir(todo):\n target_arch_temp = todo\n if target_arch_temp not in target_arch_all:\n continue\n\n dirs_todo = os.listdir(dir_server_chrome_android_todo + '/' + target_arch_temp)\n for dir_todo in dirs_todo:\n info = dir_todo.split('-')\n ver_temp = info[0]\n ver_type_temp = info[1]\n\n cmd = cmd_common + ' --dir-root ' + dir_root + '/' + ver_temp\n cmd += ' --target-arch ' + target_arch_temp\n cmd += ' --ver ' + ver_temp\n cmd += ' --ver-type ' + ver_type_temp\n cmd += ' --phase-continue'\n\n execute(cmd, interactive=True)\n 
restore_dir()\n\n\ndef _teardown():\n pass\n\n\nif __name__ == \"__main__\":\n parse_arg()\n setup()\n run(act=run_act)\n check()\n download()\n backup()\n backup_ver()\n analyze()\n","sub_path":"python/chrome-android.py","file_name":"chrome-android.py","file_ext":"py","file_size_in_byte":11884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"494235050","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport datetime as dt\nimport matplotlib.dates as md\nimport matplotlib.pyplot as mp\n\n#.txt\narr = np.arange(1,10).reshape(3,3)\n\nnp.savetxt('a.csv',arr,fmt='%d')\n\nb = np.loadtxt('a.csv')\n\nprint(b)\n\n\n# k.py\n\ndef dmy2ymd(dmy):\n dmy = str(dmy,encoding='utf-8')\n ymd = dt.datetime.strptime(dmy,'%d-%m-%Y').date()\n ymd = str(ymd)\n return ymd\n\n# M8[D] : 8位字符的日期格式,精度到 day ; converters={0:dmy2ymd} 表示对获取后的数组的第一列使用转换方法\ndates,opening_prices,highest_prices,lowest_prices,closing_proces = np.loadtxt(\n 'aapl.csv',delimiter=',',usecols=(1,3,4,5,6), dtype=bytes,unpack=True)\nprint(dates)\n\ndatesConveted = []\nfor dmy in dates:\n ymd = dmy2ymd(dmy)\n datesConveted.append(ymd)\nprint(datesConveted)\n#mean.py\nmean = np.mean(b[1])\nprint(mean)\n\n#average.py\naverage = np.average(b[0],weights=b[1][::-1])\nprint(average)\n\n\n","sub_path":"python-data-analysis-note-master/numpy/numpy_func.py","file_name":"numpy_func.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"259921352","text":"import requests, os, bs4, sys\nimport re, time\nfrom datetime import datetime\nfrom datetime import timedelta\nimport sqlite3\n\nDateSearch = re.compile(r'(\\d\\d\\d\\d)(\\d\\d)')\n\n#dir_temp = os.getcwd()\n#dir_temp = dir_temp + '/DBs'\n#os.chdir(dir_temp)\nconn = sqlite3.connect('stock_list.db')\nc = conn.cursor()\n\nf = open('monthly_revenue_update.log',\"r\", encoding=\"utf-8\")\ncurrent_last_date = f.readline()\ncurrent_last_date = current_last_date.replace('last update: ','')\n#print(current_last_date)\nf.close()\n\npath = os.getcwd()\npath = path + '\\\\stock_db'\nos.chdir(path)\nprint(os.getcwd())\n\nstock_url1 = 'https://mops.twse.com.tw/nas/t21/sii/t21sc03_---ROCyear---_---month---_0.html'\nstock_url2 = 'https://mops.twse.com.tw/nas/t21/sii/t21sc03_---ROCyear---_---month---_1.html'\nstock_url3 = 'https://mops.twse.com.tw/nas/t21/otc/t21sc03_---ROCyear---_---month---_0.html'\nstock_url4 = 'https://mops.twse.com.tw/nas/t21/otc/t21sc03_---ROCyear---_---month---_1.html'\ndelta_1day = timedelta(days = 1) #delta_1day\nskip = 0\nif current_last_date == '':\n current_last_date = datetime.today()\n cursor = c.execute(\"SELECT stock_id, start_date, stock_type, business from stock_list\")\n #find oldest data in the stock by stock list\n for current_entry in cursor:\n if ((current_entry[2] == '上櫃') or (current_entry[2] == '上市')) and (current_entry[3] != 'ETF'):\n db_name = current_entry[0] + '.db'\n conn_stock = sqlite3.connect(db_name)\n c_stock = conn_stock.cursor()\n cursor_stock = c_stock.execute('SELECT max(date_ID) FROM stock_month')\n data_ID_str = cursor_stock.fetchone()[0]\n if data_ID_str == None:\n conn_stock.close()\n continue\n data_ID_str = data_ID_str + '-01'\n latest_date = datetime.strptime(data_ID_str, \"%Y-%m-%d\")\n if latest_date < current_last_date:\n current_last_date = latest_date\n print(db_name + ' not up to date')\n #print(current_last_date)\n conn_stock.close()\n skip = 1\nelse:\n current_last_date = 
DateSearch.search(current_last_date)\n current_last_date = datetime(int(current_last_date.group(1)), int(current_last_date.group(2)), 1)\n\nEnd_date = datetime.today()\nif End_date.month == 12:\n End_date = End_date.replace(year = End_date.year - 1, month = 1)\nelse:\n End_date = End_date.replace(month = End_date.month - 1)\nStart_date_ptr = current_last_date\nif skip == 1:\n Start_date_ptr = End_date + delta_1day\nwhile Start_date_ptr <= End_date:\n print(Start_date_ptr)\n url = stock_url1.replace('---ROCyear---',str(Start_date_ptr.year - 1911)).replace('---month---',str(Start_date_ptr.month))\n print('Cool down for requesting web content')\n time.sleep(3)\n res = requests.get(url)\n revenue_content = res.text\n soup = bs4.BeautifulSoup(revenue_content, \"lxml\")\n \n url = stock_url2.replace('---ROCyear---',str(Start_date_ptr.year - 1911)).replace('---month---',str(Start_date_ptr.month))\n print('Cool down for requesting web content')\n time.sleep(3)\n res = requests.get(url)\n soup1 = bs4.BeautifulSoup(res.text, \"lxml\")\n for element in soup1.body:\n soup.body.append(element)\n url = stock_url3.replace('---ROCyear---',str(Start_date_ptr.year - 1911)).replace('---month---',str(Start_date_ptr.month))\n print('Cool down for requesting web content')\n time.sleep(3)\n res = requests.get(url)\n soup1 = bs4.BeautifulSoup(res.text, \"lxml\")\n for element in soup1.body:\n soup.body.append(element)\n url = stock_url4.replace('---ROCyear---',str(Start_date_ptr.year - 1911)).replace('---month---',str(Start_date_ptr.month))\n print('Cool down for requesting web content')\n time.sleep(3)\n res = requests.get(url)\n soup1 = bs4.BeautifulSoup(res.text, \"lxml\")\n for element in soup1.body:\n soup.body.append(element)\n \n\n cursor = c.execute(\"SELECT stock_id, stock_name, start_date, stock_type, business from stock_list\")\n for current_entry in cursor:\n if ((current_entry[3] == '上櫃') or (current_entry[3] == '上市')) and (current_entry[4] != 'ETF'):\n db_name = current_entry[0] + '.db'\n print(db_name)\n\n conn_stock = sqlite3.connect(db_name)\n c_stock = conn_stock.cursor()\n cursor_stock = c_stock.execute('SELECT max(date_ID) FROM stock_month')\n date_ID_str = cursor_stock.fetchone()[0]\n if date_ID_str == None:\n date_ID_str = '2009-12'\n latest_date_inDB = datetime.strptime(date_ID_str, \"%Y-%m\")\n if latest_date_inDB >= Start_date_ptr:\n conn_stock.close()\n continue\n td = soup.find(string=str(current_entry[0]))\n if td == None:\n conn_stock.close()\n continue\n print(td.parent.parent)\n soup1 = bs4.BeautifulSoup(str(td.parent.parent), \"lxml\")\n tds = soup1.find_all('td')\n sql_cmd = 'INSERT INTO stock_month (date_ID, revenue) VALUES (\\'' + str(Start_date_ptr.year) + '-' + Start_date_ptr.strftime(\"%m\") + '\\',' +\\\n tds[2].getText().strip().replace(',','') + ')'\n print(sql_cmd)\n c_stock.execute(sql_cmd)\n conn_stock.commit()\n conn_stock.close()\n\n if Start_date_ptr.month == 12:\n Start_date_ptr = Start_date_ptr.replace(year = Start_date_ptr.year + 1, month = 1)\n else:\n Start_date_ptr = Start_date_ptr.replace(month = Start_date_ptr.month + 1)\n\n #print(revenue_content)\n\npath = os.getcwd()\npath = path.replace('\\\\stock_db','')\nos.chdir(path)\nif skip != 1:\n f= open('monthly_revenue_update.log',\"w\", encoding=\"utf-8\")\n f.write('last update: ' + str(End_date.year) + End_date.strftime(\"%m\"))\n 
f.close()\nconn.close()","sub_path":"5_stock_data_revenue.py","file_name":"5_stock_data_revenue.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"65005667","text":"#!/usr/bin/env python\n\nimport csv\nimport json\nimport os\nimport urllib.request\nimport re\nimport io\nfrom collections import defaultdict\n\n\"\"\"Action data is available in csv form that has 3 headers looks like:\nkey,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65\n#,Name,,Icon,ActionCategory,,Animation{Start},VFX,Animation{End},ActionTimeline{Hit},,ClassJob,BehaviourType,ClassJobLevel,IsRoleAction,Range,CanTargetSelf,CanTargetParty,CanTargetFriendly,CanTargetHostile,,,TargetArea,,,,CanTargetDead,,CastType,EffectRange,XAxisModifier,,PrimaryCost{Type},PrimaryCost{Value},SecondaryCost{Type},SecondaryCost{Value},Action{Combo},PreservesCombo,Cast<100ms>,Recast<100ms>,CooldownGroup,,MaxCharges,AttackType,Aspect,ActionProcStatus,,Status{GainSelf},UnlockLink,ClassJobCategory,,,AffectsPosition,Omen,IsPvP,,,,,,,,,,,,IsPlayerAction\nint32,str,bit&01,Image,ActionCategory,byte,ActionCastTimeline,ActionCastVFX,ActionTimeline,ActionTimeline,byte,ClassJob,byte,byte,bit&02,sbyte,bit&04,bit&08,bit&10,bit&20,bit&40,bit&80,bit&01,bit&02,bit&04,sbyte,bit&08,bit&10,byte,byte,byte,bit&20,byte,uint16,byte,Row,Action,bit&40,uint16,uint16,byte,byte,byte,AttackType,byte,ActionProcStatus,byte,Status,Row,ClassJobCategory,byte,bit&80,bit&01,Omen,bit&02,bit&04,bit&08,bit&10,bit&20,bit&40,bit&80,bit&01,bit&02,byte,bit&04,bit&08,bit&10\n0,\"\",False,405,0,0,0,0,0,0,0,-1,0,0,False,0,False,False,False,False,False,False,False,False,False,0,True,True,0,0,0,False,0,0,0,0,0,False,0,0,0,0,0,0,0,0,0,0,0,0,0,False,False,0,False,False,True,False,True,True,False,False,True,0,False,False,False\n===rest of data===\n\nWe take the raw output and convert it into a usable JavaScript file to be imported and used within modules so we don't need to find and hardcode action data.\n\nTODO:\nMove constants into config file\nImplement argparse\nMake more universal functions more friendly for reuse\nGeneral streamlining\nImplement column filtering on returned data\nValidate output\nImplement proper logging\nImplement proper error handling\nReview and streamline name normalization (current normalization taken from csv_util.py and coinach.py)\nBuild out local file functionality\nMove table structuring from dunders to get_data_table()\nStab feature creep/overengineering in the eye\n\"\"\"\n\nconfig = {\n \"output\": {\n \"pve\": \"pve_action_info.js\",\n \"pvp\": \"pvp_action_info.js\",\n \"crafting\": \"crafting_action_info.js\",\n \"combo\": \"pve_action_combos.js\",\n \"invalid\": \"invalid_action.log\",\n },\n \"locale_url\": {\n \"root\": \"https://raw.githubusercontent.com/\",\n \"intl\": \"xivapi/ffxiv-datamining/master/csv/\",\n \"cn\": \"thewakingsands/ffxiv-datamining-cn/master/\",\n \"ko\": \"Ra-Workspace/ffxiv-datamining-ko/master/csv/\",\n \"local\": \"\",\n },\n \"path\": {\"cactbot\": os.path.abspath(__file__)[:-24]},\n \"log\": {\"error\": \"gen_action_info.log\"},\n}\n\n\ndef tree():\n return defaultdict(tree)\n\n\ndef __get_remote_table(url, inputs, outputs=None):\n \"\"\"Connects to a remote source to retrieve the table data\"\"\"\n # TODO: Error handling\n with urllib.request.urlopen(url) as response:\n csv_file = 
csv.reader(io.StringIO(response.read().decode(\"utf-8-sig\")))\n\n # First line is the indices and third line is the data types. They aren't currently used, so we discard them.\n next(csv_file)\n # Second line is the headers\n # Append the hexidecimal version of the ID as a new column so the original ID is usable for correllation\n headers = next(csv_file) + [\"HexID\"]\n next(csv_file)\n\n # Change # to ID for more readable data\n headers[0] = headers[0].replace(\"#\", \"ID\")\n # Generate the hex ID from the ID and return the data with the headers prepended\n return [headers] + [x + [format(int(x[0]), \"X\")] for x in csv_file]\n\n\ndef __get_local_table(filename, inputs, outputs=None):\n \"\"\"Gets table data from a local file\"\"\"\n # Open with utf-8-sig so a BOM is stripped; csv.reader mirrors the remote path and handles quoted fields that contain commas.\n with open(filename, \"r\", encoding=\"utf-8-sig\") as table:\n csv_file = list(csv.reader(table))\n\n # First line is the indices and third line is the data types. They aren't currently used, so we discard them.\n csv_file.pop(2)\n csv_file.pop(0)\n\n # Next line is the headers\n # Append the hexidecimal version of the ID as a new column so the original ID is usable for correllation\n headers = csv_file.pop(0) + [\"HexID\"]\n # Change # to ID for more readable data\n headers[0] = headers[0].replace(\"#\", \"ID\")\n # Generate the hex ID from the ID and return the data with the headers prepended\n return [headers] + [x + [format(int(x[0]), \"X\")] for x in csv_file]\n\n\ndef get_data_table(table_name, locale=\"intl\", inputs=None, outputs=None):\n \"\"\"Retrieves table data based on provided locale\"\"\"\n if locale == \"local\" and os.path.exists(table_name + \".csv\"):\n return __get_local_table(table_name + \".csv\", inputs, outputs)\n if locale in config[\"locale_url\"]:\n url = config[\"locale_url\"][\"root\"] + config[\"locale_url\"][locale] + table_name + \".csv\"\n return __get_remote_table(url, inputs, outputs)\n else:\n raise Exception(\"Invalid locale: %s\" % locale)\n\n\ndef normalize_name(str):\n \"\"\"Converts names into JavaScript-safe ascii string keys that adhere to the expected conventions.\"\"\"\n if not str:\n return str\n\n # The Tam\\u2013Tara Deepcroft\n str = re.sub(r\"\\u2013\", \"-\", str)\n\n # The Whorleater Extreme\n str = re.sub(r\"\", \"\", str)\n\n # Various symbols to get rid of.\n str = re.sub(r\"[':(),]\", \"\", str)\n\n # Sigmascape V4.0 (Savage)\n str = re.sub(r\"\\.\", \"\", str)\n\n # Common case hyphen: TheSecondCoilOfBahamutTurn1\n # Special case hyphen: ThePalaceOfTheDeadFloors1_10\n str = re.sub(r\"([0-9])-([0-9])\", r\"\\1_\\2\", str)\n str = re.sub(r\"[-]\", \" \", str)\n\n # Of course capitalization isn't consistent, that'd be ridiculous.\n str = str.title()\n\n # collapse remaining whitespace\n str = re.sub(r\"\\s+\", \"\", str)\n\n # remove non-ascii characters\n str = re.sub(r\"[^0-9A-z_]\", \"\", str)\n\n return str\n\n\ndef write_js(filename, scriptname, variable, d):\n \"\"\"Writes the created data structure to a .js file\"\"\"\n with open(filename, \"w\", encoding=\"utf-8\") as f:\n f.write(\"'use strict';\\n\\n\")\n f.write(\"// Auto-generated from %s\\n\" % scriptname)\n f.write(\"// DO NOT EDIT THIS FILE DIRECTLY\\n\\n\")\n f.write(\"const %s = \" % variable)\n\n str = json.dumps(d, sort_keys=True, indent=2, ensure_ascii=False)\n # single quote style\n str = re.sub(r\"'\", '\\\\\"', str)\n str = re.sub(r'\"', \"'\", str)\n # add trailing commas\n str = 
re.sub(r\"([0-9]|'|]|})\\s*$\", r\"\\1,\", str, flags=re.MULTILINE)\n # remove final trailing comma\n str = re.sub(r\",$\", \"\", str)\n # make keys integers, remove leading zeroes.\n str = re.sub(r\"'0*([0-9]+)': {\", r\"\\1: {\", str)\n f.write(str)\n f.write(\";\\n\\n\")\n\n f.write(\"if (typeof module !== 'undefined')\\n\")\n f.write(\" module.exports = %s;\\n\" % variable)\n\n print(\"wrote: %s\" % filename)\n\n\ndef save_error(header, what, map, key):\n with open(config[\"log\"][\"error\"], \"a\") as error_log:\n error_log.write(\"%s %s: %s\" % (header, what, map[key]))\n\n\nif __name__ == \"__main__\":\n actions_table = get_data_table(\"Action\")\n jobs_table = get_data_table(\"ClassJob\")\n\n actions = tree()\n jobs = defaultdict()\n\n # Restructure each row as a dictionary\n for job in ({k: v for k, v in zip(jobs_table[0], row)} for row in jobs_table[1:]):\n # Nest the data inside a new dictionary using the ID as the key\n jobs[job.pop(\"ID\")] = job\n\n # This big pile of loop takes the header row and the data row and merges them together as a dictionary\n # Then it does some filtering to validate the action data.\n # Then it sorts the actions into relevant categories and nests the data as class/job abbreviation (ADV, GLA, DRK, etc.) then power name\n # Structure should end up looking vaguely like {'pve': {'CLS':{'AbilityName':{'RemainingKey':'Value'}}}}\n for action in ({k: v for k, v in zip(actions_table[0], row) if k} for row in actions_table[1:]):\n # It seems that any action not slottable on the hotbar is not considered a player action. This isn't going to be a useful check.\n is_player_action = action[\"IsPlayerAction\"] == \"True\"\n\n # They seem to use -1 for deprecated actions. And apparently role actions.\n is_valid_classjob = int(action[\"ClassJob\"]) >= 0\n is_role_action = action[\"IsRoleAction\"] == \"True\"\n\n # Categories 30 and 31 are DoW and DoM respectively, 32 and 33 are DoH and DoL respectively. 
0 is ADV which is the base for everything\n is_combat_classjob = is_role_action or (\n is_valid_classjob and int(jobs[action[\"ClassJob\"]][\"ClassJobCategory\"]) in [0, 30, 31]\n )\n is_crafting_classjob = is_valid_classjob and int(\n jobs[action[\"ClassJob\"]][\"ClassJobCategory\"]\n ) in [0, 32, 33]\n\n # We keep the ID as the key for invalid actions in the event of a name collision.\n if action[\"Name\"] and is_combat_classjob:\n if action[\"IsPvP\"] == \"False\":\n if (\n not is_role_action\n and action[\"Name\"] in actions[\"pve\"][jobs[action[\"ClassJob\"]][\"Abbreviation\"]]\n ) or (is_role_action and action[\"Name\"] in actions[\"ADV\"]):\n actions[\"invalid\"][action.pop(\"ID\")] = action\n else:\n if int(action[\"Action{Combo}\"]) > 0:\n actions[\"combo\"][action[\"ID\"]][\"Name\"] = action[\"Name\"]\n actions[\"combo\"][action[\"ID\"]][\"Previous\"][action[\"Action{Combo}\"]] = \"\"\n actions[\"combo\"][action[\"Action{Combo}\"]][\"Next\"][action[\"ID\"]] = action[\n \"Name\"\n ]\n if is_role_action:\n actions[\"pve\"][\"ADV\"][action.pop(\"Name\")] = action\n else:\n actions[\"pve\"][jobs[action[\"ClassJob\"]][\"Abbreviation\"]][\n action.pop(\"Name\")\n ] = action\n elif action[\"IsPvP\"] == \"True\" and not is_role_action:\n if action[\"Name\"] in actions[\"pvp\"][jobs[action[\"ClassJob\"]][\"Abbreviation\"]]:\n actions[\"invalid\"][action.pop(\"ID\")] = action\n else:\n actions[\"pvp\"][jobs[action[\"ClassJob\"]][\"Abbreviation\"]][\n action.pop(\"Name\")\n ] = action\n elif action[\"Name\"] and is_player_action and is_crafting_classjob:\n if action[\"Name\"] in actions[\"crafting\"][jobs[action[\"ClassJob\"]][\"Abbreviation\"]]:\n actions[\"invalid\"][action.pop(\"ID\")] = action\n else:\n actions[\"crafting\"][jobs[action[\"ClassJob\"]][\"Abbreviation\"]][\n action.pop(\"Name\")\n ] = action\n else:\n actions[\"invalid\"][action.pop(\"ID\")] = action\n\n for each in config[\"output\"]:\n write_js(\n os.path.join(config[\"path\"][\"cactbot\"], \"resources\", config[\"output\"][each]),\n os.path.basename(os.path.abspath(__file__)),\n each.capitalize() + \"Action\",\n {k: {normalize_name(n): v for n, v in actions[each][k].items()} for k in actions[each]},\n )\n","sub_path":"util/gen_action_info.py","file_name":"gen_action_info.py","file_ext":"py","file_size_in_byte":11660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"114990877","text":"import pandas as pd\nfrom flask import Flask, request, jsonify\nimport pickle\n\napp = Flask(__name__)\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef home():\n if(request.method==\"GET\"):\n\n data =\"hello world\"\n return jsonify({'data': data})\n\n@app.route('/predict/')\ndef salary_predict():\n model = pickle.load(open('model.pkl','rb'))\n years = request.args.get('years')\n\n test_df=pd.DataFrame({'years':[years]})\n\n pred_salary = model.predict(test_df)\n return jsonify({'salary': str(pred_salary)})\n\nif __name__==\"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"350559775","text":"from rlkit.launchers.launcher_util import run_experiment\nfrom rlkit.launchers.sets.example_set_gen import gen_example_sets_full_experiment\nfrom furniture.env.furniture_multiworld import FurnitureMultiworld\n\nvariant = dict(\ndo_state_exp=True,\n example_set_variant=dict(\n n=50,\n subtask_codes=[\n {17: 17, 18: 18, 19: 
19},\n {14: 14, 15: 15, 16: 16},\n {11: 11, 12: 12, 13: 13},\n ],\n other_dims_random=True,\n ),\n env_class=FurnitureMultiworld,\n env_kwargs=dict(\n name=\"FurnitureCursorRLEnv\",\n unity=False,\n tight_action_space=True,\n preempt_collisions=True,\n boundary=[0.5, 0.5, 0.95],\n pos_dist=0.2,\n num_connect_steps=0,\n num_connected_ob=False,\n num_connected_reward_scale=5.0,\n goal_type='zeros', # reset\n reset_type='var_2dpos+no_rot', # 'var_2dpos+var_1drot', 'var_2dpos+objs_near',\n\n control_degrees='3dpos+select+connect',\n obj_joint_type='slide',\n connector_ob_type=None, # 'dist',\n\n move_speed=0.05,\n\n reward_type='state_distance',\n\n clip_action_on_collision=True,\n\n light_logging=True,\n\n furniture_name='shelf_ivar_0678_4obj_bb',\n anchor_objects=['1_column'],\n goal_sampling_mode='uniform',\n task_type='select2+move2',\n ),\n imsize=256,\n)\n\nif __name__ == \"__main__\":\n run_experiment(\n method_call=gen_example_sets_full_experiment,\n variant=variant,\n exp_prefix='shelf-4obj-oracle-goal-example-set', # change to exp_name is this doesn't work\n )\n","sub_path":"experiments/soroush/lha/example_set_gen/shelf4obj_oracle_goal.py","file_name":"shelf4obj_oracle_goal.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"59065746","text":"from tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, BatchNormalization, Activation, MaxPooling2D, Conv2D\nfrom keras.constraints import maxnorm\nfrom keras.utils import np_utils\nfrom A1 import preprocess_data as lp\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras import optimizers\nimport os\nfrom os.path import dirname, abspath, split\n\nbasedir = dirname(dirname(abspath(__file__)))\nsaved_model = os.path.join(basedir, 'A1')\nsaved_model = os.path.join(saved_model, 'A1_NN_Model')\n\n\"\"\"\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\"\"\"\n\n\ndef get_data(extract_features):\n X, Y = lp.preprocess(extract_features)\n dataset_size = X.shape[0]\n training_size = int(dataset_size * 0.8)\n validation_size = training_size + int(dataset_size * 0.1)\n tr_X = X[:training_size]\n tr_Y = Y[:training_size]\n va_X = X[training_size:validation_size]\n va_Y = Y[training_size:validation_size]\n te_X = X[validation_size:]\n te_Y = Y[validation_size:]\n\n return tr_X, tr_Y, va_X, va_Y, te_X, te_Y\n\n\ndef execute(testing):\n # Global variables\n #extract_features = False\n\n # loading in the data\n tr_X, tr_Y, va_X, va_Y, te_X, te_Y = get_data(False)\n\n # normalize the inputs from 0-255 to between 0 and 1 by dividing by 255\n tr_X = tr_X.astype('float32')\n va_X = va_X.astype('float32')\n te_X = te_X.astype('float32')\n tr_X = tr_X / 255.0\n te_X = te_X / 255.0\n va_X = va_X / 255.0\n\n\n # reshape to include 1 for grayscale colours\n tr_X = tr_X.reshape(tr_X.shape[0], tr_X.shape[1], tr_X.shape[2], 1)\n va_X = va_X.reshape(va_X.shape[0], va_X.shape[1], va_X.shape[2], 1)\n te_X = te_X.reshape(te_X.shape[0], te_X.shape[1], te_X.shape[2], 1)\n\n # one hot encode outputs\n tr_Y = np_utils.to_categorical(tr_Y)\n va_Y = np_utils.to_categorical(va_Y)\n te_Y = np_utils.to_categorical(te_Y)\n class_num = te_Y.shape[1]\n input_shape = (tr_X.shape[1], tr_X.shape[2], 1)\n\n if not testing:\n model = Sequential()\n\n # Convolutional layers\n model.add(Conv2D(100, (4, 4), input_shape=input_shape, activation='relu', padding='same'))\n 
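# Shape sketch (illustrative, assuming a square grayscale input of size H x W):\n # with padding='same' the Conv2D above preserves the spatial size, so an (H, W, 1) input\n # becomes (H, W, 100) here, and the 2x2 MaxPooling2D below then halves it to (H//2, W//2, 100).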
model.add(MaxPooling2D(pool_size=(2, 2)))\n #model.add(Dropout(0.3))\n model.add(BatchNormalization())\n model.add(Conv2D(200, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n #model.add(Dropout(0.3))\n model.add(BatchNormalization())\n #model.add(Conv2D(200, (2, 2), activation='relu', padding='same'))\n\n model.add(Flatten())\n #model.add(Dropout(0.3))\n\n model.add(Dense(128, kernel_constraint=maxnorm(3)))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n model.add(BatchNormalization())\n\n model.add(Dense(64, kernel_constraint=maxnorm(3)))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n model.add(BatchNormalization())\n\n model.add(Dense(class_num)) #Final layer has same number of neurons as classes\n model.add(Activation('softmax'))\n\n epochs = 20\n batch_size = 64\n optimizer = optimizers.Nadam(lr=0.0001)\n\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n es_callback = EarlyStopping(monitor='val_loss', patience=10)\n\n history = model.fit(tr_X, tr_Y, validation_data=(va_X, va_Y), epochs=epochs, batch_size=batch_size)\n #model.save(\"A1_NN_Model\")\n #print(\"Saved Neural Network Model\")\n \"\"\"\n plt.plot(history.history['loss'],marker='x')\n plt.plot(history.history['val_loss'], marker='x')\n plt.title(\"Learning Rate Curve for A1's CNN Model\")\n plt.ylabel('Cost', fontsize='large', fontweight='bold')\n plt.xlabel('Number of Epochs', fontsize='large', fontweight='bold')\n plt.legend(['train', 'test'], loc='upper left')\n plt.rcParams.update({'font.size': 18})\n plt.show()\n \"\"\"\n print(history.history[\"accuracy\"][epochs-1])\n\n # Model evaluation\n scores = model.evaluate(te_X, te_Y, verbose=0)\n print(\"Accuracy: %.2f%%\" % (scores[1] * 100))\n\n return history.history[\"accuracy\"][epochs - 1] * 100, scores[1] * 100\n\n else:\n print(\"Loaded Neural Network Model\")\n model = load_model(saved_model)\n\n # Model evaluation\n scores = model.evaluate(te_X, te_Y, verbose=0)\n print(\"Accuracy: %.2f%%\" % (scores[1]*100))\n return scores[1] * 100\n\n\n","sub_path":"A1/ANN.py","file_name":"ANN.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"597603364","text":"# print square pattern with digits in descending order \n# 5 5 5 5 5\n# 4 4 4 4 4\n# 3 3 3 3 3 \n# 2 2 2 2 2\n# 1 1 1 1 1\nn = int(input(\"Enter number of rows \")) # 4 \n# for i in range(n):\n# for j in range(n):\n# print(n-i,end=\" \")\n# print()\nfor i in range(n):\n print((str(n-i)+\" \")*n)\n","sub_path":"square/eight.py","file_name":"eight.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"463294228","text":"\n\n# You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. 
Add the two numbers and return it as a linked list.\n\n# You may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\n# The input lists store digits in reverse order with no leading zeros, and the result is built in reverse order too (new nodes are appended after curr). If the output had to be in forward order, new nodes would be inserted before curr instead. Worked example: 342 + 465 = 807 is (2->4->3) + (5->6->4) -> (7->0->8).\n\n\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2 \n \"\"\"\n def addLists(self, l1, l2):\n # write your code here\n #\n dummy = ListNode('#')\n curr = dummy # new nodes are appended after curr\n carry = 0 \n while l1 or l2:\n if l1:\n carry += l1.val\n l1 = l1.next\n if l2:\n carry += l2.val\n l2 = l2.next\n curr.next = ListNode(carry % 10)\n curr = curr.next\n carry //= 10\n \n if carry > 0:\n curr.next = ListNode(carry)\n \n return dummy.next","sub_path":"Add Two Numbers.py","file_name":"Add Two Numbers.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"393666846","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nfrom os import listdir\n\nAUTHOR = u'Vincent Beffara'\nSITENAME = u'Vincent Beffara'\nSITEURL = ''\n\nPLUGIN_PATH = \"plugins\"\nPLUGINS = ['jmd','sitemap','assets']\n\nTHEME = \"theme\"\n\nSTATIC_PATHS = [\"images\", \"pdfs\", \"pics\", \"misc\"]\nEXTRA_PATH_METADATA = { \"misc/\" + f : { \"path\" : f } for f in listdir(\"content/misc\") }\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = u'en'\n\nSITEMAP = { 'format' : 'xml' }\n\nFEED_ALL_ATOM = None # \"atom.xml\"\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\n\nLINKS = (('Images des Maths', 'http://images.math.cnrs.fr/'),\n ('UMPA', 'http://www.umpa.ens-lyon.fr/index.php'),\n ('ENS Lyon', 'http://www.ens-lyon.fr/'),\n ('CNRS', 'http://www.cnrs.fr/'),\n ('MathSciNet', 'http://www.ams.org/mathscinet/search'),\n ('ArXiv (front)', 'http://front.math.ucdavis.edu/'))\n\nPAGE_URL = '{slug}.html'\nARTICLE_URL = 'blog/{date:%Y-%m-%d}_{slug}.html'\n\nARCHIVES_SAVE_AS = ''\nARTICLE_SAVE_AS = ARTICLE_URL\nAUTHOR_SAVE_AS = ''\nAUTHORS_SAVE_AS = ''\nCATEGORY_SAVE_AS = ''\nCATEGORIES_SAVE_AS = ''\nPAGE_SAVE_AS = PAGE_URL\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"167672054","text":"import base64\r\nimport datetime\r\nimport json\r\nfrom google.cloud import iot_v1\r\nfrom google.cloud import firestore\r\nimport google.cloud.exceptions # needed so the google.cloud.exceptions.NotFound handlers below resolve\r\n\r\nproject_id = 'smarthouseiot-261017'\r\ncloud_region = 'europe-west1'\r\nregistry_id = 'smarthouse'\r\ndevice_id = 'serverZWAVE'\r\ncollection_name = 'ROOM02'\r\nhistory_name = collection_name + '_HISTORY'\r\nversion = 0\r\nretention_days = 5\r\n\r\ndef zwave_func(event, context):\r\n \r\n time = context.timestamp\r\n y_m_d = time.split('T')[0].split('-')\r\n h_m = time.split('T')[1].split(':')\r\n \r\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\r\n message = json.loads(pubsub_message)\r\n \r\n db = firestore.Client()\r\n collection_ref = db.collection(collection_name)\r\n try:\r\n collection = collection_ref.get()\r\n except google.cloud.exceptions.NotFound:\r\n print('LOG: No collection ROOM02 found ! 
Exiting...')\r\n return 0\r\n history_ref = db.collection(history_name)\r\n try:\r\n history = history_ref.get()\r\n except google.cloud.exceptions.NotFound:\r\n print('LOG: No collection ROOM02_HISTORY found ! Exiting...')\r\n return 0\r\n \r\n y_m_d = int(y_m_d[0] + y_m_d[1] + y_m_d[2])\r\n h_m = int(h_m[0] + h_m[1])\r\n if ((h_m > 2300) and (h_m < 2302)):\r\n # y_m_d holds today's date as an int YYYYMMDD; subtracting retention_days approximates the cutoff and matches how zDATE is stored below\r\n docs = history_ref.where(u'zDATE', u'<', (y_m_d - retention_days)).stream()\r\n for doc in docs:\r\n doc.reference.delete()\r\n \r\n status = {}\r\n status['SENSOR01'] = message['SENSOR01']\r\n status['LAMP01'] = message['LAMP01']\r\n status['zDATE'] = y_m_d\r\n \r\n document = time + '_z'\r\n db.collection(history_name).document(document).set(status)\r\n \r\n temp = message[\"LAMP01\"]\r\n del message[\"LAMP01\"]\r\n db.collection(collection_name).document(u'zLAMP01').update({u'RealVal' : temp[\"real_val\"]})\r\n for sensors in message:\r\n print(\"Sensor found:\" + sensors)\r\n db.collection(collection_name).document('z'+sensors).set(message[sensors])\r\n \r\n db_zparam = db.collection(collection_name).document(u'zLAMP01').get().to_dict()\r\n realValue = db_zparam['RealVal']\r\n setValue = db_zparam['SetVal']\r\n \r\n if realValue != setValue:\r\n \tclt = iot_v1.DeviceManagerClient()\r\n \tdevice_path = clt.device_path(project_id, cloud_region, registry_id, device_id)\r\n \r\n \tconfig = {}\r\n \tconfig[\"type\"] = \"config\"\r\n \tconfig[\"device\"] = \"lamp01\"\r\n \tconfig[\"value\"] = setValue\r\n\r\n \tconfig = str(config)\r\n \tconfig = config.replace('\\'', '\"')\r\n\r\n \tdata = str(config).encode('utf-8')\r\n\r\n \tclt.modify_cloud_to_device_config(device_path, data, version)\r\n","sub_path":"GoogleCloud/GoogleFunctions/zFunctionRoom02.py","file_name":"zFunctionRoom02.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"178861511","text":"import os\nimport json\nimport urllib\nimport time\nimport shutil\n\nfrom mapbox import Static\n\n\nMAPBOX_ACCESS_TOKEN = os.environ['MAPBOX_ACCESS_TOKEN']\nservice = Static(access_token=MAPBOX_ACCESS_TOKEN)\n\n\nclass ImagesManager:\n\n def __init__(self, sites_data_filepath='json/sites.json'):\n self._STANDARD_ZOOM = 15\n self._STATIC_DIRECTORY = 'static/'\n self._BASE_IMAGES_DIRECTORY = 'base_images/'\n self._CURRENT_IMAGE_DIRECTORY = 'current_image/'\n self._LABELED_IMAGES_DIRECTORY = 'labeled_images/'\n self._SITES_DATA_FILEPATH = sites_data_filepath\n self._IMAGE_WIDTH = 1000\n self._IMAGE_HEIGHT = 1000\n\n self._sites_dict = self.get_sites_dict()\n self._unchecked_sites = self._sites_dict['unchecked']\n self._water_sites = self._sites_dict['water']\n self._no_water_sites = self._sites_dict['no_water']\n\n self._current_site = None\n next_image_is_ready = self.initialize_next_image()\n\n if not next_image_is_ready:\n raise Exception('No sites left.')\n\n def save_sites_dict(self):\n self._sites_dict = {\n 'unchecked': self._unchecked_sites,\n 'water': self._water_sites,\n 'no_water': self._no_water_sites\n }\n\n with open(self._SITES_DATA_FILEPATH, 'w+') as fp:\n json.dump(self._sites_dict, fp)\n\n def get_current_zoom(self):\n return self._current_site.get('zoom')\n\n def get_num_images_left(self):\n num_images_left = len(self._unchecked_sites)\n return num_images_left\n\n def classify_site(self, image_label):\n if image_label < 2:\n if image_label == 0:\n self._no_water_sites.append(self._current_site)\n label = 'no_water'\n elif image_label == 1:\n 
self._water_sites.append(self._current_site)\n label = 'water'\n\n current_image_filepath = self.get_current_image_filepath()\n labeled_image_filepath = self.get_labeled_image_filepath(label)\n shutil.copyfile(current_image_filepath, labeled_image_filepath)\n\n self._unchecked_sites.pop(0)\n self.delete_current_image()\n self.save_sites_dict()\n\n def initialize_next_image(self):\n if self._unchecked_sites:\n self._current_site = self._unchecked_sites[0]\n self._current_site['zoom'] = self._STANDARD_ZOOM\n self.load_image_from_base_dir()\n\n return True\n\n return False\n\n def next_image(self, image_label):\n self.classify_site(image_label)\n next_image_is_ready = self.initialize_next_image()\n\n return next_image_is_ready\n\n def update_image(self, zoom):\n download_status_code = self.download_image(zoom)\n\n if download_status_code == 200:\n self._current_site['zoom'] = zoom\n \n return download_status_code\n\n def get_sites_dict(self):\n with open(self._SITES_DATA_FILEPATH) as fp:\n sites_dict = json.load(fp)\n\n return sites_dict\n\n def print_image_log(self):\n print('Downloaded {}'.format(self._current_site.get('name')))\n print('Zoom: {}'.format(self._current_site.get('zoom')))\n print('{}.png'.format(self._current_site.get('site_code')))\n\n def delete_current_image(self):\n image_filepath = self.get_current_image_filepath()\n os.remove(image_filepath)\n\n def load_image_from_base_dir(self):\n base_image_filepath = self.get_base_image_filepath()\n current_image_filepath = self.get_current_image_filepath()\n shutil.copyfile(base_image_filepath, current_image_filepath)\n\n def get_base_image_filepath(self):\n return self.get_image_filepath(self._BASE_IMAGES_DIRECTORY)\n\n def get_current_image_filepath(self):\n return self.get_image_filepath(self._CURRENT_IMAGE_DIRECTORY)\n\n def get_labeled_image_filepath(self, label):\n subdirectory = os.path.join(self._LABELED_IMAGES_DIRECTORY, label)\n return self.get_image_filepath(subdirectory)\n\n def get_image_filepath(self, subdirectory):\n site_code = self._current_site.get('site_code')\n image_filename = '{}.png'.format(site_code)\n image_filepath = os.path.join(self._STATIC_DIRECTORY,\n subdirectory,\n image_filename)\n\n return image_filepath\n\n def download_image(self, zoom):\n response = self.get_mapbox_api_response(zoom)\n\n if response.status_code == 200:\n image_filepath = self.get_current_image_filepath()\n\n with open(image_filepath, 'wb') as output:\n output.write(response.content)\n\n self.print_image_log()\n\n return response.status_code\n\n def get_mapbox_api_response(self, zoom):\n lat = self._current_site.get('lat')\n lon = self._current_site.get('lon')\n\n response = service.image('mapbox.satellite',\n lon=lon,\n lat=lat,\n z=zoom,\n width=self._IMAGE_WIDTH,\n height=self._IMAGE_HEIGHT)\n\n return response\n","sub_path":"data_labelling/images_api.py","file_name":"images_api.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"578023065","text":"from can_test import CanTest\nfrom tkinter import *\nfrom tkinter import messagebox\nimport inspect\nfrom threading import Thread\nimport sys\nfrom io import StringIO\nimport time\nimport os\n\nclass Gui:\n def __init__(self, can_test):\n self.can_test = can_test\n self.func_list = inspect.getmembers(self.can_test, inspect.ismethod)[1:]\n self.selected_func = None\n self.console_str = \"\"\n self.logging = False\n self.stop_thread = Thread(target=self.stop_logging, args=[])\n 
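# Note on the Thread objects built here (illustrative): Thread(target=f) runs f on a new thread\n # only via .start(); halt() and clear() below call .run() instead, which executes the target\n # synchronously on the calling thread rather than spawning a new one.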
self.halted=False\n \n self.master = Tk()\n self.console = Frame(self.master)\n self.status_bar = Frame(self.master, relief=\"sunken\")\n self.tool_bar = Frame(self.master)\n self.master.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n\n #######################################\n\n self.master_menu = Menu(self.master)\n self.master.config(menu=self.master_menu)\n\n self.file_menu = Menu(self.master_menu)\n self.file_menu.add_command(label=\"Choose Test\", command=self.choose_test)\n self.file_menu.add_command(label=\"Save Console\", command=self.open_save_window)\n self.master_menu.add_cascade(label=\"File\", menu=self.file_menu)\n\n self.test_menu = Menu(self.master_menu)\n self.test_menu.add_command(label=\"Run\", command=self.run)\n self.test_menu.add_command(label=\"Halt\", command=self.halt)\n self.master_menu.add_cascade(label=\"Test\", menu=self.test_menu)\n\n self.console_menu = Menu(self.master_menu)\n self.console_menu.add_command(label=\"Clear\", command=self.clear)\n self.master_menu.add_cascade(label=\"Console\", menu=self.console_menu)\n\n ###########################################\n\n self.console_output = Text(self.console)\n self.console_output.pack(fill=\"both\", expand=True)\n \n self.test_name = Label(self.status_bar, text=\"Current Test: None\")\n self.test_name.pack(side=\"left\")\n \n self.test_status = Label(self.status_bar, text=\"Test Status: Halted\", bg=\"red\")\n self.test_status.pack(side=\"right\")\n \n self.new_image = PhotoImage(file=\"New.png\")\n self.start_image = PhotoImage(file=\"Start.png\")\n self.stop_image = PhotoImage(file=\"Stop.png\")\n \n self.new_button = Button(self.tool_bar, image=self.new_image, command=self.choose_test)\n self.new_button.pack(side=\"left\")\n \n self.start_button = Button(self.tool_bar, image=self.start_image, command=self.run)\n self.start_button.pack(side=\"left\")\n \n self.stop_button = Button(self.tool_bar, image=self.stop_image, command=self.halt)\n self.stop_button.pack(side=\"left\")\n\n ###########################################\n\n self.selection_window = Toplevel()\n self.selection_window.title(\"Select Test\")\n self.selection_window.transient(self.master)\n self.selection_window.protocol(\"WM_DELETE_WINDOW\", lambda : self.on_delete(self.selection_window))\n \n self.save_window = Toplevel()\n self.save_window.title(\"Save Console\")\n self.save_window.transient(self.master)\n self.save_window.protocol(\"WM_DELETE_WINDOW\", lambda : self.on_delete(self.save_window))\n self.frames = []\n\n for func in self.func_list:\n frame = Frame(self.selection_window)\n label = Label(frame, text=func[0])\n button = Button(frame, text=\"Select\", command=lambda x=func[0]: self.select_func(x))\n label.pack(side=\"left\")\n button.pack(side=\"right\")\n frame.pack(side=\"top\", anchor=\"e\")\n \n self.container = Frame(self.save_window)\n self.name_entry = Frame(self.container)\n self.name_label = Label(self.name_entry, text=\"File Name: \")\n self.name_text = Text(self.name_entry, height=1, width=30)\n self.name_text.bind(\"\", lambda event : self.save_console())\n self.name_label.grid(row=0)\n self.name_text.grid(row=0, column=1)\n self.save_button = Button(self.container, text=\"Save\", command=self.save_console)\n self.name_entry.grid(row=0, columnspan=2)\n self.save_button.grid(row=1, columnspan=2)\n self.container.grid(pady=\"5px\", padx=\"5px\")\n \n self.selection_window.withdraw()\n self.save_window.withdraw()\n\n ###########################################\n \n self.tool_bar.pack(side=\"top\", fill=\"x\", 
expand=False)\n self.console.pack(side=\"top\", fill=\"both\", expand=True)\n self.status_bar.pack(side=\"bottom\", fill=\"x\", expand=False)\n self.master.geometry(\"600x500\")\n\n for func in self.func_list:\n print(func)\n\n def choose_test(self):\n self.selection_window.deiconify()\n\n def run(self):\n if self.selected_func:\n if not self.can_test.run:\n self.can_test.run = True\n self.logging = True\n self.halted = False\n self.test_status.config(bg=\"#ffffff000\", text=\"Test Status: In Progress\")\n self.test_thread = Thread(target=self.run_literal, args=[])\n self.test_thread.start()\n \n else:\n messagebox.showwarning(\"Error\", \"Test already in progress\")\n\n else:\n messagebox.showwarning(\"Error\", \"Please select a test\")\n\n def halt(self):\n self.stop_thread = Thread(target=self.stop_testing, args=[])\n self.stop_thread.run()\n\n def clear(self):\n self.stop_thread = Thread(target=self.stop_logging, args=[])\n self.stop_thread.run()\n time.sleep(.1)\n self.can_test.output_buffer = \"\"\n self.console_output.delete(1.0, \"end\")\n time.sleep(.1)\n self.logging = True\n self.console_thread = Thread(target=self.console_log, args=[])\n self.console_thread.start()\n\n def select_func(self, func):\n self.selection_window.withdraw()\n self.selected_func = self.func_wrapper(func)\n self.test_name.config(text=\"Current Test: {}\".format(func))\n print(self.selected_func)\n \n def func_wrapper(self, func):\n return \"self.can_test.{}()\".format(func) + \"\\n\" + \"\"\"self.can_test.run = False\nif self.halted:\n self.test_status.config(bg=\"red\", text=\"Test Status: Halted\")\nelse:\n self.test_status.config(bg=\"#000fff000\", text=\"Test Status: Completed\")\"\"\"\n \n def console_log(self):\n while self.logging:\n time.sleep(.1)\n if self.logging:\n data = self.can_test.output_buffer\n if self.logging:\n console_contents = self.console_output.get(1.0, \"end\")\n if self.logging:\n if len(console_contents) - 1 != len(data):\n if self.logging:\n self.console_output.insert(\"end\", data[len(console_contents) - 1:])\n if self.logging:\n self.console_output.see(\"end\")\n \n def mainloop(self):\n self.logging = True\n self.console_thread = Thread(target=self.console_log, args=[])\n self.console_thread.start()\n self.master.mainloop()\n \n def save_console(self):\n file_name = self.name_text.get(1.0, \"end\").rstrip(\"\\r\\n\")\n \n with open(file_name, \"w\") as file:\n file.write(self.console_output.get(1.0, \"end\"))\n \n self.console_output.delete(1.0, \"end\")\n self.save_window.withdraw()\n \n def open_save_window(self):\n self.save_window.deiconify()\n \n def stop_logging(self):\n try:\n self.logging = False\n time.sleep(.1)\n except:\n pass\n \n def stop_testing(self):\n self.halted = True\n try:\n self.can_test.run = False\n time.sleep(.1)\n except:\n pass\n \n \n def on_closing(self):\n self.stop_logging()\n self.stop_testing()\n os.system(\"sudo /sbin/ip link set can0 down\")\n self.master.destroy()\n \n def on_delete(self, window):\n window.withdraw()\n \n def run_literal(self):\n exec(self.selected_func)\n \nif __name__ == \"__main__\":\n can_test = CanTest(\"am\", 8)\n can_gui = Gui(can_test)\n can_gui.mainloop()\n","sub_path":"can_test_gui.py","file_name":"can_test_gui.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"603222728","text":"def circle():\n radius_text = input('What is the radius of your circle? 
: ')\n pi = 3.14\n try:\n r = float(radius_text)\n circumference = 2 * pi * r # C = 2*pi*r\n print(\"Your circle's circumference is: \" + str(circumference))\n except ValueError:\n print(\"I'm sorry. That is not correct!\")\n\ncircle()","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"307995915","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\n\nimport sys\nimport multiprocessing\n\n\nclass CaseReader(object):\n def __init__(self, cases=None):\n if cases is None:\n cases = sys.argv[1]\n if isinstance(cases, str):\n cases = open(cases)\n cases.seek(0)\n coin_length, total_cases = cases.readlines().pop().split(\" \")\n self.coin_length = abs(int(coin_length))\n self.total_cases = abs(int(total_cases))\n self._value = int(\"1{:0>#0{}}\".format(1, self.coin_length - 1), 2)\n while len(self.value) < self.coin_length:\n next(self)\n\n def __len__(self):\n return self.total_cases\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self._value += 2\n value = self.value\n if len(value) > self.coin_length:\n raise StopIteration()\n return value\n\n @property\n def value(self):\n return str(bin(self._value)).replace(\"0b\", \"\", 1)\n\n\ndef get_divisor(number):\n number = abs(int(number))\n if number <= 1:\n return None\n if not number % 2:\n return 2\n if not number % 3:\n return 3\n\n def multipliers(value=5):\n while True:\n yield value\n value += 6\n\n def find(divisors):\n \"\"\"\n https://en.wikipedia.org/wiki/Primality_test#Pseudocode\n \"\"\"\n while True:\n value = next(divisors)\n if pow(value, 2) > number:\n return None\n if not number % value:\n return value\n value += 2\n if not number % value:\n return value\n return find(multipliers())\n\n\ndef iter_cases(tests):\n for item in range(tests.total_cases):\n iter_values(tests)\n\n\ndef iter_values(tests):\n for value in tests:\n if iter_bases(value):\n return\n\n\ndef iter_bases(value):\n divisors = [value, None]\n for base in range(2, 11):\n base_value = int(value, base)\n divisors.insert(base, get_divisor(base_value))\n if divisors[base] is None:\n return False\n print(\"{0} {2} {3} {4} {5} {6} {7} {8} {9} {10}\".format(*divisors))\n return True\n\n\nif __name__ == \"__main__\":\n print(\"Case #1:\")\n iter_cases(CaseReader())\n","sub_path":"codes/CodeJamCrawler/16_0_3/oliveiraev/coin_jam.py","file_name":"coin_jam.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"48520047","text":"from django.conf.urls import include, url\n\nfrom aor_messages.views import AorMessageView, AorConversationView, \\\n AorReplyView, AorWriteView\n\nfrom postman.urls import urlpatterns as postman_urlpatterns\n\nmerged_urlpatterns = ([\n url(r'^reply/(?P[\\d]+)/$', AorReplyView.as_view(), name='reply'),\n url(r'^view/(?P[\\d]+)/$', AorMessageView.as_view(), name='view'),\n url(r'^view/t/(?P[\\d]+)/$', AorConversationView.as_view(), name='view_conversation'),\n url(r'^write/(?:(?P[^/#]+)/)?$', AorWriteView.as_view(), name='write'),\n] + [\n u for u in postman_urlpatterns if u.name not in ['reply', 'view', 'view_conversation', 'write']\n], 'postman')\n\nurlpatterns = [\n url(r'^', include(merged_urlpatterns))\n]\n","sub_path":"aor_messages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"462777521","text":"from users.models import User\nfrom 
django.views.generic import ListView\nfrom docs.models import DocList\nfrom common.template.doc import get_template_user_doc\n\n\nclass UserLoadDoclist(ListView):\n\ttemplate_name, paginate_by = None, 15\n\n\tdef get(self,request,*args,**kwargs):\n\t\tself.user = User.objects.get(pk=self.kwargs[\"pk\"])\n\t\tself.list = DocList.objects.get(uuid=self.kwargs[\"uuid\"])\n\t\tself.template_name = get_template_user_doc(self.list, \"docs/user/\", \"list.html\", request.user, request.META['HTTP_USER_AGENT'])\n\t\treturn super(UserLoadDoclist,self).get(request,*args,**kwargs)\n\n\tdef get_context_data(self,**kwargs):\n\t\tcontext = super(UserLoadDoclist,self).get_context_data(**kwargs)\n\t\tcontext['user'] = self.user\n\t\tcontext['list'] = self.list\n\t\treturn context\n\n\tdef get_queryset(self):\n\t\tlist = self.list.get_docs()\n\t\treturn list\n","sub_path":"docs/view/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"80516288","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport copy\n\nv2rayBaseConf = {\n \"log\": {\n \"access\": \"/var/log/v2ray/access.log\",\n \"error\": \"/var/log/v2ray/error.log\",\n \"logLevel\": \"none\"\n },\n \"inbounds\": [\n\n ],\n \"outbounds\": [\n {\n \"settings\": {},\n \"protocol\": \"freedom\",\n \"tag\": \"direct\"\n },\n {\n \"settings\": {},\n \"protocol\": \"blackhole\",\n \"tag\": \"blocked\"\n }\n ],\n \"routing\": {\n \"strategy\": \"rules\",\n \"settings\": {\n \"domainStrategy\": \"AsIs\",\n \"rules\": [\n {\n \"type\": \"field\",\n \"ip\": [\n \"geoip:cn\",\n \"geoip:private\"\n ],\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"field\",\n \"inboundTag\": [\"in\"],\n \"outboundTag\": \"out\"\n }\n ]\n }\n }\n}\n\n\nclass ConfMaker:\n\n def __init__(self, servers):\n self.servers = servers\n for i, server in enumerate(servers):\n server['inPort'] = 7810 + i\n\n def formatConfig(self):\n v2rayConf = copy.deepcopy(v2rayBaseConf) # deep copy: the nested inbounds/outbounds/rules lists are mutated below and must not alias the shared base config\n\n serverNameMap = {}\n for i, server in enumerate(self.servers):\n inTag = \"server_in_\" + str(i)\n outTag = \"server_out_\" + str(i)\n\n inBound = self.makeInBound(inTag, port=server['inPort'])\n outBound = self.makeOutBound(outTag, server['ip'], server['port'], server['uuid'], server['alterId'])\n rule = self.makeRule(inTag, outTag)\n\n v2rayConf['inbounds'].append(inBound)\n v2rayConf['outbounds'].append(outBound)\n v2rayConf['routing']['settings']['rules'].append(rule)\n\n serverNameMap[inTag] = {\"out\": outTag, 'name': server['remark'], 'port': server['inPort']}\n\n return v2rayConf, serverNameMap\n\n def makeRule(self, inTag, outTag):\n return {\n \"type\": \"field\",\n \"inboundTag\": [inTag],\n \"outboundTag\": outTag\n }\n\n def makeInBound(self, tag, port):\n return {\n \"port\": int(port),\n \"protocol\": \"http\",\n \"settings\": {\n \"accounts\": [],\n },\n \"tag\": tag\n }\n\n def makeOutBound(self, tag, ip, port, uuid, alterId):\n return {\n \"protocol\": \"vmess\",\n \"settings\": {\n \"vnext\": [{\n \"address\": ip,\n \"port\": int(port),\n \"users\": [\n {\n \"id\": uuid,\n \"alterId\": alterId\n }\n ]\n }]\n },\n \"streamSettings\": {\n \"network\": \"tcp\"\n },\n \"tag\": tag\n }\n","sub_path":"confMaker.py","file_name":"confMaker.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"290682061","text":"from pyfcm import FCMNotification\nimport os\n\n\nclass 
PushNotificationService():\n\n def send_push_notification(self, registration_ids):\n push_service = FCMNotification(api_key=os.getenv('FCM_API_KEY'))\n #registration_ids = ['e60w7vyvSrY:APA91bGMqp8VNOinW4fD61CbS9_HI6Ty1aZc98jl7fZhoJWh3JGnIAn7IimN7fUB8r9bKhB8V7vua-pArr8CsChxNDFC46lhCsj8fHoIsMTm2CFDarFxeGrFA69o-JfnJIg4WVDPYr4b'] #([devices])\n data_message = {\n \"title\": \"Kulkuneuvo saapuu!\",\n \"message\": \"Tilaamasi kulkuneuvo on pysäkilläsi hetken kuluttua\"\n }\n return push_service.notify_multiple_devices(registration_ids=registration_ids,\n data_message=data_message)\n ","sub_path":"src/push_notification_service.py","file_name":"push_notification_service.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"648729403","text":"\"\"\" Initialize the parameters of the DQN algorithm \"\"\"\nbatch_size = 32 #How many experiences to use for each training step.\nupdate_freq = 4 #How often to perform a training step.\ntest_freq = 10 #How often to perform a test network.\nsave_freq = 10 #How often to perform a save network.\ny = .99 #Discount factor on the target Q-values\nstartE = 1 #Starting chance of random action\nendE = 0.05 #Final chance of random action\nannealing_steps = 10000. #How many steps of training to reduce startE to endE.\nnum_episodes = 10000 #How many episodes of game environment to train network with.\npre_train_steps = 50000 #How many steps of random actions before training begins.\nload_model = True #Whether to load a saved model.\nh_size = 256 #The size of the final convolutional layer before splitting it into Advantage and Value streams.\ntau = 0.001 #Rate to update target network toward primary network\nn_action = 3\nn_step = 1\nZ = 1 #Pixels per a nucleotide in the rendered image\n","sub_path":"param/FFTDQN.py","file_name":"FFTDQN.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"254420793","text":"from valideer import parse\nfrom functools import wraps\nfrom urlparse import parse_qs\nfrom tornado.web import HTTPError\nfrom tornado.escape import json_decode\n\nfrom .validators import *\n\n\ndef validated(arguments=None, body=None, extra_arguments=True, extra_body=False):\n if type(body) in (dict, str):\n body = parse(body, additional_properties=extra_body)\n elif body not in (None, False):\n raise ValueError('body must be type None, False, or dict')\n if type(arguments) is dict:\n arguments = parse(arguments, additional_properties=extra_arguments)\n elif arguments not in (None, False):\n raise ValueError('arguments must be type None, False, or dict')\n\n def wrapper(method):\n @wraps(method)\n def validate(self, *args, **kwargs):\n # ------------------\n # Validate Body Data\n # ------------------\n if body:\n try:\n _body = json_decode(self.request.body) if self.request.body else {}\n except:\n # ex. 
key1=value2&key2=value2\n try:\n _body = dict([(k, v[0] if len(v) == 1 else v) for k, v in parse_qs(self.request.body, strict_parsing=True).items()])\n except:\n raise HTTPError(400, \"body was not able to be decoded\")\n\n kwargs['body'] = body.validate(_body, adapt=True)\n\n elif body is False and self.request.body:\n raise HTTPError(400, reason='No body arguments allowed')\n\n # -------------------\n # Validate URL Params\n # -------------------\n if arguments:\n # include url arguments\n if self.request.query_arguments:\n _arguments = dict([(k, v[0] if len(v) == 1 else v) for k, v in self.request.query_arguments.items() if v != [''] and k[0] != '_'])\n else:\n _arguments = {}\n kwargs[\"arguments\"] = arguments.validate(_arguments)\n\n elif arguments is False and self.request.query_arguments and not any(map(lambda a: a[0] == '_', self.request.query_arguments)):\n raise HTTPError(400, reason='No url arguments allowed')\n\n return method(self, *args, **kwargs)\n\n return validate\n return wrapper\n","sub_path":"tornwrap/validated.py","file_name":"validated.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"175994229","text":"# written 2-23-17, JLW\n# create a pickled object of tuples of human protein\n# atlas options to include in Django interface\n\nimport pickle\nf = 'human_protein_unique_tissues_cell_types.txt'\nd = [l.strip().split('\\t') for l in open(f,'rU')]\n\n# merge names, create keys\nnew_data = tuple(((str(d.index(x))+'_'+x[0][0:3]+x[1][0:3], x[0]+', '+x[1]) for x in d))\npickle.dump(new_data,open('human_protein_tuples.pkl','wb'))\n","sub_path":"human_protein_atlas/create_tuples_for_django.py","file_name":"create_tuples_for_django.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"478623454","text":"##==================================================\n# Vu Hoang Minh, MAIA\n# Lab 5 : Image Processing\n##==================================================\n\nfrom __future__ import print_function, division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import img_as_ubyte\nfrom skimage.color import rgb2gray ,gray2rgb\nfrom skimage.io import imread, imshow\nfrom skimage.measure import label, regionprops\nfrom skimage.morphology import binary_closing, binary_opening, disk, binary_erosion, binary_dilation\nfrom skimage.filters import threshold_otsu\nfrom skimage.transform import rescale\nfrom skimage.draw import circle\nfrom math import exp, expm1, floor\nfrom PIL import Image\n\n\n\n# ===============================================================\n# Segmentation of toy ===========================================\n# ===============================================================\n\n# ---------------------------------------------------------------\n# Read and rescale image\nPath = './images/'\ninputImage = 'coins.jpg'\ncoinsImage = imread(Path.__add__(inputImage))\ncoinsImage_ubyte = img_as_ubyte(coinsImage)\ncoinsImage_scaled = rescale(coinsImage_ubyte, 1 / 4)\n\n# Show scaled image\nimshow(coinsImage_scaled) #Displaying the image\nplt.title('Rescaled coins')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Convert image to gray scale\ncoinsImage_gray=rgb2gray(coinsImage_scaled)\nplt.figure()\nimshow(coinsImage_gray)\nplt.title('Gray coins')\nplt.axis('off')\n\n# Threshold image using Otsu's thresholding\nglobalThreshold 
= threshold_otsu(coinsImage_gray)\notsuThreshold = coinsImage_gray <= globalThreshold\n\n# Show thresholeded image\nplt.figure()\nimshow(otsuThreshold)\nplt.title('Otsu thresholded coins')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Applying Different Morphological operations\ndisk1 = disk(2) #Setting disk size 4x4 for erosion\n\n# Apply morphological operations\ncoinsImage_open = binary_opening(otsuThreshold, disk1, out = None)\ncoinsImage_close = binary_closing(otsuThreshold, disk1, out = None)\n\n# Clean the image using new disk and openning operation of coinsImage_bin_close\ndisk2 = disk(6)\ncoinsImage_cleaned = binary_opening(coinsImage_close, disk2, out = None)\n\n# Show images after some Morphological operations\nplt.figure()\nimshow(coinsImage_open)\nplt.title('Morphological opening with disk(2)')\nplt.axis('off')\nplt.figure()\nimshow(coinsImage_close)\nplt.title('Morphological closing with disk(2)')\nplt.figure()\nplt.axis('off')\nimshow(coinsImage_cleaned)\nplt.title('Morphological closing with disk(2) followed by openning with disk(6)')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Find labelled image\nlabelledImage , numRegion = label(coinsImage_cleaned, return_num = True, connectivity = 1)\n\n# Show labelled image with number of found regions\nplt.figure()\nimshow(labelledImage)\nplt.title('Labelled image with %d regions'%(numRegion))\nplt.axis('off')\n\n\n# Print number of found regions\nprint (\"============================================\")\nprint (\"Part 1: \")\nprint (\"Numer of found regions are : %d \" % numRegion)\n\n# Measure properties of labeled image regions\nregionedImage = regionprops(labelledImage)\n\n\n# ---------------------------------------------------------------\n# Find and display the radius of each region\ncoinsImage_cleaned_gray = gray2rgb(img_as_ubyte(coinsImage_cleaned))\nnumLabel=0\nfor regionLabel in regionedImage:\n numLabel = numLabel+1\n radius = float(regionLabel[\"major_axis_length\"] / 2) + 3 # +3 to make the circle cover the whole region\n [xCoordinate, yCoordinate] = circle(float(regionLabel[\"centroid\"][0]),\n float(regionLabel[\"centroid\"][1]),\n radius)\n # Display the radius of each region\n print(\"The radius of Region %d is %f\" %(numLabel,radius))\n # Each region has different color code\n colorCode = floor(255/numRegion*(numLabel-1))\n coinsImage_cleaned_gray[xCoordinate, yCoordinate] = (colorCode, colorCode, colorCode)\n\n# Draw the corresponding circles on the image\nplt.figure()\nimshow(coinsImage_cleaned_gray)\nplt.title(\"Circles coins with different labels\")\nplt.axis('off')\n\n\n\n\n# ===============================================================\n# Segmentation of markers =======================================\n# ===============================================================\n\n# ---------------------------------------------------------------\n# Read and display image\nPath = './images/' #Adding Path of the image\ninputImage = 'objets4.jpg' #Image name\nmarkersImage = imread(Path.__add__(inputImage)) #Reading the image\nplt.figure()\nimshow(markersImage) #Displaying the image\nplt.title('Markers')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Convert image to gray scale\nmarkersImage_ubyte = img_as_ubyte(markersImage) #Image as Ubyte\nmarkersImage_gray = rgb2gray(markersImage_ubyte)\n\nplt.figure()\nimshow(markersImage_gray) #Displaying the image\nplt.title('Gray 
markers')\nplt.axis('off')\n\n# Threshold image using Otsu's thresholding\nglobalThreshold = threshold_otsu(markersImage_gray) #applying threshiold_otsu function\notsuThreshold = markersImage_gray < globalThreshold #Setting threshold\nplt.figure()\nimshow(otsuThreshold) #Showing the image\nplt.title('Otsu thresholded markers')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Applying Different Morphological operations\ndisk1 = disk(8)\n\n# Apply morphological operations to clean the image\nmarkersImage_cleaned = binary_closing(otsuThreshold, disk1, out = None)\n\n# Show clean image\nplt.figure()\nimshow(markersImage_cleaned)\nplt.title('Morphological closing with disk(8)')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Using segmentation find the number of each object in the image.\n# Note: the result above can be considered a cleaned image. However to find the number\n# each object (marker or glue) we have to use dilation operation, so that different\n# part of one object can be connected into one\ndisk2 = disk(6)\nmarkersImage_dilated = binary_dilation(otsuThreshold, disk2, out = None)\nplt.figure()\nimshow(markersImage_dilated)\nplt.title('Morphological closing with disk(8) followed by dilation with disk(6)')\nplt.axis('off')\n\n\n# ---------------------------------------------------------------\n# Find labelled image\nlabelledImage, numRegion = label(markersImage_dilated, return_num=True)\nregionedImage = regionprops(labelledImage)\n\n# Show labelled image with number of found regions\nplt.figure()\nimshow(labelledImage)\nplt.title('Labelled image with %d regions'%(numRegion))\nplt.axis('off')\n\n# Print number of found regions\nprint (\"============================================\")\nprint (\"Part 2: \")\nprint (\"Numer of found regions are : %d \" % numRegion)\n\n# Find the number of glue and marker in the image\n# Note: I realized that the length of marker is bigger than glue's\n# and the offset I found is 100\n# Using major_axis_length, property of regionprops, to compare to\n# 100, I can find the number of each object\nnumMarkers = 0\nnumGlues = 0\nnumRow, numColumn = markersImage_gray.shape\ncolorImage = np.zeros(shape=(numRow, numColumn))\nfor regionLabel in regionedImage:\n radius = float(regionLabel[\"major_axis_length\"] / 2)\n if (radius>100):\n numMarkers = numMarkers + 1\n for point in regionLabel[\"coords\"]:\n colorImage[point[0],point[1]] = 0.6\t\t# paint color \n else:\n numGlues = numGlues + 1\n for point in regionLabel[\"coords\"]:\n colorImage[point[0],point[1]] = 1\t\t# paint color \n\nprint(\"The number of markers are : %d\" % numMarkers)\nprint(\"The number of gum are : %d\" % numGlues)\n\nplt.figure()\nimshow(colorImage)\nplt.title(\"Different object with different color\")\nplt.axis('off')\n\n\n\n\n# ===============================================================\n# Display results\n# ===============================================================\nplt.show()\n\n\n","sub_path":"5-segmentation/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":8097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"4245483","text":"import simplejson as json\nfrom collections import namedtuple\nimport csv\n\nAnalysisResult = namedtuple('AnalysisResultEx', ['commit', 'bug_count', 'bug_delta', 'diff_lines', 'is_pair', 'is_logged', 'is_merge', 'author', 'bug_text'])\nAnalysisResultShort = 
namedtuple('AnalysisResultShort', ['commit', 'bug_count', 'bug_delta', 'diff_lines', 'is_pair', 'is_logged', 'is_merge', 'author'])\n\nwith open('results_list.json', 'r') as _file:\n    _r = json.load(_file)\n    results_list = list(map(lambda i: AnalysisResultShort(i['commit'], i['bug_count'], i['bug_delta'], i['diff_lines'], i['is_pair'], i['is_logged'], i['is_merge'], i['author']), _r))\n\nwith open('results.csv', 'w') as _file:\n    csv_out = csv.writer(_file)\n    csv_out.writerow(['commit', 'bug_count', 'bug_delta', 'diff_lines', 'is_pair', 'is_logged', 'is_merge', 'author'])\n    csv_out.writerows(results_list)\n","sub_path":"scripts/export_csv.py","file_name":"export_csv.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"332283986","text":"\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom collections import Counter\nfrom scipy import ndimage\n\n#Class containing methods for simulating the Game of Life\nclass GOL(object):\n\n    #Initialises an instance based on user defined arguments. Square array of size dimension and initial conditions\n    def __init__(self,dimension,initial):\n        self.dimension = dimension\n        self.N = dimension**2.\n        self.initial = initial\n        self.time_list =[]\n        self.CoM_list = []\n        \n        #If random initial condition then array randomly filled with live and dead cells\n        if initial == 'random':\n            self.array = np.random.choice([0,1],size=(dimension,dimension))\n        \n        elif initial == 'absorbing':\n            index1 = int((np.random.uniform()*dimension))\n            index2 = int((np.random.uniform()*dimension))\n            self.array = np.full((dimension,dimension),0)\n            self.array[index1,index2] = 1\n        \n        #If blinker condition is selected then randomly creates an oscillator\n        elif initial == 'blinker':\n            self.array = np.full((dimension,dimension),0)\n            blink1 = int((np.random.uniform())*dimension)\n            blink2 = int((np.random.uniform())*dimension)\n            for i in range(3):\n                blink1 += 1\n                if blink1 >= dimension:\n                    blink1 = 0\n                self.array[blink2,blink1] = 1\n        \n        #Randomly creates a moving glider if user selects.\n        elif initial == 'glider':\n            self.array = np.full((dimension,dimension),0)\n            #g1 = int(np.random.uniform()*(dimension-1))\n            #g2 = int(np.random.uniform()*(dimension-1))\n            g1 = 1\n            g2 = 1\n            \n            self.array[g1-1,g2] = 1\n            self.array[g1+1,g2] = 1\n            self.array[g1,g2+1] = 1\n            self.array[g1+1,g2+1] = 1\n            self.array[g1+1,g2-1] = 1\n        \n        else: print(\"No such initial condition installed. Enter either 'random', 'absorbing', 'blinker' or 'glider'\")\n\n    #Instance method which returns the 8 nns of a given index while employing periodic boundary conditions\n    def NNs(self,i,j):\n        max = self.dimension-1\n        iup = (i+1)%self.dimension\n        idown = (i-1)%self.dimension\n        jup = (j+1)%self.dimension\n        jdown = (j-1)%self.dimension\n        \n        nn1 = self.array[idown,j]\n        nn2 = self.array[idown,jup]\n        nn3 = self.array[i,jup]\n        nn4 = self.array[iup,jup]\n        nn5 = self.array[iup,j]\n        nn6 = self.array[iup,jdown]\n        nn7 = self.array[i,jdown]\n        nn8 = self.array[idown,jdown]\n        \n        return [nn1,nn2,nn3,nn4,nn5,nn6,nn7,nn8]\n    \n    #Algorithm containing the rules for the GOL and imposes them on a given index. An 'alive' cell (1) will 'die' (0) if it has less than 2 or greater than 3 'alive' neighbours. 
A 'dead' cell will come alive if it has exactly 3 'alive' neighbours.\n    def Rules(self,i,j):\n        nn_list = self.NNs(i,j)\n        c = Counter(nn_list)\n        \n        if self.array[i,j] == 1:\n            if c[1]==2 or c[1]==3:\n                return 1\n            else: return 0\n\n        elif self.array[i,j] == 0 and c[1]==3: return 1\n        else: return 0\n\n    #Instance method which sweeps the entire array sequentially and then updates all in parallel\n    def Sweep(self):\n        temp_latt = np.copy(self.array)\n        for i in range(self.dimension):\n            for j in range(self.dimension):\n                value = self.Rules(i,j)\n                temp_latt[i,j] = value\n        self.array = temp_latt\n\n    #Instance method which finds the Center of Mass of the live cells in the array. Returns the index of the CoM\n    def CoM(self):\n        coors = np.argwhere(self.array==1)\n        xcom = int(np.average(coors[:,0]))\n        ycom = int(np.average(coors[:,1]))\n        return [xcom,ycom]\n    \n\n    def Get_CoM(self,time):\n        x,y=ndimage.measurements.center_of_mass(self.array)\n        #Ignore boundary conditions\n        if 3< x < (self.dimension-5) and 3< y < (self.dimension-5):\n            time+=1\n            if time%150==0:\n                time=0\n            self.time_list.append(time)\n            CoM=np.sqrt(x**2 + y**2)\n            self.CoM_list.append(CoM)\n        return (self.time_list,self.CoM_list,time) \n    \n    \n    \n    \n    #Instance method which takes as arguments a list of times and positions and returns the velocity using numpy linear regression.\n    def Get_Velocity(self,time,position):\n        coeffs = np.polyfit(time,position,1)\n        return coeffs[0]\n","sub_path":"GOL_class.py","file_name":"GOL_class.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"318415397","text":"import json\n\n\"\"\"Usernames\"\"\"\ndef usernameDB(uid, username):\n\n    #Formatting python dictionary to the userData.json format\n    dataToAppend = [{\n        \"uid\" : uid,\n        \"username\" : username\n    }]\n\n    #Capturing old data\n    with open(\"static/userData.json\") as dataViewer:\n        oldData = json.load(dataViewer)\n\n\n    #Checking if data is already present for this user\n    for n in oldData:\n        #remove old info and bump updated info to top\n        if dataToAppend[0]['uid'] == n['uid']:\n\n            filteredData = [d for d in oldData if d['uid'] != n['uid']]\n            newData = dataToAppend + filteredData\n\n            with open(\"static/userData.json\", \"w\") as writeToFile:\n                json.dump(newData, writeToFile)\n\n            break\n\n    #add new user to top (for...else: runs only when no existing entry was found)\n    else:\n        newData = dataToAppend + oldData\n\n        with open(\"static/userData.json\", \"w\") as writeToFile:\n            json.dump(newData, writeToFile)\n\n\"\"\"Projects\"\"\"\ndef projectDB(userID, identifier, name, desc, imgurl):\n\n    #Formatting python dictionary to the projectData.json format\n    dataToAppend = [{\n        \"uid\" : userID,\n        \"identifier\": identifier,\n        \"name\": name,\n        \"desc\": desc,\n        \"imgurl\": imgurl\n    }]\n\n    #Capturing old data\n    with open(\"static/projectData.json\") as dataViewer:\n        oldData = json.load(dataViewer)\n\n\n\n    newData = dataToAppend + oldData\n\n    with open(\"static/projectData.json\", \"w\") as writeToFile:\n        json.dump(newData, writeToFile)\n","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"235635402","text":"def numRev(n):\n    rev = 0\n    while (n>0):\n        x = n%10\n        rev = rev*10 + x\n        n = n//10\n    return rev\n\ndef checkPalindrome(n):\n    if n == numRev(n):\n        return True\n    else:\n        return False\n
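\n# Example: numRev(123) returns 321, and numRev(121) returns 121, so checkPalindrome(121) is True.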
print(\"false\")","sub_path":"functions/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"325994212","text":"### tle data analysis using ephem ###\n### being used as module ###\n### time : utcnow() ###\n\n# import important modules\nimport ephem\nimport datetime\n\n# function tledata\ndef tledata(name, line1, line2):\n\n # process TLE data\n tle_rec = ephem.readtle(name, line1, line2)\n tle_rec.compute()\n\n # get the data into the table\n latitude = tle_rec.sublat / ephem.degree\n longitude = tle_rec.sublong / ephem.degree\n altitude = tle_rec.elevation\n right_asc = str((tle_rec.ra))\n declination = str(ephem.hours(tle_rec.dec))\n \n # return as dictionary (python) or object (JavaScript)\n return {'latitude': latitude, 'longitude': longitude, 'altitude': altitude, 'right_asc': right_asc, 'declination': declination}","sub_path":"sattrack_1/tle_calculate.py","file_name":"tle_calculate.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"535258942","text":"#if~else구문으로 가위바위보 게임 만들기\n#else : if의 조건이 맞지 않은 경우 항상 실행\n#반드시 if뒤에 나와야한다.\nSCISSOR = '가위'\nROCK = '바위'\nPAPER = '보'\n\nWIN = '이겼다'\nDRAW = '비겼다'\nLOSE = '졌다ㅠㅜ'\n\nmine = SCISSOR\nyours = ROCK\n\nif mine==yours : \n result = DRAW\n\nelse : \n if mine == SCISSOR : \n if yours == ROCK : \n result = LOSE\n else : \n result = WIN\n else : \n if mine == ROCK : \n if yours == PAPER : \n result = LOSE\n else :\n result = WIN\n else :\n if mine == PAPER :\n if yours == SCISSOR : \n result = LOSE\n else : \n result = WIN\n else : \n print('이상해요')\n\n# elif -> else + if\n#조건이 맞지 않는 경우 다른 경우를 검사\n#if~else구문과 기능의 차이가 아닌, 보이는 것의 차이\nif mine==yours : \n result = DRAW\n\nelse : \n if mine == SCISSOR : \n if yours == ROCK : \n result = LOSE\n else : \n result = WIN\n elif mine == ROCK : \n if yours == PAPER : \n result = LOSE\n else :\n result = WIN\n elif mine == PAPER :\n if yours == SCISSOR : \n result = LOSE\n else : \n result = WIN\n else : \n print('이상해요')\n\n\n#문제 1. 
\nmine = 'scissors'\nyours = 'rock'\nif mine == yours:\n    print(\"It's a draw.\")\n# Add an else below this line so that the print below runs only when it is not a draw\nelse : \n    print(\"It's not a draw.\") # once the else is added, this line must be indented.\n\n# Problem 2\ngender = \"male\"\n# Add an if statement below this line\nif gender == \"male\" : \n    print(\"You are male.\")\n# Add an elif statement below this line\nelif gender == \"female\" : \n    print(\"You are female.\")\n# Add an else statement below this line\nelse : \n    print(\"You are non-binary\")","sub_path":"ifElse.py","file_name":"ifElse.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"340385915","text":"import requests\r\nfrom urllib.parse import urlencode\r\nimport time\r\nimport os\r\nimport os.path\r\nfrom os import path\r\nfrom datetime import datetime, timedelta\r\nfrom pytz import timezone\r\nimport pytz\r\nimport tzlocal\r\n\r\nmonths = {'01': 'January', '02': 'February', '03': 'March', '04': 'April', '05': 'May', '06': 'June',\r\n\t'07': 'July', '08': 'August', '09': 'September', '10': 'October', '11': 'November', '12': 'December'}\r\n\r\ndef navigate(url):\r\n\t#print('Navigated to: ',url)\r\n\twhile (True):\r\n\t\ttry:\r\n\t\t\tresponse = requests.get(url, auth=(user, pwd))\r\n\t\t\treturn response\r\n\t\texcept:\r\n\t\t\ttime.sleep(60)\r\n\t\t\t\r\ndef searchRecURL(dict):\r\n\tfor element in dict['comments']:\r\n\t\ttry:\r\n\t\t\treturn element['data']['recording_url']\r\n\t\texcept:\r\n\t\t\tcontinue\r\n\traise Exception(\"Could not find recording URL\")\r\n\t\r\ndef reformatDate(date):\r\n\tdate = date.replace('T', ' ') # Date\r\n\tdate = date.replace('Z', '')\r\n\tdt = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\r\n\tlocal_tz = tzlocal.get_localzone()\r\n\tdt = dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\r\n\thours = int(dt.strftime('%H'))\r\n\tif (hours >= 12):\r\n\t\ttime = 'PM'\r\n\t\tif (hours != 12):\r\n\t\t\thours = hours - 12\r\n\telse:\r\n\t\ttime = 'AM'\r\n\treturn dt.strftime('%Y-%m-%d {}-%M{}').format(hours, time)\r\n\t\r\ndef reformatPhone(phone):\r\n\treturn phone.replace(' ', '', 2)\t\r\n\r\n# DateTime Threshold\r\nd = datetime.utcnow() - timedelta(days=1)\r\nprint(d)\r\n\r\nuser = [YourEmailAddress] + '/token'\r\npwd = [YourAPIKey]\r\n\r\nurl = 'https://lislv.zendesk.com/api/v2/views/360126690174/execute.json?page=&sort_by=status&sort_order=desc'\r\nfinished = False\r\nonGoing = []\r\ncount = 0\r\n\r\n# loop through all tickets that are not archived yet\r\nwhile(not finished):\r\n\t# Set the request parameters\r\n\t# + urlencode(params)\r\n\tresponse = navigate(url)\r\n\tdata = response.json()\r\n\t\r\n\t# loop through each ticket in json result (100 per request)\t\r\n\tfor ticket in data['rows']:\r\n\t\tcount = count + 1\r\n\t\tcr = ticket['created']\r\n\t\tcr = cr.replace('T', ' ')\r\n\t\tcr = cr.replace('Z', '')\r\n\t\tcr = datetime.strptime(cr, '%Y-%m-%d %H:%M:%S')\r\n\t\t#print(cr)\r\n\t\t\r\n\t\t# Check if ticket created is within 24 hours\r\n\t\tif((cr - d).days <= -1):\r\n\t\t\tprint('24 Hour update reached')\r\n\t\t\texit()\r\n\t\tcr = reformatDate(ticket['created'])\r\n\t\tti = ticket['ticket_id']\r\n\t\ttry:\r\n\t\t\tcc = ticket['custom_fields'][0]['name'] # company code\r\n\t\texcept:\r\n\t\t\tprint('No Custom Field')\r\n\t\t\tcc = 'UNKNOWN'\r\n\t\ttry:\r\n\t\t\tba = ticket['custom_fields'][1]['value'] if ticket['custom_fields'][1]['value'] != 'N/A' else 'noBA' # BA number\r\n\t\texcept:\r\n\t\t\tba = 'noBA'\r\n\t\t\t\r\n\t\twhile(True):\r\n\t\t\ttry:\r\n\t\t\t\tpn = reformatPhone(ticket['via']['source']['from']['formatted_phone']) # 
phone number\r\n\t\t\t\tbreak\r\n\t\t\texcept:\r\n\t\t\t\tprint('Could not find phone number immediately')\t\r\n\t\t\t\r\n\t\tcr = reformatDate(ticket['created'])\r\n\t\tmonth = cr[5:7]\r\n\t\tyear = cr[:4]\r\n\t\t\r\n\t\tif (os.path.isfile('Company_Codes/' + cc + '/' + months[month] + ' ' + year + '/' + ba + '__' + pn + '__' + cr + '.mp3')):\r\n\t\t\tprint('file already exists')\r\n\t\t\tcontinue\r\n\t\t\r\n\t\tprint('processing ticket: ', ti,' | ', cc,' | ', ba, ' | ', pn, ' | ', cr, ' | ', months[month], year)\r\n\t\t# new request for ticket\r\n\t\tticketResponse = navigate('https://lislv.zendesk.com/api/v2/tickets/' + str(ti) + '/comments.json')\r\n\t\tticketData = ticketResponse.json()\r\n\t\ttry:\r\n\t\t\trecordingUrl = searchRecURL(ticketData)\r\n\t\texcept:\r\n\t\t\tprint('No recording attached yet')\r\n\t\t\tonGoing.append(ti)\r\n\t\t\tprint('Done with processing: ', ti, ' || ', ticket['ticket']['status'], ' || ', 'Count: ', count)\r\n\t\t\tcontinue\r\n\t\t# new request for recording\r\n\t\trecordingResponse = navigate(recordingUrl)\r\n\t\t# create directory of company code if it does not exist yet\r\n\t\ttry:\r\n\t\t\topen('Company_Codes/' + cc + '/' + months[month] + ' ' + year + '/' + ba + '__' + pn + '__' + cr + '.mp3', 'wb').write(recordingResponse.content)\r\n\t\texcept:\r\n\t\t\tif (not path.exists('Company_Codes/' + cc)):\r\n\t\t\t\tos.mkdir('Company_Codes/' + cc)\r\n\t\t\tos.mkdir('Company_Codes/' + cc + '/' + months[month] + ' ' + year)\t\r\n\t\t\topen('Company_Codes/' + cc + '/' + months[month] + ' ' + year + '/' + ba + '__' + pn + '__' + cr + '.mp3', 'wb').write(recordingResponse.content)\r\n\t\tprint('Done with processing: ', ti, ' || ', ticket['ticket']['status'], ' || ', 'Count: ', count)\r\n\t\r\n\turl = data['next_page']\r\n\tprint('----------Next Page----------')\r\n\tprint(onGoing)\r\n\tif (url is not None):\r\n\t\ttime.sleep(20)\r\n\telse:\r\n\t\tfinished = True\r\nprint('Done with loop')\r\nexit()","sub_path":"getRecsDaily.py","file_name":"getRecsDaily.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"234398658","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n#import subprocess\nimport numpy as np\nimport pandas as pd\nfrom SetParameter import SetParameter\n\nclass MakeDatatoDf(SetParameter):\n def __init__(self,filename,channelname,direct):\n super(MakeDatatoDf,self).__init__(direct)\n self.channelname=channelname\n self.filename=filename\n\n def DataToDf(self):\n dewinpath = \"%s\" % str(self.DEWIN_FILE_PATH)+ str(self.channelname)\n index_col = 'datetime'\n parse_dates = {'datetime': [0, 1]}\n date_parser = lambda x: pd.datetime.strptime(x, '%y%m%d.%H%M%S %f')\n dt = pd.to_datetime(self.filename,format='%y%m%d%H.%M')\n dtidx= pd.date_range(dt,periods=6000, freq='10L')\n try :\n df = pd.read_csv(dewinpath, header=None, delim_whitespace=True, index_col=index_col, parse_dates=parse_dates, date_parser=date_parser)\n except:\n array =np.empty((6000,1),int)\n df = pd.DataFrame(array)\n df.columns =[self.channelname,]\n self.initdf=df.reindex(dtidx)\n else:\n df.columns =[self.channelname,]\n self.initdf=df.reindex(dtidx)\n finally:\n return self.initdf\n\n def ToPhysicalQuantity(self):\n channelsinfo=pd.read_pickle(self.DIRECION_CHTBL_PATH)\n try:\n physicalQuantityDF=self.DataToDf()\n except:\n print(\"Can't ADC Transform.\" +self.channelname)\n pass\n else:\n ADC=channelsinfo.ix[self.channelname,\"ADC\"]\n 
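# conversion applied below: raw counts * ADC / (sensitivity * 10**(amp/20)), then scaled by 10**6\n            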
sensitive=channelsinfo.ix[self.channelname,\"v/unit\"]\n            amp=channelsinfo.ix[self.channelname,\"ampl\"]\n            physicalQuantityDF=physicalQuantityDF.applymap(lambda x : (x * ADC)/(sensitive*10**(amp/20))*10**6 ) # multiplied by 10**6 here\n            self.physicalQuantityDF=physicalQuantityDF.astype(np.float64)\n            return self.physicalQuantityDF\n\n    def ConstantTimeMerge(self,counter):\n        try:\n            df=self.ToPhysicalQuantity()\n        except:\n            print (\"Can't open PhysicalQuantity data. \" +str(self.channelname))\n            pass\n        else:\n            if counter < 1:\n                df.to_pickle(self.quontityfilepath)\n            else:\n                initdf=pd.read_pickle(self.quontityfilepath)\n                odf =pd.merge(initdf, df, right_index=True, left_index=True, how='outer')\n                odf.to_pickle(self.quontityfilepath)\n\n\nif __name__ == \"__main__\":\n    directionlist=[\"V\", \"N\", \"E\"]\n    for direct in directionlist:\n        Parameter = SetParameter(direct)\n        channelslist = Parameter.return_channelslist()\n        filename = str(Parameter.getFilename()).strip(\"\\n\")\n\n        if os.path.exists(Parameter.quontityfilepath):\n            os.remove(Parameter.quontityfilepath)\n\n        counter = 0\n        for channelname in channelslist:\n            data = MakeDatatoDf(filename, channelname, direct)\n            store=data.ConstantTimeMerge(counter)\n            counter += 1\n\n","sub_path":"MakeDataDF.py","file_name":"MakeDataDF.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"647368134","text":"import os\r\nimport shutil\r\nimport tempfile\r\nimport zipfile\r\nimport json\r\nfrom tqdm import tqdm\r\nfrom PIL import Image\r\n\r\nfrom tuple_calculation import mult \r\nfrom find_minecraft import getMinecraftFolder\r\nfrom util import singleton\r\n\r\n@singleton\r\nclass ResourceManager:\r\n    def __init__(self):\r\n        self.local_model_folder = os.path.join(\".\", \"models\", \"block\")\r\n        self.model_loader = ModelLoader(os.path.join(\".\", \"models\"))\r\n        self.local_texture_folder = os.path.join(\"..\", \"scenes\", \"block\")\r\n        self.scene_folder = os.path.join(\"..\", \"scenes\")\r\n        self.setup()\r\n\r\n        self.table_alpha = {}\r\n\r\n    def hasAlpha(self, texture_fn):\r\n        \"\"\"Check if texture file has alpha channel\r\n\r\n        Args:\r\n            texture_fn: filename of texture.\r\n        Returns:\r\n            Texture has alpha channel or not.\r\n        \"\"\"\r\n        if texture_fn not in self.table_alpha:\r\n            full_filename = os.path.join(self.local_texture_folder, \"..\", texture_fn)\r\n            image = Image.open(full_filename)\r\n            self.table_alpha[texture_fn] = len(image.mode) == 4\r\n\r\n        return self.table_alpha[texture_fn]\r\n\r\n    def setup(self):\r\n        \"\"\"\r\n        1. Copy Model.json into folder\r\n        2. 
Copy Texture into folder\r\n \"\"\"\r\n\r\n has_model = self.checkModelFolder()\r\n has_texture = self.checkTextureFolder()\r\n\r\n if has_model and has_texture:\r\n return\r\n\r\n minecraft_dir = getMinecraftFolder()\r\n version = \"1.13.2\"\r\n version_file = os.path.join(minecraft_dir, \"versions\", version, version + \".jar\")\r\n with tempfile.TemporaryDirectory() as temp_dir:\r\n with zipfile.ZipFile(version_file, 'r') as vzip:\r\n vzip.extractall(temp_dir)\r\n\r\n if not has_model:\r\n print(\"Copy model json files...\", )\r\n block_model_dir = os.path.join(temp_dir, \"assets\", \"minecraft\", \"models\", \"block\")\r\n for filename in tqdm(os.listdir(block_model_dir), ascii=True):\r\n if filename.endswith(\".json\"):\r\n full_filename = os.path.join(block_model_dir, filename)\r\n shutil.copy(full_filename, self.local_model_folder)\r\n\r\n if not has_texture:\r\n print(\"Copy texture files...\")\r\n texture_dir = os.path.join(temp_dir, \"assets\", \"minecraft\", \"textures\", \"block\")\r\n for filename in tqdm(os.listdir(texture_dir), ascii=True):\r\n full_filename = os.path.join(texture_dir, filename)\r\n shutil.copy(full_filename, self.local_texture_folder)\r\n\r\n def checkModelFolder(self):\r\n \"\"\"Check if the folder has model json file\r\n\r\n Returns:\r\n Model json file is ready or not\r\n \"\"\"\r\n json_list = [fn for fn in os.listdir(self.local_model_folder) if fn.endswith(\".json\")]\r\n # Check with hash function ?\r\n return len(json_list) > 0\r\n\r\n def checkTextureFolder(self):\r\n \"\"\"Check if the folder has texture pngs\r\n\r\n Returns:\r\n Texture image file is ready or not\r\n \"\"\"\r\n\r\n png_list = [fn for fn in os.listdir(self.local_texture_folder) if fn.endswith(\".png\")]\r\n # Check with hash function ?\r\n return len(png_list) > 0\r\n\r\n\r\nclass ModelLoader:\r\n def __init__(self, path = \".\"):\r\n self.path = path\r\n self.db = {}\r\n\r\n def _resolveTexture(self, data, texname):\r\n if texname[0] != '#' : return texname\r\n if \"textures\" in data and texname[1:] in data[\"textures\"]:\r\n return data[\"textures\"][texname[1:]]\r\n return texname\r\n\r\n def _resolveElements(self, data):\r\n if \"elements\" in data:\r\n for ele in data[\"elements\"]:\r\n for facename in ele[\"faces\"]:\r\n face = ele[\"faces\"][facename]\r\n face[\"texture\"] = self._resolveTexture(data, face[\"texture\"])\r\n return True\r\n return False\r\n\r\n def _resolveTextures(self, data):\r\n if \"textures\" in data:\r\n texs = data[\"textures\"]\r\n for tex in texs:\r\n texs[tex] = self._resolveTexture(data, texs[tex])\r\n return True\r\n return False\r\n\r\n def _getModel(self, name): \r\n with open(self.path + \"/\" + name + \".json\", \"r\") as f:\r\n data = json.load(f)\r\n \r\n self._resolveElements(data)\r\n \r\n if \"parent\" in data and data[\"parent\"] not in [\"block/block\", \"block/thin_block\"]:\r\n par_data, par = self._getModel(data[\"parent\"])\r\n if \"textures\" in data:\r\n if \"textures\" not in par_data:\r\n par_data[\"textures\"] = {}\r\n for tex in data[\"textures\"]:\r\n par_data[\"textures\"][tex] = data[\"textures\"][tex]\r\n \r\n flag_eles = self._resolveElements(par_data)\r\n flag_texs = self._resolveTextures(par_data)\r\n if flag_eles or flag_texs: \r\n return par_data, data[\"parent\"]\r\n return data, \"\"\r\n\r\n def getModel(self, name):\r\n if name not in self.db:\r\n model, par = self._getModel(name)\r\n self.db[name] = (model, par)\r\n if \"elements\" in model:\r\n for ele in model[\"elements\"]:\r\n ele[\"from\"] = 
mult(ele[\"from\"], 1./16)\r\n ele[\"to\"] = mult(ele[\"to\"], 1./16)\r\n for facename in ele[\"faces\"]:\r\n face = ele[\"faces\"][facename]\r\n uv = [0., 0., 1., 1.]\r\n if \"uv\" in face:\r\n uv = list(face[\"uv\"])\r\n for i in range(4):\r\n uv[i] /= 16.\r\n # swap UV\r\n uv = [uv[1], uv[0], uv[3], uv[2]]\r\n face[\"uv\"] = tuple(uv)\r\n return self.db[name]\r\n","sub_path":"mc2pbrt/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"276797973","text":"###\n# Calculates a path given it's begin and end points. Can give expected positions\n# when given a percentage\n###\n\nfrom numpy import sqrt\nfrom coordinates import Coordinate\n\nCoordinate.default_order = 'xy'\n\n\nclass Path:\n def __init__(self, begin: Coordinate, end: Coordinate):\n self.begin = begin\n self.end = end\n self.xDist = end.x - begin.x\n self.yDist = end.y - begin.y\n self.totalDist = sqrt(pow(self.xDist, 2) + pow(self.yDist, 2))\n self.xOrientation = self.xDist >= 0\n self.yOrientation = self.yDist >= 0\n\n # Precondition: we haven't passed the end point yet\n def expectedPos(self, distanceLeft: float):\n percentage = 1 - distanceLeft / self.totalDist\n xCoor = self.begin.x + percentage * self.xDist\n yCoor = self.begin.y + percentage * self.yDist\n return Coordinate(xCoor, yCoor)\n\n def __str__(self):\n return \"Path[begin: \" + str(self.begin) \\\n + \"; end: \" + str(self.end) \\\n + \"; totalDist: \" + str(self.totalDist) + \"]\"\n","sub_path":"drivers/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"56272761","text":"import re\nfile = input(\"Enter file:\")\ncount = 0\nsum = 0\ntry:\n handle = open(file)\n for line in handle:\n line = line.rstrip()\n lst = re.findall('^New .*: ([0-9.]+)',line)\n count += 1\n for i in lst:\n sum = sum + int(i)\n print(count)\n print(sum)\n print(sum/count)\nexcept FileNotFoundError:\n quit(\"Enter correct file name.\")","sub_path":"src/chapter11/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"129096739","text":"class Tabung:\n\n phi = 22/7\n jml_tabung = 0\n\n def __init__(self, tinggi, radius):\n\n self.t = tinggi\n self.r = radius\n\n Tabung.jml_tabung += 1\n\n def luasPermukaan(self):\n\n lp = Tabung.phi*2*self.r * (self.r+self.t)\n return lp\n\n def volume(self):\n\n vol = Tabung.phi*self.r**2*self.t\n return vol\n\n\ntabung1 = Tabung(18, 7)\ntabung2 = Tabung(21, 14)\nprint('Luas Permukaan tabung 1:', tabung1.luasPermukaan())\nprint('Vol tabung 1:', tabung1.volume())\nprint('vol tabung 2:', tabung2.volume())\nprint('jml tabung:', Tabung.jml_tabung)\n","sub_path":"matkul/PBO/task/1 class tabung.py","file_name":"1 class tabung.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"11476300","text":"\"\"\"\nProblem:\n\nSpreadsheets often use this alphabetical encoding for its columns: \"A\", \"B\", \"C\", ..., \"AA\", \"AB\", ..., \"ZZ\", \"AAA\", \"AAB\", ....\nGiven a column number, return its alphabetical column id. 
\n\nExample:\n\nInput = 1\nOutput = \"A\"\n\nInput = 27\nOutput = \"AA\"\n\"\"\"\n\n# FUNCTION TO PERFORM THE OPERATION\ndef get_col_name(num):\n    # declaring the result\n    result = \"\"\n\n    # generating the result from the last character to the 1st\n    while num > 0:\n        num -= 1  # shift to 0-based so that multiples of 26 map to \"Z\" instead of chr(64)\n        result = chr(65 + (num % 26)) + result\n        num = num // 26\n\n    # returning the result\n    return result\n\n\n# DRIVER CODE\nprint(get_col_name(1))\nprint(get_col_name(27))\nprint(get_col_name(30))\nprint(get_col_name(53))\n","sub_path":"Solutions/212.py","file_name":"212.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"477211757","text":"class BTSearch:\n\n    def __init__(self, csp):\n\n        self.csp = csp\n        self.unassigned_vars = []\n        self.num_assignments = 0\n        self.num_prunings = 0\n\n    def restore_values(self, prunings):\n        for var, val in prunings:\n            var.unprune_value(val)\n\n    def restore_all_variable_domains(self):\n        for var in self.csp.vars:\n            if var.is_assigned():\n                var.unassign()\n            var.restore_current_domain()\n\n    def restore_unassigned_variable(self, var):\n        self.unassigned_vars.append(var)\n\n    def bt_search(self, propagator=None):\n        self.restore_all_variable_domains()\n        self.num_prunings = 0\n        self.num_assignments = 0\n        self.unassigned_vars = list(self.csp.vars)\n\n        if not propagator:\n            propagator = BTSearch.no_prop_enforce\n\n        status, prunings = propagator(self.csp)\n        self.num_prunings += len(prunings)\n\n        if not status:\n            print(\"Propagator eliminated domains at beginning, no solution\")\n            print(str.format(\"Number of prunings: {}\", self.num_prunings))\n            return\n\n        if self.bt_recurse(propagator):\n            print(\"Result found...\")\n            print(str.format(\"Number of assignments: {}\", self.num_assignments))\n            print(str.format(\"Number of prunings: {}\", self.num_prunings))\n            self.print_result()\n        else:\n            print(\"No result found...\")\n    \n    def bt_recurse(self, propagator):\n\n        # All variables are assigned, solution found.\n        if not self.unassigned_vars:\n            return True\n\n        # Choose next variable to assign.\n        var = self.variable_order(self.unassigned_vars)\n        self.unassigned_vars.remove(var)\n\n        # Try each value for the chosen variable.\n        for val in var.get_current_domain():\n            var.assign(val)\n            self.num_assignments += 1\n\n            # Propagate the value (checks partial result)\n            status, prunings = propagator(self.csp, var)\n            self.num_prunings += len(prunings)\n\n            if status:\n                if self.bt_recurse(propagator):\n                    return True\n\n            self.restore_values(prunings)\n            var.unassign()\n\n        self.restore_unassigned_variable(var)\n        return False\n    \n    def variable_order(self, vars):\n        '''\n        Returns a variable with the smallest current domain\n        '''\n        var = None\n        min = float('inf')\n        for v in vars:\n            size = v.current_domain_size()\n            if size < min:\n                var = v\n                min = size\n        return var\n\n    @staticmethod\n    def no_prop_enforce(csp, new_var=None):\n        if not new_var:\n            return True, []\n\n        for con in csp.cons:\n            if con.num_unassigned() == 0:\n                scope = con.get_scope()\n                vals = []\n                for var in scope:\n                    vals.append(var.get_value())\n                if not con.check_tuple(vals):\n                    return False, []\n\n        return True, []\n\n    def print_result(self):\n        meal_names = []\n        daily_values = {}\n\n        for var in self.csp.vars:\n            meal = var.get_value()\n            names = [meal.get_main().get_name()]\n            for side in meal.get_sides():\n                names.append(side.get_name())\n            meal_names.append(names)\n\n        cats = [\"prot\", \"fat\", \"satFat\", \"carb\", \"sug\", \"sod\", \"fib\", \"kcal\", \"chol\"]\n\n        for cat in cats:\n            if cat in 
daily_values:\n                daily_values[cat] += meal.category_sum(cat)\n            else:\n                daily_values[cat] = meal.category_sum(cat)\n\n        s = \"\"\n        for i in range(len(meal_names)):\n            s += \"Meal \" + str(i + 1) + \": \"\n            for j in range(len(meal_names[i])):\n                s += meal_names[i][j]\n                if j < len(meal_names[i]) - 1:\n                    s += \" ||| \"\n            s += \"\\n\"\n\n        s += \"\\nTotals:\\n\\n\"\n\n        s += str.format(\"kcal:\\t\\t{}\\n\"\n                        \"protein:\\t{}\\n\"\n                        \"fat:\\t\\t{}\\n\"\n                        \"carbs:\\t\\t{}\\n\"\n                        \"sugar:\\t\\t{}\\n\"\n                        \"fibre:\\t\\t{}\\n\"\n                        \"sat fat:\\t{}\\n\"\n                        \"sodium:\\t\\t{}\\n\"\n                        \"cholesterol:\\t{}\",\n                        daily_values['kcal'],\n                        daily_values['prot'],\n                        daily_values['fat'],\n                        daily_values['carb'],\n                        daily_values['sug'],\n                        daily_values['fib'],\n                        daily_values['satFat'],\n                        daily_values['sod'],\n                        daily_values['chol'])\n\n        print(s)\n\n\n\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"301213304","text":"from sys import stdin\r\nimport math\r\n\r\ndef primeFactors(n):\r\n    factors = []\r\n    \r\n    # Collect the twos that divide n\r\n    while n % 2 == 0:\r\n        factors.append(2)\r\n        n = n // 2  # integer division keeps the factors as ints\r\n    \r\n    # n must be odd at this point\r\n    # so a skip of 2 ( i = i + 2) can be used\r\n    for i in range(3,int(math.sqrt(n))+1,2):\r\n        \r\n        # while i divides n, record i and divide n\r\n        while n % i== 0:\r\n            factors.append(i)\r\n            n = n // i\r\n    \r\n    # Condition if n is a prime\r\n    # number greater than 2\r\n    if n > 2:\r\n        factors.append(n)\r\n    return factors\r\n\r\ndef analyse(int1, int2):\r\n    int2primefactors = primeFactors(int2)\r\n    if max(int2primefactors) > int1:\r\n        return False\r\n    for i in range(1, int1+1):\r\n        curPrimeFactors = primeFactors(i)\r\n        for factor in curPrimeFactors:\r\n            if factor in int2primefactors:\r\n                int2primefactors.remove(factor)\r\n        if len(int2primefactors) == 0:\r\n            return True\r\n    return False\r\n\r\ndef main():\r\n    for line in stdin:\r\n        if line == '': # If empty string is read then stop the loop\r\n            break\r\n        int1, int2 = line.split(' ')\r\n        int1 = int(int1)\r\n        originalint2 = int2[:-1]\r\n        int2 = int(int2)\r\n        if analyse(int1, int2):\r\n            print(originalint2 + ' divides ' + str(int1) + '!')\r\n        else:\r\n            print(originalint2 + ' does not divide ' + str(int1) + '!')\r\n\r\nmain()","sub_path":"kattis/factovisors.py","file_name":"factovisors.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"546222649","text":"from math import sqrt\nimport operator\nimport os\nfrom os import path\nimport re\nimport shlex\nimport signal\nimport subprocess\nimport time\nimport urllib\nimport urllib.parse\nimport requests\n\nimport config\nimport log\n\n\nrs = requests.Session()\nrs.headers.update({'User-Agent': 'pbot'})\n\nchroot_dir = path.join(path.dirname(path.abspath(__file__)), 'chroot')\nMB = 1024 * 1024\n\ndef reload(bot, target, nick, command, text):\n\timport sys\n\timport imp\n\tif config.settings['owner'] == nick:\n\t\tif config.settings['autoreload']:\n\t\t\tbot.notice(nick, 'not reloading: autoreload is on')\n\t\t\treturn\n\t\timp.reload(sys.modules[__name__])\n\t\tbot.notice(nick, 'reloaded!')\n\ndef calc(bot, target, nick, command, text):\n\tif not text:\n\t\treturn\n\tresponse = rs.get('https://www.calcatraz.com/calculator/api', params={'c': text})\n\tbot.say(target, '%s: %s' % (nick, response.text.rstrip()))\n\ndef roll(bot, target, nick, command, text):\n\tif not 
text:\n\t\ttext = '1d6'\n\tresponse = rs.get('https://rolz.org/api/?' + urllib.parse.quote_plus(text))\n\tsplit = response.text.split('\\n')\n\tdetails = split[2].split('=', 1)[1].strip()\n\tdetails = details.replace(' +', ' + ').replace(' + ', ' + ')\n\tresult = split[1].split('=', 1)[1]\n\tbot.say(target, \"%s: %s = %s\" % (nick, details, result))\n\ndef nodejs(bot, target, nick, command, text):\n\tcmd = ['../nsjail/nsjail', '-Mo', '--rlimit_as', '700', '--chroot', chroot_dir,\n\t\t\t'-R/usr', '-R/lib', '-R/lib64', '--user', 'nobody', '--group', 'nogroup',\n\t\t\t'--time_limit', '2', '--disable_proc', '--iface_no_lo',\n\t\t\t'--cgroup_mem_max', str(50 * MB), '--cgroup_pids_max', '1', '--quiet', '--',\n\t\t\t'/usr/bin/nodejs', '--print', text]\n\tproc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.PIPE, universal_newlines=True)\n\tstdout, stderr = proc.communicate()\n\t# https://github.com/nodejs/node/blob/master/doc/api/process.md#exit-codes is all lies\n\tif proc.returncode == 0:\n\t\toutput = stdout.split('\\n', 1)[0]\n\telif proc.returncode == 109:\n\t\toutput = 'timed out' # node catches OOM and exits 111; see below\n\telse:\n\t\tsplit = stderr.split('\\n', 5)\n\t\ttry:\n\t\t\toutput = split[4]\n\t\texcept IndexError:\n\t\t\tif split[0].startswith('FATAL ERROR:'):\n\t\t\t\t# often returncode 111 when OOM\n\t\t\t\t# curiously, the doc linked above claims a fatal error will exit 5\n\t\t\t\t# ENOMEM is 12. 128 - 5 - 12 = 111\n\t\t\t\toutput = split[0]\n\t\t\telse:\n\t\t\t\toutput = 'unknown error'\n\tbot.say(target, '%s: %s' % (nick, output[:250]))\n\ndef irb(bot, target, nick, command, text):\n\tcmd = ['../nsjail/nsjail', '-Mo', '--chroot', '',\n\t\t\t'-R/usr', '-R/lib', '-R/lib64', '--user', 'nobody', '--group', 'nogroup',\n\t\t\t'--time_limit', '2', '--disable_proc', '--iface_no_lo',\n\t\t\t'--cgroup_mem_max', str(50 * MB), '--cgroup_pids_max', '1', '--quiet', '--',\n\t\t\t'/usr/bin/irb', '-f', '--noprompt']\n\tproc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)\n\tstdout, _ = proc.communicate(text)\n\tif proc.returncode == 109:\n\t\toutput = 'timed out or memory limit exceeded'\n\telse:\n\t\ttry:\n\t\t\toutput = stdout.split('\\n', 2)[2].lstrip('\\n')\n\t\t\toutput = output.split('\\n', 1)[0][:250]\n\t\texcept IndexError:\n\t\t\toutput = 'unknown error'\n\tbot.say(target, '%s: %s' % (nick, output))\n\ndef python2(bot, target, nick, command, text):\n\tcmd = ['../nsjail/nsjail', '-Mo', '--chroot', chroot_dir, '-E', 'LANG=en_US.UTF-8',\n\t\t\t'-R/usr', '-R/lib', '-R/lib64', '--user', 'nobody', '--group', 'nogroup',\n\t\t\t'--time_limit', '2', '--disable_proc', '--iface_no_lo',\n\t\t\t'--cgroup_mem_max', str(50 * MB), '--cgroup_pids_max', '1', '--quiet', '--',\n\t\t\t'/usr/bin/python2', '-ESsi']\n\tproc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.PIPE, universal_newlines=True)\n\tstdout, stderr = proc.communicate(text + '\\n')\n\tif proc.returncode == 0:\n\t\tstderr = stderr.split('\\n', 2)[2] # ignore first 2 lines (version and compiler; python3 has -q for this)\n\t\tif stderr not in ['>>> >>> \\n', '>>> ... 
\\n>>> \\n']:\n\t\t\ttry:\n\t\t\t\toutput = stderr.split('\\n')[-3]\n\t\t\texcept IndexError:\n\t\t\t\toutput = ''\n\t\telse:\n\t\t\toutput = stdout.split('\\n', 1)[0]\n\telif proc.returncode == 109:\n\t\toutput = 'timed out or memory limit exceeded'\n\telse:\n\t\toutput = 'unknown error'\n\tbot.say(target, '%s: %s' % (nick, output[:250]))\n\ndef python3(bot, target, nick, command, text):\n\tcmd = ['../nsjail/nsjail', '-Mo', '--chroot', chroot_dir, '-E', 'LANG=en_US.UTF-8',\n\t\t\t'-R/usr', '-R/lib', '-R/lib64', '--user', 'nobody', '--group', 'nogroup',\n\t\t\t'--time_limit', '2', '--disable_proc', '--iface_no_lo',\n\t\t\t'--cgroup_mem_max', str(50 * MB), '--cgroup_pids_max', '1', '--quiet', '--',\n\t\t\t'/usr/bin/python3', '-ISqi']\n\tproc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.PIPE, universal_newlines=True)\n\tstdout, stderr = proc.communicate(text + '\\n')\n\tif proc.returncode == 0:\n\t\tif stderr not in ['>>> >>> \\n', '>>> ... \\n>>> \\n']:\n\t\t\ttry:\n\t\t\t\toutput = stderr.split('\\n')[-3]\n\t\t\texcept IndexError:\n\t\t\t\toutput = ''\n\t\telse:\n\t\t\toutput = stdout.split('\\n', 1)[0]\n\telif proc.returncode == 109:\n\t\toutput = 'timed out or memory limit exceeded'\n\telse:\n\t\toutput = 'unknown error'\n\tbot.say(target, '%s: %s' % (nick, output[:250]))\n\ndef unicode_search(bot, target, nick, command, text):\n\tcmd = ['unicode', '--format', '{pchar} U+{ordc:04X} {name} (UTF-8: {utf8})\\\\n', '--max', '5', '--color', '0', text]\n\toutput = subprocess.check_output(cmd)\n\tsplit = output.decode('utf-8').split('\\n')\n\tif len(split) > 8: # text is something like '0000..ffff'\n\t\treturn\n\telif len(split) == 1:\n\t\tbot.say(target, '%s: nothing found' % nick)\n\telif split[-2].startswith('Too many characters to display,'):\n\t\tsplit[-2] = split[-2][:split[-2].rfind(',')]\n\tbot.say(target, ' '.join(split))\n\ndef ddate(bot, target, nick, command, text):\n\toutput = subprocess.check_output(['ddate'] + shlex.split(text), universal_newlines=True)\n\tbot.say(target, output.replace('\\n', ' '))\n\ndef units(bot, target, nick, command, text):\n\tcommand = ['units', '--compact', '--one-line', '--quiet'] + text.split(' in ', 1)\n\tproc = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)\n\toutput, _ = proc.communicate()\n\tbot.say(target, output.replace('\\n', ' ')[:250])\n\n\n\nyoutube_re = re.compile(r'((youtube\\.com\\/watch\\?\\S*v=)|(youtu\\.be/))([a-zA-Z0-9-_]+)')\ndef youtube(bot, msg):\n\tmatch = youtube_re.search(msg.text)\n\tif match is None:\n\t\treturn\n\tvid = match.group(4)\n\tparams = {\n\t\t'id': vid,\n\t\t'part': 'contentDetails,snippet',\n\t\t'key': config.settings['youtube_key'],\n\t}\n\tresponse = rs.get('https://www.googleapis.com/youtube/v3/videos', params=params)\n\tif response.status_code == 400:\n\t\tbot.say(msg.target, \"%s: invalid id\" % msg.nick)\n\t\treturn\n\tvideo = response.json()['items'][0]\n\ttitle = video['snippet']['title']\n\tchannel = video['snippet']['channelTitle']\n\tduration = video['contentDetails']['duration']\n\tduration = duration[2:].replace('H', 'h ').replace('M', 'm ').replace('S', 's')\n\tdate = video['snippet']['publishedAt'].split('T', 1)[0]\n\tbot.say(msg.target, \"%s's video: %s, %s, by %s, %s\" % (msg.nick, title, duration, channel, date))\n\ndef weather(bot, target, nick, command, text):\n\turl = 'https://api.wunderground.com/api/%s/conditions/q/%s.json' % (\n\t\t\tconfig.settings['weather_key'], urllib.parse.quote_plus(text.replace(' ', 
'_')))\n\tresponse = rs.get(url)\n\tresponse.raise_for_status()\n\tdata = response.json()\n\tif 'current_observation' in data:\n\t\tcurrent = data['current_observation']\n\t\toutput = '%s: %s: %s | %s°C (%s°F) %sRH %smb(%s) | Wind: %s (%s°), %skm/h (%smph); Windchill: %s°C (%s°F) | Dew @ %s°C (%s°F)' % (\n\t\t\t\tnick, current['display_location']['full'], current['weather'], current['temp_c'], current['temp_f'], current['relative_humidity'], current['pressure_mb'], current['pressure_trend'],\n\t\t\t\tcurrent['wind_dir'], current['wind_degrees'], current['wind_kph'], current['wind_mph'], current['windchill_c'], current['windchill_f'],\n\t\t\t\tcurrent['dewpoint_c'], current['dewpoint_f'] )\n\t\tbot.say(target, output)\n\telif 'results' in data['response']:\n\t\tbot.say(target, '%s: got %s results. try narrowing your search' % (\n\t\t\t\tnick, len(data['response']['results'])))\n\telse:\n\t\tbot.say(target, '%s: error fetching results' % nick)\n\nhandlers = {\n\t'reload': reload,\n\n\t'calc': calc,\n\t'roll': roll,\n\n\t'js': nodejs,\n\t'ruby': irb,\n\t'py2': python2,\n\t'py3': python3,\n\n\t'unicode': unicode_search,\n\t'ddate': ddate,\n\t'units': units,\n\t'weather': weather,\n\t'w': weather\n}\n","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":8393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"140616358","text":"# Author: Tristan Challener \n# Copyright: please don't steal this that is all\n\nimport random\nimport configparser\nimport sys\nfrom builtins import input\nfrom log.logger import Logger\nfrom digimon.handler import DigimonWorldHandler\n\nconfig = configparser.ConfigParser( allow_no_value=True )\nconfig.read( 'settings.ini' )\n\nverbose = config[ 'general' ][ 'LogLevel' ]\nlogger = Logger( verbose, filename='randomize.log' )\n\nif( len(sys.argv) > 1 ):\n inFile = sys.argv[1]\nelif( config[ 'general' ][ 'Input' ] != '' ):\n inFile = config[ 'general' ][ 'Input' ]\nelse:\n logger.fatalError( 'Must provide file name via command line or settings.' )\n exit()\n\n#If an output file was passed or set, use that as the output.\n#Otherwise, read and write the same file\nif( len(sys.argv) > 2 ):\n outFile = sys.argv[2]\nelif( config[ 'general' ][ 'Output' ] != '' ):\n outFile = config[ 'general' ][ 'Output' ]\nelse:\n outFile = inFile\n\n#Give the user a warning when we are going to overwrite the base ROM\nif( outFile == inFile ):\n qa = input( 'Warning: currently set to overwrite the input file.\\nAre you sure you want to continue? (y/n)' )\n if( qa != 'y' ):\n print( 'Exiting. Please update settings.ini \\'Output\\' to select a different output location.' )\n exit()\n\nprint( 'Reading data from ' + inFile + '...\\n' )\n\nseedcfg = config[ 'general' ][ 'Seed' ]\n\nif( seedcfg == '' ):\n handler = DigimonWorldHandler( inFile, logger )\nelse:\n try:\n handler = DigimonWorldHandler( inFile, logger, seed=int( seedcfg ) )\n except ValueError:\n logger.fatalError( 'Seed must be an integer. ' + str( seedcfg ) + ' is not a valid value.' 
)\n\nprint( 'Modifying data...\\n' )\n\nif( config[ 'digimon' ].getboolean( 'Enabled' ) ):\n handler.randomizeDigimonData( dropItem=config[ 'digimon' ].getboolean( 'DropItem' ),\n dropRate=config[ 'digimon' ].getboolean( 'DropRate' ) )\n\nif( config[ 'techs' ].getboolean( 'Enabled' ) ):\n handler.randomizeTechData( power=config[ 'techs' ].getboolean( 'Power' ),\n cost=config[ 'techs' ].getboolean( 'Cost' ),\n accuracy=config[ 'techs' ].getboolean( 'Accuracy' ),\n effect=config[ 'techs' ].getboolean( 'Effect' ),\n effectChance=config[ 'techs' ].getboolean( 'EffectChance' ) )\n\nif( config[ 'starter' ].getboolean( 'Enabled' ) ):\n handler.randomizeStarters( useWeakestTech=config[ 'starter' ].getboolean( 'UseWeakestTech' ) )\n\n#if( config[ 'recruitment' ].getboolean( 'Enabled' ) ):\n# handler.randomizeRecruitments()\n\nif( config[ 'chests' ].getboolean( 'Enabled' ) ):\n handler.randomizeChestItems( allowEvo=config[ 'chests' ].getboolean( 'AllowEvo' ) )\n\nif( config[ 'tokomon' ].getboolean( 'Enabled' ) ):\n handler.randomizeTokomonItems( consumableOnly=config[ 'tokomon' ].getboolean( 'ConsumableOnly' ) )\n\nif( config[ 'techgifts' ].getboolean( 'Enabled' ) ):\n handler.randomizeTechGifts()\n\nif( config[ 'mapItems' ].getboolean( 'Enabled' ) ):\n handler.randomizeMapSpawnItems( foodOnly=config[ 'mapItems' ].getboolean( 'FoodOnly' ) )\n\nif( config[ 'evolution' ].getboolean( 'Enabled' ) ):\n handler.randomizeEvolutions()\n\nif( config[ 'patches' ].getboolean( 'FixEvoItemStatGain' ) ):\n handler.applyPatch( 'fixEvoItems' )\n\nif( config[ 'patches' ].getboolean( 'AllowDropQuestItems' ) ):\n handler.applyPatch( 'allowDrop' )\n\nif( config[ 'patches' ].getboolean( 'Woah' ) ):\n handler.applyPatch( 'woah' )\n\n\n\nprint( 'Writing to ' + outFile + '...\\n' )\nhandler.write( outFile )\n\nif( not logger.error ):\n print( 'Modifications completed successfully. See log file for details (Warning: spoilers!).' )\n print( 'Seed was ' + str( handler.randomseed ) )\n print( 'Enter this seed in settings file to produce the same ROM again.' )\nelse:\n print( 'Program ended with errors. See log file for details.' )\n\nlogger.logAlways( 'End of log.' )\n\ninput( 'Press Enter to finish...' 
)\n","sub_path":"digimon_randomize.py","file_name":"digimon_randomize.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"96163329","text":"import socket\nfrom PyQt4 import QtGui, QtCore\nimport sys\nimport threading\n\n\nclass myThread (threading.Thread):\n def __init__(self, result, path):\n threading.Thread.__init__(self)\n self.result = result\n self.path = path\n def run(self):\n # Get lock to synchronize threads\n # self.lock.acquire()\n self.result.append((self.ping(self.path), self.path, int(self.path.split(\".\")[3])))\n # Free lock to release next thread\n # self.lock.release()\n\n def ping(self, host):\n \"\"\"\n Returns True if host responds to a ping request\n \"\"\"\n import os, platform\n\n # Ping parameters as function of OS\n ping_str = \"-n 1\" if platform.system().lower() == \"windows\" else \"-c 1\"\n\n # Ping\n return os.system(\"ping \" + ping_str + \" \" + host) == 0\n\n\nclass MainUI(QtGui.QMainWindow):\n def __init__(self, lower, upper, master=None):\n QtGui.QMainWindow.__init__(self, master)\n self.setWindowTitle(\"IP Scanner\")\n # self.setWindowIcon(QtGui.QIcon(\"icon.png\"))\n\n self.lower = lower\n self.upper = upper\n\n self.myIP = str(socket.gethostbyname(socket.gethostname()))\n\n ipseg = self.myIP.split(\".\")\n self.path = \"\"\n for i in range(0, len(ipseg) - 1):\n self.path += ipseg[i] + \".\"\n\n self.create_ui()\n\n def create_ui(self):\n\n self.build_initial_ui()\n\n # Add all layouts to main container\n self.widget.setLayout(self.build_list(self.threaded_ping()))\n\n # Refresh the IP list every 20 seconds\n timer = QtCore.QTimer(self)\n timer.timeout.connect(self.update_ui)\n timer.start(20000)\n\n def build_initial_ui(self):\n self.widget = QtGui.QWidget(self)\n self.widget.setContentsMargins(20, 0, -20, 0)\n self.resize(300, 50)\n self.setCentralWidget(self.widget)\n\n def update_ui(self):\n list = self.build_list(self.threaded_ping())\n self.widget.destroy(True, True)\n self.build_initial_ui()\n self.widget.setLayout(list)\n\n def threaded_ping(self):\n # threadLock = threading.Lock()\n threads = []\n results = []\n\n for i in range(self.lower, self.upper + 1):\n thread = myThread(results, (self.path + str(i)))\n thread.start()\n threads.append(thread)\n\n for t in threads:\n t.join()\n\n return sorted(results, key=lambda ip: ip[2])\n\n def build_list(self, results):\n vbox = QtGui.QVBoxLayout()\n for i in range(0, len(results)):\n if results[i][0]:\n hbox = QtGui.QHBoxLayout()\n label = QtGui.QLabel()\n label.setText(results[i][1])\n label2 = QtGui.QLabel()\n label2.setText(\"Active\")\n hbox.addWidget(label)\n hbox.addWidget(label2)\n vbox.addLayout(hbox)\n\n return vbox\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n\n # UI updates every 20 seconds, given upper and lower bounds of IPs to scan over\n ui = MainUI(1, 200)\n ui.show()\n\n # Start the UI loop\n sys.exit(app.exec_())","sub_path":"Prototyping/BaseStation/IPChecker/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"633455315","text":"from business_register.models.kved_models import Kved\nfrom business_register.models.rfop_models import Rfop\nfrom business_register.models.ruo_models import State\nfrom data_ocean.converter import Converter, BulkCreateUpdateManager\nfrom data_ocean.models import Register\n\n\nclass RfopConverter(Converter):\n 
LOCAL_FILE_NAME = \"fop.xml\"\n API_ADDRESS_FOR_DATASET = Register.objects.get(source_register_id=\"1c7f3815-3259-45e0-bdf1-64dca07ddc10\").api_address\n CHUNK_SIZE = 200\n\n def rename_file(self, file):\n new_filename = file\n if (file.upper().find('UO') >= 0): new_filename = 'uo.xml'\n if (file.upper().find('FOP') >= 0): new_filename = 'fop.xml'\n return new_filename\n\n # list of models for clearing DB\n tables = [\n Rfop\n ]\n\n # format record's data\n record = {\n 'RECORD': '',\n 'FIO': '',\n 'ADDRESS': '',\n 'KVED': '',\n 'STAN': ''\n }\n\n # creating dictionaries for registration items that had writed to db\n state_dict = {} # dictionary uses for keeping whole model class objects\n kved_dict = {}\n\n bulk_manager = BulkCreateUpdateManager(CHUNK_SIZE)\n\n for state in State.objects.all():\n state_dict[state.name] = state\n for kved in Kved.objects.all():\n kved_dict[kved.code] = kved\n\n # writing entry to db\n def save_to_db(self, record):\n state = self.save_to_state_table(record)\n kved = self.get_kved_from_DB(record, 'FIO')\n self.save_to_rfop_table(record, state, kved)\n print('saved')\n\n # writing entry to state table\n def save_to_state_table(self, record):\n if record['STAN']:\n state_name = record['STAN']\n else:\n state_name = State.EMPTY_FIELD\n if not state_name in self.state_dict:\n state = State(\n name=state_name\n )\n state.save()\n self.state_dict[state_name] = state\n return state\n state = self.state_dict[state_name]\n return state\n\n # writing entry to rfop table\n def save_to_rfop_table(self, record, state, kved):\n rfop = Rfop(\n state=state,\n kved=kved,\n fullname=record['FIO'],\n address=record['ADDRESS']\n )\n self.bulk_manager.add(rfop)\n\n print(\n 'Rfop_class already imported. For start rewriting RFOP to the DB run > RfopConverter().process()\\n',\n 'For clear RFOP tables run > RfopConverter().clear_db()'\n )","sub_path":"business_register/converter/rfop.py","file_name":"rfop.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"627547949","text":"from django.shortcuts import render, HttpResponse\nfrom django.contrib.auth.hashers import make_password, check_password\n\n#FOR USE of rest_framework\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\n\n#from .serializers import *\nimport os\n\n# for compare the diffrent summnor name\nfrom lol_frontend.models import Account\nfrom lol_frontend.functionality.playercompare import compare\nfrom lol_frontend.functionality.playerPlayStyle import playStyle\nfrom lol_frontend.functionality.leaderboardGenerate import callTogenerate\nfrom lol_frontend.functionality.suggestion import generatesuggestion\nfrom lol_frontend.functionality.Match_data import getmatchdata\nfrom lol_frontend.variable import *\nimport json\n\n\ndef index(request):\n return render(request, 'build/index.html')\n\n\n@api_view(['POST'])\ndef leaderboard_view(request):\n \n res = {} \n #post request is like this { \"region\" : \"br1\" , \"queue\": \"RANKED_SOLO_5x5\" , \"league\":\"challengerleagues\" }\n if request.method == 'POST': \n try:\n region = request.data['region']\n queue = request.data['queue']\n league = request.data['league']\n\n val = region + '_'+league + '_' + queue\n res = leaderboard_DATA[val]\n except:\n res = {}\n Response(res)\n return Response(res)\n\n \n@api_view(['POST'])\ndef upcomingLeague_view(request):\n \n res = {}\n if request.method == 'POST':\n 
res = {}\n        try:\n            region = request.data['region']\n            res = upcomingleague_DATA[region]\n        except:\n            res = {}\n        return Response(res)\n    return Response(res)\n\n\n@api_view(['POST'])\ndef playercompare_view(request):\n\n    res = {}\n    #post request data like this \n    #{ \"region\": [\"na1\", \"br1\", \"la1\", \"oc1\"] , \"player\" : [\"Doublelift\", \"just gap mid\", \"Faststroke\", \"C1ock\"] }\n    if request.method == 'POST':\n        res = {}\n        try:\n            regionList = request.data['region']\n            summnorNameList = request.data['player']\n            res = compare(regionList, summnorNameList)\n        except:\n            res = {}\n        return Response(res)\n    return Response(res)\n\n    \n@api_view(['POST'])\ndef playerPlayStyle_view(request):\n    res = {}\n    \n    # post request like this { \"region\":\"na1\" , \"summnorname\":\"doublelift\"}\n    if request.method == 'POST':\n        try:\n            summnorName = request.data['summnorname']\n            region = request.data['region']\n            res = playStyle(region, summnorName)\n        except:\n            res = {}\n        return Response(res)\n    \n    return Response(res)\n\n@api_view(['POST'])\ndef suggestion_view(request):\n    res = {}\n\n    # post request like this { \"region\":\"na1\" , \"summnorname\":\"doublelift\"}\n    if request.method == 'POST':\n        try:\n            region = request.data['region']\n            summnorname = request.data['summnorname']\n            res = generatesuggestion(region, summnorname)\n        except:\n            res = {}\n        return Response(res)\n    \n    return Response(res)\n\n@api_view(['POST'])\ndef summonerdata(request):\n    \n    res = {}\n    # {\"region\":\"na1\" , \"summnorname\":\"doublelift\"}\n    if request.method == 'POST':\n        try:\n            region = request.data['region']\n            summnorname = request.data['summnorname']\n            res = getmatchdata(region, summnorname)\n        except:\n            res = {}\n\n        return Response(res) \n    return Response(res)\n\n\n\n\n@api_view(['POST'])\ndef registerview(request):\n    \n    res = {}\n    res['flag'] = False\n    # {\"fname\":\"xyz\" , \"lname\":\"xyz\", \"email\":\"xyz@gmail.com\", \"password\":\"123456\"}\n    if request.method == 'POST':\n        try:\n            fname = request.data['fname']\n            lname = request.data['lname']\n            email = request.data['email']\n            password = request.data['password']\n\n            fetchdb = list(Account.objects.filter(emailid= email).values())\n            password = make_password(password)\n            if len(fetchdb) != 0:\n                res['flag'] = False\n            else:\n                ob = Account(firstname=fname, \n                            lastname=lname,\n                            emailid=email,\n                            password=password\n                            )\n                ob.save()\n                res['flag'] = True \n            \n        except:\n            res = {}\n            res['flag'] = False\n\n        return Response(res) \n    return Response(res)\n\n\n\n@api_view(['POST'])\ndef loginview(request):\n    \n    res = {}\n    res['flag'] = False\n    # {\"email\":\"xyz@gmail.com\", \"password\":\"123456\"}\n    if request.method == 'POST':\n        try:\n            email = request.data['email']\n            password = request.data['password']\n            fetchdb = list(Account.objects.filter(emailid= email).values())\n            if len(fetchdb) == 0:\n                res['flag'] = False\n            else:\n                encoded = fetchdb[0]['password']\n                if check_password(password, encoded):\n                    res['flag'] = True\n                else:\n                    res['flag'] = False\n        except:\n            res = {}\n            res['flag'] = False\n\n        return Response(res) \n    return Response(res)","sub_path":"Main_Project/lol_backend/lol_frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"117444585","text":"import urllib.request, urllib.error\nimport requests\nimport pandas as pd # library for CSV handling\nfrom bs4 import BeautifulSoup\nimport re # library for regular expressions\n\n################\n# SEO analysis\n################\n\n
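# Note: the search term is concatenated into the URL unencoded below; urllib.parse.quote_plus(inputWord) would be a safer way to build the query (suggestion, not used by this script).\n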
# Fetch info from the top page of Google search results\ninputWord = input(\"Enter a search word: \");\n\n# Prepare the columns\ncolumns = [\"TopTitle\", \"Link\", \"Description\", \"Keywords\", \"H1\", \"H2\"];\ndf = pd.DataFrame(columns=columns);\n\n\n# url: URL of a top search result\ndef getSeoParts(url):\n\n\tpattern = r'^https://|http://'\n\n\t# Check that the URL to fetch is a valid URL\n\turlcheck = re.match(pattern, url);\n\n\tseoParts = [];\n\tif urlcheck:\n\t\tres = requests.get(url);\n\n\t\tsoup = BeautifulSoup(res.text, \"html.parser\");\n\n\t\t##############\n\t\t# For SEO we care about description, keywords, title, h1, h2, h3\n\t\t# so fetch all of them\n\t\t##############\n\n\t\theaders = soup.find(\"head\");\n\t\tif headers is not None:\n\t\t\t# Get the description\n\t\t\tdescription = headers.find('meta', attrs={\"name\" : \"description\"});\n\t\t\tdescription = description.attrs['content'] if description is not None else '';\n\t\t\t\n\t\t\t# Get the keywords\n\t\t\tkeywords = headers.find('meta', attrs={\"name\" : \"keywords\"});\n\t\t\tkeywords = keywords.attrs['content'] if keywords is not None else '';\n\n\t\t\tprint(\"Description: \" + str(description));\n\t\t\tprint(\"Keywords: \" + str(keywords));\n\t\t\n\t\th1 = soup.find('h1');\n\t\th1 = str(h1.string) if h1 is not None else '';\n\t\th2 = soup.find('h2');\n\t\th2 = str(h2.string) if h2 is not None else '';\n\n\t\tprint(\"H1: \" + h1);\n\t\tprint(\"H2: \" + h2);\n\t\tprint('######################################'); \n\n\t\tseoParts = {\n\t\t\t'description' : description,\n\t\t\t'keywords' : keywords,\n\t\t\t'h1' : h1,\n\t\t\t'h2' : h2,\n\t\t};\n\n\t\treturn seoParts;\n\n\telse:\n\t\tprint(\"Error occurred, invalid url: \" + url);\n\n\ndef query_string_remove(url):\n\tidx = url.find('&');\n\treturn url[:idx] if idx != -1 else url;\n\n\n\nif inputWord != \"\":\n\n\trootUrl = \"https://www.google.co.jp/search?q=\";\n\n\tparam = \"&oq=\";\n\n\tparamSecure = \"&sourceid=chrome&ie=UTF-8\";\n\n\t# Build the search URL\n\taccessUrl = rootUrl + str(inputWord) + param + str(inputWord) + paramSecure;\n\n\tres = requests.get(accessUrl);\n\n\tsoup = BeautifulSoup(res.text, \"html.parser\");\n\n\tsearchContent = soup.find_all(class_=\"g\");\n\t\n\tfor content in searchContent:\n\t\t# The h3 tag holds the result title and URL, so grab it\n\t\th3 = content.find('h3');\n\t\tif h3 is not None:\n\t\t\t# Get the web page title from the Google search result\n\t\t\ttitle = h3.find('a').getText();\n\t\t\t\n\t\t\t# Strip the extra query parameters\n\t\t\tlink = query_string_remove(h3.find('a').get('href').replace(\"/url?q=\", \"\"));\n\n\t\t\t# Print for logging\n\t\t\tprint(\"Title: \" + title)\n\t\t\tprint(\"Link: \" + link);\n\n\t\t\t# Fetch the SEO (meta tag) contents\n\t\t\tseo = getSeoParts(link);\n\n\t\t\t# Skip this result if fetching the SEO data failed\n\t\t\tif seo is not None:\n\n\t\t\t\t# Write out to CSV\n\t\t\t\tcsv = pd.Series([title, link, seo['description'], seo['keywords'], seo['h1'], seo['h2']], columns);\n\t\t\t\tdf = df.append(csv, ignore_index=True);\n\t\t\t\tdf.to_csv(\"analyzeSEO.csv\", encoding=\"shift_jis\");\n\t\t\telse:\n\t\t\t\tcontinue;\nelse:\n\tprint(\"Please enter a search word\");\n\n","sub_path":"analyzeSEO.py","file_name":"analyzeSEO.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"341114762","text":"import inspect\nimport yaml\nimport torch\n\n\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    GREY = '\\033[90m'\n    @staticmethod\n    def bold(x):\n        return bcolors.BOLD + x + bcolors.ENDC\n\n    @staticmethod\n    def grey(x):\n        return bcolors.GREY + x + bcolors.ENDC\n\n\nclass Visualizer:\n    @staticmethod\n    def 
pad(line, n):\n assert(len(line) < n)\n padding = n - len(line)\n left = ' ' * (padding // 2)\n right = ' ' * (padding - padding // 2)\n return left + line + right\n\n @staticmethod\n def right_pad(line, n):\n assert(len(line) < n)\n padding = n - len(line)\n right = ' ' * padding\n return line + right\n\n @staticmethod\n def cat_horizontal(args):\n for i in range(len(args)):\n args[i] = args[i].split('\\n')\n\n lines = []\n for i in range(len(args[0])):\n lines.append(' '.join([x[i] for x in args]))\n\n return '\\n'.join(lines)\n\n @staticmethod\n def viz(model):\n if hasattr(model, 'viz'):\n raise NotImplementedError\n\n else:\n body = '| ' + str(type(model)) + ' |'\n head = '-' * len(body)\n return '\\n'.join([head, body, head])\n\n\ndef get_defaults(module):\n argspec = inspect.getargspec(module)\n defaults = {}\n\n if argspec.defaults:\n start = len(argspec.args) - len(argspec.defaults)\n for i, default in enumerate(argspec.defaults):\n key = argspec.args[start + i]\n defaults[key] = default\n\n return defaults\n\n\ndef get_object(str_, parent):\n components = str_.split('.')\n\n if len(components) == 1:\n return parent.__dict__[components[0]]\n else:\n components[0] = parent.__dict__[components[0]]\n return get_object('.'.join(components[1:]), components[0])\n\n\nclass Builder:\n def __init__(self, parent=None, path_=None, cf=None, **kwargs):\n\n assert(path_ or cf)\n\n self.kwargs = kwargs\n\n if path_:\n with open(path_) as f:\n self.cf = yaml.load(f)\n else:\n self.cf = cf\n\n self._isstem = True\n for key in self.cf.keys():\n if type(self.cf[key]) == dict:\n setattr(\n self,\n key,\n Builder(parent, cf=self.cf[key], **self.kwargs)\n )\n obj_ = getattr(self, key)\n obj_._isstem = False\n else:\n setattr(self, key, self.cf[key])\n\n self.module = None\n\n self.parent = parent\n\n def __repr__(self):\n out = []\n if self.module:\n lines = self.module.__repr__().split('\\n')\n lines[0] = ' ' + lines[0]\n lines = [bcolors.grey(x) for x in lines]\n out.extend(lines)\n for key in self.cf.keys():\n obj_ = getattr(self, key)\n if type(obj_) == Builder:\n out.append(bcolors.bold(key) + ':')\n lines = obj_.__repr__().split('\\n')\n for line in lines:\n out.append(line)\n else:\n value = getattr(self, key)\n\n if type(value) == str:\n if value.startswith('$'):\n value = self.kwargs[value[1:]].__repr__()\n if len(value) > 50:\n value = value[:50] + '...'\n out.append('{}: {}'.format(bcolors.bold(key), value))\n\n if self._isstem:\n return bcolors.bold('Builder:\\n') + '\\n '.join(out)\n else:\n return '\\n '.join(out)\n\n def load(self, path_):\n self.module.load_state_dict(torch.load(\n path_,\n map_location=lambda location, loc: location,\n ))\n\n return self.module\n\n def build(self):\n\n module = get_object(self.cf['model'], self.parent)\n\n keys_ = set(self.cf.keys() - {'model', 'args', 'pretrained'})\n\n if keys_:\n module_args = {}\n for key in keys_:\n module_args[key] = getattr(self, key).build()\n else:\n module_args = {}\n\n if 'args' in self.cf or 'pretrained' in self.cf:\n if 'args' in self.cf:\n cf_args = self.cf['args']\n elif'pretrained' in self.cf:\n with open(self.cf['pretrained'] + '/config.yaml') as f:\n cf_args = yaml.load(f)['model']['args']\n else:\n raise KeyError('args or pretrained must be present....')\n\n for key in cf_args:\n if type(cf_args[key]) == str:\n if cf_args[key].startswith('$'):\n cf_args[key] = self.kwargs[cf_args[key][1:]]\n else:\n cf_args = {}\n \n defaults = get_defaults(module)\n \n if defaults:\n print('Module {}'.format(module))\n for key in 
defaults.keys():\n if key not in cf_args and key not in module_args:\n print('\\tunspecified kwarg: {}: defaulted to ({})'.format(\n key, defaults[key],\n ))\n\n self.module = module(**{**module_args, **cf_args})\n\n if 'pretrained' in self.cf:\n print('loading pretrained state-dict for node {} from {}/model.pt'.format(\n module,\n self.cf['pretrained'],\n ))\n\n sd = torch.load(\n self.cf['pretrained'] + '/model.pt',\n map_location=lambda storage, loc: storage,\n )\n\n self.module.load_state_dict(sd)\n\n return self.module\n","sub_path":"torchabc/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"235773827","text":"import telebot\nfrom telebot import types\nfrom time import sleep\nimport app.config as config\nimport app.functions as functions\n\nbot = telebot.AsyncTeleBot(config.TELEGRAM_TOKEN)\n\n\n\n@bot.message_handler(commands=['start'])\ndef start_command(message):\n sleep(0.1)\n bot.send_message(message.chat.id, 'Доброго дня. Голосування триватиме з 8:55 до 13:45. Введіть свій індивідуальний код. Формат (XXX-XXX)')\n\n\n@bot.message_handler(commands=['exit'])\ndef exit_command(message):\n if not message.chat.id == 395809791:\n return\n sleep(0.1)\n if config.isWork:\n config.isWork = False\n else:\n config.isWork = True\n bot.send_message(message.chat.id, str(config.isWork))\n\n\n@bot.message_handler(commands=[\"look\"])\ndef look_command(message):\n if not message.chat.id == 395809791:\n return\n sleep(0.1)\n try:\n bot.send_message(message.chat.id, functions.look(message.text.split(\" \")[1]))\n except:\n bot.send_message(message.chat.id,'No')\n\n\n@bot.message_handler(commands=['admin'])\ndef admin_command(message):\n sleep(0.1)\n if not message.chat.id == 395809791:\n return\n try:\n command, name, key = list(map(str, message.text.split(\" \")))\n if functions.admin_send_answer(name, key):\n bot.send_message(message.chat.id,'OK')\n else:\n bot.send_message(message.chat.id,'BAD')\n except:\n bot.send_message(message.chat.id,'BAD')\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef answer_message(message):\n sleep(0.1)\n if functions.check_message(message.text):\n if not functions.check_code(message.text, str(message.chat.id)):\n return\n functions.commit_key(message.text, str(message.chat.id))\n name = functions.get_name(str(message.chat.id))\n keyboard = types.InlineKeyboardMarkup()\n callback_button = types.InlineKeyboardButton(text=\"Детальніше...\", url=config.MAIN_URL)\n keyboard.add(callback_button)\n bot.send_message(message.chat.id, name + ', дякуємо за інтерес до життя Рішельєвського ліцею! Ознайомитися з програмою наших кандидатів можна тут.', reply_markup=keyboard)\n keyboard = types.InlineKeyboardMarkup()\n sleep(0.1)\n keyboard.add(types.InlineKeyboardButton(text='Анастасия Гуренко', callback_data='Анастасия Гуренко'))\n keyboard.add(types.InlineKeyboardButton(text='Генч Деніз', callback_data='Генч Деніз'))\n keyboard.add(types.InlineKeyboardButton(text='Шиндер Михайло', callback_data='Шиндер Михайло'))\n if not config.isWork:\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=info+'Голосування не йде. Голосування за Віце-п��езидента парламенту Рішельєвського ліцею. Натисніть щоб вибрати кандидата, у вас ще буде можливість змінити рішення протягом 3 хвилин.', reply_markup=keyboard)\n else:\n bot.send_message(message.chat.id, 'Голосування за Віце-президента парламенту Рішельєвського ліцею. 
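# Two portability notes on the Builder module above: inspect.getargspec was
# removed in Python 3.11, and yaml.load without an explicit Loader has been
# deprecated since PyYAML 5.1 and rejected since 6.0. A minimal sketch of
# drop-in replacements under those assumptions; for the plain keyword
# defaults the module inspects, the behavior is the same.
import inspect
import yaml


def get_defaults(callable_):
    """Map parameter name -> default for parameters that declare one."""
    sig = inspect.signature(callable_)
    return {
        name: p.default
        for name, p in sig.parameters.items()
        if p.default is not inspect.Parameter.empty
    }


def load_config(path_):
    with open(path_) as f:
        return yaml.safe_load(f)  # safe_load: no arbitrary object construction


# quick check of the signature-based version
def demo(a, b=1, c="x"):
    pass

assert get_defaults(demo) == {"b": 1, "c": "x"}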
Натисніть щоб вибрати кандидата, у вас ще буде можливість змінити рішення.', reply_markup=keyboard)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n sleep(0.1)\n if call.message:\n \"\"\"\n if not functions.check_time(str(call.message.chat.id)):\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=\"На жаль відповіді більше не приймаються, ще раз дякуємо.\")\n return\n \"\"\"\n info = ''\n keyboard = types.InlineKeyboardMarkup()\n keyboard.add(types.InlineKeyboardButton(text='Анастасия Гуренко', callback_data='Анастасия Гуренко'))\n keyboard.add(types.InlineKeyboardButton(text='Генч Деніз', callback_data='Генч Деніз'))\n keyboard.add(types.InlineKeyboardButton(text='Шиндер Михайло', callback_data='Шиндер Михайло'))\n if not config.isWork:\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=info+'Голосування не йде. Голосування за Віце-президента парламенту Рішельєвського ліцею. Натисніть щоб вибрати кандидата, у вас ще буде можливість змінити рішення протягом 3 хвилин.', reply_markup=keyboard)\n return\n if call.data == 'Анастасия Гуренко':\n info = 'Ви проголосували за Анастасию Гуренко. '\n functions.send_answer(str(call.message.chat.id), 'Анастасия Гуренко')\n elif call.data == 'Генч Деніз':\n info = 'Ви проголосували за Генча Деніза. '\n functions.send_answer(str(call.message.chat.id), 'Генч Деніз')\n elif call.data == 'Шиндер Михайло':\n info = 'Ви проголосували за Шиндера Михайла. '\n functions.send_answer(str(call.message.chat.id), 'Шиндер Михайло')\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=info+'Голосування за Віце-президента парламенту Рішельєвського ліцею. Натисніть щоб вибрати кандидата, у вас ще буде можливість змінити рішення.', reply_markup=keyboard)\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"110595222","text":"#node for singly linked list\r\nclass Node():\r\n def __init__(self,data = None):\r\n self.data = data\r\n self.next = None\r\n self.prev = None\r\n\r\nclass QueueBasedOnSinglyLinkedList():\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n self.size = 0\r\n\r\n def Enqueue(self,data):\r\n node = Node(data)\r\n if self.tail:\r\n node.prev = self.tail\r\n self.tail.next = node\r\n self.tail = node\r\n else:\r\n self.head = node\r\n self.tail = node\r\n self.size += 1\r\n\r\n def Dequeue(self):\r\n current = self.head\r\n if current:\r\n data = current.data\r\n if self.size == 1:\r\n self.head = None\r\n self.tail = None\r\n else:\r\n self.head = self.head.next\r\n self.head.prev = None\r\n self.size -= 1\r\n return data\r\n return None\r\n\r\n def Traverse(self):\r\n current = self.head\r\n while current:\r\n print(current.data,end=' ')\r\n current = current.next\r\n\r\nif __name__ == '__main__':\r\n queue = QueueBasedOnSinglyLinkedList()\r\n queue.Enqueue(1)\r\n print('after appending the first element')\r\n queue.Traverse()\r\n popped = queue.Dequeue()\r\n print(\"\\nthe popped element is\\t\",popped)\r\n queue.Traverse()\r\n popped = queue.Dequeue()\r\n print(\"\\nthe popped element is\\t\", popped)\r\n queue.Traverse()\r\n queue.Enqueue(1)\r\n queue.Enqueue(2)\r\n queue.Enqueue(3)\r\n queue.Enqueue(4)\r\n queue.Enqueue(5)\r\n 
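# In answer_message above, the `if not config.isWork:` branch calls
# bot.edit_message_text with `call.message` and `info` -- names that only
# exist inside callback_inline -- so reaching that branch raises NameError
# (one copy of the prompt string is also mojibake-damaged; the clean
# spelling is "Віце-президента"). A minimal sketch of the branch rewritten
# against the names answer_message actually has; `send_vote_prompt` is a
# hypothetical helper, with the prompt texts kept from the original.
def send_vote_prompt(bot, message, keyboard, is_work):
    if not is_work:
        bot.send_message(
            message.chat.id,
            'Голосування не йде. Голосування за Віце-президента парламенту '
            'Рішельєвського ліцею. Натисніть щоб вибрати кандидата, у вас ще '
            'буде можливість змінити рішення протягом 3 хвилин.',
            reply_markup=keyboard,
        )
    else:
        bot.send_message(
            message.chat.id,
            'Голосування за Віце-президента парламенту Рішельєвського ліцею. '
            'Натисніть щоб вибрати кандидата, у вас ще буде можливість '
            'змінити рішення.',
            reply_markup=keyboard,
        )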
queue.Traverse()\r\n\r\n","sub_path":"Queue/Node_based_using_doublylinedlist_to_form_queue.py","file_name":"Node_based_using_doublylinedlist_to_form_queue.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"190286010","text":"#!/usr/bin/env python3\n# Python script to install MyVim\nimport os\nimport sys\n\nCWD = os.getcwd ()\nHOME = os.path.expanduser ('~')\nVIMDIR = os.path.join (HOME, '.vim')\nVIMRC = os.path.join (HOME, '.vimrc')\n\ndef command (cmd):\n\tif 0 != os.system (cmd):\n\t\tprint ('Could not execute {0}'. format (cmd))\n\t\tsys.exit (1)\n\ndef clean_myvim ():\n\tprint ('check %s' % VIMDIR)\n\tif os.path.exists (VIMDIR):\n\t\tprint ('removing %s' % VIMDIR)\n\t\tcommand ('rm -rf %s' % VIMDIR)\n\tif os.path.exists (VIMRC):\n\t\tcommand ('rm -rf %s' % VIMRC)\n\ndef make_dirs ():\n\tif not os.path.exists (VIMDIR):\n\t\tcommand ('mkdir -p %s' % VIMDIR)\n\ndef clone_vundle ():\n\tbundle = os.path.join (VIMDIR, 'bundle')\n\tif not os.path.exists (bundle):\n\t\tcommand ('mkdir -p %s' % bundle)\n\tos.chdir (bundle)\n\tprint ('Cloning Plungin manager Vundle in %s' % os.getcwd ())\n\tcommand ('git clone https://github.com/gmarik/Vundle.vim.git')\n\n\ndef link_myvim ():\n\tcommand ('ln -sf %s/colors %s/colors' % (CWD, VIMDIR))\n\tcommand ('ln -sf %s/ftdetect %s/ftdetect' % (CWD, VIMDIR))\n\tcommand ('ln -sf %s/syntax %s/syntax' % (CWD, VIMDIR))\n\tcommand ('ln -sf %s/markdown %s/markdown' % (CWD, VIMDIR))\n\tcommand ('ln -sf %s/ycm %s/ycm' % (CWD, VIMDIR))\n\tcommand ('ln -sf %s/.vimrc %s/.vimrc' % (CWD, HOME))\n\ndef setup_fonts ():\n\tos.chdir (CWD)\n\tfont_dir = os.path.join (HOME, '.fonts')\n\tif not os.path.exists (font_dir):\n\t\tcommand ('mkdir -p %s' % font_dir)\n\tcommand ('cp ./Monaco_Linux-Powerline.ttf %s' % font_dir)\n\tcommand ('fc-cache -fv')\n\ndef install_ctags ():\n\tcommand ('sudo apt-get install exuberant-ctags')\n\n\nif __name__ == '__main__':\n\tif 'clean' in sys.argv:\n\t\tclean_myvim ()\n\telif 'base' in sys.argv:\n\t\tmake_dirs ()\n\t\tclone_vundle ()\n\telif 'link' in sys.argv:\n\t\tlink_myvim ()\n\telif 'setup' in sys.argv:\n\t\tinstall_ctags ()\n\t\tsetup_fonts ()\n\telif 'all' in sys.argv:\n\t\tclean_myvim ()\n\t\tmake_dirs ()\n\t\tclone_vundle ()\n\t\tlink_myvim ()\n\t\tsetup_fonts ()\n\t\tinstall_ctags ()\n\telse:\n\t\tprint ('No command provided (clean|clone|install|link|all)')\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"537046103","text":"import os\nfrom setuptools import setup, find_packages\n\nname = \"plone.recipe.precompiler\"\nversion = '0.7.1'\n\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\n\nlong_description = \"\"\"%s\n\nChange history\n==============\n\nChangelog for %s.\n\n%s\n\nContributors\n============\n\n%s\n\n\"\"\" % (\n read('README.rst'),\n name,\n read('docs', 'HISTORY.txt'),\n read('docs', 'CONTRIBUTORS.txt'),\n)\n\nsetup(\n name=name,\n version=version,\n author=\"Steve McMahon\",\n author_email=\"steve@dcn.org\",\n description=\"zc.buildout recipe to precompile python and po files.\",\n long_description=long_description,\n license=\"GPL v 2\",\n keywords=\"buildout\",\n url='https://github.com/plone/plone.recipe.precompiler',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: 
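# The queue above keeps both next and prev pointers, so despite the
# "QueueBasedOnSinglyLinkedList" name it is a doubly linked list (as the
# file name admits), and the prev links are not needed for FIFO behavior.
# For reference, the standard-library equivalent with the same O(1)
# enqueue/dequeue is collections.deque -- a minimal sketch mirroring the
# demo run above.
from collections import deque

queue = deque()
queue.append(1)                                # Enqueue
print('after appending the first element', list(queue))
popped = queue.popleft()                       # Dequeue
print('the popped element is', popped)
popped = queue.popleft() if queue else None    # empty-safe, like the None return
print('the popped element is', popped)
for item in (1, 2, 3, 4, 5):
    queue.append(item)
print(*queue)                                  # Traverse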
OSI Approved :: GNU General Public License (GPL)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Framework :: Buildout',\n ],\n packages=find_packages('src'),\n include_package_data=True,\n package_dir={'': 'src'},\n namespace_packages=['plone', 'plone.recipe'],\n install_requires=[\n 'zc.buildout',\n 'setuptools',\n 'zc.recipe.egg',\n 'python_gettext',\n ],\n dependency_links=['http://download.zope.org/distribution/'],\n zip_safe=False,\n entry_points={'zc.buildout': ['default=%s:Recipe' % name]},\n)\n","sub_path":"pypi_install_script/plone.recipe.precompiler-0.7.1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"483844831","text":"\"\"\"\nTetris-race system implemented by Ivan Gushchin\nEnvironment, realizing the agent's behavior in the world, similar to the classic Tetris race\nwith the presence of a machine (agent) and walls (obstacles). The agent's goal is to reach the end,\navoiding collisions. Agent has two options to do - make left move of right move to avoid collision.\n\"\"\"\n# TO DO: future release - provide more agent' options\n\nimport logging\nimport math\nimport gym\nimport random\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport time\nimport pickle\n#import distutils\n\nlogger = logging.getLogger(__name__)\n\nclass TetrisRaceEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n def __init__(self, walls_num = 60, walls_spread = 5, episodes_to_run = 30,\n world_type = 'Fat',smooth_car_step = 5, level_difficulty ='Easy', car_spawn = 'Random'):\n super(TetrisRaceEnv, self).__init__()\n # unmutable gui var\n self.screen_width = 400\n self.screen_height = 604\n self.episode_bar_height = 4 # horizontal on top or on bottom\n self.border_width = self.screen_width / 20 # use for cur episode progress bar\n self.road_width = self.screen_width - self.border_width * 2\n self.road_height = self.screen_height - self.episode_bar_height\n\n # unmutable model var\n self.levels = 3\n self.walls_num = walls_num\n self.pass_wall = False\n self.pass_count = 0 # walls steps down on car\n self.episode_count = 1\n self.total_episodes = episodes_to_run\n self.y_steps_counter = 0\n\n # mutable gui var\n self.car_width = self.road_width / 12 if world_type == 'Fat' else 10\n self.car_height = 2 * self.car_width if world_type == 'Fat' else 6 * self.car_width\n self.wall_width = self.car_height\n self.wall_height = self.car_height / 3 if world_type == 'Fat' else self.car_height / 6\n\n self.walls_per_level = self.walls_num / self.levels # noob -> exp -> pro\n assert self.walls_num % self.levels == 0 , 'Number of walls per level is not integer. 
Please change value of' \\\n ' \"walls_num\" parameter.'\n self.walls_spread = walls_spread\n self.walls_x_num = round(self.road_width / self.wall_width)\n\n blck =[]\n par = [0] * self.levels\n self.level_difficulty = level_difficulty\n for i in range(self.levels,0,-1):\n if level_difficulty == 'Easy':\n par[i-1] = i+1\n else:\n par[i-1] = i\n [blck.append([(self.walls_x_num - i) - 2, self.walls_x_num - par[i-1]]) for i in\n range(self.levels, 0, -1)]\n self.wall_blocks_per_level = blck # range of max available number of bricks in wall by levels\n\n # mutable moves var\n self.spawn = car_spawn\n self.car_step = self.car_width / smooth_car_step if world_type == 'Fat' else self.car_width\n\n # mutabale model var\n cx = []\n # car states\n self.car_states_num = round((self.road_width - self.car_width) / self.car_step) + 1\n [cx.append(self.border_width + i * self.car_step) for i in range(0,self.car_states_num)]\n self.car_states = cx # car freedom to move by x\n\n self.wall_states_num = round(self.road_width / self.car_step) # ++!\n wx =[] #++!\n [wx.append(self.border_width + i * self.car_step) for i in range(0,self.wall_states_num)] #++!\n self.wall_states = wx #++!\n\n all_states = self.walls_num * self.walls_spread + self.walls_spread\n self.wall_field = np.zeros([all_states, self.wall_states_num])\n self.points_num = int(self.wall_states_num / self.walls_x_num)\n\n #wall states\n wall_count = 0\n cur_level = 0\n for i in range(self.wall_field.shape[0]):\n if i % self.walls_spread == 0 and i != 0:\n if wall_count % (self.walls_per_level-1) == 0 and wall_count != 0 and cur_level < self.levels-1:\n cur_level += 1\n w_pos = np.zeros(self.walls_x_num)\n\n for j in range(0, self.walls_x_num):\n oc = len((np.where(w_pos == 1))[0]) # ones counter\n rb = self.wall_blocks_per_level[cur_level][1] # range bound\n if oc >= rb:\n break\n else:\n w_pos[j] = random.getrandbits(1)\n\n min_val = self.wall_blocks_per_level[cur_level][0]\n accepted_z_n = self.walls_x_num-min_val\n if len(np.where(w_pos == 0)[0]) > accepted_z_n : # least min val for cur level\n ar = np.where(w_pos == 0)[0]\n num_o = len(np.where(w_pos == 1)[0])\n coord = np.random.choice(ar,min_val - num_o)\n w_pos[coord[:]] = 1\n\n w_ind = np.where(w_pos == 1)[0]\n s =[]; e =[]\n [s.append(i * self.points_num) for i in range(self.walls_x_num)]\n [e.append(i * self.points_num) for i in range(1,self.walls_x_num+1)]\n for j in range(0,len(w_ind)):\n self.wall_field[i][s[w_ind[j]]:e[w_ind[j]]] =1\n wall_count += 1\n\n self.action_space = spaces.Discrete(2)\n self.actions =np.array([0, 1])\n\n self._seed()\n self.reset()\n self.viewer = None\n\n # Just need to initialize the relevant attributes\n self._configure()\n\n def _configure(self, display=None):\n self.display = display\n\n def _complexity(self):\n self.path_complexity = 0\n zero = self.car_states.index(self.state[0])\n # - count value\n for i in range(self.wall_field.shape[0]):\n if np.any(self.wall_field[i] == 1):\n left_ways = np.where(self.wall_field[i][:zero-1] ==0)\n right_ways = np.where(self.wall_field[i][zero+1:-1] == 0)\n\n left_lenght = left_ways[0].shape[0] - 1\n left_go = zero - left_ways[0].max() if left_ways[0].shape[0] != 0 else 100\n right_go = right_ways[0].min() if right_ways[0].shape[0] != 0 else 100\n\n self.path_complexity += np.minimum(left_go, right_go)\n\n # - normalize value\n hardest_option_x = [self.car_states_num * 0.3, self.car_states_num * 0.4, self.car_states_num * 0.5]\n range_options = [int(self.walls_per_level * hardest_option_x[0]),\n 
int(self.walls_per_level * hardest_option_x[1]),\n int(self.walls_per_level * hardest_option_x[2])]\n normalized_complexity = self.path_complexity / np.sum(range_options)\n\n self.path_complexity = [self.path_complexity, round(normalized_complexity,3)]\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n\n self.cur_action = action\n car_x, wall_y, glob_st = self.state\n\n# wall vs car\n car_top = int(self.car_height / self.wall_height)\n nearest_wall_ind =abs(wall_y) + car_top\n direct_flag =True\n side_flag = True\n\n# ---- craash!?----------------\n CL = car_x\n CR = car_x + self.car_width\n condition = [False, False, False]\n delta = car_top + 1\n condition[2] = abs(self.state[1]) == self.wall_field.shape[0]- delta # epic win\n\n if condition[2] == False:\n if direct_flag and np.any(self.wall_field[nearest_wall_ind + 1] ==1.): # direct crash\n found_wall_ind = np.where(self.wall_field[nearest_wall_ind + 1] == 1.)[0]\n for i in range(0, len(found_wall_ind), self.points_num):\n tmp = self.car_states[found_wall_ind[i]:found_wall_ind[i]+self.points_num]\n WL = tmp[0]; WR = tmp[-1]\n if not (CR <= WL or CL >= WR):\n direct_flag = False\n break\n self.pass_wall = True\n\n if self.pass_wall and np.any(self.wall_field[nearest_wall_ind + 1] == 0.): # side crash\n block_ind = np.where(self.wall_field[nearest_wall_ind-self.pass_count] ==1.)\n block_ind = list(block_ind[0])\n block_xs = []; block_xe = []\n for i in range(0,len(block_ind),self.points_num):\n tmp = self.car_states[block_ind[i]:block_ind[i] + self.points_num]\n block_xs.append(tmp[0])\n block_xe.append(tmp[1])\n\n if side_flag and CR in block_xs or CR in block_xe or CL in block_xs or CL in block_xe:\n # to do: rewrite side crash handler in future releases\n side_flag = False\n self.pass_wall = False\n self.pass_count = 0\n\n self.pass_count += 1\n\n if self.pass_count == int(self.car_height / self.wall_height)+2:\n self.pass_wall = False\n self.pass_count =0\n self.wall_iterator += 1\n\n# car states inc / dec\n if car_x > self.car_states[0] and car_x < self.car_states[-1]:\n car_x = car_x + self.car_step if action else car_x - self.car_step\n if action == 1:\n glob_st = glob_st + self.car_step + self.road_width\n else:\n glob_st = glob_st - self.car_step + self.road_width\n elif car_x == self.car_states[0]:\n car_x += self.car_step\n glob_st = glob_st + self.car_step + self.road_width\n self.cur_action = 1\n else:\n car_x -= self.car_step\n glob_st = glob_st - self.car_step + self.road_width\n self.cur_action = 0\n# wall states dec\n wall_y -= 1\n\n self.state = car_x,wall_y, glob_st\n\n condition[0] = direct_flag == False\n condition[1] = side_flag == False\n\n done = bool(any(condition))\n\n if not done:\n reward = 0.\n else:\n self.episode_count += 1\n reward = -1.\n\n self.y_steps_counter += 1\n return np.array(self.state), reward, done, {}\n\n def _reset(self):\n self.wall_iterator = 1\n self.pass_wall = False\n self.pass_count = 0\n\n if self.spawn == 'Center':\n rnd = self.car_states[int(round(self.car_states_num / 2))]\n if self.spawn == 'Random':\n rnd = random.choice(self.car_states)\n self.state = (rnd, 0, rnd ) # cx, wy, global\n self._complexity()\n\n return np.array(self.state)\n\n def _render(self,mode = 'human', close = False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n from gym.envs.classic_control 
import rendering\n if self.viewer is None:\n self.viewer = rendering.Viewer(self.screen_width, self.screen_height, display=self.display)\n\n # finish line\n self.FGo = []\n for i in range(12):\n W = self.road_width / 12\n self.FGo.append(rendering.Transform())\n if i % 2 == 0:\n l, r, t, b = self.border_width + W*i, self.border_width + W*i + W, \\\n (self.wall_field.shape[0] - 1) * self.wall_height, \\\n (self.wall_field.shape[0] - 1) * self.wall_height - self.wall_height\n Fin = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n Fin.add_attr(self.FGo[i])\n Fin.set_color(0.4, 0.5, 0.5)\n self.viewer.add_geom(Fin)\n else:\n l, r, t, b = W + self.border_width + W * i, self.border_width + W * i, \\\n (self.wall_field.shape[0] - 2) * self.wall_height, \\\n (self.wall_field.shape[0] - 2) * self.wall_height - self.wall_height\n Fin = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n Fin.add_attr(self.FGo[i])\n Fin.set_color(0.4, 0.5, 0.5)\n self.viewer.add_geom(Fin)\n\n # episode progress bar and slider\n Episode = rendering.FilledPolygon([(self.border_width,self.road_height),\n (self.border_width,self.screen_height),\n (self.screen_width - self.border_width,self.screen_height),\n (self.screen_width - self.border_width, self.road_height)\n ])\n Episode.set_color(0, 0, 0)\n self.viewer.add_geom(Episode)\n\n EpisodeSlider = rendering.FilledPolygon([(self.border_width,self.road_height),\n (self.border_width,self.screen_height),\n (self.border_width + self.car_step, self.screen_height),\n (self.border_width + self.car_step, self.road_height)\n ])\n self.EpisodeGo = rendering.Transform()\n EpisodeSlider.add_attr(self.EpisodeGo)\n EpisodeSlider.set_color( 1, 0, 1)\n self.viewer.add_geom(EpisodeSlider)\n\n # borders\n LeftBorder = rendering.FilledPolygon([(0,0),(0,self.screen_height),\n (self.border_width,self.screen_height),\n (self.border_width,0)\n ])\n RightBorder = rendering.FilledPolygon([(self.screen_width - self.border_width, 0),\n (self.screen_width - self.border_width, self.screen_height),\n (self.screen_width,self.screen_height),\n (self.screen_width,0)\n ])\n LeftTrack = rendering.Line((self.border_width,0),(self.border_width,self.screen_height))\n RightTrack = rendering.Line((self.screen_width - self.border_width,0),\n (self.screen_width - self.border_width,self.screen_height))\n LeftBorder.set_color(.8, .6, .4)\n LeftTrack.set_color(0, 0, 0)\n RightBorder.set_color(.8, .6, .4)\n RightTrack.set_color(0, 0, 0)\n self.viewer.add_geom(LeftBorder)\n self.viewer.add_geom(LeftTrack)\n self.viewer.add_geom(RightBorder)\n self.viewer.add_geom(RightTrack)\n\n # progress bar scale and slider\n scale = self.screen_height / self.walls_num\n for k in range(0, self.walls_num):\n self.track_l = rendering.Line((0, k * scale), (self.border_width, k * scale))\n self.track_r = rendering.Line((self.screen_width - self.border_width, k * scale),\n (self.screen_width, k * scale))\n self.track_l.set_color(0, 0, 0)\n self.track_r.set_color(0, 0, 0)\n self.viewer.add_geom(self.track_l)\n self.viewer.add_geom(self.track_r)\n\n self.BarStep = (self.wall_iterator - 1) * (self.screen_height / self.walls_num)\n ProgressBar_L = rendering.FilledPolygon([(0, 0),\n (0, self.BarStep + scale),\n (self.border_width, self.BarStep + scale),\n (self.border_width, 0)])\n ProgressBar_R = rendering.FilledPolygon([(self.screen_width - self.border_width, 0),\n (self.screen_width - self.border_width, self.BarStep + scale),\n (self.screen_width, self.BarStep + scale),\n (self.screen_width, 0)])\n self.BarLGo = 
rendering.Transform()\n self.BarRGo = rendering.Transform()\n ProgressBar_L.add_attr(self.BarLGo)\n ProgressBar_R.add_attr(self.BarRGo)\n ProgressBar_L.set_color(.1, .5, .9)\n ProgressBar_R.set_color(.1, .5, .9)\n self.viewer.add_geom(ProgressBar_L)\n self.viewer.add_geom(ProgressBar_R)\n\n # --- walls ----\n self.WGo = []\n by_y = self.wall_field.shape[0]\n by_x = self.wall_field.shape[1]\n\n w_c = 0\n for i in range(by_y-1,-1,-1): # each line\n self.WGo.append(rendering.Transform())\n if np.any(self.wall_field[i]) == 1.0: # got wall\n ind = np.where(self.wall_field[i] == 1.0)[0]\n walls_amount = int(len(ind)/ self.points_num)\n tmp = np.split(ind, walls_amount)\n\n for j in range(0, walls_amount):\n xs = tmp[j][0]\n xe = tmp[j][-1]\n yt = i\n\n l,r,t,b = self.wall_states[xs],self.wall_states[xe] + self.car_step,\\\n yt*self.wall_height,yt*self.wall_height - self.wall_height\n # print(l,r)\n WWW = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n WWW.add_attr(self.WGo[w_c])\n WWW.set_color(.9, .0, .0)\n self.viewer.add_geom(WWW)\n w_c += 1\n\n # --- car ---\n l, r, t, b = 0, self.car_width, self.car_height , -self.car_height\n Car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n self.CarGo = rendering.Transform()\n Car.add_attr(self.CarGo)\n self.viewer.add_geom(Car)\n\n x = self.state\n\n carx = x[0]\n self.CarGo.set_translation(carx, 0)\n\n # walls move\n for i in range(0, self.wall_field.shape[0]):\n self.WGo[i].set_translation(0, x[1] * self.wall_height)\n\n # progress bars move\n self.BarStep = (self.wall_iterator - 1) * (self.screen_height / self.walls_num)\n self.BarLGo.set_translation(0, self.BarStep)\n self.BarRGo.set_translation(0, self.BarStep)\n ep_step = self.road_width / self.total_episodes\n self.EpisodeGo.set_translation((self.episode_count-1) * ep_step,0)\n for i in range(12):\n self.FGo[i].set_translation(0, x[1] * self.wall_height)\n\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n\n\n\n\n\n","sub_path":"tetris_race.py","file_name":"tetris_race.py","file_ext":"py","file_size_in_byte":19012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"221802285","text":"UseIdealEstimator=1\n\n# Physical properties\nMass = 0.5\nL = 0.17\nIxx = 0.0023\nIyy = 0.0023\nIzz = 0.0046\nkappa = 0.016\nminMotorThrust = .1\nmaxMotorThrust = 4.5\n\n# Position control gains\nkpPosXY = 32\nkpPosZ = 50\nKiPosZ = 30\n\n# Velocity control gains\nkpVelXY = 13\nkpVelZ = 9\n\n# Angle control gains\nkpBank = 7\nkpYaw = 2\n\n# Angle rate gains\nkpPQR = 90, 90, 6\n\n# limits\nmaxAscentRate = 5\nmaxDescentRate = 2\nmaxSpeedXY = 5\nmaxHorizAccel = 12\nmaxTiltAngle = .7","sub_path":"mavic_Edit_python/controllers/mavic2proPython/PARAMETERS.py","file_name":"PARAMETERS.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"311742781","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2019-2023 Ramon van der Winkel.\n# All rights reserved.\n# Licensed under BSD-3-Clause-Clear. 
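# The environment above follows the old (pre-0.9) gym convention in which
# gym.Env dispatches reset()/step()/render() to _reset/_step/_render, so
# this rollout calls the underscore methods directly. A minimal random-agent
# sketch; the module name tetris_race is taken from the record's sub_path,
# and a matching old gym install is assumed.
from tetris_race import TetrisRaceEnv

env = TetrisRaceEnv(walls_num=60, walls_spread=5, episodes_to_run=1)
obs = env._reset()
steps = 0
done = False
while not done:
    action = env.action_space.sample()         # 0 = move left, 1 = move right
    obs, reward, done, _ = env._step(action)   # old 4-tuple step API
    steps += 1
print('episode finished after', steps, 'steps with reward', reward)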
See LICENSE file for details.\n\nfrom django.test import TestCase\nfrom Competitie.definities import DEEL_RK, DEEL_BK\nfrom Competitie.models import Regiocompetitie, Kampioenschap\nfrom Competitie.operations import competities_aanmaken\nfrom Functie.operations import maak_functie\nfrom Geo.models import Rayon, Regio, Cluster\nfrom Locatie.definities import BAAN_TYPE_BUITEN, BAAN_TYPE_EXTERN, BAAN_TYPE_ONBEKEND\nfrom Locatie.models import Locatie\nfrom Sporter.models import Sporter\nfrom TestHelpers.e2ehelpers import E2EHelpers\nfrom TestHelpers import testdata\nfrom Vereniging.models import Vereniging\nfrom Vereniging.models2 import Secretaris\nimport datetime\n\n\nclass TestVerenigingenLijst(E2EHelpers, TestCase):\n\n \"\"\" tests voor de Vereniging applicatie, Lijst Verenigingen \"\"\"\n\n url_lijst = '/vereniging/lijst/'\n url_lijst_details = '/vereniging/lijst/%s/' # ver_nr\n url_geen_beheerders = '/vereniging/contact-geen-beheerders/'\n\n testdata = None\n\n @classmethod\n def setUpTestData(cls):\n cls.testdata = testdata.TestData()\n cls.testdata.maak_accounts_admin_en_bb()\n\n def _prep_beheerder_lid(self, voornaam):\n lid_nr = self._next_lid_nr\n self._next_lid_nr += 1\n\n lid = Sporter(\n lid_nr=lid_nr,\n geslacht=\"M\",\n voornaam=voornaam,\n achternaam=\"Tester\",\n email=voornaam.lower() + \"@test.not\",\n geboorte_datum=datetime.date(year=1972, month=3, day=4),\n sinds_datum=datetime.date(year=2010, month=11, day=12),\n bij_vereniging=self._ver)\n lid.save()\n\n return self.e2e_create_account(lid_nr, lid.email, E2EHelpers.WACHTWOORD, accepteer_vhpg=True)\n\n def setUp(self):\n \"\"\" eenmalige setup voor alle tests\n wordt als eerste aangeroepen\n \"\"\"\n self._next_lid_nr = 100001\n\n self.rayon_2 = Rayon.objects.get(rayon_nr=2)\n self.regio_101 = Regio.objects.get(regio_nr=101)\n\n # maak een test vereniging\n ver = Vereniging(\n naam=\"Grote Club\",\n ver_nr=1000,\n regio=self.regio_101)\n ver.save()\n self._ver = ver # wordt gebruikt door _prep_beheerder_lid\n self.ver1 = ver\n\n # maak de beheerders aan van deze vereniging\n self.functie_sec = maak_functie(\"SEC Vereniging %s\" % ver.ver_nr, \"SEC\")\n self.functie_sec.vereniging = ver\n self.functie_sec.save(update_fields=['vereniging'])\n\n self.functie_hwl = maak_functie(\"HWL Vereniging %s\" % ver.ver_nr, \"HWL\")\n self.functie_hwl.vereniging = ver\n self.functie_hwl.save(update_fields=['vereniging'])\n\n self.functie_wl = maak_functie(\"WL Vereniging %s\" % ver.ver_nr, \"WL\")\n self.functie_wl.vereniging = ver\n self.functie_wl.save(update_fields=['vereniging'])\n\n # maak test leden aan die we kunnen koppelen aan beheerders functies\n self.account_bko = self._prep_beheerder_lid('BKO')\n self.account_rko = self._prep_beheerder_lid('RKO')\n self.account_rcl = self._prep_beheerder_lid('RCL')\n self.account_hwl = self._prep_beheerder_lid('HWL')\n self.account_schutter = self._prep_beheerder_lid('Schutter')\n\n # referentie uit de CRM welke leden secretaris zijn\n lid_nr = self._next_lid_nr\n self._next_lid_nr += 1\n lid = Sporter(\n lid_nr=lid_nr,\n geslacht=\"M\",\n voornaam=\"Secretaris\",\n achternaam=\"Zonder account\",\n email=\"secretaris@test.not\",\n geboorte_datum=datetime.date(year=1972, month=3, day=4),\n sinds_datum=datetime.date(year=2010, month=11, day=12),\n bij_vereniging=self.ver1)\n lid.save()\n self.sec = Secretaris(vereniging=self.ver1)\n self.sec.save()\n self.sec.sporters.add(lid)\n\n # creëer een competitie met regiocompetities\n competities_aanmaken(jaar=2019)\n\n self.functie_bko = 
Kampioenschap.objects.filter(deel=DEEL_BK)[0].functie\n self.functie_rko = Kampioenschap.objects.filter(deel=DEEL_RK, rayon=self.rayon_2)[0].functie\n self.functie_rcl = Regiocompetitie.objects.filter(regio=self.regio_101)[0].functie\n\n self.functie_bko.accounts.add(self.account_bko)\n self.functie_rko.accounts.add(self.account_rko)\n self.functie_rcl.accounts.add(self.account_rcl)\n self.functie_hwl.accounts.add(self.account_hwl)\n\n # maak nog een test vereniging, zonder HWL functie\n ver = Vereniging(\n naam=\"Kleine Club\",\n ver_nr=1100,\n regio=self.regio_101)\n ver.save()\n # stop de vereniging in clusters\n cluster = Cluster.objects.filter(regio=ver.regio, gebruik='18').first()\n ver.clusters.add(cluster)\n cluster = Cluster.objects.filter(regio=ver.regio, gebruik='25').all()[2]\n ver.clusters.add(cluster)\n self.ver2 = ver\n\n # geef een verenigingen alle mogelijke externe locaties\n loc = Locatie(baan_type=BAAN_TYPE_BUITEN)\n loc.save()\n loc.verenigingen.add(self.ver1)\n self.loc_buiten = loc\n\n loc = Locatie(baan_type=BAAN_TYPE_EXTERN)\n loc.save()\n loc.verenigingen.add(self.ver1)\n\n loc = Locatie(baan_type=BAAN_TYPE_ONBEKEND)\n loc.save()\n loc.verenigingen.add(self.ver1)\n self.loc_binnen = loc\n\n def test_anon(self):\n self.e2e_logout()\n with self.assert_max_queries(20):\n resp = self.client.get(self.url_lijst)\n self.assert403(resp)\n self.e2e_assert_other_http_commands_not_supported(self.url_lijst)\n\n def test_it(self):\n # landelijke lijst met rayon & regio + leden aantallen\n self.testdata.account_bb.is_staff = True\n self.testdata.account_bb.save(update_fields=['is_staff'])\n\n self.e2e_login_and_pass_otp(self.testdata.account_bb)\n self.e2e_wisselnaarrol_bb()\n self.e2e_check_rol('BB')\n\n with self.assert_max_queries(12):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n def test_bb(self):\n # landelijke lijst met rayon & regio\n self.e2e_login_and_pass_otp(self.testdata.account_bb)\n self.e2e_wisselnaarrol_bb()\n self.e2e_check_rol('BB')\n\n with self.assert_max_queries(11):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n def test_competitie_beheerders(self):\n # landelijke lijst met rayon & regio\n self.e2e_login_and_pass_otp(self.account_bko)\n self.e2e_wissel_naar_functie(self.functie_bko)\n self.e2e_check_rol('BKO')\n\n with self.assert_max_queries(12):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n # rayon lijst met regio kolom (geen rayon kolom)\n self.e2e_login_and_pass_otp(self.account_rko)\n self.e2e_wissel_naar_functie(self.functie_rko)\n self.e2e_check_rol('RKO')\n\n with self.assert_max_queries(7):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n # regio lijst met hwls (zonder rayon/regio kolommen)\n self.e2e_login_and_pass_otp(self.account_rcl)\n self.e2e_wissel_naar_functie(self.functie_rcl)\n self.e2e_check_rol('RCL')\n\n with self.assert_max_queries(9):\n resp = self.client.get(self.url_lijst)\n 
self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n self.e2e_assert_other_http_commands_not_supported(self.url_lijst)\n\n # verenigingen 1 en 2 horen beide bij regio 101\n # stop ze een voor een in een eigen cluster\n\n # maak een cluster aan en stop ver1 erin\n cluster = Cluster(\n regio=self.ver1.regio,\n letter='Y',\n naam=\"Bovenlijns\",\n gebruik='18')\n cluster.save()\n self.ver1.clusters.add(cluster)\n\n with self.assert_max_queries(9):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n with self.assert_max_queries(20):\n resp = self.client.get(self.url_lijst_details % self.ver1.pk)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n # stop ver2 in hetzelfde cluster\n self.ver2.cluster = cluster\n self.ver2.save()\n\n with self.assert_max_queries(9):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n # stop ver2 in een apart cluster\n cluster = Cluster(\n regio=self.ver1.regio,\n letter='Z',\n naam=\"Onderlijns\",\n gebruik='18')\n cluster.save()\n self.ver2.cluster = cluster\n self.ver2.save()\n\n with self.assert_max_queries(9):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n def test_hwl(self):\n # de hwl krijgt dezelfde lijst als de rcl\n self.e2e_login_and_pass_otp(self.account_hwl)\n self.e2e_wissel_naar_functie(self.functie_hwl)\n self.e2e_check_rol('HWL')\n\n with self.assert_max_queries(9):\n resp = self.client.get(self.url_lijst)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst.dtl', 'plein/site_layout.dtl'))\n\n resp = self.client.get(self.url_lijst_details % 999999)\n self.assert404(resp, 'Geen valide vereniging')\n\n url = self.url_lijst_details % self.ver1.pk\n\n with self.assert_max_queries(20):\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n # corner cases\n self.loc_buiten.zichtbaar = False\n self.loc_buiten.save(update_fields=['zichtbaar'])\n with self.assert_max_queries(20):\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n self.loc_buiten.delete()\n self.loc_binnen.delete()\n with self.assert_max_queries(20):\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n self.assertTrue(str(self.sec) != '')\n\n self.sec.delete()\n with self.assert_max_queries(20):\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, 
('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n self.functie_sec.accounts.add(self.account_hwl)\n with self.assert_max_queries(20):\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n self.functie_wl.delete()\n resp = self.client.get(url)\n self.assert404(resp, \"Rol ontbreekt\")\n\n self.ver1.is_extern = True\n self.ver1.save(update_fields=['is_extern'])\n\n with self.assert_max_queries(20):\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/lijst-details.dtl', 'plein/site_layout.dtl'))\n\n self.functie_sec.delete()\n resp = self.client.get(url)\n self.assert404(resp, \"Rol ontbreekt\")\n\n def test_geen_beheerders(self):\n # login als BB\n self.e2e_login_and_pass_otp(self.testdata.account_bb)\n self.e2e_wisselnaarrol_bb()\n self.e2e_check_rol('BB')\n\n # maak een extra vereniging aan zonder beheerders\n ver = Vereniging(\n naam=\"Extra Club\",\n ver_nr=1099,\n regio=Regio.objects.get(regio_nr=101))\n ver.save()\n\n # maak de SEC, HWL en WL functies aan voor deze vereniging\n for rol in ('SEC', 'HWL', 'WL'):\n tmp_func = maak_functie(rol + \" ver 1099\", rol)\n tmp_func.vereniging = ver\n\n if rol == 'SEC':\n tmp_func.bevestigde_email = 'sec@1099.not'\n\n tmp_func.save()\n # for\n\n self.functie_sec.accounts.add(self.account_hwl)\n with self.assert_max_queries(20):\n resp = self.client.get(self.url_geen_beheerders)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/contact-geen-beheerders.dtl', 'plein/site_layout.dtl'))\n\n # corner case\n self.functie_sec.delete()\n with self.assert_max_queries(20):\n resp = self.client.get(self.url_geen_beheerders)\n self.assertEqual(resp.status_code, 200) # 200 = OK\n self.assert_html_ok(resp)\n self.assert_template_used(resp, ('vereniging/contact-geen-beheerders.dtl', 'plein/site_layout.dtl'))\n\n # probeer het met een andere rol\n self.e2e_wisselnaarrol_gebruiker()\n resp = self.client.get(self.url_geen_beheerders)\n self.assert403(resp)\n\n self.e2e_assert_other_http_commands_not_supported(self.url_geen_beheerders)\n\n# end of file\n","sub_path":"Vereniging/tests/test_lijst.py","file_name":"test_lijst.py","file_ext":"py","file_size_in_byte":15325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"279127723","text":"from data_processing import preprocessing\nfrom dataset import TrainDataset, get_train_transform\nfrom torch.utils.data import DataLoader\nimport torchvision.models as models\nfrom network import NN\nimport torch\nfrom trainer import Trainer\nimport numpy as np\nfrom dataset import TrainDataset, get_test_transform\nfrom torch.utils.data import DataLoader\nimport logging\n\n\ndef training(state, lr = 0.01, num_epochs = 10):\n \n resnet = models.resnet101(pretrained = True)\n \n for param in resnet.parameters():\n param.requires_grad=False\n \n logging.debug('Weights are frozen')\n our_resnet_model = NN(resnet)\n cuda = torch.cuda.is_available()\n device = torch.device('cuda:0' if cuda else 'cpu')\n logging.info(f'The availible device is: {device}')\n our_resnet_model = our_resnet_model.to(device)\n logging.debug(f'Sent pretrained model to {device}')\n test_transform = get_test_transform()\n \n if state == 'train':\n \n test, train, val = 
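# The test class above repeats the same four lines (GET, status 200,
# assert_html_ok, assert_template_used) for nearly every URL. A minimal
# sketch of a helper method that could live on the E2EHelpers-based class;
# `assert_get_ok` is a hypothetical name, not part of the original helpers.
def assert_get_ok(self, url, template):
    resp = self.client.get(url)
    self.assertEqual(resp.status_code, 200)  # 200 = OK
    self.assert_html_ok(resp)
    self.assert_template_used(resp, (template, 'plein/site_layout.dtl'))
    return resp

# usage inside a test:
#   resp = self.assert_get_ok(self.url_lijst, 'vereniging/lijst.dtl')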
preprocessing(state = 'train')\n logging.info(f'Preprocessing of train dataset is done: \\n {train.head()}')\n logging.info(f'Preprocessing of test dataset is done: \\n {test.head()}')\n logging.debug(f'Preprocessing is done: \\n train is {train.shape[0]} \\n val is {val.shape[0]} \\n test is {test.shape[0]}')\n \n train_transform = get_train_transform()\n \n train_dataset = TrainDataset(train, train_transform)\n val_dataset = TrainDataset(val, train_transform)\n \n train_dataloader = DataLoader(train_dataset, batch_size = 16, shuffle = True)\n val_dataloader = DataLoader(val_dataset, batch_size = 16, shuffle = False)\n \n trainer = Trainer( model= our_resnet_model, device = device, lr = lr, ready = False)\n trainer.fit(train_dataloader, val_dataloader, num_epochs = num_epochs)\n \n test_dataset = TrainDataset(test['path'].to_frame(), test_transform, is_test = True)\n test_dataloader = DataLoader(test_dataset, batch_size = 16, shuffle = False)\n \n else:\n test = preprocessing(state)\n logging.info(f'Preprocessing of test dataset is done: \\n {test.head()}')\n test_dataset = TrainDataset(test['path'].to_frame(), test_transform, is_test = True)\n test_dataloader = DataLoader(test_dataset, batch_size = 16, shuffle = False)\n trainer = Trainer(model = our_resnet_model, device = device, lr = lr, ready = True)\n \n \n test_predictions= trainer.predict(test_dataloader)\n predictions = np.around(test_predictions)\n \n if state == 'own_test':\n variants = ['alucan', 'glass', 'hdpe', 'pet']\n test['answer'] = [variants[np.where(prediction == 1)[0][0]] for prediction in predictions]\n logging.info(f'Prediction for test dataset: \\n {test}')\n test.to_csv('./results/results.tsv', sep = '\\t')\n else:\n count = 0\n for i, row in test[['alucan', 'glass', 'hdpe', 'pet']].iterrows():\n if (predictions[i] == row.values).all():\n count += 1\n logging.info(f'Accuracy: {count/test.shape[0]}') \n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"156644393","text":"#import data file\nimport os\nimport csv\n#Setting Variables\nVotes = []\nCounty = []\nCandidates = []\nKhan = []\nCorrey = []\nLi = []\nOTooley = []\n#connect directory\ncsvpath = os.path.join('Resources', 'election_data.csv')\nwith open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n print(csvreader)\n csv_header = next(csvreader)\n#setting the loop\n for row in csvreader:\n#Setting Votes, Candidates, and County\n Votes.append(int(row[0]))\n County.append(row[1])\n Candidates.append(row[2])\n#Calculating Total Votes\nTotal_Votes = (len(Votes))\nprint(Total_Votes)\n#Calculating Votes per Person\nfor Candidate in Candidates:\n if Candidate == \"Khan\":\n Khan.append(Candidates)\n Khan_Votes = len(Khan)\n elif Candidate == \"Correy\":\n Correy.append(Candidates)\n Correy_Votes = len(Correy)\n elif Candidate == \"Li\":\n Li.append(Candidates)\n Li_Votes = len(Li)\n else:\n OTooley.append(Candidates)\n OTooley_Votes = len(OTooley)\nprint(Khan_Votes)\nprint(Correy_Votes)\nprint(Li_Votes)\nprint(OTooley_Votes)\n#Calculating The Percentage of Votes Each Candidate Won\nKhan_Percent = round(((Khan_Votes / Total_Votes) * 100), 2)\nCorrey_Percent = round(((Correy_Votes / Total_Votes) * 100), 2)\nLi_Percent = round(((Li_Votes / Total_Votes) * 100), 2)\nOTooley_Percent = round(((OTooley_Votes / Total_Votes) * 100), 
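# In training() above, predictions are rounded with np.around and matched
# element-wise against one-hot rows; if no output rounds to exactly 1,
# `np.where(prediction == 1)[0][0]` raises IndexError, and a row with two
# rounded 1s is silently wrong. A minimal sketch of the usual argmax
# formulation, assuming one class per image as the one-hot ground truth
# implies; the score arrays here are placeholders.
import numpy as np

variants = ['alucan', 'glass', 'hdpe', 'pet']
test_predictions = np.array([[0.1, 0.7, 0.1, 0.1],
                             [0.4, 0.2, 0.2, 0.2]])
onehot_truth = np.array([[0, 1, 0, 0],
                         [1, 0, 0, 0]])

pred_idx = test_predictions.argmax(axis=1)            # never empty
answers = [variants[i] for i in pred_idx]             # 'answer' column values
accuracy = (pred_idx == onehot_truth.argmax(axis=1)).mean()
print(answers, accuracy)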
2)\nprint(Khan_Percent)\nprint(Correy_Percent)\nprint(Li_Percent)\nprint(OTooley_Percent)\n#Calculating The Winner of The Election\nif Khan_Percent > max(Correy_Percent, Li_Percent, OTooley_Percent):\n winner = \"Khan\"\nelif Correy_Percent > max(Khan_Percent, Li_Percent, OTooley_Percent):\n winner = \"Correy\" \nelif Li_Percent > max(Correy_Percent, Khan_Percent, OTooley_Percent):\n winner = \"Li\"\nelse:\n winner = \"O'Tooley\"\n#Print To Get Final Analysis\nprint(\"Election Results\")\nprint(\"-------------------------\")\nprint(f\"Total Votes: {Total_Votes}\")\nprint(\"-------------------------\")\nprint(f\"Khan: {Khan_Percent}% ({Khan_Votes}\")\nprint(f\"Correy: {Correy_Percent}% ({Correy_Votes}\")\nprint(f\"Li: {Li_Percent}% ({Li_Votes}\")\nprint(f\"OTooley: {OTooley_Percent}% ({OTooley_Votes}\")\nprint(\"-------------------------\")\nprint(f\"winner: {winner}\")\nprint(\"-------------------------\")","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"437518839","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport logging\nimport json\n\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.http import Request\n\nfrom app.items import HighIpItem\n\n\nclass Hao7188HighipSpider(CrawlSpider):\n \"\"\"\n hao7188.com 上的高精度ip\n \"\"\"\n name = \"hao7188\"\n allowed_domains = [\"www.hao7188.com\"]\n start_urls = [\n 'http://www.hao7188.com',\n 'http://www.hao7188.com/ip/183.230.20.71.html',\n ]\n\n rules = [\n Rule(LinkExtractor(allow=(\"http://www\\.hao7188\\.com$\", )), callback='parse', follow=True),\n Rule(LinkExtractor(allow=(\"http://www\\.hao7188\\.com/ip\", )), callback='parse_ip_info', follow=True),\n ]\n\n def parse_ip_info(self, response):\n try:\n item = HighIpItem()\n item['url'] = response.url\n item['ip'] = response.url.split('/')[-1].strip('.html')\n item['loc_time'] = response.css('.so_list_left').xpath('.//li[position()=9]/text()').extract_first('').split(u\":\")[-1]\n item['info'] = response.css('.so_list_left').xpath('.//li[position()>1 and position()<9]/text()').extract()\n yield item\n except Exception as e:\n logging.exception(e)\n\n\n","sub_path":"app/spiders/hao7188_highip.py","file_name":"hao7188_highip.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"589836656","text":"from django.test import TestCase\n\n# Create your tests here.\n\nimport responses\nimport re\n\nfrom ..common import (\n WordAPI,\n)\n\n\nclass TestCaseCommon(TestCase):\n\n def setUp(self):\n\n self.w = WordAPI()\n self.w.lang = \"fr\"\n self.w.word = \"sailing\"\n self.w.forvo_api_key = \"blah\"\n\n @responses.activate\n def test_bing_get_image_links(self):\n with open('lf_maker/words/testdata/search.json', 'r') as f:\n bing_json = f.read()\n\n responses.add(responses.GET, 'https://api.cognitive.microsoft.com/bing/v7.0/images/search',\n body=bing_json, status=200,\n content_type='application/json')\n\n images = self.w.get_image_links()\n assert 25 == len(images)\n\n @responses.activate\n def test_forvo_get_audio_links(self):\n with open('lf_maker/words/testdata/forvo.json', 'r') as f:\n forvo_json = f.read()\n\n url_re = 
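# The winner logic above is a hand-rolled if/elif chain over four hardcoded
# percentages that mishandles ties and cannot grow with the candidate list,
# and the tally loop appends the entire Candidates list on every match
# (len() still comes out right, but the per-candidate lists are junk).
# A minimal sketch of the same report with collections.Counter; the vote
# list here is a placeholder for the parsed CSV column.
from collections import Counter

candidates = ['Khan', 'Correy', 'Khan', 'Li', "O'Tooley", 'Khan']
tally = Counter(candidates)
total_votes = sum(tally.values())

print('Election Results')
print('-------------------------')
print(f'Total Votes: {total_votes}')
print('-------------------------')
for name, votes in tally.most_common():
    print(f'{name}: {round(votes / total_votes * 100, 2)}% ({votes})')
print('-------------------------')
print(f'Winner: {tally.most_common(1)[0][0]}')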
re.compile(r'http://apifree.forvo.com/action/word-pronunciations/format/json/word/sailing/key/\\w+/language/fr/')\n\n\n responses.add(responses.GET, url_re,\n body=forvo_json, status=200,\n content_type='application/json')\n\n audio_links = self.w.get_audio_links()\n assert 8 == len(list(audio_links.items()))\n\n","sub_path":"lf_maker/words/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"549278903","text":"from app import app, db\nimport app.users.models as users\nimport app.sketchup.models as sketchup\nimport app.journal.models as journal\nimport app.admin.models as admin\nprint('create database schema')\ndb.create_all()\n\nprint('create groups')\nadmin_group = users.Group('Administrators', '')\nadmin_group.id = 1\nuser_group = users.Group('Public Users', '')\nuser_group.id = 2\ndb.session.add(admin_group)\ndb.session.add(user_group)\n\nprint('create first admin user')\nuser = users.User(username='admin', email=config.EMAIL['address'], password='abcxyz')\nuser.banned = 0\nuser.groups.append(admin_group)\nuser.groups.append(user_group)\ndb.session.add(user)\n\nprint ('create version tag')\nversion_tag = admin.SystemParameter()\nversion_tag.name = 'version'\nversion_tag.value = '1.00'\ndb.session.add(version_tag)\n\nprint('commit to database')\ndb.session.commit()\nprint('finish')","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"93283040","text":"import os\nimport shutil\n\ndef exe_creator(main_file=\"\"):\n exe_name = os.path.splitext(main_file)[0]\n dist_path = os.path.basename(os.getcwd())\n\n os.system(\"pyInstaller %s -F --distpath %s -n %s\" % (main_file, dist_path, exe_name))\n shutil.rmtree('build')\n os.remove(exe_name + '.spec')\n\n","sub_path":"collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"361842323","text":"import maya.cmds as cmds\n\nclass myClass():\n\n def __init__(self):\n self.debug = False\n\n def make_shape_controller_with_selection(self):\n sel = cmds.ls(sl=1)\n\n if sel:\n sel_faces = sel\n sel_object = str(sel[0]).split('.')\n\n # -- duplicate object\n controller = cmds.duplicate(sel_object)\n cmds.delete(controller, constructionHistory=True)\n cmds.parent(controller, world=True)\n\n # -- create the magic structure\n tsf_geo = cmds.createNode('transformGeometry')\n\n else:\n print('WARNING! 
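# install.py above reads config.EMAIL['address'] but never imports config,
# so the script fails with NameError before any row is created. A minimal
# sketch of the corrected header; that the module lives at app.config, like
# the other app.* imports, is an assumption.
from app import app, db
import app.config as config          # the import missing from the original
import app.users.models as users

user = users.User(username='admin',
                  email=config.EMAIL['address'],   # now resolvable
                  password='abcxyz')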
You have nothing selected to operate on')\n\n\n\nif __name__ == '__main__':\n mc = myClass()","sub_path":"Maya/Rigging/SHAPESMeshController.py","file_name":"SHAPESMeshController.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"421067651","text":"from .replication_manager import GameObject\nfrom .serialization import RefTag\nfrom .tools import dict_merge\n\n\nclass Player(GameObject):\n\n load_priority = -1\n\n def __init__(self, name=None, main_unit=None, id_=None):\n super().__init__(id_)\n self.name = name\n self.main_unit = main_unit\n self.is_ready = False\n # self.units = []\n\n def dump(self):\n # return {\n # **super().dump(),\n # 'name': self.name,\n # # 'main_unit': RefTag(self.main_unit),\n # 'main_unit': self.main_unit.dump(),\n # # 'units': [RefTag(unit) for unit in self.units]\n # }\n return dict_merge(\n super().dump(),\n {\n 'name': self.name,\n 'main_unit': self.main_unit.dump(),\n }\n )\n\n def load(self, struct):\n self.name = struct['name']\n self.main_unit = self.registry.load_obj(struct['main_unit'])\n # self.units = struct['units']","sub_path":"mlp/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"111833804","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.conf import settings\n\nfrom .forms import VerifyForm\nfrom .validate import validate_form_data \nfrom .cirrus_jwt import generate_jwt\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\ndef verify(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = VerifyForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required\n logger.debug('form={}'.format(scrub_ssn(form.cleaned_data.copy())))\n entry = validate_form_data(form)\n if entry:\n logger.debug('entry={}'.format(entry))\n # redirect to a new URL:\n cirrus_proxy_url = generate_jwt(entry)\n return redirect('confirmed')\n else:\n form.add_error(None, settings.VALIDATION_ERROR_MESSAGE)\n else:\n logger.warn('form.errors={}'.format(form.errors.as_json(escape_html=False)))\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = VerifyForm()\n\n return render(request, 'verify.html', {'form': form})\n\ndef confirmed(request):\n return HttpResponse(\"It's you!\")\n\n\ndef scrub_ssn(cleaned_form):\n if cleaned_form['ssn']:\n cleaned_form['ssn'] = 'xxxx'\n return cleaned_form\n","sub_path":"verify/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"451056948","text":"a=2\nb=2\nc=2\ny=2\nx=2\n\n\nwhile a != 0 and a != 1:\n\ta = int(input(\"Pick 1 or 0: \"))\n\tif a != 0 and a != 1:\n\t\tprint(\"Illegal input...\\nPlease type 1 or 0\")\nwhile b != 0 and b != 1:\n\tb = int(input(\"Pick 1 or 0: \"))\n\tif b != 0 and b != 1:\n\t\tprint(\"Illegal input...\\nPlease type 1 or 0\")\nwhile c != 0 and c != 1:\n\tc = int(input(\"Pick 1 or 0: \"))\n\tif c != 0 and c != 1:\n\t\tprint(\"Illegal input...\\nPlease type 1 or 0\")\n\ndef NOR_gate(a,c):\n\t#this will be the NOR gate with inputs A and C\n\tif a == 0 and c == 0: # NOR gates say 
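# make_shape_controller_with_selection above creates a transformGeometry
# node ("the magic structure") and then stops, so tsf_geo is never connected
# to anything. A hedged sketch of one plausible completion -- counter-
# transforming the duplicated shape by the controller's inverse matrix; the
# wiring the author intended is unknown, and only the stock
# transformGeometry attributes (inputGeometry, transform, outputGeometry)
# are assumed.
import maya.cmds as cmds

def wire_transform_geometry(mesh_shape, driver_transform):
    tsf_geo = cmds.createNode('transformGeometry')
    cmds.connectAttr(mesh_shape + '.worldMesh[0]', tsf_geo + '.inputGeometry')
    cmds.connectAttr(driver_transform + '.worldInverseMatrix[0]',
                     tsf_geo + '.transform')
    return tsf_geo + '.outputGeometry'   # feed this into the target shape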
that if both are 0 the output is 1, all else is false\n\t\tx = 1\n\telse:\n\t\tx = 0\n\treturn x\n\n\ndef OR_gate(b,c):\n\t#the following is the OR gate\n\tif b == 1 or c == 1:\n\t\ty = 1\n\telse:\n\t\ty = 0\n\treturn y\n\ndef negation(y):\n\t#here we will negate the output from the or gate\n\tif y == 1:\n\t\ty = 0\n\telse:\n\t\ty = 1\n\treturn y\n\ndef XNOR_gate(x,y):\n\t#the following code will represent the XNOR gate with inputs X and Y representing the output from the NOR and OR gates\n\tif x == 0 and y == 0:\n\t\tq = 1\n\telif x == 1 and y == 1:\n\t\tq = 1\n\telse:\n\t\tq = 0\n\tprint(\"Your final output is \", q)\n\n\n#feed each gate's result forward so XNOR_gate receives the computed values\n#rather than the placeholder x and y defined at the top\nx = NOR_gate(a,c)\n\ny = OR_gate(b,c)\n\ny = negation(y)\n\nXNOR_gate(x,y)\n","sub_path":"simplefunction.py","file_name":"simplefunction.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"45189385","text":"\nimport random\n\nsubstitue_words = {\n    'batteries': 'hounds',\n    'vicksburg': 'odor',\n    'april': 'clayton',\n    '16': 'sweet',\n    'grand': 'tree',\n    'gulf': 'owl',\n    'forts': 'bailey',\n    'river': 'hickory',\n    '25': 'multiply',\n    '29': 'add',\n    'admiral': 'hermes',\n    'porter': 'langford'\n}\n\noriginal_msg = 'We will run the batteries at Vicksburg the night of April 16 ' \\\n               'and proceed to Grand Gulf where we will reduce the forts. ' \\\n               'Be prepared to cross the river on April 25 or 29. ' \\\n               'Admiral Porter.'\n\ndummy_words = [\n    'koala',\n    'konik',\n    'autobus',\n    'czarny',\n    'jedziec',\n    'samochod',\n    'karawana'\n]\n\n\ndef get_msg_with_substitue_words(msg, words_dictionary):\n    substitue_msg = msg.upper()\n    for word, substitue_word in words_dictionary.items():\n        substitue_msg = substitue_msg.replace(word.upper(), substitue_word.upper())\n    return substitue_msg\n\n\ndef main():\n    # print(original_msg)\n    substitue_msg = get_msg_with_substitue_words(original_msg, substitue_words)\n    print(substitue_msg)\n\n    msg_splited = list(substitue_msg.split())\n    # print(cipherlist)\n\n    cols = 6\n    rows = 7\n\n    signs_keys = [-1, 3, -2, 6, 5, -4]\n    translation_matrix = build_matrix(signs_keys, msg_splited, cols, rows-1)\n    print(\"Translation matrix:\")\n    print(translation_matrix)\n    matrix_with_dummy_words = add_dummy_words(translation_matrix)\n    print(\"Translation matrix with dummy words:\")\n    print(matrix_with_dummy_words)\n    encoded_msg = ''.join(translate_matrix_to_word_list(matrix_with_dummy_words))\n    print(\"Plaintext = {}\".format(encoded_msg))\n\n\ndef translate_matrix_to_word_list(matrix):\n    word_list = []\n    print()\n    for column_id in range(len(matrix[0])):\n        # print(matrix[0])\n        for row_id in range(len(matrix)):\n            # print(matrix[row_id])\n            # print(column_id, row_id)\n            word_list.append(matrix[row_id][column_id])\n\n    return word_list\n\n\ndef add_dummy_words(matrix):\n    matrix_with_dummy_words = matrix\n    dummy_row = random.choices(dummy_words, k=len(matrix[0]))\n    matrix_with_dummy_words.append(dummy_row)\n    return matrix_with_dummy_words\n\n\ndef build_matrix(key_int, word_list, cols, rows):\n    \"\"\"Turn every n-items in a list into a new item in a list of lists.\"\"\"\n    translation_matrix = [None] * rows\n\n    for key in key_int:\n        fragment = []\n        for word_id in range(abs(key), len(word_list), cols):\n            fragment.append(word_list[word_id])\n        print(fragment)\n        if key < 0:  # read bottom-to-top of column\n            row_items = fragment\n        elif key > 0:  # read top-to-bottom of column\n            row_items = list((reversed(fragment)))\n        translation_matrix[abs(key) - 1] = row_items\n    return translation_matrix\n\n\nif __name__ 
== '__main__':\n main()\n","sub_path":"Chapter_4/my_chapter_4/router_cipher_encoder.py","file_name":"router_cipher_encoder.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"499020147","text":"import numpy as np\nimport time\nfrom openvino.inference_engine import IENetwork, IECore\nimport os\nimport cv2\nimport argparse\nimport sys\n\n\nclass Queue:\n '''\n Class for dealing with queues\n '''\n def __init__(self):\n self.queues=[]\n\n def add_queue(self, points):\n self.queues.append(points)\n\n def get_queues(self, image):\n for q in self.queues:\n x_min, y_min, x_max, y_max=q\n frame=image[y_min:y_max, x_min:x_max]\n yield frame\n\n def draw_queues(self, image):\n for q in self.queues:\n startX, startY, endX, endY = q\n print (\"Draw \", startX, startY, endX, endY)\n cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 0), 5)\n\n def check_coords(self, coords):\n d={k+1:0 for k in range(len(self.queues))}\n for coord in coords:\n for i, q in enumerate(self.queues):\n if coord[0]>q[0] and coord[2] self.threshold:\n box = outputs[0, 0, i, 3:7]\n coords.append(box)\n return coords\n\n def preprocess_input(self, image):\n '''\n Preprocessing the input to fit the the inference engine\n '''\n b, c, h, w = self.input_shape\n prepo = np.copy(image)\n prepo = cv2.resize(prepo, (w,h))\n prepo = prepo.transpose((2,0,1))\n prepo = prepo.reshape(1,c,h,w)\n return prepo\n\ndef main(args):\n model=args.model\n device=args.device\n video_file=args.video\n max_people=args.max_people\n threshold=args.threshold\n output_path=args.output_path\n\n start_model_load_time=time.time()\n pd= PersonDetect(model, device, threshold)\n pd.load_model()\n total_model_load_time = time.time() - start_model_load_time\n\n queue=Queue()\n\n try:\n queue_param=[[620,1,915,562],[1000,1,1264,461]]\n for q in queue_param:\n print (q)\n queue.add_queue(q)\n except:\n print(\"error loading queue param file\")\n\n try:\n cap=cv2.VideoCapture(video_file)\n except FileNotFoundError:\n print(\"Cannot locate video file: \"+ video_file)\n except Exception as e:\n print(\"Something else went wrong with the video file: \", e)\n\n initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n out_video = cv2.VideoWriter(os.path.join(output_path, 'output_video.mp4'), cv2.VideoWriter_fourcc(*'avc1'), fps, (initial_w, initial_h), True)\n print(\"Video size = {}x{}\".format(initial_h, initial_w))\n\n counter=0\n start_inference_time=time.time()\n\n try:\n while cap.isOpened():\n ret, frame=cap.read()\n if not ret:\n break\n counter+=1\n\n coords, image= pd.predict(frame)\n queue.draw_queues(image)\n num_people= queue.check_coords(coords)\n print(f\"Total People in frame = {len(coords)}\")\n print(f\"Number of people in queue = {num_people}\")\n out_text=\"\"\n y_pixel=25\n\n for k, v in num_people.items():\n out_text += f\"No. 
of People in Queue {k} is {v} \"\n                    if v >= int(max_people):\n                        out_text += f\" Queue full; Please move to next Queue \"\n                    cv2.putText(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)\n                    out_text=\"\"\n                    y_pixel+=40\n                out_video.write(image)\n\n            total_time=time.time()-start_inference_time\n            total_inference_time=round(total_time, 1)\n            fps=counter/total_inference_time\n\n            with open(os.path.join(output_path, 'stats.txt'), 'w') as f:\n                f.write(str(total_inference_time)+'\\n')\n                f.write(str(fps)+'\\n')\n                f.write(str(total_model_load_time)+'\\n')\n\n            cap.release()\n            cv2.destroyAllWindows()\n    except Exception as e:\n        print(\"Could not run Inference: \", e)\n\nif __name__=='__main__':\n    parser=argparse.ArgumentParser()\n    parser.add_argument('--model', required=True)\n    parser.add_argument('--device', default='CPU')\n    parser.add_argument('--video', default=None)\n    parser.add_argument('--queue_param', default=None)\n    parser.add_argument('--output_path', default='/results')\n    parser.add_argument('--max_people', default=2)\n    parser.add_argument('--threshold', default=0.60)\n\n    args=parser.parse_args()\n\n    main(args)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"347923687","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# Define the communication-protocol header structure\nclass CDataStruct:\n    def __init__(self, version, data_type, data_len, message_id, check_sum):\n        self.version = version\n        self.type = data_type\n        self.length = data_len\n        self.message_id = message_id\n        self.check_sum = check_sum\n\n\n# Sum the unsigned values of the bytes in the given string\ndef check_sum_data(string):\n    sum_check = 0\n    bytestr = bytearray(string)\n    for item in bytestr:\n        sum_check += int(item)\n    return sum_check\n","sub_path":"python/codeSnippet/network/tcp_comm_struct.py","file_name":"tcp_comm_struct.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"508063368","text":"import pytest\n\nfrom login.models import Post\n\n\n@pytest.mark.django_db\ndef test_get_url(fake, user, user_client):\n    post = Post.objects.create(\n        user=user,\n        title=fake.sentence(),\n        text=fake.text()\n    )\n\n    url = post.get_absolute_url()\n    response = user_client.get(url)\n\n    assert response.data[\"id\"] == post.id\n","sub_path":"login/tests/test_get_url.py","file_name":"test_get_url.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"465171967","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError\n\nclass sale_type(models.Model):\n _name = 'sale.order.type'\n _order = 'name'\n\n name = fields.Char('Order Type')\n message = fields.Char('Message on Invoice')\n invoice_policy = fields.Selection([('free', 'Free Customer Invoice'),\n ('payment', 'Normal Customer Invoice')], string='Accounting Policy', default='payment')\n route_id = fields.Many2one('stock.location.route', 'Route')\n\n\nclass packing_master(models.Model):\n _name = 'packing.master'\n\n name = fields.Char('Name')\n code = fields.Char('Code')\n\n\nclass despatch_master(models.Model):\n _name = 'despatch.master'\n\n name = fields.Char('Name')\n code = fields.Char('Code')\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n division_id = fields.Many2one(related='partner_id.division_id', string='Division', store=True)\n branch_id = fields.Many2one(related='partner_id.branch_id', string='Branch Name', store=True)\n zone_id = fields.Many2one(related='partner_id.zone_id', string='Zone', store=True)\n\n @api.multi\n def _prepare_invoice(self):\n self.ensure_one()\n invoice_vals = super(SaleOrder, self)._prepare_invoice()\n invoice_vals.update({\n 'product_category_id': self.product_category_id.id,\n 'order_type_id': self.order_type_id.id,\n 'order_commission_type': self.order_commission_type,\n 'delivery_period': self.delivery_period,\n 'despatch_id': self.despatch_id.id,\n 'courier_station': self.courier_station,\n 'purchase_date': self.purchase_date,\n 'order_id': self.id\n })\n return invoice_vals\n\n product_category_id = fields.Many2one('product.category', 'Order Category',\n readonly=True, states={'draft': [('readonly', False)]})\n\n order_type_id = fields.Many2one('sale.order.type', string='Order Type',\n readonly=True, states={'draft': [('readonly', False)]})\n message = fields.Char(related='order_type_id.message')\n\n order_commission_type = fields.Selection([('normal', 'Normal'), ('commission', 'Commission')],\n string='Order Request', default='normal',\n readonly=True, states={'draft': [('readonly', False)]})\n\n delivery_period = fields.Char('Delivery Period', index=True, readonly=True,\n states={'draft': [('readonly', False)]})\n despatch_id = fields.Many2one('despatch.master', 'Mode of Dispatch', index=True,\n readonly=True, states={'draft': [('readonly', False)]})\n courier_station = fields.Char('Booking Station Name')\n\n client_order_ref = fields.Char('Purchase Order', readonly=True, states={'draft': [('readonly', False)]})\n purchase_date = fields.Date('Purchase Date', readonly=True, states={'draft': [('readonly', False)]})\n\n @api.onchange('partner_id')\n def onchange_user_id(self):\n '''get the correct sale team when user or customer is changed'''\n self.team_id = False\n self.user_id = False\n if self.partner_id:\n self.team_id = self.partner_id.team_id\n self.user_id = self.partner_id.user_id\n\nclass Invoice(models.Model):\n _inherit = 'account.invoice'\n\n product_category_id = fields.Many2one('product.category', 'Order Category',\n readonly=True, states={'draft': [('readonly', False)]})\n\n order_type_id = fields.Many2one('sale.order.type', string='Order Type',\n readonly=True, states={'draft': [('readonly', False)]})\n message = fields.Char(related='order_type_id.message')\n\n order_commission_type = fields.Selection([('normal', 'Normal'), ('commission', 'Commission')],\n string='Order Request', default='normal',\n readonly=True, 
states={'draft': [('readonly', False)]})\n\n delivery_period = fields.Char('Delivery Period', index=True, readonly=True,\n states={'draft': [('readonly', False)]})\n despatch_id = fields.Many2one('despatch.master', 'Mode of Dispatch', index=True,\n readonly=True, states={'draft': [('readonly', False)]})\n courier_station = fields.Char('Booking Station Name')\n\n client_order_ref = fields.Char('Purchase Order', readonly=True, states={'draft': [('readonly', False)]})\n purchase_date = fields.Date('Purchase Date', readonly=True, states={'draft': [('readonly', False)]})\n\n order_id = fields.Many2one('sale.order', string='Sale Order')\n\n order_count = fields.Integer('Orders', compute='_compute_orders')\n\n @api.multi\n def _compute_orders(self):\n for invoice in self:\n orders = self.env['sale.order']\n for line in invoice.invoice_line_ids:\n orders += line.sale_line_ids.mapped('order_id')\n\n invoice.order_count = len(list(set(orders)))\n\n @api.multi\n def open_sale_orders(self):\n self.ensure_one()\n\n action = self.env.ref('sale_crm.sale_action_orders').read()[0]\n orders = self.env['sale.order']\n\n for line in self.invoice_line_ids:\n orders += line.sale_line_ids.mapped('order_id')\n\n orders = list(set(orders))\n if len(orders.ids) > 1:\n action['domain'] = [('id', 'in', orders.ids)]\n elif len(orders.ids) == 1:\n action['views'] = [(self.env.ref('sale.view_order_form').id, 'form')]\n action['res_id'] = orders.id\n return action\n\n\n @api.model\n def _prepare_refund(self, invoice, date_invoice=None, date=None, description=None, journal_id=None):\n vals = super(Invoice, self)._prepare_refund(invoice, date_invoice, date, description, journal_id)\n vals.update({\n 'product_category_id': self.product_category_id.id,\n 'order_type_id': self.order_type_id.id,\n 'order_commission_type': self.order_commission_type,\n 'delivery_period': self.delivery_period,\n 'despatch_id': self.despatch_id.id,\n 'courier_station': self.courier_station,\n 'order_id': self.order_id.id\n })\n return vals\n\nclass InvoiceLine(models.Model):\n _inherit = 'account.invoice.line'\n\n return_inv = fields.Char('Credit Note')\n\n\nclass Team(models.Model):\n _inherit = 'crm.team'\n\n team_member_ids = fields.Many2many('res.users', 'crm_team_res_user_rel', 'team_id', 'user_id', string='Channel Members')\n\n @api.multi\n def compute_followers(self):\n for team in self:\n team.message_unsubscribe(team.message_partner_ids.ids)\n\n members = []\n members.append(team.user_id.partner_id.id)\n for member in team.member_ids:\n members.append(member.partner_id.id)\n\n new_members = list(set(members))\n team.message_subscribe(new_members)\n","sub_path":"sale_fields/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"570305774","text":"from rtnn import Layers as L\nfrom rtnn import network as net\nfrom rtnn import Visualizer as Vis\nfrom rtnn import inputUtil as Iu\nimport numpy as np\n\n\ninputSize = 1\noutputSize = 1\nt0 = 0\nT = 1501\n\ncalculations = 600\n\nnetworkHistory = []\n\nnetwork = net.Network()\ninputLayer = L.InputLayer(inputSize)\nnetwork.addLayer(inputLayer)\noutputLayer = L.DuoLateralInhibitoryLayer(outputSize, excitatoryTransmitterExpectation=5200, excitatoryTransmitterVariance=0,\n excitatoryReceptorExpectation=1600, excitatoryReceptorVariance=0,\n inhibitoryTransmitterExpectation=3500, inhibitoryTransmitterVariance=0,\n inhibitoryReceptorExpectation=1000, 
inhibitoryReceptorVariance=0)\nnetwork.addLayer(outputLayer)\n\nfor difference in range(int(-(calculations / 2)), int(calculations / 2)):\n print('difference: ', difference)\n preneuralExcitoryInputsSeries, postneuralExcitoryInputsSeries = Iu.spikeTimeDifference(difference)\n postneuralInhibitoryInputsSeries = Iu.noSpike(len(postneuralExcitoryInputsSeries), outputSize) # to ignore the inhibitory potentials\n timeBiases = Iu.makeTimeBiases(outputLayer, postneuralExcitoryInputsSeries, postneuralInhibitoryInputsSeries)\n print('Simulating until T=', T)\n # Run Simulation\n for time in range(t0, T):\n excitatoryIn = preneuralExcitoryInputsSeries[time]\n # iu.poissonDistributedSpikeTrain(T, len(inputLayer), 0.05)[t] #iu.noSpike(2, inputSize)[0] #iu.epilepsie[t] #\n inhibitoryIn = Iu.noSpike(2, inputSize)[0]\n layerBiasDict = timeBiases[time]\n network.step(excitatoryIn, inhibitoryIn, layerBiasDict=layerBiasDict, ignorePreneurons=True)\n networkHistory.append(network.logNetwork())\n if time % 500 == 0:\n print('t: ', time)\n # print('\\ninputLayer:\\n', inputLayer)\n # print('\\nhiddenLayer:\\n', hiddenLayer)\n # print('\\noutputLayer:\\n', outputLayer)\n\nprint('visualise Learning Signal History')\nnetworkHistory = np.array(networkHistory)\nVis.visualiseLearningSignalHistory(networkHistory, calculations, T)\nprint('Done!')\n#Vis.visualizeNetwork(networkHistory, visualizeSynapses=False)\n","sub_path":"stdpKosekansHyperbolicus.py","file_name":"stdpKosekansHyperbolicus.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"280519595","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\nsql_path = 'mysql://root:123@localhost:3306/tab'\napp.config['SQLALCHEMY_DATABASE_URI'] = sql_path\ndb = SQLAlchemy(app)\n\nclass Tab(db.Model):\n __tablename__ = 'tabs'\n id = db.Column(db.Integer, primary_key=True)\n artist = db.Column(db.Text)\n song = db.Column(db.Text)\n tab = db.Column(db.Text)\n\n#@app.route('/')\n#def test_tab():\n# t = Tab.query.filter_by(id='1').first()\n# return t.tab\n\n@app.route('/')\ndef main():\n return render_template('search.html')\n\n@app.route('/search')\ndef search():\n kwd = request.args.get('kwd')\n song_matchs = Tab.query.filter_by(song=kwd).all()\n artist_matchs = Tab.query.filter_by(artist=kwd).all()\n matchs = song_matchs + artist_matchs\n items = [(item.id, item.song) for item in matchs]\n return render_template('list.html', items=items)\n\n@app.route('/id/')\ndef tab(id_=1):\n item = Tab.query.filter_by(id=id_).first()\n artist = item.artist\n song = item.song\n tab = item.tab\n\n return render_template('song.html', artist=artist, song=song, tab=tab)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"tab/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"557824392","text":"from sklearn.model_selection import GridSearchCV\nimport pandas as pd\nimport namesgenerator\nfrom ppo import main\nimport json\n\n\ndefault_hyperparameters = {\n 'VERSION': '2.1.0',\n 'ENV': 'CartPole-v0',\n 'RANDOM_SEED': 42,\n 'RENDER': True,\n\n 'MLP_UNITS': 16,\n 'MLP_LAYERS': 1, # one shared hidden layer between V and P\n 'P_MLP_LAYERS': 0, # policy network is linear\n 'V_MLP_LAYERS': 1, # V network is \"deep\"\n\n # the dimensionality for all these is: 
\"MDP state transitions\" (not observational frames)\n 'GRADIENT_LEARNING_BATCH_SIZE': 32,\n 'TRANSITIONS_IN_EXPERIENCE_BUFFER': 1024,\n 'HORIZON': 1024,\n 'TOTAL_ENV_STEPS': 1e5, # 2e7,\n\n 'EPOCHS_PER_UPDATE': 4,\n\n 'CLIP_RANGE': .2,\n 'GAMMA': .99,\n 'ADVANTAGE_LAMBDA': .97,\n 'VALUE_LAMBDA': .99,\n 'MAX_GRAD_NORM': .5,\n 'VALUE_LOSS_WEIGHT': .25,\n 'LR': 3e-4,\n}\n\n\nclass RLEstimator():\n params_default = default_hyperparameters\n params_values = {}\n score_ = None\n _flushed_score = False\n _random_name = ''\n\n def __init__(self, *args, **kwargs):\n self.set_params(**kwargs)\n\n def get_params(self, *args, **kwargs):\n return self.params_values\n\n def set_params(self, *args, **kwargs):\n for param, param_default_value in self.params_default.items():\n if param in kwargs:\n self.params_values[param] = kwargs[param]\n elif param in self.params_values:\n pass\n else:\n self.params_values[param] = param_default_value\n\n def fit(self, X):\n self._random_name = namesgenerator.get_random_name()\n self.score_ = main(hparams=self.params_values,\n random_name=self._random_name)\n return self\n\n def score(self, X):\n if not self._flushed_score:\n self._flushed_score = True\n with open('score.log', 'a') as log:\n log.write(json.dumps(\n {'score': self.score_, 'random_name': self._random_name, **self.params_values}) + \"\\n\")\n return self.score_\n\n\nif __name__ == '__main__':\n main(hparams=default_hyperparameters)\n exit()\n INITS_PER_HYPERSET = 2\n assert INITS_PER_HYPERSET % 2 == 0\n rle = RLEstimator()\n gs = GridSearchCV(rle, {\n 'RENDER': [False],\n 'RANDOM_SEEED': [False],\n 'GAMMA': [0.95, 0.99],\n 'ADVANTAGE_LAMBDA': [0.8],\n 'TOTAL_ENV_STEPS': [10_000],\n }, cv=INITS_PER_HYPERSET, n_jobs=-1, refit=False)\n gs.fit([.0]*INITS_PER_HYPERSET)\n print(pd.DataFrame(gs.cv_results_).filter(\n regex='^(param_)|(mean_test_score)'))\n","sub_path":"cartpole_ppo_tf.py","file_name":"cartpole_ppo_tf.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"39950426","text":"#!/usr/bin/env python\n\n\"\"\"\nThis version is double buffered\n\"\"\"\n\nimport wx\nimport time\n\n\nclass TestFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, -1, \"DrawLines Test\",\n wx.DefaultPosition,\n size=(500, 500),\n style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)\n\n ## Set up the MenuBar\n MenuBar = wx.MenuBar()\n\n file_menu = wx.Menu()\n\n ID_CLEAR_MENU = wx.NewId()\n file_menu.Append(ID_CLEAR_MENU, \"&Clear\", \"Clear the Screen\")\n self.Bind(wx.EVT_MENU, self.Clear, id=ID_CLEAR_MENU)\n\n ID_ANIMATE_MENU = wx.NewId()\n file_menu.Append(ID_ANIMATE_MENU, \"&Animate\", \"Animate the Screen\")\n self.Bind(wx.EVT_MENU, self.Animate, id=ID_ANIMATE_MENU)\n\n file_menu.Append(wx.ID_EXIT, \"E&xit\", \"Terminate the program\")\n self.Bind(wx.EVT_MENU, self.OnQuit, id=wx.ID_EXIT)\n\n MenuBar.Append(file_menu, \"&File\")\n self.SetMenuBar(MenuBar)\n\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n self.Bind(wx.EVT_MOTION, self.OnMouseMove)\n self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)\n self.Bind(wx.EVT_SIZE, self.OnSize)\n\n self.LineData = []\n self.OnSize(None)\n\n def OnPaint(self, event):\n dc = wx.PaintDC(self)\n dc.DrawBitmap(self._Buffer, 0, 0)\n\n def OnSize(self,event):\n # The Buffer init is done here, to make sure the buffer is always\n # the same size as the Window\n Size = self.GetClientSizeTuple()\n self._Buffer = wx.EmptyBitmap(Size[0], Size[1])\n self.Draw()\n\n def Draw(self):\n 
dc = wx.MemoryDC()\n        dc.SelectObject(self._Buffer)\n        dc.SetBackground( wx.Brush(\"Purple\") )\n        dc.Clear()\n        dc.SetPen(wx.Pen(\"Red\", 3))\n        for Line in self.LineData:\n            dc.DrawLines(Line)\n        self.Refresh()\n\n    def Clear(self, event = None):\n        self.LineData = []\n        self.Draw()\n\n    def OnLeftDown(self,event):\n        xy = event.GetPosition()\n        self.LineData.append( [xy] )\n\n    def OnMouseMove(self, event):\n        if event.Dragging() and event.LeftIsDown():\n            xy = event.GetPosition()\n            self.LineData[-1].append(xy)\n            dc = wx.MemoryDC()\n            dc.SelectObject(self._Buffer)\n            dc.SetPen(wx.Pen(\"Red\", 3))\n            x1, y1 = self.LineData[-1][-2]\n            x2, y2 =self.LineData[-1][-1]\n            dc.DrawLine(x1, y1, x2, y2)\n            self.Refresh()\n\n    def Animate(self, event):\n        self.Refresh()\n        self.LineData.append( [(0,0)] )\n        dc = wx.MemoryDC()\n        dc.SelectObject(self._Buffer)\n        dc.SetPen(wx.Pen(\"Red\", 3))\n        for i in xrange(10,500,5):\n            self.LineData[-1].append((i,i))\n            x1, y1 = self.LineData[-1][-2]\n            x2, y2 =self.LineData[-1][-1]\n            dc.DrawLine(x1, y1, x2, y2)\n            self.Refresh()\n            self.Update()\n            time.sleep(0.01)\n\n    def OnQuit(self,event):\n        self.Close(True)\n\n\nclass DemoApp(wx.App):\n    def OnInit(self):\n        frame = TestFrame()\n        frame.Show(True)\n        self.SetTopWindow(frame)\n\n        return True\n\n\nif __name__ == \"__main__\":\n    app = DemoApp(0)\n    app.MainLoop()\n","sub_path":"TestUpdate2.py","file_name":"TestUpdate2.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"173581683","text":"from server import app, redis\nfrom flask import request, g\n\n\n@app.before_request\ndef before_request():\n    # Intercept every request here and validate its token\n    white_list = [\n        '/user/login'\n    ]\n    if request.path not in white_list:\n        # Get the token, which is used to look up the stored user details\n        token = request.args.get('token')\n\n        if not token:\n            return {\n                'code': 50008\n            }\n        else:\n            user_info = redis.get(token)\n            g.token = token\n            g.username = user_info.get('username')\n            g.userId = user_info.get('id')\n            g.role = user_info.get('role')\n","sub_path":"server/interceptor.py","file_name":"interceptor.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"400125169","text":"# File: hw8_part3.py\n# Author: Ahmed Almansoori\n# Date: 8/3/2016\n# Section: 1\n# E-mail: aalm1@umbc.edu\n# Description: this program saves all the unique characters in an input file to a list\n\n\n\n#this function builds a list of all unique characters in that file, using recursion\n#input: a string from the file, a list of unique characters in that file\n#output: the unique list of characters in the file \ndef newChar(string,unique):\n\n    if len(string)==0:\n        return unique\n    else:\n        if string[0] not in unique:\n            unique.append(string[0])\n        return newChar(string[1:],unique)\n\ndef main():\n\n    inpfile=open(\"input.txt\")\n    string=inpfile.read() # generic name because this file can only have one string since there isn't any user input\n    unique=[]\n    print(newChar(string,unique))\n\nmain()\n\n\n\n","sub_path":"Unique_characters.py","file_name":"Unique_characters.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"231193460","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n@name: riqian.py\n@editor: PyCharm\n@Date: 2019/1/18 10:06\n@Author: ly\n@Description: \n\"\"\"\nimport os\nimport json\nfrom tornado.web import RequestHandler\nfrom .e_file_model import *\n\n\nclass Riqian(RequestHandler):\n    
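# Tornado handler for the day-ahead (\"riqian\") dispatch plan: it parses the\n    # dh_plan E-file for the requested date and returns 96-point daily curves\n    # (presumably quarter-hourly) as JSON.\n    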
def post(self, *args, **kwargs):\n        res_data = {\n            'code': 0, 'msg': '', 'gen_cost': '', 'wind': '', 'spin': '', 'margin': '',\n            'wind_dev': [], 'power_dev': [], 'margin_dev': [], 'margin_opt': [],\n            'margin_plb': [], 'margin_pub': [], 'margin_max': [], 'margin_min': [],\n            'wind_opt': [], 'wind_plb': [], 'wind_pub': [], 'wind_fur': [], 'wind_cub': [], 'wind_clb': [],\n            'power_opt': [], 'power_plb': [], 'power_pub': []\n        }\n        _date = self.get_argument('date')\n        _type = self.get_argument('type', '')\n        file_name = 'dh_plan_{}.txt'.format(_date)\n\n        cur_path = os.path.dirname(__file__)\n        root_path = os.path.dirname(cur_path)\n        e_file_path = os.path.join(root_path, 'efiles')\n\n        engine = EFileEngine()\n        flag = engine.LoadFile(os.path.join(e_file_path, file_name))\n        if not flag:\n            res_data['code'] = 1\n            res_data['msg'] = 'Failed to load the E-format file!'\n        else:\n            table_list = engine.getAllTableNames()\n            if _type == 'basic':\n                for tb in table_list:\n                    table = engine.getTable(tb)\n                    table_col_name = table.getAllColNames()\n                    # col_num = table.getColNum()\n                    # row_num = table.getRowNum()\n                    if tb == 'sys':\n                        gen_cost = table.getColumsData('gencost', table_col_name)  # operating cost\n                        wind = table.getColumsData('windcurtailcost', table_col_name)  # wind-curtailment target\n                        spin = table.getColumsData('spin_reserve_value', table_col_name)  # spinning-reserve target\n                        margin = table.getColumsData('tie_margin', table_col_name)  # tie-line margin target\n                        if gen_cost:\n                            res_data['gen_cost'] = gen_cost[0]['data'][0]\n                        if wind:\n                            res_data['wind'] = wind[0]['data'][0]\n                        if spin:\n                            res_data['spin'] = spin[0]['data'][0]\n                        if margin:\n                            res_data['margin'] = margin[0]['data'][0]\n                    elif tb == 'tie_crv':  # tie-line (interface) info\n                        margin = table.getColumsData('name', table_col_name)\n                        temp = set()\n                        if margin:\n                            margin = margin[0]['data']\n                            for m in margin:\n                                temp.add(m)\n                        for m in temp:\n                            res_data['margin_dev'].append({'value': m})\n                    elif tb == 'statics_crv':  # aggregate curves for renewables and conventional units\n                        statics = table.getColumsData('name', table_col_name)\n                        for n in range(1, 97):\n                            wind_opt = statics[2+n]['data'][0]  # renewable optimized setpoint\n                            wind_fur = statics[2+n]['data'][1]  # renewable forecast mean\n                            wind_plb = statics[2+n]['data'][2]  # renewable plan lower bound\n                            wind_pub = statics[2+n]['data'][3]  # renewable plan upper bound\n\n                            power_opt = statics[2+n]['data'][4]  # conventional-unit optimized setpoint\n                            power_plb = statics[2+n]['data'][5]  # conventional-unit plan lower bound\n                            power_pub = statics[2+n]['data'][6]  # conventional-unit plan upper bound\n\n                            res_data['wind_opt'].append(wind_opt)\n                            res_data['wind_fur'].append(wind_fur)\n                            res_data['wind_plb'].append(wind_plb)\n                            res_data['wind_pub'].append(wind_pub)\n\n                            res_data['power_opt'].append(power_opt)\n                            res_data['power_plb'].append(power_plb)\n                            res_data['power_pub'].append(power_pub)\n                    elif tb == 'unit_dev':  # renewable and conventional unit devices\n                        unit_dev = table.getColumsData('type', table_col_name)\n                        for index, dev in enumerate(unit_dev[0]['data']):\n                            if dev == '传统机组':  # data value meaning 'conventional unit'; left untranslated to match the E-file\n                                res_data['power_dev'].append({'value': unit_dev[1]['data'][index]})\n                            else:\n                                res_data['wind_dev'].append({'value': unit_dev[1]['data'][index]})\n                    else:\n                        continue\n            elif _type == 'margin':  # fetch tie-line data\n                name = self.get_argument('name', '')\n                margin_table = engine.getTable('tie_crv')\n                table_col_name = margin_table.getAllColNames()\n                margin_data = margin_table.getColumsData('name', table_col_name)\n\n                dev_position = 0\n                for index, v in enumerate(margin_data[0]['data']):\n                    if v == name:\n                        dev_position = index\n                        break\n                for n in range(1, 97):\n                    # margin_data = margin_table.getColumsData(str(n), table_col_name)[0]['data']\n                    # optimized setpoint\n                    opt = margin_data[2+n]['data'][dev_position]\n                    # plan lower bound\n                    plb = margin_data[2+n]['data'][dev_position + 1]\n                    # plan upper bound\n                    pub = margin_data[2+n]['data'][dev_position + 2]\n                    # upper limit\n                    p_max = margin_data[2+n]['data'][dev_position + 3]\n                    # lower limit\n                    p_min = margin_data[2+n]['data'][dev_position + 4]\n                    res_data['margin_opt'].append(opt)\n                    res_data['margin_plb'].append(plb)\n                    res_data['margin_pub'].append(pub)\n                    res_data['margin_max'].append(p_max)\n                    res_data['margin_min'].append(p_min)\n            
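# unit_crv packs per-device columns side by side: five for renewables\n            # (setpoint, plan bounds, capability bounds) and three for conventional\n            # units (setpoint, plan bounds).\n            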
elif _type == 'wind' or _type == 'power':\n                name = self.get_argument('name', '')\n                unit_crt_table = engine.getTable('unit_crv')\n                table_col_name = unit_crt_table.getAllColNames()\n                unit_crt_data = unit_crt_table.getColumsData('name', table_col_name)\n\n                dev_position = 0\n                for index, v in enumerate(unit_crt_data[0]['data']):\n                    if v == name:\n                        dev_position = index\n                        break\n                for n in range(1, 97):\n                    if _type == 'wind':\n                        wind_opt = unit_crt_data[2+n]['data'][dev_position]\n                        wind_pub = unit_crt_data[2+n]['data'][dev_position + 1]\n                        wind_plb = unit_crt_data[2+n]['data'][dev_position + 2]\n                        wind_cub = unit_crt_data[2+n]['data'][dev_position + 3]\n                        wind_clb = unit_crt_data[2+n]['data'][dev_position + 4]\n                        res_data['wind_opt'].append(wind_opt)\n                        res_data['wind_pub'].append(wind_pub)\n                        res_data['wind_plb'].append(wind_plb)\n                        res_data['wind_cub'].append(wind_cub)\n                        res_data['wind_clb'].append(wind_clb)\n                    else:\n                        power_opt = unit_crt_data[2 + n]['data'][dev_position]\n                        power_pub = unit_crt_data[2 + n]['data'][dev_position + 1]\n                        power_plb = unit_crt_data[2 + n]['data'][dev_position + 2]\n                        res_data['power_opt'].append(power_opt)\n                        res_data['power_pub'].append(power_pub)\n                        res_data['power_plb'].append(power_plb)\n        self.write(json.dumps(res_data))\n","sub_path":"handlers/riqian.py","file_name":"riqian.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"422748022","text":"import sounddevice as sd\nimport soundfile as sf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport librosa\nimport hmmlearn.hmm as hmm\nfrom math import exp\nimport pickle\n\nfrom recorder import SpeechRecognition\nfrom detector import Detector\n\ndetector = Detector(model_len_path='model_len.pkl', model_xuong_path='model_xuong.pkl')\n\n# def record_sound(filename, duration=1, fs=44100, play=False):\n#     sd.play( np.sin( 2*np.pi*940*np.arange(fs)/fs ) , samplerate=fs, blocking=True)\n#     sd.play( np.zeros( int(fs*0.2) ), samplerate=fs, blocking=True)\n#     data = sd.rec(frames=duration*fs, samplerate=fs, channels=1, blocking=True)\n#     if play:\n#         sd.play(data, samplerate=fs, blocking=True)\n#     sf.write(filename, data=data, samplerate=fs)\n\ndef record_data(prefix, n=50, duration=1):\n    for i in range(n):\n        print('{}_{}.wav'.format(prefix, i))\n        # record_sound('{}_{}.wav'.format(prefix, i), duration=duration)\n        recorder = SpeechRecognition('{}_{}.wav'.format(prefix, i), detector)\n        recorder.detect()\n        if i % 5 == 4:\n            input(\"Press Enter to continue...\")\n\ndetector = Detector(model_len_path='model_len.pkl', model_xuong_path='model_xuong.pkl')\n\n# record_data(\"len\")\nrecord_data(\"xuong\")\n\ndef get_mfcc(filename):\n    data, fs = librosa.load(filename, sr=None)\n    mfcc = librosa.feature.mfcc(data, sr=fs, n_fft=1024, hop_length=128)\n    return mfcc.T\n\n\nn_sample = 50\ndata_len = [get_mfcc('len_{}.wav'.format(i)) for i in range(n_sample)]\ndata_xuong = [get_mfcc('xuong_{}.wav'.format(i)) for i in range(n_sample)]\n\n\nmodel_len = hmm.GaussianHMM(n_components=30, verbose=True, n_iter=200)\nmodel_len.fit(X=np.vstack(data_len), lengths=[x.shape[0] for x in data_len])\n\nmodel_xuong = 
hmm.GaussianHMM(n_components=30, verbose=True, n_iter=200)\nmodel_xuong.fit(X=np.vstack(data_xuong), lengths=[x.shape[0] for x in data_xuong])\n\nwith open(\"model_len.pkl\", \"wb\") as file1: \n pickle.dump(model_len, file1)\n\nwith open(\"model_xuong.pkl\", \"wb\") as file2:\n pickle.dump(model_xuong, file2)","sub_path":"models/pyaudio/trainhmm.py","file_name":"trainhmm.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"454043078","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport re\nimport psycopg2\nimport numpy as np\n\nfrom operator import itemgetter\n\nfrom .datetime_utils import check_datetime_format, FULL_DATETIME_FORMAT, DEFAULT_DATE_FORMAT, \\\n DEFAULT_DATETIME_FORMAT\nfrom .string_utils import check_password\nfrom .response_message_utils import create_response_message, HTTP_400_BAD_REQUEST, HTTP_400_MSG, \\\n HTTP_401_UNAUTHORIZED, HTTP_401_MSG, HTTP_200_OK, HTTP_200_MSG\n\n_logger = logging.getLogger(__name__)\n\nDOMAIN_SERVER_SI = os.environ.get('DOMAIN_SERVER_SI', 'https://smart-inventory.qa.novobi.com')\n\n\nclass ExtraFieldType:\n # CONSTANTS\n DATETIME_FIELD_TYPE = 'datetime'\n DATE_FIELD_TYPE = 'date'\n EMAIL_FIELD_TYPE = 'email'\n URL_FIELD_TYPE = 'URL'\n IP_ADDRESS_FIELD_TYPE = 'IP_address'\n\n LIST_FIELD_TYPES = [\n DATETIME_FIELD_TYPE,\n DATE_FIELD_TYPE,\n EMAIL_FIELD_TYPE,\n URL_FIELD_TYPE,\n IP_ADDRESS_FIELD_TYPE\n ]\n\n\nclass ServerAPICode:\n REGISTER = 'register'\n UPDATE_PROD_CONF = 'update_prod_fore_conf'\n UPDATE_CLSF_CONF = 'update_clsf_fore_conf'\n UPDATE_NEXT_TIME_RUN = 'update_next_time_run'\n UPDATE_PRODUCT_AGE = 'product_age_report'\n UPDATE_UNDER_OVERSTOCK_REPORT = 'under_overstock_report'\n CHECK_LICENSE_KEY = 'check_license_key'\n UPDATE_RRWF_REPORT = 'reordering_rules_with_forecast_report'\n\n\nclass ServerAPI:\n DICT_API_SUB_DOMAIN = {}\n\n @classmethod\n def get_api_url(cls, api_code):\n \"\"\" Function return the URL corresponding with `api_code`\n\n :param api_code:\n :type api_code: str\n :return: Return the URL of API, if not exist `api_code`,\n this will return empty string\n :rtype: str\n \"\"\"\n return cls.DICT_API_SUB_DOMAIN.get(api_code, '')\n\n\nclass ServerAPIv1(ServerAPI):\n DICT_API_SUB_DOMAIN = {\n ServerAPICode.REGISTER: DOMAIN_SERVER_SI + '/api/register/',\n ServerAPICode.UPDATE_PROD_CONF: DOMAIN_SERVER_SI + '/api/update_prod_fore_conf/',\n ServerAPICode.UPDATE_CLSF_CONF: DOMAIN_SERVER_SI + '/api/update_clsf_fore_conf/',\n ServerAPICode.UPDATE_NEXT_TIME_RUN: DOMAIN_SERVER_SI + '/api/update_next_time_run/',\n ServerAPICode.UPDATE_PRODUCT_AGE: DOMAIN_SERVER_SI + '/api/product_age_report/',\n ServerAPICode.UPDATE_UNDER_OVERSTOCK_REPORT: DOMAIN_SERVER_SI + '/api/under_overstock_report/',\n ServerAPICode.CHECK_LICENSE_KEY: DOMAIN_SERVER_SI + '/api/check_license_key/',\n ServerAPICode.UPDATE_RRWF_REPORT: DOMAIN_SERVER_SI + '/api/reordering_rules_with_forecast_report/'\n }\n\n\ndef is_valid_field(data, field_name, field_type, required=True, extra_info=None):\n \"\"\"\n\n :type data: dict\n :param field_name: the label of variable using to check in data variable\n :type field_name: string\n :param field_type: type of the corresponding value of the label\n :type field_type: \n :param required: is require field\n :type required: bool\n :param extra_info: some constrain for special type of char field\n :type extra_info: ExtraFieldType\n :return: the check result of a valid field\n :rtype: boolean\n \"\"\"\n\n if required and 
field_name not in data.keys():\n key_error_msg = \"The key '%s' is required.\" % field_name\n _logger.exception(key_error_msg, exc_info=True)\n raise KeyError(key_error_msg)\n\n field_value = data.get(field_name, None)\n regex = None\n\n if field_value is not None:\n if not isinstance(field_value, field_type) and not (field_type is float and isinstance(field_value, int)):\n type_error_msg = \"The type of %s must be %s.\" % (field_name, field_type)\n _logger.exception(type_error_msg, exc_info=True)\n raise TypeError(type_error_msg)\n\n if extra_info is None:\n is_valid = True\n elif extra_info == ExtraFieldType.DATETIME_FIELD_TYPE:\n is_valid = check_datetime_format(field_value, DEFAULT_DATETIME_FORMAT, show_exception=False) or \\\n check_datetime_format(field_value, FULL_DATETIME_FORMAT, show_exception=False)\n elif extra_info == ExtraFieldType.DATE_FIELD_TYPE:\n is_valid = check_datetime_format(field_value, DEFAULT_DATE_FORMAT, show_exception=False)\n else:\n if extra_info == ExtraFieldType.EMAIL_FIELD_TYPE:\n # Email address regex\n regex = r'([\\w\\.\\-\\_]+)?\\w+@[\\w\\-\\_]+(\\.\\w+){1,}'\n elif extra_info == ExtraFieldType.URL_FIELD_TYPE:\n # URL regex\n regex = r'([\\--\\:\\w?\\[@%&+~#=]]*\\.[a-z]{2,4}\\/{0,2})((?:[?&](?:\\w+)=(?:\\w+))+|[--:\\w?@%&+~#=]+)?'\n elif extra_info == ExtraFieldType.IP_ADDRESS_FIELD_TYPE:\n # IPv4 address regex\n regex = r'(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[' \\\n r'0-9]?[0-9])) '\n\n pattern = re.compile(regex)\n # result is a matched object if the string is whole matched with the pattern, otherwise result is None\n result = re.fullmatch(pattern, field_value)\n is_valid = False if result is None else True\n else:\n # the value of this field can be None\n is_valid = True\n\n return is_valid\n\n\ndef check_json_fields(json_data, infos_required_field, infos_non_required_field):\n \"\"\" Check required keyword in json data and the type of each field\n\n :type json_data: dict\n :param infos_required_field: list of required fields and type of it\n :type infos_required_field: list((, , ), ...)\n can be datetime/email/URL/IPAddress\n :param infos_non_required_field: list of required fields and type of it\n :type infos_non_required_field: list((, , ), ...)\n :return:\n \"\"\"\n try:\n for field_name, field_type, extra_info in infos_required_field:\n is_valid_field(json_data, field_name, field_type, required=True, extra_info=extra_info)\n\n for field_name, field_type, extra_info in infos_non_required_field:\n is_valid_field(json_data, field_name, field_type, required=False, extra_info=extra_info)\n\n return True\n\n except KeyError as key_error:\n _logger.exception(\"There was an KeyError when checking json fields.\")\n raise key_error\n\n except TypeError as type_error:\n _logger.exception(\"There was an TypeError when checking json fields.\")\n raise type_error\n\n\ndef check_request_authentication(context, json_data):\n \"\"\"\n\n :param context:\n :param json_data: dict\n :return:\n \"\"\"\n # check server password\n server_password = json_data.get('server_pass')\n is_auth = context.env['forecasting.config.settings'].check_si_pass(context, server_password)\n from odoo.tools import config\n forecasting_test = config.misc.get(\"forecasting_test\", False)\n return forecasting_test or is_auth\n\n\ndef check_format_crawl_data_request(json_data, non_required_fields_and_types=None):\n \"\"\"\n Check format the body of HTTP request in Client API\n For example:\n data = {\n 'num_item': ,\n 'time_to': ,\n 'password': 
,\n 'last_write_time': ,\n 'last_id': }\n :param json_data: dict object\n :param non_required_fields_and_types: list of fields are not required and type of it\n :type non_required_fields_and_types: list((, , None), ...)\n :return: True if valid, otherwise raise Error\n \"\"\"\n try:\n check_json_fields(json_data,\n infos_required_field=[('num_item', int, None),\n ('time_to', str, ExtraFieldType.DATETIME_FIELD_TYPE),\n ('password', str, None),\n ('last_id', int, None)],\n infos_non_required_field=non_required_fields_and_types)\n\n return True\n\n except KeyError as key_error:\n _logger.exception(\"Missing fields in body's request.\", exc_info=True)\n raise key_error\n\n except TypeError as type_error:\n _logger.exception(\"Invalid type in in body's request.\", exc_info=True)\n raise type_error\n\n except ValueError as value_error:\n _logger.exception(\"Vale error in in body's request.\", exc_info=True)\n raise value_error\n\n\ndef check_format_data_array(data_field, required_fields_for_data, infos_non_required_field=None):\n \"\"\"\n Check format of the card data in body of HTTP request in API update\n classification\n\n :param infos_non_required_field:\n :param required_fields_for_data:\n :param data_field: list dicts\n :return: True if valid, otherwise raise Error\n \"\"\"\n infos_non_required_field = infos_non_required_field or []\n try:\n is_valid_format = True\n idx = 0\n size_data = len(data_field)\n while is_valid_format and idx < size_data:\n ith_item = data_field[idx]\n is_valid_format = check_json_fields(\n ith_item,\n infos_required_field=required_fields_for_data,\n infos_non_required_field=infos_non_required_field)\n idx += 1\n\n except Exception as e:\n _logger.exception(\"There was an error when checking format data array.\")\n raise e\n\n return is_valid_format\n\n\ndef is_authentication(raw_password, hashed_password):\n return check_password(raw_password, hashed_password)\n\n\ndef generate_domain_for_crawl_data_query(data):\n domain = [('write_date', '<', data.get('time_to'))]\n\n if 'last_write_time' in data:\n domain.append(('write_date', '>', data.get('last_write_time')))\n domain = ['|', '&', ('id', '>', data.get('last_id')), ('write_date', '=', data.get('last_write_time')),\n '&'] + domain\n\n return domain\n\n\ndef get_key_value_in_dict(dict_value, keys):\n \"\"\"\n Return value in a dictionary base on the order of key in ``keys``\n :param dict_value:\n :type dict_value: dict\n :param keys: a list of key to get the data\n :type keys: list\n :return:\n :rtype: list\n \"\"\"\n result = []\n try:\n if len(keys) == 1:\n result = [itemgetter(*list(keys))(dict_value)]\n elif len(keys) > 1:\n result = list(itemgetter(*list(keys))(dict_value))\n\n except Exception as e:\n _logger.exception(\"There was an error when get key value from dictionary.\")\n raise e\n\n return result\n\n\ndef check_format_json_request(env, model, json_data, forecast_level, **kwargs):\n \"\"\"\n Check format of the body of HTTP request in API update product classification\n\n :param json_data: dict object\n :return: True if valid, otherwise raise Error\n \"\"\"\n try:\n is_valid_format = check_json_fields(\n json_data,\n infos_required_field=[('server_pass', str, None),\n ('data', list, None)],\n infos_non_required_field=[])\n\n # check the format of ``data`` fields\n list_data = json_data.get('data', [])\n\n if is_valid_format:\n required_fields_for_data = model.get_json_required_fields(forecast_level=forecast_level)\n non_required_fields_for_data = []\n is_valid_format = check_format_data_array(\n 
list_data,\n required_fields_for_data=required_fields_for_data,\n infos_non_required_field=non_required_fields_for_data\n )\n return is_valid_format\n\n except KeyError as key_error:\n _logger.exception(\"There was an KeyError when checking json request format.\")\n raise key_error\n\n except TypeError as type_error:\n _logger.exception(\"There was an TypeError when checking json request format.\")\n raise type_error\n\n except ValueError as value_error:\n _logger.exception(\"There was an ValueError when checking json request format.\")\n raise value_error\n\n\ndef handle_push_data_request(request, model):\n \"\"\"\n\n :param Request request: request data\n :param model: object model\n :return:\n \"\"\"\n try:\n data = request.jsonrequest\n\n # Step 1: check if the request is authorized or not\n authorized_request = check_request_authentication(context=request, json_data=data)\n assert authorized_request, \"Unauthorized\"\n\n # get company_id from JSON data\n list_data = data.get('data', [])\n company_ids = np.unique([item.get('company_id') for item in list_data]).tolist()\n forecast_level_by_companies = request.env['res.company'].sudo().get_forecast_level_by_company(\n company_ids=company_ids)\n\n for company_id, forecast_level in forecast_level_by_companies.items():\n filtered_data = list(filter(lambda row: row.get('company_id') == company_id, list_data))\n\n # Step 2: check format of data in request\n required_fields_for_data = model.get_json_required_fields(forecast_level=forecast_level)\n non_required_fields_for_data = []\n is_valid = check_format_data_array(filtered_data,\n required_fields_for_data=required_fields_for_data,\n infos_non_required_field=non_required_fields_for_data)\n\n # if the returned data don't have any item to update, we will not need to run this logic\n if is_valid:\n if len(filtered_data):\n # Step 3: transform data in request\n parsed_data = model.transform_json_data_request(list_data=filtered_data)\n\n # get time when records are created in the database\n created_date = parsed_data[0].get('create_date')\n\n # Step 4: update data to the table\n model.create_or_update_records(vals=parsed_data, forecast_level=forecast_level)\n\n # Step 5: push next actions into queue jobs if it is existing\n if hasattr(model, 'trigger_next_actions'):\n model.trigger_next_actions(**{\n 'created_date': created_date,\n 'forecast_level': forecast_level,\n 'company_id': company_id\n })\n else:\n _logger.warning('The format of data used to import to %s is the wrong format', model.name)\n\n # Step 6: create the response\n response_message = create_response_message(success=True, code=HTTP_200_OK, res_msg=HTTP_200_MSG,\n data={})\n except KeyError as key_error:\n _logger.exception('Key Error when handle the request', exc_info=True)\n response_message = create_response_message(success=False, code=HTTP_400_BAD_REQUEST, res_msg=HTTP_400_MSG,\n detail=str(key_error))\n except TypeError as type_error:\n _logger.exception('Type Error when handle the request', exc_info=True)\n response_message = create_response_message(success=False, code=HTTP_400_BAD_REQUEST, res_msg=HTTP_400_MSG,\n detail=str(type_error))\n except ValueError as value_error:\n _logger.exception('Value Error when handle the request', exc_info=True)\n response_message = create_response_message(success=False, code=HTTP_400_BAD_REQUEST, res_msg=HTTP_400_MSG,\n detail=str(value_error))\n except AssertionError:\n _logger.exception('Unauthorized request', exc_info=True)\n response_message = create_response_message(success=False, 
code=HTTP_401_UNAUTHORIZED,\n                                                   res_msg=HTTP_401_MSG)\n    except (psycopg2.DatabaseError, Exception) as db_error:\n        _logger.exception('Error while fetching data from database', exc_info=True)\n        response_message = create_response_message(success=False, code=HTTP_400_BAD_REQUEST, res_msg=HTTP_400_MSG,\n                                                   detail=str(db_error))\n    return response_message\n","sub_path":"SI/si_core/utils/request_utils.py","file_name":"request_utils.py","file_ext":"py","file_size_in_byte":16326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"332400728","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 3 13:43:09 2020\n\n@author: DM\n\"\"\"\n#input the sequence\nseq = 'ATGCGACTACGATCGAGGGCCAT'\nre = seq[::-1] #reverse the sequence\n#create a dictionary to make a complementary sequence\ncomplement = {'A':'T', 'G':'C', 'T':'A', 'C':'G' }\n\ntrantab = str.maketrans(complement)\nrc = re.translate(trantab)\nprint(rc)","sub_path":"Practical8/RC.py","file_name":"RC.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"285229331","text":"'''\nplaceholder is TensorFlow's placeholder node: it temporarily stands in for a\nvalue that will be supplied later.\n'''\n\nimport tensorflow as tf\n\n# To feed data into TensorFlow from the outside, use tf.placeholder();\n# a value is then passed in from the caller on every run.\n# In TensorFlow a placeholder's type must be declared, usually float32.\ninput1 = tf.placeholder(tf.float32)\ninput2 = tf.placeholder(tf.float32)\n\n# mul = multiply: multiplies input1 by input2 and exposes the result as output\noutput = tf.multiply(input1, input2)\n\nwith tf.Session() as sess:\n    # every run must be fed values, supplied as a dict;\n    # the values to feed go in feed_dict={}.\n    # placeholder and feed_dict={} always appear together.\n    print(sess.run(output, feed_dict={input1:[7.], input2:[3.]}))","sub_path":"04_tensorflow_placeholder.py","file_name":"04_tensorflow_placeholder.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"584571390","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for calculating network metrics. Uses naming conventions adopted\nfrom the Brain Connectivity Toolbox (https://sites.google.com/site/bctnet/).\n\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import expm\n\n\ndef communicability_bin(adjacency, normalize=False):\n    \"\"\"\n    Computes the communicability of pairs of nodes in `adjacency`\n\n    Parameters\n    ----------\n    adjacency : (N, N) array_like\n        Unweighted, directed/undirected connection weight/length array\n    normalize : bool, optional\n        Whether to normalize `adjacency` by largest eigenvalue prior to\n        calculation of communicability metric. Default: False\n\n    Returns\n    -------\n    comm : (N, N) numpy.ndarray\n        Symmetric array representing communicability of nodes {i, j}\n\n    References\n    ----------\n    Estrada, E., & Hatano, N. (2008). Communicability in complex networks.\n    Physical Review E, 77(3), 036111.\n\n    Examples\n    --------\n    >>> from netneurotools import metrics\n\n    >>> A = np.array([[1, 0, 1], [0, 1, 1], [1, 0, 1]])\n    >>> Q = metrics.communicability_bin(A)\n    >>> Q\n    array([[4.19452805, 0.        , 3.19452805],\n           [1.47624622, 2.71828183, 3.19452805],\n           [3.19452805, 0.        , 4.19452805]])\n    \"\"\"\n\n    
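# The binary metric is only defined for 0/1 adjacency matrices; weighted\n    # graphs should go through communicability_wei below.\n    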
if not np.all(np.logical_or(adjacency == 0, adjacency == 1)):\n        raise ValueError('Provided adjacency matrix must be unweighted.')\n\n    # normalize by largest eigenvalue to prevent communicability metric from\n    # \"blowing up\"\n    if normalize:\n        norm = np.linalg.eigvals(adjacency).max()\n        adjacency = adjacency / norm\n\n    return expm(adjacency)\n\n\ndef communicability_wei(adjacency):\n    \"\"\"\n    Computes the communicability of pairs of nodes in `adjacency`\n\n    Parameters\n    ----------\n    adjacency : (N, N) array_like\n        Weighted, directed/undirected connection weight/length array\n\n    Returns\n    -------\n    cmc : (N, N) numpy.ndarray\n        Symmetric array representing communicability of nodes {i, j}\n\n    References\n    ----------\n    Crofts, J. J., & Higham, D. J. (2009). A weighted communicability measure\n    applied to complex brain networks. Journal of the Royal Society Interface,\n    6(33), 411-414.\n\n    Examples\n    --------\n    >>> from netneurotools import metrics\n\n    >>> A = np.array([[2, 0, 3], [0, 2, 1], [0.5, 0, 1]])\n    >>> Q = metrics.communicability_wei(A)\n    >>> Q\n    array([[0.        , 0.        , 1.93581903],\n           [0.07810379, 0.        , 0.94712177],\n           [0.32263651, 0.        , 0.        ]])\n    \"\"\"\n\n    # negative square root of nodal degrees\n    row_sum = adjacency.sum(1)\n    neg_sqrt = np.power(row_sum, -0.5)\n    square_sqrt = np.diag(neg_sqrt)\n\n    # normalize input matrix\n    for_expm = square_sqrt @ adjacency @ square_sqrt\n\n    # calculate matrix exponential of normalized matrix\n    cmc = expm(for_expm)\n    cmc[np.diag_indices_from(cmc)] = 0\n\n    return cmc\n","sub_path":"netneurotools/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"115048073","text":"from datetime import datetime\n\nfrom flask import request, url_for, redirect\nfrom flask_classful import route\n\nfrom wrappers import Table, login_required, check_permission\n\nfrom routes.admin import BranchBase\nfrom methods import Sales as Methods\nfrom methods import Branchs\n\nclass Sales(BranchBase):\n    route_base = '/sales'\n\n    methods = Methods\n\n    strings = {\n        'plural' : 'ventas',\n        'singular' : 'venta',\n    }\n\n    table_option = {\n        'new' : False\n    }\n\n    subtemplate = 'sales'\n\n    columns = [\n        { 'title' : 'Id', 'field' : '_id', 'hidden' : True },\n        { 'title' : 'Hora ', 'field' : 'date', 'type' : 'time' },\n        { 'title' : 'Ticket ', 'field' : 'ticket', 'type' : 'number' },\n        { 'title' : 'Productos', 'field' : 'num_of_products', 'type' : 'number' },\n        { 'title' : 'Total', 'field' : 'total', 'type' : 'currency' },\n    ]\n\n    permission = 'reports'\n\n    def get_table_actions(self):\n        actions = [\n            {\n                'title' : 'Ver detalle',\n                'icon' : 'view-list-outline',\n                'color' : 'primary',\n                'endpoint' : 'Sales:view',\n            }\n        ]\n\n        return actions\n\n    def get_filters(self):\n        return []","sub_path":"routes/admin/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"605355364","text":"#!/usr/bin/python3\n\"\"\" flask api module \"\"\"\nfrom flask import Flask\nfrom os import getenv\nfrom models import storage\nfrom api.v1.views import app_views\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.register_blueprint(app_views)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"0.0.0.0\"}})\n\n\n@app.errorhandler(404)\ndef error_404(e):\n    \"\"\" handler for 404 error\n    return a machine friendly response\n    \"\"\"\n    return 
{\"error\": \"Not found\"}, 404\n\n\n@app.errorhandler(400)\ndef error_400(e):\n \"\"\" handler for 400 error\n return a machine friendly response\n \"\"\"\n return {\"error\": \"{}\".format(e.description)}, 400\n\n\n@app.teardown_appcontext\ndef storage_close(error):\n \"\"\" Close the database when app fails\n avoiding to save any change potencially\n harmful\n \"\"\"\n storage.close()\n\n\nif __name__ == '__main__':\n app.run(\n host=getenv('HBNB_API_HOST', '0.0.0.0'),\n port=getenv('HBNB_API_PORT', '5000'),\n debug=True,\n threaded=True\n )\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"92467875","text":"import pygame\nimport os\nimport sys\n\nfrom Tile import Tile\nfrom Enemy import Enemy\nfrom Player import Player\nfrom Border import Border\nfrom MusicButton import MusicButton\nfrom Lamp import Lamp\nfrom PauseButton import PauseButton\nimport datetime\n\n\nSIZE = X, Y = 800, 600\nFPS = 60\nBLACK = pygame.Color(\"black\")\nWHITE = pygame.Color(\"white\")\n\npygame.init()\n\n\ndef load_image(name, colorkey=None): # загрузка изображения и создание прозрачного фона\n fullname = os.path.join('data', name)\n image = pygame.image.load(fullname)\n if colorkey is not None:\n if colorkey == -1:\n colorkey = image.get_at((0, 0))\n image = image.convert()\n image.set_colorkey(colorkey)\n else:\n image = image.convert_alpha()\n return image\n\n\nscreen = pygame.display.set_mode((X, Y))\nclock = pygame.time.Clock()\nscreen.fill(BLACK)\n\nsecond_beginning = -1\ntile_width = tile_height = 48\ncharecter_height = charecter_width = 24\nlx = -1 # отступы поля от границы экрана\nly = -1\n\ngoing_up = False\ngoing_down = False\ngoing_right = False\ngoing_left = False\nrunning = True\n\nlamp_up_sprite = load_image(\"lamp_up.png\", (235, 255, 255))\nlamp_down_sprite = load_image(\"lamp_down.png\", (235, 255, 255))\nlamp_right_sprite = load_image(\"lamp_right.png\", (235, 255, 255))\nlamp_left_sprite = load_image(\"lamp_left.png\", (235, 255, 255))\ntile_images = {'wall': load_image('wall.png'), 'ground': load_image('ground.png')}\nplayer_sprite = load_image('player_sprite.png', (236, 255, 255))\nenemy_sprite = load_image('enemy_sprite.png', (236, 255, 255))\n\nmusic_off_button_sprite = load_image(\"music_off.png\")\nmusic_on_button_sprite = load_image(\"music_on.png\")\npause_button_sprite = load_image(\"pause_button.png\")\n\npl_down_sprite = load_image(\"player_sprite_down.png\", -1)\npl_up_sprite = load_image(\"player_sprite_up.png\", -1)\npl_right_sprite = load_image(\"player_sprite_right.png\", -1)\npl_left_sprite = load_image(\"player_sprite_left.png\", -1)\nen_down_sprite = load_image(\"enemy_sprite_down.png\", -1)\nen_up_sprite = load_image(\"enemy_sprite_up.png\", -1)\nen_right_sprite = load_image(\"enemy_sprite_right.png\", -1)\nen_left_sprite = load_image(\"enemy_sprite_left.png\", -1)\n\nmain_music = 'data/music.mp3' # Jason Garner & Vince de Vera – Creepy Forest (Vinyl) (Don t Starve OST)\nsound_of_death = pygame.mixer.Sound('data/sound_of_death.ogg')\n\nlamps_group = pygame.sprite.Group()\nbuttons_group = pygame.sprite.Group()\ngrounds_group = pygame.sprite.Group()\nwalls_group = pygame.sprite.Group()\nplayer_group = pygame.sprite.Group()\nenemies_group = pygame.sprite.Group()\n\nleft_walls_group = pygame.sprite.Group()\nright_walls_group = pygame.sprite.Group()\nup_walls_group = pygame.sprite.Group()\ndown_walls_group = 
pygame.sprite.Group()\nright_up_corner_group = pygame.sprite.Group()\nleft_up_corner_group = pygame.sprite.Group()\nright_down_corner_group = pygame.sprite.Group()\nleft_down_corner_group = pygame.sprite.Group()\n\nmusic_button = MusicButton(695, 5, music_on_button_sprite, music_off_button_sprite, buttons_group, 100, 44)\npause_button = PauseButton(645, 5, pause_button_sprite, buttons_group, 44, 44)\nplayer = None\n\n\ndef start_screen():\n x = 1\n y = 1\n print(\"\\033[33mПривет! Нажав в игровом окне любую кнопку, вы загрузите уровни,\")\n print(\"созданные \\033[35mразработичками.\\033[0m\")\n print(\"\\033[33mЕсли же вы хотите сыграть в \\033[32mсвой уровень\\033[33m (инструкция по ее созданию\")\n print(\"\\033[33mесть в папке с игрой), то тогда нажмите с открытым окном клавишу '\"\n \"'5'',\\nпотом вставьте сюда название своего уровня, \", end=\"\")\n print(\"если хотите сыграть на своем (не забудьте дописать ''.txt''):\\n\")\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n return\n elif event.type == pygame.MOUSEBUTTONDOWN:\n return\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_5: # если нажата кнопкаа \"5\", то ждем от юзера названия уровня\n level = input()\n return level\n return\n background = pygame.transform.scale(load_image(f'bck_frames\\\\bck_start{x}.png'), (X, Y))\n text = pygame.transform.scale(load_image(f'continue_text\\\\press_text{y // 5 + 1}.png', (235, 255, 255)),\n (810, 54))\n screen.blit(background, (0, 0))\n screen.blit(text, (-3, 530))\n x = (x + 1) % 40 + 1\n y = (y + 1) % 40 + 1\n pygame.display.flip()\n clock.tick(FPS)\n\n\ndef next_level_screen():\n x = 1\n y = 1\n print(\"Нажмите ''5'' и введите название уровня, \"\n \"если хотите запустить свой уровень, в другом случае нажмите любую кнопку\")\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n return\n elif event.type == pygame.MOUSEBUTTONDOWN:\n return\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_5: # если нажата кнопкаа \"5\", то ждем от юзера названия уровня\n level = input()\n return level\n return\n background = pygame.transform.scale(load_image(f'bck_frames\\\\bck_start{x}.png'), (X, Y))\n text = pygame.transform.scale(load_image(f'continue_text\\\\press_text{y // 5 + 1}.png', (235, 255, 255)),\n (810, 54))\n screen.blit(background, (0, 0))\n screen.blit(text, (-3, 530))\n x = (x + 1) % 40 + 1\n y = (y + 1) % 40 + 1\n pygame.display.flip()\n clock.tick(FPS)\n\n\ndef pause_screen():\n x = 1\n y = 1\n pause_table = pygame.transform.scale(load_image(\"pause_text.png\", (235, 255, 255)), (720, 171))\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n return\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return\n background = pygame.transform.scale(load_image(f'bck_frames\\\\bck_start{x}.png'), (X, Y))\n text = pygame.transform.scale(load_image(f'continue_text\\\\press_text{y // 5 + 1}.png', (235, 255, 255)),\n (810, 54))\n screen.blit(background, (0, 0))\n screen.blit(text, (-3, 530))\n screen.blit(pause_table, (45, 5))\n x = (x + 1) % 40 + 1\n y = (y + 1) % 40 + 1 # увеличиваем время между сменой кадров (умножаем на 5, и в строке\n # присваивания (text = ...) 
делим его на 5)\n pygame.display.flip()\n clock.tick(FPS)\n\n\ndef load_level(filename):\n filename = \"data/\" + filename\n with open(filename, 'r') as mapFile:\n level_map = [line.strip() for line in mapFile]\n max_width = max(map(len, level_map))\n return list(map(lambda x: x.ljust(max_width, '.'), level_map))\n\n\ndef generate_level(level):\n new_player, x, y = None, None, None\n ly = (Y - len(level) * tile_height) // 2\n lx = (X - len(level[0]) * tile_width) // 2\n ens = 0\n id_cntr = 0\n for y in range(len(level)):\n for x in range(len(level[y])):\n if level[y][x] == '.':\n Tile('ground', x, y, grounds_group, tile_images, tile_width, tile_height, lx, ly)\n elif level[y][x] == '#':\n Tile('wall', x, y, walls_group, tile_images, tile_width, tile_height, lx, ly)\n # для каждой стены создаем невидимые барьеры\n Border(\"left\", x * tile_width + lx, y * tile_height + ly + 1, x * tile_width + lx,\n (y + 1) * tile_height + ly - 1,\n left_walls_group)\n Border(\"right\", (x + 1) * tile_width + lx - 1, y * tile_height + ly + 1, (x + 1) * tile_width + lx - 1,\n (y + 1) * tile_height + ly - 1,\n right_walls_group)\n Border(\"up\", x * tile_width + lx + 1, y * tile_height + ly, (x + 1) * tile_width + lx - 1,\n y * tile_height + ly,\n up_walls_group)\n Border(\"down\", x * tile_width + lx + 1, (y + 1) * tile_height + ly - 1, (x + 1) * tile_width + lx - 1,\n (y + 1) * tile_height + ly - 1,\n down_walls_group)\n Border(\"corner\", x * tile_width + lx, y * tile_height + ly, x * tile_width + lx, y * tile_height + ly,\n left_up_corner_group)\n Border(\"corner\", (x + 1) * tile_width + lx - 1, y * tile_height + ly, (x + 1) * tile_width + lx - 1,\n y * tile_height + ly,\n right_up_corner_group)\n Border(\"corner\", x * tile_width + lx, (y + 1) * tile_height + ly - 1, x * tile_width + lx,\n (y + 1) * tile_height + ly - 1,\n left_down_corner_group)\n Border(\"corner\", (x + 1) * tile_width + lx - 1, (y + 1) * tile_height + ly - 1,\n (x + 1) * tile_width + lx - 1, (y + 1) * tile_height + ly - 1,\n right_down_corner_group)\n elif level[y][x] == '@':\n Tile('ground', x, y, grounds_group, tile_images, tile_width, tile_height, lx, ly)\n player_group.empty()\n new_player = Player(player_sprite, pl_up_sprite, pl_down_sprite, pl_right_sprite, pl_left_sprite, 4, 1,\n x * tile_width + (tile_width - charecter_width) / 2 + lx,\n y * tile_height + (tile_height - charecter_height) / 2 + ly, player_group)\n elif level[y][x] == '!':\n Tile('ground', x, y, grounds_group, tile_images, tile_width, tile_height, lx, ly)\n a = x * tile_width + (tile_width - charecter_width) // 2 + lx\n b = y * tile_height + (tile_height - charecter_height) // 2 + ly\n ens += 1\n curr_enemy = Enemy(enemy_sprite, en_up_sprite, en_down_sprite, en_right_sprite, en_left_sprite, 4, 1, a,\n b, enemies_group, level,\n (a - lx) // tile_width, (b - ly) // tile_height, id_cntr)\n curr_lamp = Lamp(curr_enemy.rect.x, curr_enemy.rect.y, lamp_up_sprite, lamp_down_sprite,\n lamp_right_sprite, lamp_left_sprite, id_cntr, lamps_group)\n curr_enemy.lamp = curr_lamp\n id_cntr += 1\n # создание невидимых границ уровня, чтобы игрок не смог выйти за его пределы:\n Border(\"right\", lx, ly, lx, (y + 1) * tile_height + ly, right_walls_group)\n Border(\"left\", lx + (x + 1) * tile_width, ly, lx + (x + 1) * tile_width, (y + 1) * tile_height + ly,\n left_walls_group)\n Border(\"down\", lx, ly, lx + (x + 1) * tile_width, ly, down_walls_group)\n Border(\"up\", lx, ly + (y + 1) * tile_height, lx + (x + 1) * tile_width, (y + 1) * tile_height + ly, up_walls_group)\n return 
new_player, x, y, level, ens\n\n\ndef terminate():\n pygame.quit()\n print(\"\\033[36mСпасибо, что играли в нашу игру!\")\n sys.exit()\n\n\npygame.mixer.music.load(main_music)\npygame.mixer.music.play(1000000)\n\n\n# ну что бы наверняка\n\n\ndef clear_groups(): # очистка\n lamps_group.empty()\n grounds_group.empty()\n walls_group.empty()\n left_walls_group.empty()\n right_walls_group.empty()\n up_walls_group.empty()\n down_walls_group.empty()\n player_group.empty()\n enemies_group.empty()\n\n\ndef main(al_cntr): # основной игровой цикл\n going_up = False\n going_down = False\n going_right = False\n going_left = False\n alive_cntr = al_cntr\n while running and alive_cntr > 0:\n screen.fill(BLACK)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n break\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pause_screen()\n break\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n if not pygame.sprite.spritecollideany(player,\n left_walls_group):\n # checking if our player collide with some wall from any side\n going_right = True\n else:\n going_right = False\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n if not pygame.sprite.spritecollideany(player, right_walls_group):\n going_left = True\n else:\n going_left = False\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n if not pygame.sprite.spritecollideany(player, down_walls_group):\n going_up = True\n else:\n going_up = False\n if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n if not pygame.sprite.spritecollideany(player, up_walls_group):\n going_down = True\n else:\n going_down = False\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n # Перестаем бегать, если кнопка отжата\n going_right = False\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n going_left = False\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n going_up = False\n if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n going_down = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if music_button.isMouseOn(event.pos):\n if music_button.turned:\n pygame.mixer.music.pause()\n else:\n pygame.mixer.music.unpause()\n music_button.switch()\n if pause_button.isMouseOn(event.pos):\n pause_screen()\n break\n went_anywhere = False\n if going_down:\n # если игрок может пойти куда-то (проверятся в цикле обработки событий) и\n # он хочет туда пойти (кнопка зажата), то тогда каждую итерацию перемещаем его на его скорость\n went_anywhere = True\n player.move_down(2)\n if pygame.sprite.spritecollideany(player, up_walls_group):\n player.move_up(2)\n if going_up:\n player.move_up(2)\n went_anywhere = True\n if pygame.sprite.spritecollideany(player, down_walls_group):\n player.move_down(2)\n if going_right:\n player.move_right(2)\n went_anywhere = True\n if pygame.sprite.spritecollideany(player, left_walls_group):\n player.move_left(2)\n if going_left:\n player.move_left(2)\n went_anywhere = True\n if pygame.sprite.spritecollideany(player, right_walls_group):\n player.move_right(2)\n if not went_anywhere:\n player.stay_on()\n\n # вручную пробегаемся по всем противникам,\n # и если мы сопприкасаемся в с кем-то, то убиваем его\n for enemy in enemies_group:\n if pygame.sprite.collide_rect(player, enemy) and not enemy.dead:\n sound_of_death.play()\n alive_cntr -= 1\n enemy.kill()\n\n if pygame.sprite.spritecollideany(player, lamps_group):\n if second_beginning == -1:\n second_beginning = datetime.datetime.now().second\n else:\n if 
datetime.datetime.now().second * 2 >= (second_beginning * 2 + 1) % 120:\n sound_of_death.play()\n second_beginning = -1\n return True\n else:\n second_beginning = -1\n\n player_group.update()\n enemies_group.update()\n buttons_group.update()\n grounds_group.draw(screen)\n lamps_group.draw(screen)\n enemies_group.draw(screen)\n player_group.draw(screen)\n walls_group.draw(screen)\n buttons_group.draw(screen)\n\n pygame.display.flip()\n clock.tick(FPS)\n\n\ncurr_level = start_screen()\n# проверка на то, ввел ли пользователь название карты или решил сыграть в готовые уровни\nif curr_level != None:\n try:\n while curr_level != None:\n isPlayerDead = True\n while isPlayerDead:\n player, level_x, level_y, level_map, enemies_cntr = generate_level(load_level(f\"levels/{curr_level}\"))\n running = True\n isPlayerDead = main(enemies_cntr)\n clear_groups()\n curr_level = next_level_screen()\n except FileNotFoundError:\n print(\"\\033[31mОшибка 101: Файл не найден\")\nelse:\n try:\n for i in range(1, 21):\n isPlayerDead = True\n while isPlayerDead:\n player, level_x, level_y, level_map, enemies_cntr = generate_level(load_level(f\"levels/level{i}.txt\"))\n running = True\n isPlayerDead = main(enemies_cntr)\n clear_groups()\n curr_level = next_level_screen()\n while curr_level != None:\n isPlayerDead = True\n while isPlayerDead:\n player, level_x, level_y, level_map, enemies_cntr = generate_level(\n load_level(f\"levels/{curr_level}\"))\n running = True\n isPlayerDead = main(enemies_cntr)\n clear_groups()\n curr_level = next_level_screen()\n except FileNotFoundError:\n print(\"\\033[31mОшибка 102: Системный файл карты уровня не найден\")\nterminate()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":19102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"78288847","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nSimulating the movement of Kinesin at different concentrations of ATP(MC Simulation)\r\n\r\n\"\"\"\r\n\r\n#Import the required packages\r\nimport numpy as np\r\n\r\n#Set random number seed\r\nmyseed = 91\r\n\r\n#The state of kinesin is represented by K\r\nK = 113 \r\n#K is a three-digit number, and first digit represents the state of the trailing head.(1-ATP;2-ADP;3-free)\r\n#Second digit the status of Kinesin(1-two head bound;2-Intermediate state)\r\n#Third digit represents the state of the leading head(1-ATP;2-ADP;3-free)\r\n\r\n#Set parameter value\r\nkplus = 307 #trailing head ATP->ADP rate;s-1\r\nkminus = 9.8 #leading head ATP->ADP rate;s-1\r\nkb = 3.3 #ATP bind second order rate;s-1uM-1\r\nkb_slow = 1 #leading head ATP bind slow times\r\nATP_conc = 2000 #ATP concentrations;uM\r\nF = 7 #load;pN\r\nF_all = [0,1,2] #all loads;pN\r\nr0 = 220 #0pN step ratio\r\nFs = 8 #Stop Force;pN\r\nr = r0 ** (1-F/Fs) #step ratio\r\nPe = r / (kplus/kminus + r) #Forward probability\r\nP0 = 0.65 #0pN, The escape probability after ATP hydrolysis\r\ndeltad = 1 #nm, distance parameter\r\nkBT = 4.11#pN.nm, kBT\r\nP0F = 1 - (1 - P0)*np.exp(F*deltad/kBT)#The escape probability of leading head after ATP hydrolysis when F>0\r\nd = 8.2 #step length;nm\r\nkminus2 = 30 #leading head ATP release rate; s-1\r\ntdwell = 0.001 #Intermediate state judgment, s\r\n\r\n#The time of kinesin movement is represented by T\r\nT = 0.0\r\n\r\n#The position of trailing head is represented by X\r\nX = 0\r\nP = 0.0#position of Kinesin\r\n\r\n#Number of steps\r\nNstep = 0\r\n\r\n#The number of ATP consumed\r\nNofATP = 0\r\n\r\n#Number of simulation 
experiments N\r\nN = 1000\r\n\r\n#Record the velocity of each simulation V\r\nV = []\r\n\r\n#Record the number of steps per simulation\r\nNATP = []\r\n\r\n#Record randomness\r\nRandomness = []\r\n\r\n#steps per simulation\r\nNumber = []\r\n\r\n#Simulation start\r\n\r\nfor F_index in range(len(F_all)):\r\n\r\n #parameters initialize\r\n F = F_all[F_index]\r\n r = r0 ** (1-F/Fs) #step ratio\r\n Pe = r / (kplus/kminus + r) #Forward probability\r\n P0F = 1 - (1 - P0)*np.exp(F*deltad/kBT)#The escape probability of leading head after ATP hydrolysis when F>0\r\n print(\"Force: \", F, \"pN\")\r\n print(\"[ATP]: \", ATP_conc, \"uM\")\r\n V = []\r\n NATP = []\r\n Randomness = []\r\n Number = []\r\n Ratio = []\r\n Frecord = []\r\n Brecord = []\r\n Fstep = 0\r\n Bstep = 0\r\n np.random.seed(myseed)\r\n \r\n for index in range(N):\r\n K = 113 #Initial state:T--0\r\n T = 0.0\r\n X = 0\r\n Time = []\r\n Position = []\r\n NofATP = 0\r\n Nstep = 0\r\n Fstep = 0\r\n Bstep = 0\r\n Time.append(T)\r\n P = X+d/2\r\n Position.append(P)\r\n while 1: \r\n K_now = K #Record current status\r\n if K_now == 113: #T--0\r\n kT = kplus + kb*ATP_conc\r\n T = T + (np.random.exponential(1.0/kT))\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= kplus / kT: #trailing head hydr\r\n if np.random.random() <= P0F:\r\n K = 3221 #0D;last digit 1 represents forward\r\n X = X+d\r\n P = X\r\n NofATP = NofATP+1\r\n else:\r\n K = 313 #not escape after hydr\r\n X = X\r\n P = X\r\n NofATP = NofATP+1 \r\n else: #leading head bind ATP\r\n K = 111 #T--T\r\n X = X\r\n P = X+d/2\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 3221: #0D;last digit 1 represents forward\r\n kT = kb*ATP_conc\r\n T = T + (np.random.exponential(1.0/kT))\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= Pe: #Forward\r\n K = 113 #T--0\r\n X = X\r\n P = X+d/2\r\n Nstep = Nstep+1\r\n Fstep = Fstep+1\r\n else: #Back\r\n K = 311 #0--T\r\n X = X-d\r\n P = X+d/2\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 3222: #0D;last digit 2 represents backward\r\n kT = kb*ATP_conc\r\n dwellmid = (np.random.exponential(1.0/kT))\r\n T = T + dwellmid\r\n decision_ch = np.random.random() #choice\r\n if dwellmid > tdwell:\r\n Bstep = Bstep+1\r\n if decision_ch <= Pe: #Forward\r\n K = 113 #T--0\r\n X = X\r\n P = X+d/2\r\n if dwellmid > tdwell:\r\n Fstep = Fstep+1\r\n else: #Back\r\n K = 311 #0--T\r\n X = X-d\r\n P = X+d/2\r\n Nstep = Nstep+1\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 111: #T--T\r\n kT = kplus + kminus + kminus2\r\n T = T + (np.random.exponential(1.0/kT))\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= kplus / kT: #trailing head hydr\r\n if np.random.random() <= P0F:\r\n K = 1221 #TD;last digit 1 represents forward\r\n X = X+d\r\n P = X\r\n NofATP = NofATP+1\r\n else:\r\n K = 311#not escape after hydr\r\n X = X\r\n P = X\r\n NofATP = NofATP+1\r\n elif decision_ch <= (kplus+kminus) / kT: #leading head hydr\r\n if np.random.random() <= P0:\r\n K = 1222 #TD;last digit 2 represents backward\r\n X = X\r\n P = X\r\n NofATP = NofATP+1\r\n else:\r\n K = 113#not escape after hydr\r\n X = X\r\n P = X\r\n NofATP = NofATP+1 \r\n else : #leading head release ATP\r\n K = 113 #T--0\r\n X = X\r\n P = X+d/2\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 311: #0--T\r\n kT = kminus + kb*ATP_conc + kminus2\r\n T = T + (np.random.exponential(1.0/kT))\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= kminus / kT: #leading head hydr\r\n if np.random.random() <= 
P0:\r\n K = 3222 #0D;last digit 2 represents backward\r\n X = X\r\n P = X\r\n NofATP = NofATP+1\r\n else:\r\n K = 313#not escape after hydr\r\n X = X\r\n P = X\r\n NofATP = NofATP+1\r\n elif decision_ch <= (kminus+kb*ATP_conc) / kT: #trailing head bind ATP\r\n K = 111 #T--T\r\n X = X\r\n P = X+d/2\r\n else : #leading head release ATP\r\n K = 313 #0--0\r\n X = X\r\n P = X+d/2\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 1221: #TD\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= Pe: #Forward\r\n K = 113 #T--0\r\n X = X\r\n P = X+d/2\r\n Nstep = Nstep+1\r\n Fstep = Fstep+1\r\n else: #Back\r\n K = 311 #0--T\r\n X = X-d\r\n P = X+d/2\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 1222: #TD\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= Pe: #Forward\r\n K = 113 #T--0\r\n X = X\r\n P = X+d/2\r\n else: #Back\r\n K = 311 #0--T\r\n X = X-d\r\n P = X+d/2\r\n Nstep = Nstep+1\r\n Bstep = Bstep+1\r\n Time.append(T)\r\n Position.append(P)\r\n \r\n if K_now == 313: #0--0\r\n kT = kb*ATP_conc + kb*ATP_conc\r\n T = T + (np.random.exponential(1.0/kT))\r\n decision_ch = np.random.random() #choice\r\n if decision_ch <= kb*ATP_conc / kT: #leading head bind ATP\r\n K = 311 #0--T\r\n X = X\r\n P = X+d/2\r\n else : #trailing head bind ATP\r\n K = 113 #T--0\r\n X = X\r\n P = X+d/2\r\n \r\n if T > 1: #duration time per simulation,s\r\n break\r\n \r\n V.append(P/T)\r\n NATP.append(P)\r\n if index % 200 == 199:\r\n Randomness.append(1.0/(np.var(NATP)/d/np.mean(NATP)))\r\n NATP = []\r\n Frecord.append(Fstep)\r\n Brecord.append(Bstep)\r\n \r\n if Nstep != 0:\r\n Number.append(NofATP/Nstep)\r\n if Bstep != 0:\r\n Ratio.append(Fstep/Bstep)\r\n\r\n print(\"Velocity: \",np.mean(V),\"+-\",np.std(V)/(len(V)**0.5),\"nm/s,SEM\")\r\n print(\"Randomness: \",np.mean(Randomness),\"+-\",np.std(Randomness)/(len(Randomness)**0.5),\",SEM\")\r\n print(\"ATPs per step: \",np.mean(Number),\"+-\",np.std(Number)/(len(Number)**0.5),\",SEM\")\r\n print(\"Forward step/Backward step: \",np.mean(Ratio),\"+-\",np.std(Ratio)/(len(Ratio)**0.5),\",SEM\")\r\n print(\"-------------------------------------------------------------------\")\r\n","sub_path":"KIF17_ForwardLoad.py","file_name":"KIF17_ForwardLoad.py","file_ext":"py","file_size_in_byte":10113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"605271119","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom merchandise.models import Product\nfrom django.db.models.signals import m2m_changed, pre_save\n\n# Create your models here.\nclass Cart(models.Model):\n user = models.ForeignKey(User,blank=True,null=True,on_delete=models.CASCADE)\n products = models.ManyToManyField(Product,blank=True)\n time_created = models.DateTimeField(auto_now_add=True)\n time_updated = models.DateTimeField(auto_now=True)\n subtotal = models.DecimalField(max_digits=50,decimal_places=2,default=0.00)\n cart_total = models.DecimalField(max_digits=50, decimal_places=2, default=0.00)\n\n\ndef subtotal_cart_reciever(sender, instance, action ,*args, **kwargs):\n if action == 'post_clear' or action == 'post_add' or action == 'post_remove':\n total = 0\n products = instance.products.all()\n for item in products:\n total += item.product_price\n instance.subtotal = total\n instance.save()\n\nm2m_changed.connect(subtotal_cart_reciever, sender=Cart.products.through)\n\n\ndef total_cart_reciever(sender, instance,*args, **kwargs):\n instance.cart_total = 
instance.subtotal\n\npre_save.connect(total_cart_reciever, sender=Cart)\n","sub_path":"cart_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"446279261","text":"\n\nfrom xai.brain.wordbase.nouns._diocese import _DIOCESE\n\n#class header\nclass _DIOCESES(_DIOCESE, ):\n\tdef __init__(self,):\n\t\t_DIOCESE.__init__(self)\n\t\tself.name = \"DIOCESES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"diocese\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_dioceses.py","file_name":"_dioceses.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"110984392","text":"#!/usr/bin/env python3\n\"\"\"\nException handling: statements that may raise an exception go inside try; when one is raised, control jumps to the handler code.\nStatements that should run only when no exception occurs go in else.\nStatements that must run whether or not an exception occurs go in finally\"\"\"\n\ntry:\n    num = int(input('number: '))\n    result = 100/num\nexcept (ValueError, ZeroDivisionError):  # catch the error details\n    print('Invalid Input')\nexcept (KeyboardInterrupt, EOFError):\n    print('\\nBye-Bye')\nelse:\n    print(result)\nfinally:\n    print('Done')","sub_path":"python/day5/error_test.py","file_name":"error_test.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"612825700","text":"import unittest\nimport pandas\nfrom openpyxl import load_workbook\nfrom roster import Roster\n\nclass TestRoster(unittest.TestCase):\n\n    def test_read_roster(self):\n        with Roster(\"Jones_2019.xlsx\") as r:\n            student_names = r.get_student_names()\n            self.assertTrue(len(student_names) == 7)\n            self.assertTrue(\"Robert Waters\" in student_names)\n\n            catherine = r.get_student(\"Catherine Hitchens\")\n            self.assertTrue(catherine[\"id\"] == 3)\n            self.assertTrue(isinstance(catherine[\"grades\"], pandas.Series))\n            self.assertTrue(len(catherine[\"grades\"]) == 10)\n            self.assertTrue(catherine[\"grades\"][4] == 86)\n\n            self.assertTrue(r.class_average() == 614.1/7)\n\n    def test_write_roster(self):\n        with Roster(\"Jones_2019.xlsx\") as r:\n            john = r.get_student(\"Johnny Carson\")\n            for assignment, grade in [(3, 90), (6, 94), (9, 92)]:\n                john[\"grades\"][assignment] = grade\n            self.assertTrue(r.class_average() == 616.6/7)\n            r.save(\"Jones_2019_Updated.xlsx\")\n\n        wb = load_workbook(\"Jones_2019_Updated.xlsx\")\n        self.assertTrue(wb[\"Student_1\"][\"B12\"].value == 94)\n        wb.close()\n\n    def test_delete_roster_student(self):\n        student_count = 0\n        with Roster(\"Jones_2019.xlsx\") as r:\n            student_count = len(r.get_student_names())\n            self.assertTrue(student_count == 7)\n            self.assertTrue(r.get_student(\"William Thomas\")[\"id\"] == 5)\n            r.delete_student(\"Allen Dalton\")\n            student_count = len(r.get_student_names())\n            self.assertTrue(student_count == 6)\n            self.assertTrue(r.get_student(\"William Thomas\")[\"id\"] == 4)\n            r.save(\"Jones_2019_Reduced.xlsx\")\n\n        wb = load_workbook(\"Jones_2019_Reduced.xlsx\")\n        sheet_names = wb.sheetnames\n        self.assertTrue(len(sheet_names) == 7)\n        self.assertTrue(sheet_names[0] == \"Roster\")\n        self.assertTrue(sheet_names[-1] == \"Student_6\")\n        self.assertTrue(wb[\"Student_3\"][\"B7\"].value == 92)\n        wb.close()\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"test_roster.py","file_name":"test_roster.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
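The setup_logger helper in the next record (obd/Logging.py) wires a FileHandler to a named logger. Two details of the stdlib logging API are easy to get wrong here: logging.Handler.setFormatter expects a logging.Formatter instance rather than a bare format string, and the record attribute in the format is lowercase %(levelname)s. A minimal, self-contained sketch of the same pattern, writing to stderr via StreamHandler instead of a file; the logger name "demo" is an illustrative assumption, not something from the original module:

import logging

LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"  # lowercase 'levelname'

# Wrap the format string in a Formatter before attaching it to the handler.
handler = logging.StreamHandler()  # logging.FileHandler("some.log") behaves the same way
handler.setFormatter(logging.Formatter(LOG_FORMAT))

logger = logging.getLogger("demo")  # hypothetical logger name for illustration
logger.setLevel(logging.INFO)
logger.addHandler(handler)

logger.info("handler attached")  # emits e.g.: INFO 2020-01-01 00:00:00,000 - handler attached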
+{"seq_id":"460089705","text":"import logging \n\nLOG_FORMAT = \"%(Levelname)s %(asctime)s - %(message)s\"\n\ndef setup_logger(name, log_file, level= logging.INFO):\n\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(LOG_FORMAT)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger\n ","sub_path":"obd/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"371920670","text":"from servicelib.async_utils import cancel_sequential_workers\n\nfrom ..meta import PROJECT_NAME, __version__\n\n#\n# SEE https://patorjk.com/software/taag/#p=display&f=Small&t=Director\n#\nWELCOME_MSG = r\"\"\"\n______ _ _\n| _ (_) | |\n| | | |_ _ __ ___ ___| |_ ___ _ __\n| | | | | '__/ _ \\/ __| __/ _ \\| '__|\n| |/ /| | | | __/ (__| || (_) | |\n|___/ |_|_| \\___|\\___|\\__\\___/|_| {}\n\n\"\"\".format(\n f\"v{__version__}\"\n)\n\n\nasync def on_startup() -> None:\n print(WELCOME_MSG, flush=True)\n\n\nasync def on_shutdown() -> None:\n await cancel_sequential_workers()\n msg = PROJECT_NAME + f\" v{__version__} SHUT DOWN\"\n print(f\"{msg:=^100}\", flush=True)\n","sub_path":"services/director-v2/src/simcore_service_director_v2/core/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"412147923","text":"import logging\nfrom django.core.cache import cache\nfrom django.shortcuts import render\nfrom django.conf import settings\nimport zeep\nimport re\n\nlogger = logging.getLogger(__name__)\n\nWSDL = 'https://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'\n\nSTATION_ABBREV = {\n 'London Kings Cross': 'London Kings X',\n 'London Liverpool Street': 'London Liv. 
St',\n 'Birmingham New Street': \"Birm'ham New St\",\n}\n\n\ndef station_board(request):\n '''\n Retrieve a 'DepartureBoard' from National Rail Enquiries\n and render it as a web page\n '''\n station = request.GET.get('station', '')\n assert station, 'No station code found'\n offset = int(request.GET.get('offset', 0))\n\n cache_key = \"station_board!{0}!{1}\".format(station, offset)\n data = cache.get(cache_key)\n if data:\n logger.info('Cache hit for %s', cache_key)\n\n else:\n logger.info('Cache miss for %s', cache_key)\n data = {'messages': [], 'services': []}\n\n client = zeep.Client(wsdl=WSDL)\n raw_data = client.service.GetDepartureBoard(\n numRows=50, crs=station,\n _soapheaders={\"AccessToken\": settings.NRE_API_KEY},\n timeOffset=offset\n )\n\n data['locationName'] = raw_data['locationName']\n data['generatedAt'] = raw_data['generatedAt'].strftime(\"%H:%M\")\n\n if raw_data['nrccMessages']:\n for message in raw_data['nrccMessages']['message']:\n for key in message:\n data['messages'].append(re.sub('<[^<]+?>', '', message[key]))\n if len(data['messages']) > 1:\n data['messages'] = ['Multiple travel alerts in force - see www.nationalrail.co.uk for details.']\n\n if raw_data['trainServices']:\n for service in raw_data['trainServices']['service']:\n this_service = {}\n this_service['std'] = service['std']\n this_service['etd'] = service['etd']\n dest = service['destination']['location'][0]['locationName']\n if dest in STATION_ABBREV:\n dest = STATION_ABBREV[dest]\n this_service['destination'] = dest\n data['services'].append(this_service)\n\n cache.set(cache_key, data, timeout=30)\n\n return render(request, 'smartpanel/station_board.html', {'data': data})\n","sub_path":"tfc_web/smartpanel/views/widgets/station_board.py","file_name":"station_board.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"8584651","text":"import numpy\nfrom multiprocessing.pool import ThreadPool\nimport random\nimport math\nimport pre_processing\nimport pandas as pd\nimport similarity_functions\nimport file_utils\nimport gensim\nfrom nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\n\n\n\nsimilarities = ['total_set_similairy','skipthoughts_similarity','skipthoughts_similarity_N','num_word_similarity','max_set_similairy','vec_similairy'];\n#similarities = ['vec_similairy'];\n\n#pre-processing and extracting words\nf = 'n1400_dn'\nfile = '/home/roozbeh/data/wiki/data/'+f+'.csv'\ndf = pd.read_csv(file)\nwords,sentences,selected_lemmas, word_similarity_matrix = pre_processing.pre_process(df)\n\n\n#skipthoughts model\nl = len(sentences)\nimport skipthoughts\nsentence_model = skipthoughts.load_model()\nencoder = skipthoughts.Encoder(sentence_model)\n\n\npool = ThreadPool(processes=22)\n\n#gensim word2vec model\nfrom gensim import corpora, models\nglobal model \nmodel = gensim.models.Word2Vec.load(\"wiki_files/wiki.en.word2vec.model\") \n\n\ninputs = [0 for i in range(l*l)]\nsentences_strs = [\" \".join(sent) for sent in sentences]\nsentence_vectors = encoder.encode(sentences_strs)\nsentence_word_indices = [[words.index(w) for w in sent if w in words] for sent in sentences]\ntotal_vector = [sum([model.wv[w] for w in sent if w in model.wv.vocab]) for sent in sentences]\n\n\n\n#gensim lda model\ntokenizer = RegexpTokenizer(r'\\w+')\nen_stop = get_stop_words('en')\np_stemmer = PorterStemmer()\ntexts = []\n\nfor i in 
sentences_strs:\n\traw = i.lower()\n\ttokens = tokenizer.tokenize(raw)\n\tstopped_tokens = [i for i in tokens if not i in en_stop]\n\tstemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n\ttexts.append(stemmed_tokens)\n\n\ndictionary = corpora.Dictionary(texts)\ncorpus = [dictionary.doc2bow(text) for text in texts]\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=200, id2word = dictionary, passes=10)\nLDA_vectors = [[l[1] for l in ldamodel[dictionary.doc2bow(text)]] for text in texts]\n\n\n\ndef total_set_similairy(A,B):\n s = 0\n for i in A:\n for j in B:\n \ts = s + word_similarity_matrix[i][j]\n s = s / (len(A)*len(B)+1)\n return s\n\n\ndef max_set_similairy(A,B):\n m = 0\n for i in A:\n for j in B:\n if (word_similarity_matrix[i][j]>m):\n m = word_similarity_matrix[i][j]\n return m\n\n\n\ndef vec_similairy(i,j):\n p = 0\n p = numpy.dot(total_vector[i],total_vector[j])/(numpy.linalg.norm(total_vector[i])*numpy.linalg.norm(total_vector[j])+0.000001)\n return p\n\n\ndef num_word_similarity(A,B):\n l = len(set(A) & set(B))\n return l\n\n\nfrom tqdm import trange\nk = 0;\nfor i in trange(l):\n\tfor j in range(l):\n\t\tinputs[k] = [i,j]\n\t\tk = k + 1;\n\n\n\nmat = [[0 for i in range(l)] for j in range(l)]\n\nglobal counter; counter = 0 \n\ndef f(x,s):\n\tglobal counter\n\ti = x[0]\n\tj = x[1]\n\n\tif (s=='num_word_similarity'):\n\t\tmat[i][j] = similarity_functions.num_word_similarity(sentences[i],sentences[j])\n\telif (s=='total_set_similairy'):\n\t\tmat[i][j] = total_set_similairy(sentence_word_indices[i],sentence_word_indices[j])\n\telif (s=='max_set_similairy'):\n\t\tmat[i][j] = max_set_similairy(sentence_word_indices[i],sentence_word_indices[j])\n\telif (s=='vec_similairy'):\n\t\tmat[i][j] = vec_similairy(i,j)\n\telif (s=='skipthoughts_similarity'):\n\t\tmat[i][j] = numpy.dot(sentence_vectors[i],sentence_vectors[j])\n\telif (s=='skipthoughts_similarity_N'):\n\t\tmat[i][j] = numpy.dot(sentence_vectors[i],sentence_vectors[j])/(numpy.linalg.norm(sentence_vectors[i])*numpy.linalg.norm(sentence_vectors[j])+0.000001)\n\t\n\n\tcounter = counter + 1\n\tprint(s,counter)\n\n\n\n\n\ndef parallel_proc(f,inputs,s):\n\t\n\tnumber_of_threads=20\n\tl = len(inputs)\n\tm = math.ceil(l/number_of_threads)\n\n\tdef ff(l):\n\t\treturn [f(x,s) for x in l]\n\n\tdef ind(i):\n\t\tif (i 3:\n raise forms.ValidationError({'tags': _('Too many tags selected')})\n \n if cleaned_data['video_link']:\n video_link = cleaned_data['video_link']\n\n if 'youtu' in video_link:\n result = re.search('((?<=(v|V)/)|(?<=be/)|(?<=(\\?|\\&)v=)|(?<=embed/))([\\w-]+)', video_link)\n if result:\n cleaned_data['video_link'] = 'https://www.youtube.com/embed/{}'.format(result.group(0))\n return cleaned_data\n\n else:\n raise forms.ValidationError({'video_link': _('Unusable link')})\n \n if 'vimeo' in video_link:\n result = re.search(r'(http|https)?:\\/\\/(www\\.)?vimeo.com\\/(?:channels\\/(?:\\w+\\/)?|groups\\/([^\\/]*)\\/videos\\/|)(\\d+)(?:|\\/\\?)', video_link)\n if result:\n cleaned_data['video_link'] = 'https://player.vimeo.com/video/{}'.format(result.group(4))\n return cleaned_data\n\n else:\n raise forms.ValidationError({'video_link': _('Unusable link')})\n \n\n raise forms.ValidationError({'video_link': _('Unusable link')})\n\n return cleaned_data\n\n\nclass TrainingTranslateForm(forms.ModelForm):\n class Meta:\n model = Training\n fields = Training.TRANSLATED_FIELDS_CODED\n exclude = [\n 'nb_players',\n 'nb_balls',\n 'nb_mallets',\n 'space',\n 'video_link',\n 'difficulty',\n 'tags',\n 'author_name',\n 
'author_city',\n 'author_mail'\n ]\n","sub_path":"nantes_bike_polo/training/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"143512497","text":"from kubetools.cli.git_utils import get_git_info\nfrom kubetools.config import load_kubetools_config\nfrom kubetools.constants import (\n ROLE_LABEL_KEY,\n)\nfrom kubetools.deploy.image import ensure_docker_images\nfrom kubetools.deploy.util import log_actions\nfrom kubetools.kubernetes.api import (\n create_cronjob,\n create_deployment,\n create_job,\n create_namespace,\n create_service,\n cronjob_exists,\n delete_job,\n deployment_exists,\n get_object_name,\n list_cronjobs,\n list_deployments,\n list_namespaces,\n list_services,\n namespace_exists,\n service_exists,\n update_cronjob,\n update_deployment,\n update_namespace,\n update_service,\n)\nfrom kubetools.kubernetes.config import (\n generate_kubernetes_configs_for_project,\n generate_namespace_config,\n)\n\n\n# Deploy/upgrade\n# Handles deploying new services and upgrading existing ones\n\ndef get_deploy_objects(\n build,\n app_dirs,\n replicas=None,\n default_registry=None,\n build_args=None,\n extra_envvars=None,\n extra_annotations=None,\n ignore_git_changes=False,\n custom_config_file=False,\n):\n all_services = []\n all_deployments = []\n all_jobs = []\n all_cronjobs = []\n\n envvars = {\n 'KUBE_ENV': build.env,\n 'KUBE_NAMESPACE': build.namespace,\n }\n if extra_envvars:\n envvars.update(extra_envvars)\n\n annotations = {\n 'kubetools/env': build.env,\n 'kubetools/namespace': build.namespace,\n }\n if extra_annotations:\n annotations.update(extra_annotations)\n\n namespace = generate_namespace_config(build.namespace, base_annotations=annotations)\n\n for app_dir in app_dirs:\n commit_hash, git_annotations = get_git_info(app_dir, ignore_git_changes)\n annotations.update(git_annotations)\n\n kubetools_config = load_kubetools_config(\n app_dir,\n env=build.env,\n namespace=build.namespace,\n app_name=app_dir,\n custom_config_file=custom_config_file,\n )\n\n context_to_image = ensure_docker_images(\n kubetools_config, build, app_dir,\n commit_hash=commit_hash,\n default_registry=default_registry,\n build_args=build_args,\n )\n\n services, deployments, jobs, cronjobs = generate_kubernetes_configs_for_project(\n kubetools_config,\n envvars=envvars,\n context_name_to_image=context_to_image,\n base_annotations=annotations,\n replicas=replicas or 1,\n default_registry=default_registry,\n )\n\n all_services.extend(services)\n all_deployments.extend(deployments)\n all_jobs.extend(jobs)\n all_cronjobs.extend(cronjobs)\n\n existing_deployments = {\n get_object_name(deployment): deployment\n for deployment in list_deployments(build.env, build.namespace)\n }\n\n # If we haven't been provided an explicit number of replicas, default to using\n # anything that exists live when available.\n if replicas is None:\n for deployment in all_deployments:\n existing_deployment = existing_deployments.get(get_object_name(deployment))\n if existing_deployment:\n deployment['spec']['replicas'] = existing_deployment.spec.replicas\n\n return namespace, all_services, all_deployments, all_jobs, all_cronjobs\n\n\ndef log_deploy_changes(\n build, namespace, services, deployments, jobs, cronjobs,\n message='Executing changes:',\n name_formatter=lambda name: name,\n):\n existing_namespace_names = set(\n get_object_name(namespace)\n for namespace in list_namespaces(build.env)\n )\n 
existing_service_names = set(\n get_object_name(service)\n for service in list_services(build.env, build.namespace)\n )\n existing_deployment_names = set(\n get_object_name(deployment)\n for deployment in list_deployments(build.env, build.namespace)\n )\n existing_cronjobs_names = set(\n get_object_name(cronjob)\n for cronjob in list_cronjobs(build.env, build.namespace)\n )\n\n deploy_service_names = set(\n get_object_name(service) for service in services\n )\n deploy_deployment_names = set(\n get_object_name(deployment) for deployment in deployments\n )\n deploy_cronjobs_names = set(\n get_object_name(cronjob) for cronjob in cronjobs\n )\n deploy_namespace_name = set((build.namespace,))\n\n new_namespace = deploy_namespace_name - existing_namespace_names\n\n new_services = deploy_service_names - existing_service_names\n update_services = deploy_service_names - new_services\n\n new_deployments = deploy_deployment_names - existing_deployment_names\n update_deployments = deploy_deployment_names - new_deployments\n\n new_cronjobs = deploy_cronjobs_names - existing_cronjobs_names\n update_cronjobs = deploy_cronjobs_names - new_cronjobs\n\n with build.stage(message):\n log_actions(build, 'CREATE', 'namespace', new_namespace, name_formatter)\n log_actions(build, 'CREATE', 'service', new_services, name_formatter)\n log_actions(build, 'CREATE', 'deployment', new_deployments, name_formatter)\n log_actions(build, 'CREATE', 'cronjob', new_cronjobs, name_formatter)\n log_actions(build, 'UPDATE', 'service', update_services, name_formatter)\n log_actions(build, 'UPDATE', 'deployment', update_deployments, name_formatter)\n log_actions(build, 'UPDATE', 'cronjob', update_cronjobs, name_formatter)\n\n\ndef execute_deploy(\n build,\n namespace,\n services,\n deployments,\n jobs,\n cronjobs,\n delete_completed_jobs=True,\n):\n # Split services + deployments into app (main) and dependencies\n depend_services = []\n main_services = []\n\n for service in services:\n if service['metadata']['labels'][ROLE_LABEL_KEY] == 'app':\n main_services.append(service)\n else:\n depend_services.append(service)\n\n depend_deployments = []\n main_deployments = []\n for deployment in deployments:\n if deployment['metadata']['labels'][ROLE_LABEL_KEY] == 'app':\n main_deployments.append(deployment)\n else:\n depend_deployments.append(deployment)\n\n # Now execute the deploy process\n if namespace:\n with build.stage('Create and/or update namespace'):\n if namespace_exists(build.env, namespace):\n build.log_info(f'Update namespace: {get_object_name(namespace)}')\n update_namespace(build.env, namespace)\n else:\n build.log_info(f'Create namespace: {get_object_name(namespace)}')\n create_namespace(build.env, namespace)\n\n if depend_services:\n with build.stage('Create and/or update dependency services'):\n for service in depend_services:\n if service_exists(build.env, build.namespace, service):\n build.log_info(f'Update service: {get_object_name(service)}')\n update_service(build.env, build.namespace, service)\n else:\n build.log_info(f'Create service: {get_object_name(service)}')\n create_service(build.env, build.namespace, service)\n\n if depend_deployments:\n with build.stage('Create and/or update dependency deployments'):\n for deployment in depend_deployments:\n if deployment_exists(build.env, build.namespace, deployment):\n build.log_info(f'Update deployment: {get_object_name(deployment)}')\n update_deployment(build.env, build.namespace, deployment)\n else:\n build.log_info(f'Create deployment: 
{get_object_name(deployment)}')\n create_deployment(build.env, build.namespace, deployment)\n\n noexist_main_services = []\n exist_main_services = []\n for service in main_services:\n if not service_exists(build.env, build.namespace, service):\n noexist_main_services.append(service)\n else:\n exist_main_services.append(service)\n\n if noexist_main_services:\n with build.stage('Create any app services that do not exist'):\n for service in noexist_main_services:\n build.log_info(f'Create service: {get_object_name(service)}')\n create_service(build.env, build.namespace, service)\n\n noexist_main_deployments = []\n exist_main_deployments = []\n for deployment in main_deployments:\n if not deployment_exists(build.env, build.namespace, deployment):\n noexist_main_deployments.append(deployment)\n else:\n exist_main_deployments.append(deployment)\n\n if noexist_main_deployments:\n with build.stage('Create any app deployments that do not exist'):\n for deployment in noexist_main_deployments:\n build.log_info(f'Create deployment: {get_object_name(deployment)}')\n create_deployment(build.env, build.namespace, deployment)\n\n if jobs:\n with build.stage('Execute upgrades'):\n for job in jobs:\n build.log_info(f'Create job: {get_object_name(job)}')\n create_job(build.env, build.namespace, job)\n if delete_completed_jobs:\n delete_job(build.env, build.namespace, job)\n\n if exist_main_deployments:\n with build.stage('Update existing app deployments'):\n for deployment in exist_main_deployments:\n build.log_info(f'Update deployment: {get_object_name(deployment)}')\n update_deployment(build.env, build.namespace, deployment)\n\n if exist_main_services:\n with build.stage('Update existing app services'):\n for service in exist_main_services:\n build.log_info(f'Update service: {get_object_name(service)}')\n update_service(build.env, build.namespace, service)\n\n for cronjob in cronjobs:\n with build.stage('Create and/or update cronjobs'):\n if cronjob_exists(build.env, build.namespace, cronjob):\n build.log_info(f'Update cronjob: {get_object_name(cronjob)}')\n update_cronjob(build.env, build.namespace, cronjob)\n else:\n build.log_info(f'Create cronjob: {get_object_name(cronjob)}')\n create_cronjob(build.env, build.namespace, cronjob)\n","sub_path":"kubetools/deploy/commands/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":10394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"171020315","text":"#Simple Linear Regression \n\n#Importing the libraries \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\n\n#importing the dataset\n#Chemical Manufactoring DataSet, predicting yield with other predictors\ndf = pd.read_csv(r'C:\\Users\\reece\\Desktop\\Python_Scripts\\LearningPython\\ChemicalManufacturingProcessData.csv')\n\n#Viewing data \ndf.dtypes\ndf.columns\ndf.columns.get_loc('Yield') #0\nX = df.drop(['Yield'], axis = 1)\ny=df.Yield\n\n#Their way of creating X and y, I prefer the way above\n# X = df.iloc[:, 1:].values\n# y = df.iloc[:, 0].values\n\n\n#Filling in missings with median values for columns\nX.isna().sum()\n\nfeatures = X.dtypes.index\nfeature_type = X.dtypes\n\nX.dtypes.value_counts()\ndtypes_dictionary = dict(zip(features, feature_type))\n\n\nreplacements = X.apply(np.nanmedian, 'rows') #Rememeber going down the rows gets you the median for the columns \nreplacement_dictionary = dict(zip(features, replacements))\n\nfor feature, feature_type in dtypes_dictionary.items():\n if 
feature_type == 'int64':\n X[feature] = X[feature].fillna(replacement_dictionary[feature])\n else:\n X[feature] = X[feature].fillna(math.floor(replacement_dictionary[feature]))\n \n#Data Partition \nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3,\n random_state = 0)\n\n#Feature Scaling \n\"\"\" from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n#Fitting Simple Linear Regressoin to the Training Set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n#Predicting the Test set Results \ny_pred = regressor.predict(X_test)\n\n#Viewing coefficients and intercepts \nprint('coefficients: ', regressor.coef_)\nprint('intercept: ', regressor.intercept_)\n\n\n#Visualizing the Training set results - residuals\n\n#predicted vs actual\nplt.scatter(regressor.predict(X_train), y_train, color = 'red')\nplt.title('Predicted Vs Actual')\nplt.xlabel('Predicted')\nplt.ylabel('Actual')\nplt.show()\n\n#Predicted vs Residuals\nresiduals = y_train - regressor.predict(X_train) \nplt.scatter(regressor.predict(X_train), residuals, color = 'red')\nplt.axhline(0, 0, 1)\nplt.title('Predicted Vs Residuals')\nplt.xlabel('Predicted')\nplt.ylabel('Residuals')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"LearningPython/ChemicalManufacturingProcess.py","file_name":"ChemicalManufacturingProcess.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"501182496","text":"import os\r\nimport yaml\r\nimport argparse\r\n\r\nfrom model.Processor import Processor\r\n\r\ndef main():\r\n parser = Init_parameters()\r\n\r\n # Update parameters by yaml\r\n args = parser.parse_args()\r\n\r\n if os.path.exists('./configs/' + args.config_id + '.yaml'):\r\n\r\n with open('./configs/' + args.config_id + '.yaml', 'r') as f:\r\n\r\n yaml_arg = yaml.load(f, Loader=yaml.FullLoader)\r\n parser.set_defaults(**yaml_arg)\r\n else:\r\n raise ValueError('Do NOT exist this config: {}'.format(args.config_id))\r\n\r\n # Update parameters by cmd\r\n args = parser.parse_args()\r\n\r\n # Show parameters\r\n print('\\n************************************************')\r\n print('The running config is presented as follows:')\r\n v = vars(args)\r\n for i in v.keys():\r\n print('{}: {}'.format(i, v[i]))\r\n print('************************************************\\n')\r\n\r\n p = Processor(args)\r\n p.start()\r\n\r\n\r\ndef Init_parameters():\r\n parser = argparse.ArgumentParser(description='kaggle-bengali')\r\n\r\n # Config\r\n parser.add_argument('--config_id', '-c', type=str, default='')\r\n\r\n return parser\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"91905235","text":"# coding=utf-8\n__author__ = 'BaiYa'\n\nimport logging\nimport logging.config\n\ninit = False\nif not init:\n logging.config.fileConfig(\"logging.conf\")\n init = True\nlogger = logging.getLogger('root')\nfileLogger = logging.getLogger('file')\n\ndef debug(msg):\n logger.debug(msg)\n fileLogger.debug(msg)\n\ndef info(msg):\n logger.info(msg)\n fileLogger.info(msg)\n\ndef warn(msg):\n 
logger.warning(msg)\n fileLogger.warning(msg)\n\ndef error(msg):\n logger.error(msg)\n fileLogger.error(msg)\n\ndef critical(msg):\n logger.critical(msg)\n fileLogger.critical(msg)\n\nif __name__ == '__main__':\n logger.setLevel(logging.DEBUG)\n logger.debug(\"debug message\")\n logger.info(\"info message\")\n logger.warning(\"warn message\")\n logger.error(\"error message\")\n logger.critical(\"critical message\")\n","sub_path":"manga_db/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"388591347","text":"import pymysql\nimport time\n\n\"\"\"\ninsert_tianchi_data函数字符串过长可能会导致错误:\n 建议配置MYSQL以满足过长字符串: 【登陆mysql,设置输入:set global max_allowed_packet = 100*1024*1024】\n\"\"\"\n\n\nclass tianchi_mysql(object):\n def __init__(self, host, user, password, db):\n # connect to database\n self.db = pymysql.connect(host, user, password, db)\n # create a cursor\n self.cursor = self.db.cursor()\n # index for debug\n self.index = 0\n\n def create_tianchi_data_table(self):\n # delete EXIST table\n self.cursor.execute(\"drop table if exists user_item_tables\")\n\n # create a new table\n create_table_sql = \"\"\"create table user_item_tables(\n user_id varchar(20) not null,\n item_id varchar(20) not null,\n behavior_type char(1),\n user_geohash varchar(10),\n item_category varchar(10),\n time varchar(20))\n \"\"\"\n # execute sql\n self.cursor.execute(create_table_sql)\n\n def insert_tianchi_data(self, context):\n starttime = time.clock()\n insert_tianchi_sql = \"insert into user_item_tables(user_id, item_id, behavior_type, \" \\\n \"user_geohash, item_category, time) value\"\n\n for line in context:\n line = line.replace('\\n', '')\n array = line.split(',')\n if array[0] == \"user_id\":\n continue\n value = \"('\" + array[0] + \"','\" + array[1] + \"','\" + array[2] + \"','\" + array[3] + \"','\" + array[4] + \"','\" + array[5] + \"'),\"\n insert_tianchi_sql += value\n self.index += 1\n if self.index % 100000 == 0:\n print('Current cal {} data..'.format(self.index))\n\n insert_tianchi_sql = insert_tianchi_sql[:len(insert_tianchi_sql) - 1]\n try:\n self.cursor.execute(insert_tianchi_sql)\n # commit to database\n self.db.commit()\n except:\n # if wrong, rollback\n self.db.rollback()\n endtime = time.clock()\n print('Running time: %s Seconds' % (endtime - starttime))\n\n def insert_tianchi_data_way2(self, context):\n starttime = time.clock()\n for line in context:\n line = line.replace('\\n', '')\n array = line.split(',')\n if array[0] == \"user_id\":\n continue\n self.cursor.execute(\"insert into user_item_tables(user_id, item_id, behavior_type, \"\n \"user_geohash, item_category, time) value('%s', '%s', '%s', '%s', '%s', '%s')\" %\n (array[0], array[1], array[2], array[3], array[4], array[5]))\n self.index += 1\n if self.index % 100000 == 0:\n print('Current cal {} data..'.format(self.index))\n try:\n self.db.commit()\n except:\n # if wrong, rollback\n self.db.rollback()\n endtime = time.clock()\n print('Running time: %s Seconds' % (endtime - starttime))\n\n def close_db(self):\n # close connect\n self.db.close()\n\nif __name__ == \"__main__\":\n # input csv data\n with open(\"./fresh_comp_offline/tianchi_fresh_comp_train_user.csv\") as f:\n context = f.readlines()\n print(\"Total number: %s\" % len(context))\n\n # import to database\n print(\"######################MYSQL_BEGIN##############################\")\n mysql = tianchi_mysql(\"localhost\", \"root\", \"6247\", 
\"tianchi_mobile_data\")\n # mysql.create_tianchi_data_table() # it will drop origin table and create a new table\n # mysql.insert_tianchi_data(context)\n mysql.insert_tianchi_data_way2(context)\n mysql.close_db()","sub_path":"convert_csv_to_myaql.py","file_name":"convert_csv_to_myaql.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"645159571","text":"import unittest\nimport arcade\n\n\nclass _FakeWindow(arcade.Window):\n \"\"\"A test double Window that is never displayed\"\"\"\n def __init__(self):\n super().__init__()\n # Tracking behavior of _FakeView:\n self.draw_calls = [] # list that records sequence of calls to on_draw()\n self.update_calls = [] # list that records sequence of calls to update()\n\n def set_visible(self, visible=True):\n pass # Make set_visible do nothing for testing\n\n\nclass _MockView(arcade.View):\n \"\"\"A test double View that records when a View's on_draw and update methods are called\"\"\"\n def __init__(self, name):\n super().__init__()\n self.name = name\n self.next_view = None\n self.count = 0\n\n def on_draw(self):\n self.window.draw_calls.append(self.name)\n\n def update(self, delta_time):\n self.window.update_calls.append(self.name)\n\n self.count += 1\n if self.count == 3:\n self.count = 0\n self.window.show_view(self.next_view)\n\n\nclass TestView(unittest.TestCase):\n def test_it_asserts_showing_view_of_none(self):\n win = _FakeWindow()\n with self.assertRaises(Exception):\n win.show_view(None)\n\n def test_single_view(self):\n win = _FakeWindow()\n view = _MockView('a')\n win.show_view(view)\n win.test(5)\n self.assertEqual(['a', 'a', 'a', 'a', 'a'], win.update_calls)\n self.assertEqual(['a', 'a', 'a', 'a'], win.draw_calls) # the first frame has an update but no draw\n\n def test_multiple_views(self):\n win = _FakeWindow()\n view_a = _MockView('a')\n view_b = _MockView('b')\n view_a.next_view = view_b\n view_b.next_view = view_a\n win.show_view(view_a)\n win.test(10)\n self.assertEqual(['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a', 'b'], win.update_calls)\n self.assertEqual(['a', 'a', 'b', 'b', 'b', 'a', 'a', 'a', 'b'], win.draw_calls) # the first frame has an update but no draw\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit2/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"404508256","text":"import discord\r\nimport asyncio\r\nfrom discord.ext import commands\r\n\r\n\r\nclass ban(commands.Cog):\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.command(name='banir', aliases=['ban'])\r\n async def banir(self,ctx,member: discord.Member=None):\r\n if ctx.author.guild_permissions.administrator:\r\n if member is None:\r\n await ctx.send(f\"Olá {ctx.author.mention} você não marcou um usuário para banir\")\r\n else:\r\n await ctx.send(\"Sem permissão\")\r\n embed = discord.Embed(colour=0xff5d00)\r\n embed.set_author(name=f\"Você deseja banir o {member.name}? 
(60 segundos)\")\r\n embed.add_field(name=\"🆔ID:\",value=member.id,inline=False)\r\n embed.add_field(name=\"📶Menção:\",value=member.mention,inline=False)\r\n embed.add_field(name=\"🔢Tag:\",value=member.discriminator,inline=False)\r\n embed.add_field(name=\"📆Criação da Conta:\",value=member.created_at.strftime(\"**%H:%M:%S - %d/%m/20%y**\"),inline=False)\r\n embed.add_field(name=\"☑️Entrada no Servidor:\",value=member.joined_at.strftime(\"**%H:%M:%S - %d/%m/20%y**\"),inline=False)\r\n embed.add_field(name=\"📱Atividade:\",value=member.activity)\r\n embed.add_field(name=\"📷Avatar Link:\",value=\"[Link Direto](\" + member.avatar_url + \")\\n\",inline=False)\r\n embed.set_image(url=\"https://media1.tenor.com/images/d856e0e0055af0d726ed9e472a3e9737/tenor.gif\")\r\n embed.set_thumbnail(url=member.avatar_url)\r\n msg1 = await ctx.send(embed=embed)\r\n await msg1.add_reaction(\"✅\")\r\n await msg1.add_reaction(\"❌\")\r\n\r\n def check(reaction, user):\r\n return user == ctx.author and str(reaction.emoji)\r\n try:\r\n reaction, user = await self.client.wait_for('reaction_add', timeout=60.0, check=check)\r\n if reaction.emoji == \"✅\":\r\n await member.ban()\r\n await msg1.delete()\r\n await ctx.send(f\"Olá {ctx.author.mention} o {member.name} foi banido com sucesso!\")\r\n if reaction.emoji == \"❌\":\r\n await msg1.delete()\r\n except asyncio.TimeoutError:\r\n await msg1.delete()\r\n msg4 = await ctx.send(\"Você demorou muito para banir, tente novamente!\")\r\n await asyncio.sleep(5)\r\n await msg4.delete()\r\n @banir.error\r\n async def ban_handler(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n if error.param.name == 'msg':\r\n embed = discord.Embed(title=\"Comando !ban ou !banir:\", colour=discord.Colour(0x370c5e),\r\n description=\"Bane um usuário.\")\r\n\r\n embed.set_author(name=\"Masterr#3556\")\r\n embed.set_footer(text=\"\")\r\n\r\n embed.add_field(name=\"📖**Exemplos:**\", value=\"!ban @dugade\\n!banir @dugade não fez o cassino\", inline=False)\r\n embed.add_field(name=\"🔀**Outros Comandos**\", value=\"``!ajuda``\", inline=False)\r\n\r\n msg = await ctx.send(embed=embed)\r\n await msg.add_reaction(\"❓\")\r\n\r\n @commands.command(name='delmsg', aliases= [\"clear\", \"deletar\", \"apagar\"])\r\n async def delmsg(self,ctx, quantidade: int):\r\n if not ctx.author.guild_permissions.manage_messages:\r\n await ctx.send('👤 ***Você não tem permissão para limpar o chat!***')\r\n if ctx.author.guild_permissions.manage_messages:\r\n try:\r\n embed = discord.Embed(color=0x00ffff,\r\n description='Olá {}.\\n Você está prestes à apagar {} mensagem(ns), você quer realmente apagar?\\n Se sim, reaja com: ✅.\\n Se não, reaja com: ❌!\\n (30 segundos para se decidir!)'.format(ctx.author.mention, quantidade))\r\n embed.set_thumbnail(url=ctx.message.guild.icon_url)\r\n embed.set_footer(text=ctx.author.name, icon_url=ctx.author.avatar_url)\r\n msg = await ctx.send(f\"{ctx.author.mention}\")\r\n msg1 = await ctx.send(embed=embed)\r\n await msg1.add_reaction(\"✅\")\r\n await msg1.add_reaction(\"❌\")\r\n await ctx.message.delete()\r\n\r\n def check(reaction, user):\r\n return user == ctx.author and str(reaction.emoji)\r\n try:\r\n reaction, user = await self.client.wait_for('reaction_add', timeout=30.0, check=check)\r\n if reaction.emoji == \"✅\":\r\n \t await msg.delete()\r\n await msg1.delete()\r\n await ctx.channel.purge(limit=quantidade)\r\n msg4 = await ctx.send(f\"{ctx.author.mention}\")\r\n embed1 = discord.Embed(color=0x00ffff,\r\n description='[ |zFazT - Sistemas| (Deletar 
mensagens)] \\n\\n Olá {}, você apagou {} mensagem(ns) com sucesso!'.format(ctx.author.mention, quantidade))\r\n embed1.set_thumbnail(url=ctx.message.guild.icon_url)\r\n embed1.set_footer(text='Mensagens apagadas por: {}'.format(ctx.message.author.name), icon_url=ctx.message.author.avatar_url)\r\n msg3 = await ctx.send(embed=embed1)\r\n await asyncio.sleep(15)\r\n await msg3.delete()\r\n await msg4.delete()\r\n# await ctx.send(f\"Olá {ctx.author.mention}, você apagou {quantidade} mensagem(ns) com sucesso!\")\r\n if reaction.emoji == \"❌\":\r\n await msg1.delete()\r\n await msg.delete()\r\n msg4 = await ctx.send(f\"{ctx.author.mention}\")\r\n embed1 = discord.Embed(color=0x00ffff,\r\n description='[ |zFazT - Sistemas| (Deletar mensagens)] \\n\\n Olá {}, você cancelou a operação de apagar {} mensagem(ns) com sucesso!'.format(ctx.author.mention, quantidade))\r\n embed1.set_thumbnail(url=ctx.message.guild.icon_url)\r\n embed1.set_footer(text='Cancelamento efetuado por: {}'.format(ctx.message.author.name), icon_url=ctx.message.author.avatar_url)\r\n msg3 = await ctx.send(embed=embed1)\r\n# msg = await ctx.send(f\"Olá {ctx.author.mention}, você cancelou a operação de apagar {quantidade} de mensagem(ns)!\")\r\n await asyncio.sleep(15)\r\n await msg3.delete()\r\n await msg4.delete()\r\n except asyncio.TimeoutError:\r\n await msg1.delete()\r\n msg4 = await ctx.send(\"Você demorou muito para apagar, tente novamente!\")\r\n await asyncio.sleep(5)\r\n await msg4.delete()\r\n finally:\r\n pass\r\n\r\n @delmsg.error\r\n async def delmsg_handler(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n embed = discord.Embed(title=\"Comando !delmsg, !deletar, !clear ou !apagar:\", colour=discord.Colour(0x370c5e),\r\n description=\"Apaga mensagens\")\r\n\r\n embed.set_author(name=\"zFazT\")\r\n embed.set_footer(text=\"\")\r\n\r\n embed.add_field(name=\"📖**Exemplos:**\", value=\"!clear 9\\n!apagar 3\", inline=False)\r\n embed.add_field(name=\"🔀**Outros Comandos**\", value=\"``!ajuda``\", inline=False)\r\n\r\n msg = await ctx.send(embed=embed)\r\n await msg.add_reaction(\"❓\")\r\n \r\ndef setup(client):\r\n print(\"[Comando ban e delmsg] Carregado.\")\r\n client.add_cog(ban(client))","sub_path":"cogs/ban.py","file_name":"ban.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"37698062","text":"from ._ipywidgets import ipywidgets\n\nfrom descarteslabs.common.proto.widgets import widgets_pb2\n\nfrom ...types import Bool\nfrom ...types.widget import Widget\n\n\nclass Checkbox(Widget, Bool):\n _proto_type = widgets_pb2.Checkbox\n\n def __init__(self, name: str, default: bool = False, label=\"\"):\n super().__init__(name, default, label)\n self.widget = ipywidgets.Checkbox(value=default)\n self.widget._label = label\n\n\ndef checkbox(\n name: str,\n default: bool = False,\n label: str = \"\",\n):\n \"\"\"\n A checkbox widget, which acts as a `.Bool` parameter.\n\n Example\n -------\n >>> import descarteslabs.workflows as wf\n >>> wf.widgets.checkbox(\"param_name\", default=True, label=\"A string parameter\") # doctest: +SKIP\n\n >>> s2 = wf.ImageCollection.from_id(\n ... \"sentinel-2:L1C\", \"2018-01-01\", \"2018-04-01\",\n ... processing_level=wf.ifelse(\n ... wf.widgets.checkbox(\"surface\", default=True, label=\"Use surface reflectance\"),\n ... wf.Str(\"surface\"),\n ... wf.Str(\"toa\"),\n ... )\n ... 
).pick_bands(\"red green blue\")\n >>> s2.visualize(\"Sentinel-2\", scales=[[0, 0.4], [0, 0.4], [0, 0.4]]) # doctest: +SKIP\n >>> # ^ when you call .visualize, the `checkbox` widget will automatically show up below\n\n Clicking the checkbox above will toggle atmospheric correction on and off.\n (If you haven't already, run ``wf.map`` in another notebook cell to see your layer.)\n\n Parameters\n ----------\n name: str\n The name of the parameter.\n default: bool, default False\n The default value for the widget.\n label: str, default \"\"\n The longform label to display next to the widget.\n If not given, the widget will display as ``name``.\n\n Returns\n -------\n widget: Checkbox\n A Widget object that acts just like a Workflows `.Bool`, and displays as a checkbox.\n \"\"\"\n\n return Checkbox(name, default, label)\n","sub_path":"descarteslabs/workflows/interactive/widgets/checkbox.py","file_name":"checkbox.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"73212775","text":"def prob4():\n verse = \"jam tomorrow and jam yesterday,\"\n print(\"The rule is,\")\n c = mystery(verse)\n w = enigma(verse,c)\n print(c,w)\ndef mystery(v):\n print(v)\n c = v.count(\"jam\")\n return(c)\ndef enigma(v,c):\n print(\"but never\", v[-1])\n for i in range(c):\n print(\"jam\")\n return(\"day.\")\nprint(prob4())","sub_path":"python/loops/strings_example.py","file_name":"strings_example.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"448495497","text":"\"\"\"Added Extra fields for deactivating user account\n\nRevision ID: a5b79aea0c5a\nRevises: 5d0e9b3e85d0\nCreate Date: 2020-05-07 15:57:32.238536\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = 'a5b79aea0c5a'\ndown_revision = '5d0e9b3e85d0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('deactivated_at', sa.DateTime(), nullable=True))\n op.add_column('users', sa.Column('deactivated_by', sa.String(length=65), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('users', 'deactivated_by')\n op.drop_column('users', 'deactivated_at')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/a5b79aea0c5a_added_extra_fields_for_deactivating_.py","file_name":"a5b79aea0c5a_added_extra_fields_for_deactivating_.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"285603541","text":"from sqlalchemy import Column, String, DateTime, Integer, MetaData, ForeignKey, CheckConstraint\nfrom sqlalchemy_utils import EmailType\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalch.db_conn import engine\nfrom sqlalch.cheker import book_check, re, user_check\n\n\nmeta = MetaData()\nBase = declarative_base()\n\n\nclass Book(Base):\n __tablename__ = 'book'\n book_id = Column(Integer, primary_key=True, nullable=False)\n book_name = Column(String(50), primary_key=False, nullable=False)\n author_name = Column(String(50), primary_key=False, nullable=False)\n author_lastname = Column(String(50), primary_key=False, nullable=False)\n book_url = Column(String(70), primary_key=False, nullable=False)\n CheckConstraint('book_id > 0', name='check_id')\n\n @classmethod\n def add_book(cls, b_id, b_name, a_name, a_lname, b_url):\n b_check = book_check(b_id, a_name, a_lname)\n if not b_check:\n print(\"data is incorrect\")\n return -1\n else:\n Session = sessionmaker(bind=engine)\n session = Session()\n new_book = Book(book_id = b_id, book_name=b_name, author_name=a_name, author_lastname=a_lname,\n book_url=b_url)\n session.add(new_book)\n session.commit()\n\n\nclass User(Base):\n __tablename__='l_user'\n user_name = Column(String(50), primary_key=False, nullable=True)\n user_lastname = Column(String(50), primary_key=False, nullable=True)\n email = Column(EmailType, primary_key=True, nullable=False)\n registration = Column(DateTime, primary_key=False,nullable=False)\n login = Column(String(20), primary_key=False, nullable=False)\n password = Column(String(30), primary_key=False, nullable=False)\n book_amount = Column(Integer, primary_key=False, nullable=True)\n CheckConstraint('book_amount>=0', name='ba_ch')\n\n @classmethod\n def add_user(cls, name, last_name,email, registration, login, psw, amount):\n u_check = user_check(name, last_name, email, amount)\n if not u_check:\n print(\"data is incorrect\")\n return -1\n else:\n Session = sessionmaker(bind=engine)\n session = Session()\n new_user = User(user_name=name, user_lastname=last_name, email=email, registration=registration,\n login=login,password=psw,book_amount=amount)\n session.add(new_user)\n session.commit()\n\n\nclass UserBook(Base):\n __tablename__ = 'user_book'\n t_book_id = Column(Integer, ForeignKey('book.book_id'), primary_key=True, nullable=False)\n user_email = Column(EmailType, ForeignKey('l_user.email'), primary_key=True, nullable=False)\n CheckConstraint('t_book_id>0', name='t_book_check')\n\n @classmethod\n def add_user_book(cls, b_id, u_email):\n if not isinstance(b_id, int) or not re.match(r\"[^@]+@[^@]+\\.[^@]+\", u_email):\n print('Data is incorrect')\n return -1\n else:\n Session = sessionmaker(bind=engine)\n session = Session()\n new_user_book = UserBook(t_book_id=b_id, user_email=u_email)\n session.add(new_user_book)\n session.commit()\n","sub_path":"Kostya 
Zinchuk/workshop5/source/python/sqlalch/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"79207570","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport random\nfrom core.scripts.twitter_related import base\n\nfrom core.scripts.twitter_related.base import BaseTwitterRelatedScript\nfrom core.utils.logging import debug\n\n\nclass ReplyByLearnedReplies(base.BaseTimelineScript):\n def on_timeline_update(self, data):\n learned_replies = list(\n self.st_memory.memory.get('learned_replies', {}).items())\n debug(\"Learned replies: {0}\".format(learned_replies))\n random.shuffle(learned_replies)\n\n for k, v in learned_replies:\n if data['text'].find(k) > -1:\n reply_item = random.choice(v)\n teacher_screen_name = reply_item['teacher_screen_name']\n if teacher_screen_name == data['user']['screen_name']:\n reply_message = reply_item['text']\n else:\n reply_message = \"@{0} {1}\".format(\n teacher_screen_name, reply_item['text'])\n self.twitter.reply_to(data, reply_message)\n return\n","sub_path":"scripts/twitter_related/reply_by_learned_replies/reply_by_learned_replies.py","file_name":"reply_by_learned_replies.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"5795302","text":"# Known issue: status is set to Failed only when the timeout has expired\n# Known issue: src_type = FTP is not fully polished yet\n\nimport argparse\nimport configparser\nimport logging\nfrom threading import Thread, Timer\nfrom multiprocessing import *\nimport os\nimport time\nimport ftputil\nimport ftputil.session\nimport shutil\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-config\", help=\"path to the config file\", nargs='?')\nargs = parser.parse_args()\nargs.config = args.config if args.config!=None else \"conf.txt\"\n\nconfig = configparser.ConfigParser()\nconfig.read(args.config)\n\nlog_file = config['General']['log_path']\n\nlogging.basicConfig(filename=log_file, format='[%(asctime)s]\\t[%(levelname)s]\\t\\t[line:%(lineno)d]\\t%(message)s', level=logging.DEBUG)\nlogging.info('Config file: %s', args.config)\n\ndef ftp_sync(src, dst, sync_nr):\n src_files = []\n src = src.split()\n try:\n if(config[sync_nr]['ftp_mode'] == 'PASSIVE'):\n my_session_factory = ftputil.session.session_factory(use_passive_mode=True)\n else:\n my_session_factory = ftputil.session.session_factory(use_passive_mode=False)\n\n with ftputil.FTPHost(src[0], src[1], src[2],session_factory=my_session_factory) as host:\n for root, dirs, files in host.walk('/'):\n for d in dirs:\n src_dir = host.path.join(root, d)\n src_files.append(src_dir[len(src)-2:])\n s_modif = host.stat(src_dir).st_mtime\n dest_dir = os.path.join(dst, src_dir[len(src)-2:].replace('/', '\\\\'))\n if (os.path.exists(dest_dir)):\n d_modif = os.stat(dest_dir).st_mtime\n logging.info('%s: Directory %s already exists', sync_nr, src_dir[len(src):])\n else:\n try:\n os.makedirs(dest_dir)\n logging.info('%s: Directory %s was created', sync_nr, dest_dir)\n except Exception as e:\n logging.error('%s: Unable to create directory %s: %s', sync_nr, dest_dir, e)\n for f in files:\n src_file = host.path.join(root, f)\n src_files.append(src_file[len(src)+1:])\n s_modif = host.stat(src_file).st_mtime\n dest_file = os.path.join(dst, src_file[len(src)-2:].replace('/', '\\\\'))\n if (os.path.isfile(dest_file)):\n d_modif = os.stat(dest_file).st_mtime\n else:\n d_modif = 0\n\n 
if(int(s_modif)==int(d_modif)):\n logging.info('%s: File %s: nothing changed', sync_nr, src_file[len(src)-2:])\n else:\n logging.info('%s: File %s has changed in the source', sync_nr, src_file[len(src)-2:])\n try:\n # shutil.copy2(src_file, dest_file[:len(dest_file)-len(f)])\n host.download(src_file, dest_file)\n logging.info('%s: File %s is up to date', sync_nr, src_file[len(src)-2:])\n except Exception as e:\n logging.error('%s: Unable to copy %s: %s', sync_nr, src_file, e)\n\n if(config[sync_nr]['move_files'] == 'YES'):\n for i in host.listdir(src):\n item = host.path.join(src, i)\n if(host.path.isfile(item)):\n try:\n host.remove(item)\n logging.info('%s: %s deleted', sync_nr, item)\n except Exception as e:\n logging.error('%s: Unable to delete file: %s: %s', sync_nr, item, e)\n if(host.path.isdir(item)):\n try:\n host.rmtree(item)\n logging.info('%s: %s deleted', sync_nr, item)\n except Exception as e:\n logging.error('%s: Unable to delete directory %s: %s', sync_nr, item, e)\n\n except Exception as e:\n logging.error('%s: %s', sync_nr, e)\n return src_files\n\n\ndef disk_sync(src, dst, sync_nr):\n src_files = []\n for root, dirs, files in os.walk(src):\n for d in dirs:\n src_dir = os.path.join(root, d)\n src_files.append(src_dir[len(src)+1:])\n s_modif = os.stat(src_dir).st_mtime\n dest_dir = os.path.join(dst, src_dir[len(src)+1:])\n if (os.path.exists(dest_dir)):\n d_modif = os.stat(dest_dir).st_mtime\n logging.info('%s: Directory %s already exists', sync_nr, src_dir[len(src):])\n else:\n try:\n os.makedirs(dest_dir)\n logging.info('%s: Directory %s was created', sync_nr, dest_dir)\n except Exception as e:\n logging.error('%s: Unable to create directory %s: %s', sync_nr, dest_dir, e)\n\n for f in files:\n src_file = os.path.join(root, f)\n src_files.append(src_file[len(src)+1:])\n s_modif = os.stat(src_file).st_mtime\n dest_file = os.path.join(dst, src_file[len(src)+1:])\n if (os.path.isfile(dest_file)):\n d_modif = os.stat(dest_file).st_mtime\n else:\n d_modif = 0\n\n if(int(s_modif)==int(d_modif)):\n logging.info('%s: File %s: nothing changed', sync_nr, src_file[len(src):])\n else:\n logging.info('%s: File %s has changed in the source', sync_nr, src_file[len(src):])\n try:\n shutil.copy2(src_file, dest_file[:len(dest_file)-len(f)])\n logging.info('%s: File %s is up to date', sync_nr, src_file[len(src):])\n except Exception as e:\n logging.error('%s: Unable to copy %s: %s', sync_nr, src_file, e)\n\n if(config[sync_nr]['move_files'] == 'YES'):\n for i in os.listdir(src):\n item = os.path.join(src, i)\n if(os.path.isfile(item)):\n try:\n os.remove(item)\n logging.info('%s: %s deleted', sync_nr, item)\n except Exception as e:\n logging.error('%s: Unable to delete file: %s: %s', sync_nr, item, e)\n if(os.path.isdir(item)):\n try:\n shutil.rmtree(item)\n logging.info('%s: %s deleted', sync_nr, item)\n except Exception as e:\n logging.error('%s: Unable to delete directory %s: %s', sync_nr, item, e)\n\n return src_files\n\ndef sync(i):\n sync_nr = 'Sync_#'+ str(i+1)\n logging.debug('Thread %i started with %s', i, sync_nr)\n src = config[sync_nr]['src']\n dst = config[sync_nr]['dst']\n\n if (config[sync_nr]['delete_extra_dst_content'] == 'YES' and config[sync_nr]['move_files'] == 'YES'):\n logging.error('Inconsistency in config file in [%s]: delete_extra_dst_content and move_files', sync_nr)\n return\n\n if (config[sync_nr]['src_type'] == 'DISK'):\n src_files = disk_sync(src, dst, sync_nr)\n else:\n src_files = ftp_sync(src, dst, sync_nr)\n\n if(config[sync_nr]['delete_extra_dst_content'] == 'YES'):\n 
for root, dirs, files in os.walk(dst):\n for f in files:\n f = os.path.join(root, f)[len(dst)+1:]\n if (f not in src_files):\n try:\n f = os.path.join(root, f)\n os.remove(f)\n logging.info('%s: %s deleted from %s', sync_nr, f, dst)\n except Exception as e:\n logging.error('%s: Unable to delete file: %s: %s', sync_nr, f, e)\n for d in dirs:\n d = os.path.join(root, d)[len(dst)+1:]\n if(d not in src_files):\n try:\n d = os.path.join(root, d)\n shutil.rmtree(d)\n logging.info('%s: %s deleted from %s', sync_nr, d, dst)\n except Exception as e:\n logging.error('%s: Unable to delete directory %s: %s', sync_nr, d, e)\n\ndef start_sync(i):\n sync_nr = 'Sync_#'+ str(i+1)\n while(True):\n status = 'Ok'\n try:\n start_time = time.time()\n p = Process(target=sync, args=(i,))\n p.start()\n p.join(int(config[sync_nr]['timeout']))\n last_update = time.time()\n if(p.is_alive()):\n p.terminate()\n status = 'Failed'\n logging.debug('%s reached timeout', sync_nr)\n\n print('%s | SRC-%s: %s >>> DST-%s: %s | Last update: %s | Sync duration: %s | Status: %s' % (\n sync_nr, config[sync_nr]['src_type'], config[sync_nr]['src_name'], config[sync_nr]['dst_type'], config[sync_nr]['dst_name'], time.ctime(last_update), last_update-start_time, status))\n\n while (time.time()-start_time < int(config[sync_nr]['update_interval'])):\n time.sleep(1)\n except Exception as e:\n logging.error('Thread %d: %s', i, e)\n return\n\n\nif __name__ == \"__main__\":\n nr_loc = len(config.sections())-1\n t = [0 for i in range(0,nr_loc)]\n\n for i in range(0, nr_loc):\n try:\n t[i] = Thread(target=start_sync, args=(i,))\n t[i].start()\n except Exception as e:\n logging.error('Thread %d: %s', i, e)\n continue\n","sub_path":"ftpsync.py","file_name":"ftpsync.py","file_ext":"py","file_size_in_byte":9520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"90039255","text":"from flask import jsonify, g\nfrom app import db\nfrom app.models import write_data_to_db\nfrom app.api import bp\nfrom app.api.auth import basic_auth\n\n\n@bp.route('/tokens', methods=['POST'])\n@basic_auth.login_required\ndef get_token():\n token = g.current_user.get_token()\n write_data_to_db()\n return jsonify({'token': token})\n\n\n@bp.route('/tokens', methods=['DELETE'])\n@basic_auth.login_required\ndef revoke_token():\n g.current_user.revoke_token()\n write_data_to_db()\n return '', 204\n","sub_path":"app/api/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"165125629","text":"from guifw.abstractparameters import *\n\nclass ModelTool(ItemWithParameters):\n def __init__(self, object=None, viewUpdater=None, **kwargs):\n ItemWithParameters.__init__(self, **kwargs)\n self.object=object\n self.viewUpdater=viewUpdater\n self.rotateX=ActionParameter(parent=self, name='rotate X', callback=self.rotate_x)\n self.rotateY=ActionParameter(parent=self, name='rotate Y', callback=self.rotate_y)\n self.rotateZ=ActionParameter(parent=self, name='rotate Z', callback=self.rotate_z)\n self.scaleX=NumericalParameter(parent=self, name=\"scale X\", value=1.0, step=0.1, enforceRange=False, enforceStep=False)\n self.scaleY=NumericalParameter(parent=self, name=\"Y\", value=1.0, step=0.1, enforceRange=False, enforceStep=False)\n self.scaleZ=NumericalParameter(parent=self, name=\"Z\", value=1.0, step=0.1, enforceRange=False, enforceStep=False)\n self.scale=ActionParameter(parent=self, name='scale', 
callback=self.scale)\n self.collapseTop=ActionParameter(parent=self, name='Collapse to Top', callback=self.collapseTop)\n self.collapseBottom=ActionParameter(parent=self, name='Collapse to Bottom', callback=self.collapseBottom)\n self.heightMapResolution=NumericalParameter(parent=self, name=\"Height map resolution\", value=1.0, step=0.1, enforceRange=False, enforceStep=False)\n self.heightMapButtonTop=ActionParameter(parent=self, name='Calculate Heightmap (top)', callback=self.heightmapTop)\n self.heightMapButtonBottom=ActionParameter(parent=self, name='Calculate Heightmap (bottom)', callback=self.heightmapBottom)\n\n self.parameters=[[self.rotateX, self.rotateY, self.rotateZ],\n self.scaleX, self.scaleY, self.scaleZ, self.scale,\n [self.collapseTop, self.collapseBottom],\n self.heightMapResolution,\n self.heightMapButtonTop,\n self.heightMapButtonBottom]\n \n \n def rotate_x(self):\n if self.object!=None:\n self.object.rotate_x()\n if self.viewUpdater!=None:\n self.viewUpdater()\n\n def rotate_y(self):\n if self.object!=None:\n self.object.rotate_y()\n if self.viewUpdater!=None:\n self.viewUpdater()\n\n def rotate_z(self):\n if self.object!=None:\n self.object.rotate_z()\n if self.viewUpdater!=None:\n self.viewUpdater()\n\n def scale(self):\n if self.object!=None:\n self.object.scale([self.scaleX.getValue(), self.scaleY.getValue(), self.scaleZ.getValue()])\n if self.viewUpdater!=None:\n self.viewUpdater()\n \n def collapseTop(self):\n if self.object!=None:\n self.object.collapse_to_surface(False)\n if self.viewUpdater!=None:\n self.viewUpdater()\n\n def collapseBottom(self):\n if self.object!=None:\n self.object.collapse_to_surface(True)\n if self.viewUpdater!=None:\n self.viewUpdater()\n \n def heightmapTop(self):\n if self.object!=None:\n self.object.calc_height_map_scanning(grid=self.heightMapResolution.getValue(), waterlevel=\"max\" )\n #self.object.interpolate_gaps(self.object.maxv[2])\n if self.viewUpdater!=None:\n self.viewUpdater(mode=\"heightmap\")\n\n def heightmapBottom(self):\n if self.object!=None:\n self.object.calc_height_map_scanning(grid=self.heightMapResolution.getValue(), waterlevel=\"min\" )\n #self.object.interpolate_gaps(self.object.minv[2])\n if self.viewUpdater!=None:\n self.viewUpdater(mode=\"heightmap\")\n","sub_path":"tools/modeltool.py","file_name":"modeltool.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"131837877","text":"import pandas as pd\nfrom sklearn.ensemble import IsolationForest\n\nfrom .data_check import DataCheck\nfrom .data_check_message import DataCheckWarning\n\nfrom evalml.utils import get_random_state\nfrom evalml.utils.gen_utils import numeric_dtypes\n\n\nclass OutliersDataCheck(DataCheck):\n \"\"\"Checks if there are any outliers in input data by using an Isolation Forest to obtain the anomaly score\n of each index and then using IQR to determine score anomalies. Indices with score anomalies are considered outliers.\"\"\"\n\n def __init__(self, random_state=0):\n \"\"\"Checks if there are any outliers in the input data.\n\n Arguments:\n random_state (int, np.random.RandomState): The random seed/state. Defaults to 0.\n \"\"\"\n self.random_state = get_random_state(random_state)\n\n def validate(self, X, y=None):\n \"\"\"Checks if there are any outliers in a dataframe by using an Isolation Forest to obtain the anomaly score\n of each index and then using IQR to determine score anomalies. 
Indices with score anomalies are considered outliers.\n\n Arguments:\n X (pd.DataFrame): Features\n y: Ignored.\n\n Returns:\n A set of indices that may have outlier data.\n\n Example:\n >>> df = pd.DataFrame({\n ... 'x': [1, 2, 3, 40, 5],\n ... 'y': [6, 7, 8, 990, 10],\n ... 'z': [-1, -2, -3, -1201, -4]\n ... })\n >>> outliers_check = OutliersDataCheck()\n >>> assert outliers_check.validate(df) == [DataCheckWarning(\"Row '3' is likely to have outlier data\", \"OutliersDataCheck\")]\n \"\"\"\n\n if not isinstance(X, pd.DataFrame):\n X = pd.DataFrame(X)\n X = X.select_dtypes(include=numeric_dtypes)\n\n if len(X.columns) == 0:\n return []\n\n def get_IQR(df, k=2.0):\n q1 = df.quantile(0.25)\n q3 = df.quantile(0.75)\n iqr = q3 - q1\n lower_bound = q1 - (k * iqr)\n upper_bound = q3 + (k * iqr)\n return (lower_bound, upper_bound)\n\n clf = IsolationForest(random_state=self.random_state)\n clf.fit(X)\n scores = pd.Series(clf.decision_function(X))\n lower_bound, upper_bound = get_IQR(scores, k=2)\n outliers = (scores < lower_bound) | (scores > upper_bound)\n outliers_indices = outliers[outliers].index.values.tolist()\n warning_msg = \"Row '{}' is likely to have outlier data\"\n return [DataCheckWarning(warning_msg.format(row_index), self.name) for row_index in outliers_indices]\n","sub_path":"evalml/data_checks/outliers_data_check.py","file_name":"outliers_data_check.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"292419395","text":"'''\n11\n5 4 5 4 3 3 2 1 4 1 4\n'''\n\nnoe = int(input())\narr = [int(x) for x in input().split()]\n\ni = 0\ncnt = 0\nwhile len(arr) > 0:\n pos = arr[0]\n arr = arr[pos+1:]\n cnt += 1\nprint(cnt)","sub_path":"Hackerrank/recover_the_array.py","file_name":"recover_the_array.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"213732544","text":"# -*- coding: utf-8 -*-\r\n#!\\usr\\bin\\python\r\n# This is my SNMP trap receiver code\r\nimport socket\r\nimport sys\r\n\r\nfrom datetime import date\r\n\r\n# serverPort = 162\r\n# serverAddress = '192.168.13.117'\r\n# clientPort = 33333\r\n# clientAddress = '192.168.13.210'\r\n\r\nimport configparser\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read('Agent.conf')\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\nserverPort = config.get(\"server\", \"serverPort\")\r\ns.bind((\"0.0.0.0\", int(serverPort)))\r\nprint('Server accepting data on: ' + str(s.getsockname()))\r\n\r\nwhile 1:\r\n data, addr = s.recvfrom(33333)\r\n print('from address: ' + str(addr[0]))\r\n print('on port: ' + str(addr[1]))\r\n print(' Received UDP-datagram data:')\r\n print(data)\r\n","sub_path":"SocketReceiver.py","file_name":"SocketReceiver.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"38466138","text":"from Bishop import *\nimport numpy as np\n\n############\n# Test MDP #\n############\n\n# Create a one-dimension MDP manually\nS = range(5)\nA = range(2) # left or right\nR = [50, -1, -1, -1, 0]\nT = np.zeros((5, 2, 5))\n# Move left\nT[0][0][0] = 1\nT[1][0][0] = 1\nT[2][0][1] = 1\nT[3][0][3] = 1\nT[4][0][4] = 1\n# Move right\nT[0][1][1] = 1\nT[1][1][2] = 1\nT[2][1][3] = 1\nT[3][1][4] = 1\nT[4][1][4] = 1\nTest = MDP.MDP(S, A, T, R)\nTest.validate()\n\n############\n# Test Map #\n############\n\n# Create a simple grid world\nTest = 
Map()\nTest.BuildGridWorld(3, 4, diagonal=True)\nTest.PrintMap()\nTest.Validate() # Should fail.\nTest.InsertObjects([1, 2, 4], [0, 0, 1], [\"A\", \"B\"])\nTest.AddStartingPoint(10)\nTest.AddExitState(0)\nTest.Validate() # Success!\n\n# Repeat above, but do not number object types correctly.\nTest = Map()\nTest.BuildGridWorld(3, 4, diagonal=False)\nTest.Validate() # Should fail.\nTest.InsertObjects([1, 2, 4], [1, 1, 2], [\"A\"]) # Fail\nTest.InsertObjects([1, 2, 4], [1, 1, 2], [\"A\", \"B\"]) # Ok\nTest.AddStartingPoint(10)\nTest.AddExitState(0)\nTest.Validate() # Should fail\n\n# Repeat above, but using coordinate system\nTest = Map()\nTest.BuildGridWorld(3, 4, diagonal=False)\nTest.PrintMap()\nTest.Validate() # Should fail.\nS1 = Test.GetRawStateNumber([2, 1])\nS2 = Test.GetRawStateNumber([3, 1])\nS3 = Test.GetRawStateNumber([2, 2])\nTest.InsertObjects([S1, S2, S3], [0, 0, 1], [\"A\", \"B\"])\nTest.AddStartingPoint(10)\nTest.AddExitState(0)\nTest.Validate() # Success!\n\n##############\n# Test Agent #\n##############\n\nMyMap = Map()\nMyMap.BuildGridWorld(1, 3, diagonal=True)\nMyMap.InsertObjects([0], [0], [\"A\"])\nMyMap.AddStartingPoint(1)\nMyMap.AddExitState(2)\nMyMap.PrintMap()\nMyAgent = Agent(MyMap, \"ScaledUniform\", 1, 30)\n\n################\n# Test Planner #\n################\n\nMyMap = Map()\nMyMap.BuildGridWorld(1, 3, diagonal=True)\nMyMap.InsertObjects([0], [0], [\"A\"])\nMyMap.AddStartingPoint(1)\nMyMap.AddExitState(2)\nMyAgent = Agent(MyMap, \"ScaledUniform\", 1, 30)\nMyPlanner = Planner.Planner(MyAgent, MyMap)\nMyPlanner.Simulate()\n\n#########################\n# Test more complex map #\n#########################\n\nMyMap = Map()\nMyMap.BuildGridWorld(4, 5, diagonal=True)\nMyMap.InsertObjects([3, 16], [0, 1], [\"A\", \"B\"])\nMyMap.InsertSquare(2, 1, 3, 3, 1)\nMyMap.AddStartingPoint(19)\nMyMap.AddExitState(0)\nMyAgent = Agent(MyMap, \"ScaledUniform\", 1, 30)\nMyPlanner = Planner.Planner(MyAgent, MyMap)\nMyPlanner.Simulate()\n\n#################\n# Test observer #\n#################\n\nMyMap = Map()\nMyMap.BuildGridWorld(4, 5, diagonal=True)\nMyMap.InsertObjects([3, 16], [0, 1], [\"A\", \"B\"])\nMyMap.InsertSquare(2, 1, 3, 3, 1)\nMyMap.AddStartingPoint(19)\nMyMap.AddExitState(0)\nMyAgent = Agent(MyMap, \"ScaledUniform\", 1, 30)\nObs = Observer(MyAgent, MyMap)\nObs.SimulateAgents(10)\nObs.SimulateAgents(10, True)\n\n# Remove action softmax\nMyMap = Map()\nMyMap.BuildGridWorld(4, 5, diagonal=True)\nMyMap.InsertObjects([3, 16], [0, 1], [\"A\", \"B\"])\nMyMap.InsertSquare(2, 1, 3, 3, 1)\nMyMap.AddTerrainNames([\"Mud\", \"Water\"])\nMyMap.AddStartingPoint(19)\nMyMap.AddExitState(0)\nMyMap.PrintMap()\n# Softmax choices but not actions\nMyAgent = Agent(MyMap, \"ScaledUniform\", 1, 30, True, False)\nObs = Observer(MyAgent, MyMap)\n# Simulate 100 agents. 
Non-human readable.\n# Use same agent parameters.\n# This computes the action distribution given costs and rewards\nObs.SimulateAgents(100, False, False)\n\n\n# Test Inference\nMyMap = Map()\nMyMap.BuildGridWorld(8, 6, diagonal=True)\nMyMap.InsertObjects([36, 47], [0, 1], [\"Grapes\", \"Apples\"])\nMyMap.InsertSquare(1, 1, 8, 3, 1)\nMyMap.InsertSquare(3, 3, 6, 1, 2)\nMyMap.InsertSquare(6, 4, 3, 2, 2)\nMyMap.AddTerrainNames([\"Mud\", \"Jungle\", \"Water\"])\nMyMap.AddStartingPoint(45)\nMyMap.AddExitState(0)\n# Softmax choices but not actions\nMyAgent = Agent(MyMap, \"ScaledUniform\", 1, 30)\nObs = Observer(MyAgent, MyMap)\n# Use 10 samples to infer agent\nRes = Obs.InferAgent([1, 1, 0, 4, 4, 0, 4, 4, 4], 100, True)\n","sub_path":"build/lib/Bishop/Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"306068646","text":"\"\"\"\nadapted from original crepe repository at: https://github.com/marl/crepe\noriginal article: \"CREPE: A Convolutional Representation for Pitch Estimation\", 2018, (Kim, Jong Wook; Salamon, Justin; Li, Peter; Bello, Juan Pablo)\n\narticle: \"Fully-Convolutional Network for Pitch Estimation of Speech Signals\", 2019, (Ardaillon, Luc; Roebel, Axel)\n\nmodified by Luc Ardaillon: 16/04/2019\n\"\"\"\n\n# the model is trained on 8kHz audio\nmodel_srate = 8000\n\ndef build_model(learning_rate=0.0002, weightsFile=None, inputSize=1953, dropout = 0, training = False):\n '''\n :param learning_rate:\n :param weightsFile:\n :param inputSize:\n :param dropout:\n :param training:\n :return:\n '''\n\n from keras.layers import Input, Reshape, Conv2D, BatchNormalization, MaxPool2D, Dropout\n from keras.layers import Permute, Flatten\n from keras.models import Model\n from keras import optimizers\n\n layers = [1, 2, 3, 4, 5, 6]\n filters = [256, 32, 32, 128, 256, 512]\n widths = [32, 64, 64, 64, 64, 64]\n strides = [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]\n\n if(inputSize is not None):\n x = Input(shape=(inputSize,), name='input', dtype='float32')\n y = Reshape(target_shape=(inputSize, 1, 1), name='input-reshape')(x)\n else:\n x = Input(shape=(None,1,1), name='input', dtype='float32')\n y = x\n\n for l, f, w, s in zip(layers, filters, widths, strides):\n y = Conv2D(f, (w, 1), strides=s, padding='valid', activation='relu', name=\"conv%d\" % l)(y)\n if(l<4):\n y = MaxPool2D(pool_size=(2, 1), strides=None, padding='valid', name=\"conv%d-maxpool\" % l)(y)\n\n y = BatchNormalization(name=\"conv%d-BN\" % l)(y)\n if(dropout and training):\n y = Dropout(0.25, name=\"conv%d-dropout\" % l)(y)\n\n # here replaced the fully-connected layer by a convolutional one:\n y = Conv2D(486, (4, 1), strides=(1, 1), padding='valid', activation='sigmoid', name=\"classifier\")(y)\n if(training):\n y = Permute((2, 1, 3), name=\"transpose\")(y)\n y = Flatten(name=\"flatten\")(y)\n\n model = Model(inputs=x, outputs=y)\n\n if(weightsFile is not None): # if restarting learning from a checkpoint\n model.load_weights(weightsFile)\n\n if(training):\n for layer in model.layers:\n layer.trainable = True\n\n model.compile(optimizer=optimizers.Adam(lr=learning_rate), loss='binary_crossentropy')\n\n return model\n\nif __name__ == '__main__':\n model = build_model()\n model.summary()\n","sub_path":"models/FCN_1953/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
+{"seq_id":"498834266","text":"\"\"\"\n788. The Maze II\nupdated with comment\n\"\"\"\nfrom collections import deque\nclass Solution:\n \"\"\"\n @param maze: the maze\n @param start: the start\n @param destination: the destination\n @return: the shortest distance for the ball to stop at the destination\n \"\"\"\n\n\n def shortestDistance(self, maze, start, destination):\n # write your code here\n DIRECTION_DELTA = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n if not maze:\n return -1\n queue = deque([tuple(start)])\n #min_dist_to_point: minimum distance to position key from start as value.\n #e.x. min_dist_to_point[(3, 2)] = 2 minimum distance to location (3, 2) si 2\n #this also servers as visited.\n min_dist_to_point = {tuple(start): 0}\n\n\n while queue:\n head = queue.popleft()\n for delta in DIRECTION_DELTA:\n x = head[0] + delta[0]\n y = head[1] + delta[1]\n steps = 1\n while not self.has_hit_wall(maze, (x, y)):\n x += delta[0]\n y += delta[1]\n steps += 1\n x -= delta[0]\n y -= delta[1]\n steps -= 1\n if (x, y) in min_dist_to_point: #that means the point was previously visited\n if min_dist_to_point[head] + steps < min_dist_to_point[(x, y)]:\n queue.append((x, y)) #re-append to the queue if the current solution result in smallest distance\n min_dist_to_point[(x, y)] = min_dist_to_point[head] + steps\n else: # if the point was not previously visited, append to the queue\n queue.append((x, y))\n min_dist_to_point[(x, y)] = min_dist_to_point[head] + steps\n return min_dist_to_point[tuple(destination)] if tuple(destination) in min_dist_to_point else -1\n\n\n def has_hit_wall(self, maze, next_location):\n num_row = len(maze)\n num_col = len(maze[0])\n\n next_r = next_location[0]\n next_c = next_location[1]\n\n if not (0 <= next_r < num_row):\n return True\n\n if not (0 <= next_c < num_col):\n return True\n\n if maze[next_r][next_c] == 1:\n return True\n\n return False\n","sub_path":"lintcode/788.bfs.py","file_name":"788.bfs.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"190725035","text":"class Paper:\n def __init__(self, c = 0, l = 0):\n self.c = c\n self.l = l\n \nclass PaperCollection:\n def __init__(self, p):\n self.p = p\n \n def sort(self):\n for x in range(len(self.p)):\n y = x + 1\n while(y < len(self.p)):\n if(self.p[x].c > self.p[y].c):\n temp = self.p[x]\n self.p[x] = self.p[y]\n self.p[y] = temp\n y += 1\n\n def display(self):\n previousCode = 0\n totalColor = 0\n a = []\n for y in range(len(self.p)):\n totalColor += 1\n total = 1\n l = self.p[y].l\n if(self.p[y].c == previousCode):\n totalColor -= 1\n continue\n for z in range(len(self.p)):\n if(y != z and self.p[y].c == self.p[z].c):\n previousCode = self.p[y].c \n total += 1\n l += self.p[z].l\n a.append(\"{0} {1} {2}\".format(self.p[y].c, l, total)) \n \n print(totalColor)\n for x in range(len(a)):\n print(a[x])\n\ndef solution():\n n = int(input())\n pp = []\n while(n > 0):\n c, l = map(int, input().split())\n p = Paper(c, l)\n pp.append(p)\n n -= 1\n pc = PaperCollection(pp)\n pc.sort()\n pc.display()\n\nsolution()\n \n \n ","sub_path":"bt70.py","file_name":"bt70.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"173678409","text":"import os\nimport sys\nimport tempfile\n\n# add module to syspath\n# get the current working directory\npwd = os.path.abspath(os.path.dirname(__file__))\n# isolate the last folder (the folder we are 
currently in)\nproject = os.path.basename(pwd)\n# remove the last folder from the cwd\nnew_path = os.path.dirname(pwd)\n# create a new path pointing to where our flask object is defined\nfull_path = os.path.join(new_path, 'flaskr')\n\ntry:\n from flaskr import app, init_db\nexcept ImportError:\n sys.path.append(full_path)\n from flaskr import app, init_db\n\ndef before_feature(context, feature):\n app.config['TESTING'] = True\n context.db, app.config['DATABASE'] = tempfile.mkstemp()\n context.client = app.test_client()\n init_db()\n\ndef after_feature(context, feature):\n os.close(context.db)\n os.unlink(app.config['DATABASE'])\n\n","sub_path":"chp18_flask-bdd/features/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"220153218","text":"# Copyright (c) 2017-2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nThis module prints the metadata obtained from a remote server.\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom typing import Collection\n\nfrom ._base import CliCommand\nfrom .. import LOG, Network\nfrom ..client.config import configure_parser, AnonymousNetworkConfig\nfrom ..model.core import UserTerminateRequest, ConnectionTimeoutError\nfrom ..model.types_store import PackageStore\nfrom ..util.dar import DamlcPackageError\nfrom ..util.dar_repo import LocalDarRepository\nfrom ..pretty import get_pretty_printer, PrettyOptions\n\n\nclass PrintMetadataCommand(CliCommand):\n name = 'metadata'\n\n def parser(self) -> ArgumentParser:\n arg_parser = ArgumentParser()\n\n configure_parser(arg_parser, parties=False)\n arg_parser.add_argument('--file', help='path to a DAR file', action='append')\n arg_parser.add_argument('--show-hidden', help='show hidden types', action='store_true')\n arg_parser.add_argument('--format', help='one of \\\"daml\\\" or \\\"python\\\"', default='daml')\n return arg_parser\n\n def execute(self, args) -> int:\n LOG.debug('Executing a metadata fetch...')\n\n options = PrettyOptions(\n column_width=80,\n show_hidden_types=args.show_hidden,\n format=args.format)\n\n if args.file:\n return self.execute_static_metadata(args.file, options)\n\n config = AnonymousNetworkConfig.get_config(args)\n\n return self.execute_runtime_metadata(config, options)\n\n @staticmethod\n def execute_static_metadata(files: Collection[str], options: PrettyOptions) -> int:\n repo = LocalDarRepository()\n try:\n repo.add_source(*files)\n _process_metadata(repo.store, options)\n return 0\n\n except DamlcPackageError as ex:\n return ex.exit_code\n\n @staticmethod\n def execute_runtime_metadata(config: 'AnonymousNetworkConfig', options: PrettyOptions) -> int:\n try:\n network = Network()\n network.set_config(config)\n network.run_until_complete(_main(network, options))\n return 0\n except UserTerminateRequest:\n return 1\n except ConnectionTimeoutError as ex:\n print(str(ex))\n return 1\n\n\nasync def _main(network: Network, options):\n metadata = await network.aio_global().metadata()\n _process_metadata(metadata.store, options)\n\n\ndef _process_metadata(store: PackageStore, options: PrettyOptions):\n import sys\n if sys.stdout.isatty():\n try:\n import pygments\n from pygments.formatters.terminal256 import Terminal256Formatter\n formatter = Terminal256Formatter()\n except ImportError:\n pygments = None\n formatter = None\n else:\n pygments = None\n formatter = None\n\n 
pretty_printer = get_pretty_printer(options.format, options, store)\n code = pretty_printer.render_store()\n lexer = pretty_printer.lexer()\n\n if pygments is not None and lexer is not None and formatter is not None:\n print(pygments.highlight(code, lexer, formatter))\n else:\n print(code)\n\n","sub_path":"python/dazl/cli/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"32398186","text":"from src.NodeNormalization import NodeNormalization\nimport pytest\n\n\ndef test_nn_load():\n nn: NodeNormalization = NodeNormalization(True)\n\n nn._test_mode = 1\n\n assert(nn.load_compendium(\"./tests/datafile.json\", 5))\n\ndef test_nn_record_validation():\n nn: NodeNormalization = NodeNormalization()\n\n assert(nn.validate_compendia(\"./tests/datafile.json\"))\n\n ret_val = nn.validate_compendia(\"./tests/datafile_with_errors.json\")\n\n assert(ret_val == False)\n","sub_path":"tests/test_nn.py","file_name":"test_nn.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"173573920","text":"import configparser\n#import pymysql\nimport mysql.connector as mysql\n\nclass Handler(object):\n\tdef __init__(self):\n\t\tconfig = configparser.ConfigParser()\n\t\tconfig.read(\"config/database.config\")\n\t\tself.user = config[\"main\"][\"username\"]\n\t\tself.passwd = config[\"main\"][\"password\"]\n\t\tself.db = config[\"main\"][\"db\"]\n\t\tself.host = config[\"main\"][\"host\"]\n\t\t\n\tdef connect(self):\n\t\t#self.conn = pymysql.connect(\n\t\tself.conn = mysql.connect(\n\t\t\thost=self.host,\n\t\t\tuser=self.user,\n\t\t\tpasswd=self.passwd,\n\t\t\tdb=self.db,\n\t\t\tautocommit=True)\n\t\tself.cursor = self.conn.cursor(dictionary=True)\n\n\tdef execute(self,query):\n\t\tself.cursor.execute(query)\n\t\treturn self.cursor\n\n\tdef print_response(self):\n\t\t#NOT READY YET!!!!\n\t\tfor row in self.cursor.fetchall():\n\t\t\tprint(row)\n\t\t#print(\"ha\")\n\n\tdef close_connection(self):\n\t\tself.conn.close()\n\n\tdef insertIntoTable(self, table, params, timestamps=False):\n\t\tif timestamps:\n\t\t\tparams.update({\"created_at\" : {\"quote\" : False, \"value\" : \"NOW()\"},\n\t\t\t\t\t\"updated_at\" : {\"quote\" : False, \"value\" : \"NOW()\"}})\n\n\t\t#print(params)\n\t\tquery_cols = []\n\t\tquery_values = []\n\n\t\tfor key in params:\n\t\t\tquery_cols.append(key)\n\t\t\tquotes = \"\\\"\"\n\t\t\tvalue = params[key]\n\t\t\tif isinstance(value, dict):\n\t\t\t\tif not params[key][\"quote\"]:\n\t\t\t\t\tquotes = \"\"\n\t\t\t\tvalue = params[key][\"value\"]\n\t\t\tquery_values.append(\"{}{}{}\".format(quotes,value,quotes))\n\n\t\tquery = \"INSERT INTO {} ({}) values ({})\".format(table , \",\".join(query_cols) , \",\".join(query_values))\n\t\t#print(query)\n\t\tself.execute(query)\n\t\treturn self.cursor.lastrowid\n\n\tdef updateRow(self, table, update_params, where_params, timestamps=False):\n\t\tif timestamps:\n\t\t\tupdate_params.update({\"updated_at\" : {\"quote\" : False, \"value\" : \"NOW()\"}})\n\t\tquery = \"UPDATE {} set \".format(table)\n\n\t\tupdate_array = []\n\t\tfor key in update_params:\n\t\t\tquotes = \"\\\"\"\n\t\t\tquery += \"\"\n\t\t\tvalue = update_params[key]\n\t\t\tif isinstance(value, dict):\n\t\t\t\tif not update_params[key][\"quote\"]:\n\t\t\t\t\tquotes = \"\"\n\t\t\t\tvalue = update_params[key][\"value\"]\n\t\t\tupdate_array.append(\"{} = {}{}{} 
\".format(key,quotes,value,quotes))\n\n\t\twhere_array = []\n\t\tfor key in where_params:\n\t\t\tquotes = \"\\\"\"\n\t\t\tquery += \"\"\n\t\t\tvalue = where_params[key]\n\t\t\tif isinstance(value, dict):\n\t\t\t\tif not where_params[key][\"quote\"]:\n\t\t\t\t\tquotes = \"\"\n\t\t\t\tvalue = where_params[key][\"value\"]\n\t\t\twhere_array.append(\"{} = {}{}{} \".format(key,quotes,value,quotes))\n\n\t\tquery += \"{} where {}\".format(\",\".join(update_array),\" and \".join(where_array))\n\t\tself.execute(query)\n\n\n\n\n","sub_path":"mysql_handler/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"574461673","text":"import os\nimport time\nimport exifread\n\ncount = 1\nfilepath = \".\"\n\nmonths = {\n '01': 'January',\n '02': 'February',\n '03': 'March',\n '04': 'April',\n '05': 'May',\n '06': 'June',\n '07': 'July',\n '08': 'August',\n '09': 'September',\n '10': 'October',\n '11': 'November',\n '12': 'December'\n }\n\nseasons = {\n 'January': 'Winter',\n 'February': 'Winter',\n 'March': 'Spring',\n 'April': 'Spring',\n 'May': 'Spring',\n 'June': 'Summer',\n 'July': 'Summer',\n 'August': 'Summer',\n 'September': 'Fall',\n 'October': 'Fall',\n 'November': 'Fall',\n 'December': 'December'\n }\n\n\ndef filterImages(list):\n images = []\n for l in list:\n if \".jpg\" in l or \".JPG\" in l:\n images.append(l)\n return images\n\n\ndef sortImage(image, dateTimeString):\n year = dateTimeString[0]\n monthCode = dateTimeString[1]\n month = months.get(monthCode)\n season = seasons.get(month)\n day = dateTimeString[2]\n if season == 'December':\n os.renames(image, filepath + \"\\\\\" + month + \"\\\\\" + dateTimeString[1] + day + year + \"_\" + \"{:04}\".format(count) + \".jpg\")\n else:\n os.renames(image, filepath + \"\\\\\" + season + \"\\\\\" + month + \"\\\\\" + dateTimeString[1] + day + year + \"_\" + \"{:04}\".format(count) + \".jpg\")\n\n\ndef processImage(name):\n global count\n f = open(name, \"rb\")\n meta = readImgMeta(f)\n f.close()\n imageDateTime = str(meta[\"EXIF DateTimeOriginal\"])\n dateTimeString = imageDateTime.split(' ')[0].split(':')\n sortImage(name, dateTimeString)\n count += 1\n\n\ndef readImgMeta(f):\n return exifread.process_file(f)\n\n\ndef main():\n images = filterImages(os.listdir())\n for i in images:\n processImage(i)\n\n\nmain()\n","sub_path":"PhotoSorter.py","file_name":"PhotoSorter.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"604282129","text":"#! 
/usr/bin/python3 -OOt\n\nimport pathlib\nimport re\nimport sys\nimport glob\nimport os\nimport subprocess\nfrom pathlib import *\nimport time\nimport json\n\n\ndef convert(path, config):\n # get config settings\n START_FRAME = int(config['startFrame'])\n MAX_FRAMES = int(config['maxFrames'])\n MAX_WIDTH = int(config['maxWidth'])\n MAX_HEIGHT = int(config['maxHeight'])\n SCALER = config['scaler']\n CRF = int(config['quality'])\n FRAME_RATE = int(config['FPS'])\n PRESET = config['preset']\n CODEC = config['codec']\n VIDFORMAT = config['format']\n GAMMA = config['gamma']\n PREMULT = int(config['premult'])\n NAME_LEVELS = int(config['namelevels'])\n\n AUDIO = False\n AUDIO_OFFSET = int(config['audiooffset'])\n\n standard = ['.jpg', '.jpeg', '.png', '.tiff', '.tif']\n gamma = ['.exr', '.tga']\n alltypes = standard + gamma\n vid_suff = ['.mov', '.mp4', '.webm', '.mkv', '.avi'] # do vid-vid conversion with audio as well?\n\n # Check if being output to video or frames\n isVidOut = True\n vids = ['mov', 'mp4', 'mp4-via-jpg']\n if VIDFORMAT not in vids:\n isVidOut = False\n\n file = pathlib.Path(path)\n saveDir = file # set the directory to save the output to\n\n # For Directories\n if os.path.isdir(path):\n files = os.listdir(path)\n for f in files:\n fpath = pathlib.Path(f)\n if fpath.suffix in alltypes:\n file = file.joinpath(fpath)\n break\n stem = file.stem\n suffix = file.suffix\n\n # create ffmpeg command to append to\n platform = sys.platform\n if platform == \"win32\":\n cmd = ['ffmpeg']\n elif platform.startswith('linux'): # full path to ffmpeg for linux\n cmd = ['/usr/bin/ffmpeg']\n else: # full path to ffmpeg for osx\n cmd = ['/usr/local/bin/ffmpeg']\n\n if (suffix in alltypes):\n l = len(stem)\n back = stem[::-1]\n m = re.search('\\d+', back)\n if (m):\n # simple regex match - find digit from the end of the stem\n sp = m.span(0)\n sp2 = [l - a for a in sp]\n sp2.reverse()\n\n # glob for other frames in the folder and find the first frame to use as start number\n preframepart = stem[0:sp2[0]]\n postframepart = stem[sp2[1]:]\n frames = sorted(file.parent.glob(preframepart + '*' + postframepart + suffix))\n start_num = int(frames[0].name[sp2[0]:sp2[1]])\n if START_FRAME > 0:\n start_num = START_FRAME\n\n # get padding for frame num\n padding = sp2[1] - sp2[0]\n padstring = '%' + format(padding, '02') + 'd' # eg %05d\n # fix for unpadded frame numbers\n if len(frames[0].name) != len(frames[-1].name):\n padstring = '%' + 'd'\n\n # get absolute path to the input file and set the outputfile\n inputf = stem[0:sp2[0]] + padstring + postframepart + suffix\n inputf_abs = str(file.with_name(inputf))\n\n # naming the video file based on parent dirs\n parts = file.parent.parts\n if (NAME_LEVELS > 0):\n sec = len(parts) - NAME_LEVELS\n parts = parts[sec:]\n outname = \"_\".join(parts)\n else:\n outname = str(file.parent)\n outname = re.sub(r'\\W+', '_', outname)\n\n outputf = str(saveDir.with_name('_' + outname + \"_video.\" + VIDFORMAT))\n if not isVidOut:\n outputf = str(saveDir.with_name('_' + preframepart + \"_\" + padstring + \".\" + VIDFORMAT))\n\n # if the video already exists create do not overwrite it\n counter = 1\n while pathlib.Path(outputf).exists():\n outputf = str(saveDir.with_name('_' + outname + \"_video_\" + str(counter) + \".\" + VIDFORMAT))\n counter = counter + 1\n\n # scale down video if the image dimensions exceed the max width or height, while maintaining aspect ratio\n if MAX_HEIGHT <= 0 and MAX_WIDTH <= 0:\n scalestr = \"scale='trunc(iw/2)*2':'trunc(ih/2)*2'\"\n elif 
MAX_WIDTH <= 0:\n scalestr = \"scale='-2':'min(\" + str(MAX_HEIGHT) + \",trunc(ih/2)*2)':force_original_aspect_ratio=decrease\"\n elif MAX_HEIGHT <= 0:\n scalestr = \"scale='min(\" + str(MAX_WIDTH) + \",trunc(iw/2)*2)':-2\"\n else:\n # this currently causes issues if the W or H are greater than the max, and the other dimension is no longer divisible by 2 when scaled down so pad it\n scalestr = \"scale='min(\" + str(MAX_WIDTH) + \",trunc(iw/2)*2)':'min(\" + str(MAX_HEIGHT) + \",trunc(ih/2)*2)':force_original_aspect_ratio=decrease,pad=\" + str(MAX_WIDTH) + \":\" + str(MAX_HEIGHT) + \":(ow-iw)/2:(oh-ih)/2\"\n # maybe skip force ratio and do it manually? Doesn't work yet...\n max_asp = float(MAX_WIDTH) / MAX_HEIGHT\n A = \"min(trunc(iw/2)*2,\" + str(MAX_WIDTH) + \")\"\n B = \"if( gt(ih,\" + str(MAX_HEIGHT) + \"), trunc((\" + str(MAX_HEIGHT) + \"*dar)/2)*2, -2 )\"\n C = \"if(gt(iw,\" + str(MAX_WIDTH) + \"), trunc((\" + str(MAX_WIDTH) + \"/dar)/2)*2 ,-2)\"\n D = \"min( trunc(ih/2)*2,\" + str(MAX_HEIGHT) + \")\"\n scalestr = \"scale='if( gt(dar,\" + str(max_asp) + \"), \" + A + \", \" + B + \")':'if( gt(dar,\" + str(max_asp) + \"), \" + C + \", \" + D + \" )'\"\n\n # ============================================\n # FFPROBE - Probably easier to use this metadata\n # =============================================\n # ffprobe = ['ffprobe']\n # ffprobe.extend(('-v', 'quiet'))\n # ffprobe.extend(('-print_format', 'json'))\n # ffprobe.append(str(file))\n # ffprobe.append('-show_format')\n # ffprobe.append('-show_streams')\n # ffpr = subprocess.check_output(ffprobe)\n # ffjson = json.loads(ffpr)\n # IN_W = ffjson['streams'][0]['coded_width']\n # IN_H = ffjson['streams'][0]['coded_height']\n # IN_DURATION = ffjson['streams'][0]['duration']\n # IN_FRAMES = int(ffjson['streams'][0]['nb_frames'])\n # IN_FPS = ffjson['streams'][0]['r_frame_rate']\n # ===================================\n\n if (suffix in gamma):\n cmd.extend(('-gamma', GAMMA))\n cmd.extend(('-start_number', str(start_num).zfill(padding)))\n cmd.extend(('-r', str(FRAME_RATE)))\n cmd.extend(('-i', inputf_abs))\n\n # AUDIO\n try:\n tracks = []\n tracks.extend(sorted(file.parent.glob('*.mp3')))\n tracks.extend(sorted(file.parent.glob('*.wav')))\n # also search immediate parent?\n tracks.extend(sorted(file.parents[1].glob('*.mp3')))\n tracks.extend(sorted(file.parents[1].glob('*.wav')))\n if (tracks):\n AUDIO = True\n # audio track offset - add controls for this?\n cmd.extend(('-itsoffset', str(AUDIO_OFFSET)))\n cmd.extend(('-i', str(tracks[0])))\n except:\n pass\n if isVidOut:\n # Codecs TODO DNxHR and ProRes?\n if CODEC == \"H.264\":\n cmd.extend(('-c:v', 'libx264'))\n cmd.extend(('-pix_fmt', 'yuv420p', '-crf', str(CRF), '-preset', PRESET))\n # colours are always slightly off... not sure how to fix. libx264rgb seems to help but still not right?\n # cmd.extend(('-c:v', 'libx264rgb'))\n # cmd.extend(('-pix_fmt', 'yuv444p', '-crf', str(CRF), '-preset', PRESET))\n elif CODEC == \"DNxHR\":\n cmd.extend(('-c:v', 'dnxhd'))\n cmd.extend(('-profile', 'dnxhr_hq'))\n else:\n pass\n\n if MAX_FRAMES > 0:\n cmd.extend(('-vframes', str(MAX_FRAMES)))\n if isVidOut:\n if PREMULT:\n cmd.extend(('-vf', 'premultiply=inplace=1, ' + scalestr)) # premult is causing all the problems?? 
Leave it off...\n else:\n cmd.extend(('-vf', scalestr))\n else:\n cmd.extend(('-vf', scalestr))\n cmd.extend(('-sws_flags', SCALER))\n if VIDFORMAT == 'jpg':\n cmd.extend(('-q:v', '2'))\n # AUDIO OPTIONS\n if AUDIO:\n cmd.extend(('-c:a', 'aac'))\n cmd.extend(('-b:a', '320k'))\n cmd.append('-shortest')\n cmd.append(outputf)\n subprocess.run(cmd)\n else:\n pass\n # ==================================\n # Vid-Vid conversion (with audio)\n # TODO\n # ==================================\n elif suffix in vid_suff:\n cmd.extend(('-i', file))\n if CODEC == \"H.264\":\n cmd.extend(('-c:v', 'libx264'))\n cmd.extend(('-pix_fmt', 'yuv420p', '-crf', str(CRF), '-preset', PRESET))\n\n outputf = str(saveDir.with_name(stem + \"_converted.\" + VIDFORMAT))\n cmd.append(outputf)\n subprocess.run(cmd)\n else:\n print(\"Invalid file extension\")\n\n# Read config file for settings\ndef readSettings(settings):\n try:\n with open(settings, 'r') as f:\n config = json.load(f)\n except Exception as e:\n print(e)\n f.close()\n return config\n\nif __name__ == '__main__':\n path = Path(sys.argv[0]).with_name('settings.json')\n config = readSettings(path)\n convert(sys.argv[1], config)\n # input()","sub_path":"ffmpegify.py","file_name":"ffmpegify.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"312903147","text":"class BIT:\n def __init__(self, n, MOD):\n self.n = n\n self.data = [0] * (n + 1)\n self.MOD = MOD\n\n def sum(self, i):\n s = 0\n while i > 0:\n s += self.data[i]\n s %= self.MOD\n i -= i & -i\n return s\n\n def add(self, i, x):\n while i <= self.n:\n self.data[i] += x\n self.data[i] %= self.MOD\n i += i & -i\n\n def diff(self, i, j):\n s = self.sum(j) - self.sum(i)\n s %= self.MOD\n return s\n\n\nn, k = map(int, input().split())\nmod = 998244353\nbit = BIT(n + 2, mod)\nranges = []\nfor _ in range(k):\n l, r = map(int, input().split())\n ranges.append((l, r))\n\nbit.add(1, 1)\n\nfor i in range(1, n):\n v = bit.sum(i)\n for l, r in ranges:\n bit.add(i + l, v)\n bit.add(min(n + 1, i + r + 1), -v)\nprint(bit.diff(n - 1, n))\n","sub_path":"Python_codes/p02549/s818025054.py","file_name":"s818025054.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"147966713","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n # depth of an empty tree is 0; otherwise 1 + depth of the deeper subtree\n if root is None:\n return 0\n return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))\n\n\nif __name__ == '__main__':\n sol = Solution()\n root = TreeNode(1)\n root.right = TreeNode(9)\n root.left = TreeNode(20)\n","sub_path":"jiuyang_file/depth_binary_tree.py","file_name":"depth_binary_tree.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"546529068","text":"#!/usr/bin/env python\n# Copyright (C) 2011 Statoil ASA, Norway. \n# \n# The file 'rft_test.py' is part of ERT - Ensemble based Reservoir Tool. \n# \n# ERT is free software: you can redistribute it and/or modify \n# it under the terms of the GNU General Public License as published by \n# the Free Software Foundation, either version 3 of the License, or \n# (at your option) any later version. 
\n# \n# ERT is distributed in the hope that it will be useful, but WITHOUT ANY \n# WARRANTY; without even the implied warranty of MERCHANTABILITY or \n# FITNESS FOR A PARTICULAR PURPOSE. \n# \n# See the GNU General Public License at \n# for more details. \n\n\nimport datetime\nimport unittest\nimport ert.ecl.ecl as ecl\nfrom test_util import approx_equal, approx_equalv\n\n\nRFT_file = \"test-data/Statoil/ECLIPSE/Gurbat/ECLIPSE.RFT\"\nPLT_file = \"test-data/Statoil/ECLIPSE/RFT/TEST1_1A.RFT\"\n\n\n\n\ndef out_of_range():\n rftFile = ecl.EclRFTFile( RFT_file )\n rft = rftFile[100]\n\n\n\nclass RFTTest( unittest.TestCase ):\n\n def loadRFT( self ):\n rftFile = ecl.EclRFTFile( RFT_file )\n\n rft = rftFile[0]\n cell = rft.ijkget( (32 , 53 , 0) )\n self.assertTrue( isinstance( cell , ecl.EclRFTCell ))\n\n self.assertEqual( 2 , rftFile.size( ) )\n self.assertEqual( 0 , rftFile.size( well = \"OP*\"))\n self.assertEqual( 0 , rftFile.size( well = \"XXX\"))\n self.assertEqual( 1 , rftFile.size( date = datetime.date( 2000 , 6 , 1 )))\n self.assertEqual( 0 , rftFile.size( date = datetime.date( 2000 , 6 , 17 )))\n \n cell = rft.ijkget( (30 , 20 , 1880) )\n self.assertTrue( cell is None )\n\n for rft in rftFile:\n self.assertTrue( rft.is_RFT() )\n self.assertFalse( rft.is_SEGMENT( ))\n self.assertFalse( rft.is_PLT( ))\n self.assertFalse( rft.is_MSW( ))\n \n for cell in rft:\n self.assertTrue( isinstance( cell , ecl.EclRFTCell ))\n \n cell0 = rft.iget_sorted( 0 )\n self.assertTrue( isinstance( cell , ecl.EclRFTCell ))\n rft.sort()\n\n\n \n def loadPLT( self ):\n pltFile = ecl.EclRFTFile( PLT_file )\n plt = pltFile[11]\n self.assertTrue( plt.is_PLT() )\n self.assertFalse( plt.is_SEGMENT( ))\n self.assertFalse( plt.is_RFT( ))\n self.assertFalse( plt.is_MSW( ))\n for cell in plt:\n self.assertTrue( isinstance( cell , ecl.EclPLTCell ))\n\n\n def exceptions( self ):\n self.assertRaises( IndexError , out_of_range )\n \n\n \n\ndef fast_suite():\n suite = unittest.TestSuite()\n suite.addTest( RFTTest( 'loadRFT' ))\n suite.addTest( RFTTest( 'loadPLT' ))\n suite.addTest( RFTTest( 'exceptions' ))\n return suite\n\n\ndef test_suite( argv ):\n return fast_suite()\n\n\nif __name__ == \"__main__\":\n unittest.TextTestRunner().run( fast_suite() )\n\n\n \n","sub_path":"devel/python/test/rft_test.py","file_name":"rft_test.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"431990706","text":"import unittest\nimport task\nimport math\nfrom datetime import date\n\n\nclass TestCase(unittest.TestCase):\n\n def test1(self):\n expected = \"success\"\n self.assertEqual(expected, task.firstrun())\n\n def test2(self):\n expected = \"failure\"\n self.assertNotEqual(expected, task.firstrun())\n\n def test_circleArea(self):\n input1 = 1\n output1 = math.pi\n self.assertEqual(output1, task.circleArea(input1))\n input2 = 0\n output2 = 0\n self.assertEqual(output2, task.circleArea(input2))\n\n def test_firstLast(self):\n input1 = [0, 1, 2, 3, 4, 5]\n output1 = [0, 5]\n input2 = [\"apple\", \"jacks\", \"are\", \"gross\", \"this will be a long string for fun because it will be at the end\"]\n output2 = [\"apple\", \"this will be a long string for fun because it will be at the end\"]\n self.assertEqual(output1, task.firstLast(input1))\n self.assertEqual(output2, task.firstLast(input2))\n\n def test_dateDifference(self):\n input1 = date(2020, 12, 25)\n input2 = date(2021, 12, 25)\n output1 = 365\n input3 = date(2022, 12, 25)\n output2 = 365\n 
self.assertEqual(output1, task.dateDifference(input1, input2))\n self.assertEqual(output2, task.dateDifference(input2, input3))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"170952388","text":"#layer size 128, softmax, 40 epochs, print model summary\n\nimport tensorflow as tf\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.callbacks import TensorBoard, EarlyStopping\nfrom tensorflow.python.keras.utils import plot_model\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.utils import to_categorical\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nfrom tqdm import tqdm\nimport random\nimport time\nfrom sklearn.model_selection import train_test_split\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-s\", \"--server\", required=True, help=\"Running the code on the server or not (y/n)\")\nap.add_argument(\"-b\", \"--binary\", required=True, help=\"NN works on binary classification or not (y/n)\")\nargs = vars(ap.parse_args())\nrun_on_server = args[\"server\"]\nrun_binary = args[\"binary\"]\n\nif run_on_server == \"y\" and run_binary == \"y\":\n train_folder = \"/mnt/Data/ltanzi/Train_Val_BROUNBRO/Train\"\n val_folder = \"/mnt/Data/ltanzi/Train_Val_BROUNBRO/Validation\"\n out_folder = \"/mnt/Data/ltanzi/\"\n resnet_weights_path = \"imagenet\"\n last_layer = 1\n categories = [\"B\", \"Unbroken\"]\n num_classes = 2\n loss = \"binary_crossentropy\"\n\nelif run_on_server == \"y\" and run_binary == \"n\":\n train_folder = \"/mnt/Data/ltanzi/Train_Val/TrainShallow\"\n val_folder = \"/mnt/Data/ltanzi/Train_Val/Validation\"\n out_folder = \"/mnt/Data/ltanzi/FirstNN/\"\n resnet_weights_path = \"imagenet\"\n last_layer = 3\n categories = [\"A\", \"B\", \"Unbroken\"]\n num_classes = 3\n loss = \"sparse_categorical_crossentropy\"\n\n\nelif run_on_server == \"n\":\n train_folder = \"/Users/leonardotanzi/Desktop/FinalDataset2/Train_Val/Train\"\n val_folder = \"/Users/leonardotanzi/Desktop/FinalDataset2/Train_Val/Validation\"\n out_folder = \"/Users/leonardotanzi/Desktop/FinalDataset/\"\n resnet_weights_path = \"/Users/leonardotanzi/Desktop/MasterThesis/TransferLearning/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5\"\n\nelse:\n raise ValueError('Incorrect arg')\n\n\nimage_size = 256\n\ntraining_data = []\n\nfor category in categories:\n\n path = os.path.join(train_folder, category) # create path to broken and unbroken\n class_num = categories.index(category) # get the classification (0 or a 1). 
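# A sketch of the behaviour the dateDifference test above expects; the real
# task.dateDifference is not shown in this record, so date_difference below
# is a hypothetical stand-in built on plain datetime.date subtraction.
from datetime import date

def date_difference(d1, d2):
    return abs((d2 - d1).days)

assert date_difference(date(2020, 12, 25), date(2021, 12, 25)) == 365
assert date_difference(date(2021, 12, 25), date(2022, 12, 25)) == 365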
0=broken 1=unbroken\n\n for img in tqdm(os.listdir(path)): # iterate over each image per broken and unbroken\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE) # convert to array\n new_array = cv2.resize(img_array, (image_size, image_size)) # resize to normalize data size\n training_data.append([new_array, class_num]) # add this to our training_data\n except Exception as e: # in the interest in keeping the output clean...\n pass\n\npass\nrandom.shuffle(training_data)\n\nX = []\ny = []\n\nfor features, label in training_data:\n X.append(features)\n y.append(label)\n\n#qua ho due liste una con immagini e una con la label all'indice corrispondente (shufflate)\n\nX = np.array(X).reshape(-1, image_size, image_size, 1) # we need to convert x in numpy array, last 1 because it's grayscale\n\n'''\nnumpy allow us to give one of new shape parameter as -1 (eg: (2,-1) or (-1,3) but not (-1, -1)). It simply means that it \nis an unknown dimension and we want numpy to figure it out. And numpy will figure this by looking at the 'length of the \narray and remaining dimensions' and making sure it satisfies the above mentioned criteria.\nz = np.array([[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]])\n \nNew shape as (-1, 2). row unknown, column 2. we get result new shape as (6, 2)\nz.reshape(-1, 2)\narray([[ 1, 2],\n [ 3, 4],\n [ 5, 6],\n [ 7, 8],\n [ 9, 10],\n [11, 12]])\n\nIt means, that the size of the dimension, for which you passed -1, is being inferred. Thus,\n\nA.reshape(-1, 28*28)\nmeans, \"reshape A so that its second dimension has a size of 28*28 and calculate the correct size of the first dimension\".\n\n'''\n\nX = X/255.0 # normalize\n\nconv_layers = [3]\nlayer_sizes = [32]\ndense_layers = [2]\n\nes = EarlyStopping(monitor=\"val_acc\", mode=\"max\", verbose=1, patience=10) # verbose to print the n of epoch in which stopped,\n\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n\n lr = 0.001 \n NAME = \"{}conv-{}nodes-{}dense-lr{}\".format(conv_layer, layer_size, dense_layer, lr)\n tensorboard = TensorBoard(log_dir=\"/mnt/data/ltanzi/FirstNN/logsLR/{}\".format(NAME))\n model = Sequential()\n\n model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:])) #64 is the number of filter used. 
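# A sketch (mirroring the imports this script already uses) of how its two
# callbacks cooperate: EarlyStopping halts training after `patience` epochs
# without improvement in val_acc, while TensorBoard logs each run under a
# name that encodes the hyperparameters. The log path is a placeholder.
from tensorflow.python.keras.callbacks import TensorBoard, EarlyStopping

es = EarlyStopping(monitor="val_acc", mode="max", verbose=1, patience=10)
tb = TensorBoard(log_dir="./logs/3conv-32nodes-2dense-lr0.001")
# model.fit(X, y, batch_size=32, epochs=200, validation_split=0.3,
#           callbacks=[tb, es])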
X.shape[] prende\n #la seconda a la terza shape che sono altezza e larghezza immagine\n #e salta la prima, che era il \"-1\" in reshape\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n for l in range(conv_layer - 1): # because we need for sure 1 conv layer\n model.add(Conv2D(layer_size, (3, 3)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # model.add(Dropout(0.25))\n\n model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\n\n for l in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation(\"relu\"))\n # model.add(Dropout(0.5))\n\n model.add(Dense(last_layer))\n model.add(Activation(\"softmax\"))\n\n adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, decay=0.0) # the optimizer, as the sgd\n\n model.compile(loss=loss,\n optimizer=adam,\n metrics=[\"accuracy\"])\n \n model.fit(X, y, batch_size=32, epochs=200, validation_split=0.3, callbacks=[tensorboard, es])\n \n print(NAME)\n model.summary()\n\n # model.save(\"3-32-2.model\")\n","sub_path":"CNN/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"148850444","text":"'''\nFlaskの設定を行います。\n'''\n\nfrom flask import Flask\nfrom controllers.posts import post_ctr\nfrom model_instance import database\n\ndict_confmode = {\n 'test': 'setting.TestMode',\n 'dev': 'setting.DevMode',\n 'pro': 'setting.ProdMode'\n}\n\n\ndef create_app(config_mode='test'):\n # Flask実行ファイル読込\n app = Flask(__name__, instance_relative_config=True)\n # コンフィグ読込\n confmode = dict_confmode[config_mode]\n app.config.from_object(confmode)\n app.config.from_pyfile('application.cfg', silent=True)\n # コントローラ読込\n app.register_blueprint(post_ctr)\n # モデルインスタンス初期化\n database.init_db(app)\n return app\n","sub_path":"www/factory_app.py","file_name":"factory_app.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"123911971","text":"def expSquaring(x, n):\n ''' Efficient exponentiation algorithm. 
'''\n if n < 0:\n return expSquaring(1/x, -n)\n elif n == 0:\n return 1\n elif n == 1:\n return x\n elif n % 2 == 0:\n return expSquaring(x*x, n/2)\n else:\n return x* expSquaring(x*x, (n-1)/2)\n\n# Necessary values from keyGenerator\nf = open(\"publicKey.txt\", 'r')\nn = int(f.readline())\ne = int(f.readline())\nf.close()\n\nhashes = []\nidentical = []\nfor m in range(1, 100):\n c = expSquaring(m, e) % n\n if c in hashes:\n identical.append(c)\n hashes.append(expSquaring(m, e) % n)\n\nprint(\"Number of collisions found out of 100: \" + str(len(identical)))\nprint(\" n and e: \" + str(n) + \",\" + str(e))\n\n","sub_path":"Archive/Ph20/HW7/collisionAttack.py","file_name":"collisionAttack.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"626834169","text":"import os\nfrom flake8.api import legacy as f8\nimport re\n\nheredir = os.path.abspath(os.path.dirname(__file__))\ncelery_jobs_directory = os.path.join(heredir, '../../celeryutils')\nlaiutils_directory = os.path.join(heredir, '../../laiutils')\nunit_tests_directory = os.path.join(heredir, '../unit')\nrunner_directory = os.path.join(heredir, '../../')\n\ntestable_dirs = [heredir, celery_jobs_directory, laiutils_directory,\n unit_tests_directory, runner_directory]\n\n\ndef flake8_examine(file_loc):\n style_guide = f8.get_style_guide()\n result = style_guide.check_files([file_loc])\n error_count = result.total_errors\n return error_count\n\n\ndef get_all_py_files(directory):\n pyfiles = []\n pattern = \".*py$\"\n for f in os.listdir(directory):\n fullpath = os.path.join(directory, f)\n if (os.path.isfile(fullpath) and re.match(pattern, f)):\n pyfiles.extend([fullpath])\n return pyfiles\n\n\nclass TestFlake8:\n def test_flake8(self):\n to_test = []\n for x in testable_dirs:\n to_test.extend(get_all_py_files(x))\n for f in to_test:\n assert flake8_examine(f) == 0\n","sub_path":"code/tests/style/test_flake8.py","file_name":"test_flake8.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"15418692","text":"from flask import Flask, request, Response\nimport requests\nimport os\nimport logging\nfrom dotenv import load_dotenv, find_dotenv\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\napp = Flask(__name__)\n\nlogging.info(os.environ.get(\"TEST_ENV_VAR\",\"\"))\nload_dotenv(find_dotenv())\n\nUSER_TOKEN = os.environ.get(\"SLACK_API_TOKEN\", \"\")\nWEBHOOK_TOKEN = os.environ.get(\"WEBHOOK_TOKEN\", \"\")\nSTATUS_ENDPOINT = \"https://slack.com/api/users.profile.set\"\nHEADERS = {\n \"Authorization\": \"Bearer {}\".format(USER_TOKEN)\n}\n\n\n@app.route(\"/set_status\", methods=[\"POST\"])\ndef set_status():\n\n valid_user_token = check_user_token(USER_TOKEN)\n valid_webhook_token = check_webhook_token(request.headers.get(\"token\", \"\"))\n\n if valid_user_token and valid_webhook_token:\n\n logging.info(\"Token valid.\")\n\n if request.args is not None:\n status_text = request.args.get(\"status_text\", \"\")\n status_emoji = request.args.get(\"status_emoji\", \"\")\n else:\n status_text = \"\"\n status_emoji = \"\"\n\n payload = {\n \"profile\": {\n \"status_text\": status_text,\n \"status_emoji\": status_emoji\n }\n }\n\n logging.info(\"Setting status to {}.\".format(payload[\"profile\"]))\n r = requests.post(\n STATUS_ENDPOINT,\n json=payload,\n headers=HEADERS\n )\n\n logging.info(\"Response status code: {}\".format(r.status_code))\n\n return 
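# A quick consistency check for expSquaring above (a sketch, not part of
# the original script): the recursion should agree with Python's built-in
# pow(). Note that n/2 in the even branch is float division under Python 3;
# the base cases still match because 1.0 == 1.
for base in (2, 3, 7):
    for exp in (0, 1, 5, 10):
        assert expSquaring(base, exp) == pow(base, exp)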
Response(\"Status update successful.\"), r.status_code\n\n else:\n\n if not valid_webhook_token:\n logging.info(\"Webhook token invalid.\")\n return Response(\"Invalid webhook token.\"), 401\n\n if not valid_user_token:\n logging.info(\"User token invalid.\")\n return Response(\"Invalid user token.\"), 401\n\n\ndef check_user_token(user_token):\n logging.info(user_token)\n logging.info(type(user_token))\n if user_token == \"\" or user_token is None or not isinstance(user_token, str):\n return False\n else:\n return True\n\n\ndef check_webhook_token(webhook_token):\n if webhook_token == WEBHOOK_TOKEN:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"96608474","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport xlrd\n\n\ndef main():\n lst = []\n graph(lst)\n\n\ndef max_value(lst):\n \"\"\"Find the maximum value by sector in each year\n \"\"\"\n \"\"\"Open file\n \"\"\"\n rb = xlrd.open_workbook('zp.xlsx')\n \"\"\"Select active sheet\n \"\"\"\n sheet = rb.sheet_by_index(0)\n lst = []\n print('Максимальное значение по секторам экономики в каждом году')\n for col in range(1, sheet.ncols):\n value = sheet.cell_value(1, col)\n for row in range(1, sheet.nrows):\n data = sheet.cell_value(row, col)\n if type(data) != float:\n if data == 'NaN':\n pass\n elif (value < data):\n value = data\n lst.append(value)\n for col in range(0, sheet.ncols):\n for row in range(1, sheet.nrows):\n for i in range(0, len(lst)):\n if lst[i] == sheet.cell_value(row, col):\n print (\n 'Год: ', int(sheet.cell_value(0, col)),\n 'сектор экономики: ', sheet.cell_value(row, 0),\n 'значение: ', lst[i])\n return lst\n\n\ndef min_value(lst):\n \"\"\"Find the minimum value by sector in each year\n \"\"\"\n \"\"\"Open file\n \"\"\"\n rb = xlrd.open_workbook('zp.xlsx')\n \"\"\"Select active sheet\n \"\"\"\n sheet = rb.sheet_by_index(0)\n lst = []\n print('Минимальное значение по секторам экономики в каждом году')\n for col in range(1, sheet.ncols):\n value = sheet.cell_value(1, col)\n for row in range(1, sheet.nrows):\n data = sheet.cell_value(row, col)\n if type(data) != float:\n if data == 'NaN':\n pass\n elif (value > data):\n value = data\n lst.append(value)\n for col in range(0, sheet.ncols):\n for row in range(1, sheet.nrows):\n for i in range(0, len(lst)):\n if lst[i] == sheet.cell_value(row, col):\n print (\n 'Год: ', int(sheet.cell_value(0, col)),\n 'сектор экономики: ', sheet.cell_value(row, 0),\n 'значение: ', lst[i])\n return lst\n\n \ndef graph(lst):\n N = 17\n lst = []\n max_means = max_value(lst)\n \"\"\"the x locations for the groups\n \"\"\"\n ind = np.arange(N)\n \"\"\"the width of the bars\n \"\"\"\n width = 0.5\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, max_means, width, color='g')\n min_means = min_value(lst)\n rects2 = ax.bar(ind, min_means, width, color='r')\n \"\"\"add some text for labels, title and axes ticks\n \"\"\"\n ax.set_ylabel('Wages')\n ax.set_title('The highest and lowest wages by years')\n ax.set_xticks(ind)\n labels = ax.set_xticklabels((\n '1995 ', '2000 ', '2001 ', '2002 ', '2003 ', '2004 ', '2005 ',\n '2006 ', '2007 ', '2008 ', '2009 ', '2010 ', '2011 ', '2012 ', '2013 ',\n '2014 ', '2015 '))\n plt.setp(labels, rotation=30, fontsize=10)\n ax.legend((rects1[0], rects2[0]), ('Max', 'Min'))\n plt.show()\n return lst\n\nif __name__ == 
\"__main__\":\n main()\n","sub_path":"Extrime_wages.py","file_name":"Extrime_wages.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"4264751","text":"#This is the eventhandler file, every request goes throug this\nimport hook\ndef handle(data, thread):\n plugins=hook.testforhook_all()\n a=open(\"hookeventtemp.py\",\"w\")\n a.write(\"\"\"def call(request):\n import sys\n liste=[]\"\"\")\n for i in plugins:\n a.write(\" import \"+i+\"\\n\")\n for i in plugins:\n a.write(\" liste.append(\"+i+\".all(request))\\n\")\n a.write(\" return liste\")\n import hookeventtemp\n back=hookeventtemp.call(data)\n","sub_path":"eventhandle.py","file_name":"eventhandle.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"23848973","text":"# Закодируйте любую строку (хотя бы из трех слов) по алгоритму Хаффмана.\n\nfrom collections import Counter\nfrom collections import deque\nimport time\nimport sys\nimport string\nimport random\n\n\nclass Node:\n def __init__(self, right, left):\n self.right = right\n self.left = left\n\n def step(self, symbols_dic, binary_code):\n self.left.step(symbols_dic, binary_code + \"0\")\n self.right.step(symbols_dic, binary_code + \"1\")\n\n\nclass Leaf:\n def __init__(self, symbol):\n self.symbol = symbol\n\n def step(self, symbols_dic, binary_code):\n symbols_dic[self.symbol] = binary_code if len(binary_code) > 0 else '0'\n\n def __repr__(self):\n return self.symbol\n\n\nclass Huffman:\n def __init__(self, _string):\n self.huffnam_tabel = {}\n self.tree = deque()\n self.string = _string\n\n def __repr__(self):\n return self.string\n\n def encode(self):\n if self.huffnam_tabel != {} or self.string == '':\n return self.string\n for symbol, frequency in Counter(self.string).items():\n self.tree.append((frequency, Leaf(symbol)))\n self._sort()\n\n while len(self.tree) > 1:\n frequency_left, left = self.tree.popleft()\n frequency_right, right = self.tree.popleft()\n self._insert((frequency_left + frequency_right, Node(right, left)))\n\n root: Node = Node(self.tree[0][1].right, self.tree[0][1].left) if isinstance(self.tree[0][1], Node) \\\n else Leaf(self.tree[0][1].symbol)\n root.step(self.huffnam_tabel, \"\")\n self.tree.clear()\n self.string = \"\".join(self.huffnam_tabel[c] for c in self.string)\n return self.string\n\n def decode(self):\n if self.huffnam_tabel == {} or self.string == '':\n return self.string\n decoded = ''\n symbol = ''\n for c in self.string:\n symbol += c\n for key in self.huffnam_tabel:\n if self.huffnam_tabel[key] == symbol:\n decoded += key\n symbol = ''\n break\n self.huffnam_tabel = {}\n self.string = decoded\n return self.string\n\n def _sort(self):\n n = 0\n while len(self.tree) > n:\n for i in range(len(self.tree) - 1 - n):\n if self.tree[i][0] > self.tree[i + 1][0]:\n self.tree[i], self.tree[i + 1] = self.tree[i + 1], self.tree[i]\n n += 1\n\n def _insert(self, obj):\n i = 0\n while i < len(self.tree) and self.tree[i][0] < obj[0]:\n i += 1\n if i < len(self.tree):\n self.tree.insert(i, obj)\n else:\n self.tree.append(obj)\n\n\ndef _joke():\n for _ in range(6):\n print('.', end='')\n sys.stdout.flush()\n time.sleep(0.3)\n return True\n\n\ns = input('Введите секретное сообщение: ')\ns = Huffman(s)\n\nprint('Ахалай махалай, сяськи масяськи')\n_joke()\ns.encode()\nprint(f'\\nКодовое сообещние \\n 
{s}')\n\nprint('Раскодируем')\n_joke()\ns.decode()\nprint(f'\\nИсходное сообщение \\n {s}')\n\n\ndef test_huffman(n, _len):\n for _ in range(n):\n test_string = \"\".join(random.choice(string.ascii_uppercase + string.ascii_lowercase +\n string.digits) for _ in range(_len))\n code = Huffman(test_string)\n code.encode()\n if test_string != code.decode():\n print(test_string)\n print(code)\n print(\"Test Fail\")\n print(\"Test OK\")\n\ntest_huffman(100, 1000)\n","sub_path":"Lesson8/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"75231920","text":"# -*- coding: utf-8 -*-\nfrom app import app\nfrom flask import *\nfrom app.models.Email import Email\nfrom app.models.banco.Usuario import Usuario\nfrom app.models.form.login_usuario import LoginForm\nfrom app.models.form.cadastro_usuario import CadastroForm\nfrom app.models.form.editar_usuario import EditarForm\nfrom flask_login import login_user, login_required, logout_user, current_user\nfrom hashlib import md5\n\nusuario_bp = Blueprint('usuario', __name__, url_prefix='/usuario')\n\n@usuario_bp.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n \n if form.validate_on_submit():\n email = form.email.data\n senha = md5(form.senha.data.encode())\n usuario = Usuario.query.filter_by(email = email).first()\n\n if usuario:\n if usuario.senha == senha.hexdigest():\n login_user(usuario)\n else:\n flash(u'Senha inválida!', 'danger')\n else:\n flash(u'Usuário inválido!', 'danger')\n\n return redirect('/produto')\n\n@usuario_bp.route('/cadastro', methods=['GET', 'POST'])\ndef cadastro():\n form = CadastroForm()\n\n if form.validate_on_submit():\n nome = form.nome.data\n email = form.email.data\n senha = md5((form.senha.data).encode())\n conf_senha = md5((form.conf_senha.data).encode())\n endereco = form.endereco.data\n cpf = form.cpf.data\n data_nasc = form.data_nasc.data\n \n if senha.hexdigest() == conf_senha.hexdigest():\n novo_usuario = Usuario(nome = nome, email = email, senha = senha.hexdigest(), endereco = endereco, cpf = cpf, data_nasc = data_nasc)\n \n cadastro_usuario(novo_usuario)\n login_user(novo_usuario)\n else:\n flash(u'Ocorreu um problema ao tentar cadastrar usuário, as senhas não coincidem!', 'danger')\n\n return redirect('/produto')\n\n@usuario_bp.route('/funcionario/cadastro', methods=['GET', 'POST'])\n@login_required\ndef cadastro_funcionario():\n if current_user.cargo == 'administrador':\n form = CadastroForm()\n\n if request.method == 'POST':\n nome = request.form['nome']\n email = request.form['email']\n senha = md5((request.form['senha']).encode())\n conf_senha = md5((request.form['conf_senha']).encode())\n endereco = request.form['endereco']\n cpf = request.form['cpf']\n data_nasc = request.form['data_nasc']\n cargo = 'funcionario'\n \n if senha.hexdigest() == conf_senha.hexdigest():\n novo_usuario = Usuario(nome = nome, email = email, senha = senha.hexdigest(), endereco = endereco, cpf = cpf, data_nasc = data_nasc, cargo = cargo)\n \n cadastro_usuario(novo_usuario)\n return redirect(\"/produto\")\n else:\n flash(u'Ocorreu um problema ao tentar cadastrar funcionário, as senhas não coincidem!', 'danger')\n\n return render_template('adicionarfuncionario.html', form=form, titulo='Adicionar Funcionario')\n else:\n return redirect('/produto')\n\ndef cadastro_usuario(usuario):\n usuario_foi_cadastrado = Usuario.salvar(usuario)\n\n if usuario_foi_cadastrado:\n flash(u'Usuário 
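# A sketch of the hashing convention used throughout this blueprint: the
# database stores md5 hex digests, so every comparison is digest-to-digest.
# (md5 mirrors the record; for new code a salted scheme such as
# werkzeug.security.generate_password_hash would be the safer choice.)
from hashlib import md5

stored = md5("s3cret".encode()).hexdigest()
assert md5("s3cret".encode()).hexdigest() == stored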
cadastrado com sucesso!', 'success') \n\n if Email.send_verificacao_email(usuario.email):\n flash(u'Email de verificação enviado com sucesso!', 'success') \n else: \n flash(u'Falha ao enviar email de verificação, tente novamente em outro momento!', 'danger')\n\n else: \n flash(u'Ocorreu um problema ao tentar cadastrar usuário, tente novamente!', 'danger')\n\n return redirect('/produto')\n\n@usuario_bp .route('/funcionario/listar', methods=['GET'])\n@login_required\ndef listar():\n if current_user.cargo == 'administrador':\n funcionarios = Usuario.query.filter_by(cargo='funcionario')\n\n return render_template('buscas/funcionarios.html', funcionarios = funcionarios)\n else:\n flash(u'Você não tem permissão para acessar esta rota!', 'danger')\n\n return redirect('/produto')\n\n@usuario_bp.route('/editar', methods=['GET', 'POST'])\n@login_required\ndef editar_usuario():\n form = EditarForm()\n\n form.nome.data = current_user.nome\n form.email.data = current_user.email\n form.endereco.data = current_user.endereco\n form.cpf.data = current_user.cpf\n form.data_nasc.data = current_user.data_nasc\n\n if request.method == 'POST':\n usuario = Usuario.query.get(current_user.id)\n usuario.nome = request.form['nome']\n usuario.email = request.form['email']\n usuario.endereco = request.form['endereco']\n usuario.cpf = request.form['cpf']\n usuario.data_nasc = request.form['data_nasc']\n senha = request.form['senha']\n conf_senha = request.form['conf_senha']\n\n if senha.strip() and conf_senha.strip():\n senha_md5 = md5(senha.encode())\n conf_senha_md5 = md5(conf_senha.encode())\n\n if senha_md5.hexdigest() == conf_senha_md5.hexdigest():\n usuario.senha = senha_md5.hexdigest()\n else:\n flash(u'Ocorreu um problema ao tentar alterar funcionário, as senhas não coincidem!', 'danger')\n\n usuario_foi_salvo = Usuario.salvar(usuario)\n\n if usuario_foi_salvo:\n flash(u'Usuario alterado com sucesso!', 'success')\n\n return redirect('/produto')\n else:\n flash(\n u'Ocorreu um problema ao tentar alterar informacoes, tente novamente!', 'danger')\n\n return render_template('adicionarfuncionario.html', form=form, titulo='Editar')\n\n return render_template('adicionarfuncionario.html', form = form, titulo='Editar')\n\n@usuario_bp.route('/editar/', methods=['GET', 'POST'])\ndef editar_funcionario(id = False):\n form = EditarForm()\n usuario = Usuario.query.get(id)\n\n if current_user.cargo == 'administrador':\n if usuario:\n form.nome.data = usuario.nome\n form.email.data = usuario.email\n form.endereco.data = usuario.endereco\n form.cpf.data = usuario.cpf\n form.data_nasc.data = usuario.data_nasc\n\n if request.method == 'POST':\n usuario.nome = request.form['nome']\n usuario.email = request.form['email']\n usuario.endereco = request.form['endereco']\n usuario.cpf = request.form['cpf']\n usuario.data_nasc = request.form['data_nasc']\n senha = request.form['senha']\n conf_senha = request.form['conf_senha']\n\n if senha.strip() and conf_senha.strip():\n senha_md5 = md5(senha.encode())\n conf_senha_md5 = md5(conf_senha.encode())\n\n if senha_md5.hexdigest() == conf_senha_md5.hexdigest():\n usuario.senha = senha_md5.hexdigest()\n else:\n flash(u'Ocorreu um problema ao tentar alterar funcionário, as senhas não coincidem!', 'danger')\n\n usuario_foi_salvo = Usuario.salvar(usuario)\n\n if usuario_foi_salvo:\n flash(u'Funcionário alterado com sucesso!', 'success')\n\n return redirect('/produto')\n else:\n flash(u'Ocorreu um problema ao tentar alterar informacoes, tente novamente!', 'danger')\n\n return 
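# A minimal sketch of how the blueprint's url_prefix composes with the
# route decorators in this file: '/usuario' + '/login' is served at
# '/usuario/login'. The handler body is a placeholder.
from flask import Blueprint

bp = Blueprint('usuario', __name__, url_prefix='/usuario')

@bp.route('/login', methods=['GET', 'POST'])
def login():
    return 'login'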
render_template('adicionarfuncionario.html', form=form, titulo='Editar')\n \n else:\n flash(u'Ocorreu um problema ao tentar buscar o usuário, tente novamente!', 'danger')\n\n return redirect('/funcionario/listar')\n\n return render_template('adicionarfuncionario.html', form = form, titulo='Editar')\n else:\n flash(u'Você não tem permissão para acessar esta rota!', 'danger')\n\n return redirect('/produto')\n\n@usuario_bp.route('/deletarconta')\n@login_required\ndef excluir_conta(id = False):\n id_usuario = current_user.id\n\n if Usuario.excluir(id_usuario):\n logout_user()\n flash(u'Sua conta foi excluida com sucesso!', 'success')\n else:\n flash(u'Falha ao excluir sua conta!', 'danger')\n \n return redirect('/produto')\n\n@usuario_bp.route('/deletarconta/')\n@login_required\ndef excluir_conta_outro_user(id = False):\n if id and current_user.cargo == 'administrador':\n if Usuario.excluir(id):\n flash(u'A conta foi excluida com sucesso!', 'success')\n else:\n flash(u'Erro ao excluir conta!', 'danger')\n\n return redirect('/usuario/funcionario/listar')\n else:\n flash(u'Você não tem permissão para excluir contas de terceiros!', 'danger')\n \n return redirect('/produto')\n\n\n@usuario_bp.route('/logout')\n@login_required\ndef logout():\n logout_user()\n \n return redirect('/produto')\n","sub_path":"app/controllers/usuario_bp.py","file_name":"usuario_bp.py","file_ext":"py","file_size_in_byte":8918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"371509683","text":"#!/usr/bin/python3\n#coding=utf-8\nimport datetime as dt\nimport logging\nimport traceback\nimport random\nimport urllib3,json,time,sys,os,requests\n\nTG_BOT_TOKEN = os.environ[\"TG_BOT_TOKEN\"] # telegram bot token 自行申请\nTG_USER_ID = os.environ[\"TG_USER_ID\"] # telegram 用户ID\nTT_WEEK = os.environ[\"TT_WEEK\"]\n\ndef telegram_bot(title, content):\n print(\"\\n\")\n tg_bot_token = TG_BOT_TOKEN\n tg_user_id = TG_USER_ID\n if \"TG_BOT_TOKEN\" in os.environ and \"TG_USER_ID\" in os.environ:\n tg_bot_token = os.environ[\"TG_BOT_TOKEN\"]\n tg_user_id = os.environ[\"TG_USER_ID\"]\n if not tg_bot_token or not tg_user_id:\n print(\"Telegram推送的tg_bot_token或者tg_user_id未设置!!\\n取消推送\")\n return\n print(\"Telegram 推送开始\")\n send_data = {\"chat_id\": tg_user_id, \"text\": title +'\\n\\n'+content, \"disable_web_page_preview\": \"true\"}\n response = requests.post(\n url ='https://api.telegram.org/bot%s/sendMessage' % (tg_bot_token), data=send_data)\n print(response.text)\n\ndef HandleException( excType, excValue, tb):\n\tErrorMessage = traceback.format_exception(excType, excValue, tb) # 异常信息\n\tlogging.exception('ErrorMessage: %s' % ErrorMessage) # 将异常信息记录到日志中\n\tstr=\"\"\n\tfor item in ErrorMessage:\n\t\tstr=str+item\n\ttelegram_bot(\"[甜糖星愿]程序错误警报\",\"\\nErrorMessage:%s\\n\" %str)\n\treturn\n\nsys.excepthook = HandleException #全局错误异常处理!\n\nlogging.basicConfig(filename = '/AutomationTTnode/sendTTnodeMSG.log',format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s', level = logging.DEBUG)\nlogging.debug(\"日志开始\")\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n####################以下内容请不要乱动,程序写得很菜,望大佬手下留情#########################################\ndevices = ''\ninactivedPromoteScore = 0\ntotal = 0\naccountScore = 0\nmsgTitle = \"[甜糖星愿]星愿日结详细\"\nmsg = \"\\n\"\n\ndef getInitInfo():#甜糖用户初始化信息,可以获取待收取的推广信息数,可以获取账户星星数\n url = \"http://tiantang.mogencloud.com/web/api/account/message/loading\"\n header = 
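# A sketch of the global exception-hook pattern this script installs: any
# uncaught exception is formatted with traceback.format_exception and
# pushed to a notifier before the interpreter exits. print() stands in for
# the telegram_bot() call.
import sys
import traceback

def handle_exception(exc_type, exc_value, tb):
    message = "".join(traceback.format_exception(exc_type, exc_value, tb))
    print("would notify:", message)

sys.excepthook = handle_exception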
{\"Content-Type\":\"application/json\",\"authorization\":authorization}\n http = urllib3.PoolManager()\n response = http.request('POST', url,headers = header)\n if response.status != 200:\n print(\"getInitInfo方法请求失败,结束程序\")\n logging.debug(\"getInitInfo方法请求失败,结束程序\")\n raise Exception(\"响应状态码:\"+str(response.status)+\"\\n请求url:\"+url+\"\\n消息:API出现异常,请暂停使用程序!\")\n data = response.data.decode('utf-8')\n data = json.loads(data)\n if data['errCode']!=0:\n print(\"发送推送TG Bot,authorization已经失效\")\n telegram_bot(\"[甜糖星愿]-Auth失效通知\",\"#### authorization已经失效,请通过手机号码和验证码进行重新生成配置\"+end)\n exit()\n data=data['data']\n\n return data\n\ndef getDevices():#获取当前设备列表,可以获取待收的星星数\n url = \"http://tiantang.mogencloud.com/api/v1/devices?page=1&type=2&per_page=200\"\n header = {\"Content-Type\":\"application/json\",\"authorization\":authorization}\n http = urllib3.PoolManager()\n response = http.request('GET', url,headers = header)\n if response.status != 200:\n print(\"getDevices方法请求失败,结束程序\")\n logging.debug(\"getDevices方法请求失败,结束程序\")\n raise Exception(\"响应状态码:\" + str(response.status) + \"\\n请求url:\" + url + \"\\n消息:API出现异常,请暂停使用程序!\")\n data = response.data.decode('utf-8')\n data = json.loads(data)\n if data['errCode']!=0:\n raise Exception(\"响应状态码:\" + str(response.status) + \"\\n请求url:\" + url + \"\\n消息:API可能已经变更,请暂停使用程序!\")\n\n\n data=data['data']['data']\n if len(data) == 0:\n telegram_bot(\"[甜糖星愿]请绑定通知\",\"#### 该账号尚未绑定设备,请绑定设备后再运行\")\n exit()\n return data\n\n\n\ndef promote_score_logs(score):#收取推广奖励星星\n global msg\n if score == 0:\n msg = msg + \"\\n [推广奖励]0-🌟\\n\"\n return\n url = \"http://tiantang.mogencloud.com/api/v1/promote/score_logs\"\n header = {\"Content-Type\":\"application/json\",\"authorization\":authorization}\n body_json = {'score':score}\n encoded_body = json.dumps(body_json).encode('utf-8')\n http = urllib3.PoolManager()\n response = http.request('POST', url,body = encoded_body,headers = header)\n if response.status != 201 and response.status != 200:\n print(\"promote_score_logs方法请求失败,结束程序\")\n logging.debug(\"promote_score_logs方法请求失败,结束程序\")\n raise Exception(\"响应状态码:\" + str(response.status) + \"\\n请求url:\" + url + \"\\n消息:API出现异常,请暂停使用程序!\")\n data = response.data.decode('utf-8')\n data = json.loads(data)\n\n if data['errCode'] != 0:\n msg = msg + \"\\n [推广奖励]0-🌟(收取异常)\\n\"\n return\n msg = msg + \"\\n [推广奖励]\" + str(score) + \"-🌟\\n\"\n global total\n total = total + score\n data = data['data']\n #发送微信推送,啥设备,获取了啥星星数\n return\n\ndef score_logs(device_id,score,name):#收取设备奖励\n global msg\n if score == 0:\n msg = msg + \"\\n [\" + name + \"]0-🌟\\n\"\n return\n url = \"http://tiantang.mogencloud.com/api/v1/score_logs\"\n header = {\"Content-Type\":\"application/json\",\"authorization\":authorization}\n body_json = {'device_id':device_id,'score':score}\n encoded_body = json.dumps(body_json).encode('utf-8')\n http = urllib3.PoolManager()\n response = http.request('POST', url,body = encoded_body,headers = header)\n if response.status != 201 and response.status != 200:\n print(\"score_logs方法请求失败,结束程序\")\n logging.debug(\"score_logs方法请求失败,结束程序\")\n raise Exception(\"响应状态码:\" + str(response.status) + \"\\n请求url:\" + url + \"\\n消息:API出现异常,请暂停使用程序!\")\n data = response.data.decode('utf-8')\n data = json.loads(data)\n\n if data['errCode'] != 0:\n msg = msg + \"\\n [\" + name + \"]0-🌟(收取异常)\\n\"\n return\n msg = msg + \"\\n [\" + name + \"]\" + str(score) + \"-🌟\\n\"\n global total\n total = total + int(score)\n data = data['data']\n #发送微信推送,啥设备,获取了啥星星数\n return\n\ndef sign_in():#签到功能\n\turl = 
\"http://tiantang.mogencloud.com/web/api/account/sign_in\"\n\theader = {\"Content-Type\":\"application/json\",\"authorization\":authorization}\n\thttp = urllib3.PoolManager()\n\tresponse = http.request('POST', url,headers = header)\n\tif response.status != 201 and response.status != 200:\n\t\tprint(\"sign_in方法请求失败,结束程序\")\n\t\tlogging.debug(\"sign_in方法请求失败,结束程序\")\n\t\traise Exception(\"响应状态码:\" +str(response.status) + \"\\n请求url:\" + url + \"\\n消息:API出现异常,请暂停使用程序!\")\n\tdata = response.data.decode('utf-8')\n\tdata = json.loads(data)\n\tglobal msg\n\n\tif data['errCode']!=0:\n\t\tmsg = msg + \"\\n [签到奖励]0-🌟(失败:\" + data['msg'] + \")\\n\"\n\t\treturn\n\n\tmsg = msg + \"\\n [签到奖励]\" + str(data['data']) + \"-🌟 \\n\"\n\tglobal total\n\ttotal = total + data['data']\n\treturn\n\ndef readConfig(filePath):#读取配置文件\n\ttry:\n\t\tfile = open(filePath,\"a+\",encoding = \"utf-8\",errors = \"ignore\")\n\t\tfile.seek(0)\n\t\tresult = file.read()\n\tfinally:\n\t\tif file:\n\t\t\tfile.close()\n\t\t\tprint(\"文件流已经关闭\")\n\treturn result\n\ndef zfb_withdraw(bean):#支付宝提现\n url = \"http://tiantang.mogencloud.com/api/v1/withdraw_logs\"\n score = bean[\"score\"]\n score = score-score%100\n real_name = bean[\"real_name\"]\n card_id = bean[\"card_id\"]\n bank_name =\"支付宝\"\n sub_bank_name = \"\"\n type=\"zfb\"\n \n if score < 1000:\n return \"\\n[自动提现]支付宝提现失败,星愿数不足1000\\n\"\n if score >= 10000:\n score = 9900\n body_json = \"score=\" + str(score) + \"&real_name=\" + real_name + \"&card_id=\" + card_id + \"&bank_name=\" + bank_name + \"&sub_bank_name=\" + sub_bank_name + \"&type=\" + type\n encoded_body = body_json.encode('utf-8')\n header = {\"Content-Type\":\"application/x-www-form-urlencoded;charset=UTF-8\",\"authorization\":authorization}\n http = urllib3.PoolManager()\n response = http.request('POST', url,body = encoded_body,headers = header)\n if response.status != 201 and response.status != 200:\n logging.debug(\"withdraw_logs方法请求失败\")\n return \"\\n[自动提现]支付宝提现失败,请关闭自动提现等待更新并及时查看甜糖客户端app的账目\\n\"\n\n data = response.data.decode('utf-8')\n data = json.loads(data)\n if data['errCode'] == 403002:\n logging.debug(\"\\n####[自动提现]支付宝提现失败,\" + data['msg'] + \"\\n\")\n return \"\\n[自动提现]支付宝提现失败,\" + data['msg'] + \"\\n\"\n if data['errCode'] != 0:\n print(\"\" + data['msg'] + str(score))\n logging.debug(\"\" + data['msg'] + str(score))\n return \"\\n[自动提现]支付宝提现失败,请关闭自动提现等待更新并及时查看甜糖客户端app的账目\\n\"\n\n data = data['data']\n zfbID = data['card_id']\n pre = zfbID[0:4]\n end = zfbID[len(zfbID)-4:len(zfbID)]\n zfbID = pre + end\n return \"\\n[自动提现]扣除\" + str(score) + \"-🌟\\n-------\\t提现方式:支付宝\\n-------\\t支付宝号:\" + zfbID + \"\\n\"\n \ndef withdraw_type(userInfo):#根据用户是否签约来决定提现方式\n\tbean={}\n\tzfbList=userInfo['zfbList']#获取支付宝列表\n\tif len(zfbList)==0:\n\t\twithdraw_str=\"\\n####[自动提现]支付提现失败,原因是未绑定支付宝号,请绑定支付宝账户\\n\"\n\t\treturn withdraw_str\n\telse:\n\t\tbean[\"score\"]=userInfo['score']\n\t\tbean[\"real_name\"]=zfbList[0]['name']\n\t\tbean[\"card_id\"]=zfbList[0]['account']\n\t\twithdraw_str=zfb_withdraw(bean)\n\t\treturn withdraw_str\n\n\t\nif __name__ == \"__main__\":\n config = readConfig(\"/AutomationTTnode/ttnodeConfig.config\")\n\n print(\"config:\" + config)\n\n if len(config) == 0:\n print(\"错误提示ttnodeConfig.config为空,请重新运行ttnodeconfig.py\")\n logging.debug(\"错误提示ttnodeConfig.config为空,请重新运行ttnodeconfig.py\")\n exit()\n\n config=eval(config)#转成字典\n authorization=config.get(\"authorization\",\"\")\n\n if len(authorization)==0:\n print(\"错误提示authorization为空,请重新运行ttnodeconfig.py\")\n exit()\n\n authorization = 
authorization.strip()\n week=int(os.environ[\"TT_WEEK\"])\n end=\"\\n注意:以上统计仅供参考,一切请以甜糖客户端APP为准\\n\"\n #错峰延时执行\n sleep_time = random.randint(1,100)\n print(\"错峰延时执行\" + str(sleep_time) + \"秒,请耐心等待\")\n logging.debug(\"错峰延时执行\" + str(sleep_time) + \"秒,请耐心等待\")\n time.sleep(sleep_time)\n\n #获取用户信息\n data=getInitInfo()\n inactivedPromoteScore=data['inactivedPromoteScore']\n accountScore=data['score']\n\n devices=getDevices()#获取设备列表信息\n #获取用户信息\n\n msg=msg+\"\\n[收益详细]:\\n\"\n sign_in()#收取签到收益\n promote_score_logs(inactivedPromoteScore)#收取推广收益\n\n\n\n for device in devices:\n score_logs(device['hardware_id'],device['inactived_score'],device['alias'])#收取设备收益\n time.sleep(1)\n #自动提现\n withdraw = \"\"\n now_week = dt.datetime.now().isoweekday()#获取今天是星期几返回1-7\n now_week = int(now_week)\n\n if week == now_week:\n userInfo = getInitInfo()\n withdraw = withdraw_type(userInfo)\n \n #收益统计并发送TG消息\n total_str = \"\\n[总共收取]\" + str(total) + \"-🌟\\n\"\n nowdata = getInitInfo()\n accountScore = nowdata['score']\n nickName = \"\\n[账户昵称]\" + nowdata['nickName'] + \"\\n\"\n accountScore_str = \"\\n[账户星愿]\" + str(accountScore) + \"-🌟\\n\"\n\n\n now_time = dt.datetime.now().strftime('%F %T')\n now_time_str = \"[当前时间]\" + now_time + \"\\n\"\n msg = now_time_str + nickName+accountScore_str + total_str + withdraw + msg + end\n telegram_bot(msgTitle,msg)\n exit()\n","sub_path":"AutomationTTnode/sendTTnodeMSG.py","file_name":"sendTTnodeMSG.py","file_ext":"py","file_size_in_byte":12341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"200106678","text":"#! /usr/bin/python3\n\nimport traceback\nfrom snack import *\nimport subprocess\nimport re\nimport os\nimport json\nimport mraa\nfrom collections import OrderedDict\n\n\nclass ansicolors:\n clear = '\\033[2J'\n\n\nclass TopMenu:\n def __init__(self):\n self.gscreen = SnackScreen()\n self.boardType = subprocess.check_output('grep -a -o -P \"IOT2050-\\w*\" /proc/device-tree/model',\n shell=True).lstrip().rstrip().decode('utf-8')\n\n def show(self):\n menuItems = [('OS Settings', OsSettingsMenu(self)),\n ('Networking', NetworkingMenu(self)),\n ('Software', SoftwareMenu(self)),\n ('Peripherals', PeripheralsMenu(self))]\n while True:\n action, selection = ListboxChoiceWindow(screen=self.gscreen,\n title='IOT2050 Setup',\n text='',\n items=menuItems,\n buttons=[('Quit', 'quit', 'ESC')])\n if action == 'quit':\n self.close()\n return\n selection.show()\n\n def close(self):\n self.gscreen.finish()\n\n\nclass OsSettingsMenu:\n def __init__(self, topmenu):\n self.topmenu = topmenu\n\n def show(self):\n action, selection = ListboxChoiceWindow(screen=self.topmenu.gscreen,\n title='OS Settings',\n text='',\n items=[('Change Hostname', self.changeHostname),\n ('Change Password', self.changePassword)],\n buttons=[('Back', 'back', 'ESC')])\n\n if action == 'back':\n return\n selection()\n\n def changeHostname(self):\n currentHostname = subprocess.check_output('hostname').decode('utf-8').lstrip().rstrip()\n action, text = EntryWindow(screen=self.topmenu.gscreen,\n title='Change Host Name',\n text='',\n prompts=[('Host Name:', currentHostname)],\n width=70,\n entryWidth=50,\n buttons=[('OK'), ('Cancel', 'cancel', 'ESC')])\n if action == 'cancel':\n return\n subprocess.call('hostname ' + text[0].lstrip().rstrip(), shell=True)\n with open('/etc/hostname', 'w') as textfile:\n textfile.write(text[0].lstrip().rstrip())\n\n def changePassword(self):\n self.topmenu.close()\n print(ansicolors.clear) # Clear console\n 
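# A sketch of the weekday gate used for the withdraw step above:
# isoweekday() returns 1 for Monday through 7 for Sunday, so TT_WEEK picks
# the one day per week on which the withdrawal runs.
import datetime as dt

week = 5  # e.g. Fridays; the script reads this from the TT_WEEK env var
if dt.datetime.now().isoweekday() == week:
    print('run withdraw')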
subprocess.call('passwd', shell=True)\n exit()\n\n\nclass NetworkingMenu:\n def __init__(self, topmenu):\n self.topmenu = topmenu\n\n def show(self):\n subprocess.call('nmtui', shell=True, stderr=open(os.devnull, 'wb'))\n\n\nclass SoftwareMenu:\n def __init__(self, topmenu):\n self.topmenu = topmenu\n\n def show(self):\n action, selection = ListboxChoiceWindow(screen=self.topmenu.gscreen,\n title='Software',\n text='',\n items=[('Manage Autostart Options', self.changeAutostart)],\n buttons=[('Back', 'back', 'ESC')])\n if action == 'back':\n return\n selection()\n\n def changeAutostart(self):\n sshEnabled = 'enabled' in subprocess.Popen('systemctl is-enabled ssh',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=open(os.devnull, 'wb')).stdout.read().decode('utf-8')\n mosquittoAutostartEnabled = 'enabled' in subprocess.Popen('systemctl is-enabled mosquitto',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=open(os.devnull, 'wb')).stdout.read().decode('utf-8')\n noderedAutostartEnabled = 'enabled' in subprocess.Popen('systemctl is-enabled node-red',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=open(os.devnull, 'wb')).stdout.read().decode('utf-8')\n buttonbar = ButtonBar(screen=self.topmenu.gscreen, buttonlist=[('Ok', 'ok'), ('Cancel', 'cancel', 'ESC')])\n ct = CheckboxTree(height=4, scroll=0)\n ct.append('SSH Server Enabled', selected=sshEnabled)\n ct.append('Auto Start Mosquitto Broker', selected=mosquittoAutostartEnabled)\n ct.append('Auto Start node-red', selected=noderedAutostartEnabled)\n g = GridForm(self.topmenu.gscreen, 'Advanced Options', 1, 2)\n g.add(ct, 0, 0)\n g.add(buttonbar, 0, 1)\n result = g.runOnce()\n if buttonbar.buttonPressed(result) == 'cancel':\n return\n selectedOptions = ct.getSelection()\n sshEnabledNew = 'SSH Server Enabled' in selectedOptions\n mosquittoAutostartEnabledNew = 'Auto Start Mosquitto Broker' in selectedOptions\n noderedAutostartEnabledNew = 'Auto Start node-red' in selectedOptions\n if sshEnabled != sshEnabledNew:\n self.changeServiceSetting('ssh', sshEnabledNew)\n if mosquittoAutostartEnabled != mosquittoAutostartEnabledNew:\n self.changeServiceSetting('mosquitto', mosquittoAutostartEnabledNew)\n if noderedAutostartEnabled != noderedAutostartEnabledNew:\n self.changeServiceSetting('node-red', noderedAutostartEnabledNew)\n\n def changeServiceSetting(self, name, status):\n if status:\n subprocess.call('systemctl enable ' + name, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))\n subprocess.call('systemctl start ' + name, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))\n else:\n subprocess.call('systemctl stop ' + name, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))\n subprocess.call('systemctl disable ' + name, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))\n\n\nclass PeripheralsMenu:\n def __init__(self, topmenu):\n self.topmenu = topmenu\n self.configureFile = '/etc/board-configuration.json'\n self.config = self.getConfig()\n\n def show(self):\n while True:\n action, selection = ListboxChoiceWindow(screen=self.topmenu.gscreen,\n title='Peripherals',\n text='',\n items=[('Configure External COM Ports', self.configureExternalSerialMode),\n ('Configure Arduino I/O', self.configureArduinoIoMode)],\n buttons=[('Back', 'back', 'ESC')])\n if action == 'back':\n return\n selection()\n\n def getConfig(self):\n with open(self.configureFile, 'r') as f:\n config = json.load(f, object_pairs_hook=OrderedDict)\n return config\n\n def saveConfig(self, 
jsonSrc):\n with open(self.configureFile, 'w') as f:\n json.dump(jsonSrc, f, indent=4, separators=(',', ': '))\n\n def pinmuxArray(self, index):\n return self.config['Arduino_pinmux_map']['IO' + str(index)]\n\n def _pullMode(self, index):\n return self.config['User_configuration']['IO' + str(index) + '_PULL_MODE']\n\n def pullMode(self, index):\n pullModeMap = {'Hiz': mraa.MODE_HIZ,\n 'Pull-up': mraa.MODE_PULLUP,\n 'Pull-down': mraa.MODE_PULLDOWN}\n return pullModeMap[self._pullMode(index)]\n\n def _setPinmuxOfUserConfig(self, pinmux, index):\n self.config['User_configuration']['IO' + str(index) + '_MODE'] = pinmux\n\n def _pinmuxOfUserConfig(self, index):\n return self.config['User_configuration']['IO' + str(index) + '_MODE']\n\n def setPinmuxOfUserConfig(self, pinmux, index=None):\n if index is not None:\n self._setPinmuxOfUserConfig(pinmux, index)\n else:\n for i in range(0, 20):\n pinmuxes = self.pinmuxArray(i)\n if pinmux in ' '.join(pinmuxes):\n self._setPullModeOfUserConfig(i, 'Hiz')\n self._setPinmuxOfUserConfig([n for n in pinmuxes if pinmux in n][0], i)\n\n def resetPinmuxOfUserConfig(self, pinmux):\n for i in range(0, 20):\n pinmuxes = self.pinmuxArray(i)\n if pinmux in ' '.join(pinmuxes):\n defaultPinmux = pinmuxes[0]\n self._setPinmuxOfUserConfig(defaultPinmux, i)\n self._setPullModeOfUserConfig(i, 'Hiz')\n if 'GPIO' in defaultPinmux:\n self.setPinmuxToGpio(defaultPinmux, i)\n elif 'IIC_SDA' in defaultPinmux:\n mraa.I2c(0)\n\n def setPinmuxToGpio(self, gpioPinmux, index):\n direction = gpioPinmux.split('_')[1].lstrip().rstrip().lower()\n gpio = mraa.Gpio(index)\n gpio.dir(mraa.DIR_OUT if direction == 'output' else mraa.DIR_IN)\n if direction == 'output':\n gpio.write(0)\n gpio.mode(self.pullMode(index))\n\n def checkPinmuxConfig(self, pinmux, index=None):\n for i in range(0, 20):\n if index is None or index == i:\n if pinmux in self._pinmuxOfUserConfig(i):\n return 1\n return 0\n\n def checkPullModeConfig(self, pullmode, index):\n if pullmode in self._pullMode(index):\n return 1\n return 0\n\n def _setPullModeOfUserConfig(self, index, pullMode):\n self.config['User_configuration']['IO' + str(index) + '_PULL_MODE'] = pullMode\n\n def getDirection(self, index):\n if self.checkPinmuxConfig('GPIO_Input', index):\n return 'Input'\n elif self.checkPinmuxConfig('GPIO_Output', index):\n return 'Output'\n else:\n return '--'\n\n def getPullMode(self, index):\n if self.checkPullModeConfig('Pull-down', index):\n return 'Pull-down'\n elif self.checkPullModeConfig('Pull-up', index):\n return 'Pull-up'\n elif self.checkPullModeConfig('Hiz', index):\n return 'Hiz'\n\n def configureArduinoGpio(self):\n gpioIndex = 0\n dirIndex = 0\n pullmodeIndex = 0\n while True:\n gm = GridForm(self.topmenu.gscreen, # screen\n \"Enable GPIO\", # title\n 1, 27) # 27x1 grid\n g = GridForm(self.topmenu.gscreen, # screen\n \"Enable GPIO\", # title\n 4, 2) # 2x4 grid\n gm.add(Label('Gpio | Direction | Pull Mode'), 0, 0)\n gm.add(Label('-------+-----------+----------'), 0, 1)\n for i in range(0, 20):\n gpio = 'Gpio{:<3}'.format(str(i))\n direction = ' {:<10}'.format(self.getDirection(i))\n pullmode = ' {:<9}'.format(self.getPullMode(i))\n label = '%s|%s|%s' % (gpio, direction, pullmode)\n gm.add(Label(label), 0, i + 2)\n gm.add(Label(' '), 0, 23)\n lbGpio = Listbox(height = 1, scroll = 0, returnExit = 0, width = 6, border = 0)\n for i in range(0, 20):\n lbGpio.append('Gpio' + str(i), i)\n lbGpio.setCurrent(gpioIndex)\n lbDir = Listbox(height = 1, scroll = 0, returnExit = 0, width = 11, border = 0)\n 
lbDir.append('Input', 0)\n lbDir.append('Output', 1)\n lbDir.setCurrent(dirIndex)\n lbPullMode = Listbox(height = 1, scroll = 0, returnExit = 0, width = 10, border = 0)\n lbPullMode.append('Hiz', 0)\n lbPullMode.append('Pull-up', 1)\n lbPullMode.append('Pull-down', 2)\n lbPullMode.setCurrent(pullmodeIndex)\n g.add(Label('Gpio: '), 0, 0)\n g.add(Label('Direction: '), 1, 0)\n g.add(Label('Pull-Mode: '), 2, 0)\n g.add(lbGpio, 0, 1)\n g.add(lbDir, 1, 1)\n g.add(lbPullMode, 2, 1)\n btnOk = ButtonBar(screen = self.topmenu.gscreen, buttonlist = [('Ok', 1)], compact = 1)\n g.add(btnOk, 3, 1)\n gm.add(g, 0, 24)\n gm.add(Label(' '), 0, 25)\n btnBack = ButtonBar(screen = self.topmenu.gscreen, buttonlist = [('Back', 'back', 'ESC')])\n gm.add(btnBack, 0, 26)\n result = gm.runOnce()\n if btnBack.buttonPressed(result) == 'back':\n return\n def selectedPullMode(item):\n if item == 2:\n return 'Pull-down'\n elif item == 1:\n return 'Pull-up'\n elif item == 0:\n return 'Hiz'\n if btnOk.buttonPressed(result) == 1:\n gpioIndex = lbGpio.current()\n dirIndex = lbDir.current()\n pullmodeIndex = lbPullMode.current()\n if dirIndex == 0: # input\n self.setPinmuxOfUserConfig('GPIO_Input', gpioIndex)\n self._setPullModeOfUserConfig(gpioIndex, selectedPullMode(pullmodeIndex))\n self.setPinmuxToGpio('GPIO_Input', gpioIndex)\n elif dirIndex == 1: # output\n self.setPinmuxOfUserConfig('GPIO_Output', gpioIndex)\n self._setPullModeOfUserConfig(gpioIndex, selectedPullMode(pullmodeIndex))\n self.setPinmuxToGpio('GPIO_Output', gpioIndex)\n self.saveConfig(self.config)\n\n def configureArduinoI2c(self):\n btnchoicewind = ButtonChoiceWindow(screen=self.topmenu.gscreen,\n title='Enable I2C on IO18 & IO19',\n text='',\n buttons=['Enable', 'Disable', ('Cancel', 'ESC')],\n width=40)\n if btnchoicewind == 'cancel':\n return\n elif btnchoicewind == 'enable':\n i2c = mraa.I2c(0)\n self.setPinmuxOfUserConfig('I2C')\n elif (btnchoicewind == 'disable') and self.checkPinmuxConfig('I2C'):\n self.resetPinmuxOfUserConfig('I2C')\n self.saveConfig(self.config)\n\n def configureArduinoSpi(self):\n btnchoicewind = ButtonChoiceWindow(screen=self.topmenu.gscreen,\n title='Enable SPI on IO10-IO13',\n text='',\n buttons=['Enable', 'Disable', ('Cancel', 'ESC')],\n width=40)\n if btnchoicewind == 'cancel':\n return\n elif btnchoicewind == 'enable':\n spi = mraa.Spi(0)\n self.setPinmuxOfUserConfig('SPI')\n elif btnchoicewind == 'disable' and self.checkPinmuxConfig('SPI'):\n self.resetPinmuxOfUserConfig('SPI')\n self.saveConfig(self.config)\n\n def configureArduinoUart(self):\n ckboxtree = CheckboxTree(height=2, scroll=0)\n ckboxtree.append(text='RX & TX', item=1, selected=self.checkPinmuxConfig('UART_RX'))\n ckboxtree.append(text='CTS & RTS', item=2, selected=self.checkPinmuxConfig('UART_CTS'))\n buttonbar = ButtonBar(screen=self.topmenu.gscreen, buttonlist=[('Ok', 'ok'), ('Cancel', 'cancel', 'ESC')])\n g = GridForm(self.topmenu.gscreen, # screen\n 'Enable UART on IO0-IO3', # title\n 1, 2) # 2x1 grid\n g.add(ckboxtree, 0, 0)\n g.add(buttonbar, 0, 1)\n result = g.runOnce()\n if buttonbar.buttonPressed(result) == 'cancel':\n return\n\n selected = ckboxtree.getSelection()\n if 1 in selected:\n uart = mraa.Uart(0)\n uart.setFlowcontrol(False, True if 2 in selected else False)\n self.setPinmuxOfUserConfig('UART_RX')\n self.setPinmuxOfUserConfig('UART_TX')\n if 2 in selected:\n self.setPinmuxOfUserConfig('UART_CTS')\n self.setPinmuxOfUserConfig('UART_RTS')\n else:\n self.resetPinmuxOfUserConfig('UART_CTS')\n self.resetPinmuxOfUserConfig('UART_RTS')\n else:\n 
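# A sketch of the mraa calls behind setPinmuxToGpio above: set the
# direction first, give outputs a defined initial level, then apply the
# pull mode. Pin 7 is an arbitrary example index.
import mraa

gpio = mraa.Gpio(7)
gpio.dir(mraa.DIR_OUT)
gpio.write(0)
gpio.mode(mraa.MODE_HIZ)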
self.resetPinmuxOfUserConfig('UART')\n self.saveConfig(self.config)\n\n def configureArduinoPwm(self):\n ckboxtree = CheckboxTree(height=6, scroll=0)\n ckboxtree.append(text='PWM 4', item=4, selected=self.checkPinmuxConfig('PWM_4'))\n ckboxtree.append(text='PWM 5', item=5, selected=self.checkPinmuxConfig('PWM_5'))\n ckboxtree.append(text='PWM 6', item=6, selected=self.checkPinmuxConfig('PWM_6'))\n ckboxtree.append(text='PWM 7', item=7, selected=self.checkPinmuxConfig('PWM_7'))\n ckboxtree.append(text='PWM 8', item=8, selected=self.checkPinmuxConfig('PWM_8'))\n ckboxtree.append(text='PWM 9', item=9, selected=self.checkPinmuxConfig('PWM_9'))\n buttonbar = ButtonBar(screen=self.topmenu.gscreen, buttonlist=[('Ok', 'ok'), ('Cancel', 'cancel', 'ESC')])\n g = GridForm(self.topmenu.gscreen, # screen\n 'Enable PWM on IO4-IO9', # title\n 1, 2) # 1x1 grid\n g.add(ckboxtree, 0, 0)\n g.add(buttonbar, 0, 1)\n result = g.runOnce()\n if buttonbar.buttonPressed(result) == 'cancel':\n return\n selected = ckboxtree.getSelection()\n for n in range(4, 10):\n if n in selected:\n pwm = mraa.Pwm(n)\n self.setPinmuxOfUserConfig('PWM_' + str(n))\n else:\n self.resetPinmuxOfUserConfig('PWM_' + str(n))\n self.saveConfig(self.config)\n\n def configureArduinoAdc(self):\n ckboxtree = CheckboxTree(height=6, scroll=0)\n ckboxtree.append(text='ADC 0', item=0, selected=self.checkPinmuxConfig('ADC_0'))\n ckboxtree.append(text='ADC 1', item=1, selected=self.checkPinmuxConfig('ADC_1'))\n ckboxtree.append(text='ADC 2', item=2, selected=self.checkPinmuxConfig('ADC_2'))\n ckboxtree.append(text='ADC 3', item=3, selected=self.checkPinmuxConfig('ADC_3'))\n ckboxtree.append(text='ADC 4', item=4, selected=self.checkPinmuxConfig('ADC_4'))\n ckboxtree.append(text='ADC 5', item=5, selected=self.checkPinmuxConfig('ADC_5'))\n buttonbar = ButtonBar(screen=self.topmenu.gscreen, buttonlist=[('Ok', 'ok'), ('Cancel', 'cancel', 'ESC')])\n g = GridForm(self.topmenu.gscreen, # screen\n 'Enable ADC on IO14-IO19', # title\n 1, 2) # 1x1 grid\n g.add(ckboxtree, 0, 0)\n g.add(buttonbar, 0, 1)\n result = g.runOnce()\n if buttonbar.buttonPressed(result) == 'cancel':\n return\n selected = ckboxtree.getSelection()\n for n in range(0, 6):\n if n in selected:\n aio = mraa.Aio(n)\n self.setPinmuxOfUserConfig('ADC_' + str(n))\n else:\n self.resetPinmuxOfUserConfig('ADC_' + str(n))\n self.saveConfig(self.config)\n\n def configureArduinoIoMode(self):\n while True:\n self.config = self.getConfig()\n ioInfor = ' Pin | Current | Pinmux\\n -----+------------+-------------------------------------------\\n'\n for i in range(0, 20):\n ioInfor += ' IO{:<3}'.format(str(i))\n ioInfor += '| {:<11}'.format(self.config['User_configuration']['IO' + str(i) + '_MODE'])\n ioInfor += '| ' + ' | '.join(self.config['Arduino_pinmux_map']['IO' + str(i)])\n ioInfor += '\\n'\n action, selection = ListboxChoiceWindow(screen=self.topmenu.gscreen,\n title='Configure Arduino I/O',\n text=ioInfor,\n items=[('Enable GPIO', self.configureArduinoGpio),\n ('Enable I2C on IO18 & IO19', self.configureArduinoI2c),\n ('Enable SPI on IO10-IO13', self.configureArduinoSpi),\n ('Enable UART on IO0-IO3', self.configureArduinoUart),\n ('Enable PWM on IO4-IO9', self.configureArduinoPwm),\n ('Enable ADC on IO14-IO19', self.configureArduinoAdc)],\n buttons=[('Back', 'back', 'ESC')],\n width=68)\n if action == 'back':\n return\n selection()\n\n def configureExternalSerialMode(self):\n self.config = self.getConfig()\n modeItems = ['RS232', 'RS485', 'RS422']\n modeAction, modeSelection = 
ListboxChoiceWindow(screen=self.topmenu.gscreen,\n title='Configure External COM Ports',\n text='Select a mode:',\n items=modeItems,\n buttons=[('Ok', 'ok'), ('Cancel', 'cancel', 'ESC')],\n default=self.currentMode())\n if modeAction == 'cancel':\n return\n switchMode = modeItems[modeSelection]\n self.terminateStatus = ''\n if (switchMode == 'RS485') or (switchMode == 'RS422'):\n self.terminateStatus = self.selectTerminate()\n if self.topmenu.boardType == 'IOT2050-BASIC':\n self.setBasicBoard(switchMode)\n elif self.topmenu.boardType == 'IOT2050-ADVANCED':\n self.setAdvancedBoard(switchMode)\n else:\n return\n terminateOpt = ' -t' if self.terminateStatus == 'on' else ''\n subprocess.call('switchserialmode -m ' + switchMode + terminateOpt, shell=True)\n self.config['User_configuration']['External_Serial_Current_Mode'] = switchMode\n self.saveConfig(self.config)\n subprocess.call('sync', shell=True)\n\n def currentMode(self):\n mode = self.config['User_configuration']['External_Serial_Current_Mode']\n if mode == 'RS232':\n return 0\n elif mode == 'RS485':\n return 1\n elif mode == 'RS422':\n return 2\n return 0\n\n def setAdvancedBoard(self, mode):\n command = ''\n if mode == 'RS232':\n command = 'switchserialmode cp210x -D cp2102n24 -m gpio -v 0'\n elif mode == 'RS485':\n command = 'switchserialmode cp210x -D cp2102n24 -m RS485 -g 1'\n elif mode == 'RS422':\n command = 'switchserialmode cp210x -D cp2102n24 -m gpio -v 1'\n subprocess.call(command, shell=True)\n self.config['User_configuration']['External_Serial_Init_Mode'] = mode\n if self.terminateStatus == 'on' or self.terminateStatus == 'off':\n self.config['User_configuration']['External_Serial_Terminate'] = self.terminateStatus\n self.saveConfig(self.config)\n if mode == 'RS485':\n self.setRS485SetupHoldTime()\n ButtonChoiceWindow(screen=self.topmenu.gscreen,\n title='Note',\n text='You need to power cycle the device for the changes to take effect',\n buttons=['Ok'])\n\n def setRS485SetupHoldTime(self):\n command = 'switchserialmode cp210x -D CP2102N24 -d | grep -o -P \\\"setup-time\\\\(0x\\\\w*\\\\)\\\" | grep -o -P \\\"0x\\\\w*\\\"'\n setup = subprocess.check_output(command, shell=True).lstrip().rstrip().decode('utf-8').lower()\n command = 'switchserialmode cp210x -D CP2102N24 -d | grep -o -P \\\"hold-time\\\\(0x\\\\w*\\\\)\\\" | grep -o -P \\\"0x\\\\w*\\\"'\n hold = subprocess.check_output(command, shell=True).lstrip().rstrip().decode('utf-8').lower()\n\n disSetup = '0xaa' if int(setup, 0) == 0 else setup\n disHold = '0xaa' if int(hold, 0) == 0 else hold\n\n action, values = EntryWindow(screen=self.topmenu.gscreen,\n title='Set The Setup and Hold Time of RS485 Mode',\n text='The setup and hold time will affect the transfer stable in RS485 mode',\n prompts=[('Setup (0x00 ~ 0xffff): ', disSetup), ('Hold (0x00 ~ 0xffff): ', disHold)],\n width=70,\n entryWidth=50,\n buttons=[('OK'), ('Cancel', 'cancel', 'ESC')])\n\n if action == 'cancel':\n return\n\n command = 'switchserialmode cp210x -D cp2102n24'\n if int(setup, 0) != int(values[0], 0):\n command += ' -s ' + values[0]\n if int(hold, 0) != int(values[1], 0):\n command += ' -o ' + values[1]\n if ('-s' in command) or ('-o' in command):\n subprocess.call(command, shell=True)\n\n def setBasicBoard(self, mode):\n persistentReturn = ButtonChoiceWindow(screen=self.topmenu.gscreen,\n title='Configure Serial Mode',\n text='Do you want to make your changes persistent?\\n(Mode setting will be kept after reboot.) 
',\n buttons=[('Yes', 'yes'), ('No', 'no', 'ESC')],\n width=40)\n command = 'switchserialmode ttyuart -D /dev/ttyS2 -m ' + mode\n subprocess.call(command, shell=True)\n if persistentReturn == 'yes':\n self.config['User_configuration']['External_Serial_Init_Mode'] = mode\n if self.terminateStatus == 'on' or self.terminateStatus == 'off':\n self.config['User_configuration']['External_Serial_Terminate'] = self.terminateStatus\n self.saveConfig(self.config)\n\n def currentTerminate(self):\n terminate = self.config['User_configuration']['External_Serial_Terminate']\n if terminate == 'off':\n return 0\n elif terminate == 'on':\n return 1\n else:\n return 0\n\n def selectTerminate(self):\n default = self.config['User_configuration']['External_Serial_Terminate']\n rdgroup = RadioGroup()\n rda = rdgroup.add(title = 'Turn off terminate resistor', value = 0, default = 0 if self.currentTerminate() else 1)\n rdb = rdgroup.add(title = 'Turn on terminate resistor', value = 1, default = self.currentTerminate())\n buttonbar = ButtonBar(screen=self.topmenu.gscreen, buttonlist=[('Ok', 'ok'), ('Cancel', 'cancel', 'ESC')])\n g = GridForm(self.topmenu.gscreen,\n 'Set the terminate resistor',\n 1, 3)\n g.add(rda, 0, 0)\n g.add(rdb, 0, 1)\n g.add(buttonbar, 0, 2)\n result = g.runOnce()\n if buttonbar.buttonPressed(result) == 'cancel':\n return default\n return 'on' if rdgroup.getSelection() else 'off'\n\n\ndef main():\n try:\n mainwindow = TopMenu()\n mainwindow.show()\n except:\n pass\n finally:\n mainwindow.close()\n return ''\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"recipes-app/iot2050setup/files/iot2050setup.py","file_name":"iot2050setup.py","file_ext":"py","file_size_in_byte":28194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"459302495","text":"\n\n#calss header\nclass _VAUNTED():\n\tdef __init__(self,): \n\t\tself.name = \"VAUNTED\"\n\t\tself.definitions = [u'praised often in a way that is considered to be more than acceptable or reasonable: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_vaunted.py","file_name":"_vaunted.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"558056781","text":"# LeetCode\n# Python3\n# -*- coding: utf-8 -*-\n\n# @Problem : 34. 
Search for a Range\n# @Difficulty : Medium\n\n# @AUTHOR : Yvette WANG\n# @Last Edited : 01 01 2018\n\n# @Description\n# Given an array of integers sorted in ascending order, find the starting and ending position of a given target value.\n#\n# Your algorithm's runtime complexity must be in the order of O(log n).\n#\n# If the target is not found in the array, return [-1, -1].\n#\n# For example,\n# Given [5, 7, 7, 8, 8, 10] and target value 8,\n# return [3, 4].\n\n# Version 1 -> Accepted\n# Beats 39%\n\n# possible improvements\n# use recursion\n# more abstract\n\nclass Solution:\n    def searchRange(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n\n        l = 0\n        r = len(nums) - 1\n        i = (l + r) // 2\n\n        while l <= r:\n            if l == r and nums[l] == target:\n                return [l, l]\n            elif l == r:\n                return [-1, -1]\n\n            if nums[i] > target:\n                r = i - 1\n            elif nums[i] < target:\n                l = i + 1\n            else:\n                j = i\n                while j - 1 >= 0 and nums[j - 1] == target:\n                    j -= 1\n                while i + 1 < len(nums) and nums[i + 1] == target:\n                    i += 1\n                return [j, i]\n            i = (l + r) // 2\n\n        return [-1, -1]\n\n\n\nif __name__ == \"__main__\":\n    print(Solution().searchRange([4], 2))\n","sub_path":"34. Search for a Range.py","file_name":"34. Search for a Range.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"243589415","text":"#\n# @lc app=leetcode.cn id=19 lang=python3\n#\n# [19] Remove Nth Node From End of List\n#\n# https://leetcode-cn.com/problems/remove-nth-node-from-end-of-list/description/\n#\n# algorithms\n# Medium (34.16%)\n# Likes: 450\n# Dislikes: 0\n# Total Accepted: 55.1K\n# Total Submissions: 160K\n# Testcase Example: '[1,2,3,4,5]\\n2'\n#\n# Given a linked list, remove the n-th node from the end of the list and return its head.\n# \n# Example:\n# \n# Given linked list: 1->2->3->4->5, and n = 2.\n# \n# After removing the second node from the end, the linked list becomes 1->2->3->5.\n# \n# \n# Note:\n# \n# The given n is guaranteed to be valid.\n# \n# Follow up:\n# \n# Could you try to do this using only one pass?\n# \n#\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n        first, second = head, head\n        for i in range(n):\n            first = first.next\n        if first is None:\n            return head.next\n\n        while first.next is not None:\n            first = first.next\n            second = second.next\n        second.next = second.next.next\n        return head\n","sub_path":"python/19.删除链表的倒数第n个节点.py","file_name":"19.删除链表的倒数第n个节点.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"234048639","text":"#Date: 30/03/2019\r\n#Author: Wellington Mascena\r\n#Description: Read width and height from input to draw a solid rectangle of #\r\n\r\nlargura = int(input(\"enter the width: \"))\r\naltura = int(input(\"enter the height: \"))\r\n\r\n#helper to store the width value\r\naux = 0\r\n\r\nwhile altura > 0:\r\n    altura = altura - 1\r\n    aux = largura\r\n    while aux > 0:\r\n        print(\"#\",end='')\r\n        aux = aux - 1\r\n\r\n    print('')\r\n","sub_path":"s7_retangulo_cheio.py","file_name":"s7_retangulo_cheio.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"639847214","text":"import cv2\nimport numpy as np\n\nMIN_MATCH_COUNT = 10\n\ndef show_match_image(img1, img2):\n    # Initiate SIFT detector\n    sift = cv2.xfeatures2d.SIFT_create()\n    # find the keypoints and descriptors with SIFT\n    kp1, des1 = sift.detectAndCompute(img1,None)\n    
kp2, des2 = sift.detectAndCompute(img2,None)\n    # BFMatcher with default params\n    bf = cv2.BFMatcher()\n    matches = bf.knnMatch(des1,des2, k=2)\n    # Apply ratio test\n    good = []\n    for m,n in matches:\n        if m.distance < 0.75*n.distance:\n\n            good.append(m)\n    # cv2.drawMatchesKnn expects list of lists as matches.\n    good_2 = np.expand_dims(good, 1)\n    matching = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good_2[:20],None, flags=2)\n\n    if len(good)>MIN_MATCH_COUNT:\n        # Get the coordinates of the matched keypoints\n        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n        H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC,5.0)\n        wrap = cv2.warpPerspective(img2, H, (img2.shape[1]+img2.shape[1] , img2.shape[0]+img2.shape[0]))\n        # result = cv2.addWeighted(img1, 0.5, wrap, 0.5, 0)\n        wrap[0:img2.shape[0], 0:img2.shape[1]] = img1\n\n        # rows, cols = np.where(wrap[:,:,0] !=0)\n        # min_row, max_row = min(rows), max(rows) +1\n        # min_col, max_col = min(cols), max(cols) +1\n        # result = wrap[min_row:max_row,min_col:max_col,:]  # crop away the unused black region\n        result = wrap\n\n        cv2.imshow('Match Result', matching)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\n        return matching, result\n\nif __name__ == '__main__':\n    img1 = cv2.imread('data/Rainier1.png')  # queryImage\n    img2 = cv2.imread('data/Rainier3.png')  # trainImage\n    matching, result = show_match_image(img1, img2)","sub_path":"HW2/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"118789624","text":"import numpy as np\nimport pickle\nimport ast\nimport random\nimport os\n\n#random.seed(13) # you get it\n\n\n\nwith open(\"data/robust2004.txt\", \"r\") as f:\n    queries = ast.literal_eval(f.read())\nqueries = list(queries.keys())\n\nrandom.shuffle(queries)\nif \"634\" in queries:\n\tqueries.remove(\"634\")\nif \"672\" in queries:\n\tqueries.remove(\"672\")\n\nprint(len(queries), \" queries.\")\n\ndef split(a, n):\n    k, m = divmod(len(a), n)\n    return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))\n\nfolds = list(split(queries, 5))\n\npickle.dump(folds, open(\"folds.pkl\", \"wb\"))\n\n\n","sub_path":"scripts/prepare_5_folds.py","file_name":"prepare_5_folds.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"125193439","text":"import sys\nimport pycountry\nimport matplotlib.pyplot as plt\nfrom PyQt5.QtWidgets import *\nfrom pytrends.request import TrendReq\npytrend = TrendReq()\n\n\n# Class that creates the GUI and contains most of its features\nclass Window(QMainWindow):\n    def __init__(self):\n        super(Window, self).__init__()\n        self.initUI()\n\n    # System that runs if the user wants interest over time\n    def button_time_clicked(self):\n        list = []\n        text = self.input.text().split(\",\")\n        for ref in text:\n            list.append(ref.strip())\n        # Checks to see if list of searches is empty\n        if list == ['']:\n            self.min_num_alert()\n            return\n        # Checks to see if there is an empty search\n        if '' in list:\n            self.missing_term_alert()\n            return\n        # Checks to see if there are more than 5 search terms\n        if len(list) > 5:\n            self.max_num_alert(len(list))\n            return\n        if self.multi_graph_check.isChecked():  # Bool Variable check\n            pass\n        else:\n            plt.close('all')\n        interest_over_time(list)\n\n    # System that runs if the user wants interest by region\n    def button_region_clicked(self):\n        list = []\n        text = 
self.input.text().split(\",\")\n        for ref in text:\n            list.append(ref.strip())\n        # Checks to see if list of searches is empty\n        if list == ['']:\n            self.min_num_alert()\n            return\n        # Checks to see if there is an empty search\n        if '' in list:\n            self.missing_term_alert()\n            return\n        # Checks to see if there are more than 5 search terms\n        if len(list) > 5:\n            self.max_num_alert(len(list))\n            return\n        if self.multi_graph_check.isChecked():  # Bool Variable check\n            pass\n        else:\n            plt.close('all')\n        region = self.region.currentText()\n        interest_by_region(list, region)\n\n    # System to close all graphs\n    def button_close_graph_clicked(self):\n        plt.close('all')\n\n    # System to close the program\n    def button_close_clicked(self):\n        sys.exit()\n\n    # When people do things that would crash the program, these alerts\n    # are shown\n    # Alert for when someone has an empty search term\n    def missing_term_alert(self):\n        alert = QMessageBox()\n        alert.setWindowTitle(\"ALERT\")\n        alert.setText(\"You have entered a blank search term.\\nPlease add a \"\n                      \"search term(s) and try again\")\n        alert.setIcon(QMessageBox.Critical)\n        alert.setStandardButtons(QMessageBox.Ok)\n        alert.exec()\n\n    # Alert for when a user has more than 5 search terms due\n    # to google only accepting a max of 5\n    def max_num_alert(self, list_num):\n        alert = QMessageBox()\n        alert.setWindowTitle(\"ALERT\")\n        alert.setText(f\"You have entered more than 5 search terms.\"\n                      f\"\\nPlease remove {list_num-5} \"\n                      f\"search term(s) and try again\")\n        alert.setIcon(QMessageBox.Critical)\n        alert.setStandardButtons(QMessageBox.Ok)\n        alert.exec()\n\n    # Alert for when a user has no search term provided\n    def min_num_alert(self):\n        alert = QMessageBox()\n        alert.setWindowTitle(\"ALERT\")\n        alert.setText(\"You have entered 0 search terms.\\n\"\n                      \"Please add a search term(s) and try again\")\n        alert.setIcon(QMessageBox.Critical)\n        alert.setStandardButtons(QMessageBox.Ok)\n        alert.exec()\n\n    # Creates the main gui for the code. (AKA the heart of the code)\n    def initUI(self):\n        self.setGeometry(0, 0, 500, 500)\n        self.setWindowTitle(\"Test\")\n\n        # Creates an info label\n        self.label = QLabel(self)\n        self.label.setText(\"Separate searches by a ,\")\n        self.label.resize(150, 30)\n        self.label.move(150, 0)\n\n        # Creates where you enter the search terms\n        self.input = QLineEdit(self)\n        self.input.setPlaceholderText(\"Enter Search Here\")\n        self.input.resize(150, 30)\n        self.input.move(150, 40)\n\n        # Creates the interest over time button\n        self.button_time = QPushButton(self)\n        self.button_time.setText(\"Search for interest over time\")\n        self.button_time.move(5, 80)\n        self.button_time.resize(200, 30)\n        self.button_time.clicked.connect(self.button_time_clicked)\n\n        # Creates the interest BY region button\n        self.button_region = QPushButton(self)\n        self.button_region.setText(\"Search for interest by region\")\n        self.button_region.move(210, 80)\n        self.button_region.resize(200, 30)\n        self.button_region.clicked.connect(self.button_region_clicked)\n\n        # Creates the close graphs button\n        self.button_close_graph = QPushButton(self)\n        self.button_close_graph.setText(\"Close Graphs\")\n        self.button_close_graph.move(105, 120)\n        self.button_close_graph.clicked.connect(self.button_close_graph_clicked)\n\n        # Creates the best button in the world. 
(The Close Button!!!)\n        self.button_close = QPushButton(self)\n        self.button_close.setText(\"Exit\")\n        self.button_close.move(210, 120)\n        self.button_close.clicked.connect(self.button_close_clicked)\n\n        # Creates the checkbox to allow you to keep graphs open\n        self.multi_graph_check = QCheckBox(self)\n        self.multi_graph_check.setText(\"Keep graphs open\")\n        self.multi_graph_check.resize(200, 30)\n        self.multi_graph_check.move(150, 150)\n\n        # Creates the dropdown box to select a region for interest by region\n        self.region = QComboBox(self)\n        self.region.resize(300, 30)\n        self.region.addItem(\"Worldwide\")\n        for country in pycountry.countries:\n            self.region.addItem(country.name)\n\n\n# Displays the error alert box\ndef error():\n    alert = QMessageBox()\n    alert.setWindowTitle(\"ERROR\")\n    alert.setText(\"There has been an error\")\n    alert.setInformativeText(\"This could be due to there being \"\n                             \"no graph to be displayed\")\n    alert.setIcon(QMessageBox.Critical)\n    alert.setStandardButtons(QMessageBox.Ok)\n    alert.exec()\n\n\n# The interest over time system\ndef interest_over_time(list):\n    pytrend.build_payload(kw_list=list)\n    interest_over_time = pytrend.interest_over_time()\n    interest_over_time.plot(y=list, figsize=(15, 8), kind ='line')\n    plt.ylabel('Search Interest')\n    plt.show()\n\n\n# The interest by region system\ndef interest_by_region(list, region):\n\n    if region == \"Worldwide\":\n        pytrend.build_payload(kw_list=list)\n    else:\n        region = pycountry.countries.get(name=region)\n        print(region.alpha_2)\n        pytrend.build_payload(kw_list=list, geo=region.alpha_2)\n    try:\n        interest_by_region = pytrend.interest_by_region(inc_low_vol=True, resolution='COUNTRY')\n    except:\n        error()\n        return\n    interest_by_region.plot(y=list, figsize=(15, 8), kind ='bar')\n    plt.ylabel('Search Interest')\n    plt.show()\n\n\n# The brain of the main GUI\ndef gui():\n    app = QApplication([])\n    win = Window()\n    win.show()\n    sys.exit(app.exec())\n\n\n# Starts the code\ngui()\n","sub_path":"trends.py","file_name":"trends.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"91613106","text":"from django.shortcuts import render, redirect\nfrom resume_parser import resume_parser\nfrom .models import UserDetails, Competencies, MeasurableResults, Resume, ResumeDetails, UploadResumeModelForm\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, FileResponse, Http404, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom .serializers import UserDetailsSerializer, CompetenciesSerializer, MeasurableResultsSerializer, ResumeSerializer, ResumeDetailsSerializer\nimport os\nimport requests\n\ndef homepage(request):\n    if request.method == 'POST':\n        user = User.objects.get(id=1)\n        UserDetails.objects.filter(user=user).delete()\n        Competencies.objects.filter(user=user).delete()\n        MeasurableResults.objects.filter(user=user).delete()\n        Resume.objects.filter(user=user).delete()\n        ResumeDetails.objects.filter(resume__user=user).delete()\n        file_form = UploadResumeModelForm(request.POST, request.FILES)\n        files = request.FILES.getlist('resume')\n        if file_form.is_valid():\n            for file in files:\n                try:\n                    user = User.objects.get(id=1)\n\n                    # saving the file\n                    resume = Resume(user=user, resume=file)\n                    resume.save()\n                    \n                    # extracting resume entities\n                    parser = 
resume_parser.ResumeParser(os.path.join(settings.MEDIA_ROOT, resume.resume.name))\n data = parser.get_extracted_data()\n \n # User Details\n # resume.name = data.get('name')\n # resume.email = data.get('email')\n # resume.education = get_education(data.get('education'))\n user_details = UserDetails()\n user_details.user = user\n user_details.name = data.get('name')\n user_details.email = data.get('email')\n user_details.mobile_number = data.get('mobile_number')\n user_details.skills = ', '.join(data.get('skills'))\n user_details.years_of_exp = data.get('total_experience')\n user_details.save()\n\n for comp in data.get('competencies'):\n competencies = Competencies()\n competencies.user = user\n competencies.competency = comp\n competencies.save()\n\n for mr in data.get('measurable_results'):\n measurable_results = MeasurableResults()\n measurable_results.user = user\n measurable_results.measurable_result = mr\n measurable_results.save()\n\n # Resume Details\n resume_details = ResumeDetails()\n resume_details.resume = resume\n resume_details.page_nos = data.get('no_of_pages')\n resume_details.save()\n\n # resume.experience = ', '.join(data.get('experience'))\n # measurable_results.append(data.get('measurable_results'))\n # resume.save()\n except IntegrityError:\n messages.warning(request, 'Duplicate resume found:', file.name)\n return redirect('homepage')\n\n resumes = Resume.objects.filter(user=User.objects.get(id=1))\n user_detail = UserDetails.objects.get(user=user)\n messages.success(request, 'Resumes uploaded!')\n\n overall_score = 0\n\n competencies = data.get('competencies')\n measurable_results = data.get('measurable_results')\n\n if competencies and measurable_results:\n overall_score = competencies.get('score') + measurable_results.get('score')\n \n if competencies:\n context = {\n 'resumes': resumes,\n 'competencies': competencies,\n 'measurable_results': measurable_results,\n 'no_of_pages': data.get('no_of_pages'),\n 'total_experience': data.get('total_experience'),\n 'user_details': user_detail,\n 'overall_score': overall_score\n }\n else:\n context = {\n 'resumes': resumes,\n 'competencies': [],\n 'measurable_results': [],\n 'no_of_pages': data.get('no_of_pages'),\n 'total_experience': data.get('total_experience'),\n 'user_details': user_detail,\n 'overall_score': overall_score\n }\n return render(request, 'base.html', context)\n else:\n form = UploadResumeModelForm()\n return render(request, 'base.html', {'form': form})\n\ndef get_education(education):\n '''\n Helper function to display the education in human readable format\n '''\n education_string = ''\n for edu in education:\n education_string += edu[0] + ' (' + str(edu[1]) + '), '\n return education_string.rstrip(', ')\n\n@csrf_exempt\ndef user_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a code snippet.\n \"\"\"\n try:\n user = User.objects.get(pk=pk)\n user_details = UserDetails.objects.get(user=user)\n comp = Competencies.objects.filter(user=user)\n mr = MeasurableResults.objects.filter(user=user)\n resume = Resume.objects.get(user=user)\n resume_details = ResumeDetails.objects.filter(resume=resume)\n except UserDetails.DoesNotExist:\n return HttpResponse(status=404)\n except Competencies.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n comp_serializer = CompetenciesSerializer(comp, many=True)\n mr_serializer = MeasurableResultsSerializer(mr, many=True)\n resume_serializer = ResumeSerializer(resume)\n resume_details_serializer = ResumeDetailsSerializer(resume_details, 
many=True)\n user_details_serializer = UserDetailsSerializer(user_details)\n\n data = {}\n data['competencies'] = comp_serializer.data\n data['measurable_results'] = mr_serializer.data\n data['resume'] = resume_serializer.data\n data['resume_details'] = resume_details_serializer.data\n data['user_details'] = user_details_serializer.data\n return JsonResponse(data)\n\n@csrf_exempt\ndef job_recommendation(request):\n if request.method == 'POST':\n job_title = request.POST.get('job_title')\n job_location = request.POST.get('job_location')\n data = requests.get('https://api.ziprecruiter.com/jobs/v1?search=Python&location=Santa%20Monica&api_key=mqpqz4ev44nfu3n9brazrrix27yzipzm').json()\n return JsonResponse(data)","sub_path":"resume_parser/parser_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"334583349","text":"class Maze:\r\n def __init__(self):\r\n # Open the file:\r\n fh = open(\"maze1.txt\", \"r\")\r\n content = fh.readlines()\r\n # maze_array is a two-dimensional array with the entire maze (integers):\r\n maze_array = []\r\n # Fill maze_array with numbers from file:\r\n for i in range(len(content)):\r\n line = str(content[i]).strip()\r\n linelist = []\r\n for element in line:\r\n linelist.append(int(element))\r\n maze_array.append(linelist)\r\n self.maze_array = maze_array\r\n # Find finish of maze, i.e. zero on the side:\r\n # Coordinates of this finish line. Start at (0, 0):\r\n row = 0\r\n column = 0\r\n width = len(maze_array[1])-1\r\n height = len(maze_array)-1\r\n success = 0\r\n # Check the upper row:\r\n for i in range(width):\r\n if maze_array[row][column] == 0:\r\n success = 1\r\n break\r\n else:\r\n column += 1\r\n # Check the lower row:\r\n if success == 0:\r\n row = height\r\n column = 0\r\n for j in range(width):\r\n if maze_array[row][column] == 0:\r\n success = 1\r\n break\r\n else:\r\n column += 1\r\n # Check left side:\r\n if success == 0:\r\n row = 0\r\n column = 0\r\n for k in range(height):\r\n if maze_array[row][column] == 0:\r\n success = 1\r\n break\r\n else:\r\n row += 1\r\n #Check right side:\r\n if success == 0:\r\n row = 0\r\n column = width\r\n for l in range(height):\r\n if maze_array[row][column] == 0:\r\n success = 1\r\n break\r\n else:\r\n row += 1\r\n # Save finish coordinate as attribute:\r\n self.finish = [row, column]\r\n\r\n\r\n","sub_path":"Class Maze.py","file_name":"Class Maze.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"224994990","text":"from __main__ import Plugin\nimport praw\nimport re\n\nclass Main(Plugin):\n\n def helptext(self):\n\n yield \"Administrative use only.\"\n\n def exe(self, message):\n\n #check that it's me\n if not self.is_captainmetaphor(message):\n yield \"You are not authorized to use that command\"\n return\n\n args = message.body.split(maxsplit=3)\n\n #check url validity\n if re.match('https?://\\S', args[2]) is None:\n yield \"bad URL\"\n return\n\n #make praw instance as captainmeta4\n r=praw.Reddit('captainmeta4')\n\n\n\n subreddit = r.subreddit(args[1])\n url = args[2]\n title=args[3]\n\n submission = subreddit.submit(title, url=url)\n\n yield self.to_text(submission)\n \n\n\n","sub_path":"plugins/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
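The builder record that follows drives its entire CLI from the `Builder` class docstring via docopt. As a minimal sketch of that pattern, assuming only that the docopt package is installed (the usage text and names here are illustrative, not taken from the record):

import docopt

USAGE = """
Usage:
    greet NAME [--shout]

Options:
    --shout    Print the greeting in upper case.
"""

def main():
    # docopt parses the usage text itself and returns a dict keyed by
    # argument and option names, e.g. {'NAME': 'world', '--shout': False}
    arguments = docopt.docopt(USAGE)
    greeting = 'hello ' + arguments['NAME']
    print(greeting.upper() if arguments['--shout'] else greeting)

if __name__ == '__main__':
    main()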
+{"seq_id":"177373328","text":"#!/usr/bin/env python\n\nfrom colorama import *\nimport os\nimport sys\nimport docopt\nimport docker\nimport tempfile\nimport shutil\nimport time\nimport logging\nimport subprocess\nimport re\n\n\nclass Builder(object):\n \"\"\"\n Docker Source to Image(STI) is a tool for building reproducable Docker images. STI produces ready-to-run images by\n injecting a user source into a docker image and preparing a new Docker image which incorporates the base image and\n built source, and is ready to use with `docker run`. STI supports incremental builds which re-use previously\n downloaded dependencies, previously built artifacts, etc.\n\n Usage:\n sti build SOURCE_DIR BUILD_IMAGE_TAG APP_IMAGE_TAG [--runtime-image=RUNTIME_IMAGE_TAG] [--clean]\n [--user=USERID] [--url=URL] [--timeout=TIMEOUT] [-e ENV_NAME=VALUE]... [-l LOG_LEVEL]\n [--dir=WORKING_DIR] [--push]\n sti validate BUILD_IMAGE_TAG [--runtime-image=RUNTIME_IMAGE_TAG] [--incremental] [--url=URL]\n [--timeout=TIMEOUT] [-l LOG_LEVEL]\n sti --help\n\n Arguments:\n BUILD_IMAGE_TAG Tag for the Docker image which provides the build and runtime for the application.\n SOURCE_DIR Directory or GIT repository containing your application sources.\n APP_IMAGE_TAG Tag for the Docker image which is created by STI. In the case of incremental\n builds, this tag is also used to identify the previous build of the application.\n\n\n Options:\n --runtime-image=RUNTIME_IMAGE_TAG Tag which identifies an optional Docker image with runtime components but\n none of the build dependencies. If provided, the application will be built\n with BUILD_IMAGE_TAG and the binaries will be extracted and installed on\n the runtime image.\n --clean Do a clean build, ie. do not perform an incremental build.\n --dir=WORKING_DIR Directory where Dockerfiles and other support scripts are created.\n (Default: temp dir)\n -l LOG_LEVEL Logging level. Default: INFO\n --timeout=TIMEOUT Timeout commands if they take too long. Default: 120 seconds.\n --user=USERID Perform the build as specified user.\n --url=URL Connect to docker at the specified url Default: $DOCKER_HOST or unix://var/run/docker.sock\n --help Print this help message.\n \"\"\"\n def __init__(self):\n self.arguments = docopt.docopt(Builder.__doc__)\n\n log_level = self.arguments['-l'] or \"INFO\"\n numeric_level = getattr(logging, log_level.upper(), None)\n if not isinstance(numeric_level, int):\n logging.warn(\"Invalid log level %s. Defaulting to INFO\", log_level)\n numeric_level = logging.INFO\n logging.basicConfig(level=numeric_level)\n self.logger = logging.getLogger(__name__)\n\n if self.arguments['--url'] is not None:\n self.docker_url = self.arguments['--url']\n else:\n self.docker_url = os.getenv('DOCKER_HOST', 'unix://var/run/docker.sock')\n\n # these two checks should be done by the python docker client ...\n if self.docker_url.startswith('tcp:'):\n self.docker_url = self.docker_url.replace('tcp:', 'http:')\n if self.docker_url == 'http://':\n self.docker_url = 'http://127.0.0.1:4243'\n\n try:\n self.timeout = float(self.arguments['--timeout'])\n except TypeError:\n self.timeout = 120\n\n self.docker_client = docker.Client(base_url=self.docker_url, timeout=self.timeout)\n server_version = self.docker_client.version()\n self.logger.debug(\"Connected to Docker server version %s. 
Server linux kernel: %s\",\n                          server_version['Version'], server_version['KernelVersion'])\n\n    def check_file_exists(self, container_id, file_path):\n        try:\n            self.docker_client.copy(container_id, file_path)\n            return True\n        except docker.APIError as e:\n            return False\n\n    def pull_image(self, image_name):\n        if not self.is_image_in_local_registry(image_name):\n            self.docker_client.pull(image_name)\n        else:\n            self.logger.debug(\"Image %s is available in local registry\", image_name)\n\n    def is_image_in_local_registry(self, image_name):\n        images = self.docker_client.images(image_name)\n        self.logger.debug(\"Checking if %s found. Result: %s\", image_name, len(images) != 0)\n        return len(images) != 0\n\n    def push_image(self, image_name):\n        images = self.docker_client.images(image_name)\n        if len(images) == 0:\n            # raising a plain string is a TypeError in modern Python; wrap it in an exception\n            raise Exception(\"Image %s not found in local registry. Unable to push.\" % image_name)\n        else:\n            self.logger.debug(\"Image %s is available in local registry. Pushing to remote.\" % image_name)\n            self.docker_client.push(image_name)\n\n    def create_container(self, image_name):\n        try:\n            container = self.docker_client.create_container(image_name, command='/bin/true')\n            container_id = container['Id']\n            self.docker_client.start(container_id)\n            exitcode = self.docker_client.wait(container_id)\n            time.sleep(1)\n\n            return container_id\n        except docker.APIError as e:\n            self.logger.critical(\"Error while creating container for image %s. %s\", image_name, e.explanation)\n            return None\n\n    def remove_container(self, container_id):\n        self.docker_client.remove_container(container_id)\n\n    def validate_images(self, requests=[]):\n        for request in requests:\n            image_name = request.image_name\n            self.pull_image(image_name)\n            container_id = self.create_container(image_name)\n\n            if not container_id:\n                return False\n\n            valid = self.validate_image(image_name, container_id, request.validate_incremental)\n            self.remove_container(container_id)\n\n            if not valid:\n                self.logger.critical(\"%s %s failed validation %s\" % (Fore.RED, request.description, Fore.RESET))\n                return False\n        return True\n\n    def validate_image(self, image_name, container_id, validate_incremental):\n        images = self.docker_client.images(image_name)\n\n        if len(images) < 1:\n            self.logger.critical(\"Couldn't find image %s\" % image_name)\n            return False\n\n        image = self.docker_client.inspect_image(images[0]['Id'])\n\n        if image['config']['Entrypoint']:\n            self.logger.critical(\"Image %s has a configured Entrypoint and is incompatible with sti\", image_name)\n            return False\n\n        required_files = ['/usr/bin/prepare', '/usr/bin/run']\n        if validate_incremental:\n            required_files += ['/usr/bin/save-artifacts']\n\n        valid_image = self.validate_required_files(container_id, required_files)\n\n        if valid_image:\n            self.logger.info(\"%s passes source image validation\", image_name)\n\n        return valid_image\n\n    def validate_required_files(self, container_id, required_files=[]):\n        valid_image = True\n\n        for f in required_files:\n            if not self.check_file_exists(container_id, f):\n                valid_image = False\n                self.logger.critical(\"Invalid image: file %s is missing.\", f)\n\n        return valid_image\n\n    def detect_incremental_build(self, image_name):\n        container_id = self.create_container(image_name)\n\n        try:\n            result = self.check_file_exists(container_id, '/usr/bin/save-artifacts')\n            self.remove_container(container_id)\n\n            return result\n        except docker.APIError as e:\n            self.logger.critical(\"Error while detecting whether image %s supports incremental build\" % image_name)\n            return False\n\n    def 
prepare_source_dir(self, source, target_source_dir):\n if re.match('^(http(s?)|git|file)://', source):\n git_clone_cmd = \"git clone --quiet %s %s\" % (source, target_source_dir)\n try:\n self.logger.info(\"Fetching %s\", source)\n subprocess.check_output(git_clone_cmd, stderr=subprocess.STDOUT, shell=True)\n except subprocess.CalledProcessError as e:\n self.logger.critical(\"%s command failed (%i)\", git_clone_cmd, e.returncode)\n return False\n else:\n shutil.copytree(source, target_source_dir)\n\n def save_artifacts(self, image_name, target_dir):\n self.logger.info(\"Saving data from image %s for incremental build\", image_name)\n container = self.docker_client.create_container(image_name,\n [\"/usr/bin/save-artifacts\"],\n volumes={\"/usr/artifacts\": {}})\n container_id = container['Id']\n self.docker_client.start(container_id, binds={target_dir: \"/usr/artifacts\"})\n exitcode = self.docker_client.wait(container_id)\n # TODO: error handling\n self.logger.debug(self.docker_client.logs(container_id))\n time.sleep(1)\n self.docker_client.remove_container(container_id)\n\n def build_deployable_image(self, image_name, context_dir, tag, env_str, incremental=False):\n with open(os.path.join(context_dir, 'Dockerfile'), 'w+') as docker_file:\n docker_file.write(\"FROM %s\\n\" % image_name)\n docker_file.write('ADD ./src /usr/src/\\n')\n if incremental:\n docker_file.write('ADD ./artifacts /usr/artifacts/\\n')\n for env in env_str:\n env = env.split(\"=\")\n name = env[0]\n value = env[1]\n docker_file.write(\"ENV %s %s\\n\" % (name, value))\n docker_file.write('RUN /usr/bin/prepare\\n')\n docker_file.write('CMD /usr/bin/run\\n')\n\n self.logger.info(\"Building new docker image\")\n img, logs = self.docker_client.build(tag=tag, path=context_dir, rm=True)\n self.logger.info(\"Build logs:\\n%s\", logs)\n\n return img\n\n def build(self, working_dir, image_name, source_dir, incremental_build, user_id, tag, env_str):\n build_dir = working_dir or tempfile.mkdtemp()\n\n try:\n if incremental_build:\n artifact_tmp_dir = os.path.join(build_dir, 'artifacts')\n os.mkdir(artifact_tmp_dir)\n self.save_artifacts(tag, artifact_tmp_dir)\n\n build_context_source = os.path.join(build_dir, 'src')\n self.prepare_source_dir(source_dir, build_context_source)\n img = self.build_deployable_image(image_name, build_dir, tag, env_str, incremental_build)\n\n if img is not None:\n built_image_name = tag or img\n self.logger.info(\"%s Built image %s %s\", Fore.GREEN, built_image_name, Fore.RESET)\n else:\n self.logger.critical(\"%s STI build failed. 
%s\", Fore.RED, Fore.RESET)\n\n finally:\n if not working_dir:\n shutil.rmtree(build_dir)\n pass\n\n def extended_build(self, working_dir, build_image, runtime_image, source_dir, incremental_build, user_id, tag, app_build_tag, env_str):\n build_dir = working_dir or tempfile.mkdtemp()\n\n builder_build_dir = os.path.join(build_dir, 'build')\n runtime_build_dir = os.path.join(build_dir, 'runtime')\n previous_build_volume = os.path.join(builder_build_dir, 'last_build_artifacts')\n input_source_dir = os.path.join(builder_build_dir, 'src')\n output_source_dir = os.path.join(runtime_build_dir, 'src')\n\n os.mkdir(builder_build_dir)\n os.mkdir(runtime_build_dir)\n os.mkdir(previous_build_volume)\n os.mkdir(output_source_dir)\n\n build_container_id = None\n try:\n self.logger.debug(\"Incremental build: %s\", incremental_build)\n if incremental_build:\n self.pull_image(app_build_tag)\n self.save_artifacts(app_build_tag, previous_build_volume)\n\n volumes = {'/usr/artifacts': {}, '/usr/src': {}, '/usr/build': {}}\n bind_mounts = {\n previous_build_volume: '/usr/artifacts',\n input_source_dir: '/usr/src',\n output_source_dir: '/usr/build'\n }\n self.prepare_source_dir(source_dir, input_source_dir)\n build_container = self.docker_client.create_container(build_image, '/usr/bin/prepare', volumes=volumes)\n build_container_id = build_container['Id']\n self.docker_client.start(build_container_id, binds=bind_mounts)\n exitcode = self.docker_client.wait(build_container_id)\n self.logger.debug(self.docker_client.logs(build_container_id))\n\n if exitcode != 0:\n self.logger.error(\"Unable to build application\")\n raise \"Unable to build container\"\n\n img = self.build_deployable_image(runtime_image, runtime_build_dir, tag, env_str)\n if img is not None:\n built_image_name = tag or img\n build_container_img = self.docker_client.commit(build_container_id)\n self.docker_client.tag(build_container_img['Id'], app_build_tag)\n self.logger.info(\"%s Built build-image %s %s\", Fore.GREEN, app_build_tag, Fore.RESET)\n self.logger.info(\"%s Built image %s %s\", Fore.GREEN, built_image_name, Fore.RESET)\n else:\n self.logger.critical(\"%s STI build failed. 
%s\", Fore.RED, Fore.RESET)\n finally:\n if build_container_id != None:\n self.remove_container(build_container_id)\n if not working_dir:\n shutil.rmtree(builder_build_dir)\n shutil.rmtree(runtime_build_dir)\n shutil.rmtree(build_dir)\n\n def main(self):\n build_image = self.arguments['BUILD_IMAGE_TAG']\n runtime_image = self.arguments['--runtime-image']\n app_image = self.arguments['APP_IMAGE_TAG']\n app_build_tag = \"%s-build\" % app_image\n source = self.arguments['SOURCE_DIR']\n user = self.arguments['--user']\n env_str = self.arguments['ENV_NAME=VALUE']\n is_incremental = not self.arguments['--clean']\n working_dir = self.arguments['--dir']\n should_push = self.arguments['--push']\n\n\n validations = []\n\n try:\n if runtime_image:\n if is_incremental:\n self.pull_image(app_build_tag)\n is_incremental = self.is_image_in_local_registry(app_build_tag)\n if self.arguments['validate']:\n validations.append(ImageValidationRequest('Runtime image', runtime_image, False))\n validations.append(ImageValidationRequest('Build image', build_image, True))\n self.validate_images(validations)\n elif self.arguments['build']:\n self.extended_build(working_dir, build_image, runtime_image, source, is_incremental, user, app_image, app_build_tag, env_str)\n else:\n if is_incremental:\n self.pull_image(app_image)\n is_incremental = self.is_image_in_local_registry(app_image)\n if self.arguments['validate']:\n validations.append(ImageValidationRequest('Target image', build_image,\n self.arguments['--incremental']))\n self.validate_images(validations)\n elif self.arguments['build']:\n self.build(working_dir, build_image, source, is_incremental, user, app_image, env_str)\n finally:\n self.docker_client.close()\n\n\nclass ImageValidationRequest:\n def __init__(self, description, image_name, validate_incremental=False):\n self.description = description\n self.image_name = image_name\n self.validate_incremental = validate_incremental\n\n\ndef main():\n builder = Builder()\n builder.main()\n\nif __name__ == \"__main__\":\n sys.path.insert(0, '.')\n main()\n","sub_path":"vendor/src/github.com/openshift/docker-source-to-images/python/sti/cmd/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":16426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"262545336","text":"from glb_definition import*\n\ndef blockRotator(block, gameMatrix):\n if (block.rotatable and checkBoundingRotation(block) and checkRotateBlockCollision(block, gameMatrix)):\n block.incrementRotationState()\n block.updateRotatedBlocks()\n block.rotateVertUpdate()\n block.updateHorizontalPosition(0)\n\ndef checkBoundingRotation(block):\n nextState = block.matrix[block.rotateState]\n if block.xPos + len(nextState) > 10:\n ''' Block cannot rotate in this direction as it is up against the edge of gameBoard. 
'''\n        return False\n    elif block.xPos < 0:\n        return False\n    elif block.yPos + len(nextState) >= 16:\n        return False\n    else:\n        return True\n\ndef checkRotateBlockCollision(block, gameMatrix):\n    nextState = block.getNextRotateState()\n    for i in range(len(block.matrix[nextState])):\n        for j in range(len(block.matrix[nextState][i])):\n            if gameMatrix[block.yPos + i][block.xPos + j] != 0 and block.matrix[nextState][i][j] != 0:\n                return False\n    return True\n\n\n\n","sub_path":"block_rotator.py","file_name":"block_rotator.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"72151730","text":"def render_to_string(template, context=None, request=None):\n    if (context and ('team' in context) and isinstance(context['team'], Team)):\n        team = context['team']\n    else:\n        team = None\n    default_context = get_default_context(request, context, team=team)\n    if (context is None):\n        context = default_context\n    else:\n        context = dict(context)\n        context.update(default_context)\n    if request:\n        context = RequestContext(request, context)\n    else:\n        context = Context(context)\n    return loader.render_to_string(template, context)","sub_path":"Data Set/bug-fixing-5/4b9f666cf8bf8720b28f1fe4b8330b3460455c2e--bug.py","file_name":"4b9f666cf8bf8720b28f1fe4b8330b3460455c2e--bug.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"498031721","text":"from utils.spark_app import MovieDataApp\n\n\nspark = MovieDataApp().spark\n\ndef get_movie_time():\n    movie_data = spark.sql(\"select * from movie.db_asset_source\")\n\n    def extract_movie_time(row):\n        movie_len = None\n        try:\n            tmp_dict = eval(row.extra)\n            # print(tmp_dict)\n            movie_len = tmp_dict.get('len')\n            if movie_len is not None:\n                movie_len = int(int(movie_len) / 60)\n        except:\n            pass\n        return row.aid, movie_len  # if the eval above failed, movie_len is returned as None here\n\n    tmp_table = movie_data.rdd.map(extract_movie_time).toDF(['movie_id', 'movie_len']).dropna()\n    import pyspark.sql.functions as fn\n    from pyspark.sql import Window\n    tmp_table = tmp_table.withColumn(\"sort_num\",\n                                     fn.row_number().over(Window.partitionBy(\"movie_id\").orderBy(fn.asc('movie_len'))))\n    # tmp_table.show()\n    # Some titles carry two durations: the total length of all episodes and the per-episode length. Keep the per-episode one, i.e. the smaller value after sorting.\n    tmp_table = tmp_table.where('sort_num=1').where('movie_len<200').drop('sort_num')\n\n    return tmp_table","sub_path":"online_recommend/stat_factor/movie_time.py","file_name":"movie_time.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"589604081","text":"import sys\n\n\na = list(map(int, sys.stdin.readline().rstrip().split()))\nb = list(map(int, sys.stdin.readline().rstrip().split()))\nc = list(map(int, sys.stdin.readline().rstrip().split()))\n\nresult_a, result_b, result_c = [], [], []\n\n\ndef cal(a_list, ah_1, am_1, as_1, ah_2, am_2, as_2):\n    if (as_2 - as_1) < 0:\n        am_2 -= 1\n        as_2 += 60\n    a_list.insert(0, as_2-as_1)\n\n    if (am_2 - am_1) < 0:\n        ah_2 -= 1\n        am_2 += 60\n    a_list.insert(0, am_2 - am_1)\n\n    a_list.insert(0, ah_2 - ah_1)\n\n    return map(str, a_list)\n\n\nprint(' '.join(cal(result_a, *a)))\nprint(' '.join(cal(result_b, *b)))\nprint(' '.join(cal(result_c, *c)))\n","sub_path":"baekjoon/미분류/boj_5575.py","file_name":"boj_5575.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
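The movie_time record above deduplicates to one duration per movie by ranking rows inside a window and keeping rank 1. A self-contained sketch of that PySpark pattern, assuming a local SparkSession is available (the sample data and column names are illustrative):

from pyspark.sql import SparkSession, Window
import pyspark.sql.functions as fn

spark = SparkSession.builder.master('local[1]').appName('dedup-sketch').getOrCreate()
df = spark.createDataFrame([(1, 45), (1, 90), (2, 60)], ['movie_id', 'movie_len'])

# Number rows within each movie_id by ascending length, then keep the smallest one
w = Window.partitionBy('movie_id').orderBy(fn.asc('movie_len'))
shortest = (df.withColumn('sort_num', fn.row_number().over(w))
              .where('sort_num = 1')
              .drop('sort_num'))
shortest.show()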
+{"seq_id":"236757162","text":"#!/usr/bin/python\nimport Tkinter\nimport tkMessageBox\n\n\n\ntop = Tkinter.Tk()\n\nC = Tkinter.Canvas(top, bg=\"blue\", height=250, width=300)\n\ncoord = 10, 50, 240, 210\narc = C.create_arc(coord, start=0, extent=150, fill=\"red\")\n\nline = C.create_line(1,1, 220, 240, fill = \"yellow\")\noval = C.create_oval(1,1, 30, 100, fill = \"green\")\noval = C.create_polygon(50,50,100,100,400,200)\n\n# filename = PhotoImage(file = \"sunset-1.jpg\")\n# image = C.create_image(50, 50, anchor=NE, image=filename)\n\nC.pack();\n\ntop.mainloop()\n","sub_path":"HaroldTestPY/HTCanvas.py","file_name":"HTCanvas.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160430583","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\nimport scipy.stats as stats\n\n\ndf = pd.read_csv('kc_house_data.csv')\n\n\n# My goal going into this project is to determine a few things:\n# 1. How close can we get to determining the house price? \n# 2. Is there any data that violates the integrity of our model and can we remove it without negatively affecting the other categories?\n# 3. What is the clearest way of displaying the results?\n\n# With already having a set of data to work with, my first step according to the OSEMN model of explatory data analysis (which I will be using) is to investigate the data I've been given and to scrub away any faulty information (null values, major outliers, etc...)\n\n# In[3]:\n\n\ndf.info()\n\n\n# In[148]:\n\n\ndf.head()\n\n\n# When I originially loaded the data I noticed that sqft_basement was classified as an object. I assumed that the presence of a basement affects the price of a house so I decided it would be helpful to convert the column to numeric.\n\n# In[2]:\n\n\ndf.sqft_basement.replace(\"?\",'0',inplace=True)\n\n\n# To convert the data from strings to numeric I had to first take care of the '?'s. With the unknown information making up a small portion of the data and the houses with 0 sqft_basement making up much of this data, I decided to replace all of the unknown data with a 0.\n\n# In[3]:\n\n\ndf.sqft_basement = df.sqft_basement.astype('float64')\n\n\n# The column that is next classified as an object is date. According to the data source, this column describes the date a house was sold. Because I believe that, along with 'id', this information will have no bearing on the price of a house I will go ahead and drop both columns.\n\n# In[4]:\n\n\ndf.drop('date',axis=1,inplace=True)\n\n\n# In[5]:\n\n\ndf.drop('id',axis=1,inplace=True)\n\n\n# In[15]:\n\n\ndf.corr()\n\n\n# One of the first things I am curious about is the following: Without scrubbing any data, what is the correlation between price and the other independent variables? I am curious also with seeing any major correlation between any two variables. Some notable independent variables are: sqft_living, grade, sqft_above, sqft_living15, and bathrooms. They have a decent correlation with price. It's possible that with some additional scrubbing we can the correlation value up. It should also be noted that we are missing a couple of variables in the above table. 
We are missing sqft_basement and date.\n\n# According to the above info, sqft_basement and date were both classified as objects, which lets us know why we may not be seeing them displayed on the correlation table.\n\n# In[16]:\n\n\ntotal = df.isnull().sum().sort_values(ascending=False)\ntotal\n\n\n# According to the above data, there exist 3 variables with null values. I will start by scrubbing each of the variables.\n\n# In[17]:\n\n\ndf.waterfront.unique()\n\n\n# In[18]:\n\n\ndf.waterfront.value_counts()\n\n\n# In[6]:\n\n\ndf.waterfront.fillna(0,inplace=True)\n\n\n# The waterfront column contained 2,376 missing values. This equates to 11 percent of the possible data being a null value. Given that the data doesn't correlate too strongly with any other variable, but still wanting to keep the information the rows offer, I will simply replace each null value with a 0.\n\n# In[110]:\n\n\ndf.view.value_counts()\n\n\n# In[111]:\n\n\ndf.view.unique()\n\n\n# The 'views' column has a total of 63 null values. The overwhelming majority of houses in this column have 0 views. Because there is such a small number of null values, the model will not be too affected by the presence of this data; however, I will still convert the null values to 0 since more data is better than less. \n\n# In[7]:\n\n\ndf.view.fillna(0,inplace=True)\n\n\n# Finally, on to the last column with null values, which also contains the most.\n\n# In[33]:\n\n\ndf.yr_renovated.value_counts()\n\n\n# In[81]:\n\n\ndf.yr_renovated.unique()\n\n\n# In[84]:\n\n\ndf.yr_renovated.describe()\n\n\n# The 'yr_renovated' column has 3,842 null values. 17,011 values in this column are 0. I don't particularly want to delete 3,842 more rows and the majority of the houses in this set of data haven't been renovated. I also see that this column has very little correlation with all other variables. With these observations duly noted I will go ahead and replace all the null values in this column with a 0. Replacing it with the mean would make no sense since the mean of this data set is 83.\n\n# In[8]:\n\n\ndf.yr_renovated.fillna(0,inplace=True)\n\n\n# In[114]:\n\n\ndf.info()\n\n\n# The data now has no null values to worry about. My next point of interest is seeing how the remaining data relates to our dependent variable, 'price'. To do that I will be using a heatmap courtesy of seaborn.\n\n# In[24]:\n\n\ncorrelation = df.corr()\nplt.figure(figsize=(14, 12))\nheatmap = sns.heatmap(correlation, annot=True, linewidths=0, vmin=-1, cmap=\"RdBu_r\")\n\n\n# In[39]:\n\n\nformula = 'price ~ waterfront + yr_renovated + view + bathrooms + sqft_living + sqft_living15 + grade + bedrooms + floors + condition + lat + zipcode + long + yr_built + sqft_lot + sqft_lot15'\nmodel = ols(formula= formula, data=df).fit()\n\n\n# In[40]:\n\n\nmodel.summary()\n\n\n# A couple of things to note from the above model summary are the following:\n# 1. The r-squared value of our model is .699 which means that the model can explain 69.9 percent of the variation in the data. I have yet to normalize the data so I am curious to see what effect that will have.\n# 2. 
Just about all of our variables have a p-value of 0 which means they are significant and relevant for our model.\n\n# In[49]:\n\n\ndf.hist(figsize=[12,12]);\n\n\n# Much of the data does not follow a normal distribution so I will attempt to make the data appear more normal.\n\n# In[9]:\n\n\ndf2 = pd.DataFrame([])\n\n\n# In[10]:\n\n\ndf2['grade'] = df.grade\ndf2['baths'] = df.bathrooms\ndf2['beds'] = df.bedrooms\ndf2['cond'] = df.condition\ndf2['floor'] = df.floors\ndf2['lat'] = df.lat\ndf2['long'] = df.long\ndf2['price'] = np.log(df.price)\ndf2['sqft_above'] = np.log(df.sqft_above)\ndf2['sqft_basement'] = np.log(df.sqft_basement)\ndf2['sqft_living'] = np.log(df.sqft_living)\ndf2['sqft_living15'] = np.log(df.sqft_living15)\ndf2['sqft_lot'] = np.log(df.sqft_lot)\ndf2['sqft_lot15'] = np.log(df.sqft_lot15)\n\n\n# In[18]:\n\n\nscaled_price = (df2.price - min(df2.price))/(max(df2.price) - min(df2.price))\nscaled_sqft_above = (df2.sqft_above - min(df2.sqft_above))/(max(df2.sqft_above) - min(df2.sqft_above))\nscaled_sqft_basement = (df.sqft_basement - min(df.sqft_basement))/(max(df.sqft_basement) - min(df.sqft_basement))\nscaled_sqft_living = (df2.sqft_living - min(df2.sqft_living))/(max(df2.sqft_living) - min(df2.sqft_living))\nscaled_sqft_living15 = (df2.sqft_living15 - min(df2.sqft_living15))/(max(df2.sqft_living15) - min(df2.sqft_living15))\nscaled_sqft_lot = (df2.sqft_lot - min(df2.sqft_lot))/(max(df2.sqft_lot) - min(df2.sqft_lot))\nscaled_sqft_lot15 = (df2.sqft_lot15 - min(df2.sqft_lot15))/(max(df2.sqft_lot15) - min(df2.sqft_lot15))\n\n\n# In[12]:\n\n\ndfscaled = pd.DataFrame([])\n\n\n# In[19]:\n\n\ndfscaled['waterfront'] = df.waterfront\ndfscaled['view'] = df.view\ndfscaled['yr_renovated'] = df.yr_renovated\ndfscaled['yr_built'] = df.yr_built\ndfscaled['zipcode'] = df.zipcode\ndfscaled['grade'] = df.grade\ndfscaled['baths'] = df.bathrooms\ndfscaled['beds'] = df.bedrooms\ndfscaled['cond'] = df.condition\ndfscaled['floor'] = df.floors\ndfscaled['lat'] = df.lat\ndfscaled['long'] = df.long\ndfscaled['price'] = scaled_price\ndfscaled['sqft_above'] = scaled_sqft_above\ndfscaled['sqft_basement'] = scaled_sqft_basement\ndfscaled['sqft_living'] = scaled_sqft_living\ndfscaled['sqft_living15'] = scaled_sqft_living15\ndfscaled['sqft_lot'] = scaled_sqft_lot\ndfscaled['sqft_lot15'] = scaled_sqft_lot15\n\n\n# In[20]:\n\n\ndfscaled.hist(figsize=[12,12]);\n\n\n# In[21]:\n\n\nformula2 = 'price ~ waterfront + view + yr_renovated + yr_built + zipcode + grade + baths + beds + cond + floor + lat + long + sqft_above + sqft_basement + sqft_living + sqft_living15 + sqft_lot + sqft_lot15'\n\n\n# In[22]:\n\n\nmodel2 = ols(formula= formula2, data=dfscaled).fit()\n\n\n# In[23]:\n\n\nmodel2.summary()\n\n\n# In[24]:\n\n\nfig = sm.graphics.qqplot(model2.resid, dist=stats.norm, line='45', fit=True)\n\n\n# In[27]:\n\n\nplt.scatter(model2.predict(dfscaled), model2.resid)\nplt.plot(model2.predict(dfscaled), [0 for i in range(len(df))])\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"JG FI final project module 1.py","file_name":"JG FI final project module 1.py","file_ext":"py","file_size_in_byte":8396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"132859006","text":"class Xodim:\n    def __init__(self, ism, manzili, maoshi):\n        self.ism = ism\n        self.manzili = manzili\n        self.maoshi = maoshi\n\n    def gap(self):\n        print(f\"My name is {self.ism}, my salary is {self.maoshi}\")\n        if self.maoshi < 1000000:\n            print(\"My salary is low!!!\")\n\n\nxodimlar = []\n\nn = int(input(\"Number of employees: 
\"))\nfor i in range(n):\n print(f\"{i + 1} - xodim\")\n ism = input(\"Ism kiriting: \")\n manzili = input(\"Manzil kiriting\")\n maoshi = int(input(\"Maoshini kiriting\"))\n xodimlar.append(Xodim(ism=ism, manzili=manzili, maoshi=maoshi))\n\n\n\nfor xodim in xodimlar:\n xodim.gap()","sub_path":"python fayllari/obektlar_va_classlar/xodim1.py","file_name":"xodim1.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"33810569","text":"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass ScitokensCpp(CMakePackage):\n \"\"\"A C++ implementation of the SciTokens library with a C library interface.\n SciTokens provide a token format for distributed authorization.\"\"\"\n\n homepage = \"https://github.com/scitokens/scitokens-cpp\"\n url = \"https://github.com/scitokens/scitokens-cpp/archive/refs/tags/v0.7.0.tar.gz\"\n\n version(\"0.7.0\", sha256=\"72600cf32523b115ec7abf4ac33fa369e0a655b3d3b390e1f68363e6c4e961b6\")\n\n depends_on(\"sqlite\")\n depends_on(\"curl\")\n depends_on(\"uuid\", type=\"build\")\n\n # https://github.com/scitokens/scitokens-cpp/issues/72\n @when(\"^openssl@3:\")\n def patch(self):\n filter_file(\" -Werror\", \"\", \"CMakeLists.txt\")\n","sub_path":"var/spack/repos/builtin/packages/scitokens-cpp/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"429175992","text":"\n\n#calss header\nclass _DENSE():\n\tdef __init__(self,): \n\t\tself.name = \"DENSE\"\n\t\tself.definitions = [u'having parts that are close together so that it is difficult to go or see through: ', u'stupid: ', u'(of a substance) containing a lot of matter in a small space: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_dense.py","file_name":"_dense.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"223487315","text":"from bottle import route, run, request, response\n\napi_info = {'name': 'My api', 'version': '0.0.1'}\n\nlog_file_name = '/tmp/logs.txt'\nlog_file = open(log_file_name, 'a+')\n\n\n@route('/api', method='GET')\ndef api_home():\n return api_info\n\n\n@route('/api/log', method='GET')\ndef get_logs():\n log_file.seek(0, 0)\n text = log_file.read()\n\n response.content_type = 'text/plain'\n return text\n\n\n@route('/api/log', method='POST')\ndef post_logs():\n text = request.forms.get('log')\n log_file.write(text + '\\n')\n\n response.status = 201\n return\n\nif __name__ == '__main__':\n run(host='0.0.0.0', port=8080)\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"541885623","text":"import common\nimport pytest\nimport time\n\nfrom common import RETRY_COUNTS, RETRY_INTERVAL\nfrom common import client, core_api # NOQA\nfrom common import storage_class, statefulset, pod # NOQA\nfrom 
common import sts_name, volume_name # NOQA\nfrom common import check_volume_data, cleanup_volume, \\\n create_and_check_volume, get_longhorn_api_client, get_self_host_id, \\\n wait_for_volume_detached, wait_for_volume_degraded, \\\n wait_for_volume_healthy, wait_scheduling_failure, \\\n write_volume_random_data, wait_for_rebuild_complete\nfrom common import SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY\nfrom common import SETTING_DEFAULT_DATA_LOCALITY\nfrom common import VOLUME_FIELD_ROBUSTNESS, VOLUME_ROBUSTNESS_HEALTHY\nfrom common import create_pv_for_volume\nfrom common import create_pvc_for_volume\nfrom common import write_pod_volume_random_data\nfrom common import wait_for_volume_replica_count\n\nfrom common import Mi, Gi, DATA_SIZE_IN_MB_2, DATA_SIZE_IN_MB_4\nfrom common import create_and_wait_pod\nfrom common import settings_reset # NOQA\nfrom common import wait_for_rebuild_start\nfrom common import delete_and_wait_pod\nfrom common import crash_engine_process_with_sigkill\nfrom common import wait_for_replica_running\n\nfrom time import sleep\n\n\n@pytest.yield_fixture(autouse=True)\ndef reset_settings():\n yield\n client = get_longhorn_api_client() # NOQA\n host_id = get_self_host_id()\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=True)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"true\")\n\n\ndef get_host_replica(volume, host_id):\n \"\"\"\n Get the replica of the volume that is running on the test host. Trigger a\n failed assertion if it can't be found.\n :param volume: The volume to get the replica from.\n :param host_id: The ID of the test host.\n :return: The replica hosted on the test host.\n \"\"\"\n host_replica = None\n for i in volume.replicas:\n if i.hostId == host_id:\n host_replica = i\n assert host_replica is not None\n return host_replica\n\n\n# We check to make sure the replica is found, running, and in RW mode (not\n# rebuilding) since the longhorn-engine has the latest status compared to\n# longhorn-manager, which might be in an intermediate state.\ndef wait_new_replica_ready(client, volume_name, replica_names): # NOQA\n \"\"\"\n Wait for a new replica to be found on the specified volume. Trigger a\n failed assertion if one can't be found.\n :param client: The Longhorn client to use in the request.\n :param volume_name: The name of the volume.\n :param replica_names: The list of names of the volume's old replicas.\n \"\"\"\n new_replica_ready = False\n wait_for_rebuild_complete(client, volume_name)\n for _ in range(RETRY_COUNTS):\n v = client.by_id_volume(volume_name)\n for r in v.replicas:\n if r[\"name\"] not in replica_names and r[\"running\"] and \\\n r[\"mode\"] == \"RW\":\n new_replica_ready = True\n break\n if new_replica_ready:\n break\n sleep(RETRY_INTERVAL)\n assert new_replica_ready\n\n\ndef test_soft_anti_affinity_scheduling(client, volume_name): # NOQA\n \"\"\"\n Test that volumes with Soft Anti-Affinity work as expected.\n\n With Soft Anti-Affinity, a new replica should still be scheduled on a node\n with an existing replica, which will result in \"Healthy\" state but limited\n redundancy.\n\n 1. Create a volume and attach to the current node\n 2. Generate and write `data` to the volume.\n 3. Set `soft anti-affinity` to true\n 4. Disable current node's scheduling.\n 5. Remove the replica on the current node\n 6. Wait for the volume to complete rebuild. Volume should have 3 replicas.\n 7. 
Verify `data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n\n data = write_volume_random_data(volume)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"true\")\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=False)\n replica_names = list(map(lambda replica: replica.name, volume.replicas))\n host_replica = get_host_replica(volume, host_id)\n\n volume.replicaRemove(name=host_replica.name)\n wait_new_replica_ready(client, volume_name, replica_names)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n check_volume_data(volume, data)\n\n cleanup_volume(client, volume)\n\n\ndef test_soft_anti_affinity_detach(client, volume_name): # NOQA\n \"\"\"\n Test that volumes with Soft Anti-Affinity can detach and reattach to a\n node properly.\n\n 1. Create a volume and attach to the current node.\n 2. Generate and write `data` to the volume\n 3. Set `soft anti-affinity` to true\n 4. Disable current node's scheduling.\n 5. Remove the replica on the current node\n 6. Wait for the new replica to be rebuilt\n 7. Detach the volume.\n 8. Verify there are 3 replicas\n 9. Attach the volume again. Verify there are still 3 replicas\n 10. Verify the `data`.\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n\n data = write_volume_random_data(volume)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"true\")\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=False)\n replica_names = list(map(lambda replica: replica.name, volume.replicas))\n host_replica = get_host_replica(volume, host_id)\n\n volume.replicaRemove(name=host_replica.name)\n wait_new_replica_ready(client, volume_name, replica_names)\n volume = wait_for_volume_healthy(client, volume_name)\n volume.detach()\n volume = wait_for_volume_detached(client, volume_name)\n assert len(volume.replicas) == 3\n\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n check_volume_data(volume, data)\n\n cleanup_volume(client, volume)\n\n\ndef test_hard_anti_affinity_scheduling(client, volume_name): # NOQA\n \"\"\"\n Test that volumes with Hard Anti-Affinity work as expected.\n\n With Hard Anti-Affinity, scheduling on nodes with existing replicas should\n be forbidden, resulting in \"Degraded\" state.\n\n 1. Create a volume and attach to the current node\n 2. Generate and write `data` to the volume.\n 3. Set `soft anti-affinity` to false\n 4. Disable current node's scheduling.\n 5. Remove the replica on the current node\n 1. Verify volume will be in degraded state.\n 2. Verify volume reports condition `scheduled == false`\n 3. Verify only two of three replicas of volume are healthy.\n 4. Verify the remaining replica doesn't have `replica.HostID`, meaning\n it's unscheduled\n 6. 
Check volume `data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n\n data = write_volume_random_data(volume)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"false\")\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=False)\n host_replica = get_host_replica(volume, host_id)\n\n volume.replicaRemove(name=host_replica.name)\n # Instead of waiting for timeout and lengthening the tests a significant\n # amount, we can make sure the scheduling isn't working by making sure the\n # volume becomes Degraded and reports a scheduling error.\n wait_for_volume_degraded(client, volume_name)\n wait_scheduling_failure(client, volume_name)\n # While there are three replicas that should exist to meet the Volume's\n # request, only two of those replicas should actually be Healthy.\n volume = client.by_id_volume(volume_name)\n assert sum([1 for replica in volume.replicas if replica.running and\n replica.mode == \"RW\"]) == 2\n # Confirm that the remaining replica is unscheduled.\n assert sum([1 for replica in volume.replicas if\n not replica.hostId]) == 1\n # Three replicas in total should still exist.\n assert len(volume.replicas) == 3\n check_volume_data(volume, data)\n\n cleanup_volume(client, volume)\n\n\ndef test_hard_anti_affinity_detach(client, volume_name): # NOQA\n \"\"\"\n Test that volumes with Hard Anti-Affinity are still able to detach and\n reattach to a node properly, even in degraded state.\n\n 1. Create a volume and attach to the current node\n 2. Generate and write `data` to the volume.\n 3. Set `soft anti-affinity` to false\n 4. Disable current node's scheduling.\n 5. Remove the replica on the current node\n 1. Verify volume will be in degraded state.\n 2. Verify volume reports condition `scheduled == false`\n 6. Detach the volume.\n 7. Verify that the volume only has 2 replicas\n 1. Unhealthy replica will be removed upon detach.\n 8. Attach the volume again.\n 1. Verify volume will be in degraded state.\n 2. Verify volume reports condition `scheduled == false`\n 3. Verify only two of three replicas of volume are healthy.\n 4. Verify the remaining replica doesn't have `replica.HostID`, meaning\n it's unscheduled\n 9. 
Check volume `data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n\n data = write_volume_random_data(volume)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"false\")\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=False)\n host_replica = get_host_replica(volume, host_id)\n\n volume.replicaRemove(name=host_replica.name)\n volume = wait_for_volume_degraded(client, volume_name)\n wait_scheduling_failure(client, volume_name)\n volume.detach()\n volume = wait_for_volume_detached(client, volume_name)\n assert len(volume.replicas) == 2\n\n volume.attach(hostId=host_id)\n # Make sure we're still not getting another successful replica.\n volume = wait_for_volume_degraded(client, volume_name)\n wait_scheduling_failure(client, volume_name)\n assert sum([1 for replica in volume.replicas if replica.running and\n replica.mode == \"RW\"]) == 2\n assert sum([1 for replica in volume.replicas if\n not replica.hostId]) == 1\n assert len(volume.replicas) == 3\n check_volume_data(volume, data)\n\n cleanup_volume(client, volume)\n\n\ndef test_hard_anti_affinity_live_rebuild(client, volume_name): # NOQA\n \"\"\"\n Test that volumes with Hard Anti-Affinity can build new replicas live once\n a valid node is available.\n\n If no nodes without existing replicas are available, the volume should\n remain in \"Degraded\" state. However, once one is available, the replica\n should now be scheduled successfully, with the volume returning to\n \"Healthy\" state.\n\n 1. Create a volume and attach to the current node\n 2. Generate and write `data` to the volume.\n 3. Set `soft anti-affinity` to false\n 4. Disable current node's scheduling.\n 5. Remove the replica on the current node\n 1. Verify volume will be in degraded state.\n 2. Verify volume reports condition `scheduled == false`\n 6. Enable the current node's scheduling\n 7. Wait for volume to start rebuilding and become healthy again\n 8. Check volume `data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n\n data = write_volume_random_data(volume)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"false\")\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=False)\n # Materialize the names in a list so membership checks can run repeatedly;\n # a bare map object would be exhausted after the first iteration.\n replica_names = list(map(lambda replica: replica.name, volume.replicas))\n host_replica = get_host_replica(volume, host_id)\n\n volume.replicaRemove(name=host_replica.name)\n wait_for_volume_degraded(client, volume_name)\n wait_scheduling_failure(client, volume_name)\n # Allow scheduling on host node again\n client.update(node, allowScheduling=True)\n wait_new_replica_ready(client, volume_name, replica_names)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n check_volume_data(volume, data)\n\n cleanup_volume(client, volume)\n\n\ndef test_hard_anti_affinity_offline_rebuild(client, volume_name): # NOQA\n \"\"\"\n Test that volumes with Hard Anti-Affinity can build new replicas during\n the attaching process once a valid node is available.\n\n Once a new replica has been built as part of the attaching process, the\n volume should be Healthy again.\n\n 1. 
Create a volume and attach to the current node\n 2. Generate and write `data` to the volume.\n 3. Set `soft anti-affinity` to false\n 4. Disable current node's scheduling.\n 5. Remove the replica on the current node\n 1. Verify volume will be in degraded state.\n 2. Verify volume reports condition `scheduled == false`\n 6. Detach the volume.\n 7. Enable current node's scheduling.\n 8. Attach the volume again.\n 9. Wait for volume to become healthy with 3 replicas\n 10. Check volume `data`\n \"\"\"\n volume = create_and_check_volume(client, volume_name)\n host_id = get_self_host_id()\n volume.attach(hostId=host_id)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n\n data = write_volume_random_data(volume)\n setting = client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(setting, value=\"false\")\n node = client.by_id_node(host_id)\n client.update(node, allowScheduling=False)\n # Materialize the names in a list so membership checks can run repeatedly;\n # a bare map object would be exhausted after the first iteration.\n replica_names = list(map(lambda replica: replica.name, volume.replicas))\n host_replica = get_host_replica(volume, host_id)\n\n volume.replicaRemove(name=host_replica.name)\n volume = wait_for_volume_degraded(client, volume_name)\n wait_scheduling_failure(client, volume_name)\n volume.detach()\n volume = wait_for_volume_detached(client, volume_name)\n client.update(node, allowScheduling=True)\n volume.attach(hostId=host_id)\n wait_new_replica_ready(client, volume_name, replica_names)\n volume = wait_for_volume_healthy(client, volume_name)\n assert len(volume.replicas) == 3\n check_volume_data(volume, data)\n\n cleanup_volume(client, volume)\n\n\ndef test_replica_rebuild_per_volume_limit(\n client, core_api, storage_class, sts_name, statefulset): # NOQA\n \"\"\"\n Test that the volume always has only one replica scheduled for rebuild\n\n 1. Set soft anti-affinity to `true`.\n 2. Create a volume with 1 replica.\n 3. Attach the volume and write a few hundred MB of data to it.\n 4. Scale the volume replica to 5.\n 5. Constantly check the volume replica list to make sure there is\n only 1 replica in WO state.\n 6. Wait for the volume to complete rebuilding. Then remove 4 of the 5\n replicas.\n 7. Monitor the volume replica list again.\n 8. 
Once the rebuild was completed again, verify the data checksum.\n \"\"\"\n replica_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n client.update(replica_soft_anti_affinity_setting, value=\"true\")\n\n data_path = '/data/test'\n storage_class['parameters']['numberOfReplicas'] = \"1\"\n vol_name, pod_name, md5sum = \\\n common.prepare_statefulset_with_data_in_mb(\n client, core_api, statefulset, sts_name, storage_class,\n data_path=data_path, data_size_in_mb=DATA_SIZE_IN_MB_2)\n\n # Scale the volume replica to 5\n r_count = 5\n vol = client.by_id_volume(vol_name)\n vol.updateReplicaCount(replicaCount=r_count)\n\n vol = common.wait_for_volume_replicas_mode(client, vol_name, 'RW',\n replica_count=r_count)\n\n # Delete 4 volume replicas\n del vol.replicas[0]\n for r in vol.replicas:\n vol.replicaRemove(name=r.name)\n\n r_count = 1\n common.wait_for_volume_replicas_mode(client, vol_name, 'RW',\n replica_count=r_count)\n\n assert md5sum == common.get_pod_data_md5sum(core_api, pod_name, data_path)\n\n\ndef test_data_locality_basic(client, core_api, volume_name, pod, settings_reset): # NOQA\n \"\"\"\n Test data locality basic feature\n\n Context:\n\n Data Locality feature allows users to have an option to keep a local\n replica on the same node as the consuming pod.\n Longhorn currently supports 2 modes:\n - disabled: Longhorn does not try to keep a local replica\n - best-effort: Longhorn tries to keep a local replica\n\n See manual tests at:\n https://github.com/longhorn/longhorn/issues/1045#issuecomment-680706283\n\n Steps:\n\n Case 1: Test that Longhorn builds a local replica on the engine node\n\n 1. Create a volume(1) with 1 replica and dataLocality set to disabled\n 2. Find the node where the replica is located.\n Let's call this node replica-node\n 3. Attach the volume to a node different than replica-node.\n Let's call this node engine-node\n 4. Write 200MB of data to volume(1)\n 5. Use a retry loop to verify that Longhorn does not create\n a replica on the engine-node\n 6. Update dataLocality to best-effort for volume(1)\n 7. Use a retry loop to verify that Longhorn creates and rebuilds\n a replica on the engine-node and removes the other replica\n 8. Detach the volume(1) and attach it to a different node.\n Let's call the new node new-engine-node and the old\n node old-engine-node\n 9. Wait for volume(1) to finish attaching\n 10. Use a retry loop to verify that Longhorn creates and rebuilds\n a replica on the new-engine-node and removes the replica on\n old-engine-node\n\n Case 2: Test that Longhorn prioritizes deleting replicas on the same node\n\n 1. Add the tag AVAIL to node-1 and node-2\n 2. Set node soft anti-affinity to `true`.\n 3. Create a volume(2) with 3 replicas and dataLocality set to best-effort\n 4. Use a retry loop to verify that all 3 replicas are on node-1 and\n node-2, and no replica is on node-3\n 5. Attach volume(2) to node-3\n 6. Use a retry loop to verify that there is no replica on node-3 and\n we can still read/write to volume(2)\n 7. Find the node which contains 2 replicas.\n Let's call this node most-replica-node\n 8. Set the replica count to 2 for volume(2)\n 9. Verify that Longhorn removes one replica from most-replica-node\n\n Case 3: Test that the volume is not corrupted if there is an unexpected\n detachment while building the local replica\n\n 1. Remove the tag AVAIL from node-1 and node-2\n Set node soft anti-affinity to `false`.\n 2. Create a volume(3) with 1 replica and dataLocality set to best-effort\n 3. 
Attach volume(3) to node-3.\n 4. Use a retry loop to verify that volume(3) has only 1 replica on node-3\n 5. Write 800MB of data to volume(3)\n 6. Detach volume(3)\n 7. Attach volume(3) to node-1\n 8. Use a retry loop to:\n Wait until volume(3) finishes attaching.\n Wait until Longhorn starts rebuilding a replica on node-1\n Immediately detach volume(3)\n 9. Verify that the replica on node-1 is in ERR state.\n 10. Attach volume(3) to node-1\n 11. Wait until volume(3) finishes attaching.\n 12. Use a retry loop to verify that Longhorn cleans up the ERR replica,\n rebuilds a new replica on node-1, and removes the replica on node-3\n\n Case 4: Make sure a failed-to-schedule local replica doesn't block\n the creation of other replicas.\n\n 1. Disable scheduling for node-3\n 2. Create a vol with 1 replica, `dataLocality = best-effort`.\n The replica is scheduled on a node (say node-1)\n 3. Attach vol to node-3. There is a fail-to-schedule\n replica with Spec.HardNodeAffinity=node-3\n 4. Increase numberOfReplica to 3. Verify that the replica set contains:\n one on node-1, one on node-2, one failed replica\n with Spec.HardNodeAffinity=node-3.\n 5. Decrease numberOfReplica to 2. Verify that the replica set contains:\n one on node-1, one on node-2, one failed replica\n with Spec.HardNodeAffinity=node-3.\n 6. Decrease numberOfReplica to 1. Verify that the replica set contains:\n one on node-1 or node-2, one failed replica\n with Spec.HardNodeAffinity=node-3.\n 7. Decrease numberOfReplica to 2. Verify that the replica set contains:\n one on node-1, one on node-2, one failed replica\n with Spec.HardNodeAffinity=node-3.\n 8. Turn off data locality by setting `dataLocality=disabled` for the vol.\n Verify that the replica set contains: one on node-1, one on node-2\n\n 9. Clean up\n \"\"\"\n\n # Case 1: Test that Longhorn builds a local replica on the engine node\n\n nodes = client.list_node()\n\n default_data_locality_setting = \\\n client.by_id_setting(SETTING_DEFAULT_DATA_LOCALITY)\n try:\n client.update(default_data_locality_setting, value=\"disabled\")\n except Exception as e:\n print(\"Exception when updating Default Data Locality setting\",\n default_data_locality_setting, e)\n\n volume1_name = volume_name + \"-1\"\n volume1_size = str(500 * Mi)\n volume1_data_path = \"/data/test\"\n pv1_name = volume1_name + \"-pv\"\n pvc1_name = volume1_name + \"-pvc\"\n pod1_name = volume1_name + \"-pod\"\n pod1 = pod\n\n pod1['metadata']['name'] = pod1_name\n\n volume1 = create_and_check_volume(client,\n volume1_name,\n num_of_replicas=1,\n size=volume1_size)\n\n volume1 = client.by_id_volume(volume1_name)\n create_pv_for_volume(client, core_api, volume1, pv1_name)\n create_pvc_for_volume(client, core_api, volume1, pvc1_name)\n\n volume1 = client.by_id_volume(volume1_name)\n volume1_replica_node = volume1.replicas[0]['hostId']\n\n volume1_attached_node = None\n for node in nodes:\n if node.name != volume1_replica_node:\n volume1_attached_node = node.name\n break\n\n assert volume1_attached_node is not None\n\n pod1['spec']['volumes'] = [{\n \"name\": \"pod-data\",\n \"persistentVolumeClaim\": {\n \"claimName\": pvc1_name\n }\n }]\n\n pod1['spec']['nodeSelector'] = \\\n {\"kubernetes.io/hostname\": volume1_attached_node}\n create_and_wait_pod(core_api, pod1)\n\n write_pod_volume_random_data(core_api, pod1_name,\n volume1_data_path, DATA_SIZE_IN_MB_2)\n\n for i in range(10):\n volume1 = client.by_id_volume(volume1_name)\n assert len(volume1.replicas) == 1\n assert volume1.replicas[0]['hostId'] != volume1_attached_node\n 
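# Poll once per second for ~10 seconds; asserting on every pass checks that\n # no replica is ever placed on the engine node while dataLocality stays\n # disabled, rather than checking only once at the end.\n 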
time.sleep(1)\n\n volume1 = client.by_id_volume(volume1_name)\n volume1.updateDataLocality(dataLocality=\"best-effort\")\n\n for _ in range(RETRY_COUNTS):\n volume1 = client.by_id_volume(volume1_name)\n assert volume1[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_HEALTHY\n if len(volume1.replicas) == 1 and \\\n volume1.replicas[0]['hostId'] == volume1_attached_node:\n break\n time.sleep(RETRY_INTERVAL)\n assert len(volume1.replicas) == 1\n assert volume1.replicas[0]['hostId'] == volume1_attached_node\n\n delete_and_wait_pod(core_api, pod1_name)\n volume1 = wait_for_volume_detached(client, volume1_name)\n\n volume1_replica_node = volume1.replicas[0]['hostId']\n\n volume1_attached_node = None\n for node in nodes:\n if node.name != volume1_replica_node:\n volume1_attached_node = node.name\n break\n\n assert volume1_attached_node is not None\n\n pod1['spec']['nodeSelector'] = \\\n {\"kubernetes.io/hostname\": volume1_attached_node}\n create_and_wait_pod(core_api, pod1)\n for _ in range(RETRY_COUNTS):\n volume1 = client.by_id_volume(volume1_name)\n assert volume1[VOLUME_FIELD_ROBUSTNESS] == VOLUME_ROBUSTNESS_HEALTHY\n if len(volume1.replicas) == 1 and \\\n volume1.replicas[0]['hostId'] == volume1_attached_node:\n break\n time.sleep(RETRY_INTERVAL)\n assert len(volume1.replicas) == 1\n assert volume1.replicas[0]['hostId'] == volume1_attached_node\n delete_and_wait_pod(core_api, pod1_name)\n wait_for_volume_detached(client, volume1_name)\n\n # Case 2: Test that Longhorn prioritizes deleting replicas on the same node\n\n node1 = nodes[0]\n node2 = nodes[1]\n node3 = nodes[2]\n\n client.update(node1, allowScheduling=True, tags=[\"AVAIL\"])\n client.update(node2, allowScheduling=True, tags=[\"AVAIL\"])\n\n replica_node_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n try:\n client.update(replica_node_soft_anti_affinity_setting,\n value=\"true\")\n except Exception as e:\n print(\"Exception when update \"\n \"Replica Node Level Soft Anti-Affinity setting\",\n replica_node_soft_anti_affinity_setting, e)\n\n volume2_name = volume_name + \"-2\"\n volume2_size = str(500 * Mi)\n pv2_name = volume2_name + \"-pv\"\n pvc2_name = volume2_name + \"-pvc\"\n pod2_name = volume2_name + \"-pod\"\n pod2 = pod\n\n pod2['metadata']['name'] = pod2_name\n\n volume2 = client.create_volume(name=volume2_name,\n size=volume2_size,\n numberOfReplicas=3,\n nodeSelector=[\"AVAIL\"],\n dataLocality=\"best-effort\")\n\n volume2 = wait_for_volume_detached(client, volume2_name)\n volume2 = client.by_id_volume(volume2_name)\n create_pv_for_volume(client, core_api, volume2, pv2_name)\n create_pvc_for_volume(client, core_api, volume2, pvc2_name)\n\n volume2 = client.by_id_volume(volume2_name)\n\n pod2['spec']['volumes'] = [{\n \"name\": \"pod-data\",\n \"persistentVolumeClaim\": {\n \"claimName\": pvc2_name\n }\n }]\n\n pod2['spec']['nodeSelector'] = {\"kubernetes.io/hostname\": node3.name}\n create_and_wait_pod(core_api, pod2)\n\n volume2 = wait_for_volume_healthy(client, volume2_name)\n\n for replica in volume2.replicas:\n assert replica[\"hostId\"] != node3.name\n\n volume2.updateReplicaCount(replicaCount=2)\n\n # 2 Healthy replicas and 1 replica failed to schedule\n # The failed to schedule replica is the local replica on node3\n volume2 = wait_for_volume_replica_count(client, volume2_name, 3)\n volume2 = client.by_id_volume(volume2_name)\n\n volume2_healthy_replicas = []\n for replica in volume2.replicas:\n if replica.running is True:\n 
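# Count only running replicas as healthy; the extra, non-running entry\n # is the local replica that failed to schedule on node3.\n 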
volume2_healthy_replicas.append(replica)\n\n assert len(volume2_healthy_replicas) == 2\n\n volume2_rep1 = volume2_healthy_replicas[0]\n volume2_rep2 = volume2_healthy_replicas[1]\n assert volume2_rep1[\"hostId\"] != volume2_rep2[\"hostId\"]\n delete_and_wait_pod(core_api, pod2_name)\n wait_for_volume_detached(client, volume2_name)\n\n # Case 3: Test that the volume is not corrupted if there is an unexpected\n # detachment during building local replica\n\n client.update(node1, allowScheduling=True, tags=[])\n client.update(node2, allowScheduling=True, tags=[])\n\n replica_node_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n try:\n client.update(replica_node_soft_anti_affinity_setting,\n value=\"false\")\n except Exception as e:\n print(\"Exception when update \"\n \"Replica Node Level Soft Anti-Affinity setting\",\n replica_node_soft_anti_affinity_setting, e)\n\n volume3_name = volume_name + \"-3\"\n volume3_size = str(1 * Gi)\n volume3_data_path = \"/data/test\"\n pv3_name = volume3_name + \"-pv\"\n pvc3_name = volume3_name + \"-pvc\"\n pod3_name = volume3_name + \"-pod\"\n pod3 = pod\n\n pod3['metadata']['name'] = pod3_name\n\n volume3 = client.create_volume(name=volume3_name,\n size=volume3_size,\n numberOfReplicas=1)\n\n volume3 = wait_for_volume_detached(client, volume3_name)\n volume3 = client.by_id_volume(volume3_name)\n create_pv_for_volume(client, core_api, volume3, pv3_name)\n create_pvc_for_volume(client, core_api, volume3, pvc3_name)\n\n volume3 = client.by_id_volume(volume3_name)\n\n pod3['spec']['volumes'] = [{\n \"name\": \"pod-data\",\n \"persistentVolumeClaim\": {\n \"claimName\": pvc3_name\n }\n }]\n\n pod3['spec']['nodeSelector'] = {\"kubernetes.io/hostname\": node3.name}\n create_and_wait_pod(core_api, pod3)\n volume3 = wait_for_volume_healthy(client, volume3_name)\n\n write_pod_volume_random_data(core_api, pod3_name,\n volume3_data_path, DATA_SIZE_IN_MB_4)\n\n volume3.updateDataLocality(dataLocality=\"best-effort\")\n volume3 = client.by_id_volume(volume3_name)\n\n if volume3.replicas[0]['hostId'] != node3.name:\n wait_for_rebuild_start(client, volume3_name)\n volume3 = client.by_id_volume(volume3_name)\n assert len(volume3.replicas) == 2\n wait_for_rebuild_complete(client, volume3_name)\n\n volume3 = wait_for_volume_replica_count(client, volume3_name, 1)\n assert volume3.replicas[0][\"hostId\"] == node3.name\n\n delete_and_wait_pod(core_api, pod3_name)\n\n pod3['spec']['nodeSelector'] = {\"kubernetes.io/hostname\": node1.name}\n create_and_wait_pod(core_api, pod3)\n\n wait_for_rebuild_start(client, volume3_name)\n crash_engine_process_with_sigkill(client, core_api, volume3_name)\n delete_and_wait_pod(core_api, pod3_name)\n wait_for_volume_detached(client, volume3_name)\n volume3 = client.by_id_volume(volume3_name)\n assert len(volume3.replicas) == 1\n assert volume3.replicas[0][\"hostId\"] == node3.name\n\n create_and_wait_pod(core_api, pod3)\n wait_for_rebuild_start(client, volume3_name)\n volume3 = client.by_id_volume(volume3_name)\n assert len(volume3.replicas) == 2\n wait_for_rebuild_complete(client, volume3_name)\n\n # Wait for deletion of extra replica\n volume3 = wait_for_volume_replica_count(client, volume3_name, 1)\n assert volume3.replicas[0][\"hostId\"] == node1.name\n assert volume3.replicas[0][\"mode\"] == \"RW\"\n assert volume3.replicas[0][\"running\"] is True\n\n delete_and_wait_pod(core_api, pod3_name)\n wait_for_volume_detached(client, volume3_name)\n\n # Case 4: Make sure failed to schedule local 
replica doesn't block\n # the creation of other replicas.\n\n replica_node_soft_anti_affinity_setting = \\\n client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)\n try:\n client.update(replica_node_soft_anti_affinity_setting,\n value=\"false\")\n except Exception as e:\n print(\"Exception when updating \"\n \"Replica Node Level Soft Anti-Affinity setting\",\n replica_node_soft_anti_affinity_setting, e)\n\n client.update(node3, allowScheduling=False)\n\n volume4_name = volume_name + \"-4\"\n volume4_size = str(1 * Gi)\n\n volume4 = client.create_volume(name=volume4_name,\n size=volume4_size,\n numberOfReplicas=1,\n dataLocality=\"best-effort\")\n\n volume4 = wait_for_volume_detached(client, volume4_name)\n volume4 = client.by_id_volume(volume4_name)\n\n volume4_replica_name = volume4.replicas[0][\"name\"]\n\n volume4.attach(hostId=node3.name)\n\n wait_for_volume_healthy(client, volume4_name)\n\n volume4 = client.by_id_volume(volume4_name)\n assert len(volume4.replicas) == 2\n\n for replica in volume4.replicas:\n if replica[\"name\"] == volume4_replica_name:\n assert replica[\"running\"] is True\n assert replica[\"mode\"] == \"RW\"\n else:\n assert replica[\"running\"] is False\n assert replica[\"mode\"] == \"\"\n\n assert volume4.conditions.scheduled.reason == \\\n \"LocalReplicaSchedulingFailure\"\n\n volume4 = volume4.updateReplicaCount(replicaCount=3)\n\n volume4 = wait_for_volume_degraded(client, volume4_name)\n\n v4_node1_replica_count = 0\n v4_node2_replica_count = 0\n v4_failed_replica_count = 0\n\n for replica in volume4.replicas:\n if replica[\"hostId\"] == node1.name:\n v4_node1_replica_count += 1\n elif replica[\"hostId\"] == node2.name:\n v4_node2_replica_count += 1\n elif replica[\"hostId\"] == \"\":\n v4_failed_replica_count += 1\n\n assert v4_node1_replica_count == 1\n assert v4_node2_replica_count == 1\n assert v4_failed_replica_count > 0\n\n volume4 = volume4.updateReplicaCount(replicaCount=2)\n\n volume4 = wait_for_volume_replica_count(client, volume4_name, 3)\n\n v4_node1_replica_count = 0\n v4_node2_replica_count = 0\n v4_failed_replica_count = 0\n\n for replica in volume4.replicas:\n if replica[\"hostId\"] == node1.name:\n v4_node1_replica_count += 1\n elif replica[\"hostId\"] == node2.name:\n v4_node2_replica_count += 1\n elif replica[\"hostId\"] == \"\":\n v4_failed_replica_count += 1\n\n assert v4_node1_replica_count == 1\n assert v4_node2_replica_count == 1\n assert v4_failed_replica_count > 0\n\n volume4 = volume4.updateReplicaCount(replicaCount=1)\n\n volume4 = wait_for_volume_replica_count(client, volume4_name, 2)\n\n v4_node1_replica_count = 0\n v4_node2_replica_count = 0\n v4_failed_replica_count = 0\n\n for replica in volume4.replicas:\n if replica[\"hostId\"] == node1.name:\n v4_node1_replica_count += 1\n elif replica[\"hostId\"] == node2.name:\n v4_node2_replica_count += 1\n elif replica[\"hostId\"] == \"\":\n v4_failed_replica_count += 1\n\n assert v4_node1_replica_count + v4_node2_replica_count == 1\n assert v4_failed_replica_count == 1\n\n volume4 = volume4.updateDataLocality(dataLocality=\"disabled\")\n volume4 = volume4.updateReplicaCount(replicaCount=2)\n\n running_replica_count = 0\n for _ in range(RETRY_COUNTS):\n volume4 = client.by_id_volume(volume4_name)\n running_replica_count = 0\n for r in volume4.replicas:\n if r.failedAt == \"\" and r.running is True:\n running_replica_count += 1\n if running_replica_count == 2:\n break\n time.sleep(RETRY_INTERVAL)\n assert running_replica_count == 2\n\n v4_node1_replica_count = 0\n 
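# Re-tally replica placement after disabling data locality; node3 still\n # has scheduling disabled, so no replica should end up there.\n 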
v4_node2_replica_count = 0\n v4_node3_replica_count = 0\n\n for replica in volume4.replicas:\n wait_for_replica_running(client, volume4_name, replica[\"name\"])\n if replica[\"hostId\"] == node1.name:\n v4_node1_replica_count += 1\n elif replica[\"hostId\"] == node2.name:\n v4_node2_replica_count += 1\n elif replica[\"hostId\"] == node3.name:\n v4_node3_replica_count += 1\n assert v4_node1_replica_count == 1\n assert v4_node2_replica_count == 1\n assert v4_node3_replica_count == 0\n","sub_path":"manager/integration/tests/test_scheduling.py","file_name":"test_scheduling.py","file_ext":"py","file_size_in_byte":35756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"270681824","text":"#!/usr/bin/python3\nfrom sys import stdin\nfrom itertools import accumulate\n\n\"\"\"# for python2\ndef accumulate (a):\n for i in range (1, len (a)):\n a [i] += a [i - 1]\"\"\"\n\ndef main ():\n read = stdin.readline\n n, q = map (int, read ().split ())\n t = sorted (map (int, read ().split ()), reverse = True)\n s = read ()\n t = [0] + list (accumulate (t))\n for q_ in range (q):\n print (t [int (read ())])\n\nif __name__ == \"__main__\": main ()","sub_path":"_scoring_in_exam.py","file_name":"_scoring_in_exam.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"479591173","text":"\"\"\"\n\tReferences:\n\t\thttps://towardsdatascience.com/step-by-step-vgg16-implementation-in-keras-for-beginners-a833c686ae6c\n\thttps://www.tensorflow.org/api_docs/python/tf/keras/initializers/HeNormal\n\"\"\"\n\nimport os\n# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, losses\nfrom tensorflow.keras.models import Model\nfrom utils.helpers import gaussian_kernel_layer\n\n\nclass VGG16CustomCbs(Model):\n\tdef __init__(self, input_shape, n_classes=10):\n\t\tsuper(VGG16CustomCbs, self).__init__()\n\t\tn_filters = [64, 128, 256, 512]\n\t\tinput = keras.Input(shape=input_shape)\n\t\tinitializer = tf.keras.initializers.HeNormal()\n\t\tregularizer = tf.keras.regularizers.l2(5e-4)\t# from original paper \n\n\t\tself.conv_1 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", kernel_initializer=initializer, kernel_regularizer=regularizer)\n\t\tself.act_1 = layers.Activation('relu')\n\t\tself.conv_2 = layers.Conv2D(filters=n_filters[0], kernel_size=(3, 3), padding=\"same\", kernel_initializer=initializer, kernel_regularizer=regularizer)\n\t\tself.pool_2 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))\n\t\tself.act_2 = layers.Activation('relu')\n\n\t\tself.conv_3 = [layers.Conv2D(filters=n_filters[1], kernel_size=(3, 3), padding=\"same\", kernel_initializer=initializer, kernel_regularizer=regularizer) for i in range(2)]\n\t\tself.pool_3 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))\n\t\tself.act_3 = layers.Activation('relu')\n\n\t\tself.conv_4 = [layers.Conv2D(filters=n_filters[2], kernel_size=(3, 3), padding=\"same\", kernel_initializer=initializer, kernel_regularizer=regularizer) for i in range(3)]\n\t\tself.pool_4 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))\n\t\tself.act_4 = layers.Activation('relu')\n\n\t\tself.conv_5 = [layers.Conv2D(filters=n_filters[3], kernel_size=(3, 3), padding=\"same\", kernel_initializer=initializer, kernel_regularizer=regularizer) for i in range(3)]\n\t\tself.pool_5 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 
2))\n\t\tself.act_5 = layers.Activation('relu')\n\n\t\tself.conv_6 = [layers.Conv2D(filters=n_filters[3], kernel_size=(3, 3), padding=\"same\", kernel_initializer=initializer, kernel_regularizer=regularizer) for i in range(3)]\n\t\tself.pool_6 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))\n\t\tself.act_6 = layers.Activation('relu')\n\n\t\tself.flat = layers.Flatten()\n\t\tself.fc_7 = layers.Dense(units=4096, activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=regularizer)\n\t\tself.drop_7 = layers.Dropout(0.5)\n\t\tself.fc_8 = layers.Dense(units=4096, activation=\"relu\", kernel_initializer=initializer, kernel_regularizer=regularizer)\n\t\tself.drop_8 = layers.Dropout(0.5)\n\t\tself.out = layers.Dense(units=n_classes, activation=\"softmax\", kernel_initializer=initializer, kernel_regularizer=regularizer)\n\n\tdef call(self, x, sigma):\n\t\tx = self.conv_1(x)\n\t\tx = self.act_1(gaussian_kernel_layer(x, sigma))\n\t\tx = self.conv_2(x)\n\t\tx = gaussian_kernel_layer(x, sigma)\n\t\tx = self.act_2(self.pool_2(x))\n\t\t\n\t\tfor i in range(2):\n\t\t\tx = self.conv_3[i](x)\n\t\t\tx = gaussian_kernel_layer(x, sigma)\n\t\tx = self.act_3(self.pool_3(x))\n\t\t\n\t\tfor i in range(3):\n\t\t\tx = self.conv_4[i](x)\n\t\t\tx = gaussian_kernel_layer(x, sigma)\n\t\tx = self.act_4(self.pool_4(x))\n\t\t\n\t\tfor i in range(3):\n\t\t\tx = self.conv_5[i](x)\n\t\t\tx = gaussian_kernel_layer(x, sigma)\n\t\tx = self.act_5(self.pool_5(x))\n\n\t\tfor i in range(3):\n\t\t\tx = self.conv_6[i](x)\n\t\t\tx = gaussian_kernel_layer(x, sigma)\n\t\tx = self.act_6(self.pool_6(x))\n\t\tx = self.flat(x)\n\t\tx = self.fc_7(x)\n\t\tx = self.drop_7(x)\n\t\tx = self.fc_8(x)\n\t\tx = self.drop_8(x)\n\t\tx = self.out(x)\n\t\treturn x\n\ndef train_step(model, optim, X, Y, sigma=1):\n\twith tf.GradientTape() as tape:\n\t\tY_cap = model(X, sigma, training=True)\n\t\tloss = losses.SparseCategoricalCrossentropy()(Y, Y_cap)\n\tvariables = model.trainable_variables\n\tgradeints = tape.gradient(loss, variables)\n\toptim.apply_gradients(zip(gradeints, variables))\n\treturn loss, Y_cap\n\ndef test_step(model, X, Y, sigma=1):\n\tY_cap = model(X, sigma, training=False)\n\tloss = losses.SparseCategoricalCrossentropy()(Y, Y_cap)\n\treturn loss, Y_cap","sub_path":"models/vgg16_cbs.py","file_name":"vgg16_cbs.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"89179831","text":"# -*- coding: utf-8 -*-\r\n# !/usr/bin/env python\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.common.exceptions import NoAlertPresentException\r\nimport unittest, re, random\r\nfrom login_ds import loginFirefox\r\nfrom login_ds import loginIE\r\nfrom VerifyWords import VerifyWords\r\nfrom time import sleep\r\nfrom to_log import tolog\r\nfrom namegenerator import random_key\r\n\r\nimport time\r\nPass=\"'result': 'p'\"\r\nFail=\"'result': 'f'\"\r\n\r\nclass ExtendPool(unittest.TestCase):\r\n\r\n\r\n def test_extend_pool(self):\r\n Failflag = False\r\n self.driver = loginFirefox()\r\n # self.driver.implicitly_wait(30)\r\n self.verificationErrors = []\r\n self.accept_next_alert = True\r\n driver = self.driver\r\n\r\n strip_size = [\"64 KB\", \"128 KB\", \"256 KB\", \"512 KB\", \"1 MB\"]\r\n sector_size = [\"512 Bytes\", \"1 KB\", \"2 
KB\", \"4 KB\"]\r\n\r\n Prefer_ctrl = [1, 2]\r\n #disklist = [\"1\", \"4\", \"5\", \"6\", \"8\", \"9\", \"10\", \"11\", \"12\"]\r\n #disklist = [1, 3, 4, 5, 6, 8, 9, 10, 11, 12]\r\n disklist = [\"1\", \"5\", \"6\", \"9\", \"10\"]\r\n diskrem = [4, 8, 11, 12]\r\n\r\n # block_size = ['512 Bytes', '1 KB', '2 KB', '4 KB', '8 KB', '16 KB', '32 KB', '64 KB', '128 KB']\r\n # volume_sector = ['512 Bytes', '1 KB', '2 KB', '4 KB']\r\n\r\n raid_level = [\"RAID0\", \"RAID1\", \"RAID5\", \"RAID6\"]\r\n\r\n tolog(\"Start to create pool!\")\r\n sleep(5)\r\n driver.find_element_by_link_text(\"Pool\").click()\r\n sleep(3)\r\n driver.find_element_by_xpath(\"//div[2]/button\").click()\r\n sleep(1)\r\n\r\n #poolnum0=poolnum1=volnum0=volnum1=snapnum0=snapnum1=clonenum0=clonenum1=0\r\n validatelist = list()\r\n\r\n sleep(1)\r\n driver.find_element_by_name(\"name\").clear()\r\n pool_name = random_key(10)\r\n driver.find_element_by_name(\"name\").send_keys(pool_name)\r\n sleep(1)\r\n Select(driver.find_element_by_name(\"mediatype\")).select_by_visible_text(\"Hard Disk Drive\")\r\n raid = random.choice(raid_level)\r\n stripsize=random.choice(strip_size)\r\n sectorsize=random.choice(sector_size)\r\n if raid == \"RAID0\":\r\n disks = random.sample(disklist, 1)\r\n elif raid == \"RAID1\":\r\n disks = random.sample(disklist, 2)\r\n # elif raid == \"RAID50\":\r\n # disks = random.sample(disklist, 6)\r\n # elif raid == \"RAID60\":\r\n # disks = random.sample(disklist, 8)\r\n else:\r\n disks = random.sample(disklist, 4)\r\n\r\n # for pool extend\r\n # diskrem=list()\r\n # for disk in disklist:\r\n # if disk in disks:\r\n # pass\r\n # else:\r\n # diskrem.append(int(disk))\r\n #diskrem = disklist - disks\r\n\r\n\r\n\r\n disks.sort()\r\n verifydisk = list()\r\n for disk in disks:\r\n verifydisk.append(int(disk))\r\n # the verifydisk list will be verified in detail list by removing spaces\r\n verifydisk.sort()\r\n\r\n disks = verifydisk[::-1]\r\n # print disks\r\n # click disk in reverse order to avoid the unapplicable disk selection\r\n #\r\n\r\n for disk in disks:\r\n sleep(1)\r\n driver.find_element_by_xpath(\"//div[2]/div/div/ul/li[%s]\" % (str(disk))).click()\r\n\r\n sleep(2)\r\n # verifydisk.sort()\r\n Select(driver.find_element_by_name(\"raidlevel\")).select_by_visible_text(raid)\r\n sleep(1)\r\n # sectorsize=random.choice(sector_size)\r\n Select(driver.find_element_by_name(\"strip\")).select_by_visible_text(stripsize)\r\n sleep(1)\r\n Select(driver.find_element_by_name(\"sector\")).select_by_visible_text(sectorsize)\r\n # sleep(1)\r\n # ctrlid = random.choice(Prefer_ctrl)\r\n\r\n # driver.find_element_by_xpath(\"//label[%d]/span\" % ctrlid).click()\r\n sleep(5)\r\n\r\n driver.find_element_by_xpath(\"//button[@type='submit']\").click()\r\n for i in range(60):\r\n try:\r\n if re.search(r\"^[\\s\\S]*Pool was added successfully.[\\s\\S]*$\",\r\n driver.find_element_by_css_selector(\"BODY\").text):\r\n tolog(\"Pool %s was added successfully.\" % pool_name);\r\n break\r\n except:\r\n pass\r\n time.sleep(1)\r\n else:\r\n self.fail(\"time out\")\r\n sleep(5)\r\n validatelist.append(VerifyWords(driver, (pool_name, raid)))\r\n sleep(2)\r\n driver.find_element_by_xpath(\"//pr-gear-button/div/a\").click()\r\n sleep(2)\r\n\r\n driver.find_element_by_link_text(\"View Detail\").click()\r\n sleep(5)\r\n verifydiskstr = str(verifydisk).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\r\n validatelist.append(VerifyWords(driver, (pool_name, raid, stripsize, sectorsize, verifydiskstr)))\r\n\r\n tolog(\"Start to extend pool 
from gear button\")\r\n sleep(4)\r\n validatelist.append(VerifyWords(driver, (pool_name, raid, stripsize, sectorsize, verifydiskstr)))\r\n\r\n driver.find_element_by_xpath(\"//li/ul/li/a/span/span\").click()\r\n sleep(2)\r\n driver.find_element_by_xpath(\"//pr-gear-button/div/a\").click()\r\n sleep(2)\r\n\r\n driver.find_element_by_link_text(\"Extend Pool\").click()\r\n\r\n diskrem=random.sample(diskrem, len(disks))\r\n diskrem.sort()\r\n\r\n #disklistrem = list()\r\n\r\n disklistrem = diskrem[::-1]\r\n\r\n for disk in disklistrem:\r\n sleep(1)\r\n driver.find_element_by_xpath(\"//div[2]/div/div/ul/li[%s]\" % (str(disk))).click()\r\n # # li3 is a location that not contain disk, clicking li3 leads the hover information upwards\r\n # # so the submit button can be clicked\r\n # driver.find_element_by_xpath(\"//div[2]/div/div/ul/li3\").click()\r\n # sleep(2)\r\n driver.find_element_by_xpath(\"//button[@type='submit']\").click()\r\n\r\n for i in range(60):\r\n try:\r\n if re.search(r\"^[\\s\\S]*Pool was extended successfully.[\\s\\S]*$\",\r\n driver.find_element_by_css_selector(\"BODY\").text):\r\n tolog(\"Pool %s was extended successfully.\" % pool_name);\r\n break\r\n except:\r\n pass\r\n time.sleep(1)\r\n else:\r\n self.fail(\"time out\")\r\n\r\n sleep(5)\r\n validatelist.append(VerifyWords(driver, (pool_name, raid)))\r\n # sleep(2)\r\n # driver.find_element_by_xpath(\"//li/ul/li/a/span/span\").click()\r\n sleep(5)\r\n driver.find_element_by_xpath(\"//pr-gear-button/div/a\").click()\r\n sleep(2)\r\n\r\n driver.find_element_by_link_text(\"View Detail\").click()\r\n sleep(3)\r\n verifydiskstr = verifydiskstr+\",\"+str(diskrem).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\r\n validatelist.append(VerifyWords(driver, (pool_name, raid, stripsize, sectorsize, verifydiskstr)))\r\n sleep(2)\r\n driver.find_element_by_xpath(\"//li/ul/li/a/span/span\").click()\r\n sleep(2)\r\n driver.find_element_by_xpath(\"//pr-gear-button/div/a\").click()\r\n sleep(2)\r\n\r\n driver.find_element_by_link_text(\"Delete\").click()\r\n\r\n time.sleep(2)\r\n driver.find_element_by_name(\"name\").clear()\r\n driver.find_element_by_name(\"name\").send_keys(\"confirm\")\r\n time.sleep(2)\r\n driver.find_element_by_xpath(\"//button[@type='submit']\").click()\r\n for i in range(60):\r\n try:\r\n if re.search(r\"^[\\s\\S]*Pool was deleted successfully.[\\s\\S]*$\",\r\n driver.find_element_by_css_selector(\"BODY\").text):\r\n tolog(\"Pool %s was deleted successful!\" % pool_name);\r\n break\r\n except:\r\n pass\r\n time.sleep(1)\r\n else:\r\n self.fail(\"time out\")\r\n for val in validatelist:\r\n if val:\r\n Failflag=True\r\n break\r\n if Failflag:\r\n tolog(Fail)\r\n else:\r\n tolog(Pass)\r\n\r\n def is_element_present(self, how, what):\r\n try:\r\n self.driver.find_element(by=how, value=what)\r\n except NoSuchElementException as e:\r\n return False\r\n return True\r\n\r\n\r\n def is_alert_present(self):\r\n try:\r\n self.driver.switch_to_alert()\r\n except NoAlertPresentException as e:\r\n return False\r\n return True\r\n\r\n\r\n def close_alert_and_get_its_text(self):\r\n try:\r\n alert = self.driver.switch_to_alert()\r\n alert_text = alert.text\r\n if self.accept_next_alert:\r\n alert.accept()\r\n else:\r\n alert.dismiss()\r\n return alert_text\r\n finally:\r\n self.accept_next_alert = True\r\n\r\n\r\n def tearDown(self):\r\n self.driver.quit()\r\n self.assertEqual([], self.verificationErrors)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
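# Run the ExtendPool test case with unittest's default runner.\r\n 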
unittest.main()","sub_path":"extend_pool_from_gear_button.py","file_name":"extend_pool_from_gear_button.py","file_ext":"py","file_size_in_byte":9143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"578065339","text":"def solution(priorities, location):\n \"\"\"\n A classic queue simulation. Note that although the jobs carry priorities,\n this is not a priority queue: a re-enqueued job does not need to keep any sorted order,\n because only the print order (tracked through location) matters.\n The time complexity is dominated by the while loop, i.e. O(N^2).\n \"\"\"\n answer = 0\n\n # Initialize the queue with (priority, index) pairs\n q = []\n\n for i, p in enumerate(priorities):\n q.append( (p, i) )\n\n while q:\n (priority, loc) = q.pop(0)\n # Check whether any queued job has a higher priority than this one\n if_bigger_than = [ elem for elem in q if elem[0] > priority ] \n # If so, put it back at the end of the queue\n if if_bigger_than:\n q.append( (priority, loc) )\n # Otherwise increment answer, and stop once the target location prints.\n else:\n answer += 1\n\n if loc == location:\n break\n \n return answer\n\nif __name__=='__main__':\n priorities = [1, 1, 9, 1, 1, 1]# [2, 1, 3, 2]\n location = 0#2\n\n print(solution(priorities, location))","sub_path":"programmers/ch02_stack_and_queue/Sol05_Printer.py","file_name":"Sol05_Printer.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"289997213","text":"import sys\nlines = [line for line in sys.stdin.read().split(\"\\n\")]\ndictionary = {}\nfor i in range(8):\n for j in range(8):\n dictionary[(i, j)] = None\nfor i, line in enumerate(lines):\n if i >= 2: break\n tokens = line.split(\" \")\n if len(tokens[1]) == 0: continue\n list = tokens[1].split(\",\")\n for str in list:\n if len(str) == 2: str = \"P\"+str\n dictionary[(ord(str[1])-ord(\"a\"), 7-(ord(str[2])-ord(\"1\")))] = (str[0], i)\n\nbound = \"+---+---+---+---+---+---+---+---+\"\nfor j in range(8):\n print(bound)\n for i in range(8):\n print(\"|\", end=\"\")\n if ((i+j) & 1) == 1: print(\":\", end=\"\")\n else: print(\".\", end=\"\")\n if dictionary[(i, j)] is None:\n if ((i+j) & 1) == 1: print(\":\", end=\"\")\n else: print(\".\", end=\"\")\n elif dictionary[(i, j)][1] == 0:\n print(dictionary[(i, j)][0], end=\"\")\n else:\n print(dictionary[(i, j)][0].lower(), end=\"\")\n if ((i+j) & 1) == 1: print(\":\", end=\"\")\n else: print(\".\", end=\"\")\n print(\"|\")\nprint(bound)","sub_path":"HCPC/2019-20/Contest4Gold/empleh.py","file_name":"empleh.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"258719475","text":"import pandas as pd\nimport pathlib\nimport numpy as np\nimport math\nfrom coal_data_processing_functions import state_abbreviations, lower_case_data_keys, generic_coal_rank, \\\n pull_state_abbreviation, completing_missing_keys_in_a_describe_dataframe\n\n# Set universal options for the code file.\npd.set_option('display.max_rows', 10)\nfileDir = pathlib.Path(__file__).parents[2]\n\n# Read in (1) COALQUAL Data, (2) the amount of coal mining done in each county, and (3) the county names and FIPS codes\n# tables. 
We use skipfooter to not read in the search criteria rows.\ncoalqual_filename = fileDir / 'Data' / 'COALQUAL Data' / 'CQ_upper_level.csv'\nCOALQUAL = pd.read_csv(coalqual_filename, header=0,\n names=['State', 'County', 'Province', 'Region', 'Field', 'Formation', 'Bed', 'Apparent_Rank',\n 'Sulfur', 'Heat', 'Arsenic', 'Boron', 'Bromine', 'Chlorides', 'Mercury', 'Lead',\n 'Selenium'],\n usecols=[1, 2, 5, 6, 7, 9, 11, 28, 84, 87, 147, 151, 159, 165, 191, 219, 239])\n\nmining_volume_filename = fileDir / 'Intermediate' / 'Coal Mining By Counties.csv'\nMining_Volume = pd.read_csv(mining_volume_filename, header=0, names=['Coal_Sales', 'FIPS_Code_State',\n 'County_Name_State_Normal_Capitalization'],\n usecols=[1, 2, 8])\n\nfips_codes_filename = fileDir / 'Intermediate' / 'FIPS_Code.csv'\nFIPS_Codes = pd.read_csv(fips_codes_filename, header=0, names=['FIPS_Code,State', 'County_Name_State'], usecols=[6, 7])\n\n# Process the columns that will serve as keys for the data merging. This involves creating (1) a state abbreviation\n# for COALQUAL, (2) a basin-level identifier for COALQUAL, and (3) a County, State string in all lower case.\nCOALQUAL['State_Abbreviation'] = state_abbreviations(COALQUAL.State)\nCOALQUAL['Province_Region'] = COALQUAL['Province'] + ' ' + COALQUAL['Region']\nCounty_Name_State_Normal_Capitalization = COALQUAL['County'] + ' County, ' + COALQUAL['State_Abbreviation']\nCOALQUAL['County_Name_State'] = lower_case_data_keys(County_Name_State_Normal_Capitalization)\n\nMining_Volume['County_Name_State'] = lower_case_data_keys(Mining_Volume['County_Name_State_Normal_Capitalization'])\n\nFIPS_Codes['County_Name_State'] = lower_case_data_keys(FIPS_Codes['County_Name_State'])\n\n# Merge COALQUAL with the FIPS code table to create a table that includes each sample's FIPS code.\nCOALQUAL = pd.merge(COALQUAL, FIPS_Codes, how='left', on='County_Name_State')\n\n# Drop anthracite and samples with blank apparent rank\nCOALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Anthracite']\nCOALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Semianthracite']\nCOALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Rock']\nCOALQUAL = COALQUAL.dropna(subset=['Apparent_Rank'])\n\n# Classify apparent ranks into broad categories.\nCOALQUAL['Rank'] = generic_coal_rank(COALQUAL.Apparent_Rank)\n\n# Create summary tables at the county-level and export this to an excel sheet.\nCounty_Name_State_Summary = COALQUAL.groupby(['FIPS_Code,State', 'Rank']).describe()\n\nwriter = pd.ExcelWriter(str(fileDir / 'Intermediate' / 'County Trace Element Data.xlsx'))\nCounty_Name_State_Summary['Chlorides'].to_excel(writer, 'Chlorides')\nCounty_Name_State_Summary['Selenium'].to_excel(writer, 'Selenium')\nCounty_Name_State_Summary['Boron'].to_excel(writer, 'Boron')\nCounty_Name_State_Summary['Bromine'].to_excel(writer, 'Bromine')\nCounty_Name_State_Summary['Lead'].to_excel(writer, 'Lead')\nCounty_Name_State_Summary['Arsenic'].to_excel(writer, 'Arsenic')\nCounty_Name_State_Summary['Heat'].to_excel(writer, 'Heat')\nCounty_Name_State_Summary['Mercury'].to_excel(writer, 'Mercury')\n\nCounty_Name_State_Summary_Rank = COALQUAL.groupby(['FIPS_Code,State']).describe(exclude=[np.number])\nCounty_Name_State_Summary_Rank['Rank'].to_excel(writer, 'Rank')\nCounty_Name_State_Summary_Rank['State_Abbreviation'].to_excel(writer, 'State_Abbreviation')\nwriter.save()\n\n# Create summary tables for the state-level and export this to an excel sheet.\nState_Summary = COALQUAL.groupby(['State_Abbreviation', 'Rank']).describe()\n\nwriter = pd.ExcelWriter(str(fileDir / 'Intermediate' / 'State Trace Element 
Data.xlsx'))\nState_Summary['Chlorides'].to_excel(writer, 'Chlorides')\nState_Summary['Selenium'].to_excel(writer, 'Selenium')\nState_Summary['Boron'].to_excel(writer, 'Boron')\nState_Summary['Bromine'].to_excel(writer, 'Bromine')\nState_Summary['Lead'].to_excel(writer, 'Lead')\nState_Summary['Arsenic'].to_excel(writer, 'Arsenic')\nState_Summary['Heat'].to_excel(writer, 'Heat')\nState_Summary['Mercury'].to_excel(writer, 'Mercury')\nwriter.save()\n\n# Create summary tables at the rank-level and export this to an excel sheet.\nRank_Summary = COALQUAL.groupby(['Rank']).describe()\n\nwriter = pd.ExcelWriter(str(fileDir / 'Intermediate' / 'Rank Trace Element Data.xlsx'))\nRank_Summary['Chlorides'].to_excel(writer, 'Chlorides')\nRank_Summary['Selenium'].to_excel(writer, 'Selenium')\nRank_Summary['Boron'].to_excel(writer, 'Boron')\nRank_Summary['Bromine'].to_excel(writer, 'Bromine')\nRank_Summary['Lead'].to_excel(writer, 'Lead')\nRank_Summary['Arsenic'].to_excel(writer, 'Arsenic')\nRank_Summary['Heat'].to_excel(writer, 'Heat')\nRank_Summary['Mercury'].to_excel(writer, 'Mercury')\nwriter.save()\n\n# Create summary tables at the region-level and export the resulting table to an excel sheet.\nframes = [COALQUAL.loc[(COALQUAL['Province_Region'].isin(['INTERIOR EASTERN', 'INTERIOR WESTERN',\n 'EASTERN CENTRAL APPALACHIAN']))],\n COALQUAL.loc[(COALQUAL['Province'].isin(['ROCKY MOUNTAIN']))]]\nCOALQUAL_Basin = pd.concat(frames)\nCOALQUAL_Basin = COALQUAL_Basin.loc[(COALQUAL_Basin['State_Abbreviation'].isin(['AZ', 'UT', 'CO', 'NM', 'IN', 'KY',\n 'MO', 'IA', 'NE', 'KS', 'OK', 'AR',\n 'TN', 'WV', 'VA']))]\n\nRegion = []\ni = 0\nwhile i < len(COALQUAL_Basin['Region']):\n if COALQUAL_Basin.Province.iloc[i] == 'ROCKY MOUNTAIN':\n Region.append('ROCKY MOUNTAIN')\n else:\n Region.append(COALQUAL_Basin.Region.iloc[i])\n i += 1\n\n\nRegion = pd.Series(Region)\nCOALQUAL_Basin['Region'] = Region.values\n\nBasin_Summary = COALQUAL_Basin.groupby(['Region', 'Rank']).describe()\nwriter = pd.ExcelWriter(str(fileDir / 'Intermediate' / 'Basin Trace Element Data.xlsx'))\nBasin_Summary['Chlorides'].to_excel(writer, 'Chlorides')\nBasin_Summary['Selenium'].to_excel(writer, 'Selenium')\nBasin_Summary['Boron'].to_excel(writer, 'Boron')\nBasin_Summary['Bromine'].to_excel(writer, 'Bromine')\nBasin_Summary['Lead'].to_excel(writer, 'Lead')\nBasin_Summary['Arsenic'].to_excel(writer, 'Arsenic')\nBasin_Summary['Heat'].to_excel(writer, 'Heat')\nBasin_Summary['Mercury'].to_excel(writer, 'Mercury')\nwriter.save()\n\n\n# Merge these four summary tables into one that fills in data gaps.\n\n# Start with processing the county summary table.\nTrace_Element_County_filename = fileDir / 'Intermediate' / 'County Trace Element Data.xlsx'\nArsenic_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Arsenic', header=0,\n usecols=[0, 1, 3, 6, 7, 8], names=['FIPS_Code,State', 'Rank', 'Arsenic_Mean',\n 'Arsenic_25th', 'Arsenic_50th', 'Arsenic_75th'])\nBoron_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Boron', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['FIPS_Code,State', 'Rank', 'Boron_Mean', 'Boron_25th', 'Boron_50th', 'Boron_75th'])\nBromine_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Bromine', header=0,\n usecols=[0, 1, 3, 6, 7, 8], names=['FIPS_Code,State', 'Rank', 'Bromine_Mean',\n 'Bromine_25th', 'Bromine_50th', 'Bromine_75th'])\nChlorides_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Chlorides', header=0, usecols=[0, 1, 3, 6,\n 7, 8],\n names=['FIPS_Code,State', 
'Rank', 'Chlorides_Mean', 'Chlorides_25th', 'Chlorides_50th',\n 'Chlorides_75th'])\nHeat_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Heat', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['FIPS_Code,State', 'Rank', 'Heat_Mean', 'Heat_25th', 'Heat_50th', 'Heat_75th'])\nLead_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Lead', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['FIPS_Code,State', 'Rank', 'Lead_Mean', 'Lead_25th', 'Lead_50th', 'Lead_75th'])\nSelenium_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Selenium', header=0, usecols=[0, 1, 3, 6, 7,\n 8],\n names=['FIPS_Code,State', 'Rank', 'Selenium_Mean', 'Selenium_25th', 'Selenium_50th',\n 'Selenium_75th'])\nMercury_County = pd.read_excel(Trace_Element_County_filename, sheet_name='Mercury', header=0,\n usecols=[0, 1, 3, 6, 7, 8], names=['FIPS_Code,State', 'Rank', 'Mercury_Mean',\n 'Mercury_25th', 'Mercury_50th', 'Mercury_75th'])\nState_County = pd.read_excel(Trace_Element_County_filename, sheet_name='State_Abbreviation', header=0, usecols=[0, 3],\n names=['FIPS_Code,State', 'State'])\n\nCounty_Trace_Element_Data = pd.DataFrame(np.column_stack([Arsenic_County['FIPS_Code,State'], Arsenic_County['Rank'],\n Arsenic_County['Arsenic_Mean'],\n Arsenic_County['Arsenic_25th'],\n Arsenic_County['Arsenic_50th'],\n Arsenic_County['Arsenic_75th'], Boron_County['Boron_Mean'],\n Boron_County['Boron_25th'], Boron_County['Boron_50th'],\n Boron_County['Boron_75th'], Bromine_County['Bromine_Mean'],\n Bromine_County['Bromine_25th'],\n Bromine_County['Bromine_50th'],\n Bromine_County['Bromine_75th'],\n Chlorides_County['Chlorides_Mean'],\n Chlorides_County['Chlorides_25th'],\n Chlorides_County['Chlorides_50th'],\n Chlorides_County['Chlorides_75th'], Heat_County['Heat_Mean'],\n Heat_County['Heat_25th'], Heat_County['Heat_50th'],\n Heat_County['Heat_75th'], Lead_County['Lead_Mean'],\n Lead_County['Lead_25th'], Lead_County['Lead_50th'],\n Lead_County['Lead_75th'], Selenium_County['Selenium_Mean'],\n Selenium_County['Selenium_25th'],\n Selenium_County['Selenium_50th'],\n Selenium_County['Selenium_75th'],\n Mercury_County['Mercury_Mean'],\n Mercury_County['Mercury_25th'],\n Mercury_County['Mercury_50th'],\n Mercury_County['Mercury_75th']]),\n columns=['FIPS_Code_State', 'Rank', 'Arsenic_Mean', 'Arsenic_25th',\n 'Arsenic_50th', 'Arsenic_75th', 'Boron_Mean', 'Boron_25th',\n 'Boron_50th', 'Boron_75th', 'Bromine_Mean', 'Bromine_25th',\n 'Bromine_50th', 'Bromine_75th', 'Chlorides_Mean', 'Chlorides_25th',\n 'Chlorides_50th', 'Chlorides_75th', 'Heat_Mean', 'Heat_25th',\n 'Heat_50th', 'Heat_75th', 'Lead_Mean', 'Lead_25th', 'Lead_50th',\n 'Lead_75th', 'Selenium_Mean', 'Selenium_25th', 'Selenium_50th',\n 'Selenium_75th', 'Mercury_Mean', 'Mercury_25th', 'Mercury_50th',\n 'Mercury_75th'])\n\nCounty_Trace_Element_Data['State'] = pull_state_abbreviation(County_Trace_Element_Data.FIPS_Code_State)\n\n# Pull and process the state-level summary table.\nTrace_Element_State_filename = fileDir / 'Intermediate' / 'State Trace Element Data.xlsx'\nArsenic_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Arsenic', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['State_Abbreviation', 'Rank', 'Arsenic_Mean', 'Arsenic_25th', 'Arsenic_50th',\n 'Arsenic_75th'])\nBoron_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Boron', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['State_Abbreviation', 'Rank', 'Boron_Mean', 'Boron_25th', 'Boron_50th', 'Boron_75th']\n )\nBromine_State = 
pd.read_excel(Trace_Element_State_filename, sheet_name='Bromine', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['State_Abbreviation', 'Rank', 'Bromine_Mean', 'Bromine_25th', 'Bromine_50th',\n 'Bromine_75th'])\nChlorides_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Chlorides', header=0, usecols=[0, 1, 3, 6, 7,\n 8],\n names=['State_Abbreviation', 'Rank', 'Chlorides_Mean', 'Chlorides_25th',\n 'Chlorides_50th', 'Chlorides_75th'])\nHeat_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Heat', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['State_Abbreviation', 'Rank', 'Heat_Mean', 'Heat_25th', 'Heat_50th', 'Heat_75th'])\nLead_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Lead', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['State_Abbreviation', 'Rank', 'Lead_Mean', 'Lead_25th', 'Lead_50th', 'Lead_75th'])\nSelenium_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Selenium', header=0,\n usecols=[0, 1, 3, 6, 7, 8], names=['State_Abbreviation', 'Rank', 'Selenium_Mean',\n 'Selenium_25th', 'Selenium_50th', 'Selenium_75th'])\nMercury_State = pd.read_excel(Trace_Element_State_filename, sheet_name='Mercury', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['State_Abbreviation', 'Rank', 'Mercury_Mean', 'Mercury_25th', 'Mercury_50th',\n 'Mercury_75th'])\n\nState_Trace_Element_Data = pd.DataFrame(np.column_stack([Arsenic_State['State_Abbreviation'], Arsenic_State['Rank'],\n Arsenic_State['Arsenic_Mean'], Arsenic_State['Arsenic_25th'],\n Arsenic_State['Arsenic_50th'], Arsenic_State['Arsenic_75th'],\n Boron_State['Boron_Mean'], Boron_State['Boron_25th'], Boron_State['Boron_50th'],\n Boron_State['Boron_75th'], Bromine_State['Bromine_Mean'],\n Bromine_State['Bromine_25th'], Bromine_State['Bromine_50th'],\n Bromine_State['Bromine_75th'], Chlorides_State['Chlorides_Mean'],\n Chlorides_State['Chlorides_25th'], Chlorides_State['Chlorides_50th'],\n Chlorides_State['Chlorides_75th'], Heat_State['Heat_Mean'],\n Heat_State['Heat_25th'], Heat_State['Heat_50th'], Heat_State['Heat_75th'],\n Lead_State['Lead_Mean'], Lead_State['Lead_25th'], Lead_State['Lead_50th'],\n Lead_State['Lead_75th'], Selenium_State['Selenium_Mean'],\n Selenium_State['Selenium_25th'], Selenium_State['Selenium_50th'],\n Selenium_State['Selenium_75th'], Mercury_State['Mercury_Mean'],\n Mercury_State['Mercury_25th'], Mercury_State['Mercury_50th'],\n Mercury_State['Mercury_75th']]),\n columns=['State', 'Rank', 'Arsenic_Mean', 'Arsenic_25th', 'Arsenic_50th',\n 'Arsenic_75th', 'Boron_Mean', 'Boron_25th', 'Boron_50th', 'Boron_75th',\n 'Bromine_Mean', 'Bromine_25th', 'Bromine_50th', 'Bromine_75th',\n 'Chlorides_Mean', 'Chlorides_25th', 'Chlorides_50th', 'Chlorides_75th',\n 'Heat_Mean', 'Heat_25th', 'Heat_50th', 'Heat_75th', 'Lead_Mean',\n 'Lead_25th', 'Lead_50th', 'Lead_75th', 'Selenium_Mean',\n 'Selenium_25th', 'Selenium_50th', 'Selenium_75th', 'Mercury_Mean',\n 'Mercury_25th', 'Mercury_50th', 'Mercury_75th'])\n\nState_Trace_Element_Data['State'] = pull_state_abbreviation(State_Trace_Element_Data.State)\n\n# Pull and process the rank level summary.\n\nTrace_Element_Rank_filename = fileDir / 'Intermediate' / 'Rank Trace Element Data.xlsx'\nArsenic_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Arsenic', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Arsenic_Mean', 'Arsenic_25th', 'Arsenic_50th', 'Arsenic_75th'])\nBoron_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Boron', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Boron_Mean', 'Boron_25th', 
'Boron_50th', 'Boron_75th'])\nBromine_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Bromine', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Bromine_Mean', 'Bromine_25th', 'Bromine_50th', 'Bromine_75th'])\nChlorides_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Chlorides', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Chlorides_Mean', 'Chlorides_25th', 'Chlorides_50th', 'Chlorides_75th'])\nHeat_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Heat', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Heat_Mean', 'Heat_25th', 'Heat_50th', 'Heat_75th'])\nLead_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Lead', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Lead_Mean', 'Lead_25th', 'Lead_50th', 'Lead_75th'])\nSelenium_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Selenium', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Selenium_Mean', 'Selenium_25th', 'Selenium_50th', 'Selenium_75th'])\nMercury_Rank = pd.read_excel(Trace_Element_Rank_filename, sheet_name='Mercury', header=0, usecols=[0, 2, 5, 6, 7],\n names=['Rank', 'Mercury_Mean', 'Mercury_25th', 'Mercury_50th', 'Mercury_75th'])\n\nSummary_Rank = pd.DataFrame(np.column_stack([['BIT', 'LIG', 'SUB'], Arsenic_Rank['Arsenic_Mean'],\n Arsenic_Rank['Arsenic_25th'], Arsenic_Rank['Arsenic_50th'],\n Arsenic_Rank['Arsenic_75th'], Boron_Rank['Boron_Mean'],\n Boron_Rank['Boron_25th'], Boron_Rank['Boron_50th'],\n Boron_Rank['Boron_75th'], Bromine_Rank['Bromine_Mean'],\n Bromine_Rank['Bromine_25th'], Bromine_Rank['Bromine_50th'],\n Bromine_Rank['Bromine_75th'], Chlorides_Rank['Chlorides_Mean'],\n Chlorides_Rank['Chlorides_25th'], Chlorides_Rank['Chlorides_50th'],\n Chlorides_Rank['Chlorides_75th'], Heat_Rank['Heat_Mean'],\n Heat_Rank['Heat_25th'], Heat_Rank['Heat_50th'], Heat_Rank['Heat_75th'],\n Lead_Rank['Lead_Mean'], Lead_Rank['Lead_25th'], Lead_Rank['Lead_50th'],\n Lead_Rank['Lead_75th'], Selenium_Rank['Selenium_Mean'],\n Selenium_Rank['Selenium_25th'], Selenium_Rank['Selenium_50th'],\n Selenium_Rank['Selenium_75th'], Mercury_Rank['Mercury_Mean'],\n Mercury_Rank['Mercury_25th'], Mercury_Rank['Mercury_50th'],\n Mercury_Rank['Mercury_75th']]),\n columns=['Rank', 'Arsenic_Mean', 'Arsenic_25th', 'Arsenic_50th', 'Arsenic_75th',\n 'Boron_Mean', 'Boron_25th', 'Boron_50th', 'Boron_75th', 'Bromine_Mean',\n 'Bromine_25th', 'Bromine_50th', 'Bromine_75th', 'Chlorides_Mean', 'Chlorides_25th',\n 'Chlorides_50th', 'Chlorides_75th', 'Heat_Mean', 'Heat_25th', 'Heat_50th',\n 'Heat_75th', 'Lead_Mean', 'Lead_25th', 'Lead_50th', 'Lead_75th', 'Selenium_Mean',\n 'Selenium_25th', 'Selenium_50th', 'Selenium_75th', 'Mercury_Mean', 'Mercury_25th',\n 'Mercury_50th', 'Mercury_75th'])\n\n# Finally, pull and process basin-level summary.\n\nTrace_Element_Basin_filename = fileDir / 'Intermediate' / 'Basin Trace Element Data.xlsx'\nArsenic_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Arsenic', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['Region', 'Rank', 'Arsenic_Mean', 'Arsenic_25th', 'Arsenic_50th', 'Arsenic_75th'])\nBoron_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Boron', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['Region', 'Rank', 'Boron_Mean', 'Boron_25th', 'Boron_50th', 'Boron_75th'])\nBromine_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Bromine', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['Region', 'Rank', 'Bromine_Mean', 'Bromine_25th', 'Bromine_50th', 'Bromine_75th'])\nChlorides_Basin = 
pd.read_excel(Trace_Element_Basin_filename, sheet_name='Chlorides', header=0, usecols=[0, 1, 3, 6, 7,\n 8],\n names=['Region', 'Rank', 'Chlorides_Mean', 'Chlorides_25th', 'Chlorides_50th',\n 'Chlorides_75th'])\nHeat_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Heat', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['Region', 'Rank', 'Heat_Mean', 'Heat_25th', 'Heat_50th', 'Heat_75th'])\nLead_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Lead', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['Region', 'Rank', 'Lead_Mean', 'Lead_25th', 'Lead_50th', 'Lead_75th'])\nSelenium_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Selenium', header=0,\n usecols=[0, 1, 3, 6, 7, 8], names=['Region', 'Rank', 'Selenium_Mean', 'Selenium_25th',\n 'Selenium_50th', 'Selenium_75th'])\nMercury_Basin = pd.read_excel(Trace_Element_Basin_filename, sheet_name='Mercury', header=0, usecols=[0, 1, 3, 6, 7, 8],\n names=['Region', 'Rank', 'Mercury_Mean', 'Mercury_25th', 'Mercury_50th', 'Mercury_75th'])\n\nBasin_Trace_Element_Data = pd.DataFrame(np.column_stack([Arsenic_Basin['Region'], Arsenic_Basin['Rank'],\n Arsenic_Basin['Arsenic_Mean'], Arsenic_Basin['Arsenic_25th'],\n Arsenic_Basin['Arsenic_50th'], Arsenic_Basin['Arsenic_75th'],\n Boron_Basin['Boron_Mean'], Boron_Basin['Boron_25th'],\n Boron_Basin['Boron_50th'], Boron_Basin['Boron_75th'],\n Bromine_Basin['Bromine_Mean'], Bromine_Basin['Bromine_25th'],\n Bromine_Basin['Bromine_50th'], Bromine_Basin['Bromine_75th'],\n Chlorides_Basin['Chlorides_Mean'],\n Chlorides_Basin['Chlorides_25th'],\n Chlorides_Basin['Chlorides_50th'],\n Chlorides_Basin['Chlorides_75th'], Heat_Basin['Heat_Mean'],\n Heat_Basin['Heat_25th'], Heat_Basin['Heat_50th'],\n Heat_Basin['Heat_75th'], Lead_Basin['Lead_Mean'],\n Lead_Basin['Lead_25th'], Lead_Basin['Lead_50th'],\n Lead_Basin['Lead_75th'], Selenium_Basin['Selenium_Mean'],\n Selenium_Basin['Selenium_25th'],\n Selenium_Basin['Selenium_50th'],\n Selenium_Basin['Selenium_75th'], Mercury_Basin['Mercury_Mean'],\n Mercury_Basin['Mercury_25th'], Mercury_Basin['Mercury_50th'],\n Mercury_Basin['Mercury_75th']]),\n columns=['Basin', 'Rank', 'Arsenic_Mean', 'Arsenic_25th', 'Arsenic_50th',\n 'Arsenic_75th', 'Boron_Mean', 'Boron_25th', 'Boron_50th', 'Boron_75th',\n 'Bromine_Mean', 'Bromine_25th', 'Bromine_50th', 'Bromine_75th',\n 'Chlorides_Mean', 'Chlorides_25th', 'Chlorides_50th', 'Chlorides_75th',\n 'Heat_Mean', 'Heat_25th', 'Heat_50th', 'Heat_75th', 'Lead_Mean',\n 'Lead_25th', 'Lead_50th', 'Lead_75th', 'Selenium_Mean',\n 'Selenium_25th', 'Selenium_50th', 'Selenium_75th', 'Mercury_Mean',\n 'Mercury_25th', 'Mercury_50th', 'Mercury_75th'])\n\nBasin_Trace_Element_Data['Basin'] = completing_missing_keys_in_a_describe_dataframe(Basin_Trace_Element_Data.Basin)\n\n# Fill in the county-level analysis NAN values.\nmissing_values = pd.isnull(County_Trace_Element_Data)\n\n# This function runs through missing values and if it finds a value that is missing, it pulls first the state- and\n# rank-level value for the cell. If there is no state- and rank-level match, it pulls the basin level value. This\n# happens for counties in Arizona, Illinois, Indiana, Missouri, and Tennessee. For these states, it also uses only\n# values from adjoining states within those basins. 
That matching occurs above when the basin summary table is created.\nx = 1\nwhile x < 35:\n y = 0\n while y < missing_values.shape[0]:\n if missing_values.iloc[y, x]:\n State = County_Trace_Element_Data.State[y]\n Rank = County_Trace_Element_Data.Rank[y]\n State_Trace_Element_Data_copy = State_Trace_Element_Data\n State_Trace_Element_Data_copy = State_Trace_Element_Data_copy[State_Trace_Element_Data_copy.State == State]\n State_Trace_Element_Data_copy = State_Trace_Element_Data_copy[State_Trace_Element_Data_copy.Rank == Rank]\n reported_value = State_Trace_Element_Data_copy.iloc[0, x]\n if math.isnan(reported_value):\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data\n if State == 'AZ':\n Region = 'ROCKY MOUNTAIN'\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[Basin_Trace_Element_Data_copy.Basin ==\n Region]\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[Basin_Trace_Element_Data_copy.Rank ==\n Rank]\n elif State == 'IL':\n Region = 'EASTERN'\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Basin == Region]\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Rank == Rank]\n elif State == 'IN':\n Region = 'EASTERN'\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Basin == Region]\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Rank == Rank]\n elif State == 'MO':\n Region = 'WESTERN'\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Basin == Region]\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Rank == Rank]\n elif State == 'TN':\n Region = 'CENTRAL APPALACHIAN'\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Basin == Region]\n Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n Basin_Trace_Element_Data_copy.Rank == Rank]\n value = Basin_Trace_Element_Data_copy.iloc[0, x]\n County_Trace_Element_Data.iat[y, x] = value\n else:\n value = State_Trace_Element_Data_copy.iloc[0, x]\n County_Trace_Element_Data.iat[y, x] = value\n y += 1\n x += 1\n\n# Fill in the county-level analysis NAN values.\nmissing_values = pd.isnull(State_Trace_Element_Data)\n# This function runs through missing values and if it finds a value that is missing, it pulls first the state- and\n# rank-level value for the cell. If there is no state- and rank-level match, it pulls the basin level value. This\n# happens for counties in Arizona, Illinois, Indiana, Missouri, and Tennessee. For these states, it also uses only\n# values from adjoining states within those basins. 
That matching occurs above when the basin summary table is created.\nx = 1\nwhile x < 34:\n    y = 0\n    while y < missing_values.shape[0]:\n        if missing_values.iloc[y, x]:\n            State = State_Trace_Element_Data.State[y]\n            Rank = State_Trace_Element_Data.Rank[y]\n            Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data\n            Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[Basin_Trace_Element_Data_copy.Rank == Rank]\n            # For the states listed above, additionally restrict the basin table to the\n            # matching basin while keeping the Rank filter applied just above.\n            if State == 'AZ':\n                Region = 'ROCKY MOUNTAIN'\n                Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n                    Basin_Trace_Element_Data_copy.Basin == Region]\n            elif State == 'IL':\n                Region = 'EASTERN'\n                Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n                    Basin_Trace_Element_Data_copy.Basin == Region]\n            elif State == 'IN':\n                Region = 'EASTERN'\n                Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n                    Basin_Trace_Element_Data_copy.Basin == Region]\n            elif State == 'MO':\n                Region = 'WESTERN'\n                Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n                    Basin_Trace_Element_Data_copy.Basin == Region]\n            elif State == 'TN':\n                Region = 'CENTRAL APPALACHIAN'\n                Basin_Trace_Element_Data_copy = Basin_Trace_Element_Data_copy[\n                    Basin_Trace_Element_Data_copy.Basin == Region]\n            value = Basin_Trace_Element_Data_copy.iloc[0, x]\n            State_Trace_Element_Data.iat[y, x] = value\n        y += 1\n    x += 1\n\nCounty_Trace_Element_Data.to_csv(fileDir / 'Intermediate' / 'Full Descriptive County Trace Element Data.csv')\nSummary_Rank.to_csv(fileDir / 'Intermediate' / 'Full Descriptive Rank Trace Element Data.csv')\nState_Trace_Element_Data.to_csv(fileDir / 'Intermediate' / 'Full Descriptive State Trace Element Data.csv')\nBasin_Trace_Element_Data.to_csv(fileDir / 'Intermediate' / 'Full Descriptive Basin Trace Element Data.csv')\n","sub_path":"Code/data_processing/avg_county_coal_constituents.py","file_name":"avg_county_coal_constituents.py","file_ext":"py","file_size_in_byte":34643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"314919116","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: cicada\n@contact: 1713856662a@gmail.com\n@file: compute_domain_flow.py\n@time: 2019/5/30 下午3:15 \n\"\"\"\n\n\nfrom multiprocessing import ProcessError, Process\n\nfrom dbs import DB\nfrom utils.log_utils import get_logger\n\ncore_logger = get_logger(\"core\")\n\n\nclass ComputeDomainControler(object):\n\n    def __init__(self, cad_file_path, param_field, visual_file_path):\n        self.cad_file_path = cad_file_path\n        self.param_field = param_field\n        self.visual_file_path = visual_file_path\n\n    def start_actions(self):\n        core_logger.info(f\"构建计算域数据库信息 | cad_file_path: {self.cad_file_path}\"\n                         f\" | visual_file_path: {self.visual_file_path}\")\n        domain_id = DB.write_compute_domain(self.cad_file_path, self.visual_file_path)\n        core_logger.info(f\"异步开始构建计算域 | domain_id: {domain_id}\")\n        try:\n            dom_crt_process = Process(target=self.asyc_domain_create, args=(domain_id, ))\n            dom_crt_process.start()\n            core_logger.info(f\"异步进行计算域的生成 | pid: {dom_crt_process.pid} | domain_id: {domain_id}\")\n            return domain_id, \"success\"\n        except ProcessError:\n            core_logger.exception(f\"开启计算域构建进程失败 | domain_id: {domain_id}\")\n            return -1, f\"开启计算域构建进程失败 | domain_id: {domain_id}\"\n\n    def asyc_domain_create(self, domain_id):\n        
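# NOTE: left unimplemented in the source; the stub must exist because\n        # Process(target=...) above points at this method\n        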
pass\n","sub_path":"actions/compute_domain_flow.py","file_name":"compute_domain_flow.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"50183722","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 6 19:03:32 2018\n\n@author: barbara\n\"\"\"\n\n\nimport copy\n\n\ndef GradientDescent(niter, epsV, epsZ, functional, X_init):\n X = copy.deepcopy(X_init)\n grad_op = functional.gradient\n energy=functional(X)\n print(\" initial energy : {}\".format(energy))\n\n for k in range(niter):\n grad=grad_op(X)\n X_temp0=X.copy()\n X_temp0[0]= (X[0]- epsV *grad[0]).copy()\n X_temp0[1]= (X[1]- epsZ *grad[1]).copy()\n energy_temp0=functional(X_temp0)\n if energy_temp0= cv_best:\n\t\t\t\tif n_worse < 2:\n\t\t\t\t\tprint('%i<2 worse epochs: cv best=%f, cv epoch=%f'%(n_worse,cv_best,cv_epoch))\n\t\t\t\t\tf.write('\\n%i<2 worse epochs: cv best=%f, cv epoch=%f'%(n_worse,cv_best,cv_epoch))\n\t\t\t\telse:\n\t\t\t\t\tprint('early stopping: cv best=%f, cv epoch=%f'%(cv_best,cv_epoch))\n\t\t\t\t\tf.write('\\nearly stopping: cv best=%f, cv epoch=%f'%(cv_best,cv_epoch))\n\t\t\t\t\tbreak # break out of epoch looping, early stopping\n\nf.close()","sub_path":"restore_masked_speech.py","file_name":"restore_masked_speech.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"540002895","text":"import sys\r\nimport random\r\nfrom PySide2 import QtCore, QtWidgets, QtGui\r\nfrom functools import partial\r\n\r\n\r\nclass LearnWidget(QtWidgets.QWidget):\r\n def __init__(self, operation_selection, parent=None):\r\n super(LearnWidget, self).__init__(parent)\r\n\r\n operand = operation_selection\r\n self.learnMul = [\"Welcome to Scales!\", \"Let's start with multiplying.\", \"We can use our fingers \\n to multiply numbers.\", \"Lets multiply 2 and 3.\", \"Put up 3 fingers\", \"Now add 3 more fingers\", \"We have added 3 fingers \\n two times!\", \"This is the same as\\n multiplying two numbers!\", \"How many fingers\\n do you have up?\", \"6! Did you get it right?\", \"Click back to \\n pick another \\n module.\"]\r\n self.learnDiv = [\"Welcome to Scales!\", \"Let's start with dividing.\", \"We can use our fingers to\\n divide numbers.\", \"Lets divide 6 by 3.\", \"Put up 6 fingers.\", \"Take down fingers\\n 3 at a time.\", \"How many fingers\\n did you take away\\n before you go to 0?\", \"That's the answer to 6/3!\", \"Did you answer 2?\", \"If so, you got it right!\", \"Click back to \\n pick another \\n module.\"]\r\n self.learnSub = [\"Welcome to Scales!\", \"Let's start with subtracting.\", \"We can use our fingers \\n to subtract numbers.\", \"Lets subtract 5 from 2.\", \"Put up 5 fingers.\", \"Take down 2.\", \"How many fingers are left?\", \"3! Did you get it right?\", \"Click back to \\n pick another \\n module.\"]\r\n self.learnTexts = [\"Welcome to Scales!\", \"Let's start with adding.\", \"We can use our fingers\\n to add numbers.\", \"Let's add 3 and 2.\", \"Lets put up 3 fingers.\", \"Now count 2 more.\", \"How many fingers are up?\", \"5! 
Did you get it right?\", \"Click back to \\n pick another \\n module.\"]\r\n\r\n self.tutorial = []\r\n if operand == \"+\":\r\n self.tutorial = self.learnTexts\r\n elif operand == \"-\":\r\n self.tutorial = self.learnSub\r\n elif operand == \"*\":\r\n self.tutorial = self.learnMul\r\n elif operand == \"/\":\r\n self.tutorial = self.learnDiv\r\n\r\n self.currentText = 0\r\n \r\n\r\n # Snakey Image\r\n self.snakeCartoon = QtWidgets.QLabel()\r\n snakepng = QtGui.QPixmap('images/snake1.png')\r\n snakeScaled = snakepng.scaled(450, 450, QtCore.Qt.KeepAspectRatio)\r\n self.snakeCartoon.setPixmap(snakeScaled)\r\n\r\n # Tutorial Text\r\n self.tutorialText = QtWidgets.QLabel(self.tutorial[self.currentText])\r\n self.tutorialText.setStyleSheet(\"color: white; font-size: 52px;\")\r\n self.tutorialText.adjustSize()\r\n self.tutorialText.wordWrap()\r\n\r\n # Tutorial Navigation Stuff\r\n navStyle = (\"color: white; font-size: 22px; margin: 0 15px\")\r\n self.nextButton = QtWidgets.QPushButton(\"Next\")\r\n self.backButton = QtWidgets.QPushButton(\"Back\")\r\n self.nextButton.setStyleSheet(navStyle)\r\n self.backButton.setStyleSheet(navStyle)\r\n self.nextButton.clicked.connect(self.next)\r\n self.backButton.clicked.connect(self.back)\r\n\r\n # horizontal Layout for side by side buttons\r\n self.buttonLayout = QtWidgets.QHBoxLayout()\r\n self.buttonLayout.addWidget(self.backButton)\r\n self.buttonLayout.addWidget(self.nextButton)\r\n\r\n # Veritcal Layout - right side of page\r\n self.vertLayout = QtWidgets.QVBoxLayout()\r\n self.vertLayout.addWidget(self.tutorialText)\r\n self.vertLayout.addLayout(self.buttonLayout)\r\n\r\n # Horizontal Layout\r\n self.splitLayout = QtWidgets.QHBoxLayout()\r\n self.splitLayout.addWidget(self.snakeCartoon)\r\n self.splitLayout.addLayout(self.vertLayout)\r\n self.splitLayout.setAlignment(QtCore.Qt.AlignCenter)\r\n\r\n\r\n # Apply the layout\r\n self.setLayout(self.splitLayout)\r\n\r\n def next(self):\r\n if self.currentText + 1 < len(self.tutorial):\r\n self.currentText += 1\r\n self.tutorialText.setText(self.tutorial[self.currentText])\r\n self.tutorialText.repaint()\r\n \r\n def back(self):\r\n if self.currentText > 0:\r\n self.currentText -= 1\r\n self.tutorialText.setText(self.tutorial[self.currentText])\r\n self.tutorialText.repaint()\r\n\r\n","sub_path":"LearnModule.py","file_name":"LearnModule.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"639877340","text":"class Word_Game():\n def find_words(self, board, words):\n if not board or not board[0]:\n return []\n\n visited = [[0 for y in range(len(board[0]))] for x in range(len(board))]\n result = []\n \n # Convert the words list into a trie tree\n word_dic = Trie()\n node = word_dic.root\n for w in words:\n word_dic.insert(w)\n \n for i in range(len(board)):\n for j in range(len(board[0])):\n self.exist(board, i, j, node, visited, \"\", result)\n\n return result\n\n\n def exist(self, board, x, y, node, visited, path, result):\n \"\"\"\n return type: void\n \"\"\"\n if node.is_word:\n result.appedn(path)\n node.is_word = False # ??\n\n if x < 0 or y < 0 or x >= len(board) or y >= len(board[0]) or visited[x][y] == 1:\n return\n \n current_node = node.children.get(board[x][y])\n if not current_node:\n return \n \n # the letter in the trie tree, add it into potential word path\n path += board[x][y]\n visited[x][y] = 1\n self.exist(board, x - 1, y, current_node, visited, path, result)\n self.exist(board, x + 1, y, 
current_node, visited, path, result)\n self.exist(board, x, y - 1, current_node, visited, path, result)\n self.exist(board, x, y + 1, current_node, visited, path, result)\n visited[x][y] = 0\n \n return\n\n\nclass Trie_Node():\n def __init__(self):\n self.children = collections.defaultdict(Trie_Node)\n self.is_word = False\n \nclass Trie():\n def __init__(self):\n self.root = Trie_Node()\n\n def insert(self, word):\n node = self.root\n for letter in word:\n node = node.children[letter]\n node.is_word = True","sub_path":"Algorithm/212_Word_Search_II.py","file_name":"212_Word_Search_II.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"354069208","text":"\"\"\"\nwsgi_app.py\n\"\"\"\n\n\ndef application(environ, start_response):\n status = \"200 OK\" # HTTP messages have a status\n body = \"Hello World\" # HTTP messages have a body\n\n # HTTP messages have headers to describe various things, at a\n # minimum describing the type(Content-Type) and the length of the\n # content(Content-Length)\n headers = [(\"Content-Type\",\"text/plain\"),\n (\"Content-Length\",str(len(body)))]\n\n start_response(status, headers) # calling the function passed in\n # with the status and headers of\n # the HTTP Response Message\n\n return [body] # returning a list containing the body of the HTTP\n # Response Message\n","sub_path":"doc/source/wsgi_app.py","file_name":"wsgi_app.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"179445568","text":"from tkinter import *\nfoto_list=[\"pc1.png\",\"pc2.png\",\"pc3.png\",\"pc4.png\",\"pc5.png\",'pc6.png','pc7.png']\nttt=\"фото компьютеров\"\nlist_ = [\"Компьютер1\",\"Компьютер2\",\"Компьютер3\",\"Компьютер4\",\"Компьютер5\"]\ndef list_to_txt(event):\n global can,foto\n txt.delete(0.0,END)\n valik=lbox.curselection()\n txt.insert(END,lbox.get(valik[0]))\n can.delete(ALL)\n foto=PhotoImage(file=foto_list[valik[0]])\n can.create_image(0,0,image=foto,anchor=NW)\n\ndef txt_to_list(event):\n text=txt.get(0.0,END)\n text=text[-2:-1]\n if text==\"\\n\":\n pass\n else:\n list_.append(text)\n print(list_)\n lbox.config(height=len(list_))\n lbox.insert(END,text) \n txt.delete(0.0,END)\ndef opisanie():\n global ttt\n text = txt.get(0.0, END)\n print(list(text))\n if text==\"Компьютер5\\n\":\n ttt=\"игровой компьютер имеет 960m видеокарту 16 гб оперативной памяти 500w блок питания стоит 700 евро\"\n elif text==\"Компьютер2\\n\":\n ttt=\"компьтер игровой имеет видео карту ртх 3090, процессор i9, 32гб оперативной памяти ddr4, стоит он 2000 евро.\"\n elif text==\"Компьютер1 \\n\":\n ttt=\" игровой компьютер он имеет core i5, 16гб оперативной памяти, 500w, 1050 видеокарта \"\n elif text==\"Компьютер4\":\n ttt=\"компьютер стоит 900 евро он игровой \"\n elif text==\"Компьютер3\":\n ttt=\"Компьютер имеет core i5 видеокарту 1050, 16 гб оперативной памяти, компьютер стоит 500 евро.\"\n else:\n ttt=\"Компьютер\"\n opis.config(text=ttt)\n\n\n\nwin=Tk()\nwin.geometry(\"1080x1080\")\nwin.title(\"Компьютеры\")\nlbox=Listbox(win,width=25,height=7,selectmode=SINGLE)\nlbox.insert(1, \"Компьютер1 \")\nlbox.insert(2, \"Компьютер2\")\nlbox.insert(3, \"Компьютер3\")\nlbox.insert(4, \"Компьютер4\")\nlbox.insert(5, \"Компьютер5\")\nlbox.insert(6, \"Компьютер6\")\nlbox.insert(7, \"Компьютер7\")\nfor element in foto_list:\n 
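# after the numbered presets, also list every photo filename from foto_list\n    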
lbox.insert(END,element)\n\nlbox.grid(row=0,column=0)\nlbox.bind(\"<<ListboxSelect>>\",list_to_txt)\ntxt=Text(win,height=10,width=15,wrap=WORD)\ntxt.grid(row=3,column=3)\ntxt.bind(\"<Return>\",txt_to_list)\ncan=Canvas(win,width=400,height=400,bg=\"gold\")\ncan.grid(row=1,column=1,columnspan=1)\npc = PhotoImage(file=\"\")#220px-PelobatesFuscus.png\npanel = Label(win, image = pc)\npanel.grid(row=5, column=3)\nfoto=PhotoImage(file=\"pc3.png\")\nbtn=Button(text='Информация ', command=opisanie)\nbtn.grid(row=1, column=2)\nopis=Label(win, text=\"\", width=10, height=10)\nopis.grid(row=20, column=6)\ncan.grid(row=4, column=6)\n\n\n\n\n\nwin.mainloop()\n","sub_path":"PythonApplication14/PythonApplication14.py","file_name":"PythonApplication14.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"249024667","text":"import discord\nfrom discord.ext import commands\nfrom discord.utils import get\nintents = discord.Intents.default()\nintents.members = True\nclient = commands.Bot(command_prefix= '.', intents=intents)\nx = 100\n@client.event\nasync def on_ready():\n    await client.change_presence(activity=discord.Game(name=\"Yo mama\"))\n    print('bot is ready')\n@client.command()\nasync def Room(ctx):\n    global x\n    print(\"yes\")\n    for xx in range(x):\n        await ctx.guild.create_text_channel('test')\n\n@client.command()\nasync def RoomD(ctx):\n    guild = ctx.guild\n    for channel in guild.channels:\n        await channel.delete()\n@client.command()\nasync def kickall(ctx, *, reason=None):\n    for member in ctx.guild.members:\n        try:\n            await member.kick(reason=reason)\n            print(f\"Kicked {member.name}\")\n        except:\n            print(f\"Could not kick {member}\")\n@client.command(pass_context=True)\nasync def delrole(ctx, *,role_name):\n    role = discord.utils.get(ctx.message.server.roles, name=role_name)\n    if role:\n        try:\n            await client.delete_role(ctx.message.server, role)\n            await client.say(\"The role {} has been deleted!\".format(role.name))\n        except discord.Forbidden:\n            await client.say(\"Missing Permissions to delete this role!\")\n    else:\n        await client.say(\"The role doesn't exist!\")\n\nclient.run(\"KEYHERE\")","sub_path":"Discord_Server_Destroyer_Bot.py","file_name":"Discord_Server_Destroyer_Bot.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"229622605","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n    def forwards(self, orm):\n        # Deleting model 'Platforms'\n        db.delete_table('flags_platforms')\n\n        # Adding field 'Flag.is_mac'\n        db.add_column('flags_flag', 'is_mac',\n                      self.gf('django.db.models.fields.BooleanField')(default=False),\n                      keep_default=False)\n\n        # Adding field 'Flag.is_windows'\n        db.add_column('flags_flag', 'is_windows',\n                      self.gf('django.db.models.fields.BooleanField')(default=False),\n                      keep_default=False)\n\n        # Adding field 'Flag.is_linux'\n        db.add_column('flags_flag', 'is_linux',\n                      self.gf('django.db.models.fields.BooleanField')(default=False),\n                      keep_default=False)\n\n        # Adding field 'Flag.is_chrome_os'\n        db.add_column('flags_flag', 'is_chrome_os',\n                      self.gf('django.db.models.fields.BooleanField')(default=False),\n                      keep_default=False)\n\n        # Adding field 'Flag.is_android'\n        db.add_column('flags_flag', 'is_android',\n                      self.gf('django.db.models.fields.BooleanField')(default=False),\n                      keep_default=False)\n\n        # 
Removing M2M table for field compatibility on 'Flag'\n db.delete_table('flags_flag_compatibility')\n\n\n def backwards(self, orm):\n # Adding model 'Platforms'\n db.create_table('flags_platforms', (\n ('support', self.gf('django.db.models.fields.CharField')(max_length=15)),\n ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ))\n db.send_create_signal('flags', ['Platforms'])\n\n # Deleting field 'Flag.is_mac'\n db.delete_column('flags_flag', 'is_mac')\n\n # Deleting field 'Flag.is_windows'\n db.delete_column('flags_flag', 'is_windows')\n\n # Deleting field 'Flag.is_linux'\n db.delete_column('flags_flag', 'is_linux')\n\n # Deleting field 'Flag.is_chrome_os'\n db.delete_column('flags_flag', 'is_chrome_os')\n\n # Deleting field 'Flag.is_android'\n db.delete_column('flags_flag', 'is_android')\n\n # Adding M2M table for field compatibility on 'Flag'\n db.create_table('flags_flag_compatibility', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('flag', models.ForeignKey(orm['flags.flag'], null=False)),\n ('platforms', models.ForeignKey(orm['flags.platforms'], null=False))\n ))\n db.create_unique('flags_flag_compatibility', ['flag_id', 'platforms_id'])\n\n\n models = {\n 'flags.flag': {\n 'Meta': {'object_name': 'Flag'},\n 'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_android': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_chrome_os': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_linux': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_mac': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_windows': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n }\n }\n\n complete_apps = ['flags']","sub_path":"flags/migrations/0003_auto__del_platforms__add_field_flag_is_mac__add_field_flag_is_windows_.py","file_name":"0003_auto__del_platforms__add_field_flag_is_mac__add_field_flag_is_windows_.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"328644727","text":"#!/usr/bin/env python\n# Pieces of this code are from Tornado, they are being phased out.\n#\n# Copyright 2012 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom __future__ import absolute_import, division, print_function, with_statement\n\nimport logging\nimport logging.handlers\nimport sys\nimport time\nfrom colorama import init\ninit()\nfrom colorama import Fore, Back, Style\nfrom .util import unicode, bytes, basestring\n\ntry:\n import curses\nexcept ImportError:\n curses = None\n\n# From tornado/util.py:\n# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for\n# literal strings, and alternative solutions like \"from __future__ import\n# unicode_literals\" have other problems (see PEP 414). u() can be applied\n# to ascii strings that include \\u escapes (but they must not contain\n# literal non-ascii characters).\n# todo _ can remove this, this next 10 lines is from\n# http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/\n_UTF8_TYPES = (bytes, type(None))\n_TO_UNICODE_TYPES = (unicode, type(None))\n\n\ndef utf8(value):\n \"\"\"Convert a string argument to a byte string.\n\n If the argument is already a byte string or None, it is returned unchanged.\n Otherwise it must be a unicode string and is encoded as utf8.\n\n \"\"\"\n if isinstance(value, _UTF8_TYPES):\n return value\n assert isinstance(value, unicode), \\\n \"Expected bytes, unicode, or None; got %r\" % type(value)\n return value.encode(\"utf-8\")\n\n\ndef to_unicode(value):\n \"\"\"Convert a string argument to a unicode string.\n\n If the argument is already a unicode string or None, it is returned\n unchanged. Otherwise it must be a byte string and is decoded as utf8.\n\n \"\"\"\n if isinstance(value, _TO_UNICODE_TYPES):\n return value\n assert isinstance(value, bytes), \\\n \"Expected bytes, unicode, or None; got %r\" % type(value)\n return value.decode(\"utf-8\")\n\n# to_unicode was previously named _unicode not because it was private,\n# but to avoid conflicts with the built-in unicode() function/type\n_unicode = to_unicode\n\n# When dealing with the standard library across python 2 and 3 it is\n# sometimes useful to have a direct conversion to the native string type\nif str is unicode:\n native_str = to_unicode\nelse:\n native_str = utf8\n\n\ndef _stderr_supports_color():\n color = False\n if curses and sys.stderr.isatty():\n try:\n curses.setupterm()\n if curses.tigetnum(\"colors\") > 0:\n color = True\n except Exception:\n pass\n return color\n\n# Encoding notes: The logging module prefers to work with character\n# strings, but only enforces that log messages are instances of\n# basestring. In python 2, non-ascii bytestrings will make\n# their way through the logging framework until they blow up with\n# an unhelpful decoding error (with this formatter it happens\n# when we attach the prefix, but there are other opportunities for\n# exceptions further along in the framework).\n#\n# If a byte string makes it this far, convert it to unicode to\n# ensure it will make it out to the logs. Use repr() as a fallback\n# to ensure that all byte strings can be converted successfully,\n# but don't do it by default so we don't add extra quotes to ascii\n# bytestrings. 
This is a bit of a hacky place to do this, but\n# it's worth it since the encoding errors that would otherwise\n# result are so useless (and tornado is fond of using utf8-encoded\n# byte strings whereever possible).\n\n\ndef safe_unicode(s):\n try:\n return _unicode(s)\n except UnicodeDecodeError:\n return repr(s)\n\nNORMAL = Fore.RESET + Style.RESET_ALL + Back.RESET\n\nLEVEL_COLORS = {\n 'DEBUG': Fore.BLUE, # Blue\n 'INFO': Fore.GREEN, # Green\n 'WARNING': Fore.YELLOW,\n 'ERROR': Fore.RED,\n 'CRITICAL': Fore.RED\n}\n\n\ndef default_log_template(self, record):\n \"\"\" Return the prefix for the log message. Template for Formatter.\n\n :param: record: :py:class:`logging.LogRecord` object. this is passed in\n from inside the :py:meth:`logging.Formatter.format` record.\n\n \"\"\"\n\n prefix_template = ''\n prefix_template += NORMAL\n prefix_template += LEVEL_COLORS.get(record.levelname) + Style.BRIGHT + '(%(levelname)s)' + NORMAL + ' '\n prefix_template += '[' + Fore.BLACK + Style.DIM + Style.BRIGHT + '%(asctime)s' + Fore.RESET + Style.RESET_ALL + ']'\n prefix_template += ' ' + Fore.WHITE + Style.DIM + Style.BRIGHT + '%(name)s' + Fore.RESET + Style.RESET_ALL + ' '\n prefix_template += NORMAL\n\n return prefix_template\n\n\nclass LogFormatter(logging.Formatter):\n\n \"\"\"Log formatter used in Tornado.\n\n Key features of this formatter are:\n\n * Color support when logging to a terminal that supports it.\n * Timestamps on every log line.\n * Robust against str/bytes encoding problems.\n\n This formatter is enabled automatically by\n `tornado.options.parse_command_line` (unless ``--logging=none`` is\n used).\n\n \"\"\"\n\n template = default_log_template\n\n def __init__(self, color=True, *args, **kwargs):\n logging.Formatter.__init__(self, *args, **kwargs)\n self._color = color and _stderr_supports_color()\n\n def format(self, record):\n try:\n record.message = record.getMessage()\n except Exception as e:\n record.message = \"Bad message (%r): %r\" % (e, record.__dict__)\n assert isinstance(\n record.message, basestring) # guaranteed by logging\n\n date_format = '%H:%m:%S'\n record.asctime = time.strftime(date_format, self.converter(record.created))\n\n prefix = self.template(record) % record.__dict__\n\n formatted = prefix + \" \" + safe_unicode(record.message)\n if record.exc_info:\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n # exc_text contains multiple lines. We need to safe_unicode\n # each line separately so that non-utf8 bytes don't cause\n # all the newlines to turn into '\\n'.\n lines = [formatted.rstrip()]\n lines.extend(safe_unicode(ln)\n for ln in record.exc_text.split('\\n'))\n formatted = '\\n'.join(lines)\n return formatted.replace(\"\\n\", \"\\n \")\n\n\ndef debug_log_template(self, record):\n \"\"\" Return the prefix for the log message. Template for Formatter.\n\n :param: record: :py:class:`logging.LogRecord` object. 
this is passed in\n from inside the :py:meth:`logging.Formatter.format` record.\n\n \"\"\"\n\n prefix_template = ''\n prefix_template += NORMAL\n prefix_template += LEVEL_COLORS.get(record.levelname) + Style.BRIGHT + '(%(levelname)1.1s)' + NORMAL + ' '\n prefix_template += '[' + Fore.BLACK + Style.DIM + Style.BRIGHT + '%(asctime)s' + Fore.RESET + Style.RESET_ALL + ']'\n prefix_template += ' ' + Fore.WHITE + Style.DIM + Style.BRIGHT + '%(name)s' + Fore.RESET + Style.RESET_ALL + ' '\n prefix_template += Fore.GREEN + Style.BRIGHT + '%(module)s.%(funcName)s()'\n prefix_template += Fore.BLACK + Style.DIM + Style.BRIGHT + ':' + NORMAL + Fore.CYAN + '%(lineno)d'\n prefix_template += NORMAL\n\n return prefix_template\n\n\nclass DebugLogFormatter(LogFormatter):\n\n \"\"\"Provides greater technical details than standard log Formatter.\"\"\"\n\n template = debug_log_template\n","sub_path":"tmuxp/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"370884255","text":"#! /usr/bin/env python\nimport os\nimport sys\nimport pandas \n#import pandas.io.data as web # Package and modules for importing data; this code may change depending on pandas version\nimport pandas_datareader.data as web #for aconda\nimport datetime\nimport json\n\ndef average(result, n):\n return sum([x[2] for x in result][-n:])/n\n\ndef up_from_bottom(result):\n my_list = [x[2] for x in result]\n bottom = min(my_list)\n if (my_list[-1]/bottom) > 2.5:\n return True\n return False \n\nbull = []\ntry:\n with open('good.json', 'r') as f:\n data = json.load(f)\n\n end = datetime.date.today()\n my_day = end.day\n if end.day == 29:\n my_day =28\n start = datetime.datetime(end.year - 1, end.month, my_day)\n\n print('range:', start, end)\n # First argument is the series we want, second is the source (\"yahoo\" for Yahoo! 
Finance), third is the start date, fourth is the end date\n for x in data:\n try:\n apple = web.DataReader(x, \"yahoo\", start, end)\n #print(apple.head())\n #print('type', type(apple))\n rows = apple.iterrows()\n result =[]\n for r in rows:\n y=r\n #print('second', x[1].__str__)\n item = y[1].__str__()\n items=item.split()\n result.append((items[14], float(items[9]), float(items[12])))\n #print(result) \n if len(result) < 100:\n continue\n\n if result[-1][2] > average(result, 20):\n if average(result,20) > average(result, 50):\n if average(result, 10) > average(result, 100):\n if up_from_bottom(result):\n print('bull:', x)\n bull.append(x)\n except Exception as e:\n print('Exception for foo loop', x, str(e))\n \n with open('bull.txt', 'w+') as outfile:\n for z in bull:\n outfile.write(str(z)+'\\n')\n\nexcept Exception as e:\n print(\"EEEException: \", str(e))\nf.close()\n","sub_path":"stock/src/bull.py","file_name":"bull.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"304306289","text":"import re\nimport sys\nimport json\nimport cv2\n\nFONT = cv2.FONT_HERSHEY_SIMPLEX\nFONT_SCALE = 0.8\n\ndef show_video(video_path, json_path):\n with open(json_path, 'r') as json_file:\n tracks_json = json.load(json_file)\n\n frame_idx = 0\n frame_keys = tracks_json.keys()\n print(frame_keys)\n cap = cv2.VideoCapture(video_path)\n fps = 5\n sz = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n video_write = cv2.VideoWriter()\n video_write.open('output.mp4', fourcc, fps, sz, True)\n\n while cap.isOpened():\n success, frame = cap.read()\n if not success:\n break\n frame_idx += 1\n str_frame_idx = str(frame_idx)\n if str_frame_idx not in frame_keys:\n continue\n else:\n list_objs = tracks_json[str_frame_idx]\n for obj in list_objs:\n body_box, track_id, head_box = obj\n cv2.putText(frame, str(track_id), (int(body_box[0]), int(body_box[1])), FONT, FONT_SCALE,\n color=(0, 255, 0), lineType=cv2.LINE_AA, thickness=4)\n cv2.rectangle(frame, (int(body_box[0]), int(body_box[1])), (int(body_box[0] + body_box[2]),\n int(body_box[1] + body_box[3])), color=(255, 0, 0), thickness=3)\n if head_box:\n cv2.rectangle(frame, (head_box[0], head_box[1]),\n (head_box[0] + head_box[2], head_box[1] + head_box[3]),\n color=(0, 0, 255), thickness=3)\n if success and frame_idx < 10000:\n # cv2.imshow('video', frame)\n # cv2.waitKey(5)\n video_write.write(frame)\n else:\n break\n\n cap.release()\n video_write.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n video_path = 'video.mp4'\n json_path = 'video_box.json'\n show_video(video_path, json_path)\n","sub_path":"python/visualize_video_box.py","file_name":"visualize_video_box.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160728419","text":"def agent(observation, configuration):\n import torch\n import torch.nn as nn\n from collections import OrderedDict\n from torch import tensor\n import random\n\n\n class QNet(nn.Module):\n def __init__(self, action_space, observation_space, n_hidden_layers=2, n_neurons=64, batch_norm=False):\n super(QNet, self).__init__()\n self.action_space = action_space\n self.observation_space = observation_space\n \n sizes = [self.observation_space] + [n_neurons for _ in range(n_hidden_layers)] + [self.action_space]\n layers = []\n for i in 
range(n_hidden_layers + 1):\n if i < n_hidden_layers and batch_norm:\n layers.append(nn.BatchNorm1d(sizes[i]))\n layers.append(nn.Linear(sizes[i], sizes[i + 1]))\n if i < n_hidden_layers:\n layers.append(nn.ReLU())\n self.layers = nn.Sequential(*layers)\n \n def forward(self, obs):\n return self.layers(obs)\n \n def get_action(self, observation):\n obs = observation['board'] + [observation['mark']]\n obs = torch.tensor(obs).float()\n self.eval()\n state_value = self.forward(obs.unsqueeze(0))[0].detach()\n self.train()\n for i in range(self.action_space):\n if obs[i] != 0:\n state_value[i] = -1e7\n return state_value.argmax().item()\n action_space = 7\n observation_space = 43\n model = QNet(action_space, observation_space, batch_norm=True)\n model.eval()\n return int(model.get_action(observation))\n","sub_path":"dqn_connectx/submission/connectx/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"211215165","text":"import subprocess\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Lock\nfrom time import sleep\n\nfrom slinkie import Slinkie\n\n\ndef repeat(fn, *args, **kw):\n while True:\n yield fn(*args, **kw)\n\n\nclass Juggler:\n lock = Lock()\n job_id = 1\n jobs = {}\n tpe = ThreadPoolExecutor(8)\n\n def __init__(self):\n raise Exception(f\"Do not instantiate {type(self).__name__}.\")\n\n @classmethod\n def _get_job_id(cls):\n with cls.lock:\n result, cls.job_id = cls.job_id, cls.job_id + 1\n return result\n\n @staticmethod\n def _job(command):\n try:\n result = subprocess.run(\n command,\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n return {\n 'command': command,\n 'returncode': result.returncode,\n 'stdout': result.stdout,\n 'stderr': result.stderr}\n\n except Exception as e:\n return {\n 'command': command,\n 'returncode': -0xc05fefe, # covfefe.\n 'stdout': None,\n 'stderr': str(e)}\n\n @classmethod\n def submit_job(cls, command):\n\n if isinstance(command, str):\n _command = Slinkie(command.split(' ')) \\\n .map(lambda it: it.strip()) \\\n .filter(lambda it: it != \"\") \\\n .list()\n else:\n _command = list(command)\n\n job_id = cls._get_job_id()\n cls.jobs[job_id] = (_command, cls.tpe.submit(cls._job, _command))\n\n return job_id\n\n @classmethod\n def submit_queue(cls, commands):\n job_ids = []\n for command in commands:\n job_id = cls._get_job_id()\n cls.jobs[job_id] = (command, None)\n job_ids.append(job_id)\n\n cls._chain_commands(job_ids)\n return job_ids\n\n @classmethod\n def _chain_commands(cls, job_ids):\n\n def _chainer(_job_ids):\n head, *tail = _job_ids\n\n command, _ = cls.jobs[head]\n job = cls.tpe.submit(cls._job, command)\n cls.jobs[head] = (command, job)\n\n if tail:\n while not job.done():\n sleep(0.01)\n _chainer(tail)\n\n cls.tpe.submit(_chainer, job_ids)\n\n @classmethod\n def get_result(cls, job_id):\n _, future = cls.jobs[job_id]\n\n if future is None:\n return job_id, None\n\n return job_id, future.result()\n\n @classmethod\n def get_status(cls, job_id):\n command, future = cls.jobs[job_id]\n\n if future is None:\n return job_id, {\n 'command': command,\n 'done': False,\n 'running': False,\n 'waiting': True}\n\n return job_id, {\n 'command': command,\n 'done': future.done(),\n 'running': future.running(),\n 'waiting': False}\n\n @classmethod\n def get_all_statuses(cls):\n for job_id, (command, future) in cls.jobs.items():\n if future is None:\n yield job_id, {\n 
'command': command,\n 'done': False,\n 'running': False,\n 'waiting': True}\n else:\n yield job_id, {\n 'command': command,\n 'done': future.done(),\n 'running': future.running(),\n 'waiting': False}\n","sub_path":"juggler.py","file_name":"juggler.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"529832572","text":"\n# imports\nfrom __future__ import print_function\nimport httplib2\nimport os\nimport fileinput\n\n# TODO\n# I want to build the dates with datetime\n# from datetime import date, datetime, timedelta\n# I'd also like to have an alarm 30 before to tell me to get ready\n# as well as an alarm to go off 10 minutes before to tell me to leave\nfrom apiclient import discovery\nimport oauth2client\nfrom oauth2client import client\nfrom oauth2client import tools\n\nimport csv\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# globals\nSCOPES = 'https://www.googleapis.com/auth/calendar'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Create work events'\nos.chdir('/home/admin/github/work_calendar/')\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef main():\n \"\"\"Update Google Calendar with a csv file.\n\n Reads a name supplied by stdin. If that name exists as a csv file\n in the same directory, grab each event and add it to the calendar\n with personalized color, summary and location.\n \"\"\"\n\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n name = ''\n cal_id = 'primary'\n skylers_id = '0ukenejbv859k40uqo1pf32qos@group.calendar.google.com'\n\n print('Updating...')\n\n # fileinput.input should only have one line..\n for line in fileinput.input():\n name = line\n\n if name == 'Skyler':\n cal_id = skylers_id\n\n users = {\n 'Gideon': {\n 'location': '1401 Hillcrest Rd, Mobile, AL 36695, USA',\n 'color': '11',\n 'summary': 'Work',\n 'transparency': 'opaque',\n },\n 'Skyler': {\n 'location': '',\n 'color': '3',\n 'summary': 'Skyler Work',\n 'transparency': 'transparent',\n },\n }\n\n with open(name + '.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n # format = '2016-09-10T09:00:00-05:00'\n # the 05:00 at the end relates to time zone\n # ^^^ then again.. 
if I remove it maybe that will help with dlst\n start_time = (\n row['year']\n + '-' + row['month']\n + '-' + row['day']\n + 'T' + row['start']\n + ':00'\n # + '-' + '05:00'\n )\n end_time = (\n row['year']\n + '-' + row['month']\n + '-' + row['day']\n + 'T' + row['end']\n + ':00'\n # + '-' + '05:00'\n )\n\n event = {\n 'summary': users[name]['summary'],\n 'location': users[name]['location'],\n 'description': 'Created using the api',\n 'colorId': users[name]['color'],\n 'transparency': users[name]['transparency'],\n 'start': {\n 'dateTime': start_time,\n 'timeZone': 'America/Chicago',\n },\n 'end': {\n 'dateTime': end_time,\n 'timeZone': 'America/Chicago',\n },\n }\n\n event = service.events().insert(\n calendarId=cal_id,\n body=event).execute()\n\n print('Calendar Updated Successfully!!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"create_event.py","file_name":"create_event.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"443229641","text":"import argparse\nimport time\nimport struct\nimport os\nfrom pymodbus.client.sync import ModbusTcpClient\n\nON_RTD = os.environ.get('READTHEDOCS') == 'True'\nif not ON_RTD:\n from ocs import ocs_agent, site_config\n from ocs.ocs_twisted import TimeoutLock\n\n\n# Convert Data\ndef float2int(num):\n return struct.unpack(\"=i\", struct.pack(\"=f\", num))[0]\n\n\ndef concatData(data):\n tVal = 0\n upper = True\n for reg in data:\n if upper:\n tVal = ((reg & 0xFFFF) << 16)\n upper = False\n else:\n tVal = tVal | (reg & 0xFFFF)\n upper = True\n return tVal\n\n\n# Converting numbers to 16-bit data arrays\ndef uint16_to_data(num):\n return struct.unpack(\"=H\", struct.pack(\"=H\", num & 0xFFFF))[0]\n\n\ndef uint32_to_data(num):\n data = [0, 0]\n data[0] = struct.unpack(\"=H\", struct.pack(\"=H\", (num >> 16) & 0xffff))[0]\n data[1] = struct.unpack(\"=H\", struct.pack(\"=H\", num & 0xffff))[0]\n return data\n\n\ndef int32_to_data(num):\n data = [0, 0]\n data[0] = struct.unpack(\"=H\", struct.pack(\"=H\", (num >> 16) & 0xffff))[0]\n data[1] = struct.unpack(\"=H\", struct.pack(\"=H\", num & 0xffff))[0]\n return data\n\n\ndef float32_to_data(num):\n intNum = float2int(num)\n data = [0, 0]\n data[0] = (intNum >> 16) & 0xFFFF\n data[1] = intNum & 0xFFFF\n return data\n\n\n# Converting data arrays to numbers\ndef data_to_uint16(data):\n return data[0]\n\n\ndef data_to_uint32(data):\n return concatData(data)\n\n\ndef data_to_int32(data):\n return struct.unpack(\"=i\", struct.pack(\"=I\", concatData(data)))[0]\n\n\ndef data_to_float32(data):\n return struct.unpack(\"=f\", struct.pack(\"=I\", concatData(data)))[0]\n\n\n# LabJack agent class\nclass LabJackAgent:\n def __init__(self, agent, ip_address, num_channels):\n self.active = True\n self.agent = agent\n self.log = agent.log\n self.lock = TimeoutLock()\n self.ip_address = ip_address\n self.module = None\n self.sensors = ['Channel {}'.format(i+1) for i in range(num_channels)]\n\n self.initialized = False\n self.take_data = False\n\n # Register feed\n agg_params = {\n 'frame_length': 60,\n }\n self.agent.register_feed('Sensors',\n record=True,\n agg_params=agg_params,\n buffer_time=1)\n\n # Task functions\n def init_labjack_task(self, session, params=None):\n \"\"\"\n task to initialize labjack module\n \"\"\"\n\n if self.initialized:\n return True, \"Already initialized module\"\n\n with self.lock.acquire_timeout(0, job='init') as acquired:\n if not acquired:\n self.log.warn(\"Could not start init because 
\"\n \"{} is already running\".format(self.lock.job))\n return False, \"Could not acquire lock.\"\n\n session.set_status('starting')\n\n self.module = ModbusTcpClient(str(self.ip_address))\n\n print(\"Initialized labjack module\")\n\n session.add_message(\"Labjack initialized\")\n\n self.initialized = True\n\n return True, 'LabJack module initialized.'\n\n def start_acq(self, session, params=None):\n \"\"\"\n Task to start data acquisition.\n\n Args:\n sampling_frequency (float):\n Sampling frequency for data collection. Defaults to 2.5 Hz\n\n \"\"\"\n if params is None:\n params = {}\n\n f_sample = params.get('sampling_frequency', 2.5)\n sleep_time = 1/f_sample - 0.01\n\n with self.lock.acquire_timeout(0, job='acq') as acquired:\n if not acquired:\n self.log.warn(\"Could not start acq because \"\n \"{} is already running\".format(self.lock.job))\n return False, \"Could not acquire lock.\"\n\n session.set_status('running')\n\n self.take_data = True\n\n while self.take_data:\n data = {\n 'timestamp': time.time(),\n 'block_name': 'sens',\n 'data': {}\n }\n\n for i, sens in enumerate(self.sensors):\n rr = self.module.read_input_registers(2*i, 2)\n data['data'][sens + 'V'] = data_to_float32(rr.registers)\n\n time.sleep(sleep_time)\n\n self.agent.publish_to_feed('Sensors', data)\n\n self.agent.feeds['Sensors'].flush_buffer()\n\n return True, 'Acquisition exited cleanly.'\n\n def stop_acq(self, session, params=None):\n if self.take_data:\n self.take_data = False\n return True, 'requested to stop taking data.'\n else:\n return False, 'acq is not currently running'\n\ndef make_parser(parser=None):\n \"\"\"Build the argument parser for the Agent. Allows sphinx to automatically\n build documentation based on this function.\n\n \"\"\"\n if parser is None:\n parser = argparse.ArgumentParser()\n\n # Add options specific to this agent.\n pgroup = parser.add_argument_group('Agent Options')\n\n pgroup.add_argument('--ip-address')\n pgroup.add_argument('--num-channels', default='13')\n\n return parser\n\nif __name__ == '__main__':\n site_parser = site_config.add_arguments()\n parser = make_parser(site_parser)\n\n args = parser.parse_args()\n\n site_config.reparse_args(args, 'LabJackAgent')\n\n ip_address = str(args.ip_address)\n num_channels = int(args.num_channels)\n\n agent, runner = ocs_agent.init_site_agent(args)\n\n sensors = LabJackAgent(agent,\n ip_address=ip_address,\n num_channels=num_channels)\n\n agent.register_task('init_labjack', sensors.init_labjack_task)\n agent.register_process('acq', sensors.start_acq, sensors.stop_acq)\n\n runner.run(agent, auto_reconnect=True)\n","sub_path":"agents/labjack/labjack_agent.py","file_name":"labjack_agent.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"24830523","text":"file = open(\"6\", \"r\")\r\ncontent = file.read()\r\nfile.close()\r\n\r\nanswers = content.split(\"\\n\\n\")\r\nfor i in range(len(answers)):\r\n answers[i] = answers[i].replace(\"\\n\", \"\")\r\n\r\ndef count_yes(answer):\r\n no_dupl = \"\"\r\n for l in answer:\r\n if l not in no_dupl:\r\n no_dupl += l\r\n return len(no_dupl)\r\n\r\ns = 0\r\nfor answer in answers:\r\n s += count_yes(answer)\r\n\r\nprint(s)","sub_path":"AOC/6-1.py","file_name":"6-1.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"201236983","text":"from numpy import absolute\nfrom numpy import array\nfrom numpy import 
ndarray\nfrom numpy import resize\nfrom numpy import uint8\nfrom numpy import zeros\nfrom numpy import ones\nfrom numpy import square\nfrom numpy import sqrt\nfrom numpy import amax\nfrom numpy import amin\nfrom numpy import sum\nfrom numpy import cos\nfrom numpy import pi\nfrom numpy import sin\nfrom numpy import dot\nfrom numpy import ogrid\nfrom numpy import mgrid\nfrom numpy import where\nfrom numpy import int64\nfrom numpy import arctan2\nfrom numpy import arange\nfrom numpy import repeat\nfrom numpy import arccos\nfrom numpy import flip\nfrom numpy import ndindex\nfrom numpy import dstack\nfrom numpy import meshgrid\nfrom numpy import linalg\n\nimport cv2\n\ndef join_forms(forms, coords):\n    #if (coords < 0).any():\n    coords -= amin(coords, axis=(0,))\n\n    shapes = array([f.shape for f in forms], dtype=int64)\n    new = zeros((3, amax(shapes[:, 1]+coords[:, 1]), amax(shapes[:, 2]+coords[:, 0])), dtype=uint8)\n    \n    for xy, f in zip(coords, forms):\n        y, x = xy\n        xm, ym = x+f.shape[1], y+f.shape[2]\n        new[:, x:xm, y:ym] = where(f!=0, f, new[:, x:xm, y:ym])\n    return new\n\n\ndef distance(a, b):\n    result = linalg.norm(a-b)\n    return result\n\ndef line(x1, y1, x2, y2, width=1, color=None):\n    width = width - 1 if width > 0 else 0\n\n    coords = array(((x1, y1), (x2, y2)), dtype=int64)\n    coords -= amin(coords, axis=(0,))\n    size = amax(coords, axis=(0,))\n    array_ = zeros((3, size[1], size[0]), dtype=uint8)\n    layer = zeros((size[1], size[0]), dtype=bool)\n\n    # NOTE: fitting y = a*x + b breaks down for vertical segments (x1 == x2 makes\n    # the system singular)\n    equations = array(((x1, 1), (x2, 1)), dtype=int64)\n    solutions = array(((y1), (y2)), dtype=int64)\n    a, b = linalg.solve(equations, solutions)\n\n    y, x = mgrid[:size[1], :size[0]]\n    layer = where((y-width<=a*x + b)*(a*x + b<=y+width), True, False)\n    \n    if color is None:\n        array_[:] = 255*layer\n        return array_\n\n    for e, c in enumerate(color):\n        array_[e] = c*layer\n    return array_\n\ndef lines(points, width, color=None):\n    points -= amin(points, axis=(0,))\n    size = amax(points[:, 0]), amax(points[:, 1])\n    array_ = zeros((3, size[1], size[0]), dtype=uint8)\n    layers = zeros((size[1], size[0]), dtype=uint8)\n\n    points = repeat(points, 2, axis=0)[1:]\n    for p in resize(points, (points.shape[0]//2, 2, 2)):\n        l = line(*p.flatten(), width=width, color=color)\n        array_ = join_forms((array_, l),\n                            array(\n                                ((0, 0),\n                                amin(p, axis=(0,)))\n                                )\n                            )\n\n    return array_\n\n\ndef Rectangle(width, height, color):\n    array_ = zeros((3, height, width), dtype=uint8)\n    \n    for e, rgb in enumerate(color):\n        array_[e] += rgb\n    return array_\n\ndef Circle(radius, color):\n    array_ = zeros((3, 2*radius, 2*radius), dtype=uint8)\n    c = zeros((2*radius, 2*radius), dtype=uint8)\n    \n    y, x = mgrid[:2*radius, :2*radius]\n    c[:] = square(x-radius) + square(y-radius) <= square(radius)\n    \n    for e, rgb in enumerate(color):\n        array_[e][:] = c*rgb\n    return array_\n\ndef Triangle_from_points(a, b, c, color):\n    if a[1] > b[1] and a[1] > c[1]:\n        a[1], b[1], c[1] = a[1]-a[1], a[1]-b[1], a[1]-c[1]\n    elif b[1] > a[1] and b[1] > c[1]:\n        a[1], b[1], c[1] = b[1]-a[1], b[1]-b[1], b[1]-c[1]\n    elif c[1] > a[1] and c[1] > b[1]:\n        a[1], b[1], c[1] = c[1]-a[1], c[1]-b[1], c[1]-c[1]\n\n    if a[0] > b[0] and a[0] > c[0]:\n        a[0], b[0], c[0] = a[0]-a[0], a[0]-b[0], a[0]-c[0]\n    elif b[0] > a[0] and b[0] > c[0]:\n        a[0], b[0], c[0] = b[0]-a[0], b[0]-b[0], b[0]-c[0]\n    elif c[0] > a[0] and c[0] > b[0]:\n        a[0], b[0], c[0] = c[0]-a[0], c[0]-b[0], c[0]-c[0]\n\n    width = amax((a[1], b[1], c[1]))\n    height = amax((a[0], b[0], c[0]))\n\n    array_ = zeros((3, width, height), dtype=uint8)\n    t = zeros((width, height), dtype=uint8)\n\n    area = lambda x1, y1, x2, y2, x3, y3: absolute(x1*(y2-y3)+x2*(y3-y1)+x3*(y1-y2))/2\n\n    main_area = area(*a, *b, *c)\n\n    y, x = mgrid[:width, :height]\n    t[:] = main_area - (area(*a, *b, x, y) + area(*a, *c, x, y) + area(*c, *b, x, y)) == 0\n\n    for e, rgb in enumerate(color):\n        array_[e][:] = t*rgb\n    array_[:] = flip(array_, axis=(2, 1))\n    return array_\n\ndef Polygon(points, color):\n    if (points < 0).any():\n        points -= amin(points, axis=(0,))\n    \n    points = resize(points, (points.shape[0]//3, 3, 2))\n\n    array_ = zeros([3, 1, 1], dtype=uint8)\n\n    for p in points:\n        # assumption: each consecutive triple of points forms one triangle; the\n        # source called an undefined join() here, so this reconstruction is a guess.\n        # Copies are passed because Triangle_from_points mutates its arguments.\n        t = Triangle_from_points(p[0].copy(), p[1].copy(), p[2].copy(), color)\n        array_ = join_forms((array_, t), array(((0, 0), amin(p, axis=(0,)))))\n\n    return array_\n\ndef Circular_sector(radius, angle_range, color):\n    array_ = zeros((3, 2*radius, 2*radius), dtype=uint8)\n    circle = zeros((2*radius, 2*radius), dtype=uint8)\n    theta = zeros((2*radius, 2*radius), dtype=uint8)\n\n    x, y = ogrid[:2*radius, :2*radius]\n    cx, cy = radius, radius\n    tmin, tmax = angle_range\n\n    if tmax < tmin:\n        tmax += 2*pi\n\n    circle[:] = square(x-cx)+square(y-cy) <= square(radius)\n    theta = arctan2(x-cx, y-cy) - tmin\n\n    theta[:] %= 2*pi\n\n    sector = theta <= (tmax-tmin)\n    circle *= sector\n\n    for e, rgb in enumerate(color):\n        array_[e][:] = circle*rgb\n    array_ = flip(array_, axis=(1))\n    return array_\n\nif __name__ == '__main__':\n    #a = Rectangle(300, 1000, (255, 10, 0))\n    #b = Circle(300, (255, 0, 0))\n    '''a1 = array([100, -300])\n    b1 = array([0, 300])\n    c1 = array([400, 0])\n    c = Triangle_from_points(b1, a1, c1, (255, 0, 255))\n    d = Circular_sector(500, (pi/2, pi), (255, 0, 0))\n    e = join_forms((c, d), array(((-100, -90), (0, 0)))) '''\n    #f = line(5,5, 250, 200, 2)\n    g = lines(array([[0,0],[500, 100],[0, 400]], dtype=int), 2)\n    print(g.shape)\n\n    g = dstack([*g])\n    cv2.imshow('img', g)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\n","sub_path":"forms/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"254910399","text":"# Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\niLO Deploy Driver(s) and supporting methods.\n\"\"\"\n\nimport tempfile\n\nfrom oslo.config import cfg\n\nfrom ironic.common import boot_devices\nfrom ironic.common import exception\nfrom ironic.common.i18n import _\nfrom ironic.common.i18n import _LE\nfrom ironic.common.i18n import _LI\nfrom ironic.common import images\nfrom ironic.common import states\nfrom ironic.common import swift\nfrom ironic.conductor import task_manager\nfrom ironic.conductor import utils as manager_utils\nfrom ironic.drivers import base\nfrom ironic.drivers.modules import agent\nfrom ironic.drivers.modules import deploy_utils\nfrom ironic.drivers.modules.ilo import common as ilo_common\nfrom ironic.drivers.modules import ipmitool\nfrom ironic.drivers.modules import iscsi_deploy\nfrom ironic.drivers.modules import pxe\nfrom ironic.drivers import utils as driver_utils\nfrom ironic.openstack.common import log as logging\n\nLOG = logging.getLogger(__name__)\n\nCONF = cfg.CONF\n\nREQUIRED_PROPERTIES = {\n 'ilo_deploy_iso': _(\"UUID (from Glance) of the deployment ISO. \"\n \"Required.\")\n}\nCOMMON_PROPERTIES = REQUIRED_PROPERTIES\n\nCONF.import_opt('pxe_append_params', 'ironic.drivers.modules.iscsi_deploy',\n group='pxe')\nCONF.import_opt('swift_ilo_container', 'ironic.drivers.modules.ilo.common',\n group='ilo')\n\n\ndef _get_boot_iso_object_name(node):\n \"\"\"Returns the boot iso object name for a given node.\n\n :param node: the node for which object name is to be provided.\n \"\"\"\n return \"boot-%s\" % node.uuid\n\n\ndef _get_boot_iso(task, root_uuid):\n \"\"\"This method returns a boot ISO to boot the node.\n\n It chooses one of the two options in the order as below:\n 1. Image deployed has a meta-property 'boot_iso' in Glance. This should\n refer to the UUID of the boot_iso which exists in Glance.\n 2. Generates a boot ISO on the fly using kernel and ramdisk mentioned in\n the image deployed. It uploads the generated boot ISO to Swift.\n\n :param task: a TaskManager instance containing the node to act on.\n :param root_uuid: the uuid of the root partition.\n :returns: the information about the boot ISO. Returns the information in\n the format 'glance:' or\n 'swift:'. 
In case of Swift, it is assumed\n that the object exists in CONF.ilo.swift_ilo_container.\n On error finding the boot iso, it returns None.\n :raises: MissingParameterValue, if any of the required parameters are\n missing in the node's driver_info or instance_info.\n :raises: InvalidParameterValue, if any of the parameters have invalid\n value in the node's driver_info or instance_info.\n :raises: SwiftOperationError, if operation with Swift fails.\n :raises: ImageCreationFailed, if creation of boot ISO failed.\n \"\"\"\n # Option 1 - Check if user has provided a boot_iso in Glance.\n LOG.debug(\"Trying to get a boot ISO to boot the baremetal node\")\n deploy_info = _parse_deploy_info(task.node)\n\n image_href = deploy_info['image_source']\n glance_properties = (\n images.get_glance_image_properties(task.context,\n image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))\n\n boot_iso_uuid = glance_properties.get('boot_iso')\n kernel_uuid = glance_properties.get('kernel_id')\n ramdisk_uuid = glance_properties.get('ramdisk_id')\n\n if boot_iso_uuid:\n LOG.debug(\"Found boot_iso %s in Glance\", boot_iso_uuid)\n return 'glance:%s' % boot_iso_uuid\n\n # NOTE(faizan) For uefi boot_mode, operator should provide efi capable\n # boot-iso in glance\n if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':\n LOG.error(_LE(\"Unable to find boot_iso in Glance, required to deploy \"\n \"node %(node)s in UEFI boot mode.\"),\n {'node': task.node.uuid})\n return\n\n if not kernel_uuid or not ramdisk_uuid:\n LOG.error(_LE(\"Unable to find 'kernel_id' and 'ramdisk_id' in Glance \"\n \"image %(image)s for generating boot ISO for %(node)s\"),\n {'image': image_href, 'node': task.node.uuid})\n return\n\n # NOTE(rameshg87): Functionality to share the boot ISOs created for\n # similar instances (instances with same deployed image) is\n # not implemented as of now. Creation/Deletion of such a shared boot ISO\n # will require synchronisation across conductor nodes for the shared boot\n # ISO. 
Such a synchronisation mechanism doesn't exist in ironic as of now.\n\n # Option 2 - Create boot_iso from kernel/ramdisk, upload to Swift\n # and provide its name.\n boot_iso_object_name = _get_boot_iso_object_name(task.node)\n kernel_params = CONF.pxe.pxe_append_params\n container = CONF.ilo.swift_ilo_container\n\n with tempfile.NamedTemporaryFile() as fileobj:\n boot_iso_tmp_file = fileobj.name\n images.create_boot_iso(task.context, boot_iso_tmp_file,\n kernel_uuid, ramdisk_uuid, root_uuid, kernel_params)\n swift_api = swift.SwiftAPI()\n swift_api.create_object(container, boot_iso_object_name,\n boot_iso_tmp_file)\n\n LOG.debug(\"Created boot_iso %s in Swift\", boot_iso_object_name)\n\n return 'swift:%s' % boot_iso_object_name\n\n\ndef _clean_up_boot_iso_for_instance(node):\n \"\"\"Deletes the boot ISO created in Swift for the instance.\n\n :param node: an ironic node object.\n \"\"\"\n swift_api = swift.SwiftAPI()\n container = CONF.ilo.swift_ilo_container\n boot_iso_object_name = _get_boot_iso_object_name(node)\n try:\n swift_api.delete_object(container, boot_iso_object_name)\n except exception.SwiftOperationError as e:\n LOG.exception(_LE(\"Failed to clean up boot ISO for %(node)s.\"\n \"Error: %(error)s.\"),\n {'node': node.uuid, 'error': e})\n\n\ndef _get_single_nic_with_vif_port_id(task):\n \"\"\"Returns the MAC address of a port which has a VIF port id.\n\n :param task: a TaskManager instance containing the ports to act on.\n :returns: MAC address of the port connected to deployment network.\n None if it cannot find any port with vif id.\n \"\"\"\n for port in task.ports:\n if port.extra.get('vif_port_id'):\n return port.address\n\n\ndef _parse_driver_info(node):\n \"\"\"Gets the driver specific Node deployment info.\n\n This method validates whether the 'driver_info' property of the\n supplied node contains the required information for this driver to\n deploy images to the node.\n\n :param node: a single Node.\n :returns: A dict with the driver_info values.\n :raises: MissingParameterValue, if any of the required parameters are\n missing.\n \"\"\"\n info = node.driver_info\n d_info = {}\n d_info['ilo_deploy_iso'] = info.get('ilo_deploy_iso')\n\n error_msg = _(\"Error validating iLO virtual media deploy. Some parameters\"\n \" were missing in node's driver_info\")\n deploy_utils.check_for_missing_params(d_info, error_msg)\n\n return d_info\n\n\ndef _parse_deploy_info(node):\n \"\"\"Gets the instance and driver specific Node deployment info.\n\n This method validates whether the 'instance_info' and 'driver_info'\n property of the supplied node contains the required information for\n this driver to deploy images to the node.\n\n :param node: a single Node.\n :returns: A dict with the instance_info and driver_info values.\n :raises: MissingParameterValue, if any of the required parameters are\n missing.\n :raises: InvalidParameterValue, if any of the parameters have invalid\n value.\n \"\"\"\n info = {}\n info.update(iscsi_deploy.parse_instance_info(node))\n info.update(_parse_driver_info(node))\n return info\n\n\ndef _reboot_into(task, iso, ramdisk_options):\n \"\"\"Reboots the node into a given boot ISO.\n\n This method attaches the given bootable ISO as virtual media, prepares the\n arguments for ramdisk in virtual media floppy, and then reboots the node.\n\n :param task: a TaskManager instance containing the node to act on.\n :param iso: a bootable ISO image to attach to. The boot iso\n should be present in either Glance or in Swift. 
If present in\n Glance, it should be of format 'glance:'.\n If present in Swift, it should be of format 'swift:'.\n It is assumed that object is present in CONF.ilo.swift_ilo_container.\n :param ramdisk_options: the options to be passed to the ramdisk in virtual\n media floppy.\n :raises: ImageCreationFailed, if it failed while creating the floppy image.\n :raises: IloOperationError, if some operation on iLO failed.\n \"\"\"\n ilo_common.setup_vmedia_for_boot(task, iso, ramdisk_options)\n manager_utils.node_set_boot_device(task, boot_devices.CDROM)\n manager_utils.node_power_action(task, states.REBOOT)\n\n\nclass IloVirtualMediaIscsiDeploy(base.DeployInterface):\n\n def get_properties(self):\n return COMMON_PROPERTIES\n\n def validate(self, task):\n \"\"\"Validate the deployment information for the task's node.\n\n :param task: a TaskManager instance containing the node to act on.\n :raises: InvalidParameterValue, if some information is invalid.\n :raises: MissingParameterValue if 'kernel_id' and 'ramdisk_id' are\n missing in the Glance image.\n \"\"\"\n iscsi_deploy.validate(task)\n\n props = ['kernel_id', 'ramdisk_id']\n d_info = _parse_deploy_info(task.node)\n iscsi_deploy.validate_glance_image_properties(task.context, d_info,\n props)\n driver_utils.validate_boot_mode_capability(task.node)\n\n @task_manager.require_exclusive_lock\n def deploy(self, task):\n \"\"\"Start deployment of the task's node.\n\n Fetches the instance image, prepares the options for the deployment\n ramdisk, sets the node to boot from virtual media cdrom, and reboots\n the given node.\n\n :param task: a TaskManager instance containing the node to act on.\n :returns: deploy state DEPLOYWAIT.\n :raises: InstanceDeployFailure, if image size if greater than root\n partition.\n :raises: ImageCreationFailed, if it failed while creating the floppy\n image.\n :raises: IloOperationError, if some operation on iLO fails.\n \"\"\"\n node = task.node\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n iscsi_deploy.cache_instance_image(task.context, node)\n iscsi_deploy.check_image_size(task)\n\n deploy_ramdisk_opts = iscsi_deploy.build_deploy_ramdisk_options(node)\n deploy_nic_mac = _get_single_nic_with_vif_port_id(task)\n deploy_ramdisk_opts['BOOTIF'] = deploy_nic_mac\n deploy_iso_uuid = node.driver_info['ilo_deploy_iso']\n deploy_iso = 'glance:' + deploy_iso_uuid\n\n _reboot_into(task, deploy_iso, deploy_ramdisk_opts)\n\n return states.DEPLOYWAIT\n\n @task_manager.require_exclusive_lock\n def tear_down(self, task):\n \"\"\"Tear down a previous deployment on the task's node.\n\n Power off the node. 
All actual clean-up is done in the clean_up()\n method which should be called separately.\n\n :param task: a TaskManager instance containing the node to act on.\n :returns: deploy state DELETED.\n \"\"\"\n manager_utils.node_power_action(task, states.POWER_OFF)\n return states.DELETED\n\n def prepare(self, task):\n \"\"\"Prepare the deployment environment for this task's node.\n\n :param task: a TaskManager instance containing the node to act on.\n :raises: IloOperationError, if some operation on iLO failed.\n \"\"\"\n boot_mode = driver_utils.get_node_capability(task.node, 'boot_mode')\n if boot_mode is not None:\n ilo_common.set_boot_mode(task.node, boot_mode)\n else:\n ilo_common.update_boot_mode_capability(task)\n\n def clean_up(self, task):\n \"\"\"Clean up the deployment environment for the task's node.\n\n Unlinks instance image and triggers image cache cleanup.\n\n :param task: a TaskManager instance containing the node to act on.\n \"\"\"\n _clean_up_boot_iso_for_instance(task.node)\n iscsi_deploy.destroy_images(task.node.uuid)\n\n def take_over(self, task):\n pass\n\n\nclass IloVirtualMediaAgentDeploy(base.DeployInterface):\n \"\"\"Interface for deploy-related actions.\"\"\"\n\n def get_properties(self):\n \"\"\"Return the properties of the interface.\n\n :returns: dictionary of : entries.\n \"\"\"\n return COMMON_PROPERTIES\n\n def validate(self, task):\n \"\"\"Validate the driver-specific Node deployment info.\n\n :param task: a TaskManager instance\n :raises: MissingParameterValue if some parameters are missing.\n \"\"\"\n _parse_driver_info(task.node)\n\n @task_manager.require_exclusive_lock\n def deploy(self, task):\n \"\"\"Perform a deployment to a node.\n\n Prepares the options for the agent ramdisk and sets the node to boot\n from virtual media cdrom.\n\n :param task: a TaskManager instance.\n :returns: states.DEPLOYWAIT\n :raises: ImageCreationFailed, if it failed while creating the floppy\n image.\n :raises: IloOperationError, if some operation on iLO fails.\n \"\"\"\n deploy_ramdisk_opts = agent.build_agent_options(task.node)\n deploy_iso_uuid = task.node.driver_info['ilo_deploy_iso']\n deploy_iso = 'glance:' + deploy_iso_uuid\n _reboot_into(task, deploy_iso, deploy_ramdisk_opts)\n\n return states.DEPLOYWAIT\n\n @task_manager.require_exclusive_lock\n def tear_down(self, task):\n \"\"\"Tear down a previous deployment on the task's node.\n\n :param task: a TaskManager instance.\n :returns: states.DELETED\n \"\"\"\n manager_utils.node_power_action(task, states.POWER_OFF)\n return states.DELETED\n\n def prepare(self, task):\n \"\"\"Prepare the deployment environment for this node.\n\n :param task: a TaskManager instance.\n \"\"\"\n node = task.node\n node.instance_info = agent.build_instance_info_for_deploy(task)\n node.save()\n\n def clean_up(self, task):\n \"\"\"Clean up the deployment environment for this node.\n\n Ejects the attached virtual media from the iLO and also removes\n the floppy image from Swift, if it exists.\n\n :param task: a TaskManager instance.\n \"\"\"\n ilo_common.cleanup_vmedia_boot(task)\n\n def take_over(self, task):\n \"\"\"Take over management of this node from a dead conductor.\n\n :param task: a TaskManager instance.\n \"\"\"\n pass\n\n\nclass IloPXEDeploy(pxe.PXEDeploy):\n\n def prepare(self, task):\n \"\"\"Prepare the deployment environment for this task's node.\n\n If the node's 'capabilities' property includes a boot_mode, that\n boot mode will be applied for the node. 
Otherwise, the existing\n boot mode of the node is used in the node's 'capabilities' property.\n\n PXEDeploys' prepare method is then called, to prepare the deploy\n environment for the node\n\n :param task: a TaskManager instance containing the node to act on.\n \"\"\"\n boot_mode = driver_utils.get_node_capability(task.node, 'boot_mode')\n if boot_mode is None:\n ilo_common.update_boot_mode_capability(task)\n else:\n ilo_common.set_boot_mode(task.node, boot_mode)\n super(IloPXEDeploy, self).prepare(task)\n\n def deploy(self, task):\n \"\"\"Start deployment of the task's node.\n\n This method sets the boot device to 'NETWORK' and then calls\n PXEDeploy's deploy method to deploy on the given node.\n\n :param task: a TaskManager instance containing the node to act on.\n :returns: deploy state DEPLOYWAIT.\n \"\"\"\n manager_utils.node_set_boot_device(task, boot_devices.PXE)\n return super(IloPXEDeploy, self).deploy(task)\n\n\nclass IloConsoleInterface(ipmitool.IPMIShellinaboxConsole):\n \"\"\"A ConsoleInterface that uses ipmitool and shellinabox.\"\"\"\n\n def get_properties(self):\n d = ilo_common.REQUIRED_PROPERTIES.copy()\n d.update(ilo_common.CONSOLE_PROPERTIES)\n return d\n\n def validate(self, task):\n \"\"\"Validate the Node console info.\n\n :param task: a task from TaskManager.\n :raises: InvalidParameterValue\n :raises: MissingParameterValue when a required parameter is missing\n\n \"\"\"\n node = task.node\n driver_info = ilo_common.parse_driver_info(node)\n if 'console_port' not in driver_info:\n raise exception.MissingParameterValue(_(\n \"Missing 'console_port' parameter in node's driver_info.\"))\n\n ilo_common.update_ipmi_properties(task)\n super(IloConsoleInterface, self).validate(task)\n\n\nclass IloPXEVendorPassthru(pxe.VendorPassthru):\n\n @base.passthru(['POST'], method='pass_deploy_info')\n def _continue_deploy(self, task, **kwargs):\n manager_utils.node_set_boot_device(task, boot_devices.PXE, True)\n super(IloPXEVendorPassthru, self)._continue_deploy(task, **kwargs)\n\n\nclass VendorPassthru(base.VendorInterface):\n \"\"\"Vendor-specific interfaces for iLO deploy drivers.\"\"\"\n\n def get_properties(self):\n return COMMON_PROPERTIES\n\n def validate(self, task, method, **kwargs):\n \"\"\"Validate vendor-specific actions.\n\n Checks if a valid vendor passthru method was passed and validates\n the parameters for the vendor passthru method.\n\n :param task: a TaskManager instance containing the node to act on.\n :param method: method to be validated.\n :param kwargs: kwargs containing the vendor passthru method's\n parameters.\n :raises: MissingParameterValue, if some required parameters were not\n passed.\n :raises: InvalidParameterValue, if any of the parameters have invalid\n value.\n \"\"\"\n iscsi_deploy.get_deploy_info(task.node, **kwargs)\n\n @base.passthru(['POST'], method='pass_deploy_info')\n @task_manager.require_exclusive_lock\n def _continue_deploy(self, task, **kwargs):\n \"\"\"Continues the iSCSI deployment from where ramdisk left off.\n\n Continues the iSCSI deployment from the conductor node, finds the\n boot ISO to boot the node, and sets the node to boot from boot ISO.\n\n :param task: a TaskManager instance containing the node to act on.\n :param kwargs: kwargs containing parameters for iSCSI deployment.\n :raises: InvalidState\n \"\"\"\n node = task.node\n task.process_event('resume')\n\n ilo_common.cleanup_vmedia_boot(task)\n root_uuid = iscsi_deploy.continue_deploy(task, **kwargs)\n\n if not root_uuid:\n return\n\n try:\n boot_iso = 
_get_boot_iso(task, root_uuid)\n\n if not boot_iso:\n LOG.error(_LE(\"Cannot get boot ISO for node %s\"), node.uuid)\n return\n\n ilo_common.setup_vmedia_for_boot(task, boot_iso)\n manager_utils.node_set_boot_device(task, boot_devices.CDROM)\n\n address = kwargs.get('address')\n deploy_utils.notify_deploy_complete(address)\n\n LOG.info(_LI('Deployment to node %s done'), node.uuid)\n\n i_info = node.instance_info\n i_info['ilo_boot_iso'] = boot_iso\n node.instance_info = i_info\n task.process_event('done')\n except Exception as e:\n LOG.error(_LE('Deploy failed for instance %(instance)s. '\n 'Error: %(error)s'),\n {'instance': node.instance_uuid, 'error': e})\n msg = _('Failed to continue iSCSI deployment.')\n deploy_utils.set_failed_state(task, msg)\n","sub_path":"ironic/drivers/modules/ilo/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":20507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"133484987","text":"import io\r\nimport json\r\nimport os\r\nfrom utility import *\r\nimport time\r\nimport csv\r\nimport socket\r\nfrom datetime import datetime\r\nimport pytz\r\n\r\nclass arpAnaliytics():\r\n\r\n\r\n def __init__(self,path):\r\n self.path_to_analiytic_json = os.path.dirname(os.path.abspath(__file__)) + '/ARPanaliytic.json'\r\n # self.path_to_blacklist_MAC_addresses = os.path.dirname(os.path.abspath(__file__)) + '/Blacklist_MAC_addresses.csv'\r\n self.path = path[0]\r\n self.check_if_cash_file_is_exist_if_not_created()\r\n self.check_if_history_file_not_created()\r\n self.suspected_MACs = []\r\n \r\n \r\n def Alert_for_suspected_MAC_address(self,s_MAC,s_IP,victimIP):\r\n WeAlreadyAlertAboutThatInLastMinute = False\r\n\r\n # checking if the last attack was at list minute ago\r\n # if it was from the same mac\r\n with open(self.path + '/history.csv', 'r') as f:\r\n listOfAttacks = list(reversed(list(csv.reader(f))))\r\n if len(listOfAttacks) != 0:\r\n\r\n for row in listOfAttacks:\r\n if row[0] == s_MAC:\r\n lastAttack = row\r\n # Here we found attack last time in history \r\n # From the current attacker \r\n if ( float(time.time()) - float(lastAttack[7]) ) < 60 :\r\n WeAlreadyAlertAboutThatInLastMinute = True\r\n\r\n if WeAlreadyAlertAboutThatInLastMinute:\r\n return\r\n\r\n # assemble data\r\n def lookup(addr):\r\n try:\r\n return socket.gethostbyaddr(str(addr))[0]\r\n except:\r\n return 'Offline'\r\n victimName = lookup(victimIP)\r\n attackerName = lookup(s_IP)\r\n date = time.strftime(\"%d/%m/%y\")\r\n now = datetime.now(tz=pytz.timezone('Israel'))\r\n time_now = str(now.hour) +':'+ str(now.minute)\r\n line = [s_MAC,s_IP,victimIP,victimName,attackerName,date,time_now,time.time()] \r\n log('Alert',True)\r\n log(line,True)\r\n\r\n with open(self.path + '/history.csv', 'a') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(line)\r\n\r\n sendEmail('ARP Spoofing',str(line))\r\n \r\n\r\n def updateDataBase(self,listOf_ip_and_mac):\r\n if os.path.isfile(self.path_to_analiytic_json) and os.access(self.path_to_analiytic_json, os.R_OK):\r\n \r\n with open(self.path_to_analiytic_json) as data_file:\r\n data_loaded = json.load(data_file)\r\n log(data_loaded)\r\n\r\n infected_IP = False\r\n\r\n #if not the first time\r\n if data_loaded != {}:\r\n for i in range(0,len(listOf_ip_and_mac)):\r\n newList = list(listOf_ip_and_mac[i])\r\n \r\n for j in data_loaded:\r\n # If the MAC is the same but ip is defferent.\r\n if newList[0] == j[0] and newList[1] != j[1]:\r\n 
self.Alert_for_suspected_MAC_address(newList[0],newList[1],j[1])\r\n infected_IP = True\r\n if (newList in data_loaded) or (infected_IP == True):\r\n infected_IP = False\r\n pass\r\n elif newList[0] != '00:00:00:00:00:00':\r\n print(newList)\r\n data_loaded.append(newList)\r\n self.updateLocalDataBase(data_loaded)\r\n infected_IP = False\r\n log(data_loaded)\r\n else:\r\n listWithoutZeros = []\r\n for i in listOf_ip_and_mac:\r\n if i[0] != '00:00:00:00:00:00':\r\n listWithoutZeros.append(i)\r\n self.updateLocalDataBase(listWithoutZeros)\r\n\r\n def updateLocalDataBase(self,listOf_ip_and_mac):\r\n with open(self.path_to_analiytic_json, 'w') as f:\r\n json.dump(listOf_ip_and_mac, f, ensure_ascii=False)\r\n\r\n def check_if_history_file_not_created(self):\r\n try:\r\n with open(self.path + '/history.csv', 'rb') as csvfile:\r\n pass\r\n except:\r\n with open(self.path + '/history.csv', 'wb') as csvfile:\r\n pass\r\n # if os.path.isfile(self.path_to_blacklist_MAC_addresses) and os.access(self.path_to_blacklist_MAC_addresses, os.R_OK):\r\n # with open(self.path_to_blacklist_MAC_addresses, 'w+') as csvfile:\r\n # filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n # os.chmod(self.path_to_blacklist_MAC_addresses,0o777)\r\n \r\n\r\n def check_if_cash_file_is_exist_if_not_created(self):\r\n \r\n log(self.path_to_analiytic_json)\r\n\r\n # if os.path.isfile(self.path_to_analiytic_json) and os.access(self.path_to_analiytic_json, os.R_OK):\r\n # # checks if file exists\r\n # log (\"File exists and is readable\")\r\n # os.chmod(self.path_to_analiytic_json,0o777)\r\n # else:\r\n # log (\"Either file is missing or is not readable, creating file...\")\r\n with io.open(os.path.join(self.path_to_analiytic_json, self.path_to_analiytic_json), \"w+\") as db_file:\r\n db_file.write(json.dumps({}))\r\n os.chmod(self.path_to_analiytic_json,0o777) ","sub_path":"arpAnaliytic.py","file_name":"arpAnaliytic.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"82904248","text":"############################### README ###############################\n# usage exp.: python3 encipher.py -k key_doc.txt -f to_be_enciphered_doc.txt -o output.txt\n######################################################################\n\nfrom main import zh_encipher, zh_encipher_key\nimport sys\nimport os\nimport time\nimport argparse\nimport datetime\n\nstart_time = time.time()\n\nparser = argparse.ArgumentParser(description=\"ENCIPHER TEXT FILE\")\nparser.add_argument(\"-k\", \"--key\", help=\"Specify key file.\", required=True)\nparser.add_argument(\n \"-f\", \"--file\", help=\"Specify file to be encrypted.\", required=True)\nparser.add_argument(\n \"-o\", \"--output\", help=\"Specify optional output file name.\")\nargs = parser.parse_args(sys.argv[1:])\n\nwith open(args.key) as key_file:\n key = [int(i) for i in key_file.read().split()]\n\nwith open(args.file) as msg_file:\n msg = msg_file.read()\n\nenciphered_msg = zh_encipher_key(msg, key)\n\nif args.output:\n with open(args.output, \"w\") as output:\n output.write(enciphered_msg+\"\\n\")\nelse:\n with open(\"enciphered \" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \".txt\", \"w\") as output:\n output.write(enciphered_msg+\"\\n\")\n\nend_time = time.time()\nprint(args.file + \" (\" + str(len(msg)) + \" char(s)) encrypted in \" +\n str(round(end_time-start_time, 2)) + \" 
second(s).\")\n","sub_path":"zh_encipher.py","file_name":"zh_encipher.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"386754004","text":"# pylint: disable=redefined-outer-name\n# pylint: disable=unused-argument\n# pylint: disable=unused-variable\n\n\nfrom typing import Any, Awaitable, Callable, Mapping\n\nimport aiodocker\nimport pytest\nfrom models_library.generated_models.docker_rest_api import Task\nfrom pydantic import ValidationError, parse_obj_as\nfrom simcore_service_autoscaling.models import SimcoreServiceDockerLabelKeys\n\n\nasync def test_task_ownership_from_task_with_missing_labels_raises(\n async_docker_client: aiodocker.Docker,\n create_service: Callable[[dict[str, Any]], Awaitable[Mapping[str, Any]]],\n task_template: dict[str, Any],\n):\n service_missing_osparc_labels = await create_service(task_template)\n service_tasks = parse_obj_as(\n list[Task],\n await async_docker_client.tasks.list(\n filters={\"service\": service_missing_osparc_labels[\"Spec\"][\"Name\"]}\n ),\n )\n assert service_tasks\n assert len(service_tasks) == 1\n with pytest.raises(ValidationError):\n SimcoreServiceDockerLabelKeys.from_docker_task(service_tasks[0])\n\n\ndef test_osparc_docker_label_keys_to_docker_labels(\n osparc_docker_label_keys: SimcoreServiceDockerLabelKeys,\n):\n exported_dict = osparc_docker_label_keys.to_docker_labels()\n assert all(isinstance(v, str) for v in exported_dict.values())\n assert parse_obj_as(SimcoreServiceDockerLabelKeys, exported_dict)\n\n\nasync def test_task_ownership_from_task(\n async_docker_client: aiodocker.Docker,\n create_service: Callable[\n [dict[str, Any], dict[str, str]], Awaitable[Mapping[str, Any]]\n ],\n task_template: dict[str, Any],\n osparc_docker_label_keys: SimcoreServiceDockerLabelKeys,\n):\n service_with_labels = await create_service(\n task_template,\n osparc_docker_label_keys.to_docker_labels(),\n )\n service_tasks = parse_obj_as(\n list[Task],\n await async_docker_client.tasks.list(\n filters={\"service\": service_with_labels[\"Spec\"][\"Name\"]}\n ),\n )\n assert service_tasks\n assert len(service_tasks) == 1\n task_ownership = SimcoreServiceDockerLabelKeys.from_docker_task(service_tasks[0])\n assert task_ownership\n assert task_ownership.user_id == osparc_docker_label_keys.user_id\n assert task_ownership.project_id == osparc_docker_label_keys.project_id\n assert task_ownership.node_id == osparc_docker_label_keys.node_id\n","sub_path":"services/autoscaling/tests/unit/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"424445639","text":"#coding:utf-8\nimport os\nimport numpy as np\nimport pandas as pd\nfrom operator import itemgetter\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom matplotlib import pyplot as plt\n\n\n\n\ndef test_k(X_train, y_train, X_test, y_test):\n error = []\n\n # Calculating error for K values between 1 and 40\n for i in range(1, 40):\n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(X_train, y_train)\n pred_i = knn.predict(X_test)\n error.append(np.mean(pred_i != y_test)) \n\n print(error)\n plt.figure(figsize=(12, 6))\n plt.plot(range(1, 40), error, color='red', linestyle='dashed', marker='o',\n 
markerfacecolor='blue', markersize=10)\n plt.title('Error Rate K Value')\n plt.xlabel('K Value')\n plt.ylabel('Mean Error')\n plt.show()\n\n\n\n\n\ndef main():\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n\n # 为数据集分配colum名称\n names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']\n\n\n # 将数据集读取到panda dataframe\n dataset = pd.read_csv(url, names=names) ## dataset = datasets.load_iris().data\n \n print('特征变量的长度',len(dataset))\n\n X = dataset.iloc[:, :-1].values #前四列\n y = dataset.iloc[:, 4].values # 最后一项\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)\n # X_train 划出的训练集数据(返回值)\n # X_test 划出的测试集数据(返回值) \n # y_train 划出的训练集标签(返回值)\n # y_test 划出的测试集标签(返回值)\n ## 归一化特征\n scaler = StandardScaler()\n scaler.fit(X_train)\n # X_train = scaler.transform(X_train) ### z = (x - u) / s \n # X_test = scaler.transform(X_test)\n\n # 训练数据\n # 引入训练方法\n classifier = KNeighborsClassifier(n_neighbors=4) ## k = 4\n classifier.fit(X_train, y_train)\n\n # 测试k值\n # test_k(X_train, y_train, X_test, y_test)\n\n ### # 预测数据,预测特征值\n y_pred = classifier.predict(X_test)\n\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n\n\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"knn/knn3.py","file_name":"knn3.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"67342646","text":"# from sys import argv\n#\n# script_name, p1, p2, p3 = argv\n#\n# print('Имя скрипта: ', script_name)\n# print(\"Параметр1: \", p1)\n# print(\"Параметр2: \", p2)\n# print(\"Параметр3: \", p3)\n\n# my_list = [1, 2, 3, 4, 5, 6]\n# new_list = [el + 10 for el in my_list]\n# new_list1 = [el for el in my_list if el % 2 == 0]\n# print(my_list)\n# print(new_list)\n# print(new_list1)\n#\n# s1 = 'abc'\n# s2 = 'd'\n# s3 = 'efg'\n# new_list2 = [i + j + k for i in s1 for j in s2 for k in s3]\n# print(new_list2)\n#\n# new_list3 = {el: el ** 2 for el in range(10,20)}\n# new_list4 = {el ** 3 for el in range(5,10)}\n# my_tuple = (2,4,6)\n# new_list5 = (el +10 for el in my_tuple)\n# print(new_list3)\n# print(new_list4)\n# print(next(new_list5))\n\n# import random\n# print(random.randint(0,10))\n\nimport random\n\n# from random import randint, randrange\n#\n# print(randint(0, 10))\n# print(randrange(0, 10, 3)) - от нуля не включая верхнюю границу с шагом 3\n\n# generator = (par * par for par in range(5))\n# print(generator)\n# for el in generator:\n# print(el)\n#\n# def generator1():\n# for i in (10,20,30):\n# yield i\n# g = generator1()\n# print(g)\n# for i in g:\n# print(i)\n\ndef cube(n):\n c_list = []\n for i in n:\n c_list.append(i ** 3)\n return c_list\nprint(cube([1,2,3,4]))\n\ndef cube1(n):\n for i in n:\n yield i ** 3\nm = cube1([1,2,3,4,5])\nfor i in m:\n print(i)\n\nfrom functools import reduce\ndef my_f(el0, el1):\n return el0 + el1\nprint(reduce(my_f, [1,2,3,4,5]))\n\nfrom itertools import count, cycle\n\n# for el in count(7, 3):\n# if el > 15:\n# break\n# else:\n# print(el)\n# c = 0\n# for el in cycle(\"ABC\"):\n# if c > 10:\n# break\n# print(el)\n# c += 1\n","sub_path":"lesson4.py","file_name":"lesson4.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"187584385","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String, Bool\nfrom ltl_automaton_planner.ltl_automaton_utilities import import_ts_from_file\n# from 
sensor_msgs.msg import JointState\nfrom copy import deepcopy\nfrom ltl_automaton_msgs.msg import TransitionSystemStateStamped\n# from ltl_automaton_msgs.srv import ClosestState\nimport sys\n\n#=====================================\n# Monitor pick and drop agents\n# return pick/drop inplace state\n#=====================================\nclass HebiPickDropInPLaceMonitor(object):\n def __init__(self):\n\n # to obtain from launch file\n self.pick_agent_list = rospy.get_param('~pick_agent_list')\n self.drop_agent_list = rospy.get_param('~drop_agent_list')\n self.pick_agent_load_state_guard_list = rospy.get_param('~pick_agent_load_state_guard')\n\n self.pick_region = rospy.get_param('~pick_region')\n self.drop_region = rospy.get_param('~drop_region')\n\n self.pick_agent_region_dic = {}\n self.drop_agent_region_dic = {}\n self.pick_agent_load_dic = {}\n self.drop_agent_load_dic = {}\n\n for agent in self.pick_agent_list:\n self.pick_agent_region_dic[agent] = None\n self.pick_agent_load_dic[agent] = None\n\n for agent in self.drop_agent_list:\n self.drop_agent_region_dic[agent] = None\n self.drop_agent_load_dic[agent] = None\n\n\n\n self.pick_agent_inplace = False\n self.drop_agent_inplace = False\n\n # print(self.pick_agent_list[0])\n\n # Setup callback\n self.setup_pub_sub()\n self.main_loop()\n\n #----------------------------------\n # Setup subscribers and publishers\n #----------------------------------\n def setup_pub_sub(self):\n\n # for experiment\n for agent in self.pick_agent_list:\n rospy.Subscriber(\"/\" + agent+ \"/current_region\", String, self.pick_agent_region_callback,agent, queue_size=10)\n rospy.Subscriber(\"/\" + agent+ \"/ts_state\", TransitionSystemStateStamped, self.pick_agent_loaded_callback,agent,queue_size=10)\n\n for agent in self.drop_agent_list:\n rospy.Subscriber(\"/\" + agent+ \"/current_region\", String, self.drop_agent_region_callback,agent, queue_size=10)\n rospy.Subscriber(\"/\" + agent+ \"/ts_state\", TransitionSystemStateStamped, self.drop_agent_loaded_callback,agent,queue_size=10)\n\n # Publisher of current pick/drop inplace info\n self.pick_inplece_pub = rospy.Publisher(\"pick_inplace_ack\", Bool, latch=True, queue_size=10)\n self.drop_inplece_pub = rospy.Publisher(\"drop_inplace_ack\", Bool, latch=True, queue_size=10)\n\n #---------------------------------------\n # handle pick_agent_region_callback callback\n #---------------------------------------\n def pick_agent_region_callback(self, msg,agent):\n self.pick_agent_region_dic[agent] = msg.data\n\n #---------------------------------------\n # handle pick_agent_loaded_callback callback\n #---------------------------------------\n def pick_agent_loaded_callback(self, msg,agent):\n self.pick_agent_load_dic[agent] = False\n for load_state in self.pick_agent_load_state_guard_list:\n if load_state in msg.ts_state.states:\n self.pick_agent_load_dic[agent] = True\n\n #---------------------------------------\n # handle pick_agent_region_callback callback\n #---------------------------------------\n def drop_agent_region_callback(self, msg,agent):\n self.drop_agent_region_dic[agent] = msg.data\n\n #---------------------------------------\n # handle pick_agent_loaded_callback callback\n #---------------------------------------\n def drop_agent_loaded_callback(self, msg,agent):\n self.drop_agent_load_dic[agent] = True\n if \"unloaded\" in msg.ts_state.states:\n self.drop_agent_load_dic[agent] = False\n\n\n def main_loop(self):\n rate = rospy.Rate(20)\n\n while not rospy.is_shutdown():\n # Check for the pick region is 
occupied or not and the load state\n # update message and publish it\n self.pick_agent_inplace = False\n self.drop_agent_inplace = False\n\n for pick_agent in self.pick_agent_list:\n if self.pick_agent_region_dic[pick_agent] == self.pick_region and self.pick_agent_load_dic[pick_agent] == True:\n self.pick_agent_inplace = True\n\n\n for drop_agent in self.drop_agent_list:\n if self.drop_agent_region_dic[drop_agent] == self.drop_region and self.drop_agent_load_dic[drop_agent] == False:\n self.drop_agent_inplace = True\n\n self.pick_inplece_pub.publish(self.pick_agent_inplace)\n self.drop_inplece_pub.publish(self.drop_agent_inplace)\n\n # rospy.loginfo(\"State is %s and prev state is %s\" %(self.curr_ltl_state, self.prev_ltl_state))\n rate.sleep() \n\n\n\n#============================\n# Main \n#============================\nif __name__ == '__main__':\n rospy.init_node('hebi_pick_drop_inplace_monitor',anonymous=False)\n try:\n hebi_load_monitor = HebiPickDropInPLaceMonitor()\n rospy.spin()\n except ValueError as e:\n rospy.logerr(\"Hebi load Monitor: %s\" %(e))\n sys.exit(0)\n\n\n\n ","sub_path":"src/hebi_pick_drop_inplace_monitor.py","file_name":"hebi_pick_drop_inplace_monitor.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"245880972","text":"# -*- coding: utf-8 -*-\n# @Time : now()\n# @Author : AngesZhu\n# @File : .py\n# @desc:\nfrom kazoo.client import KazooClient\nfrom urllib.parse import unquote\n\n\nclass GetDubboInfoForZK:\n \"\"\"\n @Author: 朱孟彤\n @desc: python操作zk查询dubbo的基础类\n \"\"\"\n\n def createzk(self, host):\n \"\"\"\n 创建一个zk链接\n :return 返回zk链接\n \"\"\"\n zk = KazooClient(hosts=host)\n zk.start()\n return zk\n\n def getallinterfaceforaldb(self, zk):\n \"\"\"\n 通过zk链接,查询所有注册包含aldb的interface\n :param zk:\n :return: 返回interface的分类以及列表\n \"\"\"\n # 查找zk dubbo-online下面的子节点\n children = zk.get_children('/dubbo-online')\n # 筛选出method为空的interface\n c = []\n if isinstance(children, list):\n for inter in children:\n if zk.get_children('/dubbo-online/%s/providers' % inter):\n c.append(inter)\n # 筛选出包含aldb的interface\n mudel = []\n for i in c:\n # 取出所有模块的名称\n if 'com.aldb.' 
== i[0:9]:\n s = str(i)\n l = s.split('.')\n mudel.append(l[2])\n m = set(mudel) # 去重模块名称\n # 生成模块名以及所对应的interface列表的字典\n d = {}\n for n in m:\n o = []\n for j in c:\n if n in j:\n o.append(j)\n d[n] = o\n return d\n\n def getinfofrominterface(self, zk, interface):\n \"\"\"\n 根据interface获取信息\n :param zk: zk链接\n :param interface: 要查询的interface\n :return: interface对应的信息\n \"\"\"\n children = zk.get_children('/dubbo-online/%s/providers' % interface)\n if children:\n info = children[0]\n if isinstance(info, str):\n j = unquote(info)\n l = j.split(\"&\")\n return l\n\n def infohandle(self, zk, interface):\n \"\"\"\n 根据interface获取信息\n :param zk: zk链接\n :param interface: 要查询的interface\n :return: interface对应的信息\n \"\"\"\n infodict = {}\n infodict['interface'] = interface\n allinfo = self.getinfofrominterface(zk=zk, interface=interface)\n hosts = allinfo[0]\n hosts = hosts.split(\"/\")[2] # 字符串截取方式获取ip和端口号\n infodict['hosts'] = hosts\n for i in allinfo:\n if 'methods' in i:\n if isinstance(i, str):\n l = i.split(\"=\")\n methods = l[1].split(',')\n infodict['methods'] = methods\n return infodict\n\n\nif __name__ == '__main__':\n zktelent = GetDubboInfoForZK()\n zk = zktelent.createzk(host='10.148.16.24:2181')\n print(zktelent.getallinterfaceforaldb(zk))\n # children = zk.get_children('/dubbo-online')\n # print(children)\n # print(type(children))\n # for i in children:\n # \t# print(i)\n # \tinfo=zk.get_children('/dubbo-online/%s/providers' % i)\n # \tif info:\n # \t\tinfo=info[0]\n # \t\tif isinstance(info, str):\n # \t\t\tj = unquote(info)\n # \t\t\tl = j.split(\"&\")\n # \t\t\tprint(l)\n # d = zktelent.getallinterfaceforaldb(zk)\n # info = zktelent.infohandle(zk,'com.aldb.magiclub.api.facade.CardFacade')\n # print(info)\n # print(info['methods'])\n # m = d.keys()\n # print(list(m).sort())\n zk.stop()\n","sub_path":"AutoTest/Src/common/DubboZK.py","file_name":"DubboZK.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"464810227","text":"import plotly.express as px\nimport plotly.graph_objects as go\nimport plotly\nimport pandas as pd\nimport sys\n\naccesstoken = open(sys.argv[1]).read()\n\ndf = pd.read_json(\"Filialen.json\")\n\npx.set_mapbox_access_token(accesstoken)\nfig = px.scatter_mapbox(\n df, lat=\"lat\", lon=\"lon\", color=\"chain\", hover_data={\"label\": True, \"hours\": True},\n labels={\"chain\": \"Kette\"}, title=\"Lebensmittelläden mit Sonntagsöffung\",\n opacity=.8, mapbox_style=\"light\", zoom=5.5, center={\"lat\": 51.2, \"lon\": 10.3},\n color_discrete_map={\"Schwarzer Netto\": \"black\", \"Penny\": \"#cd1414\", \"Aldi Nord\": \"#00b4dc\",\n \"Aldi Süd\": \"#ee6e00\", \"Lidl\": \"#003673\", \"Edeka\": \"#fce531\", \"Rewe\": \"#cc071e\"}) #\"Roter Netto\": \"#ffe500\",\nfig.update_traces({\n \"textposition\": \"bottom center\",\n \"marker\": {\"size\": 10},\n \"hovertemplate\": \"%{customdata[0]}

%{customdata[1]}\"\n})\nfig.update_layout(autosize=True, legend=dict(xanchor=\"left\", yanchor=\"top\", y=1, x=0))\n\n# fig.show(config={\"locale\": \"de-DE\"})\nfig.write_html(\"index.html\")\n\nfig.update_layout(title={\"text\": \"\"}, margin={\"l\": 10, \"r\": 10, \"b\": 10, \"t\": 10})\nfig.write_json(\"figure.json\")\n\n# orca graph .\\figure.json -o figure.png --mathbox-access-token \"pk.eyJ1Ijoiam9oYW5uZ29sdHoiLCJhIjoiY2tjbWFiaHRsMjBzcjJycXFuM3pseDEybSJ9.8MIAWvV1iG11vsGgLpmJsA\" --scale 3 --width 700 --height 900\n","sub_path":"draw_map.py","file_name":"draw_map.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"175632198","text":"from re import search\nfrom django.shortcuts import render\nfrom django.http.response import HttpResponse,JsonResponse\nfrom django.shortcuts import render,redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nimport requests\nfrom flats.models import Flats\nfrom flats.serializers import FlatsSerializer\nfrom rest_framework import status\nimport json\n# Create your views here.\n\ndef home(request):\n return render(request,'home.html')\n\ndef add_page(request):\n return render(request,'add.html')\n\ndef view_page(request):\n flatdata = requests.get(\"http://13.126.238.231:8000/flats/all/\").json()\n return render(request,\"view.html\",{'data':flatdata})\n\ndef search_page(request):\n return render(request,'search.html')\n\ndef update_page(request):\n return render(request,'update.html')\n\ndef delete_page(request):\n return render(request,'delete.html')\n\n@csrf_exempt\ndef add_flat(request):\n if(request.method == \"POST\"):\n # flat= JSONParser().parse(request)\n flat_serialize = FlatsSerializer(data=request.POST)\n if(flat_serialize.is_valid()):\n flat_serialize.save()\n # return JsonResponse(flat_serialize.data)\n return redirect(view_page)\n # else:\n # return HttpResponse(\"error \")\n # else:\n # return HttpResponse(\"no get method\")\n\n\n@csrf_exempt\ndef view_flat(request):\n if(request.method == \"GET\"):\n flat = Flats.objects.all()\n flat_serialize = FlatsSerializer(flat,many =True)\n return JsonResponse(flat_serialize.data,safe=False,status=status.HTTP_200_OK)\n\n\n@csrf_exempt\ndef update(request,bulding_no):\n try:\n flat = Flats.objects.get(bulding_no=bulding_no)\n if(request.method == \"GET\"):\n flat_serialize = FlatsSerializer(flat)\n return JsonResponse(flat_serialize.data,status=status.HTTP_200_OK)\n\n if (request.method == \"DELETE\"):\n flat.delete()\n return HttpResponse(\"deleted\")\n\n if(request.method == \"PUT\"):\n myflat = JSONParser().parse(request)\n flat_serialize = FlatsSerializer(flat,data=myflat)\n if(flat_serialize.is_valid()):\n flat_serialize.save()\n return JsonResponse(flat_serialize.data,status=status.HTTP_200_OK)\n \n except Flats.DoesNotExist:\n return HttpResponse(\"You Enter Invalid ID\",status=status.HTTP_404_NOT_FOUND)\n\n\n@csrf_exempt\ndef search_flat(request):\n try:\n getbulding_no = request.POST.get(\"bulding_no\")\n getbulding = Flats.objects.filter(bulding_no=getbulding_no)\n bulding_serialize = FlatsSerializer(getbulding,many=True)\n return render(request,'search.html',{\"data\":bulding_serialize.data})\n except Flats.DoesNotExist:\n return HttpResponse('Invalid No ')\n except:\n return HttpResponse(\"something went wrong\")\n\n\n@csrf_exempt\ndef updateapi(request):\n try:\n getbulding_no = request.POST.get(\"bulding_no\")\n getbulding = 
Flats.objects.filter(bulding_no=getbulding_no)\n bulding_serialize = FlatsSerializer(getbulding,many=True)\n return render(request,'update.html',{\"data\":bulding_serialize.data})\n except Flats.DoesNotExist:\n return HttpResponse('Invalid No')\n except:\n return HttpResponse(\"something went wrong\")\n\n@csrf_exempt\ndef update_flat(request):\n getid = request.POST.get(\"newid\")\n getbuilding_no = request.POST.get(\"newbuilding_no\")\n getownername = request.POST.get(\"newowner_name\")\n getaddress = request.POST.get(\"newaddress\")\n getmobile_no = request.POST.get(\"newmobile_no\")\n getadhar_no = request.POST.get(\"newadhar_no\")\n getemailid = request.POST.get(\"newemailid\")\n getpassword = request.POST.get(\"newpassword\")\n mydata= {\"id\":getid,\"bulding_no\":getbuilding_no,\"owner_name\":getownername,\"address\":getaddress,\"mobile_no\":getmobile_no,\"adhar_no\":getadhar_no,\"emailid\":getemailid,\"password\":getpassword}\n jsondata=json.dumps(mydata)\n print(jsondata)\n apilink = \"http://13.126.238.231:8000/flats/update/\"+str(getbuilding_no)\n requests.put(apilink, data=jsondata)\n return redirect(view_page)\n\n # return HttpResponse('Data has updated successfully')\n\n@csrf_exempt\ndef deleteapi(request):\n try:\n getflat = request.POST.get(\"bulding_no\")\n getf = Flats.objects.filter(bulding_no=getflat)\n flat_serialize = FlatsSerializer(getf,many=True)\n return render(request,'delete.html',{\"data\":flat_serialize.data})\n except Flats.DoesNotExist:\n return HttpResponse('Invalid No ')\n except:\n return HttpResponse(\"something went wrong\")\n\n\n\n@csrf_exempt\ndef delete_data(request): \n getno = request.POST.get(\"newbuilding_no\")\n apilink = \"http://13.126.238.231:8000/flats/update/\"+str(getno)\n requests.delete(apilink)\n # return HttpResponse('Data has deleted successfully')\n return redirect(view_page)","sub_path":"flats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"348571769","text":"\n\ndef sum_of_all_sub_matrix(matrix):\n i_row = len(matrix)\n j_col = len(matrix[0])\n sum_t = 0\n for i in range(0, i_row):\n for j in range(0, j_col):\n sum_t += matrix[i][j]\n for k in range(i, i_row):\n for l in range(j, i_row):\n sum_t += matrix[k][l]\n for a in range(i, k):\n for b in range(j, l):\n sum_t += matrix[a][b]\n\n return sum_t\n\n\n\na = [[1, 1], [1, 1]]\n# print(sum_of_all_sub_matrix(a))\n\n\ndef sum_of_all_sub_matrix(matrix):\n i_row = len(matrix)\n j_col = len(matrix[0])\n sum_t = 0\n for i in range(i_row):\n for j in range(j_col):\n sum_t += (i+1)*(j+1)*(i_row - i)*(j_col - j) * matrix[i][j]\n\n return sum_t\n\nprint(sum_of_all_sub_matrix(a))\n","sub_path":"array/sum_of_all_sub_matrix.py","file_name":"sum_of_all_sub_matrix.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"454717915","text":"\nimport inspect\nimport imp\nimport os\nimport re\nimport types\nfrom DIRAC import S_OK, S_ERROR, rootPath, gLogger\nfrom DIRAC.Core.Utilities.ObjectLoader import ObjectLoader\nfrom DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton\nfrom DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals\nimport WebAppDIRAC\nfrom WebAppDIRAC.Lib.WebHandler import WebHandler\nfrom WebAppDIRAC.Core.CoreHandler import CoreHandler\nfrom WebAppDIRAC.Core.StaticHandler import StaticHandler\n\nclass HandlerMgr( object ):\n __metaclass__ = 
DIRACSingleton\n\n def __init__( self, baseURL = \"\" ):\n self.__baseURL = baseURL.strip( \"/\" )\n self.__routes = []\n self.__handlers = []\n self.__setupGroupRE = r\"(?:/s:([\\w-]*)/g:([\\w-]*))?\"\n self.log = gLogger.getSubLogger( \"Routing\" )\n\n def getPaths( self, dirName ):\n \"\"\"\n Get lists of paths for all installed and enabled extensions\n \"\"\"\n pathList = []\n for extName in CSGlobals.getCSExtensions():\n if extName.rfind( \"DIRAC\" ) != len( extName ) - 5:\n extName = \"%sDIRAC\" % extName\n if extName == \"WebAppDIRAC\":\n continue\n try:\n modFile, modPath, desc = imp.find_module( extName )\n except ImportError:\n continue\n staticPath = os.path.join( modPath, \"WebApp\", dirName )\n if os.path.isdir( staticPath ):\n pathList.append( staticPath )\n #Add WebAppDirac to the end\n pathList.append( os.path.join( WebAppDIRAC.rootPath, \"WebApp\", dirName ) )\n return pathList\n\n def __calculateRoutes( self ):\n \"\"\"\n Load all handlers and generate the routes\n \"\"\"\n ol = ObjectLoader( [ 'WebAppDIRAC' ] )\n origin = \"WebApp.handler\"\n result = ol.getObjects( origin, parentClass = WebHandler, recurse = True )\n if not result[ 'OK' ]:\n return result\n self.__handlers = result[ 'Value' ]\n staticPaths = self.getPaths( \"static\" )\n self.log.verbose( \"Static paths found:\\n - %s\" % \"\\n - \".join( staticPaths ) )\n self.__routes = []\n for pattern in ( ( r\"/static/(.*)\", r\"/(favicon\\.ico)\", r\"/(robots\\.txt)\" ) ):\n if self.__baseURL:\n pattern = \"/%s%s\" % ( self.__baseURL, pattern )\n self.__routes.append( ( pattern, StaticHandler, dict( pathList = staticPaths ) ) )\n for hn in self.__handlers:\n self.log.info( \"Found handler %s\" % hn )\n handler = self.__handlers[ hn ]\n #CHeck it has AUTH_PROPS\n if type( handler.AUTH_PROPS ) == None:\n return S_ERROR( \"Handler %s does not have AUTH_PROPS defined. 
Fix it!\" % hn )\n #Get the root for the handler\n if handler.LOCATION:\n handlerRoute = handler.LOCATION.strip( \"/\")\n else:\n handlerRoute = hn[ len( origin ): ].replace( \".\", \"/\" ).replace( \"Handler\", \"\" )\n #Add the setup group RE before\n baseRoute = self.__setupGroupRE\n #IF theres a base url like /DIRAC add it\n if self.__baseURL:\n baseRoute = \"/%s%s\" % ( self.__baseURL, baseRoute )\n #Set properly the LOCATION after calculating where it is with helpers to add group and setup later\n handler.LOCATION = handlerRoute\n handler.PATH_RE = re.compile( \"%s(%s/.*)\" % ( baseRoute, handlerRoute ) )\n handler.URLSCHEMA = \"/%s%%(setup)s%%(group)s%%(location)s/%%(action)s\" % ( self.__baseURL )\n #Look for methods that are exported\n for mName, mObj in inspect.getmembers( handler ):\n if inspect.ismethod( mObj ) and mName.find( \"web_\" ) == 0:\n if mName == \"web_index\":\n #Index methods have the bare url\n self.log.verbose( \" - Route %s -> %s.web_index\" % ( handlerRoute, hn ) )\n route = \"%s(%s/)\" % ( baseRoute, handlerRoute )\n self.__routes.append( ( route, handler ) )\n self.__routes.append( ( route.rstrip( \"/\" ), CoreHandler, dict( action = 'addSlash' ) ) )\n else:\n #Normal methods get the method appeded without web_\n self.log.verbose( \" - Route %s/%s -> %s.%s\" % ( handlerRoute, mName[4:], hn, mName ) )\n route = \"%s(%s/%s)\" % ( baseRoute, handlerRoute, mName[4:] )\n self.__routes.append( ( route, handler ) )\n self.log.debug( \" * %s\" % route )\n #Send to root\n self.__routes.append( ( \"%s(/?)\" % self.__setupGroupRE, CoreHandler, dict( action = \"sendToRoot\" ) ) )\n if self.__baseURL:\n self.__routes.append( ( \"/%s%s()\" % ( self.__baseURL, self.__setupGroupRE ),\n CoreHandler, dict( action = \"sendToRoot\" ) ) )\n return S_OK()\n\n def getHandlers( self ):\n if not self.__handlers:\n result = self.__calculateRoutes()\n if not result[ 'OK' ]:\n return result\n return S_OK( self.__handlers )\n\n def getRoutes( self ):\n if not self.__routes:\n result = self.__calculateRoutes()\n if not result[ 'OK' ]:\n return result\n return S_OK( self.__routes )\n","sub_path":"Core/HandlerMgr.py","file_name":"HandlerMgr.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"418849469","text":"\"\"\"SGX-backend-specific build rules for binaries and tests.\"\"\"\n\nload(\"//asylo/bazel:asylo.bzl\", \"enclave_test\")\nload(\"@linux_sgx//:sgx_sdk.bzl\", \"sgx\")\n\ndef sgx_enclave_test(name, srcs, **kwargs):\n \"\"\"Build target for testing one or more instances of 'debug_sign_enclave'.\n\n This macro invokes enclave_test with the \"asylo-sgx\" tag added.\n\n Args:\n name: The target name.\n srcs: Same as cc_test srcs.\n **kwargs: enclave_test arguments.\n \"\"\"\n enclave_test(\n name,\n srcs = srcs,\n backends = sgx.backend_labels,\n **kwargs\n )\n","sub_path":"asylo/bazel/sgx_rules.bzl","file_name":"sgx_rules.bzl","file_ext":"bzl","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"508243198","text":"from Crypto.Cipher import AES\nimport base64\nfrom Crypto import Random\nfrom format import *\n\n# Separate a string or a list into dispersed elements, limited by LENGTH\nspc = lambda x,LENGTH:[x[i:i + LENGTH:] for i in range(0, len(x) - LENGTH, LENGTH)] + [x[-(len(x)%LENGTH):-1]+ x[-1]]\n\nclass Asymmetric(object):\n '''RSA class, using for encryption and decryption'''\n def __init__(self, key):\n 
'''Initialize the key into special format '''\n key = key.split(b' ')\n self.key = str64decode(key[0]), str64decode(key[1])\n self.MAX_KEY_LEN = len(str(self.key[0]))//3 - 1\n \n def encrypt(self,text):\n '''Encrypt the plaintext into ciphertext. Argument \"text\" should be string. The output is a bytes array'''\n text = spc(text, self.MAX_KEY_LEN)\n en = []\n for elem in text:\n elem = intEncode(elem)\n en.append(str(pow(elem,self.key[1],self.key[0])))\n\n return ' '.join(en).encode()\n\n def decrypt(self, text):\n '''Decrypt the ciphertext into plaintext. Argument \"text\" should be a list of string. The output is a string'''\n text = [int(i) for i in text.decode().split()]\n de = ''\n for elem in text: \n de += intDecode(pow(elem, self.key[1],self.key[0]))\n \n return de.encode()\n\nBS = AES.block_size #The length of plaintext to encrypted should be the multiple of block_size\npad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS) # Append the length of inputed string into the multiple of block_size\nunpad = lambda s : s[:-ord(s[len(s)-1:])] #return the orginal text that is appended\n\nclass Symmetric(object):\n '''AES class, using for encryption and decryption'''\n def __init__(self, key, mode = AES.MODE_CBC):\n '''Initialize key, mode of AES'''\n self.key = key\n self.mode = mode\n\n def encrypt(self, plaintext):\n '''Encrypt the plaintext into ciphertext. Argument \"plaintext\" should be string. The output is a bytes array'''\n en = pad(plaintext).encode()\n iv = Random.new().read(AES.block_size)\n cryptor = AES.new(self.key, self.mode, iv)\n ciphertext = cryptor.encrypt(en)\n\n return base64.b64encode(iv + ciphertext)\n\n def decrypt(self, ciphertext):\n '''Decrypt the ciphertext into plaintext. Argument \"ciphertext\" should be a bytes array. The output is a string'''\n ciphertext = base64.b64decode(ciphertext)\n iv = ciphertext[:AES.block_size]\n cryptor = AES.new(self.key, self.mode, iv)\n plaintext = cryptor.decrypt(ciphertext[AES.block_size:])\n\n return unpad(plaintext).decode()\n\n def getKey(self):\n '''Return the AES key'''\n return self.key.decode()","sub_path":"SSL server/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"35367117","text":"import shapely.geometry\n\nfrom .. import get_execute_graph\nfrom ... 
import load_json_resource\n\n\ndef test_polygon_timeseries_polygon(connection, api_version):\n polygon = shapely.geometry.shape(load_json_resource(\"data/polygon.json\"))\n fapar = (\n connection\n .load_collection(\"S2\")\n .filter_bbox(3, 6, 52, 50, \"EPSG:4326\")\n .polygonal_mean_timeseries(polygon)\n )\n assert get_execute_graph(fapar) == load_json_resource('data/%s/aggregate_zonal_polygon.json' % api_version)\n\n\ndef test_polygon_timeseries_path(connection, api_version):\n probav_s10_toc_ndvi = (\n connection.load_collection('S2')\n .bbox_filter(west=3, east=6, north=52, south=50, crs='EPSG:4326')\n .polygonal_mean_timeseries(polygon=\"/some/path/to/GeometryCollection.geojson\")\n )\n actual = get_execute_graph(probav_s10_toc_ndvi)\n assert actual == load_json_resource('data/%s/aggregate_zonal_path.json' % api_version)\n","sub_path":"tests/rest/datacube/test_zonal_stats.py","file_name":"test_zonal_stats.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"598265833","text":"from __future__ import division\nimport tensorflow as tf\nimport os\nimport pandas as pd\nimport numpy as np\nfrom nets import inception_utils, inception_v3\nfrom glob import glob\nimport imageio\nimport sys\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import OneHotEncoder\n\ndef get_checkpoint_init_fn():\n # Load from .ckpt file\n # variables_to_restore contains all the variables defined in Inception V3\n variables_to_restore = slim.get_variables_to_restore(exclude=[\"InceptionV3/Logits/Conv2d_1c_1x1/weights:0\", \"InceptionV3/Logits/Conv2d_1c_1x1/biases:0\"])\n # Reset the global traning step counter\n global_step_reset = tf.assign(tf.train.get_or_create_global_step(), 0)\n # Load all the vars into memory and ready for training\n # ignore_missing_vars have to be set to True since we need modify from original inception v3\n slim_init_fn = slim.assign_from_checkpoint_fn(\"./inception_v3.ckpt\",variables_to_restore,ignore_missing_vars=True)\n return slim_init_fn\n\n# Load Data\nif sys.platform == \"darwin\":\n base_skin_dir = os.path.join('./Data/')\nelse:\n base_skin_dir = os.path.join('/datacommons/plusds/skin_cancer/team2')\nslim = tf.contrib.slim\n\n# Set Params\nMAX_EPOCH = 50000\nNUM_CLASSES = 7\nNUM_IMG_FROM_EACH_CLASS = 9\ninput_size = NUM_IMG_FROM_EACH_CLASS * NUM_CLASSES\nVALIDATION_INTERVAL = 500\nSTART_LR = 1e-04\nDECAY_STEP = 10000 / 63 * 2.5\nDECAY_RATE = 0.94\nLOG_DIR = \"./saved_model/Inception_\" + str(START_LR) + \"_\" + str(DECAY_STEP) + \"_\" + str(DECAY_RATE)\n\n# Basic config of tensorflow\nsession_config = tf.ConfigProto(log_device_placement=False)\nsession_config.gpu_options.allow_growth = True\n\n# Dictionary for Loading Resized Images\nimageid_path_dict = {os.path.splitext(os.path.basename(x))[0]: x\n for x in glob(os.path.join(base_skin_dir, 'HAM10000_images_part_[1-2]_resize', '*.jpg'))}\n\nlesion_type_dict = {\n 'nv': 'Melanocytic nevi',\n 'mel': 'dermatofibroma',\n 'bkl': 'Benign keratosis-like lesions ',\n 'bcc': 'Basal cell carcinoma',\n 'akiec': 'Actinic keratoses',\n 'vasc': 'Vascular lesions',\n 'df': 'Dermatofibroma'\n}\n\n# Side Information DataFrame\n# Preprocessing to include path and convert lesion types into integers\nside_df = pd.read_csv(os.path.join(base_skin_dir, 'HAM10000_metadata.csv'))\nside_df['path'] = side_df['image_id'].map(imageid_path_dict.get)\nside_df['cell_type'] = side_df['dx'].map(lesion_type_dict.get) \nside_df['cell_type_idx'] = 
pd.Categorical(side_df['cell_type']).codes\n\n# Sort the large dataset and order by lesion type\nimage_by_type = [side_df.iloc[np.array(side_df[\"cell_type_idx\"] == i)] for i in range(len(lesion_type_dict))]\n# 90% to train\nimage_by_type_train = [i.head(int(np.ceil(len(i)*0.9))) for i in image_by_type]\n# 10% to test\nimage_by_type_val = [i.tail(int(np.floor(len(i)*0.1))) for i in image_by_type]\n\n# Train\n# Set up the one-hot encoder\none_hot_encoder = OneHotEncoder(NUM_CLASSES)\n# Fit the encoder so it knows the full range of class indices\none_hot_encoder.fit(np.arange(NUM_CLASSES).reshape(-1,1))\n\ng = tf.Graph()\n\nwith g.as_default():\n    # define training holders/vars/assign operations\n    img_holder = tf.placeholder(shape=[input_size,299,299,3], dtype=tf.float32, name=\"Img_Holder\")\n    label_holder = tf.placeholder(shape=[input_size,7], dtype=tf.float32, name=\"Label_Holder\")\n    img = tf.Variable(img_holder, name=\"Img_Var\", trainable=False)\n    label = tf.Variable(label_holder, name=\"Label_Var\", trainable=False)\n    img_assign = img.assign(img_holder, name=\"Img_Assign\")\n    label_assign = label.assign(label_holder, name=\"Label_Assign\")\n    # define validation holders/vars/assign operations\n    img_holder_val = tf.placeholder(shape=[input_size,299,299,3], dtype=tf.float32, name=\"Img_Holder_val\")\n    label_holder_val = tf.placeholder(shape=[input_size,7], dtype=tf.float32, name=\"Label_Holder_val\")\n    img_val = tf.Variable(img_holder_val, name=\"Img_Var_val\", trainable=False)\n    label_val = tf.Variable(label_holder_val, name=\"Label_Var_val\", trainable=False)\n    img_assign_val = img_val.assign(img_holder_val, name=\"Img_Assign_val\")\n    label_assign_val = label_val.assign(label_holder_val, name=\"Label_Assign_val\")\n\n    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):\n        # This defines the network we need to train\n        logits, end_points = inception_v3.inception_v3(img, num_classes=7, create_aux_logits=False, is_training=True)\n        # This one just creates an alias of the network above for validation\n        logits_val, _ = inception_v3.inception_v3(img_val, num_classes=7, create_aux_logits=False, is_training=False, reuse=tf.AUTO_REUSE)\n    # set up loss\n    loss = tf.losses.softmax_cross_entropy(label, logits)\n    # Use the following line to separate the validation loss from the training process\n    total_loss = tf.losses.get_total_loss()\n    # loss for validation just for summary purposes\n    loss_val = tf.losses.softmax_cross_entropy(label_val, logits_val, loss_collection=\"validation\")\n    # set up the decaying learning rate\n    learning_rate = tf.train.exponential_decay(START_LR, tf.train.get_or_create_global_step(), DECAY_STEP, DECAY_RATE)\n    # create the train op\n    opt = tf.train.AdamOptimizer(learning_rate)\n    # create the train fn that will be fed into a slim.train wrapper later\n    train_tensor = slim.learning.create_train_op(total_loss, optimizer=opt)\n    # Create summaries\n    slim.summaries.add_scalar_summary(total_loss, 'cross_entropy_loss', 'losses')\n    slim.summaries.add_scalar_summary(learning_rate, 'learning_rate', 'training')\n    slim.summaries.add_scalar_summary(loss_val, 'validation_loss', 'losses')\n    slim.summaries.add_scalar_summary(loss_val-total_loss, 'validation_delta', 'losses')\n    \n\ndef train_step_fn(sess, train_op, global_step, train_step_kwargs):\n    \"\"\"\n    slim.learning.train_step():\n    train_step_kwargs = {summary_writer:, should_log:, should_stop:}\n    \"\"\"\n\n    # Create training batch\n    input_path = np.array([image_by_type_train[i][\"path\"].sample(NUM_IMG_FROM_EACH_CLASS) for i in range(NUM_CLASSES)]).reshape(-1)\n    input_images = 
np.array([imageio.imread(i) for i in input_path]).astype(np.float32)\n labels = np.array([[i]*NUM_IMG_FROM_EACH_CLASS for i in range(NUM_CLASSES)]).reshape(-1,1)\n input_images, labels = shuffle(input_images, labels)\n labels = one_hot_encoder.transform(labels).toarray()\n \n # Pass the images into tf.vars\n sess.run([img_assign,label_assign], feed_dict={img_holder:input_images, label_holder:labels})\n# print sess.run([img,label])\n\n # calc training losses\n total_loss, should_stop = slim.learning.train_step(sess, train_op, global_step, train_step_kwargs)\n\n # validate on interval\n if global_step.eval(session=sess) % VALIDATION_INTERVAL == 0:\n # Create validation batch\n input_path_val = np.array([image_by_type_val[i][\"path\"].sample(NUM_IMG_FROM_EACH_CLASS) for i in range(NUM_CLASSES)]).reshape(-1)\n input_images_val = np.array([imageio.imread(i) for i in input_path_val]).astype(np.float32)\n labels_val = np.array([[i]*NUM_IMG_FROM_EACH_CLASS for i in range(NUM_CLASSES)]).reshape(-1,1)\n input_images_val, labels_val = shuffle(input_images_val, labels_val)\n labels_val = one_hot_encoder.transform(labels_val).toarray()\n # Calculate the logits\n sess.run([img_assign_val,label_assign_val,logits_val], feed_dict={img_holder_val:input_images_val, label_holder_val:labels_val})\n # Calculate the validation loss\n validiate_loss = sess.run(loss_val)\n\n return [total_loss, should_stop]\n\nwith g.as_default():\n \n # Prepare data for initialize\n \n input_path = np.array([image_by_type_train[i][\"path\"].sample(NUM_IMG_FROM_EACH_CLASS) for i in range(NUM_CLASSES)]).reshape(-1)\n input_images = np.array([imageio.imread(i) for i in input_path]).astype(np.float32)\n# input_size = np.shape(input_images)[0]\n labels = np.array([[i]*NUM_IMG_FROM_EACH_CLASS for i in range(NUM_CLASSES)]).reshape(-1,1)\n input_images, labels = shuffle(input_images, labels)\n labels = one_hot_encoder.transform(labels).toarray()\n \n input_path_val = np.array([image_by_type_val[i][\"path\"].sample(NUM_IMG_FROM_EACH_CLASS) for i in range(NUM_CLASSES)]).reshape(-1)\n input_images_val = np.array([imageio.imread(i) for i in input_path_val]).astype(np.float32)\n labels_val = np.array([[i]*NUM_IMG_FROM_EACH_CLASS for i in range(NUM_CLASSES)]).reshape(-1,1)\n input_images_val, labels_val = shuffle(input_images_val, labels_val)\n labels_val = one_hot_encoder.transform(labels_val).toarray()\n\n slim.learning.train(\n train_tensor,\n LOG_DIR,\n log_every_n_steps=1,\n number_of_steps=MAX_EPOCH,\n graph=g,\n save_summaries_secs=60,\n save_interval_secs=300,\n init_fn=get_checkpoint_init_fn(),\n global_step=tf.train.get_global_step(),\n train_step_fn = train_step_fn,\n session_config=session_config,\n init_feed_dict = {img_holder:input_images, label_holder:labels, img_holder_val: input_images_val, label_holder_val: labels_val})\n\n\n\n\n \"\"\"\n LOGIN TO TENSORBOARD\n 1. open terminal and cd into the directory that stores the summary files\n 2. 
$ tensorboard --logdir=\"./\"\n \"\"\"\n\n \"\"\"\n with g.as_default():\n with slim.arg_scope(\"FC1\"):\n with tf.arg_scope(\"FC1\"):\n net = slim.fully_connected(end_points[\"Mixed_7c\"], )\n print end_points[\"Mixed_7c\"]\n \"\"\"\n\n ","sub_path":"inception_v3_lot_comments.py","file_name":"inception_v3_lot_comments.py","file_ext":"py","file_size_in_byte":9423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"68057336","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader\r\nfrom config import cfg\r\nfrom dataset.data_preprocessing import TrainAugmentation\r\nfrom dataset.data_preprocessing import TestTransform\r\nfrom model.MatchPrior import MatchPrior\r\nfrom model.PriorBox import PriorBox\r\nfrom dataset.buildDataset import build_dataset\r\nfrom torch.utils.data.sampler import BatchSampler\r\nfrom torch.utils.data import RandomSampler\r\nfrom dataset.Sampler import IterationBasedBatchSampler\r\nfrom model.moblieSSD import mobileSSD\r\nfrom utils.LossFun import LossFunction\r\nfrom utils.adjustLearningRate import adjust_learning_rate\r\nfrom eval_net import do_evaluation\r\nimport logging\r\nimport time\r\nimport argparse\r\nimport datetime\r\nimport os\r\n\r\ndef weight_init(m):\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.xavier_uniform(m.weight)\r\n m.bias.data.zero_()\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Mobile Single Shot MultiBox Detector Training With PyTorch')\r\n parser.add_argument('--resume_model',\r\n default='/home/jiefengan/mobileSSD/saveModel/weight/mobileSSD300_VOC_110000.pth',\r\n type=str,\r\n help='Checkpoint state_dict file to resume training from')\r\n parser.add_argument('--resume',\r\n default=None,\r\n type=str,\r\n help='Checkpoint state_dict file to resume training from')\r\n\r\n parser.add_argument('--log_step', default=50, type=int, help='Print logs every log_step')\r\n parser.add_argument('--save_step', default=10000, type=int, help='Save checkpoint every save_step')\r\n parser.add_argument('--eval_step', default=10000, type=int,\r\n help='Evaluate dataset every eval_step, disabled when eval_step < 0')\r\n\r\n parser.add_argument('--start_epoch', default=0, type=int,help='the start epoch of training')\r\n parser.add_argument('--max_epoch', default=10, type=int, help='the epoch to end training')\r\n args = parser.parse_args()\r\n\r\n logger = logging.getLogger(\"mobileSSD\")\r\n logger.setLevel(logging.DEBUG)\r\n fileHanlder = logging.FileHandler('mobileSSD.log')\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fileHanlder.setFormatter(formatter)\r\n logger.addHandler(fileHanlder)\r\n\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n train_transform = TrainAugmentation(cfg.INPUT.IMAGE_SIZE, cfg.INPUT.PIXEL_MEAN)\r\n target_transform = MatchPrior(PriorBox(cfg)(), cfg.MODEL.CENTER_VARIANCE, cfg.MODEL.SIZE_VARIANCE,\r\n cfg.MODEL.THRESHOLD)\r\n train_dataset = build_dataset(dataset_list=cfg.DATASETS.TRAIN, transform=train_transform,\r\n target_transform=target_transform)\r\n sampler = RandomSampler(train_dataset)\r\n batch_sampler = BatchSampler(sampler=sampler, batch_size=cfg.SOLVER.BATCH_SIZE,\r\n drop_last=False)\r\n batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations=cfg.SOLVER.MAX_ITER)\r\n train_loader = DataLoader(train_dataset, num_workers=4, batch_sampler=batch_sampler, pin_memory=True)\r\n 
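The sampler stack assembled just above (RandomSampler wrapped by BatchSampler wrapped by IterationBasedBatchSampler) is what lets this DataLoader run for a fixed iteration budget (cfg.SOLVER.MAX_ITER) rather than for whole epochs. Below is a minimal sketch of how such an iteration-based wrapper is commonly written; the class is a hypothetical stand-in, not the project's actual dataset/Sampler.py.

```python
from torch.utils.data.sampler import BatchSampler, RandomSampler

class IterationBasedBatchSamplerSketch:
    """Re-iterates a wrapped BatchSampler until a fixed number of
    batches has been produced (illustrative sketch only)."""

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        iteration = self.start_iter
        while iteration < self.num_iterations:
            for batch in self.batch_sampler:  # each pass reshuffles
                iteration += 1
                if iteration > self.num_iterations:
                    return
                yield batch

    def __len__(self):
        return self.num_iterations

# Usage sketch: 50 batches from a 100-sample dataset, i.e. roughly 4 epochs.
batches = BatchSampler(RandomSampler(range(100)), batch_size=8, drop_last=False)
assert len(list(IterationBasedBatchSamplerSketch(batches, 50))) == 50
```

Because the inner BatchSampler is re-iterated from scratch on every pass, the iteration budget can exceed one epoch without replaying a fixed batch order.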
logger.info('Train Dataset uploaded!')\r\n    logger.info('device:{}'.format(device))\r\n\r\n    model = mobileSSD().to(device)\r\n\r\n    logger.info(\"Resume from the model {}\".format(args.resume_model))\r\n    model.load_state_dict(torch.load(args.resume_model))\r\n\r\n    lr = cfg.SOLVER.LR\r\n    optimizer = optim.RMSprop(model.parameters(), lr=lr, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)\r\n    start_epoch = args.start_epoch\r\n    max_epoch = args.max_epoch\r\n\r\n    if args.resume:\r\n        checkpoint = torch.load(args.resume)\r\n        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n        model.load_state_dict(checkpoint['model_state_dict'])\r\n        start_epoch = checkpoint['epoch']\r\n        logger.info('Resume from the checkpoint {}'.format(args.resume))\r\n\r\n    criterion = LossFunction(cfg.MODEL.NEG_POS_RATIO)\r\n\r\n    for epoch in range(start_epoch, start_epoch + max_epoch):\r\n        model = do_train(epoch, args, device, optimizer, train_loader, model, criterion)\r\n\r\ndef do_train(epoch, args, device, optimizer, train_loader, model, criterion):\r\n    logger = logging.getLogger(\"mobileSSD.trainer\")\r\n    logger.setLevel(logging.INFO)\r\n    fileHandler = logging.FileHandler('train.log')\r\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n    fileHandler.setFormatter(formatter)\r\n    logger.addHandler(fileHandler)\r\n\r\n    logger.info(\"Starting training in epoch: {:02d}\".format(epoch))\r\n    model.train()\r\n    max_iter = len(train_loader)\r\n    lr_step = cfg.SOLVER.LR_STEPS\r\n    starting_training_time = time.time()\r\n    trained_time = 0\r\n    tic = time.time()\r\n    end = time.time()\r\n    for iteration, (images, boxes, labels) in enumerate(train_loader):\r\n        iteration = iteration + 1\r\n        images = images.to(device)\r\n        boxes = boxes.to(device)\r\n        labels = labels.to(device)\r\n        optimizer.zero_grad()\r\n        confidence, pre_location = model(images)\r\n        regression_loss, classification_loss = criterion(confidence, pre_location, labels, boxes)\r\n        loss = regression_loss + classification_loss\r\n        loss.backward()\r\n        optimizer.step()\r\n        trained_time += time.time() - end\r\n        end = time.time()\r\n        if iteration % args.log_step == 0:\r\n            eta_seconds = int((trained_time / iteration)) * (max_iter - iteration)\r\n            log_str = [\r\n                \"Epoch:{:02d}, Iter:{:06d}, Lr:{:.5f}, Cost:{:.2f}s, Eta:{}\".format(epoch, iteration,\r\n                    optimizer.param_groups[0]['lr'], time.time() - tic, str(datetime.timedelta(seconds=eta_seconds))),\r\n                \"Loss:{:.3f}\".format(loss),\r\n                \"Regression_loss:{:.3f}\".format(regression_loss),\r\n                \"Classification_loss:{:.3f}\".format(classification_loss)]\r\n            logger.info(log_str)\r\n            tic = time.time()\r\n\r\n        if iteration in lr_step:\r\n            step_index = lr_step.index(iteration) + 1\r\n            adjust_learning_rate(optimizer, cfg.SOLVER.GAMMA, step_index)\r\n\r\n        if iteration % args.save_step == 0 and iteration != 0:\r\n            path = os.path.join(cfg.OUTPUT_MODEL_DIR, 'checkpoints_'+repr(epoch)+'_'+repr(iteration)+'.pth')\r\n            torch.save({\r\n                'epoch': epoch,\r\n                'model_state_dict': model.state_dict(),\r\n                'optimizer_state_dict': optimizer.state_dict(),\r\n            }, path)\r\n\r\n        if args.eval_step > 0 and iteration % args.eval_step == 0 and iteration != 0:\r\n            dataset_metrics, logger_eval = do_evaluation(device, model, cfg.OUTPUT_EVAL_DIR)\r\n            for dataset_name, metrics in dataset_metrics.items():\r\n                for metric_name, metric_value in metrics.get_printable_metrics().items():\r\n                    log_str = [\"{}, :{:.4f}, iteration:{:6d}\".format('-'.join(['val', dataset_name, metric_name]),\r\n                                                           
metric_value, iteration)]\r\n                    logger_eval.info(log_str)\r\n            model.train()\r\n\r\n    total_training_time = int(time.time() - starting_training_time)\r\n    total_training_time_str = str(datetime.timedelta(seconds=total_training_time))\r\n    logger.info(\"Total training time: {} ({:.4f}seconds/iteration)\".format(total_training_time_str,\r\n                                                                    total_training_time / max_iter))\r\n    torch.save(model.state_dict(),os.path.join(cfg.OUTPUT_MODEL_DIR, 'mobileSSD300_VOC_120000.pth'))\r\n    return model\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"mobileSSD/train_net.py","file_name":"train_net.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"555494489","text":"import numpy as np\nimport math\n\ndef rotation_manipulation(thetas,rotation_angle,old_value):\n\t\"\"\"\n\t\tThe coefficients are to impose harder constraints on the \n\t\trotation of the canonical bins. This ensures that they \n\t\taren't rotated by more than the actual angle of rotation\n\t\tin sum_delta. Bins 0,44,45 and 89 are the canonical bins.\n\t\"\"\"\n\tsum_thetas = 0\n\tfor i in range(90):\n\t\ttry:\n\t\t\tsum_thetas+=(thetas[i]-thetas[i+1])**2\n\t\texcept IndexError:\n\t\t\tcontinue\n\tsum_delta = 1000*((thetas[0] - rotation_angle)**2 + (thetas[44]-rotation_angle)**2 + (thetas[45]-rotation_angle)**2 + (thetas[89] - rotation_angle)**2)\n\tenergy_rotation = sum_delta + sum_thetas\n\treturn energy_rotation+old_value\n\ndef line_preservation(linethetas, V, uk, Pk, lines, bins, old_value):\n\t\"\"\"\n\t\tThis function is to ensure that we build a relation between\n\t\tthe lines and the meshes. This is a function of both V and \n\t\tTheta. \n\t\"\"\"\n\tek = Pk.dot(V)\n\tenergy_line = 0\n\tfor k in range(len(lines)):\n\t\tU_k = uk[k].dot(np.linalg.inv(np.transpose(uk[k]).dot(uk[k]))).dot(np.transpose(uk[k]))\n\t\t# Converting the angle into radians; linethetas lists all the lines with their angles of orientation\n\t\ttheta = linethetas[k]*math.pi/180\n\t\tR_k = np.asarray([[math.cos(theta), -math.sin(theta)],[math.sin(theta),math.cos(theta)]])\n\n\t\tval = (R_k.dot(U_k).dot(np.transpose(R_k)) - np.eye(2)).dot(ek[k])\n\t\tenergy_line += np.linalg.norm(val)**2\n\treturn energy_line + old_value\n\ndef boundary_preservation(V,old_value):\n\t\"\"\"\n\t\tEssentially an energy function designed to impose very strong \n\t\tconditions on the boundary. The penalty is enormous in this \n\t\tminimization problem, so the optimizer is strongly discouraged \n\t\tfrom moving boundary vertices. \n\t\"\"\"\n\t# The boundary term is not implemented in this file yet; return the\n\t# accumulated energy unchanged so total_energy() still runs.\n\treturn old_value\n\ndef shape_preservation(quad_count,x,y,V,Q,old_value):\n\t\"\"\"\n\t\tAnother energy function, mainly aimed at preserving \n\t\tthe shape of the image. There's no penalty coefficient\n\t\tfor this term, unlike the other cases.\n\t\"\"\"\n\tVq = Q.dot(V)\n\tN = quad_count\n\tenergy_shape = 0\n\tfor i in range(N):\n\t\tAq = np.asarray([[x[i],-y[i],1,0],\n\t\t\t\t\t\t[y[i],x[i],0,1],\n\t\t\t\t\t\t[x[i+1],-y[i+1],1,0],\n\t\t\t\t\t\t[y[i+1],x[i+1],0,1],\n\t\t\t\t\t\t[x[i+2],-y[i+2],1,0],\n\t\t\t\t\t\t[y[i+2],x[i+2],0,1],\n\t\t\t\t\t\t[x[i+3],-y[i+3],1,0],\n\t\t\t\t\t\t[y[i+3],x[i+3],0,1]])\n\t\tS = (Aq.dot(np.linalg.inv(np.transpose(Aq).dot(Aq))).dot(np.transpose(Aq))-np.eye(8)).dot(Vq)\n\t\tenergy_shape += np.transpose(S).dot(S)\n\treturn energy_shape + old_value\n\n\ndef total_energy(quad_count,x,y,linethetas,Q,V,uk,Pk,lines,bins,thetas,rotation_angle):\n\t\"\"\" \n\tThis is the total energy function: a linear combination\n\tof all the components written above. 
The penalties are \n\treflected in the form of lambdas.\n\t\"\"\"\n\n\tlambda_b = 10000000\n\tlambda_l = 100\n\tlambda_r = 100\n\ttotal_energy = shape_preservation(quad_count,x,y,V,Q,0) + lambda_b*boundary_preservation(V,0) + lambda_l*line_preservation(linethetas, V, uk, Pk, lines, bins, 0) + lambda_r*rotation_manipulation(thetas,rotation_angle,0)\n\t\n\treturn total_energy\n","sub_path":"Content Aware Rotation/Content Aware Rotation/Codes/energyfunction.py","file_name":"energyfunction.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"210056504","text":"from threading import Thread\nimport queue\nfrom tkinter import *\nimport socket\nimport pickle\nimport sys\nfrom tkinter import messagebox\n\n\nclass ClientData:\n    \"\"\"\n    Class defining the format of game data understood by the client. This is the form in which information\n    travels between the server and the client.\n    The default contents act as a waiting screen while waiting for the players to connect\n    \"\"\"\n    def __init__(self):\n        grid_size = 7\n        self.player_hand = [('red', 1), ('orange', 2), ('yellow', 3), ('green', 4), ('blue', 5), ('purple', 6)]\n        self.current_grid = [[None for _ in range(grid_size)] for _ in range(grid_size)]\n        self.other_players = [('En attente...', 5), ('En attente...', 5)] # Name and number of cards in hand\n        self.deck_size = 42\n\n\nfrom PlayerGUI import PlayerGUI\n\n\nclass ClientPlayer:\n    \"\"\"\n    Class managing the client-side process\n    \"\"\"\n    def __init__(self, nickname, host, port):\n        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try: # Connection initialization\n            self.socket.connect((host, port))\n            print('connection on {}'.format(port))\n            self.socket.send(pickle.dumps(nickname))\n        except:\n            print(\"Connection error\")\n            sys.exit()\n        self.socket.settimeout(0.05) # To avoid waiting forever for a player to make a move\n\n        self.board_data_queue = queue.Queue() # Queue of data going to the game interface (ClientData)\n        self.card_play_queue = queue.Queue() # Queue of data coming from the interface (the player's move)\n        self.client_data = ClientData()\n        self.tk_root = Tk() # Initialization of the interface instance\n        self.tk_root.protocol(\"WM_DELETE_WINDOW\", self.kill_application)\n        self.gui = PlayerGUI(self.client_data, self.card_play_queue, self.tk_root)\n\n        self.is_running = True\n        self.socket_thread = Thread(target=self.socket_interface) # Start the thread that talks to the server\n        self.socket_thread.start()\n        self.periodical_gui_refresh() # GUI updates are handled on this thread (Tkinter will spawn its own thread)\n\n        self.tk_root.mainloop()\n\n    def socket_interface(self): # Function executed on a thread by the constructor\n        \"\"\"\n        Function managing the exchanges between the server and the client by bridging the queues and the socket.\n        This function also handles announcing the winner and managing rematches.\n        :return: None\n        \"\"\"\n        while self.is_running: # While the program is running\n            try:\n                msg = pickle.loads(self.socket.recv(5000)) # Try to read a message\n                if isinstance(msg, str):\n                    replay = messagebox.askyesno(\"Partie terminée !\", \"{} a gagné ! 
Voulez vous rejouer ?\".format(msg))\n self.socket.send(pickle.dumps(replay))\n elif isinstance(msg, ClientData):\n self.board_data_queue.put(msg) # Si on en reçoit un, on le transmet à l'interface par la queue\n except ConnectionAbortedError: #Si le serveur se déconnecte, on arrête le client\n self.kill_application()\n except:\n pass\n while self.card_play_queue.qsize(): # Tant qu'on a des messages dans la queue venant dans l'interface\n try: # On envoie ces messages (cartes à jouer) sur au serveur par le socket\n played_move = self.card_play_queue.get()\n self.socket.send(pickle.dumps(played_move))\n except ConnectionAbortedError: # Si le serveur se déconnecte, on arrête le client\n self.kill_application()\n except queue.Empty:\n pass\n\n # NB : Ces deux méthodes sont brouillons et leur séparation gagnerait à être clarifiée\n def periodical_gui_refresh(self):\n \"\"\"Met à jour périodiquement (50ms) le jeu avec les données arrivant ou ferme le jeu\"\"\"\n self.gui_refresh()\n if not self.is_running:\n sys.exit(1) # Fermer ici est préconnisé par la doc tkinter\n self.tk_root.after(50, self.periodical_gui_refresh)\n\n def gui_refresh(self):\n \"\"\"Fonction mettant à jour l'interface si des données sont présente dans la queue correspondante.\n On attend qu'il y ait un message dans la queue des données plateaux et on s'en sert pour mettre à jour l'interface\n \"\"\"\n while self.board_data_queue.qsize():\n try:\n msg = self.board_data_queue.get()\n self.gui.draw_game(msg)\n except queue.Empty:\n pass\n\n def kill_application(self):\n \"\"\"\n Arrête le socket et donne l'ordre d'arrêter le client.\n :return:\n \"\"\"\n print(\"Fermeture du socket client.\")\n self.socket.close()\n self.is_running = False\n print('Arrêt du client.')\n\n","sub_path":"ClientPlayer.py","file_name":"ClientPlayer.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"117920134","text":"import pytest\nfrom selenium import webdriver\n\ndriver = None\n\n@pytest.fixture(scope='function')\ndef BrowserSetUp(request, browser):\n global driver\n print(\"Running browser setUp\")\n\n if browser == 'firefox':\n print(\"Tests will be executed on Firefox\")\n driver = webdriver.Firefox()\n\n elif browser =='chrome':\n print(\"Tests will be executed on Chrome\")\n driver = webdriver.Chrome(\"config/chromedriver.exe\")\n\n elif browser == 'safari':\n driver = webdriver.Safari()\n\n driver.maximize_window()\n driver.implicitly_wait(20)\n\n if request.cls:\n request.cls.driver = driver\n\n yield driver\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\")\n\n@pytest.fixture(scope='session')\ndef browser(request):\n return request.config.getoption(\"--browser\")","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"219796602","text":"import tensorflow as tf\nimport numpy as np\nimport pickle\nimport time\nfrom keras.preprocessing.text import Tokenizer, one_hot,text_to_word_sequence\nfrom sklearn.model_selection import train_test_split\ntf.logging.set_verbosity(tf.logging.INFO)\n\n#PARAMETERS\n\nTEXT_LENGTH = 4000\nNR_WORDS = 4000\n\n\ndef reshape_list_to_matrix(sequence):\n # sequence = np.array(sequence)\n # sequence.resize(10000)\n # sequence.tolist()\n # for i in range(len(sequence)):\n # temp=str(sequence[i]).strip()\n # sequence[i]=int(temp) if temp else 0\n if 
len(sequence)>TEXT_LENGTH:\n sequence=sequence[:TEXT_LENGTH]\n else:\n while len(sequence) best_score and mode == \"max\"):\n\t\tsave_checkpoint.best_step = step\n\t\tsave_checkpoint.best_score = score\n\n\tif not args.no_save and step % args.save_interval == 0:\n\t\tos.makedirs(args.checkpoint_dir, exist_ok=True)\n\t\tmodel = [model] if model is not None and not isinstance(model, list) else model\n\t\toptimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer\n\t\tscheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler\n\t\tstate_dict = {\n\t\t\t\"step\": step,\n\t\t\t\"score\": score,\n\t\t\t\"last_step\": save_checkpoint.last_step,\n\t\t\t\"best_step\": save_checkpoint.best_step,\n\t\t\t\"best_score\": getattr(save_checkpoint, \"best_score\", None),\n\t\t\t\"model\": [m.state_dict() for m in model] if model is not None else None,\n\t\t\t\"optimizer\": [o.state_dict() for o in optimizer] if optimizer is not None else None,\n\t\t\t\"scheduler\": [s.state_dict() for s in scheduler] if scheduler is not None else None,\n\t\t\t\"args\": argparse.Namespace(**{k: v for k, v in vars(args).items() if not callable(v)}),\n\t\t}\n\n\t\tif args.step_checkpoints:\n\t\t\ttorch.save(state_dict, os.path.join(args.checkpoint_dir, \"checkpoint{}.pt\".format(step)))\n\t\tif (score < best_score and mode == \"min\") or (score > best_score and mode == \"max\"):\n\t\t\ttorch.save(state_dict, os.path.join(args.checkpoint_dir, \"checkpoint_best.pt\"))\n\t\tif step > last_step:\n\t\t\ttorch.save(state_dict, os.path.join(args.checkpoint_dir, \"checkpoint_last.pt\"))\n\n\ndef save_checkpoint_GAN(args, step, modelG, modelD, optimizerG=None, optimizerD=None, scheduler=None, score=None, mode=\"min\"):\n\tassert mode == \"min\" or mode == \"max\"\n\tlast_step = getattr(save_checkpoint_GAN, \"last_step\", -1)\n\tsave_checkpoint_GAN.last_step = max(last_step, step)\n\n\tdefault_score = float(\"inf\") if mode == \"min\" else float(\"-inf\")\n\tbest_score = getattr(save_checkpoint_GAN, \"best_score\", default_score)\n\tif (score < best_score and mode == \"min\") or (score > best_score and mode == \"max\"):\n\t\tsave_checkpoint_GAN.best_step = step\n\t\tsave_checkpoint_GAN.best_score = score\n\n\tif not args.no_save and step % args.save_interval == 0:\n\t\tos.makedirs(args.checkpoint_dir, exist_ok=True)\n\t\tmodelG = modelG if modelG is not None and not isinstance(modelG, list) else modelG\n\t\tmodelD = modelD if modelD is not None and not isinstance(modelD, list) else modelD\n\t\toptimizerG = [optimizerG] if optimizerG is not None and not isinstance(optimizerG, list) else optimizerG\n\t\toptimizerD = [optimizerD] if optimizerD is not None and not isinstance(optimizerD, list) else optimizerD\n\t\tscheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler\n\t\tstate_dict = {\n\t\t\t\"step\": step,\n\t\t\t\"score\": score,\n\t\t\t\"last_step\": save_checkpoint_GAN.last_step,\n\t\t\t\"best_step\": save_checkpoint_GAN.best_step,\n\t\t\t\"best_score\": getattr(save_checkpoint_GAN, \"best_score\", None),\n\t\t\t\"modelG\": modelG.state_dict() if modelG is not None else None,\n\t\t\t\"modelD\": modelD.state_dict() if modelD is not None else None,\n\t\t\t\"optimizerG\": [o.state_dict() for o in optimizerG] if optimizerG is not None else None,\n\t\t\t\"optimizerD\": [o.state_dict() for o in optimizerD] if optimizerD is not None else None,\n\t\t\t\"scheduler\": [s.state_dict() for s in scheduler] if scheduler 
is not None else None,\n\t\t\t\"args\": argparse.Namespace(**{k: v for k, v in vars(args).items() if not callable(v)}),\n\t\t}\n\n\t\tif args.step_checkpoints:\n\t\t\ttorch.save(state_dict, os.path.join(args.checkpoint_dir, \"checkpoint{}.pt\".format(step)))\n\t\tif (score < best_score and mode == \"min\") or (score > best_score and mode == \"max\"):\n\t\t\ttorch.save(state_dict, os.path.join(args.checkpoint_dir, \"checkpoint_best.pt\"))\n\t\tif step > last_step:\n\t\t\ttorch.save(state_dict, os.path.join(args.checkpoint_dir, \"checkpoint_last.pt\"))\n\n\ndef load_checkpoint(args, model=None, optimizer=None, scheduler=None):\n\tif args.restore_file is not None and os.path.isfile(args.restore_file):\n\t\tprint('restoring model..')\n\t\tstate_dict = torch.load(args.restore_file, map_location=lambda s, l: default_restore_location(s, \"cpu\"))\n\n\t\tmodel = [model] if model is not None and not isinstance(model, list) else model\n\t\toptimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer\n\t\tscheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler\n\n\t\tif \"best_score\" in state_dict:\n\t\t\tsave_checkpoint.best_score = state_dict[\"best_score\"]\n\t\t\tsave_checkpoint.best_step = state_dict[\"best_step\"]\n\t\tif \"last_step\" in state_dict:\n\t\t\tsave_checkpoint.last_step = state_dict[\"last_step\"]\n\t\tif model is not None and state_dict.get(\"model\", None) is not None:\n\t\t\tfor m, state in zip(model, state_dict[\"model\"]):\n\t\t\t\tm.load_state_dict(state)\n\t\tif optimizer is not None and state_dict.get(\"optimizer\", None) is not None:\n\t\t\tfor o, state in zip(optimizer, state_dict[\"optimizer\"]):\n\t\t\t\to.load_state_dict(state)\n\t\tif scheduler is not None and state_dict.get(\"scheduler\", None) is not None:\n\t\t\tfor s, state in zip(scheduler, state_dict[\"scheduler\"]):\n\t\t\t\tmilestones = s.milestones\n\t\t\t\tstate['milestones'] = milestones\n\t\t\t\ts.load_state_dict(state)\n\t\t\t\ts.milestones = milestones\n\n\t\tlogging.info(\"Loaded checkpoint {}\".format(args.restore_file))\n\t\treturn state_dict\n\ndef load_checkpoint_GAN(args, modelG=None, modelD=None, optimizerG=None, optimizerD=None, scheduler=None):\n\tif args.restore_file is not None and os.path.isfile(args.restore_file):\n\t\tprint('restoring model..')\n\t\tstate_dict = torch.load(args.restore_file, map_location=lambda s, l: default_restore_location(s, \"cpu\"))\n\n\t\tmodelG = [modelG] if modelG is not None and not isinstance(modelG, list) else modelG\n\t\tmodelD = [modelD] if modelD is not None and not isinstance(modelD, list) else modelD\n\t\toptimizerG = [optimizerG] if optimizerG is not None and not isinstance(optimizerG, list) else optimizerG\n\t\toptimizerD = [optimizerD] if optimizerD is not None and not isinstance(optimizerD, list) else optimizerD\n\t\tscheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler\n\n\t\tif \"best_score\" in state_dict:\n\t\t\tsave_checkpoint_GAN.best_score = state_dict[\"best_score\"]\n\t\t\tsave_checkpoint_GAN.best_step = state_dict[\"best_step\"]\n\t\tif \"last_step\" in state_dict:\n\t\t\tsave_checkpoint_GAN.last_step = state_dict[\"last_step\"]\n\t\tif modelG is not None and state_dict.get(\"modelG\", None) is not None:\n\t\t\tfor m, state in zip(modelG, state_dict[\"modelG\"]):\n\t\t\t\tm.load_state_dict(state)\n\t\tif modelD is not None and state_dict.get(\"modelD\", None) is not None:\n\t\t\tfor m, state in zip(modelD, 
state_dict[\"modelD\"]):\n\t\t\t\tm.load_state_dict(state)\n\t\tif optimizerG is not None and state_dict.get(\"optimizerG\", None) is not None:\n\t\t\tfor o, state in zip(optimizerG, state_dict[\"optimizerG\"]):\n\t\t\t\to.load_state_dict(state)\n\t\tif optimizerD is not None and state_dict.get(\"optimizerD\", None) is not None:\n\t\t\tfor o, state in zip(optimizerD, state_dict[\"optimizerD\"]):\n\t\t\t\to.load_state_dict(state)\n\t\tif scheduler is not None and state_dict.get(\"scheduler\", None) is not None:\n\t\t\tfor s, state in zip(scheduler, state_dict[\"scheduler\"]):\n\t\t\t\tmilestones = s.milestones\n\t\t\t\tstate['milestones'] = milestones\n\t\t\t\ts.load_state_dict(state)\n\t\t\t\ts.milestones = milestones\n\n\t\tlogging.info(\"Loaded checkpoint {}\".format(args.restore_file))\n\t\treturn state_dict\n\ndef save_losses_curve(G_losses,D_losses,args, filename='Loss_curve'):\n k=0\n plt.figure(figsize=[15,10])\n plt.subplot(2,1,1)\n plt.title(\"Generator Loss\")\n plt.ylabel(\"Error\")\n plt.xlabel(\"Total batch number\")\n plt.plot(G_losses[k:],'bx')\n\n plt.subplot(2,1,2)\n plt.title(\"Discriminator Loss\")\n plt.ylabel(\"Error\")\n plt.xlabel(\"Total batch number\")\n plt.plot(D_losses[k:],'rx')\n plt.savefig(os.path.join(args.experiment_dir, filename))","sub_path":"utils/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":11599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"507937239","text":"import xlwings as xw\n\napp = xw.App(visible=True, add_book=False)\n\n# 打开工作簿\nworkbook = app.books.open('../source_material/01/薪资表.xlsx')\n\n# 打开工作表\nsht = workbook.sheets['sheet1']\n\n# 添加新工作表/可添加多个工作表\nnew_wb = workbook.sheets.add(\"奖金\")\nnew_wb1 = workbook.sheets.add(\"年终奖\")\n\n# 在新工作表中写入数据\nnew_wb.range(\"A1\").value = \"奖金职工号\"\nnew_wb1.range(\"A1\").value = \"年终奖职工号\"\n\n# 获取所有工作表\nprint(workbook.sheets)\n\n# 另存为\nworkbook.save(\"../source_material/01/薪资表(4).xlsx\")\n\n# 关闭表\nworkbook.close()\n\n# 关闭工作簿\napp.quit()\n","sub_path":"Python_Office_Automation/02-进阶提升让Excel飞起来/01-xlwings库让excel飞起来/08-原工作簿增加新表格.py","file_name":"08-原工作簿增加新表格.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"531296385","text":"# For unknown arguments in a function\r\ndef myfunc(**name):\r\n print(name[\"fname\"]+\" \"+name[\"lname\"])\r\n\r\n\r\nmyfunc(fname=\"Emily\", lname=\"Scott\")\r\n\r\ndef func(*kids):\r\n print(\"His name is: \"+ kids[1])\r\n\r\n\r\nfunc(\"Mark\", \"Harvey\", \"Louis\")\r\n\r\n# Recursion\r\ndef recu(k):\r\n if(k > 0):\r\n result = k + recu(k-1)\r\n print(result)\r\n else:\r\n result = 0\r\n return result\r\n\r\nprint(\"The Recursion Example Results \\n\")\r\nrecu(3)\r\n","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"78547467","text":"def findRoot(x):\n global father\n while father[x]!=-1:\n x=father[x]\n return x\n\nline=[int(x) for x in input().split()]\nNv=line[0]\nNe=line[1]\nedgeInfo=[]\nfor i in range(Ne):\n edgeInfo.append([int(x) for x in input().split()])\nedgeInfo.sort(key=lambda x:x[2])\nfather=[]\nfor i in range(Nv+1):\n father.append(-1)\ncntEdge=0\nfor x,y,len in edgeInfo:\n fatherX,fatherY=findRoot(x),findRoot(y)\n if fatherX!=fatherY:\n cntEdge+=1\n if cntEdge==Nv-1: print(len)\n 
father[fatherX]=fatherY","sub_path":"Code/CodeRecords/2375/60846/245301.py","file_name":"245301.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"157558601","text":"# coding=utf-8\nimport time\nimport random\nfrom lxml import etree\nfrom pillow_crawler.main.crawler_manager import *\nfrom pillow_crawler.crawler.base_crawler import *\n\n\nclass AnjukeBeijingCrawler(BaseCrawler):\n    \"\"\"\n    Used to crawl Anjuke Beijing data, to help pick a rental location\n    \"\"\"\n    def __init__(self):\n        BaseCrawler.__init__(self)\n        self.crawler_rules = [\n            CrawlerRule(\n                url_pattern=r\"http://139\\.196\\.149\\.203/*\",\n                process_func=self.proxy_test,\n                downloader_name=\"xdaili1_downloader\"\n            )\n        ]\n\n    def proxy_test(self, url, response):\n        # Get the storage backend\n        print(\"do: \"+url)\n        print(response)\n\n\ndef main():\n    # Create the crawler manager and load the config file\n    config_filepath = \"conf.yaml\"\n    crawler_manager = CrawlerManager(config_filepath)\n    # Create the crawlers and add the initial task\n    crawlers = [AnjukeBeijingCrawler() for i in range(1)]\n    crawler_manager.set_crawlers(crawlers)\n    crawler_manager.add_task(\"http://139.196.149.203:5001/crawler\")\n    crawler_manager.start()\n    crawler_manager.join()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"samples/anjuke_beijing/proxy_test.py","file_name":"proxy_test.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"540656907","text":"from random import randint\n\n\nclass Ultraman(object):\n\n    __slots__ = ('_name', '_hp', '_mp') # Limit the attributes; only these three are allowed\n\n    def __init__(self, name, hp, mp):\n        self._name = name\n        self._hp = hp\n        self._mp = mp\n\n    @property\n    def name(self):\n        return self._name # property getter\n\n    @property\n    def hp(self): # property getter\n        return self._hp\n\n    @hp.setter # setter\n    def hp(self, hp):\n        self._hp = hp if hp >= 0 else 0\n\n    def attack(self, monster):\n        \"\"\"\n        Attack a monster; the monster loses HP.\n        :param monster:\n        :return:\n        \"\"\"\n        monster.hp -= randint(15, 25)\n\n\n    def huge_attack(self, monster):\n        \"\"\"\n        If MP is at least 50, cast the big skill: the monster loses three quarters\n        of its HP, but at least 50 points. If MP is insufficient, perform a normal attack.\n        :param monster:\n        :return:\n        \"\"\"\n        if self._mp >= 50:\n            self._mp -= 50\n            injury = monster.hp * 3 // 4\n            injury = injury if injury >= 50 else 50\n            monster.hp -= injury\n        else:\n            self.attack(monster)\n\n    def magic_attack(self, monster):\n        \"\"\"\n        Magic attack: costs 20 MP and deals 10~15 damage to the monster;\n        does nothing when there is not enough MP.\n        :param monster:\n        :return:\n        \"\"\"\n        if self._mp >= 20:\n            self._mp -= 20\n            monster.hp -= randint(10, 15)\n\n    def __str__(self):\n        return '%s奥特曼\\n' % self._name + \\\n               '生命值:%d\\n' % self._hp + \\\n               '魔法值:%d\\n' % self._mp\n\n\nclass Monster(object):\n\n    __slots__ = ('_name', '_hp') # Restrict the instance attributes\n\n    def __init__(self, name, hp): # instance attributes\n        self._name = name\n        self._hp = hp\n\n    @property\n    def name(self): # property, wrapping the attribute\n        return self._name\n\n    @property\n    def hp(self):\n        return self._hp\n\n    @hp.setter\n    def hp(self, hp):\n        self._hp = hp if hp >= 0 else 0\n\n    def attack(self, ultraman):\n        ultraman.hp -= randint(10, 20)\n\n    def __str__(self):\n        return '%s小怪兽\\n' % self._name + \\\n               '生命值:%d\\n' % self._hp\n\n\ndef main():\n    u = Ultraman('骆昊', 1000, 120)\n    u_is_alive = True\n    print(u)\n    ms = [Monster('曹宇', 50), Monster('杨茜然', 200), Monster('张超', 150)]\n    m1 = ms[0]\n    m1_is_alive = True\n    m2 = ms[1]\n    m2_is_alive = True\n    m3 = ms[2]\n    m3_is_alive = True\n    print(m1, m2, m3)\n    fight_round = 1\n\n    while u.hp > 0 and (m1_is_alive or m2_is_alive or m3_is_alive):\n        # print('===第%d回合===' % fight_round)\n        # fight_round += 1\n        # u.attack(m)\n        # if m.hp > 0:\n        #     m.attack(u)\n        # print(u)\n        # print(m) # each one\n        i = randint(1, 10)\n        print('===第%d回合===' % fight_round)\n        fight_round += 1\n        if i <= 6:\n            if 
m1_is_alive:\n                u.attack(m1)\n                if m1.hp > 0:\n                    m1.attack(u)\n                    m2.attack(u)\n                    m3.attack(u)\n                else:\n                    m1_is_alive = False\n                    m2.attack(u)\n                    m3.attack(u)\n                print(u)\n                print(m1, m2, m3)\n            elif m2_is_alive:\n                u.attack(m2)\n                if m2.hp > 0:\n                    m2.attack(u)\n                    m3.attack(u)\n                else:\n                    m2_is_alive = False\n                    m3.attack(u)\n                print(u)\n                print(m1, m2, m3)\n            elif m3_is_alive:\n                u.attack(m3)\n                if m3.hp > 0:\n                    m3.attack(u)\n                else:\n                    m3_is_alive = False\n                print(u)\n                print(m1, m2, m3)\n        elif i <= 9:\n            if m1_is_alive:\n                u.magic_attack(m1)\n                if m1.hp > 0:\n                    m1.attack(u)\n                    m2.attack(u)\n                    m3.attack(u)\n                else:\n                    m1_is_alive = False\n                    m2.attack(u)\n                    m3.attack(u)\n                print(u)\n                print(m1, m2, m3)\n            elif m2_is_alive:\n                u.magic_attack(m2)\n                if m2.hp > 0:\n                    m2.attack(u)\n                    m3.attack(u)\n                else:\n                    m2_is_alive = False\n                    m3.attack(u)\n                print(u)\n                print(m1, m2, m3)\n            elif m3_is_alive:\n                u.magic_attack(m3)\n                if m3.hp > 0:\n                    m3.attack(u)\n                else:\n                    m3_is_alive = False\n                print(u)\n                print(m1, m2, m3)\n        elif i == 10:\n            if m1_is_alive:\n                u.huge_attack(m1)\n                if m1.hp > 0:\n                    m1.attack(u)\n                    m2.attack(u)\n                    m3.attack(u)\n                else:\n                    m1_is_alive = False\n                    m2.attack(u)\n                    m3.attack(u)\n                print(u)\n                print(m1, m2, m3)\n            elif m2_is_alive:\n                u.huge_attack(m2)\n                if m2.hp > 0:\n                    m2.attack(u)\n                    m3.attack(u)\n                else:\n                    m2_is_alive = False\n                    m3.attack(u)\n                print(u)\n                print(m1, m2, m3)\n            elif m3_is_alive:\n                u.huge_attack(m3)\n                if m3.hp > 0:\n                    m3.attack(u)\n                else:\n                    m3_is_alive = False\n                print(u)\n                print(m1, m2, m3)\n\n    if u.hp > 0:\n        print('%s奥特曼胜利' % u.name)\n    else:\n        print('小怪兽胜利')\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"day11/day10.aoeman.py","file_name":"day10.aoeman.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"616362904","text":"how_many_snakes = 1\nsnake_string = \"\"\"\nWelcome to Python3!\n\n             ____\n            / . 
.\\\\\n \\ ---<\n \\ /\n __________/ /\n-=:___________/\n\n<3, Juno\n\"\"\"\n\n\nprint(snake_string * how_many_snakes)\n\n\nname = input(\"Enter your name: \")\nprint(\"Hello there, {}!\".format(name.title()))\n\n\nnum = int(input(\"Enter an integer: \"))\nprint(\"hello \" * num)\n\nresult = eval(input(\"Enter an expression: \"))\nprint(result)\n","sub_path":"Script_Folder/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"256871351","text":"# coding: utf-8\n\nimport time\n\n# Gather breast cancer data\n\nfrom sklearn.datasets import load_breast_cancer\nbreast_cancer = load_breast_cancer()\nbreast_cancer_data = breast_cancer.data\nbreast_cancer_labels = breast_cancer.target\n\n\n# Prepare data as pandas dataframe\n\n\nimport numpy as np\nlabels = np.reshape(breast_cancer_labels,(569,1))\nfinal_breast_cancer_data = np.concatenate([breast_cancer_data,labels],axis=1)\n\nimport pandas as pd\nbreast_cancer_dataset = pd.DataFrame(final_breast_cancer_data)\nfeatures = breast_cancer.feature_names\nfeatures_labels = np.append(features,'label')\nbreast_cancer_dataset.columns = features_labels\n\n\"\"\"\nReplace 0,1 label by medical terminology (Benign = cancer false, Malignant = cancer true)\n\nbreast_cancer_dataset['label'].replace(0, 'Benign',inplace=True)\nbreast_cancer_dataset['label'].replace(1, 'Malignant',inplace=True)\n\"\"\"\n\n# Split data for training and testing\n\nfrom sklearn.model_selection import train_test_split\n\nsplit = 0.3\nbreast_cancer_dataset_train, breast_cancer_dataset_test = train_test_split(breast_cancer_dataset, test_size=split)\nX_train, Y_train = breast_cancer_dataset_train.drop(columns='label'), breast_cancer_dataset_train['label']\nX_test, Y_test = breast_cancer_dataset_test.drop(columns='label'), breast_cancer_dataset_test['label']\n\n\n# Create dataframe for test statistics\n\ndata = [\"ML Algorithm\", \"Accuracy\", \"#Correct\", \"#Incorrect\", \"%Unlabeled\", \"Training Time (ns)\"]\nstatistics = pd.DataFrame(columns = data)\n\n\n# Train logistic regression model, predict and collect statistics\n\nfrom sklearn.linear_model import LogisticRegression\n\nstart_time1 = time.time_ns()\nmodel1 = LogisticRegression(random_state=0,max_iter=10000).fit(X_train, Y_train)\ntraining_time1 = time.time_ns() - start_time1\npred = model1.predict(X_test) == Y_test\n\nstatistics.loc[0] = [\"Logistic Regression\",\n round(100*model1.score(X_test, Y_test),2), \n np.count_nonzero(pred==True),\n np.count_nonzero(pred==False), \n 0,\n training_time1]\n\n# Train SVM model, predict (use 'linear' as kernel) and collect statistics\n\nfrom sklearn import svm\n\nstart_time2 = time.time_ns()\nmodel2 = svm.SVC(kernel='linear').fit(X_train, Y_train)\ntraining_time2 = time.time_ns() - start_time2\npred = model2.predict(X_test) == Y_test\n\nstatistics.loc[1] = [\"Support Vector Machine\",\n round(100*model2.score(X_test, Y_test),2), \n np.count_nonzero(pred==True),\n np.count_nonzero(pred==False), \n 0,\n training_time2]\n\n# Train decision tree model, predict and collect statistics\n\nfrom sklearn import tree\n\nstart_time3 = time.time_ns()\nmodel3 = tree.DecisionTreeClassifier(criterion = \"entropy\", max_depth = 5).fit(X_train,Y_train)\ntraining_time3 = time.time_ns() - start_time3\npred = model3.predict(X_test) == Y_test\n\nstatistics.loc[2] = [\"Decision Tree\", \n round(100*model3.score(X_test, Y_test),2), \n np.count_nonzero(pred==True), \n 
np.count_nonzero(pred==False), \n                     0,\n                     training_time3]\n\n# Define the number of unlabeled data sets and randomly remove the labels (set them to -1)\n\nn_unlabeled = int(X_train.shape[0] * 0.9)\nidxs = np.random.choice(X_train.shape[0], replace = False, size=n_unlabeled)\n\ny = np.asarray(Y_train)\nfor i in idxs:\n    y[i] = -1 \n\nY_train = y\n\n\n# Train semi-supervised Naive Bayes model from package \"pomegranate\", predict and collect statistics\n\nfrom pomegranate import NaiveBayes, NormalDistribution\n\nstart_time4 = time.time_ns()\nmodel4 = NaiveBayes.from_samples(NormalDistribution, X_train, Y_train, verbose=False)\ntraining_time4 = time.time_ns() - start_time4\npred = model4.predict(X_test) == Y_test\n\nstatistics.loc[3] = [\"SS Naive Bayes\", \n                     round(100*model4.score(X_test, Y_test),2), \n                     np.count_nonzero(pred==True), \n                     np.count_nonzero(pred==False), \n                     round(100*idxs.size/Y_train.size,2),\n                     training_time4]\n\n# Train semi-supervised LabelSpreading model, predict (use 'knn' as kernel) and collect statistics\n\nfrom sklearn.semi_supervised import LabelSpreading\n\nstart_time5 = time.time_ns()\nmodel5 = LabelSpreading(kernel = 'knn', n_neighbors = 10, max_iter=1000).fit(X_train, Y_train)\ntraining_time5 = time.time_ns() - start_time5\npred = model5.predict(X_test) == Y_test\n\nstatistics.loc[4] = [\"SS Label Spreading\", \n                     round(100*model5.score(X_test, Y_test),2), \n                     np.count_nonzero(pred==True), \n                     np.count_nonzero(pred==False), \n                     round(100*idxs.size/Y_train.size,2),\n                     training_time5]\n\n# Print summary statistics\n\nprint(statistics)\n\n\n","sub_path":"StandardAlgorithms/BreastCancerAnalysisViaML.py","file_name":"BreastCancerAnalysisViaML.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"291947625","text":"import asyncio\nimport discord\nimport re\nimport os\nimport random\nimport string\nimport json\nimport time\nimport html\nimport codecs\nfrom random import shuffle\nfrom discord.ext import commands\nfrom Cogs import Settings\nfrom Cogs import DisplayName\nfrom Cogs import Nullify\n\n\nclass CardsAgainstHumanity:\n\n    # Init with the bot reference, and a reference to the deck file\n    def __init__(self, bot, file = None):\n        self.bot = bot\n        self.games = []\n        self.maxBots = 5 # Max number of bots that can be added to a game - don't count toward max players\n        self.maxPlayers = 10 # Max players for random joins\n        self.maxDeadTime = 3600 # Allow an hour of dead time before killing a game\n        self.checkTime = 300 # 5 minutes between dead time checks\n        self.winAfter = 10 # 10 wins for the game\n        self.botWaitMin = 5 # Minimum number of seconds before the bot makes a decision\n        self.botWaitMax = 30 # Max number of seconds before a bot makes a decision\n        self.charset = \"1234567890\"\n        self.botName = 'Rando Cardrissian'\n        self.minMembers = 3\n        self.loopsleep = 0.01\n        if file == None:\n            file = \"deck.json\"\n        # Let's load our deck file\n        # Can be found at http://www.crhallberg.com/cah/json\n        if os.path.exists(file):\n            f = open(file,'r')\n            filedata = f.read()\n            f.close()\n\n            self.deck = json.loads(filedata)\n        else:\n            # File doesn't exist - create a placeholder\n            self.deck = {}\n        self.bot.loop.create_task(self.checkDead())\n\n    def cleanJson(self, json):\n        json = html.unescape(json)\n        # Clean out html formatting\n        json = json.replace('_','[blank]')\n        json = json.replace('<br>','\\n')\n        json = json.replace('<br/>
','\\n')\n        json = json.replace('<i>', '*')\n        json = json.replace('</i>', '*')\n        return json\n\n\n    async def checkDead(self):\n        while not self.bot.is_closed:\n            # Wait first - then check\n            await asyncio.sleep(self.checkTime)\n            for game in self.games:\n                gameTime = game['Time']\n                currentTime = int(time.time())\n                timeRemain  = currentTime - gameTime\n                if timeRemain > self.maxDeadTime:\n                    # Game is dead - quit it and alert members\n                    for member in game['Members']:\n                        if member['IsBot']:\n                            continue\n                        msg = \"Game id: *{}* has been closed due to inactivity.\".format(game['ID'])\n                        await self.bot.send_message(member['User'], msg)\n                    self.games.remove(game)\n\n    async def checkPM(self, message):\n        # Checks if we're talking in PM, and if not - outputs an error\n        if message.channel.is_private:\n            # PM\n            return True\n        else:\n            # Not in PM\n            await self.bot.send_message(message.channel, 'Cards Against Humanity commands must be run in PM.')\n            return False\n\n\n    def randomID(self, length = 8):\n        # Create a random id that doesn't already exist\n        while True:\n            # Repeat until found\n            newID = ''.join(random.choice(self.charset) for i in range(length))\n            exists = False\n            for game in self.games:\n                if game['ID'] == newID:\n                    exists = True\n                    break\n            if not exists:\n                break\n        return newID\n\n    def randomBotID(self, game, length = 4):\n        # Returns a random id for a bot that doesn't already exist\n        while True:\n            # Repeat until found\n            newID = ''.join(random.choice(self.charset) for i in range(length))\n            exists = False\n            for member in game['Members']:\n                if member['ID'] == newID:\n                    exists = True\n                    break\n            if not exists:\n                break\n        return newID\n\n    def userGame(self, user):\n        # Returns the game the user is currently in\n        if not type(user) is str:\n            # Assume it's a discord.Member/User\n            user = user.id\n\n        for game in self.games:\n            for member in game['Members']:\n                if member['ID'] == user:\n                    # Found our user\n                    return game\n        return None\n\n    def gameForID(self, id):\n        # Returns the game with the passed id\n        for game in self.games:\n            if game['ID'] == id:\n                return game\n        return None\n\n    async def removeMember(self, user, game = None):\n        if not type(user) is str:\n            # Assume it's a discord.Member/User\n            user = user.id\n        outcome = False\n        removed = None\n        if not game:\n            game = self.userGame(user)\n        if game:\n            for member in game['Members']:\n                if member['ID'] == user:\n                    game['Members'].remove(member)\n                    removed = member\n                    outcome = True\n                    if not member['IsBot']:\n                        msg = 'You were removed from game id: *{}*.'.format(game['ID'])\n                        await self.bot.send_message(member['User'], msg)\n            if not outcome:\n                return outcome\n            # We removed someone - let's tell the world\n            if removed['IsBot']:\n                msg = '*{} ({})* left the game - reorganizing...'.format(self.botName, removed['ID'])\n            else:\n                msg = '*{}* left the game - reorganizing...'.format(DisplayName.name(removed['User']))\n            for member in game['Members']:\n                if member['IsBot']:\n                    continue\n                await self.bot.send_message(member['User'], msg)\n        return game\n    \n\n    def checkGame(self, game):\n        for member in game['Members']:\n            if not member['IsBot']:\n                return True\n        # If we got here - only bots, or empty game\n        # Kill the game loop\n        task = game['Task']\n        task.cancel()\n        self.games.remove(game)\n        return False\n\n    async def typing(self, game, typeTime = 5):\n        # Allows us to show the bot typing\n        waitTime = random.randint(self.botWaitMin, self.botWaitMax)\n        preType = waitTime-typeTime\n        if preType > 0:\n            await asyncio.sleep(preType)\n            for member in game['Members']:\n                if member['IsBot']:\n                    continue\n                # Show that we're typing\n                
await self.bot.send_typing(member['User'])\n await asyncio.sleep(typeTime)\n else:\n for member in game['Members']:\n if member['IsBot']:\n continue\n # Show that we're typing\n await self.bot.send_typing(member['User'])\n await asyncio.sleep(waitTime)\n\n async def botPick(self, ctx, bot, game):\n # Has the bot pick their card\n blackNum = game['BlackCard']['Pick']\n if blackNum == 1:\n cardSpeak = 'card'\n else:\n cardSpeak = 'cards'\n i = 0\n cards = []\n while i < blackNum:\n randCard = random.randint(0, len(bot['Hand'])-1)\n cards.append(bot['Hand'].pop(randCard)['Text'])\n i += 1\n \n await self.typing(game)\n\n # Make sure we haven't laid any cards\n if bot['Laid'] == False:\n newSubmission = { 'By': bot, 'Cards': cards }\n game['Submitted'].append(newSubmission)\n # Shuffle cards\n shuffle(game['Submitted'])\n bot['Laid'] = True\n game['Time'] = currentTime = int(time.time())\n await self.checkSubmissions(ctx, game)\n \n\n async def botPickWin(self, ctx, game):\n totalUsers = len(game['Members'])-1\n submitted = len(game['Submitted'])\n if submitted >= totalUsers:\n # Judge is a bot - and all cards are in!\n await self.typing(game)\n # Pick a winner\n winner = random.randint(0, totalUsers-2)\n await self.winningCard(ctx, game, winner)\n\n\n async def checkSubmissions(self, ctx, game):\n totalUsers = len(game['Members'])-1\n submitted = len(game['Submitted'])\n for member in game['Members']:\n if member['IsBot'] == True:\n continue\n if submitted < totalUsers:\n msg = '{}/{} cards submitted...'.format(submitted, totalUsers)\n await self.bot.send_message(member['User'], msg)\n else:\n msg = 'All cards have been submitted!'\n # if \n await self.bot.send_message(member['User'], msg)\n await self.showOptions(ctx, member['User'])\n\n # Check if a bot is the judge\n judge = game['Members'][game['Judge']]\n if not judge['IsBot']:\n continue\n task = self.bot.loop.create_task(self.botPickWin(ctx, game))\n judge['Task'] = task\n \n\n async def winningCard(self, ctx, game, card):\n # Let's pick our card and alert everyone\n winner = game['Submitted'][card]\n if winner['By']['IsBot']:\n winnerName = '{} ({})'.format(self.botName, winner['By']['ID'])\n winner['By']['Points'] += 1\n winner['By']['Won'].append(game['BlackCard']['Text'])\n else:\n winnerName = DisplayName.name(winner['By']['User'])\n for member in game['Members']:\n if member['IsBot']:\n continue\n stat_embed = discord.Embed(color=discord.Color.gold())\n stat_embed.set_footer(text='Cards Against Humanity - id: {}'.format(game['ID']))\n index = game['Members'].index(member)\n if index == game['Judge']:\n stat_embed.set_author(name='You picked {}\\'s card!'.format(winnerName))\n elif member == winner['By']:\n stat_embed.set_author(name='YOU WON!!')\n member['Points'] += 1\n member['Won'].append(game['BlackCard']['Text'])\n else:\n stat_embed.set_author(name='{} won!'.format(winnerName))\n if len(winner['Cards']) == 1:\n msg = 'The **Winning** card was:\\n\\n{}'.format('{}'.format(' - '.join(winner['Cards'])))\n else:\n msg = 'The **Winning** cards were:\\n\\n{}'.format('{}'.format(' - '.join(winner['Cards'])))\n await self.bot.send_message(member['User'], embed=stat_embed)\n await self.bot.send_message(member['User'], msg)\n\n # await self.nextPlay(ctx, game)\n \n # Start the game loop\n event = game['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n game['Time'] = currentTime = int(time.time())\n\n async def gameCheckLoop(self, ctx, game):\n task = game['NextHand']\n while True:\n # Clear the pending task\n 
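# The hand-to-hand flow here is an asyncio.Event handshake: winningCard()
# above fires game['NextHand'] via loop.call_soon_threadsafe(event.set), and
# this loop wakes from task.wait(), re-arms the flag, and deals the next hand.
# The same pattern in miniature (a hypothetical, standalone sketch, not this
# cog's exact code):
#
#     event = asyncio.Event()
#     async def hand_loop():
#         while True:
#             await event.wait()   # sleep until a hand is resolved
#             event.clear()        # re-arm for the following round
#             # ...deal the next hand here...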
task.clear()\n # Wait for a second before continuing\n await asyncio.sleep(1)\n # Queue up the next hand\n await self.nextPlay(ctx, game)\n # Wait until our next clear\n await task.wait()\n\n async def messagePlayers(self, ctx, message, game, judge = False):\n # Messages all the users on in a game\n for member in game['Members']:\n if member['IsBot']:\n continue\n # Not bots\n if member is game['Members'][game['Judge']]:\n # Is the judge\n if judge:\n await self.bot.send_message(member['User'], message)\n else:\n # Not the judge\n await self.bot.send_message(member['User'], message)\n\n ################################################\n \n async def showPlay(self, ctx, user):\n # Creates an embed and displays the current game stats\n stat_embed = discord.Embed(color=discord.Color.blue())\n game = self.userGame(user)\n if not game:\n return\n # Get the judge's name\n if game['Members'][game['Judge']]['User'] == user:\n judge = '**YOU** are'\n else:\n if game['Members'][game['Judge']]['IsBot']:\n # Bot\n judge = '*{} ({})* is'.format(self.botName, game['Members'][game['Judge']]['ID'])\n else:\n judge = '*{}* is'.format(DisplayName.name(game['Members'][game['Judge']]['User']))\n \n # Get the Black Card\n try:\n blackCard = game['BlackCard']['Text']\n blackNum = game['BlackCard']['Pick']\n except Exception:\n blackCard = 'None.'\n blackNum = 0\n\n msg = '{} the judge.\\n\\n'.format(judge)\n msg += '__Black Card:__\\n\\n**{}**\\n\\n'.format(blackCard)\n \n totalUsers = len(game['Members'])-1\n submitted = len(game['Submitted'])\n if len(game['Members']) >= self.minMembers:\n if submitted < totalUsers:\n msg += '{}/{} cards submitted...'.format(submitted, totalUsers)\n else:\n msg += 'All cards have been submitted!'\n await self.showOptions(ctx, user)\n return\n if not judge == '**YOU** are':\n # Judge doesn't need to lay a card\n if blackNum == 1:\n # Singular\n msg += '\\n\\nPick a card with `{}lay [card number]`'.format(ctx.prefix)\n elif blackNum > 1:\n # Plural\n msg += '\\n\\nPick **{} cards** with `{}lay [card numbers separated by commas (1,2,3)]`'.format(blackNum, ctx.prefix)\n \n stat_embed.set_author(name='Current Play')\n stat_embed.set_footer(text='Cards Against Humanity - id: {}'.format(game['ID']))\n await self.bot.send_message(user, embed=stat_embed)\n await self.bot.send_message(user, msg)\n \n async def showHand(self, ctx, user):\n # Shows the user's hand in an embed\n stat_embed = discord.Embed(color=discord.Color.green())\n game = self.userGame(user)\n if not game:\n return\n i = 0\n msg = ''\n points = '? points'\n for member in game['Members']:\n if member['ID'] == user.id:\n # Got our user\n if member['Points']==1:\n points = '1 point'\n else:\n points = '{} points'.format(member['Points'])\n for card in member['Hand']:\n i += 1\n msg += '{}. 
{}\\n'.format(i, card['Text'])\n\n try:\n blackCard = '**{}**'.format(game['BlackCard']['Text'])\n except Exception:\n blackCard = '**None.**'\n stat_embed.set_author(name='Your Hand - {}'.format(points))\n stat_embed.set_footer(text='Cards Against Humanity - id: {}'.format(game['ID']))\n await self.bot.send_message(user, embed=stat_embed)\n await self.bot.send_message(user, msg)\n \n async def showOptions(self, ctx, user):\n # Shows the judgement options\n stat_embed = discord.Embed(color=discord.Color.orange())\n game = self.userGame(user)\n if not game:\n return\n # Add title\n stat_embed.set_author(name='JUDGEMENT TIME!!')\n stat_embed.set_footer(text='Cards Against Humanity - id: {}'.format(game['ID']))\n await self.bot.send_message(user, embed=stat_embed)\n\n if game['Members'][game['Judge']]['User'] == user:\n judge = '**YOU** are'\n else:\n if game['Members'][game['Judge']]['IsBot']:\n # Bot\n judge = '*{} ({})* is'.format(self.botName, game['Members'][game['Judge']]['ID'])\n else:\n judge = '*{}* is'.format(DisplayName.name(game['Members'][game['Judge']]['User']))\n blackCard = game['BlackCard']['Text']\n\n msg = '{} judging.\\n\\n'.format(judge)\n msg += '__Black Card:__\\n\\n**{}**\\n\\n'.format(blackCard)\n msg += '__Submitted White Cards:__\\n\\n'\n\n i = 0\n for sub in game['Submitted']:\n i+=1\n msg += '{}. {}\\n'.format(i, ' - '.join(sub['Cards']))\n if judge == '**YOU** are':\n msg += '\\nPick a winner with `{}pick [submission number]`.'.format(ctx.prefix)\n await self.bot.send_message(user, msg)\n \n async def drawCard(self, game):\n # Draws a random unused card and shuffles the deck if needed\n totalDiscard = len(game['Discard'])\n for member in game['Members']:\n totalDiscard += len(member['Hand'])\n if totalDiscard >= len(self.deck['whiteCards']):\n # Tell everyone the cards were shuffled\n for member in game['Members']:\n if member['IsBot']:\n continue\n user = member['User']\n await self.bot.send_message(user, 'Shuffling white cards...')\n # Shuffle the cards\n self.shuffle(game)\n while True:\n # Random grab a unique card\n index = random.randint(0, len(self.deck['whiteCards'])-1)\n if not index in game['Discard']:\n game['Discard'].append(index)\n text = self.deck['whiteCards'][index]\n text = self.cleanJson(text)\n card = { 'Index': index, 'Text': text }\n return card\n\n\n def shuffle(self, game):\n # Adds discards back into the deck\n game['Discard'] = []\n for member in game['Members']:\n for card in member['Hand']:\n game['Discard'].append(card['Index'])\n\n\n async def drawCards(self, user, cards = 10):\n if not type(user) is str:\n # Assume it's a discord.Member/User\n user = user.id\n # fills the user's hand up to number of cards\n game = self.userGame(user)\n for member in game['Members']:\n if member['ID'] == user:\n # Found our user - let's draw cards\n i = len(member['Hand'])\n while i < cards:\n # Draw unique cards until we fill our hand\n newCard = await self.drawCard(game)\n member['Hand'].append(newCard)\n i += 1\n\n\n async def drawBCard(self, game):\n # Draws a random black card\n totalDiscard = len(game['BDiscard'])\n if totalDiscard >= len(self.deck['blackCards']):\n # Tell everyone the cards were shuffled\n for member in game['Members']:\n if member['IsBot']:\n continue\n user = member['User']\n await self.bot.send_message(user, 'Shuffling black cards...')\n # Shuffle the cards\n game['BDiscard'] = []\n while True:\n # Random grab a unique card\n index = random.randint(0, len(self.deck['blackCards'])-1)\n if not index in game['BDiscard']:\n 
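# Fresh black card found: mark its index as used, then cache its cleaned text and pick count\n                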
game['BDiscard'].append(index)\n text = self.deck['blackCards'][index]['text']\n text = self.cleanJson(text)\n game['BlackCard'] = { 'Text': text, 'Pick': self.deck['blackCards'][index]['pick'] }\n return game['BlackCard']\n\n\n async def nextPlay(self, ctx, game):\n # Advances the game\n if len(game['Members']) < self.minMembers:\n stat_embed = discord.Embed(color=discord.Color.red())\n stat_embed.set_author(name='Not enough players to continue! ({}/{})'.format(len(game['Members']), self.minMembers))\n stat_embed.set_footer(text='Have other users join with: {}joincah {}'.format(ctx.prefix, game['ID']))\n for member in game['Members']:\n if member['IsBot']:\n continue\n await self.bot.send_message(member['User'], embed=stat_embed)\n return\n\n # Find if we have a winner\n winner = False\n stat_embed = discord.Embed(color=discord.Color.lighter_grey())\n for member in game['Members']:\n if member['IsBot']:\n # Clear pending tasks and set to None\n if not member['Task'] == None:\n task = member['Task']\n task.cancel()\n asyncio.sleep(1)\n member['Task'] = None\n if member['Points'] >= self.winAfter:\n # We have a winner!\n winner = True\n stat_embed.set_author(name='{} is the WINNER!!'.format(DisplayName.name(member['User'])))\n break\n if winner:\n for member in game['Members']:\n if not member['IsBot']:\n await self.bot.send_message(member['User'], embed=stat_embed)\n # Reset all users\n member['Hand'] = []\n member['Points'] = 0\n member['Won'] = []\n member['Laid'] = False\n\n # Clear submitted cards\n game['Submitted'] = []\n # We have enough members\n if game['Judge'] == -1:\n # First game - randomize judge\n game['Judge'] = random.randint(0, len(game['Members'])-1)\n else:\n game['Judge']+=1\n # Reset the judge if out of bounds\n if game['Judge'] >= len(game['Members']):\n game['Judge'] = 0\n\n # Draw the next black card\n bCard = await self.drawBCard(game)\n\n # Draw cards\n for member in game['Members']:\n member['Laid'] = False\n await self.drawCards(member['ID'])\n\n # Show hands\n for member in game['Members']:\n if member['IsBot']:\n continue\n await self.showPlay(ctx, member['User'])\n index = game['Members'].index(member)\n if not index == game['Judge']:\n await self.showHand(ctx, member['User'])\n\n # Have the bots lay their cards\n for member in game['Members']:\n if not member['IsBot']:\n continue\n if member['ID'] == game['Members'][game['Judge']]['ID']:\n continue\n # Not a human player, and not the judge\n task = self.bot.loop.create_task(self.botPick(ctx, member, game))\n member['Task'] = task\n # await self.botPick(ctx, member, game)\n\n\n @commands.command(pass_context=True)\n async def game(self, ctx, *, message = None):\n \"\"\"Displays the game's current status.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n await self.showPlay(ctx, ctx.message.author)\n\n\n @commands.command(pass_context=True)\n async def say(self, ctx, *, message = None):\n \"\"\"Broadcasts a message to the other players in your game.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await 
self.bot.send_message(ctx.message.author, msg)\n return\n userGame['Time'] = currentTime = int(time.time())\n if message == None:\n msg = \"Ooookay, you say *nothing...*\"\n await self.bot.send_message(ctx.message.author, msg)\n return\n msg = '*{}* says: {}'.format(ctx.message.author.name, message)\n for member in userGame['Members']:\n if member['IsBot']:\n continue\n # Tell them all!!\n if not member['User'] == ctx.message.author:\n # Don't tell yourself\n await self.bot.send_message(member['User'], msg)\n await self.bot.send_message(ctx.message.author, 'Message sent!')\n \n \n @commands.command(pass_context=True)\n async def lay(self, ctx, *, card = None):\n \"\"\"Lays a card or cards from your hand. If multiple cards are needed, separate them by a comma (1,2,3).\"\"\"\n if not await self.checkPM(ctx.message):\n return\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n userGame['Time'] = currentTime = int(time.time())\n for member in userGame['Members']:\n if member['User'] == ctx.message.author:\n user = member\n index = userGame['Members'].index(member)\n if index == userGame['Judge']:\n await self.bot.send_message(ctx.message.author, \"You're the judge. You don't get to lay cards this round.\")\n return\n for submit in userGame['Submitted']:\n if submit['By']['User'] == ctx.message.author:\n await self.bot.send_message(ctx.message.author, \"You already made your submission this round.\")\n return\n if card == None:\n await self.bot.send_message(ctx.message.author, 'You need you input *something.*')\n return\n card = card.strip()\n card = card.replace(\" \", \"\")\n # Not the judge\n if len(userGame['Members']) < self.minMembers:\n stat_embed = discord.Embed(color=discord.Color.red())\n stat_embed.set_author(name='Not enough players to continue! 
({}/{})'.format(len(userGame['Members']), self.minMembers))\n stat_embed.set_footer(text='Have other users join with: {}joincah {}'.format(ctx.prefix, userGame['ID']))\n for member in userGame['Members']:\n if member['IsBot']:\n continue\n await self.bot.send_message(member['User'], embed=stat_embed)\n return\n\n numberCards = userGame['BlackCard']['Pick']\n cards = []\n if numberCards > 1:\n cardSpeak = \"cards\"\n try:\n card = card.split(',')\n except Exception:\n card = []\n if not len(card) == numberCards:\n msg = 'You need to pick **{} cards** (no duplicates) with `{}lay [card numbers separated by commas (1,2,3)]`\\n\\nYour hand is:'.format(numberCards, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n await self.showHand(ctx, ctx.message.author)\n return\n # Got something\n # Check for duplicates\n if not len(card) == len(set(card)):\n msg = 'You need to pick **{} cards** (no duplicates) with `{}lay [card numbers separated by commas (1,2,3)]`\\n\\nYour hand is:'.format(numberCards, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n await self.showHand(ctx, ctx.message.author)\n return\n # Works\n for c in card:\n try:\n c = int(c)\n except Exception:\n msg = 'You need to pick **{} cards** (no duplicates) with `{}lay [card numbers separated by commas (1,2,3)]`\\n\\nYour hand is:'.format(numberCards, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n await self.showHand(ctx, ctx.message.author)\n return\n\n if c-1 < 0 or c-1 > len(user['Hand'])-1:\n msg = 'Card numbers must be between 1 and {}.\\n\\nYour hand is:'.format(len(user['Hand']))\n await self.bot.send_message(ctx.message.author, msg)\n await self.showHand(ctx, ctx.message.author)\n return\n cards.append(user['Hand'][c-1]['Text'])\n # Remove from user's hand\n card = sorted(card, key=lambda card:int(card), reverse=True)\n for c in card:\n user['Hand'].pop(int(c)-1)\n # Valid cards\n \n newSubmission = { 'By': user, 'Cards': cards }\n else:\n cardSpeak = \"card\"\n try:\n card = int(card)\n except Exception:\n msg = 'You need to pick a valid card with `{}lay [card number]`\\n\\nYour hand is:'.format(ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n await self.showHand(ctx, ctx.message.author)\n return\n # Valid card\n newSubmission = { 'By': user, 'Cards': [ user['Hand'].pop(card-1)['Text'] ] }\n userGame['Submitted'].append(newSubmission)\n \n # Shuffle cards\n shuffle(userGame['Submitted'])\n\n user['Laid'] = True\n await self.bot.send_message(ctx.message.author, 'You submitted your {}!'.format(cardSpeak))\n await self.checkSubmissions(ctx, userGame)\n \n\n @commands.command(pass_context=True)\n async def pick(self, ctx, *, card = None):\n \"\"\"As the judge - pick the winning card(s).\"\"\"\n if not await self.checkPM(ctx.message):\n return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n # Not in a game\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n userGame['Time'] = currentTime = int(time.time())\n isJudge = False\n for member in userGame['Members']:\n if member['User'] == ctx.message.author:\n user = member\n index = userGame['Members'].index(member)\n if index == userGame['Judge']:\n isJudge = True\n if not isJudge:\n msg = \"You're not the judge - I guess you'll have to wait your turn.\".format(ctx.prefix, ctx.prefix)\n await 
self.bot.send_message(ctx.message.author, msg)\n return\n # Am judge\n totalUsers = len(userGame['Members'])-1\n submitted = len(userGame['Submitted'])\n if submitted < totalUsers:\n if totalUsers - submitted == 1:\n msg = \"Still waiting on 1 card...\"\n else:\n msg = \"Still waiting on {} cards...\".format(totalUsers-submitted)\n await self.bot.send_message(ctx.message.author, msg)\n return\n try:\n card = int(card)-1\n except Exception:\n card = -1\n if card < 0 or card >= totalUsers:\n msg = \"Your pick must be between 1 and {}.\".format(totalUsers)\n await self.bot.send_message(ctx.message.author, msg)\n return\n # Pick is good!\n await self.winningCard(ctx, userGame, card)\n\n\n @commands.command(pass_context=True)\n async def hand(self, ctx):\n \"\"\"Shows your hand.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n # Not in a game\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n await self.showHand(ctx, ctx.message.author)\n userGame['Time'] = currentTime = int(time.time())\n\n\n @commands.command(pass_context=True)\n async def newcah(self, ctx):\n \"\"\"Starts a new Cards Against Humanity game.\"\"\"\n #if not await self.checkPM(ctx.message):\n #return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if userGame:\n # Already in a game\n msg = \"You're already in a game (id: *{}*)\\nType `{}leavecah` to leave that game.\".format(userGame['ID'], ctx.prefix)\n await self.bot.send_message(ctx.message.channel, msg)\n return\n\n # Not in a game - create a new one\n gameID = self.randomID()\n currentTime = int(time.time())\n newGame = { 'ID': gameID, 'Members': [], 'Discard': [], 'BDiscard': [], 'Judge': -1, 'Time': currentTime, 'BlackCard': None, 'Submitted': [], 'NextHand': asyncio.Event() }\n member = { 'ID': ctx.message.author.id, 'User': ctx.message.author, 'Points': 0, 'Won': [], 'Hand': [], 'Laid': False, 'IsBot': False, 'Creator': True, 'Task': None }\n newGame['Members'].append(member)\n task = self.bot.loop.create_task(self.gameCheckLoop(ctx, newGame))\n newGame['Task'] = task\n self.games.append(newGame)\n # Tell the user they created a new game and list its ID\n await self.bot.send_message(ctx.message.channel, 'You created game id: *{}*'.format(gameID))\n await self.drawCards(ctx.message.author)\n # await self.showHand(ctx, ctx.message.author)\n # await self.nextPlay(ctx, newGame)\n \n\n @commands.command(pass_context=True)\n async def leavecah(self, ctx): \n \"\"\"Leaves the current game you're in.\"\"\"\n removeCheck = await self.removeMember(ctx.message.author)\n if not removeCheck:\n msg = 'You are not in a game.'\n await self.bot.send_message(ctx.message.channel, msg)\n return\n if self.checkGame(removeCheck):\n # await self.nextPlay(ctx, removeCheck)\n \n # Start the game loop\n event = removeCheck['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n\n\n @commands.command(pass_context=True)\n async def joincah(self, ctx, *, id = None):\n \"\"\"Join a Cards Against Humanity game. 
If no id or user is passed, joins a random game.\"\"\"\n #if not await self.checkPM(ctx.message):\n #return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n isCreator = False\n if userGame:\n # Already in a game\n msg = \"You're already in a game (id: *{}*)\\nType `{}leavecah` to leave that game.\".format(userGame['ID'], ctx.prefix)\n await self.bot.send_message(ctx.message.channel, msg)\n return\n if len(self.games):\n if id:\n game = self.gameForID(id)\n if game == None:\n # That id doesn't exist - or is possibly a user\n # If user, has to be joined from server chat\n if not ctx.message.server:\n msg = \"I couldn't find a game attached to that id. If you are trying to join a user - run the `{}joincah [user]` command in a channel on a server you share with that user.\".format(ctx.prefix)\n await self.bot.send_message(ctx.message.channel, msg)\n return\n else:\n # We have a server - let's try for a user\n member = DisplayName.memberForName(id, ctx.message.server)\n if not member:\n # Couldn't find user!\n msg = \"I couldn't find a game attached to that id. If you are trying to join a user - run the `{}joincah [user]` command in a channel on a server you share with that user.\".format(ctx.prefix)\n await self.bot.send_message(ctx.message.channel, msg)\n return\n # Have a user - check if they're in a game\n game = self.userGame(member)\n if not game:\n # That user is NOT in a game!\n msg = \"That user doesn't appear to be playing.\"\n await self.bot.send_message(ctx.message.channel, msg)\n return\n \n else:\n game = random.choice(self.games)\n else:\n # No games - create a new one\n gameID = self.randomID()\n game = { 'ID': gameID, 'Members': [], 'Discard': [], 'BDiscard': [], 'Judge': -1, 'Time': 0, 'BlackCard': None, 'Submitted': [], 'NextHand': asyncio.Event() }\n task = self.bot.loop.create_task(self.gameCheckLoop(ctx, game))\n game['Task'] = task\n self.games.append(game)\n # Tell the user they created a new game and list its ID\n await self.bot.send_message(ctx.message.channel, 'You created game id: *{}*'.format(gameID))\n isCreator = True\n\n # Tell everyone else you joined\n for member in game['Members']:\n if member['IsBot']:\n continue\n await self.bot.send_message(member['User'], '*{}* joined the game! Reorganizing...'.format(DisplayName.name(ctx.message.author)))\n \n # We got a user!\n member = { 'ID': ctx.message.author.id, 'User': ctx.message.author, 'Points': 0, 'Won': [], 'Hand': [], 'Laid': False, 'IsBot': False, 'Creator': isCreator, 'Task': None }\n game['Members'].append(member)\n await self.drawCards(ctx.message.author)\n if len(game['Members'])==1:\n # Just created the game\n await self.drawCards(ctx.message.author)\n # await self.showHand(ctx, ctx.message.author)\n # await self.nextPlay(ctx, game)\n else:\n msg = \"You've joined game id: *{}!*\\n\\nThere are *{} users* in this game.\".format(game['ID'], len(game['Members']))\n await self.bot.send_message(ctx.message.channel, msg)\n # await self.nextPlay(ctx, game)\n # Start the game loop\n event = game['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n\n game['Time'] = currentTime = int(time.time())\n\n\n @commands.command(pass_context=True)\n async def addbot(self, ctx):\n \"\"\"Adds a bot to the game. 
Can only be done by the player who created the game.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n # Not in a game\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n botCount = 0\n for member in userGame['Members']:\n if member['IsBot']:\n botCount += 1\n continue\n if member['User'] == ctx.message.author:\n if not member['Creator']:\n # You didn't make this game\n msg = 'Only the player that created the game can add bots.'\n await self.bot.send_message(ctx.message.author, msg)\n return\n # We are the creator - let's check the number of bots\n if botCount >= self.maxBots:\n # Too many bots!\n msg = 'You already have enough bots (max is {}).'.format(self.maxBots)\n await self.bot.send_message(ctx.message.author, msg)\n return\n # We can get another bot!\n botID = self.randomBotID(userGame)\n lobot = { 'ID': botID, 'User': None, 'Points': 0, 'Won': [], 'Hand': [], 'Laid': False, 'IsBot': True, 'Creator': False, 'Task': None }\n userGame['Members'].append(lobot)\n await self.drawCards(lobot['ID'])\n msg = '*{} ({})* joined the game! Reorganizing...'.format(self.botName, botID)\n for member in userGame['Members']:\n if member['IsBot']:\n continue\n await self.bot.send_message(member['User'], msg)\n # await self.nextPlay(ctx, userGame)\n\n # Start the game loop\n event = userGame['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n\n\n @commands.command(pass_context=True)\n async def addbots(self, ctx, number = None):\n \"\"\"Adds bots to the game. Can only be done by the player who created the game.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n # Not in a game\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n botCount = 0\n for member in userGame['Members']:\n if member['IsBot']:\n botCount += 1\n continue\n if member['User'] == ctx.message.author:\n if not member['Creator']:\n # You didn't make this game\n msg = 'Only the player that created the game can add bots.'\n await self.bot.send_message(ctx.message.author, msg)\n return\n if number == None:\n # No number specified - let's add the max number of bots\n number = self.maxBots - botCount\n\n try:\n number = int(number)\n except Exception:\n msg = 'Number of bots to add must be an integer.'\n await self.bot.send_message(ctx.message.author, msg)\n return\n\n # We are the creator - let's check the number of bots\n if botCount >= self.maxBots:\n # Too many bots!\n msg = 'You already have enough bots (max is {}).'.format(self.maxBots)\n await self.bot.send_message(ctx.message.author, msg)\n return\n\n if number > (self.maxBots - botCount):\n number = self.maxBots - botCount\n \n if number == 1:\n msg = 'Adding {} bot:\\n\\n'.format(number)\n else:\n msg = 'Adding {} bots:\\n\\n'.format(number)\n\n for i in range(0, number):\n # We can get another bot!\n botID = self.randomBotID(userGame)\n lobot = { 'ID': botID, 'User': None, 'Points': 0, 'Won': [], 'Hand': [], 'Laid': False, 'IsBot': True, 'Creator': False, 'Task': None }\n userGame['Members'].append(lobot)\n await self.drawCards(lobot['ID'])\n msg += '*{} ({})* 
joined the game!\\n'.format(self.botName, botID)\n # await self.nextPlay(ctx, userGame)\n msg += 'Reorganizing...'\n \n for member in userGame['Members']:\n if member['IsBot']:\n continue\n await self.bot.send_message(member['User'], msg)\n\n # Start the game loop\n event = userGame['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n\n\n @commands.command(pass_context=True)\n async def removebot(self, ctx, *, id = None):\n \"\"\"Removes a bot from the game. Can only be done by the player who created the game.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n # Not in a game\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n botCount = 0\n for member in userGame['Members']:\n if member['IsBot']:\n botCount += 1\n continue\n if member['User'] == ctx.message.author:\n if not member['Creator']:\n # You didn't make this game\n msg = 'Only the player that created the game can add bots.'\n await self.bot.send_message(ctx.message.author, msg)\n return\n # We are the creator - let's check the number of bots\n if id == None:\n # Just remove the first bot we find\n for member in userGame['Members']:\n if member['IsBot']:\n await self.removeMember(member['ID'])\n # Start the game loop\n event = userGame['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n return\n msg = 'No bots to remove!'\n await self.bot.send_message(ctx.message.author, msg)\n return\n else:\n # Remove a bot by id\n if not await self.removeMember(id):\n # not found\n msg = 'I couldn\\'t locate that bot on this game.'\n await self.bot.send_message(ctx.message.author, msg)\n return\n # await self.nextPlay(ctx, userGame)\n\n # Start the game loop\n event = userGame['NextHand']\n self.bot.loop.call_soon_threadsafe(event.set)\n\n\n @commands.command(pass_context=True)\n async def cahgames(self, ctx):\n \"\"\"Displays up to 10 CAH games in progress.\"\"\"\n shuffledGames = list(self.games)\n random.shuffle(shuffledGames)\n if not len(shuffledGames):\n await self.bot.send_message(ctx.message.channel, 'No games being played currently.')\n return\n \n max = 10\n if len(shuffledGames) < 10:\n max = len(shuffledGames)\n msg = '__Current CAH Games__:\\n\\n'\n\n for i in range(0, max):\n playerCount = 0\n botCount = 0\n gameID = shuffledGames[i]['ID']\n for j in shuffledGames[i]['Members']:\n if j['IsBot']:\n botCount += 1\n else:\n playerCount += 1\n botText = '{} bot'.format(botCount)\n if not botCount == 1:\n botText += 's'\n playerText = '{} player'.format(playerCount)\n if not playerCount == 1:\n playerText += 's'\n\n msg += '{}. 
{} - {} | {}\\n'.format(i+1, gameID, playerText, botText)\n\n await self.bot.send_message(ctx.message.channel, msg)\n\n \n\n @commands.command(pass_context=True)\n async def score(self, ctx):\n \"\"\"Display the score of the current game.\"\"\"\n if not await self.checkPM(ctx.message):\n return\n # Check if the user is already in game\n userGame = self.userGame(ctx.message.author)\n if not userGame:\n # Not in a game\n msg = \"You're not in a game - you can create one with `{}newcah` or join one with `{}joincah`.\".format(ctx.prefix, ctx.prefix)\n await self.bot.send_message(ctx.message.author, msg)\n return\n stat_embed = discord.Embed(color=discord.Color.purple())\n stat_embed.set_author(name='Current Score')\n stat_embed.set_footer(text='Cards Against Humanity - id: {}'.format(userGame['ID']))\n await self.bot.send_message(ctx.message.author, embed=stat_embed)\n users = sorted(userGame['Members'], key=lambda card:int(card['Points']), reverse=True)\n msg = ''\n i = 0\n if len(users) > 10:\n msg += '__10 of {} Players:__\\n\\n'.format(len(users))\n else:\n msg += '__Players:__\\n\\n'\n for user in users:\n i += 1\n if i > 10:\n break\n if user['Points'] == 1:\n if user['User']:\n # Person\n msg += '{}. *{}* - 1 point\\n'.format(i, DisplayName.name(user['User']))\n else:\n # Bot\n msg += '{}. *{} ({})* - 1 point\\n'.format(i, self.botName, user['ID'])\n else:\n if user['User']:\n # Person\n msg += '{}. *{}* - {} points\\n'.format(i, DisplayName.name(user['User']), user['Points'])\n else:\n # Bot\n msg += '{}. *{} ({})* - {} points\\n'.format(i, self.botName, user['ID'], user['Points'])\n await self.bot.send_message(ctx.message.author, msg)\n","sub_path":"Cogs/CardsAgainstHumanity.py","file_name":"CardsAgainstHumanity.py","file_ext":"py","file_size_in_byte":48693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"334612492","text":"# This script demonstrates how you can use the action manager\n# to execute the Emboss filter.\n\nfrom win32com.client import Dispatch, GetActiveObject, GetObject\n\n# Start up Photoshop application\n# Or get Reference to already running Photoshop application instance\n# app = Dispatch('Photoshop.Application')\napp = GetActiveObject(\"Photoshop.Application\")\n\nfileName = \"C:\\Git\\photoshop-scripting-python\\PS_Samples_Files\\Layer Comps.psd\"\ndocRef = app.Open(fileName)\n\nnLayerSets = docRef.LayerSets\nnArtLayers = docRef.LayerSets.Item(len(nLayerSets)).ArtLayers\n\ndocRef.ActiveLayer = docRef.LayerSets.Item(len(nLayerSets)).ArtLayers.Item(len(nArtLayers))\n\ndef emboss(inAngle, inHeight, inAmount):\n # Get ID's for the related keys\n keyAngleID = app.CharIDToTypeID(\"Angl\")\n keyHeightID = app.CharIDToTypeID(\"Hght\")\n keyAmountID = app.CharIDToTypeID(\"Amnt\")\n eventEmbossID = app.CharIDToTypeID(\"Embs\")\n\n filterDescriptor = Dispatch('Photoshop.ActionDescriptor')\n filterDescriptor.PutInteger(keyAngleID, inAngle)\n filterDescriptor.PutInteger(keyHeightID, inHeight)\n filterDescriptor.PutInteger(keyAmountID, inAmount)\n\n app.ExecuteAction(eventEmbossID, filterDescriptor)\n\nemboss( 120, 10, 100)\n","sub_path":"EmbossAction.py","file_name":"EmbossAction.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"633997092","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.over_sampling import SMOTE\n\nfrom helper import save_to_csv\n\nfrom config import 
ABS_SPEED_60, \\\n ABS_SPEED_90, \\\n ABS_ACCEL_X_LIM, \\\n ABS_ACCEL_Y_LIM, \\\n ABS_ACCEL_Z_LIM, \\\n ABS_GYRO_X_LIM, \\\n ABS_GYRO_Y_LIM, \\\n ABS_GYRO_Z_LIM, \\\n AGG_COLS_1, \\\n AGG_COLS_2, \\\n AGG_COLS_1_TEST, \\\n AGG_COLS_2_TEST, \\\n COL_NAMES, \\\n COL_NAMES_TEST, \\\n TRIP_DATA_PATH, \\\n TRIP_DATA_TEST_PATH, \\\n X_DATA_PATH, \\\n Y_DATA_PATH\n\ndef _abs_features(df):\n df['abs_speed'] = abs(df['Speed'])\n df['abs_accel_x'] = abs(df['acceleration_x'])\n df['abs_accel_y'] = abs(df['acceleration_y'])\n df['abs_accel_z'] = abs(df['acceleration_z'])\n df['abs_gyro_x'] = abs(df['gyro_x'])\n df['abs_gyro_y'] = abs(df['gyro_y'])\n df['abs_gyro_z'] = abs(df['gyro_z'])\n \n return df\n\ndef _speed_features(df):\n \n # Find zero speed entries\n df['zero_speed']= 0\n df.loc[df['abs_speed'] == 0, 'zero_speed'] = 1\n \n # Find high speed entries\n df['more_than_90kmph'] = 0\n df.loc[df['abs_speed'] >= ABS_SPEED_90, 'more_than_90kmph'] = 1\n \n df['more_than_60kmph'] = 0\n df.loc[(df['abs_speed'] < ABS_SPEED_90) & (df['abs_speed'] > ABS_SPEED_60), 'more_than_60kmph'] = 1\n \n return df\n\ndef _acceleration_features(df):\n \n # High acceleration x\n df['high_accel_x'] = 0\n df.loc[(df['abs_accel_x'] >= ABS_ACCEL_X_LIM), 'high_accel_x'] = 1\n \n # High acceleration y\n df['high_accel_y'] = 0\n df.loc[df['abs_accel_y'] >= ABS_ACCEL_Y_LIM, 'high_accel_y'] = 1\n \n # High acceleration z\n df['high_accel_z'] = 0\n df.loc[(df['abs_accel_z'] >= ABS_ACCEL_Z_LIM), 'high_accel_z'] = 1\n \n return df\n\ndef _gyro_features(df):\n df['high_gyro_x'] = 0\n df.loc[(df['abs_gyro_x'] >= ABS_GYRO_X_LIM), 'high_gyro_x'] = 1\n \n df['high_gyro_y'] = 0\n df.loc[(df['abs_gyro_y'] <= ABS_GYRO_Y_LIM), 'high_gyro_y'] = 1\n \n df['high_gyro_z'] = 0\n df.loc[(df['abs_gyro_z'] >= ABS_GYRO_Z_LIM), 'high_gyro_z'] = 1\n \n return df\n\ndef _weighted_features(df):\n duration = df['second']\n \n df['weighted_zero_spd'] = df['zero_speed'] * duration\n df['weighted_90kmph'] = df['more_than_90kmph'] * duration\n df['weighted_60kmph'] = df['more_than_60kmph'] * duration\n \n df['weighted_high_accel_x'] = df['high_accel_x'] * duration\n df['weighted_high_accel_y'] = df['high_accel_y'] * duration\n df['weighted_high_accel_z'] = df['high_accel_z'] * duration\n \n return df\n\ndef _get_weighted_average(df):\n duration = df['second']\n \n df['weighted_zero_spd'] = df['weighted_zero_spd'] / duration\n df['weighted_90kmph'] = df['weighted_90kmph'] / duration\n df['weighted_60kmph'] = df['weighted_60kmph'] / duration\n df['weighted_high_accel_x'] = df['weighted_high_accel_x'] / duration\n df['weighted_high_accel_y'] = df['weighted_high_accel_y'] / duration\n df['weighted_high_accel_z'] = df['weighted_high_accel_z'] / duration\n \n return df\n\ndef _group_data(df, AGG_COLS_1, AGG_COLS_2, COL_NAMES):\n grouped_df = df.groupby(['bookingID']).agg(AGG_COLS_1).reset_index(drop = False)\n\n tmp_df = df.groupby(['bookingID']).agg(AGG_COLS_2).reset_index(drop = False)\n tmp_df.rename(columns = COL_NAMES, inplace = True)\n\n grouped_df = grouped_df.merge(tmp_df, how = 'left', on = ['bookingID'])\n return grouped_df\n\ndef _xy_split(df):\n y = df['label']\n x = df.drop(columns = ['label', 'bookingID', 'second'], axis = 1)\n\n return x, y\n\ndef _oversample(x, y):\n sm = SMOTE(random_state=1, ratio = 1.0)\n x, y = sm.fit_sample(x, y)\n\n return x, y\n\ndef data_processing_train(df):\n df = _abs_features(df)\n df = _speed_features(df)\n df = _acceleration_features(df)\n df = _gyro_features(df)\n df = _weighted_features(df)\n df = _group_data(df, 
AGG_COLS_1, AGG_COLS_2, COL_NAMES)\n df = _get_weighted_average(df)\n\n save_to_csv(df, TRIP_DATA_PATH)\n\n x, y = _xy_split(df)\n x, y = _oversample(x, y)\n x = pd.DataFrame(x)\n y = pd.DataFrame(y)\n \n save_to_csv(x, X_DATA_PATH)\n save_to_csv(y, Y_DATA_PATH)\n\n return x, y\n\ndef data_processing_predict(df):\n df = _abs_features(df)\n df = _speed_features(df)\n df = _acceleration_features(df)\n df = _gyro_features(df)\n df = _weighted_features(df)\n df = _group_data(df, AGG_COLS_1_TEST, AGG_COLS_2_TEST, COL_NAMES_TEST)\n df = _get_weighted_average(df)\n\n save_to_csv(df, TRIP_DATA_TEST_PATH)\n\n return df","sub_path":"Code/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"450871816","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 15 10:15:32 2015\n\n@author: wisp10\n\"\"\"\nfrom __future__ import print_function\n\nfrom PyQt4.QtCore import QObject, pyqtSignal, pyqtSlot\nfrom Visa.VisaSetting import EnumSetting, IntegerSetting, NumericEnumSetting, FloatSetting, OnOffSetting, InstrumentWithSettings #, SettingCollection, AngleSetting\nfrom Visa.VisaInstrument import VisaInstrument\nimport numpy as np\nimport warnings\n\nclass SR830LiaStatus(object):\n def __init__(self, statusByte):\n self._statusByte = statusByte\n \n @property\n def inputOverload(self):\n return bool(self._statusByte & 1)\n \n @property\n def filterOverload(self):\n return bool(self._statusByte & 2)\n \n @property\n def outputOverload(self):\n return bool(self._statusByte & 4)\n\n @property\n def anyOverload(self):\n return bool(self._statusByte & 7)\n\n @property\n def unlocked(self):\n return bool(self._statusByte & 8)\n\n @property\n def frequencyRangeChanged(self):\n return bool(self._statusByte & 16)\n \n @property\n def timeConstantRanged(self):\n return bool(self._statusByte & 32)\n \n @property\n def triggeredExternally(self):\n return bool(self._statusByte & 64)\n \n\nclass SR830(VisaInstrument, InstrumentWithSettings, QObject):\n #adcReadingAvailable = pyqtSignal(float, float)\n #resistanceReadingAvailable = pyqtSignal(float, float)\n auxInRead = pyqtSignal(int, float)\n inputOverloadRead = pyqtSignal(bool)\n filterOverloadRead = pyqtSignal(bool)\n outputOverloadRead = pyqtSignal(bool)\n \n \n readingAvailable = pyqtSignal(float, float, float)\n '''Emitted whenever a new reading is taken with snapSignal, provides X, Y, and f'''\n\n OffsetQuantityCodes = {'X': 1, 'Y': 2, 'R': 3}\n OffsetExpandCodes = {1:0, 10:1, 100:2}\n Channel1DisplayItems = {'X': 0, 'R':1, 'Xn':2,'AUX In 1':3, 'AUX In 2':4}\n Channel2DisplayItems = {'Y': 0, 'Theta':1, 'Yn':2,'AUX In 3':3, 'AUX In 4':4}\n \n def __init__(self, visaResource):\n QObject.__init__(self)\n InstrumentWithSettings.__init__(self)\n VisaInstrument.__init__(self, visaResource)\n self._x = None\n self._y = None\n self._f = None\n self._auxIn = [np.nan, np.nan, np.nan, np.nan]\n self.model = 'SR830'\n self.serial = '000000'\n if visaResource is not None:\n try:\n visaId = self.visaId()\n d = visaId.split(',')\n self.model = d[1]\n self.serial = d[2][3:]\n if not self.model in ['SR810', 'SR830', 'SR850']:\n raise Exception('Unknown model %s' % self.model)\n except Exception as e:\n warnings.warn(\"Unable to obtain VISA ID: %s\" % e)\n \n \n self.inputSource = EnumSetting('ISRC', 'input source', [(0, 'A (single ended)'), (1, 'A-B (differential)'), (2, 'I (1 MOhm)'), (3, 'I (100 MOhm)')], self)\n self.inputCoupling 
= EnumSetting('ICPL', 'input coupling', [(0, 'AC'), (1, 'DC')], self)\n self.inputGrounding = EnumSetting('IGND', 'input shield ground', [(0, 'FLOAT'), (1, 'GND')], self)\n self.inputFilters = EnumSetting('ILIN', 'input filters', [(0, 'None'), (1, '60 Hz'), (2, '120Hz'), (3, '60 & 120 Hz')], self)\n self.reserve = EnumSetting('RMOD', 'reserve', [(0, 'high reserve'), (1, 'normal'), (2, 'low noise')], self)\n self.syncDemodulator = OnOffSetting('SYNC', 'synchronous demodulator', self)\n self.auxOut = []\n for i in range(4):\n self.auxOut.append(FloatSetting('AUXV %i,' % (i+1), 'auxilliary output', -10.,+10., 'V', 1E-3, 3, self, queryString='AUXV? %i' % (i+1)))\n \n self.sineOut = FloatSetting('SLVL', 'sine out amplitude', 0.004, 5.000, 'V', step=0.002, decimals=3, instrument=self)\n self.referenceFrequency = FloatSetting('FREQ', 'reference frequency', 1E-3, 102E3, 'Hz', step=1E-4, decimals=4, instrument=self)\n #self.referencePhase = AngleSetting('PHAS', 'reference phase', self)\n self.referenceTrigger = EnumSetting('RSLP', 'reference trigger', [(0, 'sine'), (1, 'positive edge'),(2,'negative edge')], instrument=self)\n if self.model in ['SR810', 'SR830']:\n self.referenceSource = EnumSetting('FMOD', 'reference source', [(0, 'external'), (1, 'internal')], self)\n elif self.model == 'SR850':\n self.referenceSource = EnumSetting('FMOD', 'reference source', [(0, 'internal'), (1, 'sweep'), (2, 'external')], self)\n\n self.harmonic = IntegerSetting('HARM', 'harmonic', 1, 100, unit='', instrument=self)\n \n self.filterTc = NumericEnumSetting('OFLT', 'filter time constant', [(0,10E-6), (1, 30E-6), (2, 100E-6), (3, 300E-6), (4, 1E-3), (5, 3E-3), (6, 10E-3), (7, 30E-3), (8, 100E-3), (9,300E-3), (10, 1.), (11, 3.), (12, 10.), (13, 30.), (14, 100.), (15, 300.), (16, 1E3), (17, 3E3), (18, 10E3), (19, 30E3)], self, 's')\n self.filterSlope = NumericEnumSetting('OFSL', 'filter roll-off', [(0, 6), (1, 12), (2, 18), (3,24)], self, 'dB/oct.')\n self.sensitivity = NumericEnumSetting('SENS', 'sensitivity', [(0, 2E-9), (1, 5E-9), (2, 1E-8), (3, 2E-8), (4, 5E-8), (5, 1E-7), (6, 2E-7), (7, 5E-7), (8, 1E-6), (9, 2E-6), (10, 5E-6), (11,1E-5), (12,2E-5), (13, 5E-5), (14, 1E-4), (15, 2E-4), (16, 5E-4), (17, 1E-3), (18, 2E-3), (19, 5E-3), (20, 1E-2), (21, 2E-2), (22, 5E-2), (23,0.1), (24, 0.2), (25, 0.5), (26, 1.0)], self, unit='V')\n self.traceLoop = EnumSetting('SEND', 'buffer mode', [(0, 'single shot'), (1, 'loop')], self)\n self.traceRate = NumericEnumSetting('SRAT', 'sample data rate', [(0, 62.5E-3), (1, 125E-3), (2, 250E-3), (3, 500E-3), (4, 1.0), (5, 2.0), (6, 4.0), (7, 8.0), (8, 16.0), (9, 32.0), (10, 64.0), (11, 128.0), (12, 256.0), (13, 512.0)], self, 'Hz')\n \n \n @pyqtSlot()\n def readAll(self):\n self.inputSource.code\n self.inputCoupling.code\n self.inputGrounding.code\n self.inputFilters.code\n self.reserve.code\n self.syncDemodulator.enabled\n self.sineOut.value\n self.referenceFrequency.value\n self.referenceTrigger.code\n self.referenceSource.code\n self.harmonic.value\n self.filterTc.code\n self.filterSlope.code\n self.sensitivity.code\n for i in range(4):\n self.auxOut[i].value\n\n @pyqtSlot(int)\n def snapSignal(self, auxIn=None):\n '''Snap X,Y, and f and optionally one of the AUX inputs from the lock-in.\n Returns X,Y,f. Data are also cached in the LockIn instance and can be read as\n any combination of X,Y, R and Theta + the AUX values.\n '''\n items = ['1','2','9'] #X,Y,f\n if auxIn is not None:\n items.append(str(auxIn+5))\n items = ','.join(items)\n result = self.queryString(\"SNAP ? 
%s\" % items)\n d = result.split(',')\n self._x = float(d[0])\n self._y = float(d[1])\n self._f = float(d[2])\n self.readingAvailable.emit(self._x,self._y,self._f)\n if auxIn is not None:\n Vaux = float(d[3])\n self._auxIn[auxIn] = Vaux\n self.auxInRead.emit(auxIn, Vaux)\n return (self._x, self._y, self._f)\n \n @pyqtSlot(int)\n def snapSignalR(self, auxIn=None):\n '''Like snapSignal, but instead of X,Y this transfers R and theta.\n This was to test the idea that since R is always positive we may gain\n additional resolution by loosing the sign bit. In practice,\n this does not pan out. Deprecated.'''\n items = ['3', '4', '9'] # R, theta, f\n if auxIn is not None:\n items.append(str(auxIn+5))\n items = ','.join(items)\n result = self.queryString(\"SNAP ? %s\" % items)\n d = result.split(',')\n r = float(d[0])\n theta = np.deg2rad(float(d[1]))\n self._x = r * np.cos(theta)\n self._y = r * np.sin(theta)\n self._f = float(d[2])\n self.readingAvailable.emit(self._x,self._y,self._f)\n if auxIn is not None:\n Vaux = float(d[3])\n self._auxIn[auxIn] = Vaux\n self.auxInRead.emit(auxIn, Vaux)\n return (self._x, self._y, self._f)\n \n \n def checkStatus(self):\n '''Query the staus register of the lock-in\n Returns: SR830LiaStatus\n Emits: inputOverloadRead(bool), filterOverloadRead(bool), outputOverloadRead(bool)\n '''\n lias = SR830LiaStatus(self.queryInteger('LIAS?'))\n self._lockinStatus = lias\n self.inputOverloadRead.emit(lias.inputOverload)\n self.filterOverloadRead.emit(lias.filterOverload)\n self.outputOverloadRead.emit(lias.outputOverload)\n return lias\n\n @property \n def overload(self):\n '''Return if any overload (input, filter or output) has occured. Need to call checkStatus first.\n Deprecated.'''\n return self._lockinStatus.anyOverload\n \n @property\n def R(self):\n return np.sqrt(self._x**2+self._y**2)\n\n @property \n def theta(self):\n return np.arctan2(self._y, self._x)\n \n @property\n def thetaDegree(self):\n return np.rad2deg(self.theta)\n \n @property\n def X(self):\n return self._x\n \n @property\n def Y(self):\n return self._y\n \n @property\n def f(self):\n return self._f\n \n @f.setter\n def f(self, newFrequency):\n self.referenceFrequency.value = newFrequency\n \n def auxIn(self, channel):\n '''Read one of the auxillary inputs \n channel (int): specify channel to be read (0-3)'''\n V = self.queryFloat('OAUX? %i' % channel+1)\n self.auxInRead.emit(channel, V)\n return V\n \n def autoGain(self, block=True):\n '''Execute instrument internal auto-gain.\n block (bool): wait for operation to complete if True (default)\n '''\n self.commandString('AGAN')\n self.sensitivity._value = None\n if block:\n self.waitForOpc()\n \n def autoPhase(self, block=True):\n self.commandString('APHS')\n if block:\n self.waitForOpc()\n \n def autoReserve(self, block=True):\n self.commandString('ARSV')\n if block:\n self.waitForOpc()\n \n def waitForOpc(self):\n warnings.warn('waitForOpc not implemented/does not work!')\n pass\n #while self.queryInteger('*STB?') & 2 == 0:\n # pass\n \n def verifyPresence(self):\n '''Check if instrument is actually present and responding.'''\n visaId = self.visaId()\n return 'SR830' in visaId\n \n def startTrace(self):\n '''Start recording trace data. Make sure to resetTrace first (as needed).'''\n self.commandString('STRT')\n \n def pauseTrace(self):\n '''Pause recording of trace data. 
Do this before reading when LOOP mode is on.'''\n self.commandString('PAUS')\n \n def resetTrace(self):\n '''Clear past trace data'''\n self.commandString('REST')\n \n def traceNumberOfPoints(self):\n '''Returns number of points in the trace buffer.'''\n return self.queryInteger('SPTS?')\n\n def readTraceAscii(self, display, start=0, count=None):\n '''Read trace buffer, transmitting data as ASCII\n *display*: 1 or 2\n *start* : index of first point to transmit\n *count* : number of points to transmit (if None, transmit all points)\n Returns the data as a numpy array\n '''\n if count is None:\n count = self.traceNumberOfPoints()\n buff = self.queryString('TRCA? %d,%d,%d' % (display,start,count))\n d = buff.split(',')[:-1]\n data = np.asarray([float(v) for v in d])\n return data\n \n def autoOffset(self, quantity):\n '''Automatically offset specified quantity\n quantity (str): 'X', 'Y', or 'Z'\n '''\n code = self.OffsetQuantityCodes[quantity]\n self.commandInteger('AOFF', code)\n\n def offsetExpand(self, quantity='X'):\n '''Returns the offset/expand settings for the specified quantity.\n quantity (str): 'X', 'Y', or 'R'\n returns: percentage, expandFactor (1, 10, or 100)'''\n code = self.OffsetQuantityCodes[quantity]\n r = self.queryString('OEXP? %d' % code)\n d = r.split(',')\n offsetPercent, expandCode = float(d[0]), int(d[1])\n expand = [k for k, v in self.OffsetExpandCodes.items() if v==expandCode]\n return offsetPercent, expand[0]\n \n def disableOffsetExpand(self, quantity = None):\n '''Disable offset/expand for specified quantity.\n quantity (str): 'X', 'Y', 'R', or None for all.'''\n if quantity is None:\n self.disableOffsetExpand('X')\n self.disableOffsetExpand('Y')\n self.disableOffsetExpand('R')\n else:\n self.setOffsetExpand(quantity, 0, 1)\n \n def setOffsetExpand(self, quantity, percent, expand):\n '''Set offset and expand parameters\n quantity (str) : 'X', 'Y', or 'R'\n percent (float): percentage of full scale sensitivity (from -105 to +105%)\n expand (int) : expand factor 1, 10, or 100'''\n quantityCode = self.OffsetQuantityCodes[quantity]\n expandCode = self.OffsetExpandCodes[expand]\n self.commandString('OEXP %d,%.2f,%d' % (quantityCode, percent, expandCode))\n \n def setDisplay(self, channel, item, ratio = 0):\n '''Select the item displayed for specified channel.\n channel (int): Channel 1 or 2\n item : one of Channel1DisplayItem or Channel2DisplayItem\n ratio (int) : 0 for no ratio (default), 1 for ratio with first aux-in channel, \n 2 for ratio with second aux-in channel\n '''\n if channel == 1:\n itemCode = self.Channel1DisplayItems[item]\n elif channel == 2:\n itemCode = self.Channel2DisplayItems[item]\n else:\n raise IndexError\n self.commandString('DDEF %d,%d,%d' % (channel, itemCode, ratio))\n \nif __name__ == '__main__':\n import logging\n logging.basicConfig(level=logging.DEBUG)\n \n #sr830 = SR830(None)\n #print(sr830.autoReserve)\n\n sr830 = SR830('GPIB0::12')\n if False:\n sr830.debug = True\n print(\"Present:\", sr830.verifyPresence())\n print(\"Input source:\", sr830.inputSource.string)\n print(\"Input coupling:\", sr830.inputCoupling.string)\n print(\"Input shield ground:\", sr830.inputGrounding.string)\n print(\"Input filters:\", sr830.inputFilters.string)\n print(\"Harmonic:\", sr830.harmonic.value)\n print(\"Synchronous demodulator enabled:\", sr830.syncDemodulator.enabled)\n print(\"Reference source:\", sr830.referenceSource.string)\n print(\"Reference trigger:\", sr830.referenceTrigger.string)\n for i in range(4):\n print(\"AUX OUT\",i,\":\", 
sr830.auxOut[i].value)\n \n #sr830.referenceFrequency.value = 333.8\n print(\"Reference frequency\", sr830.referenceFrequency.value)\n #sr830.reserve.code = sr830.reserve.LOW_NOISE\n print(\"Reserve:\", sr830.reserve.string)\n #sr830.sineOut.value = 3.945\n print(\"Sine out:\", sr830.sineOut.value)\n print(\"Filter Tc:\", sr830.filterTc.value)\n print(\"Filter roll-off:\", sr830.filterSlope.value)\n print(\"Sensitivity:\", sr830.sensitivity.string)\n for i in range(10):\n sr830.snapSignal() #auxIn=i%4)\n print(\"Signal: X=\", sr830.X, \"Y=\",sr830.Y, \"f=\",sr830.f, \"R=\",sr830.R, \"theta=\",sr830.theta, \"rad =\",sr830.thetaDegree, \"deg\") \n \n print(sr830.allSettingValues())\n \n import time\n \n \n sr830.disableOffsetExpand()\n # wait\n time.sleep(10)\n FS = sr830.sensitivity.value\n sr830.snapSignal()\n X,Y = sr830.X,sr830.Y\n \n offsetPercentX, offsetPercentY = int(1E2*X/FS), int(1E2*Y/FS)\n offsetPercentX = 0; offsetPercentY = 0\n expand = 1\n sr830.setOffsetExpand('X', offsetPercentX, expand)\n sr830.setOffsetExpand('Y', offsetPercentY, expand)\n time.sleep(15)\n X0 = 1E-2*offsetPercentX*FS\n Y0 = 1E-2*offsetPercentY*FS\n \n count = 500\n x = np.zeros((count,))\n y = np.zeros_like(x)\n t = np.zeros_like(x)\n\n for i in range(count):\n t[i] = time.time()\n sr830.snapSignal()\n deltaX, deltaY = sr830.X, sr830.Y\n x[i] = X0+deltaX\n y[i] = Y0+deltaY\n time.sleep(0.1)\n\n r = np.sqrt(x**2+y**2)\n dxs = np.sort(np.abs(np.diff(x)))\n print('Smallest DX>0:', dxs[dxs>0][:10])\n res = dxs[dxs>0][0]/(FS*1.1)\n print('Resolution:', res)\n print('Resolution:',-np.log(res)/np.log(2), 'bit')\n print('Mean X, Y\"', np.mean(x), np.mean(y), 'V')\n print('Noise X:', np.std(x), 'V')\n print('Noise X:', -np.log(np.std(x)/FS)/np.log(2), 'bit')\n print('Noise Y:', np.std(y), 'V')\n print('Noise R:', np.std(r), 'V')\n import matplotlib.pyplot as mpl \n mpl.plot(t, r, label='R')\n mpl.plot(t, x, label='snap X')\n mpl.plot(t, y, label='snap Y')\n mpl.legend(loc='best')\n mpl.show()\n \n if False: \n sr830.traceRate.code = 7\n sr830.resetTrace()\n sr830.startTrace()\n count = 60\n x = np.zeros((count,))\n y = np.zeros_like(x)\n for i in range(count):\n sr830.snapSignal()\n x[i] = sr830.X\n y[i] = sr830.Y\n #print(sr830.X, sr830.Y)\n time.sleep(0.2)\n sr830.pauseTrace()\n X = sr830.readTraceAscii(1)\n Y = sr830.readTraceAscii(2)\n \n import matplotlib.pyplot as mpl \n mpl.plot(x, label='snap X')\n mpl.plot(X, label='trace X')\n mpl.plot(y, label='snap Y')\n mpl.plot(Y, label='trace Y')\n mpl.legend(loc='best')\n mpl.show()\n","sub_path":"Visa/SR830_New.py","file_name":"SR830_New.py","file_ext":"py","file_size_in_byte":17970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"163868960","text":"#!/usr/bin/env python\n\nimport argparse\nimport os.path as osp\n\nimport jsk_data\n\n\ndef download_data(path, url, md5, pkg_path=None):\n if pkg_path is not None:\n path = osp.join(pkg_path, path)\n return jsk_data.download_data(\n pkg_name='jsk_pcl_ros_utils',\n path=path,\n url=url,\n md5=md5,\n )\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n '--pkg-path', help='PAKCAGE_SOURCE_DIR in cmake'\n )\n args = parser.parse_args()\n\n download_data(\n pkg_path=args.pkg_path,\n path='sample/data/2017-02-05-16-11-09_shelf_bin.bag',\n url='https://drive.google.com/uc?id=0B9P1L--7Wd2vYWI2NnZrekEwSmc',\n md5='44427634f57ac76111edabd7b1f4e140',\n )\n\n download_data(\n 
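# shelf-bin rosbag sample, fetched by jsk_data from the Google Drive id below\n        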
pkg_path=args.pkg_path,\n path='sample/data/bunny_marker_array.bag',\n url='https://drive.google.com/uc?id=0B9P1L--7Wd2vdW1NMlhiRU9KZDQ',\n md5='e7dc29d21bdd30c769396c361e4350fd',\n )\n\n download_data(\n pkg_path=args.pkg_path,\n path='sample/data/bunny.pcd',\n url='https://raw.githubusercontent.com/PointCloudLibrary/pcl/pcl-1.8.0/test/bunny.pcd', # NOQA\n md5='a4e58778ba12d3f26304127f6be82897',\n )\n\n download_data(\n pkg_path=args.pkg_path,\n path='sample/data/arc2017_4objects.bag',\n url='https://drive.google.com/uc?id=0B9P1L--7Wd2vakpvU0wtMFNCTkk',\n md5='2c3af4482cd2e0ee95b58848ae48afaf',\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"jsk_recognition/jsk_pcl_ros_utils/scripts/install_sample_data.py","file_name":"install_sample_data.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"385864494","text":"# coding: utf-8\nimport argparse\nimport json\nimport os\nimport random\nfrom datetime import datetime\nfrom os import path\nfrom glob import glob\n\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\nfrom batchiterator import NtcBucketIterator\nfrom decode import decode\nfrom evaluation import packed_evaluate_multiclass_without_none\nfrom log import StandardLogger, write_args_log\nfrom models import PackedE2EStackedBiRNN\nfrom utils import load_dataset, pretrained_word_vecs, set_log_file\n\nrandom.seed(2020)\nnp.random.seed(2020)\n# MAX_SENTENCE_LENGTH = 90\nMAX_SENTENCE_LENGTH = 10000\nBERT_DIM = 768\n\n\ndef create_arg_parser():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--train', type=str, default=None, required=True)\n parser.add_argument('--dev', type=str, default=None, required=True)\n parser.add_argument('--test', type=str, default=None)\n parser.add_argument('--pseudo', type=str, default=None)\n parser.add_argument('--out_dir', type=str, default='result')\n parser.add_argument('--wiki_embed_dir', type=path.abspath, default=None)\n parser.add_argument('--train_bert_embed_file', type=path.abspath, default=None, help=\"hdf5 file\")\n parser.add_argument('--dev_bert_embed_file', type=path.abspath, default=None, help=\"hdf5 file\")\n parser.add_argument('--test_bert_embed_file', type=path.abspath, default=None, help=\"hdf5 file\")\n parser.add_argument('--pseudo_bert_embed_file', type=path.abspath, default=None, help=\"hdf5 file\")\n\n # Training Option\n parser.add_argument('--train_method', type=str, default=\"concat\",\n help=\"Choose from 'concat' or 'pre-train'\")\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--data_size', type=int, default=100,\n help='data size (%)')\n parser.add_argument('--epoch', dest='max_epoch', type=int, default=150)\n parser.add_argument('--batch_size', type=int, default=512)\n parser.add_argument('--loss_stop', action='store_true')\n parser.add_argument('--wiki', action='store_true')\n parser.add_argument('--bert', action='store_true')\n parser.add_argument('--multi_predicate', action='store_true')\n parser.add_argument('--zero_drop', action='store_true')\n parser.add_argument('--mapping_pseudo_train', type=path.abspath, default=None)\n parser.add_argument('--decode', action='store_true')\n parser.add_argument('--load_cpu', action='store_true')\n parser.add_argument('--half_checkpoint', action='store_true')\n parser.add_argument('--epoch_shuffle', action='store_true')\n 
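# NOTE: --half_checkpoint runs an extra dev evaluation (and possible checkpoint save)\n    # halfway through each training epoch; see the n == int(len_train / 2) branch in train().\n    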
parser.add_argument('--comment', type=str, default=\"\")\n\n # Hyper Parameter\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--pseudo_lr', type=float, default=0.001)\n parser.add_argument('--hidden_dim', type=int, default=256,\n help='unit vector size in rnn')\n parser.add_argument('--n_layers', type=int, default=10,\n help='the number of hidden layer')\n parser.add_argument('--dropout', type=float, default=0.0,\n help='dropout rate of rnn unit')\n parser.add_argument('--embed_dropout', type=float, default=0.0,\n help='dropout rate of embeddings')\n parser.add_argument('--tune-word-vec', dest='fixed_word_vec', action='store_false',\n help='do not re-train word vec')\n\n parser.set_defaults(fixed_word_vec=True)\n\n return parser\n\n\nclass BertVecHolder(object):\n def __init__(self, train_json: str, train_hdf5: str, pseudo_json: str, pseudo_hdf5: str, data_size: float):\n self.train_json = train_json\n self.train_hdf5 = train_hdf5\n self.pseudo_json_files = sorted(glob(pseudo_json + \".seed*\"))\n self.pseudo_hdf5_files = sorted(glob(pseudo_hdf5 + \".seed*\"))\n self.data_size = data_size\n self.current_index = 0\n self.max_length = len(self.pseudo_json_files)\n self.indices = list(range(self.max_length))\n\n assert self.max_length == len(self.pseudo_hdf5_files)\n print(\"# Number of pseudo files: {}\".format(self.max_length), flush=True)\n for jf, hf in zip(self.pseudo_json_files, self.pseudo_hdf5_files):\n print(\"\\t{}, {}\".format(path.basename(jf), path.basename(hf)), flush=True)\n\n self.data_train = load_dataset(self.train_json, self.data_size)\n random.shuffle(self.indices)\n\n def create_dataset(self):\n if self.current_index >= self.max_length:\n random.shuffle(self.indices)\n self.current_index = 0\n pseudo_json_file = self.pseudo_json_files[self.current_index]\n pseudo_hdf5_file = self.pseudo_hdf5_files[self.current_index]\n basename = path.basename(pseudo_json_file).replace(\".jsonl\", \"\")\n\n print(\"# Load: {}\\n\\tCurrent Index: {}\".format(basename, self.current_index), flush=True)\n assert basename == path.basename(pseudo_hdf5_file).replace(\".hdf5\", \"\")\n\n data_pseudo = load_dataset(pseudo_json_file, self.data_size)\n dataset = self.data_train + data_pseudo\n self.current_index += 1\n\n return dataset, pseudo_hdf5_file\n\n\ndef train(out_dir, data_train, data_dev, model, model_id, epoch, lr_start, lr_min,\n half_checkpoint, bert_vec_holder, train_type=\"\"):\n len_train = len(data_train)\n len_dev = len(data_dev)\n\n early_stopping_thres = 4\n early_stopping_count = 0\n best_performance = -1.0\n best_epoch = 0\n best_thres = None\n best_lr = lr_start\n lr = lr_start\n lr_reduce_factor = 0.5\n lr_epsilon = lr_min * 1e-4\n\n loss_function = nn.NLLLoss(ignore_index=4)\n\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr_start)\n losses = []\n\n thres_set_ga = list(map(lambda n: n / 100.0, list(range(10, 71, 1))))\n thres_set_wo = list(map(lambda n: n / 100.0, list(range(20, 86, 1))))\n thres_set_ni = list(map(lambda n: n / 100.0, list(range(0, 61, 1))))\n thres_lists = [thres_set_ga, thres_set_wo, thres_set_ni]\n labels = [\"ga\", \"wo\", \"ni\", \"all\"]\n\n best_f1_history = []\n\n if path.exists(out_dir + \"/pretrained_model-\" + model_id + \".h5\"):\n print(\"-\" * 10 + \" Original data \" + \"-\" * 10, flush=True)\n\n # Load model\n print(\"# Load pre-trained model\", flush=True)\n model.load_state_dict(torch.load(out_dir + \"/pretrained_model-\" + model_id + \".h5\"))\n optimizer = 
torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)\n\n        # Save model\n        torch.save(model.state_dict(), out_dir + \"/{}model-\".format(train_type) + model_id + \".h5\")\n\n        # Get Start F1\n        print(\"# First test\", flush=True)\n        data_dev.create_batches()\n        model.eval()\n        thres, obj_score, num_test_batch_instance = packed_evaluate_multiclass_without_none(model, data_dev, len_dev,\n                                                                                            labels, thres_lists)\n        best_thres = thres\n        best_performance = obj_score * 100\n        print(\"## Init F1: {}\".format(best_performance), flush=True)\n    else:\n        print(\"-\" * 10 + \" Pseudo data \" + \"-\" * 10, flush=True)\n\n    for ep in range(epoch):\n        total_loss = torch.Tensor([0])\n        early_stopping_count += 1\n\n        print(model_id, 'epoch {0}'.format(ep + 1), flush=True)\n\n        if bert_vec_holder is not None and ep != 0:\n            dataset, pseudo_hdf5 = bert_vec_holder.create_dataset()\n            data_train.reset_dataset_with_pseudo(dataset, pseudo_hdf5)\n\n        print('# Train...', flush=True)\n        data_train.create_batches()\n        model.train()\n        for n, (xss, yss) in tqdm(enumerate(data_train), total=len_train, mininterval=5):\n            if xss[0].size(1) > MAX_SENTENCE_LENGTH:\n                continue\n\n            optimizer.zero_grad()\n            model.zero_grad()\n\n            if torch.cuda.is_available():\n                yss = [Variable(ys).cuda() for ys in yss]\n            else:\n                yss = [Variable(ys) for ys in yss]\n\n            scores = model(xss)\n\n            loss = 0\n            for i in range(len(yss)):\n                loss += loss_function(scores[i], yss[i])\n            loss.backward()\n            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n            optimizer.step()\n            total_loss += loss.data.cpu()\n\n            if half_checkpoint and n == int(len_train / 2):\n                print(\"## loss:\", total_loss[0], \"lr:\", lr)\n                losses.append(total_loss)\n                print(\"\", flush=True)\n                print('# Test... (half point)', flush=True)\n                data_dev.create_batches()\n                model.eval()\n                thres, obj_score, num_test_batch_instance = packed_evaluate_multiclass_without_none(model, data_dev,\n                                                                                                    len_dev,\n                                                                                                    labels, thres_lists)\n                f = obj_score * 100\n                if f > best_performance:\n                    best_performance = f\n                    early_stopping_count = 0\n                    best_epoch = ep + 1\n                    best_thres = thres\n                    best_lr = lr\n                    print(\"## save model\", flush=True)\n                    torch.save(model.state_dict(), out_dir + \"/{}model-\".format(train_type) + model_id + \".h5\")\n\n                    best_f1_history.append((best_performance, best_epoch))\n\n                elif early_stopping_count >= early_stopping_thres:\n                    # break\n                    if lr > lr_min + lr_epsilon:\n                        new_lr = lr * lr_reduce_factor\n                        lr = max(new_lr, lr_min)\n                        print(\"load model: epoch{0}\".format(best_epoch), flush=True)\n                        model.load_state_dict(torch.load(out_dir + \"/{}model-\".format(train_type) + model_id + \".h5\"))\n                        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)\n                        early_stopping_count = 0\n                    else:\n                        break\n                print(model_id, \"\\tcurrent best epoch\", best_epoch, \"\\t\", best_thres, \"\\t\", \"lr:\", best_lr, \"\\t\",\n                      \"f:\", best_performance)\n                total_loss = torch.Tensor([0])\n                early_stopping_count += 1\n\n                print(model_id, 'epoch {0}'.format(ep + 1), flush=True)\n\n                print('# Train...', flush=True)\n                data_train.create_batches()\n                model.train()\n\n        print(\"## loss:\", total_loss[0], \"lr:\", lr)\n        losses.append(total_loss)\n        print(\"\", flush=True)\n        print('# Test...', flush=True)\n\n        data_dev.create_batches()\n        model.eval()\n        thres, obj_score, num_test_batch_instance = packed_evaluate_multiclass_without_none(model, data_dev, len_dev,\n                                                                                            labels, thres_lists)\n        f = obj_score * 100\n        if f > best_performance:\n            best_performance = f\n            early_stopping_count = 0\n            best_epoch = ep + 1\n            best_thres
= thres\n            best_lr = lr\n            print(\"## save model\", flush=True)\n            torch.save(model.state_dict(), out_dir + \"/{}model-\".format(train_type) + model_id + \".h5\")\n\n            best_f1_history.append((best_performance, best_epoch))\n\n        elif early_stopping_count >= early_stopping_thres:\n            # break\n            if lr > lr_min + lr_epsilon:\n                new_lr = lr * lr_reduce_factor\n                lr = max(new_lr, lr_min)\n                print(\"load model: epoch{0}\".format(best_epoch), flush=True)\n                model.load_state_dict(torch.load(out_dir + \"/{}model-\".format(train_type) + model_id + \".h5\"))\n                optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)\n                early_stopping_count = 0\n            else:\n                break\n        print(model_id, \"\\tcurrent best epoch\", best_epoch, \"\\t\", best_thres, \"\\t\", \"lr:\", best_lr, \"\\t\",\n              \"f:\", best_performance)\n\n    print(model_id, \"\\tbest in epoch\", best_epoch, \"\\t\", best_thres, \"\\t\", \"lr:\", best_lr, \"\\t\",\n          \"f:\", best_performance)\n\n    print(\"Update History: {}\".format(best_f1_history), flush=True)\n\n    return best_thres\n\n\ndef extract_name(file):\n    name = path.basename(file)\n    name, _ = name.split(\".\", 1)\n\n    return name\n\n\ndef create_model_id(args):\n    model_id = datetime.today().strftime(\"%m%d%H%M\")\n    model_id += \"-\" + extract_name(args.pseudo) if args.pseudo else \"\"\n    model_id += \"-\" + extract_name(args.test) if args.test else \"\"\n    model_id += \"-\" + args.train_method\n    model_id += \"-wiki\" if args.wiki else \"\"\n    model_id += \"-bert\" if args.bert else \"\"\n    model_id += \"-mp\" if args.multi_predicate else \"\"\n    model_id += \"-zero_drop\" if args.zero_drop else \"\"\n    model_id += \"-no_mask\" if args.mapping_pseudo_train else \"\"\n    model_id += \"-loss_stop\" if args.loss_stop else \"\"\n    model_id += \"-half_point\" if args.half_checkpoint else \"\"\n    model_id += \"-lr\" + str(args.lr)\n    model_id += \"-plr\" + str(args.pseudo_lr) if args.train_method == \"pre-train\" else \"\"\n    model_id += \"-embdrop\" + str(args.embed_dropout)\n    model_id += \"-seed\" + str(args.seed)\n    model_id += \"-\" + args.comment if args.comment else \"\"\n\n    return model_id\n\n\ndef main():\n    parser = create_arg_parser()\n    args = parser.parse_args()\n    if args.pseudo == \"None\":\n        args.pseudo = None\n\n    if not path.exists(args.out_dir):\n        os.mkdir(args.out_dir)\n        print(\"# Make directory: {}\".format(args.out_dir))\n\n    # Log\n    model_id = create_model_id(args)\n    log_dir = path.join(args.out_dir, model_id)\n    if path.exists(log_dir):\n        raise FileExistsError(\"'{}' Already exists.\".format(log_dir))\n    os.mkdir(log_dir)\n    print(log_dir)\n    set_log_file(log_dir, \"train\", model_id)\n    log = StandardLogger(path.join(log_dir, \"log-\" + model_id + \".txt\"))\n    log.write(args=args, comment=model_id)\n    write_args_log(args, path.join(log_dir, \"args.json\"))\n\n    # Seed\n    torch.manual_seed(args.seed)\n\n    # Load Dataset\n    data_train = load_dataset(args.train, args.data_size)\n    data_pseudo = load_dataset(args.pseudo, args.data_size) if args.pseudo else []\n    if args.train_method == \"concat\":\n        data_train += data_pseudo\n    data_dev = load_dataset(args.dev, 100)\n\n    data_train = NtcBucketIterator(data_train, args.batch_size, shuffle=True, multi_predicate=args.multi_predicate,\n                                   zero_drop=args.zero_drop, bert=args.bert, loss_stop=args.loss_stop,\n                                   load_cpu=args.load_cpu, mapping_pseudo_train=args.mapping_pseudo_train,\n                                   bert_embed_file=args.train_bert_embed_file,\n                                   pseudo_bert_embed_file=args.pseudo_bert_embed_file)\n    data_dev = NtcBucketIterator(data_dev, args.batch_size,
multi_predicate=args.multi_predicate, bert=args.bert,\n                                 load_cpu=args.load_cpu, bert_embed_file=args.dev_bert_embed_file)\n    if args.train_method == \"pre-train\":\n        data_pseudo = NtcBucketIterator(data_pseudo, args.batch_size, shuffle=True,\n                                        multi_predicate=args.multi_predicate,\n                                        zero_drop=args.zero_drop, bert=args.bert, loss_stop=args.loss_stop,\n                                        load_cpu=args.load_cpu, mapping_pseudo_train=args.mapping_pseudo_train,\n                                        pseudo_bert_embed_file=args.pseudo_bert_embed_file)\n\n    bert_vec_holder = None\n    if args.epoch_shuffle:\n        bert_vec_holder = BertVecHolder(train_json=args.train,\n                                        train_hdf5=args.train_bert_embed_file,\n                                        pseudo_json=args.pseudo,\n                                        pseudo_hdf5=args.pseudo_bert_embed_file,\n                                        data_size=args.data_size)\n\n    word_embedding_matrix = pretrained_word_vecs(args.wiki_embed_dir, \"/wordIndex.txt\") if args.wiki else None\n    model = PackedE2EStackedBiRNN(hidden_dim=args.hidden_dim,\n                                  n_layers=args.n_layers,\n                                  out_dim=4,\n                                  embedding_matrix=word_embedding_matrix,\n                                  fixed_word_vec=args.fixed_word_vec,\n                                  multi_predicate=args.multi_predicate,\n                                  use_wiki_vec=args.wiki,\n                                  use_bert_vec=args.bert,\n                                  bert_dim=BERT_DIM,\n                                  train_bert_embed_file=args.train_bert_embed_file,\n                                  dev_bert_embed_file=args.dev_bert_embed_file,\n                                  pseudo_bert_embed_file=args.pseudo_bert_embed_file,\n                                  load_cpu=args.load_cpu,\n                                  dropout=args.dropout,\n                                  embed_dropout=args.embed_dropout)\n\n    if torch.cuda.is_available():\n        model = model.cuda()\n\n    # Training Method\n    print(\"# Training Method: {}\".format(args.train_method), flush=True)\n    if args.train_method == \"pre-train\":\n        pretrain_best_thresh = train(log_dir, data_pseudo, data_dev, model, model_id, args.max_epoch,\n                                     args.pseudo_lr, args.pseudo_lr / 20,\n                                     args.half_checkpoint, bert_vec_holder, \"pretrained_\")\n        with open(path.join(log_dir, \"best.pretrain_thresh\"), \"w\") as fo:\n            json.dump(pretrain_best_thresh, fo)\n    best_thresh = train(log_dir, data_train, data_dev, model, model_id, args.max_epoch,\n                        args.lr, args.lr / 20, args.half_checkpoint, bert_vec_holder)\n    with open(path.join(log_dir, \"best.thresh\"), \"w\") as fo:\n        json.dump(best_thresh, fo)\n    log.write_endtime()\n\n    if args.decode:\n        data_decode = load_dataset(args.test, 100) if args.test else load_dataset(args.dev, 100)\n        data_decode = NtcBucketIterator(data_decode, args.batch_size, bert=args.bert,\n                                        multi_predicate=args.multi_predicate,\n                                        decode=True, load_cpu=args.load_cpu,\n                                        bert_embed_file=args.test_bert_embed_file if args.test else args.dev_bert_embed_file)\n        tag = \"test\" if args.test else \"dev\"\n        if args.train_method == \"pre-train\":\n            new_model_id = model_id + \"-\" + \"-\".join(str(i) for i in pretrain_best_thresh)\n            model.load_state_dict(torch.load(log_dir + \"/pretrained_model-\" + model_id + \".h5\"))\n            if args.test:\n                model.dev_bert_vec = h5py.File(args.test_bert_embed_file, \"r\")\n            decode(log_dir, data_decode, \"pretrained_\" + tag, model, new_model_id, pretrain_best_thresh)\n        new_model_id = model_id + \"-\" + \"-\".join(str(i) for i in best_thresh)\n        model.load_state_dict(torch.load(log_dir + \"/model-\" + model_id + \".h5\"))\n        decode(log_dir, data_decode, tag, model, new_model_id, best_thresh)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/jp_pas/train_pseudo.py","file_name":"train_pseudo.py","file_ext":"py","file_size_in_byte":19564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"294036847","text":"\"\"\"Creates all user based routes.\"\"\"\nimport os\n\nfrom flask import Response, request\nfrom flask_cors import
cross_origin\n\nfrom devcenter.requests.user import (\n\tset_navbar_item, get_navbar_items,\n\tget_jql_links, get_statuses, get_ticket_history\n)\nfrom devcenter.requests.jira import get_profile\n\n\ndef define_routes(app, g):\n\t\"\"\"Creates all user based routes.\"\"\"\n\ttry:\n\t\tAPP_NAME = os.environ['APP_NAME']\n\texcept KeyError:\n\t\tAPP_NAME = ''\n\n\t@app.route(f'/{APP_NAME}/jira/profile/<username>')\n\t@cross_origin()\n\tdef get_profile_route(username):\n\t\tdata = {\"cred_hash\": g.cred_hash, \"username\": username}\n\t\tdata = get_profile(data=data)\n\t\treturn Response(data, mimetype='application/json')\n\n\t@app.route(f'/{APP_NAME}/skipcreds/navbar', methods=['GET', 'POST'])\n\t@cross_origin()\n\tdef get_navbar():\n\t\tresponse = {'status': False, 'data': ''}\n\n\t\tif request.method == 'POST':\n\t\t\tdata = request.get_json()\n\t\t\tresponse = set_navbar_item(data=data.get('item', {}))\n\t\telse:\n\t\t\tresponse = get_navbar_items()\n\n\t\treturn Response(response, mimetype='application/json')\n\n\t@app.route(f'/{APP_NAME}/skipcreds/jql_links', methods=['GET'])\n\t@cross_origin()\n\tdef get_jql_links_route():\n\t\tresponse = get_jql_links()\n\t\treturn Response(response, mimetype='application/json')\n\n\t@app.route(f'/{APP_NAME}/skipcreds/statuses', methods=['GET'])\n\t@cross_origin()\n\tdef get_statuses_request():\n\t\tresponse = get_statuses()\n\t\treturn Response(response, mimetype='application/json')\n\n\t@app.route(f'/{APP_NAME}/skipcreds/ticket_history', methods=['GET'])\n\t@cross_origin()\n\tdef get_ticket_history_request():\n\t\tresponse = get_ticket_history()\n\t\treturn Response(response, mimetype='application/json')\n","sub_path":"devcenter/routes/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"323501882","text":"def list_reverse(node_head):\r\n    current_node = node_head\r\n    previous_node = None\r\n    future_node = None\r\n\r\n    while current_node is not None:\r\n        future_node = current_node.next_node\r\n        current_node.next_node = previous_node\r\n        previous_node = current_node\r\n        current_node = future_node\r\n    new_head = previous_node\r\n    return new_head\r\n    \r\ndef list_print(node_head):\r\n    current_node = node_head\r\n    while current_node is not None:\r\n        print(current_node.value)\r\n        current_node = current_node.next_node\r\n    \r\nclass linked_list_node:\r\n    def __init__(self, value, next_node=None):\r\n        self.value = value\r\n        self.next_node = next_node\r\n\r\nnode1 = linked_list_node(3) # \"3\"\r\nnode2 = linked_list_node(7) # \"7\"\r\nnode3 = linked_list_node(10) # \"10\"\r\nnode4 = linked_list_node(11)\r\nnode5 = linked_list_node(12)\r\nnode6 = linked_list_node(13)\r\n\r\nnode1.next_node = node2\r\nnode2.next_node = node3\r\nnode3.next_node = node4\r\nnode4.next_node = node5\r\nnode5.next_node = node6\r\n\r\nprint(\"original:\")\r\nlist_print(node1)\r\nnew_node1 = list_reverse(node1)\r\nprint(\"reversed:\")\r\nlist_print(new_node1)\r\n\r\n\r\n\r\n\r\n","sub_path":"linkedlist_trial.py","file_name":"linkedlist_trial.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"578941209","text":"#!/usr/bin/env python\n\nimport logging\nimport threading\nimport sortedcontainers\nimport datetime\n\nclass wtimer():\n    def __init__(self):\n\n        self.last_state = None\n        self.callback = None\n        self.timeouts = sortedcontainers.SortedDict()\n\n        self.timer =
threading.Timer(1, self.on_timeout)\n        self.timer.start()\n\n    def exit(self):\n        self.timer.cancel()\n    \n    def set_callback(self, callback):\n        self.callback = callback\n    \n    def on_timeout(self):\n        # restart the timer\n        self.timer = threading.Timer(1, self.on_timeout)\n        self.timer.start()\n        \n        if (self.callback == None):\n            return\n\n        if len(self.timeouts) == 0:\n            return\n\n        now = datetime.datetime.now()\n        nowsec = now.weekday() * 86400 + now.hour * 3600 + now.minute * 60 + now.second\n\n        ind = self.timeouts.bisect(nowsec)\n        tlen = len(self.timeouts)\n        # if current time is bigger than last in timeouts () or\n        # lower than first one then take value from last timeout\n        if ind == tlen or ind == 0:\n            ind = tlen - 1\n        else:\n            # else take most recent timeout\n            ind -= 1\n        \n        new_state = self.timeouts.get(self.timeouts.iloc[ind])\n        if new_state != self.last_state:\n            self.last_state = new_state\n            self.callback(new_state)\n    \n    def add(self, day_of_week, time_of_day, state):\n        l = time_of_day.split(':')\n        todsec = int(l[0]) * 3600 + int(l[1]) * 60\n        if len(l) > 2:\n            todsec += int(l[2])\n        timeout = day_of_week*86400 + todsec\n        self.timeouts[timeout] = state\n    ","sub_path":"io-control/wtimer.py","file_name":"wtimer.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"491582372","text":"import time\nfrom binary_search_tree import BinarySearchTree\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\")  # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\")  # List containing 10000 names\nf.close()\n\nduplicates = []\nbinarySearchTree = None\n\nfor nameOne in names_1:\n    if binarySearchTree is None:\n        binarySearchTree = BinarySearchTree(nameOne)\n    else:\n        binarySearchTree.insert(nameOne)\nfor nameTwo in names_2:\n    if binarySearchTree.contains(nameTwo):\n        duplicates.append(nameTwo)\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"30073343","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\n# Load data from CSV file\ndef load_data(fp):\n\n\tdata = pd.read_csv(fp)\n\tfilenames = data['file']\n\tY = data['age']\n\tX = data.drop(['age','gender','file','id'], axis=1)\n\n\treturn X, Y, filenames\n\n\n# Saves predictions\ndef save_predictions(filenames, predictions, output_path):\n\tpred_df = pd.DataFrame({'file_id': filenames, 'predictions': predictions})\n\tpred_df.to_csv(output_path, index=False)\n\n\n# plot training history\ndef plot_training_history(epochs, plottable, ylabel='', name=''):\n\tplt.clf()\n\tplt.xlabel('Epoch')\n\tplt.ylabel(ylabel)\n\tif len(plottable) == 1:\n\t\tplt.plot(np.arange(epochs), plottable[0], label='Loss')\n\telif len(plottable) == 2:\n\t\tplt.plot(np.arange(epochs), plottable[0], label='Acc')\n\t\tplt.plot(np.arange(epochs), plottable[1], label='UAR')\n\telse:\n\t\traise ValueError('plottable passed to plot function has incorrect dim.')\n\tplt.legend()\n\tplt.savefig('%s.png' % (name),
bbox_inches='tight')\n","sub_path":"age/lib/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"351727175","text":"from puzzle import Puzzle\r\nclass WordLadderPuzzle(Puzzle):\r\n    \"\"\"\r\n    A word-ladder puzzle that may be solved, unsolved, or even unsolvable.\r\n    \"\"\"\r\n\r\n    def __init__(self, from_word, to_word, ws):\r\n        \"\"\"\r\n        Create a new word-ladder puzzle with the aim of stepping\r\n        from from_word to to_word using words in ws, changing one\r\n        character at each step.\r\n\r\n        @type from_word: str\r\n        @type to_word: str\r\n        @type ws: set[str]\r\n        @rtype: None\r\n        \"\"\"\r\n        (self._from_word, self._to_word, self._word_set) = (from_word,\r\n                                                            to_word, ws)\r\n        # set of characters to use for 1-character changes\r\n        self._chars = \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\r\n    def __eq__(self, other):\r\n        '''\r\n        Returns True if the two word ladder puzzle objects are equal\r\n        @type self: WordLadderPuzzle\r\n        @type other: WordLadderPuzzle | Any\r\n        @rtype: bool\r\n\r\n        >>> from_word1 = \"same\"\r\n        >>> to_word1 = \"cost\"\r\n        >>> word_set1 = {\"word1\",\"word2\",\"word3\",\"word4\",\"word5\",\"word6\",\"word7\"}\r\n        >>> word_ladder1 = WordLadderPuzzle(from_word1,to_word1,word_set1)\r\n        >>> from_word2 = \"same\"\r\n        >>> to_word2 = \"cost\"\r\n        >>> word_set2 = {\"word1\",\"word2\",\"word3\",\"word4\",\"word5\",\"word6\",\"word7\"}\r\n        >>> word_ladder2 = WordLadderPuzzle(from_word2,to_word2,word_set2)\r\n        >>> word_ladder1.__eq__(word_ladder2)\r\n        True\r\n        >>> from_word3 = \"poop\"\r\n        >>> to_word3 = \"hell\"\r\n        >>> word_set3 = {\"word1\",\"word2\",\"word3\",\"word4\",\"word5\",\"word6\",\"word7\"}\r\n        >>> word_ladder3 = WordLadderPuzzle(from_word3,to_word3,word_set3)\r\n        >>> word_ladder1.__eq__(word_ladder3)\r\n        False\r\n        '''\r\n        return (type(self) == type(other) and\r\n                self._from_word == other._from_word and\r\n                self._to_word == other._to_word and\r\n                #Returns True if all parameters are equal\r\n                self._word_set == other._word_set)\r\n\r\n    def __str__(self):\r\n        '''\r\n        Returns a string representation of a WordLadderPuzzle object\r\n        @type self: WordLadderPuzzle\r\n\r\n        >>> from1 = \"same\"\r\n        >>> to1 = \"cost\"\r\n        >>> word_set1 = {\"word1\",\"word2\",\"word3\",\"word4\",\"word5\",\"word6\",\"word7\"}\r\n        >>> word_ladder = WordLadderPuzzle(from1,to1,word_set1)\r\n        >>> print(word_ladder)\r\n        From same to cost\r\n        '''\r\n        #Formats the ladder's endpoint words for display\r\n        return (\"From {0} to {1}\". format(self._from_word,self._to_word))\r\n\r\n    def extensions(self):\r\n        '''\r\n        Creates a list of all possible words that can be formed by changing\r\n        one letter of from_word.
Then returns WordLadderPuzzle objects using\r\n        the words from the list that was created.\r\n        @type self: WordLadderPuzzle\r\n        @rtype: list[WordLadderPuzzle]\r\n\r\n        >>> from_word1 = \"same\"\r\n        >>> to_word1 = \"cost\"\r\n        >>> word_set1 = {\"same\",\"game\",\"lame\",\"sane\",\"cost\",\"poop\"}\r\n        >>> word_ladder1 = WordLadderPuzzle(from_word1,to_word1,word_set1)\r\n        >>> ext = word_ladder1.extensions()\r\n        >>> print(ext[0])\r\n        From game to cost\r\n        >>> print(ext[1])\r\n        From lame to cost\r\n        >>> print(ext[2])\r\n        From sane to cost\r\n        '''\r\n        options_list = []\r\n        object_list = []\r\n        characters = self._chars\r\n        temp = self._from_word\r\n        #makes sure puzzle is not solved\r\n        if self._from_word != self._to_word:\r\n            options_list.append(temp)\r\n            for letter in range(len(temp)):\r\n                for i in range(len(characters)):\r\n                    #changes every letter in the word to every letter in the alphabet\r\n                    temp = temp[:letter] + characters[i] + temp[letter+1:]\r\n                    if (temp in self._word_set) and (temp not in options_list):\r\n                        #if the word is in the dictionary, appends it to options_list\r\n                        options_list.append(temp)\r\n                temp = self._from_word\r\n        #goes through every word except the first one (to avoid repetition) and creates an object from each\r\n        for word in options_list[1:]:\r\n            object_list.append(WordLadderPuzzle(word,self._to_word,self._word_set))\r\n        return object_list\r\n\r\n    def is_solved(self):\r\n        '''\r\n        If from_word is equal to to_word then the puzzle is solved, therefore returning True\r\n        @type self: WordLadderPuzzle\r\n        @rtype: bool\r\n\r\n        >>> from_word1 = \"same\"\r\n        >>> to_word1 = \"same\"\r\n        >>> word_set1 = {\"word1\",\"word2\",\"word3\",\"word4\",\"word5\",\"word6\",\"word7\"}\r\n        >>> word_ladder1 = WordLadderPuzzle(from_word1,to_word1,word_set1)\r\n        >>> word_ladder1.is_solved()\r\n        True\r\n        >>> from_word2 = \"same\"\r\n        >>> to_word2 = \"cost\"\r\n        >>> word_set2 = {\"word1\",\"word2\",\"word3\",\"word4\",\"word5\",\"word6\",\"word7\"}\r\n        >>> word_ladder2 = WordLadderPuzzle(from_word2,to_word2,word_set2)\r\n        >>> word_ladder2.is_solved()\r\n        False\r\n        '''\r\n        return(self._from_word == self._to_word)\r\n\r\n\r\nif __name__ == '__main__':\r\n    import doctest\r\n    doctest.testmod()\r\n    from puzzle_tools import breadth_first_solve, depth_first_solve\r\n    from time import time\r\n    with open(\"words.txt\", \"r\") as words:\r\n        word_set = set(words.read().split())\r\n    w = WordLadderPuzzle(\"same\", \"cost\", word_set)\r\n    start = time()\r\n    sol = breadth_first_solve(w)\r\n    end = time()\r\n    print(\"Solving word ladder from same->cost\")\r\n    print(\"...using breadth-first-search\")\r\n    print(\"Solutions: {} took {} seconds.\".format(sol, end - start))\r\n    start = time()\r\n    sol = depth_first_solve(w)\r\n    end = time()\r\n    print(\"Solving word ladder from same->cost\")\r\n    print(\"...using depth-first-search\")\r\n    print(\"Solutions: {} took {} seconds.\".format(sol, end - start))\r\n","sub_path":"word_ladder_puzzle.py","file_name":"word_ladder_puzzle.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"184433102","text":"# users/forms.py\nfrom django import forms\nfrom dobwidget import DateOfBirthWidget\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom .models import CustomUser, Userpost, Blog, Expenses\n\nclass CustomUserCreationForm(UserCreationForm):\n\n    class Meta(UserCreationForm.Meta):\n        model = CustomUser\n        fields = ('username', 'name', 'email', 'age', 'gender',
'security_question', 'dp', 'birthday')\n widgets = {\n 'birthday': DateOfBirthWidget(),\n }\n\nclass CustomUserChangeForm(UserChangeForm):\n\n class Meta:\n model = CustomUser\n fields = UserChangeForm.Meta.fields\n\nclass PostForm(forms.ModelForm):\n\n class Meta:\n model = Userpost\n fields = ('title', 'text')\n widgets = {'text': forms.Textarea(attrs={'rows': 25,'cols': 70,'style': 'resize:none;'})}\n\n\nclass BlogForm(forms.ModelForm):\n class Meta:\n model = Blog\n fields = ('blogtitle', 'blogtext')\n widgets = {'blogtext': forms.Textarea(attrs={'rows': 25,'cols': 70,'style': 'resize:none;'})}\n\nclass ExpenseForm(forms.ModelForm):\n class Meta:\n model = Expenses\n fields = ('expensetitle', 'expensetext', 'expensepdf')\n widgets = {'expensetext': forms.Textarea(attrs={'rows': 25, 'cols': 70, 'style': 'resize:none;'})}","sub_path":"users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
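
The get_profile_route view in the devcenter routes record above takes a username argument, which Flask can only supply if the route pattern captures a matching path segment with a URL converter. What follows is a minimal, self-contained sketch of that pattern, not part of the dataset itself; it assumes only that Flask is installed, and the route path, handler name, and JSON payload are illustrative.

from flask import Flask, jsonify

app = Flask(__name__)

# The <username> segment is a URL converter: Flask captures that part of
# the request path and passes it to the view as a keyword argument.
@app.route('/jira/profile/<username>')
def profile(username):
    # Echo the captured path segment back as JSON.
    return jsonify({'username': username})

if __name__ == '__main__':
    # Development server only; for illustration.
    app.run(debug=True)

Requesting /jira/profile/alice against this app returns {"username": "alice"}; this is the mechanism the dataset's get_profile_route handler relies on once its route declares the <username> converter.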