diff --git "a/4900.jsonl" "b/4900.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4900.jsonl"
@@ -0,0 +1,628 @@
+{"seq_id":"279288337","text":"# Copyright Hugh Perkins 2016\n\"\"\"\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport subprocess\nimport pyopencl as cl\nimport pytest\nimport os\nfrom os import path\nfrom test import test_common\n\n\n@pytest.fixture(scope='module')\ndef dotdotdot_cl():\n # lets check it's compileable ll first, using llvm\n ll_filepath = 'test/dotdotdot.ll'\n cl_filepath = 'test/generated/dotdotdot.cl'\n\n print(subprocess.check_output([\n test_common.clang_path,\n '-c', ll_filepath,\n '-O3',\n '-o', '/tmp/~foo'\n ]).decode('utf-8'))\n\n if not path.isdir('test/generated'):\n os.makedirs('test/generated')\n print(subprocess.check_output([\n 'build/ir-to-opencl',\n # '--debug',\n '--inputfile', ll_filepath,\n '--outputfile', cl_filepath,\n '--kernelname', '_Z7test_siPf'\n ]).decode('utf-8'))\n return cl_filepath\n\n\n@pytest.fixture(scope='module')\ndef dotdotdot(context, dotdotdot_cl):\n with open(dotdotdot_cl, 'r') as f:\n sourcecode = f.read()\n\n prog = cl.Program(context, sourcecode).build()\n return prog\n\n\ndef test_program_compiles(dotdotdot):\n pass\n\n\n# def test_copy_float(extract_value, q, float_data, float_data_gpu):\n# extract_value.__getattr__(test_common.mangle('test_floats', ['float *']))(q, (32,), (32,), float_data_gpu)\n# cl.enqueue_copy(q, float_data, float_data_gpu)\n# q.finish()\n# assert float_data[0] == float_data[1]\n","sub_path":"test/test_dotdotdot.py","file_name":"test_dotdotdot.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"168134226","text":"#!/usr/bin/env python3\n\nfrom src.DFS import searchComp\nfrom src.GraphClass import Graph\nfrom src.draw import draw_graph\nfrom src.Hamilton import searchHamiltonPath\nfrom src.GraphicSeries import graphicStringFromList\nfrom src.GraphicSeries import changeRandomEdge\nfrom src.RegularGraph import random_K_RegularGraph\nfrom src.Euler import GenerateEuler\n\n#zadanie 1\n# Sprawdza czy sekwencja liczb naturalnych jest ciągiem graficznym. Konstruuje graf\n# prosty o stopniach wierzchołków zadanych przez wprowadzony ciag.\n# #4332211 \n\nprint('\\n\\n\\n Zadanie 1')\nG1 = graphicStringFromList()\ntry:\n draw_graph(G1, 'zestaw2/przed_zamiana.png')\nexcept ZeroDivisionError:\n print(\"Brak wizualizacji grafu\") \n\n#zadanie 2\n# Randomizuje grafy proste o zadanych stopniach wierzchołków. Operuje na \n# grafie stworzonym w poprzednim zadaniu.
\n \nprint('\\n\\n\\n Zadanie 2')\nG2 = changeRandomEdge(G1, 1)\ntry:\n draw_graph(G2, 'zestaw2/po_zamianie.png')\nexcept: \n print(\"brak wizualizacji grafu\") \n\n\n#zadanie 3\n# Znajduje największą wspólną składową w grafie\n# Wypisanie listy kolejno odwiedzonych wierzcholkow\nprint('\\n\\n\\n Zadanie 3')\nG3 = Graph()\nG3.importFromFile(\"zestaw2/adj.txt\")\nprint(searchComp(G3))\ndraw_graph(G3, 'zestaw2/najwieksza_składowa.png')\n\n#zad 4\n# Generuje graf Eulerowski i znajduje sciezke eulera\n# Argumentami jest zakres, z jakiego ma byc wylosowana liczba wierzcholkow\n\n\nG4 = GenerateEuler(5, 6)\ntry:\n draw_graph(G4, 'zestaw2/graf_Eulerowski.png')\nexcept: \n print(\"brak wizualizacji grafu\") \n\n\n#zadanie 5\n# Generuje graf losowy k-regularny\n\nprint('\\n\\n\\n Zadanie 5')\nrandG = random_K_RegularGraph()\ndraw_graph(randG, 'zestaw2/rand_k_reg.png')\n\n#zadanie 6\n# Sprawdza czy graf hamiltonowski. Wypisuje informacje czy hamiltonowski oraz ścieżkę lub cykl. \n# Szuka od zadanego wierzcholka, wyisuje kolejno odwiedzone\n\nprint('\\n\\n\\n Zadanie 6')\nprint(searchHamiltonPath(G3, 0))\n\n\n\n\n\n","sub_path":"project_2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"359639649","text":"# Text Classification\n\n#%%\nimport pickle\nimport pandas as pd\nimport numpy as np\n\nfrom pathlib import Path\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\ndata_path = Path('data')\nmodel_path = Path('models')\nsource_path = data_path / 'source/reddit/'\ninterim_path = data_path / 'interim/'\nprocessed_path = data_path / 'processed/'\n\ndef task3(param1):\n \"\"\"Task 3 takes the training data from Task 2 and runs through it through\n a data model pipeline. Here we are testing Naive Bayes (MultinomialNB) and\n Logistic Regression with both and L1 and L2 penalty and some hyperparameter\n optimization searching. The resulting model is outputted to the model folder.
\n\n Args:\n param1 (str): Filename to process\n\n Returns:\n bool: True if the process succeeds in writing the output files,\n False if it fails.\n \"\"\"\n\n # Load X_Training Data\n pickle_in_X_train_path = interim_path / (param1 + '_X_train.pkl')\n pickle_in_features = open(pickle_in_X_train_path, 'rb')\n features = pickle.load(pickle_in_features)\n pickle_in_features.close()\n\n # Load y_Training Data\n pickle_in_y_train_path = interim_path / (param1 + '_y_train.pkl')\n pickle_in_targets = open(pickle_in_y_train_path, 'rb')\n targets = pickle.load(pickle_in_targets)\n pickle_in_targets.close()\n\n # Create a pipeline\n pipe = Pipeline([(\"classifier\", LogisticRegression())])\n\n # Create dictionary with candidate learning algorithms and their hyperparameters\n search_space = [{\"classifier\": [LogisticRegression()],\n \"classifier__penalty\": ['l1', 'l2'],\n \"classifier__C\": np.logspace(0, 4, 10)},\n {\"classifier\": [MultinomialNB()]}]\n\n # Create grid search\n gridsearch = GridSearchCV(pipe, search_space, cv=3, verbose=0)\n\n # Fit grid search\n model = gridsearch.fit(features, targets)\n\n # Assign pickle path and param1 for model and training data.\n pickle_path_model = model_path / (param1 + '_model.pkl')\n\n try:\n # Create / Open pickle files\n pickle_out_model = open(pickle_path_model, \"wb\")\n pickle.dump(model, pickle_out_model)\n pickle_out_model.close()\n\n except:\n return False\n\n else:\n pickle_in_X_train_path.replace(processed_path / (param1 + '_X_train.pkl'))\n pickle_in_y_train_path.replace(processed_path / (param1 + '_y_train.pkl'))\n return True\n\n#%%\ntask3('controversial-comments')\n\n#%%\n","sub_path":"dsc550/workflows/tasks/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"297005098","text":"import coreapi\nimport coreschema\nfrom django.db.models.expressions import OrderBy, RawSQL\nfrom django.utils.dateparse import parse_date\nfrom rest_framework import filters\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.filters import OrderingFilter\nfrom url_filter.integrations.drf import DjangoFilterBackend\n\n\nclass JSONFieldOrderingFilter(OrderingFilter):\n\n def get_ordering(self, request, queryset, view):\n ordering = super().get_ordering(request, queryset, view)\n\n if not ordering:\n ordering = []\n\n params = request.query_params.get(self.ordering_param)\n if params:\n fields = [param.strip() for param in params.split(',')]\n for field in fields:\n json_nested = field.split('__')\n model_field = json_nested.pop(0)\n descending = False\n\n if (model_field in ordering\n or not self.remove_invalid_fields(queryset,\n [model_field, ],\n view,\n request)):\n # The model_field must be an authorized field\n continue\n\n if model_field.startswith('-'):\n descending = True\n model_field = model_field[1:]\n\n tpl = model_field + ''.join(\n ['->>%s' for x in range(len(json_nested))])\n\n ordering.append(\n OrderBy(RawSQL('lower({})'.format(tpl), json_nested),\n descending=descending))\n\n if not ordering:\n ordering = None\n\n return ordering\n\n\nclass DateFilterBackend(filters.BaseFilterBackend):\n \"\"\"\n Filter from date to date.\n\n You must specify which field to search on with `date_search_field` in\n your view.\n\n Example:\n date_search_field = 'created_at'\n \"\"\"\n\n def get_schema_fields(self, view):\n super().get_schema_fields(view)
\n search_field = getattr(view, 'date_search_field', None)\n return [\n coreapi.Field(\n name='date_from',\n required=False,\n location='query',\n schema=coreschema.String(\n title=\"Begin date\",\n description=f\"Begin date for {search_field}\",\n pattern='[0-9]{4}-[0-9]{2}-[0-9]{2}',\n ),\n ),\n coreapi.Field(\n name='date_to',\n required=False,\n location='query',\n schema=coreschema.String(\n title=\"End date\",\n description=f\"End date for {search_field}\",\n pattern='[0-9]{4}-[0-9]{2}-[0-9]{2}',\n ),\n ),\n ]\n\n def filter_queryset(self, request, queryset, view):\n search_field = getattr(view, 'date_search_field', None)\n date_from = self.parse_date(request.GET.get('date_from', None))\n date_to = self.parse_date(request.GET.get('date_to', None))\n\n if date_from and date_to and date_from > date_to:\n raise ValidationError\n\n if date_from is not None and date_to is not None:\n queryset = queryset.filter(\n **{f'{search_field}__range': (date_from, date_to)}\n )\n elif date_from is not None:\n queryset = queryset.filter(**{f'{search_field}__gte': date_from})\n elif date_to is not None:\n queryset = queryset.filter(**{f'{search_field}__lte': date_to})\n\n return queryset\n\n @staticmethod\n def parse_date(value):\n \"\"\"\n Shamelessly taken from DateField.to_python\n\n :param value:\n :return:\n \"\"\"\n if value is None:\n return value\n\n try:\n parsed = parse_date(value)\n if parsed is not None:\n return parsed\n except ValueError:\n raise ValidationError\n\n raise ValidationError\n\n\nclass SchemaAwareDjangoFilterBackend(DjangoFilterBackend):\n def get_schema_fields(self, view):\n \"\"\"\n Get coreapi filter definitions\n\n Returns all schemas defined in filter_fields_schema attribute.\n \"\"\"\n super().get_schema_fields(view)\n return getattr(view, 'filter_fields_schema', [])\n","sub_path":"terra_utils/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"565689450","text":"from pandas import DataFrame\n\ndef join_and_keep_order(left, right, remove_duplicates, keep='first', **kwargs):\n\t\"\"\"\n\t:type left: DataFrame\n\t:type right: DataFrame\n\t:rtype: DataFrame\n\t\"\"\"\n\tleft = left.copy()\n\tright = right.copy()\n\tleft['_left_id'] = range(left.shape[0])\n\tright['_right_id'] = range(right.shape[0])\n\tresult = left.merge(right=right, **kwargs)\n\tresult.sort_values(axis='index', by=['_left_id', '_right_id'], inplace=True)\n\tif remove_duplicates: result = result[(~result['_left_id'].duplicated(keep=keep)) & (~result['_right_id'].duplicated(keep=keep))]\n\treturn result.drop(columns=['_left_id', '_right_id'])\n\n\ndef join_wisely(left, right, remove_duplicates=True, echo=False, **kwargs):\n\t\"\"\"\n\tjoins two dataframes and returns a dictionary with 3 members: left_only, right_only, and both (the results of the two joins)\n\t:type left: DataFrame\n\t:type right: DataFrame\n\t:type kwargs: dict\n\t:rtype: dict of DataFrames\n\t\"\"\"\n\tleft=left.copy()\n\tright=right.copy()\n\n\tleft['_left_id'] = range(left.shape[0])\n\tright['_right_id'] = range(right.shape[0])\n\n\tboth_data = left.merge(right=right, how='inner', **kwargs)\n\tif remove_duplicates:\n\t\tboth_data.sort_values(axis=0, by=['_left_id', '_right_id'], inplace=True)\n\t\tleft_id_duplicated = both_data._left_id.duplicated(keep='first')\n\t\tright_id_duplicated = both_data._right_id.duplicated(keep='first')\n\t\tboth_data = both_data[~left_id_duplicated & ~right_id_duplicated]
\n\tleft_only_data = left[~left['_left_id'].isin(both_data['_left_id'])].copy()\n\tright_only_data = right[~right['_right_id'].isin(both_data['_right_id'])].copy()\n\n\tboth_data.drop(labels=['_left_id', '_right_id'], axis=1, inplace=True)\n\tleft_only_data.drop(labels='_left_id', axis=1, inplace=True)\n\tright_only_data.drop(labels='_right_id', axis=1, inplace=True)\n\tif echo:\n\t\tprint(f'left:{left.shape}, right:{right.shape}\\nboth:{both_data.shape}, left_only:{left_only_data.shape}, right_only:{right_only_data.shape}')\n\n\treturn {'both':both_data, 'left_only':left_only_data, 'right_only':right_only_data}\n\n","sub_path":"ravenclaw/wrangling/join_wisely.py","file_name":"join_wisely.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"41347603","text":"'''\nScript to multiply Qbits together with pauli gates specifically 3 qubit combinations\n'''\n\nimport numpy as np\n\ndef tripleKron(A,B,C):\n return np.kron(A,np.kron(B,C))\n\npx = np.array([[0, 1], [1, 0]], dtype=np.complex128)\npy = np.array([[0, -1j], [1j, 0]], dtype=np.complex128)\npz = np.array([[1, 0], [0, -1]], dtype=np.complex128)\nid = np.array([[1, 0], [0, 1]], dtype=np.complex128)\n\nq1 = np.array([0,1])\nq2 = np.array([0,1])\nq3 = np.array([0,1])\n\nqBit = np.kron(q1,np.kron(q2,q3))\nprint(qBit)\n\n'''\nPrepare the Toffoli Decomposition\n'''\n\nT1 = tripleKron(id,id,id) * 0.75\nT2 = tripleKron(id,id,pz) * 0.25\nT3 = tripleKron(id,px,id) * 0.25\nT4 = tripleKron(id,px,pz) * -0.25\nT5 = tripleKron(pz,id,id) * 0.25\nT6 = tripleKron(pz,id,pz) * -0.25\nT7 = tripleKron(pz,px,id) * -0.25\nT8 = tripleKron(pz,px,pz) * 0.25\n\nout1 = np.matmul(T1,qBit)\nout2 = np.matmul(T2,qBit)\nout3 = np.matmul(T3,qBit)\nout4 = np.matmul(T4,qBit)\nout5 = np.matmul(T5,qBit)\nout6 = np.matmul(T6,qBit)\nout7 = np.matmul(T7,qBit)\nout8 = np.matmul(T8,qBit)\n\nout = out1 + out2 + out3 + out4 + out5 + out6 + out7 + out8\n\nprint(out1)\nprint(out2)\nprint(out3)\nprint(out4)\nprint(out5)\nprint(out6)\nprint(out7)\nprint(out8)\n\n\n\n","sub_path":"venv/Toffoli/Mltiplication.py","file_name":"Mltiplication.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"432362028","text":"def u_input():\n num = int(input(\"enter the number:\"))\n if num<0:\n print(\"Enter the positive number:\")\n else:\n sum = 0\n while(num>0):\n sum +=num\n num -=1\n print(\"The sum of \",sum)\n \nif __name__ == \"__main__\":\n u_input()","sub_path":"sum_numbers.py","file_name":"sum_numbers.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"450882621","text":"#\n# @lc app=leetcode id=61 lang=python3\n#\n# [61] Rotate List\n#\n# https://leetcode.com/problems/rotate-list/description/\n#\n# algorithms\n# Medium (26.66%)\n# Total Accepted: 181.8K\n# Total Submissions: 681.3K\n# Testcase Example: '[1,2,3,4,5]\\n2'\n#\n# Given a linked list, rotate the list to the right by k places, where k is\n# non-negative.\n# \n# Example 1:\n# \n# \n# Input: 1->2->3->4->5->NULL, k = 2\n# Output: 4->5->1->2->3->NULL\n# Explanation:\n# rotate 1 steps to the right: 5->1->2->3->4->NULL\n# rotate 2 steps to the right: 4->5->1->2->3->NULL\n# \n# \n# Example 2:\n# \n# \n# Input: 0->1->2->NULL, k = 4\n# Output: 2->0->1->NULL\n# Explanation:\n# rotate 1 steps to the right: 2->0->1->NULL\n# rotate 2 steps to the right: 1->2->0->NULL
\n# rotate 3 steps to the right: 0->1->2->NULL\n# rotate 4 steps to the right: 2->0->1->NULL\n# \n#\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\n len_list = 0\n h = head\n while head:\n len_list+=1\n head=head.next\n if len_list == 0: return h\n n = k%len_list\n if n == 0 or len_list == 1: return h\n \n count = 0\n head = h\n new_tail, new_h, tail = None, None, None\n new_h_id = len_list-n\n while head:\n if count == new_h_id - 1:\n new_tail = head\n elif count == new_h_id:\n new_h = head\n if count == len_list-1:\n tail = head\n count += 1\n head = head.next\n # assert tail != None and new_h != None\n new_tail.next = None\n tail.next = h\n return new_h\n\n","sub_path":"61.rotate-list.py","file_name":"61.rotate-list.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"398526559","text":"#!/usr/bin/env python\nimport pika\nimport requests\nimport json\nimport base64\nimport psycopg2\nfrom urllib.request import urlopen\nfrom urllib.parse import urlparse\nfrom os.path import splitext\nimport uuid\n\n# RabbitMQ connection credentials\nHOST = \"3.126.254.35\"\ncredentials = pika.PlainCredentials('signals', 'insecure')\nparameters = pika.ConnectionParameters(HOST, 5672, 'vhost', credentials)\n\n\ndef callback(ch, method, properties, body):\n print(\"Creating a report in Facilitator ....\")\n data = json.loads(body)['signals']\n print(data)\n url = data.pop('url', None)\n img_present = False\n\n if url:\n img_present = True\n img = urlopen(url)\n path = urlparse(url).path\n ext = splitext(path)[1]\n # print(img.headers)\n name = str(uuid.uuid4()) + ext\n \n files = {'report_pic': (name, img.read(), \"multipart/form-data\")}\n\n api = 'http://facilitator.dev.mcc.kpnappfactory.nl/index.php/apinewchanges/submit_report_api'\n \n try:\n if img_present:\n response = requests.post(api, data=data, files=files)\n else:\n response = requests.post(api, data=data)\n\n print(response.status_code)\n # print(response.text)\n if response.status_code == 201:\n print(\"Created.....\")\n\n except:\n print(\"Image is not passed !!!\")\n\n print(\"\\n\")\n\n\nconnection = pika.BlockingConnection(parameters)\nchannel = connection.channel()\n# channel.queue_declare(queue='SEDA-FC', durable=True)\nchannel.basic_consume(queue='SEDA-FC', on_message_callback=callback, auto_ack=True)\n\nprint(' [*] Waiting for messages. To exit press CTRL+C')
\nchannel.start_consuming()","sub_path":"subscribe-SEDA-FC.py","file_name":"subscribe-SEDA-FC.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"148783334","text":"from django.conf.urls import patterns, url\n\n\nurlpatterns = patterns(\n 'apps.sliders.views',\n url(r'^$', 'view_sliders', name='dashboard_sliders'),\n url(r'^/add_slider$', 'add_slider', name='dashboard_add_slider'),\n url(r'^/edit_slider_(?P[0-9]+)$', 'edit_slider', name='dashboard_edit_slider'),\n url(r'^/delete_sliders$', 'delete_sliders', name='dashboard_delete_sliders'),\n url(r'^/add_banner$', 'add_banner', name='dashboard_add_banner'),\n url(r'^/edit_banner_(?P[0-9]+)$', 'edit_banner', name='dashboard_edit_banner'),\n url(r'^/delete_banners$', 'delete_banners', name='dashboard_delete_banners'),\n)\n","sub_path":"apps/sliders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"327272178","text":"#This files contains variables used by the other files\n#A list of xml's for each node\nxml_list = []\n#correspond to n(...)'s format in config_file \nnode_url = []\n#(ip used by node1 to forward to node 2, node1, node2)\nllist = []\n#p(prefix, domain of prefix)\nnames = dict()\n#adjacency matrix of all planned links\nplanned_links = []\n#adjacency matrix of all current links detected\ncurrent_links = []\n#list of prefixes that should be skipped\ninvalid_prefix = []\n#hash table of valid prefixes for fast lookup\n#key: perfix #value: 1 (value is not important)\nvalid_prefix = dict()\n#hash table of valid links for fast lookup\n#key: node1's abbreviated name>>node2's abbreviated name\n#e.g. UA>>UCLA, look at setup function in tbs.py for more details\n#value: ip used for node1 to forward to node2\n#valid_link = dict()\n#hash table of all valid ip's for every node\n#key: ip, value: node's abbreviated name\nip_for_node = dict()\n#contains tuples n(...) in wp_config_file\nwp_node_url = []\n#stores xml's from wget for web proxy nodes\nwp_xml_list = []\n#prefix = key, domain the prefix belongs to = value, \n#look for p(...) in wp_config_file
\nwp_names = dict()\n#urls of port 9696 webproxy\nwp_wp_url = []\n#wget result for urls in wp_wp_url\nwp_wp_res = []\n#more later\n","sub_path":"ndn_prefix/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"650324154","text":"# tkinter AIO Tool 2.0\n\nimport tkinter\nimport subprocess as sp\n\nwindow = tkinter.Tk()\nwindow.title(\"Acdop100's AIO tool launcher for Win10\")\nwindow.geometry(\"400x550\")\n\n\ndef pathwrite():\n newpath = configwindow.get()\n config = open(\"config.txt\", 'a')\n config.write(newpath)\n\n\ndef launchappone():\n sp.run(app1)\n print(app1)\n\n\ndef launchapp3():\n sp.run(app3)\n print(app3)\n\n\ndef launchapp2():\n sp.run(app2)\n print(app2)\n\n\ndef launchapp4():\n sp.run(app4)\n print(app4)\n\n\ndef launchapp5():\n sp.run(app5)\n print(app5)\n\n\ndef refresh():\n apponebtn.configure(text=\"Launch \" + app1)\n app2btn.configure(text=\"Launch \" + app2)\n app3btn.configure(text=\"Launch \" + app3)\n app4btn.configure(text=\"Launch \" + app4)\n app5btn.configure(text=\"Launch \" + app5)\n\n\nconfiglbl = tkinter.Label(window, text=\"Place Path Of Executable here\")\nconfigwindow = tkinter.Entry(window)\n\nsavebtn = tkinter.Button(window, text=\"Save To Config.txt\", command=pathwrite)\n\nrefreshbtn = tkinter.Button(window, text=\"Press to refresh programs\", command=refresh)\n\nwith open(\"config.txt\") as config:\n lines = config.read().splitlines()\n app1 = \"'\" + lines[0] + \"'\"\n app2 = \"'\" + lines[1] + \"'\"\n app3 = \"'\" + lines[2] + \"'\"\n app4 = \"'\" + lines[3] + \"'\"\n app5 = \"'\" + lines[4] + \"'\"\n app6 = \"'\" + lines[5] + \"'\"\n\napponelbl = tkinter.Label(window, text=app1)\napponebtn = tkinter.Button(window, text=\"\", command=launchappone)\n\napp2lbl = tkinter.Label(window, text=app2)\napp2btn = tkinter.Button(window, text=\"\", command=launchapp2)\n\napp3lbl = tkinter.Label(window, text=app3)\napp3btn = tkinter.Button(window, text=\"\", command=launchapp3)\n\napp4lbl = tkinter.Label(window, text=app4)\napp4btn = tkinter.Button(window, text=\"\", command=launchapp4)\n\napp5lbl = tkinter.Label(window, text=app5)\napp5btn = tkinter.Button(window, text=\"\", command=launchapp5)\n\nconfig.close()\nconfiglbl.pack()\nconfigwindow.pack()\nsavebtn.pack()\nrefreshbtn.pack()\napponelbl.pack()\napponebtn.pack()\napp2lbl.pack()\napp2btn.pack()\napp3lbl.pack()\napp3btn.pack()\napp4lbl.pack()\napp4btn.pack()\napp5lbl.pack()\napp5btn.pack()\n","sub_path":"AIO_Tool.py","file_name":"AIO_Tool.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"11096743","text":"#!/usr/bin/env python3\n\"tuple is unchanged/container\"\n# atuple=(10,20,[1,2,3])\n# print(atuple.count(10))\n# print(atuple.index(10))\n# atuple[-1].append(10)\n# print(atuple)\n# a=(10)\n# print(type(a))\n# b=(10,)\n# print(type(b))\n# print(len(b))\n# # print(len(a))#error , int has no len()\n\n\"zhan gong neng\"\nalist=[]\n\ndef ladd():\n while True:\n ch=input(\"input the stack,0 is quit>\")\n if ch == '0':\n print('quit')\n break\n alist.append(ch)\n\n\n\ndef lremove():\n ch=alist.pop()\n print(\"remove ch\")\n\ndef lsee():\n print(alist)\n # print(\"lsee\")\n\nif __name__ == '__main__':\n prompt=\"\"\"\nplease: \n1 ladd();\n2 lremove();\n3 lsee(); \n\"\"\"\n while True:\n num=input(prompt)\n if num=='0':\n print(\"quit\")\n break\n if num == '1':\n ladd()
\n elif num == '2':\n lremove()\n elif num == '3':\n lsee()\n else:\n print(\"please input write number.\")","sub_path":"n2_python/day01/pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"379567146","text":"#==========PACKAGE AND LIBRARY IMPORTS=====================================================================\r\n\r\nimport tweepy\r\nimport pandas as pd\r\nimport time\r\nfrom pandas import DataFrame,set_option\r\nfrom datetime import datetime\r\n\r\nset_option('display.max_colwidth',100)\r\npd.set_option('display.expand_frame_repr', True)\r\n\r\n\r\n#==========FILE AND CLASS IMPORTS==========================================================================\r\n\r\nfrom Config import Consumer_API_Keys\r\nfrom Config import Access_Tokens\r\nfrom tweets import Tweets\r\n\r\nGreg = Tweets()\r\nGreg_Tweets = Greg.get_content()\r\n\r\nAPI_Key = Consumer_API_Keys.API_Key\r\nAPI_Secret_Key = Consumer_API_Keys.API_Secret_Key\r\n\r\nAccess_Token = Access_Tokens.Access_Token\r\nAccess_Token_Secret = Access_Tokens.Access_Token_Secret\r\n\r\nauth = tweepy.OAuthHandler(API_Key,API_Secret_Key)\r\nauth.set_access_token(Access_Token,Access_Token_Secret)\r\n\r\napi = tweepy.API(auth,wait_on_rate_limit = True,\r\nwait_on_rate_limit_notify = True)\r\n\r\nopinion = 0\r\nproceed = 1\r\nif opinion == 0:\r\n#==========DEFINING BOT FUNTIONALITIES=====================================================================\r\n\r\n    class Functionalities:\r\n        def __init__(self, api, auth, Greg_Tweets):\r\n            self.api = api\r\n            self.auth = auth\r\n            self.Greg_Tweets = Greg_Tweets\r\n\r\n        def Authentication(self):\r\n            try:\r\n                self.api.verify_credentials()\r\n                return \"Authenticated\"\r\n            except Exception as e:\r\n                return str(e)\r\n\r\n\r\n        def Choice(self):\r\n            choice = input(\"Please select a functionality to access: \\n>\")\r\n            choice = choice.lower()\r\n            return choice\r\n\r\n        def Tweet(self):\r\n            tweet = input(\"\\nTweet String: \\n>\")\r\n            try:\r\n                self.api.update_status(tweet)\r\n                return \"\\nTweeted Successfully\"\r\n            except Exception as e:\r\n                print(e)\r\n\r\n        def Get_User_Details(self):\r\n            user = input(\"\\nInput user to fetch details about: \\n>\")\r\n            account_details = {}\r\n\r\n            try:\r\n                user_details = self.api.get_user(user)\r\n\r\n                name = user_details.name\r\n                description = user_details.description\r\n                location = user_details.location\r\n\r\n                account_details = {\"Name \":name,\"Description\":description,\"Location\":location}\r\n\r\n                account_details_df = DataFrame.from_dict(account_details,orient = 'index', columns = ['Details'])\r\n\r\n                return account_details_df\r\n\r\n                print(\"\\nFetched Successfully\")\r\n\r\n            except Exception as e:\r\n                return e\r\n\r\n        def Get_recent_tweets(self):\r\n            timeline = api.home_timeline(tweet_mode='extended')\r\n            recent_tweets = {}\r\n\r\n            try:\r\n                for tweet in timeline:\r\n                    recent_tweets.update({tweet.user.name:tweet.full_text})\r\n\r\n                recent_tweets_df = DataFrame.from_dict(recent_tweets,orient = 'index',columns = ['Tweet'])\r\n\r\n                return recent_tweets_df\r\n\r\n                print(\"\\nFetched Successfully\")\r\n\r\n            except Exception as e:\r\n                return e\r\n\r\n        def Timed_Tweet(self):\r\n            correct_input = 1\r\n            time_between_tweets = input(\"Time between tweets: \\n>\")\r\n\r\n            while correct_input == 1:\r\n                try:\r\n                    time_between_tweets = int(time_between_tweets)\r\n                    correct_input = 0\r\n                except ValueError:\r\n                    time_between_tweets = input(\"Please input a number:\")\r\n\r\n            for tweet in Greg_Tweets:\r\n                successful = 0\r\n                tweet_string = (\"@onision \"+tweet)\r\n                while successful == 0:\r\n                    try:\r\n                        time.sleep(2)\r\n                        self.api.update_status(tweet_string)\r\n                        now = datetime.now()\r\n                        day = now.strftime(\"%A %B %dth\")\r\n                        current_time = now.strftime(\"%I:%M:%S %p\")\r\n                        print(\"\\nSuccessfully tweeted at \" + day+\" \"+current_time)\r\n                        time.sleep(time_between_tweets)\r\n                        successful = 1\r\n\r\n                    except Exception as e:\r\n                        print(e)\r\n                        successful = 1\r\n\r\n        def Terminate(self):\r\n\r\n            return 0\r\n\r\n\r\n    #==========CREATING A BOT INSTANCE=========================================================================\r\n\r\n    Session = Functionalities(api,auth,Greg_Tweets)\r\n    authenticate = Session.Authentication()\r\n    if (authenticate == \"Authenticated\"):\r\n        print(authenticate)\r\n    else:\r\n        print(\"AUTHENTICATION ERROR CODE: \"+authenticate)\r\n        print(\" \\n Please verify login credentials\")\r\n        trivial_input = input(\"\\n Press Enter key to terminate window \\n >\")\r\n        proceed = Session.Terminate()\r\n    #==========CHOOSING FUNCTIONALITY AND LOOPING==========================================================================\r\n\r\n\r\n    while proceed==1:\r\n\r\n        print(\"\\n-Tweet \\n-Search Users \\n-Recent Tweets \\n-Timed Tweets \\n-Terminate \\n\")\r\n\r\n        choice = Session.Choice()\r\n        choice = choice.lower()\r\n\r\n        #==========DEFINING FUNCTIONALITY CLAUSES==================================================================\r\n\r\n        def Session_function_choice(choice):\r\n\r\n            if choice in ('search user','user search','search users','users search'):\r\n                return Session.Get_User_Details()\r\n\r\n            elif choice in ('recent tweets','recents tweets'):\r\n                output = Session.Get_recent_tweets()\r\n                print(output)\r\n\r\n                subchoice = (input(\"\\nView full tweet by row index? : \\n>\"))
\r\n                if subchoice in (\"yes\",\"Yes\",\"YES\",\"yeah\",\"Yeah,\"\"YEAH\",\"sure\",\"Sure\",\"SURE\"):\r\n                    subchoice_int = int(input(\"\\nRow number: \\n>\"))\r\n                    subchoice_int-=1\r\n                    set_option('display.max_colwidth',1000)\r\n                    print(output.iat[subchoice_int,0])\r\n                else:\r\n                    print(\"Tweets closed\")\r\n\r\n            elif choice in ('tweet',\"tweets\"):\r\n                return Session.Tweet()\r\n\r\n            elif choice in ('timed','timed tweets','timedtweets','timed tweet','timedtweet'):\r\n                return Session.Timed_Tweet()\r\n\r\n            elif choice in (\"Terminate\",\"terminate\",\"end\",\"End\",\"Stop\",\"stop\"):\r\n                return Session.Terminate()\r\n\r\n        try:\r\n            print(Session_function_choice(choice))\r\n\r\n        except Exception as e:\r\n            print(e)\r\n\r\n        cont = input(\"Would you like like to continue?\")\r\n        if cont not in (\"y\",\"ye\",\"yes\",\"Yes\",\"YES\",\"yeah\",\"Yeah,\"\"YEAH\",\"sure\",\"Sure\",\"SURE\"):\r\n            #print(\"SURE\",\"yes\",\"Yes\",\"YES\",\"yeah\",\"Yeah,\"\"YEAH\",\"sure\",\"Sure\")\r\n            proceed = 0\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"130391105","text":"from functools import partial\n\nfrom core_data_modules.cleaners import swahili, Codes\nfrom core_data_modules.data_models import CodeScheme\nfrom core_data_modules.traced_data.util.fold_traced_data import FoldStrategies\n\nfrom configurations import code_imputation_functions\nfrom configurations.code_schemes import CodeSchemes\nfrom src.lib.configuration_objects import CodingConfiguration, CodingModes, CodingPlan\n\n\ndef clean_age_with_range_filter(text):\n    \"\"\"\n    Cleans age from the given `text`, setting to NC if the cleaned age is not in the range 10 <= age < 100.\n    \"\"\"\n    age = swahili.DemographicCleaner.clean_age(text)\n    if type(age) == int and 10 <= age < 100:\n        return str(age)\n    # TODO: Once the cleaners are updated to not return Codes.NOT_CODED, this should be updated to still return\n    #       NC in the case where age is an int but is out of range\n    else:\n        return Codes.NOT_CODED\n\n\ndef get_rqa_coding_plans(pipeline_name):\n    return [\n        CodingPlan(\n            raw_field=\"appointment_views_raw\",\n            dataset_name=\"appointment_views\",\n            time_field=\"sent_on\",\n            run_id_field=\"appointment_views_run_id\",\n            coda_filename=\"KE_CONSTITUTION_REVIEW_appointment_views.json\",\n            icr_filename=\"appointment_views.csv\",\n            coding_configurations=[\n                CodingConfiguration(\n                    coding_mode=CodingModes.MULTIPLE,\n                    code_scheme=CodeSchemes.APPOINTMENT_VIEWS,\n                    coded_field=\"appointment_views_coded\",\n                    analysis_file_key=\"appointment_views\",\n                    fold_strategy=partial(FoldStrategies.list_of_labels, CodeSchemes.APPOINTMENT_VIEWS)\n                )\n            ],\n            raw_field_fold_strategy=FoldStrategies.concatenate\n        ),\n        CodingPlan(\n            raw_field=\"appointment_benefits_raw\",\n            dataset_name=\"appointment_benefits\",\n            time_field=\"sent_on\",\n            run_id_field=\"appointment_benefits_run_id\",\n            coda_filename=\"KE_CONSTITUTION_REVIEW_appointment_benefits.json\",\n            icr_filename=\"appointment_benefits.csv\",\n            coding_configurations=[\n                CodingConfiguration(\n                    coding_mode=CodingModes.MULTIPLE,\n                    code_scheme=CodeSchemes.APPOINTMENT_BENEFITS,\n                    coded_field=\"appointment_benefits_coded\",\n                    analysis_file_key=\"appointment_benefits\",\n                    fold_strategy=partial(FoldStrategies.list_of_labels, CodeSchemes.APPOINTMENT_BENEFITS)\n                )\n            ],\n            raw_field_fold_strategy=FoldStrategies.concatenate\n        ),\n        CodingPlan(\n            raw_field=\"appointment_challenges_raw\",\n            dataset_name=\"appointment_challenges\",\n            time_field=\"sent_on\",\n            run_id_field=\"appointment_challenges_run_id\",\n            coda_filename=\"KE_CONSTITUTION_REVIEW_appointment_challenges.json\",\n            icr_filename=\"appointment_challenges.csv\",\n            coding_configurations=[\n                CodingConfiguration(\n                    coding_mode=CodingModes.MULTIPLE,\n                    code_scheme=CodeSchemes.APPOINTMENT_CHALLENGES,\n                    coded_field=\"appointment_challenges_coded\",\n                    analysis_file_key=\"appointment_challenges\",\n                    fold_strategy=partial(FoldStrategies.list_of_labels, CodeSchemes.APPOINTMENT_CHALLENGES)\n                )\n            ],\n            raw_field_fold_strategy=FoldStrategies.concatenate\n        ),\n        CodingPlan(\n            raw_field=\"other_messages_raw\",\n            dataset_name=\"other_messages\",\n            time_field=\"sent_on\",\n            run_id_field=\"other_messages_run_id\",\n            coda_filename=\"KE_CONSTITUTION_REVIEW_other_messages.json\",\n            icr_filename=\"other_messages.csv\",\n            coding_configurations=[\n                CodingConfiguration(\n                    coding_mode=CodingModes.MULTIPLE,\n                    code_scheme=CodeSchemes.OTHER_MESSAGES,\n                    coded_field=\"other_messages_coded\",\n                    analysis_file_key=\"other_messages\",\n                    fold_strategy=partial(FoldStrategies.list_of_labels, CodeSchemes.OTHER_MESSAGES)\n                )\n            ],\n            raw_field_fold_strategy=FoldStrategies.concatenate\n        )\n    ]\n\n\ndef get_demog_coding_plans(pipeline_name):\n    return [\n        CodingPlan(\n            dataset_name=\"gender\",\n            raw_field=\"gender_raw\",\n            time_field=\"gender_time\",\n            coda_filename=\"Kenya_Pool_gender.json\",\n            coding_configurations=[\n                CodingConfiguration(\n                    coding_mode=CodingModes.SINGLE,\n                    code_scheme=CodeSchemes.GENDER,\n                    cleaner=swahili.DemographicCleaner.clean_gender,\n                    coded_field=\"gender_coded\",\n                    analysis_file_key=\"gender\",\n                    fold_strategy=FoldStrategies.assert_label_ids_equal\n                )\n            ],\n            ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value(\"gender\"),\n            raw_field_fold_strategy=FoldStrategies.assert_equal\n        ),\n\n        CodingPlan(dataset_name=\"age\",\n                   raw_field=\"age_raw\",\n                   time_field=\"age_time\",\n                   coda_filename=\"Kenya_Pool_age.json\",\n                   coding_configurations=[\n                       CodingConfiguration(\n                           coding_mode=CodingModes.SINGLE,\n                           code_scheme=CodeSchemes.AGE,\n                           cleaner=clean_age_with_range_filter,\n                           coded_field=\"age_coded\",\n                           analysis_file_key=\"age\",\n                           include_in_theme_distribution=False,\n                           fold_strategy=FoldStrategies.assert_label_ids_equal\n                       ),\n                       CodingConfiguration(\n                           coding_mode=CodingModes.SINGLE,\n                           code_scheme=CodeSchemes.AGE_CATEGORY,\n                           coded_field=\"age_category_coded\",\n                           analysis_file_key=\"age_category\",\n                           fold_strategy=FoldStrategies.assert_label_ids_equal\n                       )\n                   ],\n                   code_imputation_function=code_imputation_functions.impute_age_category,\n                   ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value(\"age\"),\n                   raw_field_fold_strategy=FoldStrategies.assert_equal),\n\n        CodingPlan(dataset_name=\"location\",\n                   raw_field=\"location_raw\",\n                   time_field=\"location_time\",\n                   coda_filename=\"Kenya_Pool_location.json\",\n                   coding_configurations=[\n                       CodingConfiguration(\n                           coding_mode=CodingModes.SINGLE,\n                           code_scheme=CodeSchemes.KENYA_COUNTY,\n                           coded_field=\"county_coded\",\n                           analysis_file_key=\"county\",\n                           fold_strategy=FoldStrategies.assert_label_ids_equal\n                       ),\n                       CodingConfiguration(\n                           coding_mode=CodingModes.SINGLE,\n                           code_scheme=CodeSchemes.KENYA_CONSTITUENCY,\n                           coded_field=\"constituency_coded\",\n                           analysis_file_key=\"constituency\",\n                           fold_strategy=FoldStrategies.assert_label_ids_equal\n                       )\n                   ],\n                   code_imputation_function=code_imputation_functions.impute_kenya_location_codes,\n                   ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value(\"location\"),
\n                   raw_field_fold_strategy=FoldStrategies.assert_equal),\n\n        CodingPlan(dataset_name=\"disabled\",\n                   raw_field=\"disabled_raw\",\n                   time_field=\"disabled_time\",\n                   coda_filename=\"Kenya_Pool_disabled.json\",\n                   coding_configurations=[\n                       CodingConfiguration(\n                           coding_mode=CodingModes.SINGLE,\n                           code_scheme=CodeSchemes.DISABLED,\n                           coded_field=\"disabled_coded\",\n                           analysis_file_key=\"disabled\",\n                           fold_strategy=FoldStrategies.assert_label_ids_equal\n                       )\n                   ],\n                   ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value(\"disabled\"),\n                   raw_field_fold_strategy=FoldStrategies.assert_equal)\n    ]\n\n\ndef get_ws_correct_dataset_scheme(pipeline_name):\n    return CodeSchemes.WS_CORRECT_DATASET\n\n\ndef get_follow_up_coding_plans(pipeline_name):\n    return []\n","sub_path":"configurations/coding_plans.py","file_name":"coding_plans.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"273805202","text":"\"\"\"\r\nAuthor: Ibrahim Sherif\r\nDate: October, 2021\r\nThis script used for training, evaluting and saving the model\r\n\"\"\"\r\nimport sys\r\nimport joblib\r\nimport logging\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport config\r\nfrom pipeline.train import train\r\nfrom pipeline.evaluate import evaluate\r\nfrom pipeline.data import get_clean_data\r\nfrom pipeline.slicing import evaluate_slices\r\n\r\n\r\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\r\n\r\n\r\ndef run():\r\n    \"\"\"\r\n    Main entry point\r\n    \"\"\"\r\n    logging.info(\"Loading and getting clean data\")\r\n    X, y = get_clean_data(config.DATA_DIR)\r\n\r\n    logging.info(\"Splitting data to train and test\")\r\n    X_train, X_test, y_train, y_test = train_test_split(\r\n        X, y, test_size=config.TEST_SIZE, random_state=config.RANDOM_STATE, stratify=y)\r\n\r\n    logging.info(\"Started model training\")\r\n    model_pipe = train(\r\n        config.MODEL,\r\n        X_train,\r\n        y_train,\r\n        config.PARAM_GRID,\r\n        config.FEATURES)\r\n\r\n    logging.info(\"Evaluating and saving metrics to file\")\r\n    with open(config.EVAL_DIR, 'w') as file:\r\n        evaluate(file, model_pipe, X_train, y_train, \"train\")\r\n        evaluate(file, model_pipe, X_test, y_test, \"test\")\r\n\r\n    logging.info(\"Evaluating slices and saving to file\")\r\n    with open(config.SLICE_DIR, 'w') as file:\r\n        for col in config.SLICE_COLUMNS:\r\n            evaluate_slices(file, model_pipe, col, X_train, y_train, \"train\")\r\n            evaluate_slices(file, model_pipe, col, X_test, y_test, \"test\")\r\n\r\n    logging.info(\"Saving model\")\r\n    joblib.dump(model_pipe, config.MODEL_DIR)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    run()\r\n","sub_path":"src/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"557720813","text":"while True:\r\n    list = range(0,2000)\r\n    for eleman in list:\r\n        tmp = list[eleman]\r\n        birler = tmp % 10\r\n        onlar = int(tmp / 10)\r\n        onlar = int(onlar % 10)\r\n        yüzler = int(tmp / 100)\r\n        yüzler = int(yüzler % 10)\r\n        binler = int(tmp / 1000)\r\n\r\n        islem = binler ** 4 + yüzler ** 4 + onlar ** 4 + birler ** 4\r\n        if(islem==list[eleman]):\r\n            print(eleman, \"=\" ,islem)\r\n        if(eleman==len(list)):\r\n            break
","sub_path":"kod.py","file_name":"kod.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"611346542","text":"import sublime\nimport sublime_plugin\nimport threading\nimport os\n\nfrom .createfile import CreateFileThread\n\nclass CreateFormsThread(threading.Thread):\n    def __init__(self, solution_id, solution_path, solution_item, turbina):\n        self.solution_id = solution_id\n        self.solution_path = solution_path\n        self.solution_item = solution_item\n        self.turbina = turbina\n        threading.Thread.__init__(self)\n\n    def run(self):\n        formated_form = '- '+'Formulário: '+self.solution_item['name']+' - '+self.solution_item['key']+' carregado!'\n        sublime.status_message(formated_form)\n        form_detail = self.turbina.getFormBySolution(self.solution_id, self.solution_item['id'])\n\n        # se tiver conteudo html\n        if 'html' in form_detail.keys():\n            html = form_detail['html']\n        else:\n            html = ''\n\n        # se tiver conteudo javascript\n        if 'javascript' in form_detail.keys():\n            javascript = form_detail['javascript']\n        else:\n            javascript = ''\n\n        # cabeçalho para aparecer no arquivo que vai ser editado\n        file_name_html = self.solution_path+ '/' +form_detail['key'] + '.html'\n        content_html = '\\n'\n        content_html += '\\n\\n\\n'\n        content_html += html\n\n        # cabeçalho para aparecer no arquivo que vai ser editado\n        file_name_js = self.solution_path+ '/' +form_detail['key'] + '.js'\n        content_js = '// @=solution_id:'+self.solution_id+';solution_item:forms;item_content:javascript;item_id:'+self.solution_item['id']+'=@\\n'\n        content_js += '// ==========================================================================================\\n\\n\\n'\n        content_js += javascript\n\n\n        create_file_html = CreateFileThread(file_name_html, content_html)\n        create_file_html.start()\n\n        create_file_js = CreateFileThread(file_name_js, content_js)\n        create_file_js.start()","sub_path":"turbina_core/helpers/createform.py","file_name":"createform.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"649226689","text":"import cv2\nimport dlib\n\n\nclass FaceDetector:\n    \"\"\"\n    HOG + Linear SVM face detector\n    HOG stands for Histogram of Gradients\n    \"\"\"\n    def __init__(self):\n        self.detector = dlib.get_frontal_face_detector()\n        # Number of times to upsample an image before applying face detection.\n        # To detect small faces in a large input image, we may wish to increase\n        # the resolution of the input image, thereby making the smaller faces appear larger.\n        # Doing so allows our sliding window to detect the face.\n        self.upsample = 1\n\n    def detect(self, image):\n        # Convert the image from BGR to RGB channel ordering (dlib expects RGB images)\n        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        faces = self.detector(rgb, self.upsample)\n        faces = [\n            (\n                max(0, face.left()),\n                max(0, face.top()),\n                min(face.right(), image.shape[1]),\n                min(face.bottom(), image.shape[0])\n            )\n            for face in faces\n        ]\n        return faces, [1] * len(faces)\n","sub_path":"detectors/face_detector/dlib_hog/face_detector.py","file_name":"face_detector.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"580074792","text":"load(\"@bazel_tools//tools/cpp:toolchain_utils.bzl\", \"find_cpp_toolchain\")\nload(\"@bazel_tools//tools/build_defs/cc:action_names.bzl\", \"C_COMPILE_ACTION_NAME\")\n\nCompilationAspect = provider()\n\ndef _get_project_info(target, ctx):\n    cc = target[CcInfo]\n    cc_toolchain = find_cpp_toolchain(ctx)\n    feature_configuration = cc_common.configure_features(\n        ctx = ctx,\n        cc_toolchain = cc_toolchain,\n        requested_features = ctx.features,\n        unsupported_features = ctx.disabled_features,\n    )\n    compile_variables = cc_common.create_compile_variables(\n        feature_configuration = feature_configuration,\n        cc_toolchain = cc_toolchain,\n        user_compile_flags = ctx.fragments.cpp.copts,\n    )\n    compiler_options = cc_common.get_memory_inefficient_command_line(\n        feature_configuration = feature_configuration,\n        action_name = C_COMPILE_ACTION_NAME,\n        variables = compile_variables,\n    )\n    if cc:\n        cc_info = struct(\n            include_dirs = cc.compilation_context.includes.to_list(),\n            system_include_dirs = cc.compilation_context.system_includes.to_list(),\n            quote_include_dirs = cc.compilation_context.quote_includes.to_list(),\n            compile_flags = compiler_options + (ctx.rule.attr.copts if \"copts\" in dir(ctx.rule.attr) else []) + ctx.fragments.cpp.cxxopts + ctx.fragments.cpp.copts,\n            defines = cc.compilation_context.defines.to_list(),\n        )\n    else:\n        cc_info = None\n    return struct(\n        build_file_path = ctx.build_file_path,\n        workspace_root = ctx.label.workspace_root,\n        package = ctx.label.package,\n\n        files = struct(**{name: _get_file_group(ctx.rule.attr, name) for name in ['srcs', 'hdrs']}),\n        deps = [str(dep.label) for dep in getattr(ctx.rule.attr, 'deps', [])],\n        target = struct(label=str(target.label), files=[f.path for f in target.files.to_list()]),\n        kind = ctx.rule.kind,\n\n        cc = cc_info,\n    )\n\ndef _get_file_group(rule_attrs, attr_name):\n    file_targets = getattr(rule_attrs, attr_name, None)\n    if not file_targets: return []\n    return [file.path for t in file_targets for file in t.files.to_list()]\n\ndef _msbuild_aspect_impl(target, ctx):\n    info_file = ctx.actions.declare_file(target.label.name + '.msbuild')\n    content = _get_project_info(target, ctx).to_json()\n    ctx.actions.write(info_file, content, is_executable=False)\n\n    outputs = depset([info_file]).to_list()\n    for dep in getattr(ctx.rule.attr, 'deps', []):\n        outputs += dep[OutputGroupInfo].msbuild_outputs.to_list()\n    return [OutputGroupInfo(msbuild_outputs=outputs)]\n\nmsbuild_aspect = aspect(\n    attr_aspects = [\"deps\"],\n    attrs = {\n        \"_cc_toolchain\": attr.label(\n            default = Label(\"@bazel_tools//tools/cpp:current_cc_toolchain\"),\n        ),\n    },\n    fragments = [\"cpp\"],\n    required_aspect_providers = [CompilationAspect],\n    implementation = _msbuild_aspect_impl,\n)\n","sub_path":"bazel/bazel-msbuild/msbuild.bzl","file_name":"msbuild.bzl","file_ext":"bzl","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"75113540","text":"i =1 # init\nwhile i<10: #condition\n\n    print(i)\n    \n\n    i = i+1#incrementer\n\n#print in reverse\ni =10\nwhile i>0:\n    print(i,end=',')\n    i = i - 1\n\nprint('------------------')\n#pirnt table of 4\n\ni = 1\nwhile i<=10:\n    print(i*4)\n    i=i+1\n    \nprint('------------------')\n#or\ni = 4\nwhile i<=40:\n    print(i)\n    i = i+4\n\n\nprint('------------------')\n#print all odd numbers between two given input\nn1 = int(input('enter start no. '))\nn2 =input('enter end no. ')
\n\n\n#print(type(n1))\n#print(type(n2))\n\n\nn2 = int(n2)\n\n\nif n1%2 ==0:\n    n1 = n1+1\n    \nwhile n1<=n2:\n\n    print(n1)\n    n1 = n1+2\n\nprint('---------')\n\n#### for loop\nfor i in range(1,11): # from 1 to 10 (last index -1) , default incrementer is 1\n    print(i)\n    \n\n    \nfor i in range(1,11,2): # from 1 to 10 (last index -1) , default incrementer is 1\n    print(i)\n\nfor i in range(10,0,-1):\n    print(i)\n\n\n\n\n\n\n    \n\n\n\n\n\n\n\n\n\n    \n","sub_path":"Python 20 Jan/loopEx.py","file_name":"loopEx.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"43563323","text":"import cv2\nfrom imutils.video import VideoStream\nimport time\nfrom operator import xor\nimport numpy as np\nimport imutils\nfrom utils.rect_control import RectControl\nfrom utils.hsv_and_save import *\nimport csv\nimport os\nimport copy\n\nimg_count = 0\nrange_filter = 'HSV'\nsetup_trackbars(range_filter)\nisTrackbar = True\n\nargs = get_arguments()\n\nlabels = ['Filename', 'x1', 'y1', 'x2', 'y2', 'Class']\n\nfile_exists = os.path.isfile('coords_labels.csv')\nwith open('coords_labels.csv', 'a') as file:\n    if not file_exists:\n        writer = csv.DictWriter(file, fieldnames=labels)\n        writer.writeheader()\n\nrect = RectControl()\n\nvs = VideoStream(src=0).start()\nwhile True:\n\n    frame = vs.read()\n    frame = cv2.flip(frame, 1)\n    frame_orig = copy.deepcopy(frame)\n    frame_to_thresh = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n    if isTrackbar:\n        v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(\n            range_filter)\n        thresh = cv2.inRange(\n            frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))\n    thresh = cv2.erode(thresh, None, iterations=2)\n    thresh = cv2.dilate(thresh, None, iterations=2)\n\n    if isTrackbar == True:\n        thresh = np.stack((thresh, )*3, axis=-1)\n        frame = np.concatenate((frame, thresh), axis=1)\n\n    else:\n        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n                                cv2.CHAIN_APPROX_SIMPLE)\n        cnts = imutils.grab_contours(cnts)\n        center = None\n        if len(cnts) > 0:\n            c = max(cnts, key=cv2.contourArea)\n            ((x, y), radius) = cv2.minEnclosingCircle(c)\n            M = cv2.moments(c)\n            center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n            if radius > 10:\n                frame = rect.createRect(center, frame)\n\n        frame = frame[:, :640, :]\n\n    cv2.imshow(\"Frame\", frame)\n\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord('q') or key == ord('Q'):\n        vs.stop()\n        break\n    if key == ord('t') or key == ord('T'):\n        if isTrackbar:\n            vX = get_trackbar_values(range_filter)\n            cv2.destroyWindow('Trackbars')\n            print(f\"vals ---> {vX}\")\n        if not isTrackbar:\n            restartTrackbar(range_filter, vX)\n\n        isTrackbar = not isTrackbar\n\n    if key == ord('s') or key == ord('S'):\n        img_count += 1\n        if not os.path.exists(args[\"class\"]):\n            os.mkdir(os.path.join(os.getcwd(), args[\"class\"]))\n        img_save_name = os.path.join(args[\"class\"], str(img_count)+'.png')\n        cv2.imwrite(img_save_name, frame_orig)\n\n        # CSV\n        csv_dict = rect.get_csv_vals(args['class'])\n        csv_dict['Filename'] = img_save_name\n        print(csv_dict)\n        with open('coords_labels.csv', 'a') as file:\n            row_write = csv.DictWriter(file, fieldnames=labels)\n            row_write.writerow(csv_dict)\n\n    if key == ord('c') or key == ord('C'):\n        args['class'] = input(\"Please enter new class name : \")\r\n    rect.control(key)
rect.control(key)\n\ncv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"354575424","text":"import synapse.common as s_common\nimport synapse.tests.utils as s_t_utils\n\nclass CnGovTest(s_t_utils.SynTest):\n\n async def test_models_cngov_mucd(self):\n\n async with self.getTestCore() as core:\n\n async with await core.snap() as snap:\n org0 = s_common.guid()\n props = {\n 'org': org0\n }\n node = await snap.addNode('gov:cn:icp', 12345678, props)\n self.eq(node.ndef[1], 12345678)\n self.eq(node.get('org'), org0)\n\n node = await snap.addNode('gov:cn:mucd', 61786)\n self.eq(node.ndef[1], 61786)\n","sub_path":"synapse/tests/test_model_gov_cn.py","file_name":"test_model_gov_cn.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"456812893","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# 购物车程序\n\nproducts = [['iphone8',6888],['MacPro',14800],['小米6',2499],['Coffee',31],['Book',80],['Nike Shoes',799]]\nshopping_cart = []\nrun_flag = True\nwhile run_flag:\n print(\"------------商品列表------------\")\n for index,p in enumerate(products):\n print(\"%s.%s %s\" %(index,p[0],p[1]) )\n choice = input(\"请输入想购买的商品编号: \")\n if choice.isdigit():\n choice = int(choice)\n if 0 <= choice < len(products):\n shopping_cart.append(products[choice])\n print(\"Added product %s into shopping cart.\" % (products[choice]) )\n elif choice == 'q':\n if len(shopping_cart) > 0:\n print(\"你购买的商品列表如下: %s\" %(shopping_cart))\n run_flag = False\n else :\n print(\"请输入正确的商品编号.\")\n","sub_path":"shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"71560096","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 15 14:30:38 2019\n@author: junying\n\"\"\"\nimport json\n\nfrom base_handler import BaseHandler\nfrom utils import decimal_default,str_to_decimal,get_linenumber\nfrom .proxy import EthereumProxy\n#from constants import ETH_IP_ADDR,ETH_RPC_PORT,ETH_DEFAULT_GAS_PRICE,ETH_BLK_BUFFER_SIZE\nfrom constants import ETH_IP_ADDR,ETH_RPC_PORT,ETH_BLK_BUFFER_SIZE #,ETH_DEFAULT_GAS_PRICE,\n\nip_addr, port = ETH_IP_ADDR,ETH_RPC_PORT\n#default_gas = 21000\n#default_gasprice = ETH_DEFAULT_GAS_PRICE\n\n \nclass ETH_GetBalance(BaseHandler):\n @staticmethod\n def get_balance(rpc_connection,addr):\n balance = rpc_connection.eth_getBalance(addr)\n #return balance/float(10**18)\n return balance\n\n def post(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n address = self.get_argument(\"address\")\n if len(address) != 42:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"arguments error\")))\n return\n balance = ETH_GetBalance.get_balance(rpc_connection,address)\n self.write(json.dumps(BaseHandler.success_ret_with_data(str(balance)), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetBalance error:{0} in {1}\".format(e,get_linenumber()))\n \n\n\n#class ETH_GasPrice(BaseHandler):\n# def get(self):\n# rpc_connection = EthereumProxy(ip_addr, port)\n# try:\n# data = rpc_connection.eth_gasPrice()\n# self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n# except Exception as e:\n# 
self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n# print(\"ETH_GasPrice error:{0} in {1}\".format(e,get_linenumber()))\n \nclass ETH_PendingTransactions(BaseHandler):\n def get(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n data = rpc_connection.eth_pendingTransactions()\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_PendingTransactions error:{0} in {1}\".format(e,get_linenumber()))\n\n\nclass ETH_SendRawTransaction(BaseHandler):\n def post(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n data = str(self.get_argument(\"data\"))\n # 0x checking\n rlpdata = \"0x\" + data if \"0x\" not in data else data\n # sending raw transaction\n rsp = rpc_connection.eth_sendRawTransaction(rlpdata)\n self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_SendRawTransaction error:{0} in {1}\".format(e,get_linenumber()))\n\nclass ETH_ListAccounts(BaseHandler):\n @staticmethod\n def addresses():\n from sql import run\n accounts = run('select address from t_eth_accounts')\n return [account['address'].strip() for account in accounts]\n\n def get(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n data = ETH_ListAccounts.addresses()\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_Accounts error:{0} in {1}\".format(e,get_linenumber()))\n \nclass ETH_BlockNumber(BaseHandler):\n @staticmethod\n def latest(rpc_connection):\n lastestBlockNum = int(rpc_connection.eth_blockNumber())\n return lastestBlockNum\n\n def get(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n data = ETH_BlockNumber.latest(rpc_connection)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_BlockNumber error:{0} in {1}\".format(e,get_linenumber()))\n\nclass ETH_GetBlockTransactionCount(BaseHandler):\n @staticmethod\n def fromGetBlock(rpc_connection,blknumber):\n blkheader = rpc_connection.eth_getBlockByNumber(blknumber)\n return len(blkheader['transactions']) if blkheader else 0\n\n @staticmethod\n def process(rpc_connection,blknumber):\n blknumber = rpc_connection.eth_getBlockTransactionCountByNumber(blknumber)\n return int(blknumber) if blknumber else 0\n\n def get(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n blknumber = int(self.get_argument(\"blknumber\")) if self.get_argument(\"blknumber\") else int(ETH_BlockNumber.latest(rpc_connection))\n data = ETH_GetBlockTransactionCount.fromGetBlock(rpc_connection,blknumber)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetBlockTransactionCount error:{0} in {1}\".format(e,get_linenumber()))\n\nclass ETH_GetTransactionFromBlock(BaseHandler):\n @staticmethod\n def process(rpc_connection,blknumber,txindex):\n txdata = rpc_connection.eth_getTransactionByBlockNumberAndIndex(blknumber,txindex)\n blockData = 
rpc_connection.eth_getBlockByNumber(blknumber)\n txdata[\"blocktime\"] = blockData[\"timestamp\"] if blockData and \"timestamp\" in blockData else 0\n txdata[\"confirmations\"] = ETH_BlockNumber.latest(rpc_connection) - blknumber\n txdata[\"blockNumber\"] = blknumber\n from utils import filtered,alterkeyname\n retData = filtered(alterkeyname(txdata,'hash','txid'),[\"confirmations\", \"blocktime\", \"blockNumber\",\"nonce\",\"txid\",\"from\",\"to\",\"value\",\"gas\",\"gasPrice\"]) if txdata else False\n import decimal\n from decimal import Decimal\n decimal.getcontext().prec = 50\n for key in [\"nonce\", \"gas\", \"value\", \"gasPrice\", \"blocktime\"]:\n if \"0x\" in retData[key]: retData[key] = str(int(retData[key], 16))\n\n #only convert \"value\" to ether, DO NOT convert \"gas\" \"gasprice\" to ether!!!! by yqq 2019-04-29\n if key in [\"value\"]: retData[key] = \"%.8f\" % (float(retData[key]) / (10**18 )) #keep 8 decimal places to match exchange configurations, 2019-04-29 by yqq\n #if key in [\"gas\", \"value\", \"gasPrice\"]: retData[key] = str ((decimal.Decimal(retData[key]) / decimal.Decimal(10**18)).quantize(Decimal('0.000000000000000000')))\n return retData\n\n def get(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n blknumber = int(self.get_argument(\"blknumber\")) if self.get_argument(\"blknumber\") else int(ETH_BlockNumber.latest(rpc_connection))\n txindex = int(self.get_argument(\"txindex\")) if self.get_argument(\"txindex\") else 0\n ret = ETH_GetTransactionFromBlock.process(rpc_connection,blknumber,txindex)\n if not ret:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"no corresponding transaction or block body not found!!!\")))\n return\n self.write(json.dumps(BaseHandler.success_ret_with_data(ret), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetTransactionFromBlock error:{0} in {1}\".format(e,get_linenumber()))\n\n\nclass ETH_GetBlockTransactions(BaseHandler):\n @staticmethod\n def process(rpc_connection,blknumber,txcount):\n txlist = []\n for index in range(txcount):\n txdata = ETH_GetTransactionFromBlock.process(rpc_connection,blknumber,index)\n if not txdata:\n break\n if any(txdata[address] in ETH_ListAccounts.addresses() for address in ['to','from']):\n txlist.append(txdata)\n return txlist\n\n def post(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n blknumber = int(self.get_argument(\"blknumber\")) if self.get_argument(\"blknumber\") else ETH_BlockNumber.latest(rpc_connection)\n txcount = ETH_GetBlockTransactionCount.fromGetBlock(rpc_connection,blknumber)\n data = ETH_GetBlockTransactions.process(rpc_connection,blknumber,txcount)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetBlockTransactions error:{0} in {1}\".format(e,get_linenumber()))\n\n\n\n#2019-05-01 yqq\n#API for fetching user deposit info; reads transaction data directly from the database\n#instead of scanning blocks on the fly\nclass ETH_CrawlTxData(BaseHandler):\n\n def GetTxDataFromDB(self, nBegin, nEnd):\n if not (isinstance(nBegin, int) and (isinstance(nEnd, int) or isinstance(nEnd, long) )):\n #print(type(nBegin))\n #print(type(nEnd))\n print(\"nBegin or nEnd is not int type.\")\n return []\n \n txRet = []\n\n import sql\n strSql = \"\"\"SELECT txdata FROM t_eth_charge WHERE height >= {0} and height <= {1} LIMIT 100;\"\"\".format(nBegin, nEnd)\n #print(strSql)\n sqlRet = sql.run(strSql)\n 
#print(sqlRet)\n if not isinstance(sqlRet, list):\n return []\n for item in sqlRet:\n txListStr = item[\"txdata\"]\n txList = json.loads(txListStr)\n txRet.extend(txList)\n return txRet\n\n #@staticmethod\n def process(self, rpc_connection, nStart):\n txRet = self.GetTxDataFromDB(nStart, (1<<64) - 1)\n return txRet \n\n\n def post(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n nStart = int(self.get_argument(\"blknumber\"))\n data = self.process(rpc_connection, nStart)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_CrawlTxData error:{0} in {1}\".format(e,get_linenumber()))\n\n\nclass ETH_GetTransactionCount(BaseHandler):\n def post(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n address = self.get_argument(\"address\")\n nonce = rpc_connection.eth_getTransactionCount(address)\n self.write(json.dumps(BaseHandler.success_ret_with_data(str(nonce)), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetTransactionCount error:{0} in {1}\".format(e,get_linenumber()))\n\nclass ETH_GetBlockByNumber(BaseHandler):\n def get(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n block_number = str(self.get_argument(\"number\"))\n block_number = int(block_number,16) if '0x' in block_number else int(block_number)\n tx_infos = rpc_connection.eth_getBlockByNumber(block_number)\n self.write(json.dumps(BaseHandler.success_ret_with_data(tx_infos), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetBlockByNumber Error:{0} in {1}\".format(e,get_linenumber()))\n\nclass ETH_GetTransactionByHash(BaseHandler):\n def post(self):\n rpc_connection = EthereumProxy(ip_addr, port)\n try:\n tx_hash = self.get_argument(\"tx_hash\")#?????not ready\n tx_info = rpc_connection.eth_getTransactionByHash(tx_hash)\n self.write(json.dumps(BaseHandler.success_ret_with_data(tx_info), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"ETH_GetTransactionByHash error:{0} in {1}\".format(e,get_linenumber()))\n","sub_path":"项目/wallet_server/rpc/eth/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":12363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"349678705","text":"import uuid\nfrom mock import patch, call\nfrom django.test import TestCase\nimport tempfile\nimport os\nfrom orchestrator.commands import CommandType, Command\nfrom orchestrator.models import Job, Status, PipelineType\nfrom orchestrator.exceptions import RetryException\nfrom orchestrator.tasks import (\n command_processor,\n check_job_status,\n process_jobs,\n cleanup_folders,\n get_job_info_path,\n set_permission,\n)\n\n\nclass TasksTest(TestCase):\n def setUp(self):\n pass\n\n def test_status_transition(self):\n created = Status.CREATED\n self.assertFalse(created.transition(Status.CREATED))\n self.assertTrue(created.transition(Status.SUBMITTING))\n self.assertFalse(created.transition(Status.SUBMITTED))\n self.assertFalse(created.transition(Status.PENDING))\n self.assertFalse(created.transition(Status.RUNNING))\n self.assertFalse(created.transition(Status.COMPLETED))\n self.assertFalse(created.transition(Status.FAILED))\n 
self.assertTrue(created.transition(Status.TERMINATED))\n self.assertFalse(created.transition(Status.SUSPENDED))\n self.assertFalse(created.transition(Status.UNKNOWN))\n\n submitting = Status.SUBMITTING\n self.assertFalse(submitting.transition(Status.CREATED))\n self.assertFalse(submitting.transition(Status.SUBMITTING))\n self.assertTrue(submitting.transition(Status.SUBMITTED))\n self.assertFalse(submitting.transition(Status.PENDING))\n self.assertFalse(submitting.transition(Status.RUNNING))\n self.assertFalse(submitting.transition(Status.COMPLETED))\n self.assertFalse(submitting.transition(Status.FAILED))\n self.assertTrue(submitting.transition(Status.TERMINATED))\n self.assertFalse(submitting.transition(Status.SUSPENDED))\n self.assertFalse(submitting.transition(Status.UNKNOWN))\n\n submited = Status.SUBMITTED\n self.assertFalse(submited.transition(Status.CREATED))\n self.assertFalse(submited.transition(Status.SUBMITTING))\n self.assertFalse(submited.transition(Status.SUBMITTED))\n self.assertTrue(submited.transition(Status.PENDING))\n self.assertTrue(submited.transition(Status.RUNNING))\n self.assertTrue(submited.transition(Status.COMPLETED))\n self.assertTrue(submited.transition(Status.FAILED))\n self.assertTrue(submited.transition(Status.TERMINATED))\n self.assertTrue(submited.transition(Status.UNKNOWN))\n self.assertTrue(submited.transition(Status.SUSPENDED))\n\n pending = Status.PENDING\n self.assertFalse(pending.transition(Status.CREATED))\n self.assertFalse(pending.transition(Status.SUBMITTING))\n self.assertFalse(pending.transition(Status.SUBMITTED))\n self.assertTrue(pending.transition(Status.PENDING))\n self.assertTrue(pending.transition(Status.RUNNING))\n self.assertTrue(pending.transition(Status.COMPLETED))\n self.assertTrue(pending.transition(Status.FAILED))\n self.assertTrue(pending.transition(Status.TERMINATED))\n self.assertTrue(pending.transition(Status.UNKNOWN))\n self.assertTrue(pending.transition(Status.SUSPENDED))\n\n running = Status.RUNNING\n self.assertFalse(running.transition(Status.CREATED))\n self.assertFalse(running.transition(Status.SUBMITTING))\n self.assertFalse(running.transition(Status.SUBMITTED))\n self.assertFalse(running.transition(Status.PENDING))\n self.assertTrue(running.transition(Status.RUNNING))\n self.assertTrue(running.transition(Status.COMPLETED))\n self.assertTrue(running.transition(Status.FAILED))\n self.assertTrue(running.transition(Status.TERMINATED))\n self.assertTrue(running.transition(Status.UNKNOWN))\n self.assertTrue(running.transition(Status.SUSPENDED))\n\n terminated = Status.TERMINATED\n self.assertFalse(terminated.transition(Status.CREATED))\n self.assertFalse(terminated.transition(Status.SUBMITTING))\n self.assertFalse(terminated.transition(Status.SUBMITTED))\n self.assertFalse(terminated.transition(Status.PENDING))\n self.assertFalse(terminated.transition(Status.RUNNING))\n self.assertFalse(terminated.transition(Status.COMPLETED))\n self.assertFalse(terminated.transition(Status.FAILED))\n self.assertFalse(terminated.transition(Status.TERMINATED))\n self.assertFalse(terminated.transition(Status.UNKNOWN))\n self.assertFalse(terminated.transition(Status.SUSPENDED))\n\n suspended = Status.SUSPENDED\n self.assertFalse(suspended.transition(Status.CREATED))\n self.assertFalse(suspended.transition(Status.SUBMITTING))\n self.assertFalse(suspended.transition(Status.SUBMITTED))\n self.assertTrue(suspended.transition(Status.PENDING))\n self.assertTrue(suspended.transition(Status.RUNNING))\n 
self.assertFalse(suspended.transition(Status.COMPLETED))\n self.assertFalse(suspended.transition(Status.FAILED))\n self.assertTrue(suspended.transition(Status.TERMINATED))\n self.assertFalse(suspended.transition(Status.UNKNOWN))\n self.assertFalse(suspended.transition(Status.SUSPENDED))\n\n unknown = Status.UNKNOWN\n self.assertFalse(unknown.transition(Status.CREATED))\n self.assertFalse(unknown.transition(Status.SUBMITTING))\n self.assertFalse(unknown.transition(Status.SUBMITTED))\n self.assertTrue(unknown.transition(Status.PENDING))\n self.assertTrue(unknown.transition(Status.RUNNING))\n self.assertTrue(unknown.transition(Status.COMPLETED))\n self.assertTrue(unknown.transition(Status.FAILED))\n self.assertTrue(unknown.transition(Status.TERMINATED))\n self.assertTrue(unknown.transition(Status.UNKNOWN))\n self.assertTrue(unknown.transition(Status.SUSPENDED))\n\n completed = Status.COMPLETED\n self.assertFalse(completed.transition(Status.RUNNING))\n self.assertFalse(completed.transition(Status.COMPLETED))\n self.assertFalse(completed.transition(Status.FAILED))\n self.assertFalse(completed.transition(Status.TERMINATED))\n self.assertFalse(completed.transition(Status.SUSPENDED))\n self.assertFalse(completed.transition(Status.UNKNOWN))\n self.assertFalse(completed.transition(Status.PENDING))\n self.assertFalse(completed.transition(Status.CREATED))\n\n failed = Status.FAILED\n self.assertFalse(failed.transition(Status.RUNNING))\n self.assertFalse(failed.transition(Status.COMPLETED))\n self.assertFalse(failed.transition(Status.FAILED))\n self.assertFalse(failed.transition(Status.TERMINATED))\n self.assertFalse(failed.transition(Status.SUSPENDED))\n self.assertFalse(failed.transition(Status.UNKNOWN))\n self.assertFalse(failed.transition(Status.PENDING))\n self.assertFalse(failed.transition(Status.CREATED))\n\n @patch(\"django.core.cache.cache.delete\")\n @patch(\"django.core.cache.cache.add\")\n def test_check_status_unexisting_job(self, add, delete):\n add.return_value = True\n delete.return_value = True\n command_processor(Command(CommandType.SUBMIT, str(uuid.uuid4())).to_dict())\n\n @patch(\"orchestrator.tasks.command_processor.delay\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.status\")\n def test_submitted_to_pending(self, status, command_processor):\n job = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.SUBMITTED,\n )\n status.return_value = Status.PENDING, \"\"\n command_processor.return_value = True\n check_job_status(job)\n job.refresh_from_db()\n self.assertEqual(job.status, Status.PENDING)\n\n @patch(\"orchestrator.tasks.command_processor.delay\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.status\")\n def test_pending_to_running(self, status, command_processor):\n job = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.PENDING,\n )\n status.return_value = Status.RUNNING, \"\"\n command_processor.return_value = True\n check_job_status(job)\n job.refresh_from_db()\n self.assertEqual(job.status, Status.RUNNING)\n\n @patch(\"orchestrator.tasks.command_processor.delay\")\n @patch(\"submitter.toil_submitter.ToilJobSubmitter.get_outputs\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.status\")\n @patch(\"orchestrator.tasks.set_permission\")\n def 
test_running_to_completed(self, permission, status, get_outputs, command_processor):\n job = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.RUNNING,\n )\n permission.return_value = None\n status.return_value = Status.COMPLETED, \"\"\n outputs = {\"output\": \"test_value\"}\n get_outputs.return_value = outputs, None\n command_processor.return_value = None\n check_job_status(job)\n job.refresh_from_db()\n self.assertEqual(job.status, Status.COMPLETED)\n self.assertEqual(job.outputs, outputs)\n\n @patch(\"orchestrator.tasks.command_processor.delay\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.status\")\n def test_failed(self, status, command_processor):\n job = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.RUNNING,\n )\n status.return_value = Status.FAILED, \"\"\n command_processor.return_value = True\n check_job_status(job)\n job.refresh_from_db()\n self.assertEqual(job.status, Status.FAILED)\n\n @patch(\"django.core.cache.cache.delete\")\n @patch(\"django.core.cache.cache.add\")\n @patch(\"orchestrator.tasks.command_processor.delay\")\n def test_process_jobs(self, command_processor, add, delete):\n job_pending_1 = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.PENDING,\n )\n job_created_1 = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.CREATED,\n )\n add.return_value = True\n delete.return_value = True\n\n process_jobs()\n calls = [\n call(Command(CommandType.CHECK_STATUS_ON_LSF, str(job_pending_1.id)).to_dict()),\n call(Command(CommandType.SUBMIT, str(job_created_1.id)).to_dict()),\n ]\n\n command_processor.assert_has_calls(calls, any_order=True)\n\n @patch(\"django.core.cache.cache.delete\")\n @patch(\"django.core.cache.cache.add\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.status\")\n def test_check_status_command_processor(self, status, add, delete):\n def _raise_retryable_exception():\n raise Exception()\n\n job_pending_1 = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.PENDING,\n )\n add.return_value = True\n delete.return_value = True\n status.side_effect = _raise_retryable_exception\n with self.assertRaises(RetryException):\n command_processor(Command(CommandType.CHECK_STATUS_ON_LSF, str(job_pending_1.id)).to_dict())\n\n @patch(\"django.core.cache.cache.delete\")\n @patch(\"django.core.cache.cache.add\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.terminate\")\n def test_command_processor_term(self, terminate, add, delete):\n job_pending = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.PENDING,\n )\n add.return_value = True\n delete.return_value = True\n terminate.return_value = True\n 
command_processor(Command(CommandType.TERMINATE, str(job_pending.id)).to_dict())\n job_pending.refresh_from_db()\n self.assertEqual(job_pending.status, Status.TERMINATED)\n\n @patch(\"django.core.cache.cache.delete\")\n @patch(\"django.core.cache.cache.add\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.suspend\")\n def test_command_processor_suspend(self, suspend, add, delete):\n job_running = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.RUNNING,\n )\n add.return_value = True\n delete.return_value = True\n suspend.return_value = True\n command_processor(Command(CommandType.SUSPEND, str(job_running.id)).to_dict())\n job_running.refresh_from_db()\n self.assertEqual(job_running.status, Status.SUSPENDED)\n\n @patch(\"django.core.cache.cache.delete\")\n @patch(\"django.core.cache.cache.add\")\n @patch(\"batch_systems.lsf_client.lsf_client.LSFClient.resume\")\n def test_command_processor_resume(self, suspend, add, delete):\n job_suspended = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.SUSPENDED,\n )\n add.return_value = True\n delete.return_value = True\n suspend.return_value = True\n command_processor(Command(CommandType.RESUME, str(job_suspended.id)).to_dict())\n job_suspended.refresh_from_db()\n self.assertEqual(job_suspended.status, Status.RUNNING)\n\n @patch(\"orchestrator.tasks.clean_directory\")\n def test_cleanup_folders(self, clean_directory):\n cleanup_job = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n external_id=\"ext_id\",\n status=Status.FAILED,\n )\n clean_directory.return_value = True\n cleanup_folders(str(cleanup_job.id), exclude=[])\n cleanup_job.refresh_from_db()\n self.assertIsNotNone(cleanup_job.job_store_clean_up)\n self.assertIsNotNone(cleanup_job.job_store_clean_up)\n\n def test_get_job_info_path(self):\n with self.settings(TOIL_WORK_DIR_ROOT=\"/toil/work/dir/root\"):\n res = get_job_info_path(\"job_id\")\n self.assertEqual(res, \"/toil/work/dir/root/job_id/.run.info\")\n\n def test_permission(self):\n with tempfile.TemporaryDirectory() as temp_path:\n expected_permission = \"750\"\n job_completed = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n root_dir=temp_path,\n base_dir=\"/\".join(temp_path.split(\"/\")[:-1]) + \"/\",\n root_permission=expected_permission,\n external_id=\"ext_id\",\n status=Status.COMPLETED,\n )\n set_permission(job_completed)\n current_permission = oct(os.stat(temp_path).st_mode)[-3:]\n self.assertEqual(current_permission, expected_permission)\n\n def test_permission_wrong_permission(self):\n with self.assertRaises(TypeError):\n with tempfile.TemporaryDirectory() as temp_path:\n expected_permission = \"auk\"\n job_completed = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n root_dir=temp_path,\n base_dir=\"/\".join(temp_path.split(\"/\")[:-1]) + \"/\",\n root_permission=expected_permission,\n external_id=\"ext_id\",\n status=Status.COMPLETED,\n )\n set_permission(job_completed)\n\n def 
test_permission_wrong_path(self):\n with self.assertRaises(RuntimeError):\n expected_permission = \"750\"\n job_completed = Job.objects.create(\n type=PipelineType.CWL,\n app={\n \"github\": {\n \"version\": \"1.0.0\",\n \"entrypoint\": \"test.cwl\",\n \"repository\": \"\",\n }\n },\n root_dir=\"/awk\",\n root_permission=expected_permission,\n external_id=\"ext_id\",\n status=Status.COMPLETED,\n )\n set_permission(job_completed)\n","sub_path":"orchestrator/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":18753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"468672854","text":"from subprocess import call\nimport os\nimport random\nimport time\n\ncommand = \"./main\"\nrmCommand = \"rm \"+\"inputFile.txt\"\n\nfor _ in range(5):\n\t\n\tlimit = random.randint(1,20000)\n\tmyList = []\n\tfor i in range(8192):\n\t\tmyList.append(random.randint(1,limit))\n\t\n\twith open(\"inputFile.txt\", 'w') as inputFile:\n\t\tfor i in myList:\n\t\t\tinputFile.write(\"%d\\n\"%i)\n\n\tprint(\"Expected Maximum:\",max(myList))\t\n\tos.system(command)\n\tprint(\"*******\")\n\ttime.sleep(1)\n\tos.system(rmCommand)\n\t\n\t\n\t\n","sub_path":"practice_2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"386103003","text":"'''\r\n @Author: Shuming Ma\r\n @mail: shumingma@pku.edu.cn\r\n @homepage : shumingma.com\r\n'''\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport modules\r\nimport modules.rnn as rnn\r\nfrom typing import List, Dict, Iterator\r\nfrom allennlp.nn.util import sequence_cross_entropy_with_logits\r\nfrom allennlp.models import Model\r\nfrom allennlp.data import Vocabulary\r\nfrom metrics import BLEU, SequenceAccuracy\r\n\r\n\r\nclass Seq2Seq(Model):\r\n\r\n def __init__(self, \r\n emb_size: int, \r\n hidden_size: int, \r\n enc_layers: int, \r\n dec_layers: int, \r\n dropout: float, \r\n bidirectional: bool, \r\n beam_size: int, \r\n label_smoothing: float, \r\n vocab: Vocabulary) -> None:\r\n\r\n super().__init__(vocab)\r\n\r\n self.vocab = vocab\r\n self.vocab_size = vocab.get_vocab_size('tokens')\r\n self.beam_size = beam_size\r\n self.label_smoothing = label_smoothing\r\n self._bos = self.vocab.get_token_index('@@BOS@@')\r\n self._eos = self.vocab.get_token_index('@@EOS@@')\r\n self.encoder = rnn.rnn_encoder(emb_size, hidden_size, enc_layers, self.vocab_size, dropout, bidirectional, embedding=None)\r\n self.decoder = rnn.rnn_decoder(emb_size, hidden_size, dec_layers, self.vocab_size, dropout, embedding=self.encoder.embedding)\r\n #self.generator = nn.Linear(hidden_size, self.vocab_size)\r\n #self.linear = nn.Linear(hidden_size, emb_size)\r\n #self.generator = lambda x: torch.matmul(self.linear(x), self.encoder.embedding.weight.t())\r\n self.generator = lambda x: torch.matmul(x, self.encoder.embedding.weight.t())\r\n self.accuracy = SequenceAccuracy()\r\n\r\n\r\n def _get_lengths(self, x: torch.Tensor) -> torch.Tensor:\r\n lengths = (x > 0).sum(-1)\r\n return lengths\r\n\r\n\r\n def forward(self,\r\n src: Dict[str, torch.Tensor],\r\n tgt: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\r\n \r\n src, tgt = src['tokens'], tgt['tokens']\r\n lengths = self._get_lengths(src)\r\n lengths, indices = lengths.sort(dim=0, descending=True)\r\n src = src.index_select(dim=0, index=indices)\r\n tgt = tgt.index_select(dim=0, index=indices)\r\n\r\n 
encode_outputs = self.encoder(src, lengths)\r\n decode_outputs = self.decoder(tgt[:, :-1], encode_outputs)\r\n out_logits = self.generator(decode_outputs['hidden_outputs'])\r\n targets = tgt[:, 1:].contiguous()\r\n seq_mask = (targets>0).float()\r\n\r\n self.accuracy(predictions=out_logits, gold_labels=targets, mask=seq_mask)\r\n loss = sequence_cross_entropy_with_logits(logits=out_logits, \r\n targets=targets,\r\n weights=seq_mask,\r\n average='token',\r\n label_smoothing=self.label_smoothing)\r\n outputs = {'loss': loss}\r\n\r\n return outputs\r\n \r\n\r\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\r\n return {'accuracy': self.accuracy.get_metric(reset)}\r\n\r\n\r\n def predict(self,\r\n src: Dict[str, torch.Tensor],\r\n max_decoding_step: int) -> Dict[str, torch.Tensor]:\r\n\r\n with torch.no_grad(): \r\n if self.beam_size == 1:\r\n return self.greedy_search(src, max_decoding_step)\r\n else:\r\n return self.beam_search(src, max_decoding_step)\r\n\r\n\r\n def greedy_search(self, \r\n src: Dict[str, torch.Tensor],\r\n max_decoding_step: int) -> Dict[str, torch.Tensor]:\r\n\r\n src = src['tokens']\r\n lengths = self._get_lengths(src)\r\n lengths, indices = lengths.sort(dim=0, descending=True)\r\n rev_indices = indices.sort()[1]\r\n src = src.index_select(dim=0, index=indices)\r\n bos = torch.ones(src.size(0)).long().fill_(self._bos).cuda()\r\n\r\n encode_outputs = self.encoder(src, lengths)\r\n \r\n inputs, state, contexts = [bos], encode_outputs['final_state'], encode_outputs['hidden_outputs']\r\n output_ids, attention_weights = [], []\r\n\r\n for i in range(max_decoding_step):\r\n outputs = self.decoder.decode_step(inputs[i], state, contexts)\r\n hidden_output, state, attn_weight = outputs['hidden_output'], outputs['state'], outputs['attention_weights']\r\n logits = self.generator(hidden_output)\r\n next_id = logits.max(1)[1]\r\n inputs += [next_id]\r\n output_ids += [next_id]\r\n attention_weights += [attn_weight]\r\n\r\n output_ids = torch.stack(output_ids, dim=1)\r\n attention_weights = torch.stack(attention_weights, dim=1)\r\n\r\n alignments = attention_weights.max(2)[1]\r\n output_ids = output_ids.index_select(dim=0, index=rev_indices)\r\n alignments = alignments.index_select(dim=0, index=rev_indices)\r\n outputs = {'output_ids': output_ids.tolist(), 'alignments': alignments.tolist()}\r\n\r\n return outputs\r\n\r\n\r\n def beam_search(self, \r\n src: Dict[str, torch.Tensor], \r\n max_decoding_step: int) -> Dict[str, torch.Tensor]:\r\n\r\n beam_size = self.beam_size\r\n src = src['tokens']\r\n lengths = self._get_lengths(src)\r\n batch_size = src.size(0)\r\n lengths, indices = lengths.sort(dim=0, descending=True)\r\n rev_indices = indices.sort()[1]\r\n src = src.index_select(dim=0, index=indices)\r\n\r\n encode_outputs = self.encoder(src, lengths)\r\n contexts, encState = encode_outputs['hidden_outputs'], encode_outputs['final_state']\r\n\r\n contexts = contexts.repeat(beam_size, 1, 1)\r\n decState = encState[0].repeat(1, beam_size, 1), encState[1].repeat(1, beam_size, 1)\r\n beam = [modules.beam.Beam(beam_size, bos = self._bos, eos = self._eos, n_best = 1)\r\n for _ in range(batch_size)]\r\n\r\n for i in range(max_decoding_step):\r\n\r\n if all((b.done() for b in beam)):\r\n break\r\n\r\n inp = torch.stack([b.getCurrentState() for b in beam]).t().contiguous().view(-1)\r\n\r\n outputs = self.decoder.decode_step(inp, decState, contexts)\r\n output, decState, attn = outputs['hidden_output'], outputs['state'], outputs['attention_weights']\r\n logits = 
self.generator(output)\r\n\r\n output = torch.nn.functional.log_softmax(logits, dim=-1).view(beam_size, batch_size, -1)\r\n attn = attn.view(beam_size, batch_size, -1)\r\n\r\n for j, b in enumerate(beam):\r\n b.advance(output.data[:, j], attn.data[:, j])\r\n b.beam_update(decState, j)\r\n\r\n allHyps, allScores, allAttn = [], [], []\r\n\r\n for j in rev_indices:\r\n b = beam[j]\r\n n_best = 1\r\n scores, ks = b.sortFinished(minimum=n_best)\r\n hyps, attn = [], []\r\n for i, (times, k) in enumerate(ks[:n_best]):\r\n hyp, att = b.getHyp(times, k)\r\n hyps.append(hyp)\r\n attn.append(att.max(1)[1])\r\n allHyps.append(hyps[0])\r\n allScores.append(scores[0])\r\n allAttn.append(attn[0])\r\n\r\n outputs = {'output_ids': allHyps, 'alignments': allAttn}\r\n return outputs","sub_path":"models/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"505360256","text":"import numpy as np\n\ndef default_kernel(x1, x2):\n return x1.dot(x2.T)\n\nclass PCA_Kernel:\n def __init__(self, n_components, kernel=default_kernel):\n self.d = n_components\n self.kernel = kernel\n # end\n\n def fit_transform(self, X):\n m,n = X.shape\n K = np.zeros((m,m))\n for s in range(m):\n for r in range(m):\n K[s][r] = self.kernel(X[s],X[r])\n J = np.ones((m,m)) * (1.0 / m)\n K = K - J.dot(K) -K.dot(J) + J.dot(K).dot(J)\n eigen_values, eigen_vectors = np.linalg.eig(K) \n pairs = [(eigen_values[i], eigen_vectors[:,i]) for i in range(m)]\n pairs = sorted(pairs, key = lambda pair: pair[0], reverse = True)\n Z = np.array([pairs[i][1] * np.sqrt(pairs[i][0]) for i in range(self.d)]).T\n return Z\n # end\n# end\n","sub_path":"Modules/PCA_Kernel.py","file_name":"PCA_Kernel.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"588010681","text":"import requests\nfrom bs4 import BeautifulSoup\n\ngenres=\"news\"\nsearch=\"코알라\"\n\nfor page_number in range(1, 4):\n data = requests.get(\"https://search.daum.net/search?nil_suggest=btn&w=\"+str(genres)+\"&DA=PGD&cluster=y&q=\"+str(search)+\"&p=\"+str(page_number))\n result = BeautifulSoup(data.text, \"html.parser\")\n list_news = result.select(\"div.cont_inner\")\n for news in list_news:\n title = news.select_one(\"a.f_link_b\").text\n value = news.select_one(\"p.f_eb.desc\").text\n print(title, value)\n\n","sub_path":"WEEK3_HW2.py","file_name":"WEEK3_HW2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"625876957","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AlbertHeijnSpider(scrapy.Spider):\n name = 'albert_heijn'\n item_attributes = {'brand': \"Albert Heijn\"}\n allowed_domains = ['www.ah.nl']\n\n def start_requests(self):\n url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())\n for store in stores['stores']:\n try:\n phone_number = store['phoneNumber']\n except:\n phone_number = \"\"\n yield GeojsonPointItem(\n lat=store['lat'],\n lon=store['lng'],\n addr_full=\"%s %s\" % (store['street'], store[\"housenr\"]),\n city=store['city'],\n phone=phone_number,\n state=\"\",\n postcode=store['zip'],\n ref=store['no'],\n country=\"Netherlands\",\n 
website=\"https://www.ah.nl/winkel/albert-heijn/%s/%s/%s\" % (store['city'], store['street'], store['no'])\n )\n","sub_path":"locations/spiders/albert_heijn.py","file_name":"albert_heijn.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"5794878","text":"from collections import deque\r\n\r\nfood_quantity = int(input())\r\norders = deque(int(ors) for ors in input().split(' '))\r\n\r\nprint(max(orders))\r\n\r\nfor _ in range(len(orders)):\r\n order = orders[0]\r\n if order <= food_quantity:\r\n food_quantity -= order\r\n orders.popleft()\r\n else:\r\n break\r\n\r\nif orders:\r\n print(f'Orders left:', *orders)\r\nelse:\r\n print('Orders complete')\r\n","sub_path":"PyCharm_projects_2020/Advanced/queues_stacks/fast_food.py","file_name":"fast_food.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"295135106","text":"# You can run this file to operate your own image\r\n\r\nimport torch\r\nfrom PIL import Image\r\nimport torchvision.transforms as transforms\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass ResBlock(nn.Module):\r\n \"\"\"Residual(残差) Block\"\"\"\r\n\r\n def __init__(self, inChannels, outChannels):\r\n \"\"\"Initialize residual block\"\"\"\r\n super(ResBlock, self).__init__()\r\n # The super () function is used to call the parent class(父类)\r\n # to solve the problem of multiple inheritance(多继承).\r\n\r\n # If u want get more about convolution, batch normalization and other concepts(概念), please Baidu it.\r\n\r\n self.conv1 = nn.Conv2d(inChannels, outChannels, kernel_size=1, bias=False)\r\n # Why there uses 1 x 1 kernel?\r\n # Because 1 x 1 kernel can not only deeper the feature map, but also can shallow the feature map.\r\n # More details please Baidu it.\r\n\r\n self.conv2 = nn.Conv2d(outChannels, outChannels, kernel_size=3, stride=1, padding=1, bias=False)\r\n # padding means \"expand\" the area, e.x. 
a 3x3 feature map padding=1 -> 5x5\r\n\r\n self.conv3 = nn.Conv2d(outChannels, outChannels, kernel_size=1, bias=False)\r\n self.relu = nn.PReLU()\r\n # The activation function introduces nonlinearity, letting the network model\r\n # relationships that a purely linear model cannot.\r\n\r\n def forward(self, x):\r\n \"\"\"Forward pass\"\"\"\r\n\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass Generator(nn.Module):\r\n \"\"\"Generator Model (4x)\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize Model Configuration\"\"\"\r\n\r\n super(Generator, self).__init__()\r\n # Convolution Model 1\r\n self.conv1 = nn.Conv2d(3, 64, kernel_size=9, padding=4, padding_mode='reflect', stride=1)\r\n self.relu = nn.PReLU()\r\n\r\n # Residual Model\r\n self.resBlock = self._makeLayer_(ResBlock, 64, 64, 5)\r\n\r\n # Convolution Model 2\r\n self.conv2 = nn.Conv2d(64, 64, kernel_size=1, stride=1)\r\n self.relu2 = nn.PReLU()\r\n\r\n # Subpixel convolution\r\n self.convPos1 = nn.Conv2d(64, 256, kernel_size=3, stride=1, padding=2, padding_mode='reflect')\r\n self.pixelShuffler1 = nn.PixelShuffle(2)\r\n self.reluPos1 = nn.PReLU()\r\n\r\n self.convPos2 = nn.Conv2d(64, 256, kernel_size=3, stride=1, padding=1, padding_mode='reflect')\r\n self.pixelShuffler2 = nn.PixelShuffle(2)\r\n self.reluPos2 = nn.PReLU()\r\n\r\n self.finConv = nn.Conv2d(64, 3, kernel_size=9, stride=1)\r\n\r\n def _makeLayer_(self, block, inChannels, outChannels, blocks):\r\n \"\"\"Construct Residual Block\"\"\"\r\n layers = []\r\n layers.append(block(inChannels, outChannels))\r\n\r\n for i in range(1, blocks):\r\n layers.append(block(outChannels, outChannels))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n \"\"\"Forward pass\"\"\"\r\n x = self.conv1(x)\r\n x = self.relu(x)\r\n residual = x\r\n\r\n out = self.resBlock(x)\r\n\r\n out = self.conv2(out)\r\n out += residual\r\n\r\n out = self.convPos1(out)\r\n out = self.pixelShuffler1(out)\r\n out = self.reluPos1(out)\r\n\r\n out = self.convPos2(out)\r\n out = self.pixelShuffler2(out)\r\n out = self.reluPos2(out)\r\n\r\n out = self.finConv(out)\r\n\r\n return out\r\n\r\n\r\ndevice = torch.device(\"cpu\")\r\nnet = Generator()\r\nnet.load_state_dict(torch.load(\"./model/netG_epoch_4_61.pth\", map_location=torch.device('cpu')))\r\n\r\n\r\ndef imshow(path, sourceImg=True):\r\n \"\"\"Show results\"\"\"\r\n preTransform = transforms.Compose([transforms.ToTensor()])\r\n pilImg = Image.open(path)\r\n img = preTransform(pilImg).unsqueeze(0).to(device)\r\n\r\n source = net(img)[0, :, :, :]\r\n source = source.cpu().detach().numpy() # Turn to numpy\r\n source = source.transpose((1, 2, 0)) # Transform shape\r\n source = np.clip(source, 0, 1) # Correct pictures\r\n\r\n if sourceImg:\r\n temp = np.clip(img[0, :, :, :].cpu().detach().numpy().transpose((1, 2, 0)), 0, 1)\r\n shape = temp.shape\r\n source[-shape[0]:, :shape[1], :] = temp\r\n plt.imshow(source)\r\n img = Image.fromarray(np.uint8(source * 255))\r\n img.save('./result/' + path.split('/')[-1][:-4] + '_result.jpg') # Save array as pictures\r\n return\r\n\r\n plt.imshow(source)\r\n img = Image.fromarray(np.uint8(source * 255))\r\n img.save(path[:-4] + '_result.jpg') # Save arrays as pictures\r\n\r\n\r\nimshow(\"yuzi.jpg\", 
sourceImg=True)","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"502244782","text":"\"\"\"\r\nThe n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.\r\nGiven an integer n, return all distinct solutions to the n-queens puzzle.\r\n\r\nEach solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively.\r\n\r\nFor example,\r\nThere exist two distinct solutions to the 4-queens puzzle:\r\n\r\n[\r\n [\".Q..\", // Solution 1\r\n \"...Q\",\r\n \"Q...\",\r\n \"..Q.\"],\r\n\r\n [\"..Q.\", // Solution 2\r\n \"Q...\",\r\n \"...Q\",\r\n \".Q..\"]\r\n]\"\"\"\r\nclass Solution(object):\r\n def solveNQueens(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: List[List[str]]\r\n \"\"\"\r\n def isqueens(depth,j):\r\n for i in range(depth):\r\n if board[i] == j or abs(depth - i) == abs(board[i] - j):\r\n return False\r\n return True\r\n def dfs(depth,row):\r\n if depth == n:\r\n ans.append(row);return\r\n for i in range(n):\r\n if isqueens(depth,i):\r\n board[depth]= i\r\n dfs(depth + 1,row + ['.'*i + 'Q' + '.'*(n - i - 1)])\r\n board = [-1 for i in range(n)]\r\n ans = []\r\n dfs(0,[])\r\n return ans ","sub_path":"51_N-Queens.py","file_name":"51_N-Queens.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"639630568","text":"import time\r\nimport requests\r\ndef getHTMLText(url):\r\n\ttry:\r\n\t\tr = requests.head(url, timeout =30)\r\n\t\tr.raise_for_status() # raise an exception if the response status is not 200\r\n\t\tr.encoding = r.apparent_encoding # set the encoding\r\n\t\treturn r.text\r\n\texcept:\r\n\t\treturn \"1\"\r\n\r\nif __name__ ==\"__main__\":\r\n\tprint(\"begin\")\r\n\turl = \"https://www.baidu.com\"\r\n\tstart_time = time.time()\r\n\tfor i in range(100):\r\n\t\ta = getHTMLText(url)\r\n\t\tif(a==\"1\"):\r\n\t\t\tprint(\"fetch failed\")\r\n\tend_time = time.time()\r\n\tprint(\"Fetching the page 100 times took {} seconds in total\".format(end_time-start_time))\r\n\r\n# GET takes longer than HEAD; the larger the fetched data, the longer it takes","sub_path":"requests_performance_head.py","file_name":"requests_performance_head.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"49997375","text":"import sys\nimport os\n\npwd = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(pwd+\"/..\")\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"day08.settings\")\n\nimport django\ndjango.setup()\n\nfrom cmdb.models import Host\nfrom utils.alisdk import ECSHandler\n\nALICLOUD = {\n 'access_key': ('LTAI4G1aNcr6pRTwRLEn6T9H','ZGxVgA4KjSft6aVvbqzKl1DSBazzgm'),\n 'region': 'cn-beijing'\n}\n\ndef get_hosts_from_aliyun():\n \"\"\"\n Fetch ECS instances from Aliyun and save them to the database\n :return:\n \"\"\"\n ecs = ECSHandler(*ALICLOUD['access_key'], ALICLOUD['region'])\n page = 1\n while True:\n instances, exception, next_page = ecs.get_instances(page=page, page_size=10)\n print(instances)\n print(exception)\n print(next_page)\n if instances:\n for i in instances:\n i['public_cloud'] = 'aliyun'\n print(i)\n host, created = Host.objects.update_or_create(instance_id=i['instance_id'], defaults=i)\n if created:\n print('Aliyun {} new host saved to database'.format(host.instance_name))\n else:\n print('Aliyun {} existing host updated in database'.format(host.instance_name))\n page += 1\n if not next_page:\n break\n return 
True\n\nget_hosts_from_aliyun()\n","sub_path":"day08/utils/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"396945361","text":"import os\r\nfrom os import path\r\nimport datetime\r\nfrom datetime import date, time, timedelta\r\nimport time\r\nimport math\r\nimport urllib.request\r\nimport json\r\nf = open(\"file.txt\", \"w+\");\r\nfor i in range(1, 10):\r\n f.write(\"The single digit numbers are \" + str(i) + \"\\r\\n\");\r\n\r\ndef printResults(data):\r\n theJSON = json.loads(data)\r\n if \"latitude\" in theJSON[\"iss_position\"] and \"longitude\" in theJSON[\"iss_position\"]:\r\n f.write(\"Latitude: \" + str(theJSON[\"iss_position\"][\"latitude\"]) + \" Longitude: \" + str(theJSON[\"iss_position\"][\"longitude\"]))\r\n else:\r\n print(\"Cannot print results\") \r\nprint(os.name)\r\nprint(\"Path exists: \" + str(path.exists(\"file.txt\")))\r\nprint(\"Path is a file: \" + str(path.isfile(\"file.txt\")))\r\nprint(\"Path is a directory: \" + str(path.isdir(\"file.txt\")))\r\ndef main():\r\n urldata = \"http://api.open-notify.org/iss-now.json\"\r\n\r\n webUrl = urllib.request.urlopen(urldata)\r\n print(\"result code: \" + str(webUrl.getcode()))\r\n if (webUrl.getcode() == 200):\r\n data = webUrl.read().decode(\"utf-8\")\r\n printResults(data)\r\n else:\r\n print(\"Error, cannot parse results\")\r\n\r\nif __name__ == \"__main__\":\r\n main() \r\n","sub_path":"p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"441114201","text":"import random\nfrom lutainspermon import luta\nfrom evolucaoinspermon import evolucao\ndef digite(limite):\n\tnumb = int(input(\"Enter a number from 1-{0}; if you guess it, you can flee\".format(limite)))\n\tresultadofuga = random.randint(1,limite)\n\tprint (resultadofuga)\n\n\tif numb == resultadofuga:\n\t\treturn True\n\n\telse:\n\t\treturn False\n\t\t\ndef fuga (jogador,inimigo,personagens,indiceevolucao):\t\n\tyes = [\"sim\",\"yes\",\"y\", \"y\"]\n\tno = [\"não\",\"nao\",\"n\",\"no\"]\n\tfuga = input('Do you want to flee this battle? 
')\n\t# Convert everything to lowercase\n\tfuga = fuga.lower()\n\n\tif fuga in yes:\n\t\tfugiu = digite(2)\n\n\t\tif fugiu:\n\t\t\tprint('You fled')\n\t\t\treturn\n\t\telse:\n\t\t\tprint('You failed to flee!')\n\t\t\tevo = luta(jogador,inimigo)\n\t\t\tevolucao(jogador,evo,personagens,indiceevolucao)\n\n\n\telif fuga in no:\n\t\tevo = luta(jogador,inimigo)\n\t\tevolucao(jogador,evo,personagens,indiceevolucao)\n\t\tprint(jogador)\n\t# show the fight result and go back to the start\n\n\telse:\n\t\tprint('You must answer Yes or No.')\n","sub_path":"fugainspermon.py","file_name":"fugainspermon.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"208837731","text":"import pandas as pd\nimport numpy as np\nfrom mlearn.base import get_numpy_data, predict_output\nfrom mlearn.learning_algo import gradient_descent\n\n# Load in house sales data\n# Dataset is from house sales in King County, the region where the city of Seattle, WA is located\n\ndtype_dict = {'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'yr_renovated': int,\n 'grade': int, 'price': float, 'bedrooms': float, 'zipcode': str, 'long': float, 'sqft_lot15': float,\n 'sqft_living': float, 'floors': str, 'condition': int, 'lat': float, 'date': str, 'sqft_basement': int,\n 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int}\n\n\ndf_train_data = pd.read_csv('data/kc_house_train_data.csv', dtype=dtype_dict)\ndf_test_data = pd.read_csv('data/kc_house_test_data.csv', dtype=dtype_dict)\n\n# set feature list and target variable\nlst_features = ['sqft_living', 'sqft_living15']\ntarget_variable = 'price'\n\n\n# get features matrix and output vector\nfeature_matrix, output = get_numpy_data(df_train_data, lst_features, target_variable)\n\n\n# set initial weights and learning parameters\ninitial_weights = np.array([-100000., 1., 1.])\nstep_size = 4e-12\ntolerance = 1e9\nmax_iter = 1000\n\n# train model\nweights_model = gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance, max_iter)\nprint('feature weights: %s' % weights_model)\n\n# evaluate model performance\ntest_feature_matrix, test_output = get_numpy_data(df_test_data, lst_features, target_variable)\npredictions_model = predict_output(test_feature_matrix, weights_model)\nrss_model = np.sum(np.square(test_output - predictions_model))\nprint('Residual Sum of Squares: %s' % rss_model)\n","sub_path":"tutorials/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"41236281","text":"\"\"\"Utility functions for all database interactions.\n\nNone of the functions defined herein are \"connection safe\", and may\ncause issues when asynchronously called in rapid succession.\n\nBe sure to use a pooled connection or guard database access with a\nlock. 
These functions only act as helpers\nto keep SQL interactions separate from Python objects.\n\n\"\"\"\n\nimport datetime\nimport logging\n\nimport asyncpg\n\nlog = logging.getLogger('listener')\n\n\nasync def base_control(timestamp: datetime.datetime, base_id: int,\n new_faction_id: int, old_faction_id: int,\n server_id: int, continent_id: int,\n conn: asyncpg.Connection) -> bool:\n \"\"\"Dispatch a ``BaseControl`` blip to the database.\"\"\"\n try:\n await conn.execute( # type: ignore\n \"\"\"--sql\n INSERT INTO \"event\".\"BaseControl\" (\n \"timestamp\", \"server_id\", \"continent_id\", \"base_id\",\n \"old_faction_id\", \"new_faction_id\"\n )\n VALUES (\n $1, $2, $3, $4, $5, $6\n );\"\"\",\n timestamp, server_id, continent_id, base_id,\n old_faction_id, new_faction_id)\n except asyncpg.exceptions.ForeignKeyViolationError as err:\n log.debug('Ignored FK violation: %s', err)\n return False\n return True\n\n\nasync def player_blip(timestamp: datetime.datetime, player_id: int,\n base_id: int, server_id: int, continent_id: int,\n conn: asyncpg.Connection) -> bool:\n \"\"\"Dispatch a ``PlayerBlip`` to the database.\"\"\"\n try:\n await conn.execute( # type: ignore\n \"\"\"--sql\n INSERT INTO \"event\".\"PlayerBlip\" (\n \"timestamp\", \"server_id\", \"continent_id\", \"player_id\", \"base_id\"\n )\n VALUES (\n $1, $2, $3, $4, $5\n );\"\"\",\n timestamp, server_id, continent_id, player_id, base_id)\n except asyncpg.exceptions.ForeignKeyViolationError as err:\n log.debug('Ignored FK violation: %s', err)\n return False\n return True\n\n\nasync def relative_player_blip(timestamp: datetime.datetime, player_a_id: int,\n player_b_id: int, server_id: int,\n continent_id: int, conn: asyncpg.Connection\n ) -> bool:\n \"\"\"Dispatch a ``RelativePlayerBlip`` to the database.\"\"\"\n try:\n await conn.execute( # type: ignore\n \"\"\"--sql\n INSERT INTO \"event\".\"RelativePlayerBlip\" (\n \"timestamp\", \"server_id\", \"continent_id\",\n \"player_a_id\", \"player_b_id\"\n )\n VALUES (\n $1, $2, $3, $4, $5\n );\"\"\",\n timestamp, server_id, continent_id, player_a_id, player_b_id)\n except asyncpg.exceptions.ForeignKeyViolationError as err:\n log.debug('Ignored FK violation: %s', err)\n return False\n return True\n\n\nasync def player_logout(timestamp: datetime.datetime, player_id: int,\n conn: asyncpg.Connection) -> bool:\n \"\"\"Dispatch a ``PlayerLogout`` blip to the database.\"\"\"\n await conn.execute( # type: ignore\n \"\"\"--sql\n INSERT INTO \"event\".\"PlayerLogout\" (\n \"timestamp\", \"player_id\"\n )\n VALUES (\n $1, $2\n );\"\"\",\n timestamp, player_id)\n return True\n","sub_path":"apl_listener/_dispatch.py","file_name":"_dispatch.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"522885693","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom latexify import latexify\nfrom matplotlib import patches\n\n# Configure latexify for two columns\nlatexify(fig_width=5, fontawesome=True, siunitx=True)\n\n# FROM OSOAA manual\n# 4.3.3.2 Sea molecule profile\ng = 0.069\n# Plot\nx = np.linspace(400, 700)\nbb = 0.00288*(x/500)**(-4.32)\nplt.plot(x, bb)\n\nplt.ylabel(r\"$b_b$ [\\si{\\per\\meter}]\")\nplt.xlabel(r\"$\\lambda$ [\\si{\\nano\\meter}]\")\nplt.tight_layout()\nplt.savefig(\"curso-ico/figs/fig:t2-water-bb.pdf\")\nplt.savefig(\"curso-ico/figs/fig:t2-water-bb.png\", dpi=300)\nplt.close()\n\n# Plot\na_data = np.loadtxt(\"data/OSOAA_SEA_MOL_COEFFS_JUNE_2013.txt\")\na = np.interp(x, 
a_data[:,0], a_data[:,1])\nplt.plot(x, a)\nplt.yscale(\"log\")\nplt.yticks([0.001,0.01,0.1,1,10,100], [0.001,0.01,0.1,1,10,100])\nplt.ylabel(r\"$a$ [\\si{\\per\\meter}]\")\nplt.xlabel(r\"$\\lambda$ [\\si{\\nano\\meter}]\")\nplt.tight_layout()\nplt.savefig(\"curso-ico/figs/fig:t2-water-a.pdf\")\nplt.savefig(\"curso-ico/figs/fig:t2-water-a.png\", dpi=300)\nplt.close()\n\n# Plot\nplt.plot(x, g*bb/(a+bb))\nplt.ylabel(r\"$R$ \")\nplt.xlabel(r\"$\\lambda$ [\\si{\\nano\\meter}]\")\nplt.tight_layout()\nplt.savefig(\"curso-ico/figs/fig:t2-water-R.pdf\")\nplt.savefig(\"curso-ico/figs/fig:t2-water-R.png\", dpi=300)\nplt.close()","sub_path":"curso-ico/fig:t2-water.py","file_name":"fig:t2-water.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"201421264","text":"import asyncio\n\n\nasync def wait(seconds):\n \"\"\" Coroutine -- unit of work like thread or process. \"\"\"\n await asyncio.sleep(seconds) # asyncio.sleep is awaitable object\n print(f'waited {seconds} seconds')\n\n\nasync def main():\n await wait(1) # stop and wait coro 1 second. Total - 1 sec\n await wait(2) # stop and wait coro 2 seconds. Total - 1 + 2 = 3 secs\n await wait(3) # stop and await coro 3 seconds. Total - 1 + 2 + 3 = 6 secs\n # main coroutine takes 6 seconds to execute\n\nif __name__ == '__main__':\n event_loop = asyncio.new_event_loop()\n event_loop.run_until_complete(main())\n","sub_path":"examples/sleep_1.py","file_name":"sleep_1.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"43698556","text":"import numpy as np\n\n# To retrieve our MPI Executor\nfrom libensemble.executors.executor import Executor\n\n# Optional status codes to display in libE_stats.txt for each gen or sim\nfrom libensemble.message_numbers import TASK_FAILED, WORKER_DONE\n\n# Optional - to print GPU settings\nfrom libensemble.tools.test_support import check_gpu_setting\n\n\ndef run_forces(H, persis_info, sim_specs, libE_info):\n \"\"\"Launches the forces MPI app and auto-assigns ranks and GPU resources.\n\n Assigns one MPI rank to each GPU assigned to the worker.\n \"\"\"\n\n calc_status = 0\n\n # Parse out num particles, from generator function\n particles = str(int(H[\"x\"][0][0]))\n\n # app arguments: num particles, timesteps, also using num particles as seed\n args = particles + \" \" + str(10) + \" \" + particles\n\n # Retrieve our MPI Executor\n exctr = Executor.executor\n\n # Submit our forces app for execution.\n task = exctr.submit(\n app_name=\"forces\",\n app_args=args,\n auto_assign_gpus=True,\n match_procs_to_gpus=True,\n )\n\n # Block until the task finishes\n task.wait()\n\n # Optional - prints GPU assignment (method and numbers)\n check_gpu_setting(task, assert_setting=False, print_setting=True)\n\n # Stat file to check for bad runs\n statfile = \"forces.stat\"\n\n # Try loading final energy reading, set the sim's status\n try:\n data = np.loadtxt(statfile)\n final_energy = data[-1]\n calc_status = WORKER_DONE\n except Exception:\n final_energy = np.nan\n calc_status = TASK_FAILED\n\n # Define our output array, populate with energy reading\n output = np.zeros(1, dtype=sim_specs[\"out\"])\n output[\"energy\"] = final_energy\n\n # Return final information to worker, for reporting to manager\n return output, persis_info, 
calc_status\n","sub_path":"libensemble/tests/scaling_tests/forces/forces_gpu/forces_simf.py","file_name":"forces_simf.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"317041602","text":"# -*- coding: utf-8 -*-\n\"\"\"\n data.version1.BeaconV1\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n BeaconV1 class\n\n :copyright: Conceptual Vision Consulting LLC 2018-2021, see AUTHORS for more details.\n :license: MIT, see LICENSE for more details.\n\"\"\"\nfrom typing import Any\n\nfrom pip_services3_commons.data import IStringIdentifiable\n\n\nclass BeaconV1(IStringIdentifiable):\n def __init__(self, id: str = None, site_id: str = None, type: str = None, udi: str = None, label: str = None,\n center: Any = None, radius: float = None):\n super(BeaconV1, self).__init__()\n self.id = id\n self.site_id = site_id\n self.type = type\n self.udi = udi\n self.label = label\n self.center = center\n self.radius = radius\n","sub_path":"service_beacons_python/data/version1/BeaconV1.py","file_name":"BeaconV1.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"42887704","text":"from __future__ import absolute_import\n\nimport logging, argparse, apache_beam as beam\nfrom google.cloud import datastore, storage\nfrom requests import get, post\nfrom pytz import timezone, datetime\nimport json, time\n\ndef parse_pubsub(message):\n from google.cloud import datastore, storage\n from requests import get, post\n from pytz import timezone, datetime\n import json, time\n\t\n logging.getLogger().info(\"Starting for : \" + message)\n print(message)\n client = datastore.Client()\n storage_client = storage.Client()\n\n entity = client.get(client.key(\"webapi\", message.strip()))\n #Don't process the entity if we are already processing it\n if entity[\"Running\"]:\n return\n #mark the entity processing to true\n entity[\"Running\"] = True\n #update entity\n client.put(entity)\n try:\n while True:\n #fetch the entity using its key, which is table name\n entity = client.get(client.key(\"webapi\", message.strip()))\n url = entity[\"URL\"]\n date = datetime.datetime.strptime(entity[\"FromDate\"], \"%d/%m/%Y\")\n todate = date + datetime.timedelta(days=1)\n bucket = entity[\"Bucket\"]\n output_folder = entity[\"OutputFolder\"]\n table_name = entity[\"TableName\"]\n #Set output folder for data file\n output_file = output_folder + \"/\" + table_name + \"/\" + date.strftime(\"%Y/%m/%d\") + \".csv\"\n #Set output folder for schema file\n schema_file = \"schemas/\" + output_folder + \"/\" + table_name + \".csv\"\n\n data = { \"FromDate\" : date.strftime(\"%m/%d/%Y %H:%M:%S\"), \n \"ToDate\" : todate.strftime(\"%m/%d/%Y %H:%M:%S\"), \n \"CompressionType\" : \"A\" }\n\n logging.getLogger().info(\"POST Payload : \" + json.dumps(data))\n #Call the web api\n res = post(url, json=data)\n #If api call fails\n if res.status_code != 200:\n entity[\"Running\"] = False\n client.put(entity)\n break\n #If api call is successful\n lines = []\n columns = \"\"\n # join values by comma to create csv\n for line in res.json():\n value = \",\".join(map(str, line.values()))\n lines.append(value)\n columns = \",\".join(map(str, line.keys()))\n\n bucket = storage_client.get_bucket(bucket)\n # create gcs object\n blob = bucket.blob(output_file)\n #Upload data file to gcs bucket\n blob.upload_from_string(\"\\n\".join(lines))\n #Upload schema file to gcs bucket\n if 
columns:\n blob = bucket.blob(schema_file)\n blob.upload_from_string(columns)\n #update FromDate column for the entity\n if datetime.datetime.now(timezone(\"Asia/Kolkata\")).date() >= todate.date():\n entity[\"FromDate\"] = unicode(todate.strftime(\"%d/%m/%Y\"))\n client.put(entity)\n else:\n entity[\"Running\"] = False\n client.put(entity)\n break\n except:\n entity[\"Running\"] = False\n client.put(entity)\n\n\ndef run(argv=None):\n parser = argparse.ArgumentParser()\n known_args, pipeline_args = parser.parse_known_args(argv)\n client = datastore.Client()\n #fetch all entities from datastore, each entity contains information about a table\n entities = list(client.query(kind=\"webapi\").fetch())\n names = [str(entity.key.name) for entity in entities]\n with beam.Pipeline(argv=pipeline_args) as p:\n\t lines = ( p | beam.Create(names) | beam.Map(parse_pubsub))\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n","sub_path":"batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"108086317","text":"import json\nfrom pymongo import MongoClient\nfrom bson import ObjectId\nfrom aggregators import get_security_enabled_fields, redact_field\nfrom flask import g, current_app, abort\n\n\ndef check_perms_for_patch(resource, request, lookup):\n \"\"\"\n Checks the object in the database to ensure that the user has permission to modify the existing data.\n Does not check user's permissions for new data - use pre-insert methods to check new data.\n \"\"\"\n rsc = resource[:-6] # take off \"_write\" from the end of the string\n current_app.logger.info('Checking permissions for user {} to update {} object.'.format(g.username, rsc))\n oid = request.url.split('{}/'.format(resource))[1]\n\n updates = json.loads(request.data)\n\n pipeline = []\n pipeline.append({\"$match\": {\"_id\": ObjectId(oid=oid)}})\n pipeline = redact_field('', pipeline) # Security at the top level\n sec_enabled_fields = get_security_enabled_fields(rsc)\n for key in updates.keys():\n if key in sec_enabled_fields:\n pipeline = redact_field(key, pipeline)\n\n # Get object from DB, confirm user has permissions to update the fields in the PATCH\n coll = MongoClient(current_app.config.get('MONGO_HOST'), 27017)[current_app.config.get('MONGO_DBNAME')][rsc]\n\n agg_result = list(coll.aggregate(pipeline))\n if len(agg_result) > 0:\n agg_result = agg_result[0]\n else:\n agg_result = {}\n\n # Check document level\n if \"false\" in agg_result.get('cat_matches') or \"false\" in agg_result.get('diss_matches'):\n current_app.logger.info('User {} has insufficient permissions to modify data in the {} object'.format(\n g.username, rsc))\n abort(403)\n\n # Check field level for all requested fields\n for key in updates.keys():\n val = agg_result.get(key)\n if type(val) == dict:\n if \"false\" in val.get('cat_matches', []):\n current_app.logger.info('User {} has insufficient permissions to modify data in the {} object'.format(\n g.username, rsc))\n abort(403)\n if \"false\" in val.get('diss_matches', []):\n current_app.logger.info('User {} has insufficient permissions to modify data in the {} object'.format(\n g.username, rsc))\n abort(403)\n current_app.logger.info('User {} has sufficient permissions to modify data in the {} object.'.format(\n g.username, 
rsc))\n","sub_path":"src/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"159473220","text":"# -*- coding: utf-8 -*-\nimport logging\nimport ujson\nimport pickle\n\nfrom pathlib import Path\nfrom dotenv import find_dotenv, load_dotenv\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\n\nRANDOM_SEED = 42\n\nPATH_DATA = \"../data/\"\n\n\ndef main():\n \"\"\"\n Runs code to turn interim data into features ready to be used by ML models\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Creating features from interim data')\n\n with open(PATH_DATA+\"interim/\"+\"positive_text.json\") as f:\n positive_text = ujson.load(f)\n\n with open(PATH_DATA+\"interim/\"+\"negative_text.json\") as f:\n negative_text = ujson.load(f)\n\n X = positive_text + negative_text\n y = [1 for _ in positive_text] + [0 for _ in negative_text]\n\n vectorizer = TfidfVectorizer(analyzer=\"word\", ngram_range=(1, 3), stop_words=\"english\", lowercase=True)\n X_tfidf = vectorizer.fit_transform(X)\n\n X_train, X_test, y_train, y_test = train_test_split(X_tfidf, y, test_size=0.2, random_state=RANDOM_SEED)\n\n with open(\"../data/processed/X_train.pickle\", \"wb\") as f:\n pickle.dump(X_train, f)\n with open(\"../data/processed/X_test.pickle\", \"wb\") as f:\n pickle.dump(X_test, f)\n with open(\"../data/processed/y_train.pickle\", \"wb\") as f:\n pickle.dump(y_train, f)\n with open(\"../data/processed/y_test.pickle\", \"wb\") as f:\n pickle.dump(y_test, f)\n with open(\"../models/vectorizer_tfidf.pickle\", \"wb\") as f:\n pickle.dump(vectorizer, f)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = Path(__file__).resolve().parents[2]\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n","sub_path":"ml_endpoints/researchablyai/features/build_features.py","file_name":"build_features.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"601946754","text":"# k번째 수\n\narray = [1,5,2,6,3,7,4]\nprint(sorted(array[2-1:5])[3-1])\n\ndef solution(array, commands):\n answer = []\n for n in commands:\n answer.append(sorted(array[n[0]-1:n[1]])[n[2]-1])\n\n return answer","sub_path":"algorithm_basic/weeklyquiz/final/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"137016608","text":"from django.views.decorators.http import require_POST\nfrom django.http import HttpResponse\n\nimport json\nimport logging\n\nfrom slackauth.decorators import slack_request\nfrom .tasks import handle_message\n\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@slack_request\ndef slack_index(request):\n req = json.loads(request.body)\n req_type = req.get('type')\n if req_type == 'url_verification':\n return HttpResponse(req.get('challenge'))\n elif req_type == 'event_callback':\n event_type = req.get('event').get('type')\n if event_type == 'message':\n handle_message.delay(req)\n return HttpResponse()\n else:\n 
logger.info('Unhandled Slack event')\n else:\n logger.info('Unhandled Slack request type')\n return HttpResponse()\n","sub_path":"chatbot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"148007671","text":"from pprint import pprint as pp\nfrom datetime import datetime\n\nfrom .csv_reader import read_csv\nfrom ..config import config\n\n\nclass BinkMain:\n def __init__(self):\n csvfile_path = config.read_config().csvfile_path\n self.csv_dict = read_csv(csvfile_path)\n self._action = {\n 'all': self._perform_all_actions,\n 'top5': self._get_top_5_current_rents,\n 'lease_years_25': self._get_masts_with_lease_years_25,\n 'tenant_mast_count': self._get_tenant_mast_count,\n 'lease_start_date': self._get_rentals_wth_lease_mentioned_start_date,\n }\n\n def _get_top_5_current_rents(self):\n \"\"\"Produce a list sorted by “Current Rent” in ascending order\n Obtain the first 5 items from the resultant list and output to the console\n \"\"\"\n print('Task 1\\n')\n rent_dict = {k: float(v['Current Rent']) for (k, v) in self.csv_dict.items()}\n sorted_rent_dict = {k: v for k, v in sorted(rent_dict.items(), key=lambda item: item[1])}\n result = [(k, v) for k, v in sorted_rent_dict.items()][:5]\n print(result)\n\n def _get_masts_with_lease_years_25(self):\n print('Task 2\\n')\n masts_dict = {k: v for k, v in self.csv_dict.items() if v['Lease Years'] == '25'}\n pp(masts_dict)\n total_rent = sum([float(v['Current Rent']) for v in masts_dict.values()])\n print('Total Rent: {}'.format(total_rent))\n\n def _get_tenant_mast_count(self):\n print('Task 3\\n')\n list_of_values = [v for v in self.csv_dict.values()]\n for item in list_of_values:\n unnormalised_tenant_name = item['Tenant Name']\n if 'Everything' in unnormalised_tenant_name:\n item['Tenant Name'] = 'Everything Everywhere Ltd & Hutchinson 3G UK'\n if 'Arqiva' in unnormalised_tenant_name:\n item['Tenant Name'] = 'Arqiva Services ltd'\n if 'Vodafone' in unnormalised_tenant_name:\n item['Tenant Name'] = 'Vodafone Ltd'\n if 'O2' in unnormalised_tenant_name:\n item['Tenant Name'] = 'O2'\n\n from collections import Counter\n c = Counter()\n for item in list_of_values:\n c[item['Tenant Name']] += 1\n pp(dict(c))\n\n def _get_rentals_wth_lease_mentioned_start_date(self):\n print('Task 4\\n')\n result = dict()\n for k, v in self.csv_dict.items():\n lease_start_date_object = datetime.strptime(v['Lease Start Date'], '%d %b %Y')\n v['Lease Start Date'] = lease_start_date_object.strftime('%d/%m/%Y')\n if datetime(1999, 6, 1) <= lease_start_date_object <= datetime(2007, 5, 31):\n result[k] = v\n\n pp(result)\n\n def _perform_all_actions(self):\n self._get_top_5_current_rents()\n self._get_masts_with_lease_years_25()\n self._get_tenant_mast_count()\n self._get_rentals_wth_lease_mentioned_start_date()\n\n def run(self, args):\n actions_to_perform = [key for key, value in vars(args).items() if value is True]\n if not actions_to_perform:\n self._perform_all_actions()\n else:\n for action in actions_to_perform:\n self._action[action]()\n","sub_path":"core/bink_main.py","file_name":"bink_main.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"75393141","text":"import time\nimport threading\n\ndef func():\n print('func start:',time.ctime())\n time.sleep(3)\n print('func end',time.ctime())\n return None\n\nprint('main start')\nt = 
threading.Thread(target=func,args=())\nt.daemon = True\nt.start()\ntime.sleep(2)\nprint('main end')","sub_path":"tulingxueyuan/多线程/p02.py","file_name":"p02.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"495045205","text":"import torch.nn as nn\nfrom model.backbone import resnet34\nfrom . import TFEncoder\nfrom . import TFDecoder\n\n\nclass NRTR(nn.Module):\n def __init__(self,\n train_cfg=None,\n test_cfg=None,\n max_seq_len=40):\n super().__init__()\n\n self.backbone = resnet34()\n self.encoder = TFEncoder()\n self.decoder = TFDecoder()\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.max_seq_len = max_seq_len\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone.\"\"\"\n x = self.backbone(img)\n return x\n\n def forward_train(self, img, img_metas):\n \"\"\"\n Args:\n img (tensor): Input images of shape (N, C, H, W).\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): A list of image info dict where each dict\n contains: 'img_shape', 'filename', and may also contain\n 'ori_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n :class:`mmdet.datasets.pipelines.Collect`.\n\n Returns:\n dict[str, tensor]: A dictionary of loss components.\n \"\"\"\n feat = self.extract_feat(img)\n\n gt_labels = [img_meta['text'] for img_meta in img_metas]\n\n targets_dict = self.label_convertor.str2tensor(gt_labels)\n\n out_enc = None\n if self.encoder is not None:\n out_enc = self.encoder(feat, img_metas)\n\n out_dec = self.decoder(\n feat, out_enc, targets_dict, img_metas, train_mode=True)\n\n loss_inputs = (\n out_dec,\n targets_dict,\n )\n losses = self.loss(*loss_inputs)\n\n return losses\n\n def simple_test(self, img, img_metas, **kwargs):\n \"\"\"Test function with test time augmentation.\n\n Args:\n imgs (torch.Tensor): Image input tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n list[str]: Text label result of each image.\n \"\"\"\n feat = self.extract_feat(img)\n\n out_enc = None\n if self.encoder is not None:\n out_enc = self.encoder(feat, img_metas)\n\n out_dec = self.decoder(\n feat, out_enc, None, img_metas, train_mode=False)\n\n # label_indexes, label_scores = self.label_convertor.tensor2idx(\n # out_dec, img_metas)\n # label_strings = self.label_convertor.idx2str(label_indexes)\n #\n # # flatten batch results\n # results = []\n # for string, score in zip(label_strings, label_scores):\n # results.append(dict(text=string, score=score))\n\n return out_dec\n\n def forward(self, img, **kwargs):\n return self.simple_test(img, None, **kwargs)\n","sub_path":"benchmarks/models/nrtr.py","file_name":"nrtr.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"374019904","text":"import addressbook3_pb2 as pb\nimport sys\n\n\ndef PromptForAddress(person):\n\tperson.id = int(input(\"Enter person ID number: \"))\n\tperson.name = input(\"Enter name: \")\n\n\temail = input(\"Enter email address (blank for none): \")\n\tif email != \"\":\n\t\tperson.email = email\n\n\twhile True:\n\t\tpnumber = input(\"Enter a phone number (or leave blank to finish): \")\n\t\tif pnumber == \"\":\n\t\t\tbreak\n\n\t\tphone_number = person.phones.add()\n\t\tphone_number.number = pnumber\n\n\t\ttype = input(\"Is this a mobile, home, or work phone? 
\")\n\t\tif type == \"mobile\":\n\t\t\tphone_number.type = pb.Person.MOBILE\n\t\telif type == \"home\":\n\t\t\tphone_number.type = pb.Person.HOME\n\t\telif type == \"work\":\n\t\t\tphone_number.type = pb.Person.WORK\n\t\telse:\n\t\t\tprint(\"unknown phone type leaving as default value\")\n\n\n# Main procedure: Reads the entire address book from a file\n# adds one person based on user input, then writes it back out to the same file\n\nif len(sys.argv) != 2:\n\tprint(\"usage: \", sys.argv[0], \"ADDRESS_BOOK_FILE\")\n\tsys.exit(-1)\n\naddress_book = pb.AddressBook()\n\n# Read the existing address book.\ntry:\n\tf = open(sys.argv[1], \"rb\")\n\taddress_book.ParseFromString(f.read())\n\tf.close()\nexcept IOError:\n\tprint(sys.argv[1] + \": Could not open file. creating a new one.\")\n\n# Add an address\nPromptForAddress(address_book.people.add())\n\n\n# Write the new address book back to disk\nf = open(sys.argv[1], \"ab\")\nf.write(address_book.SerializeToString())\nf.close()\n","sub_path":"Tutorials/code/protobuf_example_addressbook/encoding_message.py","file_name":"encoding_message.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"412521186","text":"S = input().strip()\nstate1 = state2 = state3 = S[0]\np1 = p2 = p3 = 0\nfor pref in S[1:]:\n if pref != state1:\n p1 += 1\n state1 = pref\n if pref != 'U':\n p1 += 1\n state1 = 'U'\n if pref != state2:\n p2 += 1\n state2 = pref\n if pref != 'D':\n p2 += 1\n state2 = 'D'\n\n if pref != state3:\n p3 += 1\n state3 = pref\nprint(\"%s\\n%s\\n%s\" %(p1, p2, p3))","sub_path":"python_problems_competitive/toilet.py","file_name":"toilet.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"119919421","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport datetime\nimport json\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.settings import Settings\nfrom scrapy import Item, Field\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import TakeFirst\nfrom random import shuffle\nfrom os.path import join, abspath\n\ntry:\n import urlparse\n from urllib import urlencode\nexcept: # For Python 3\n import urllib.parse as urlparse\n from urllib.parse import urlencode\n\nres_per_second = 10\nlimit_per_day = 500\ngeogle_api_key = \"\"\ntarget_today = []\noutput_directory = './'\n\ndef folder_file_to_abs_path(*paths):\n return abspath(join('./',*paths))\n\ndef load_csv_col_to_list(folder_name,file_name,col_name):\n path_csv = folder_file_to_abs_path(folder_name,file_name)\n df = pd.DataFrame.from_csv(path_csv,sep=',',index_col=None)\n full_target_list = df[col_name].dropna().unique().tolist()\n shuffle(full_target_list)\n return full_target_list\n\ndef url_with_params(url,params={}):\n url_parts = list(urlparse.urlparse(url))\n query = dict(urlparse.parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n return urlparse.urlunparse(url_parts)\n\ndef address_to_url(address, api=geogle_api_key):\n endpoint = \"https://maps.googleapis.com/maps/api/geocode/json\"\n parms = [('address', address.encode(\"utf-8\")), ('sensor', 'false'), ('key', api)]\n return url_with_params(endpoint,dict(parms))\n\n\nclass PropertiesItem(Item):\n # Primary fields\n address = Field(output_processor=TakeFirst())\n\n # Calculated fields\n geocode = Field(output_processor=TakeFirst())\n\n # Housekeeping fields\n url = 
Field(output_processor=TakeFirst())\n spider = Field(output_processor=TakeFirst())\n date = Field(output_processor=TakeFirst())\n\nclass JsonWriterPipeline(object):\n def __init__(self):\n # open in text append mode: json.dumps returns str, which a binary ('ab') handle would reject on Python 3\n self.file = open(folder_file_to_abs_path(output_directory,'geoitems.jl'),\n 'a')\n \n def close_spider(self, spider):\n self.file.close()\n \n def process_item(self, item, spider):\n line = json.dumps(dict(item)) + \"\\n\"\n self.file.write(line)\n return item\n\nclass GeocodeSpider(scrapy.Spider):\n name = \"geocode\"\n\n def start_requests(self):\n address_list = target_today\n \n urls = list(map(address_to_url,address_list))\n for url, address in zip(urls, address_list):\n request = scrapy.Request(url=url, callback=self.parse)\n request.meta['address'] = address\n yield request\n\n def parse(self, response):\n l = ItemLoader(item=PropertiesItem(), response=response)\n \n address = response.meta['address']\n content = json.loads(response.body_as_unicode())\n if content['status'] == 'OK':\n geo = content['results'][0][\"geometry\"][\"location\"]\n \n l.add_value('address', address)\n l.add_value('geocode', {\"lat\": geo[\"lat\"], \"lon\": geo[\"lng\"]})\n \n l.add_value('url', response.url)\n l.add_value('spider', self.name)\n l.add_value('date', str(datetime.datetime.now()))\n \n yield l.load_item()\n \n else:\n errmsg = 'Unexpected status=\"%s\" for address=\"%s\"' %(content['status'], address)\n self.logger.error(errmsg)\n print(errmsg)\n\ndef initialize_scrapy_settings():\n # default settings from scrapy\n settings = Settings()\n \n settings.set(\"ROBOTSTXT_OBEY\", False)\n #Control by download limit per second\n settings.set(\"DOWNLOAD_DELAY\", 1. / float(res_per_second))\n settings.set('ITEM_PIPELINES', {'__main__.JsonWriterPipeline': 100})\n return settings\n\ndef run_scrapy():\n settings = initialize_scrapy_settings()\n process = CrawlerProcess(settings)\n process.crawl(GeocodeSpider)\n process.start()\n\ndef main(input_folder,input_file,input_col_name,output_folder):\n global target_today, output_directory\n target_today = load_csv_col_to_list(input_folder,\n input_file,\n input_col_name)[:limit_per_day]\n output_directory = folder_file_to_abs_path(output_folder)\n run_scrapy()\n\nif __name__ == \"__main__\":\n main(input_folder='datasets',\n input_file='address.csv',\n input_col_name='full_address', \n output_folder='')","sub_path":"geocoder.py","file_name":"geocoder.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"353244915","text":"import pyimgsaliency as psal\nimport cv2\nimport sys\n\n# path to the image\ndef photo2sketch(inPath, outPath):\n\trbd = psal.get_saliency_rbd(inPath).astype('uint8')\n\tth = 120\n\tbinary_sal = (rbd > th)\n\tcv2.imwrite(outPath, 255 *binary_sal.astype('uint8'))\n\nif __name__ == '__main__':\n\ta1, a2 = sys.argv[1], sys.argv[2]\n\tphoto2sketch(a1, a2)","sub_path":"project/pipeline/non-cpp/python/photo2sketch/imgsaliency/pyimgsaliency/photo2sketch.py","file_name":"photo2sketch.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
{"seq_id":"594567388","text":"from pandas import *\nimport numpy\n\ndef imputation(filename):\n # Read the CSV data. Impute any missing values in the Lahman baseball\n # data set's 'weight' column by setting them equal to the average weight.\n\n # note: 'from pandas import *' does not bind the name 'pandas', so call read_csv directly\n baseball = read_csv(filename)\n \n 
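# mean imputation: fill each missing weight with the average of the observed weights\n 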
baseball['weight']=baseball['weight'].fillna(numpy.mean(baseball['weight']))\n\n return baseball\n\nif __name__ == \"__main__\":\n input_filename = \"Master.csv\"\n output_filename = \"output.csv\"\n baseball_weight = imputation(input_filename)\n baseball_weight.to_csv(output_filename)\n","sub_path":"intro_to_ds_programming_files/lesson_2/imputation_quiz/imputation.py","file_name":"imputation.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"627850573","text":"import numpy as np\nimport pandas as pd\nfrom math import sqrt\n\ndata = pd.read_csv(\"data.csv\")\nx = data.iloc[:,:-1].values #values\ny = data.iloc[:,1].values\n\nfrom sklearn.model_selection import train_test_split #moddel\nX_train,X_test,Y_train,Y_test = train_test_split(x,y,test_size = 1/3,random_state = 1)#test_size,random_state\n\nfrom sklearn.linear_model import LinearRegression as lr #linear\nregression = lr()\nregression = regression.fit(X_train,Y_train)\nY_pred = regression.predict(X_test)\n\nfrom sklearn.metrics import r2_score,mean_squared_error#r2_score\n\nprint(\"Accuracy: \",r2_score(Y_test,Y_pred))\n\nprint(regression.coef_,\"*x+\",regression.intercept_)\n\nrmse = sqrt(mean_squared_error(Y_test,Y_pred))\nprint(\"Root mean Squared error: \",rmse)\n\ninputval = input(\"Enter no of hours: \")\ninputval = np.array(inputval,dtype = np.float64).reshape(-1,1)#dtype\nprint(\"Risk = \",regression.predict(inputval))\n\nimport matplotlib.pyplot as plt\nplt.scatter(X_train,Y_train,color = \"black\")\nplt.plot(X_test,Y_pred,color=\"blue\")\nplt.title(\"Train data\")\nplt.xlabel(\"No of hours\")\nplt.ylabel(\"Risk\")\nplt.show()","sub_path":"ML/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"130659762","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport requests\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, redirect\nfrom django.template import RequestContext\nfrom django.http import HttpResponse, JsonResponse, Http404\nfrom django.forms.models import model_to_dict\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom .models import Author, Question, Answer, Collection, CollectionAnswer\n\nPERP_PAGE_OPRIONS = [10, 20, 50]\n\n\ndef handler404(request):\n response = render('404.html', {},\n context_instance=RequestContext(request))\n response.status_code = 404\n return response\n\n\ndef handler500(request):\n response = render('500.html', {},\n context_instance=RequestContext(request))\n response.status_code = 500\n return response\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef questions(request):\n question_query = Question.objects.all()\n paginator = Paginator(question_query, 25)\n\n page = request.GET.get('page')\n try:\n questions = paginator.page(page)\n except PageNotAnInteger:\n questions = paginator.page(1)\n except EmptyPage:\n questions = paginator.page(paginator.num_pages)\n\n content = {\n 'questions': questions\n }\n return render(request, 'question/index.html', content)\n\n\ndef question_detail(request, question_id):\n question = Question.objects.filter(id=question_id).first()\n if not question:\n raise Http404\n\n answer_query = Answer.objects.filter(question_id=question_id).all()\n paginator = Paginator(answer_query, 25)\n\n page = request.GET.get('page')\n try:\n answers = 
paginator.page(page)\n except PageNotAnInteger:\n answers = paginator.page(1)\n except EmptyPage:\n answers = paginator.page(paginator.num_pages)\n\n content = {\n 'question': question,\n 'answers': answers,\n }\n return render(request, 'question/detail.html', content)\n\n\ndef collections(request):\n collection_query = Collection.objects.all()\n paginator = Paginator(collection_query, 25)\n\n page = request.GET.get('page')\n try:\n collections = paginator.page(page)\n except PageNotAnInteger:\n collections = paginator.page(1)\n except EmptyPage:\n collections = paginator.page(paginator.num_pages)\n\n content = {\n 'collections': collections\n }\n return render(request, 'collection/index.html', content)\n\n\ndef collection_detail(request, collection_id):\n collection = Collection.objects.filter(id=collection_id).first()\n if not collection:\n raise Http404\n\n collection_answer_query = CollectionAnswer.objects.filter(\n collection_id=collection_id).all()\n paginator = Paginator(collection_answer_query, 25)\n\n page = request.GET.get('page')\n try:\n collection_answers = paginator.page(page)\n except PageNotAnInteger:\n collection_answers = paginator.page(1)\n except EmptyPage:\n collection_answers = paginator.page(paginator.num_pages)\n\n answer_ids = [\n collection_answer.answer_id for collection_answer in collection_answers.object_list]\n\n answers = Answer.objects.filter(\n id__in=answer_ids).order_by('-created_at').all()\n\n content = {\n 'collection': collection,\n 'answers': answers\n }\n return render(request, 'collection/detail.html', content)\n\n\ndef answers(request):\n asnwer_query = Answer.objects.all()\n paginator = Paginator(asnwer_query, 25)\n\n page = request.GET.get('page')\n try:\n answers = paginator.page(page)\n except PageNotAnInteger:\n answers = paginator.page(1)\n except EmptyPage:\n answers = paginator.page(paginator.num_pages)\n\n content = {\n 'answers': answers\n }\n return render(request, 'answer/index.html', content)\n\n\ndef answer_detail(request, answer_id):\n answer = Answer.objects.filter(id=answer_id).first()\n if not answer:\n raise Http404\n\n content = {\n 'answer': answer\n }\n return render(request, 'answer/detail.html', content)\n\n\ndef image(request):\n # @todo: add cache\n url = request.GET.get('src')\n r = requests.get(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',\n })\n return HttpResponse(r.content, content_type=r.headers['content-type'])\n\n\ndef question_detail_api(request, question_id):\n question = Question.objects.filter(id=question_id).first()\n if question:\n return JsonResponse(model_to_dict(question))\n else:\n return JsonResponse({}, status=404)\n\n\ndef question_answers_api(request, question_id):\n page = request.GET.get('page', 1)\n per_page = request.GET.get('per_page', 10)\n\n if per_page not in PERP_PAGE_OPRIONS:\n per_page = 10\n\n answers = Answer.objects.filter(\n question_id=question_id).order_by('-created_time').all()\n paginator = Paginator(answers, per_page)\n\n try:\n answers = paginator.page(page)\n except PageNotAnInteger:\n answers = paginator.page(1)\n except EmptyPage:\n answers = paginator.page(paginator.num_pages)\n\n return JsonResponse({\n 'count': paginator.count,\n 'num_pages': paginator.num_pages,\n 'data': [model_to_dict(answer) for answer in answers.object_list],\n 'has_next': answers.has_next(),\n 'has_previous': answers.has_previous(),\n })\n\n\ndef author_detail_api(request, url_token):\n author 
= Author.objects.filter(url_token=url_token).first()\n if author:\n return JsonResponse(model_to_dict(author))\n else:\n return JsonResponse({}, status=404)\n\n\ndef answer_detail_api(request, answer_id):\n answer = Answer.objects.filter(id=answer_id).first()\n if answer:\n return JsonResponse(model_to_dict(answer))\n else:\n return JsonResponse({}, status=404)\n\n\ndef collection_detail_api(request, collection_id):\n collection = Collection.objects.filter(id=collection_id).first()\n if collection:\n return JsonResponse(model_to_dict(collection))\n else:\n return JsonResponse({}, 404)\n\n\ndef collection_answer_api(request, collection_id):\n page = request.GET.get('page', 1)\n per_page = request.GET.get('per_page', 10)\n\n if per_page not in PERP_PAGE_OPRIONS:\n per_page = 10\n\n collection_answer_query = CollectionAnswer.objects.filter(\n collection_id=collection_id).order_by('-created_at').all()\n\n paginator = Paginator(collection_answer_query, per_page)\n\n try:\n collection_answers = paginator.page(page)\n except PageNotAnInteger:\n collection_answers = paginator.page(1)\n except EmptyPage:\n collection_answers = paginator.page(paginator.num_pages)\n\n answer_ids = [\n collection_answer.answer_id for collection_answer in collection_answers.object_list]\n answers = Answer.objects.filter(\n id__in=answer_ids).order_by('-created_at').all()\n\n return JsonResponse({\n 'count': paginator.count,\n 'num_pages': paginator.num_pages,\n 'data': [model_to_dict(answer) for answer in answers],\n 'has_next': collection_answers.has_next(),\n 'has_previous': collection_answers.has_previous(),\n })\n","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"38458927","text":"\"\"\"Solve 2\n使用hashmap存储子串\n滑动窗口i和j, 若s[j]不在d中,则j右移\n若s[j]在d中, 则i右移\n时间复杂度O(n)\n\"\"\"\nclass Solution:\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n d = {}\n result = 0\n i, j = 0, 0\n n = len(s)\n\n while (i < n and j < n):\n if s[j] not in d:\n d[s[j]] = 0\n j = j + 1\n result = max(result, j - i)\n else:\n d.pop(s[i])\n i = i + 1\n\n return result\n","sub_path":"1-50/03.Longest Substring Without Repeating Characters/solve_2.py","file_name":"solve_2.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"184850831","text":"from io import StringIO\nimport json\n\nfrom django.core.management import call_command\n\nfrom concepts.models import Concept, ConceptDefinition, ConceptReference, ItemDependency, ConceptMeta\nfrom main.elasticsearch import delete_doc\nfrom equations.models import Equation, ItemEquation\nfrom main.management.commands import clean\nfrom mathitems.models import ItemTypes, MathItem, node_to_markup\nfrom media.models import Media, ItemMediaDependency\nfrom project.server_com import render_item\n\n# import logging\n# logger = logging.getLogger(__name__)\n\n\ndef get_node_refs(node, refs, media_refs):\n if 'item' in node:\n refs.add(node['item'])\n if 'media' in node:\n media_refs.add(node['media'])\n for child in node.get('children', []):\n get_node_refs(child, refs, media_refs)\n\n\ndef get_document_refs(document):\n item_names = set()\n media_names = set()\n get_node_refs(document, item_names, media_names)\n item_info = {}\n for item_name in item_names:\n try:\n item = MathItem.objects.get_by_name(item_name)\n data = {'url': 
item.get_absolute_url()}\n if item.item_type == ItemTypes.DEF:\n data['defines'] = list(Concept.objects.filter(conceptdefinition__item=item)\n .values_list('name', flat=True))\n item_info[item_name] = data\n except MathItem.DoesNotExist:\n pass\n media_info = {}\n for media_name in media_names:\n try:\n media = Media.objects.get_by_name(media_name)\n media_info[media_name] = {\n 'url': media.get_absolute_url(),\n 'html': media.get_html()\n }\n except Media.DoesNotExist:\n pass\n return item_info, media_info\n\n\ndef get_refs_and_render(item_type, document, eqns, concepts):\n refs, media_refs = get_document_refs(document)\n return render_item(item_type, document, eqns, concepts, refs, media_refs)\n\n\ndef create_item_meta_data(item):\n eqns, concept_defs, concept_refs, item_refs, media_refs = item.analyze()\n\n wildcard_concept = Concept.objects.get(name='*')\n\n if item.item_type == ItemTypes.DEF:\n ConceptDefinition.objects.bulk_create(\n ConceptDefinition(item=item, concept_id=id)\n for id in concept_defs)\n\n ConceptReference.objects.bulk_create(\n ConceptReference(item=item, concept_id=id)\n for id in concept_refs)\n\n for item_id, item_data in item_refs.items():\n dest_item = MathItem.objects.get_by_name(item_id)\n dep = ItemDependency.objects.create(item=item, uses=dest_item)\n if dest_item.is_def():\n concepts = list(item_data.get('concepts', []))\n if item_data.get('whole'):\n concepts.append(wildcard_concept.id)\n dep.concepts = concepts\n\n ItemMediaDependency.objects.bulk_create(\n ItemMediaDependency(item=item, uses=Media.objects.get_by_name(media))\n for media in media_refs)\n\n ItemEquation.objects.bulk_create(\n ItemEquation(item=item, equation_id=eqn_id)\n for eqn_id in eqns)\n\n return concept_defs | concept_refs\n\n\ndef item_to_markup(item):\n eqns, concept_defs, concept_refs, item_refs, media_refs = item.analyze()\n concepts = concept_defs | concept_refs\n for data in item_refs.values():\n concepts |= data.get('concepts', set())\n concept_map = {id: Concept.objects.get(id=id).name for id in concepts}\n eqn_map = {id: Equation.objects.get(id=id).to_markup() for id in eqns}\n return node_to_markup(item.get_body_root(), concept_map, eqn_map)\n\n\ndef create_concept_meta(concept_id):\n ConceptMeta.objects.update_or_create(\n concept_id=concept_id,\n defaults={\n 'ref_count': ConceptReference.objects.filter(concept_id=concept_id).count(),\n 'def_count': ConceptDefinition.objects.filter(concept_id=concept_id).count()\n }\n )\n\n\n# item can be both an MathItem and an Media instance\ndef delete_item(item):\n lines = ['Deleted {}'.format(item)]\n item_id = item.get_name()\n r = item.delete()\n lines.append(json.dumps(r))\n r = delete_doc(item_id)\n lines.append(('Deleted' if r else 'Failed deleting') + ' from elasticsearch index')\n with StringIO() as st:\n call_command(clean.Command(), stdout=st)\n lines.append(st.getvalue())\n return '\\n\\n'.join(lines)\n","sub_path":"main/item_helpers.py","file_name":"item_helpers.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"309017850","text":"from base64 import b64decode\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Cipher import AES\n\ndef PKCS(text: bytes, blocksize, pad=b'\\x00'):\n if len(text) % blocksize == 0:\n return text\n text += pad\n return PKCS(text, blocksize, pad)\n\ndef encryption_oracle(plain_text):\n ''' Generate random keys and random IV'''\n key = get_random_bytes(16)\n iv = get_random_bytes(16)\n '''Add a 
random number of random bytes before and after the plain text'''\n append_length = [5, 6, 7, 8, 9, 10]\n random_length = append_length[(get_random_bytes(1)[0]) % 6]\n before_plain_text = get_random_bytes(random_length) + plain_text\n random_length = append_length[(get_random_bytes(1)[0]) % 6]\n plain_text = before_plain_text + get_random_bytes(random_length)\n plain_text = PKCS(plain_text, 16)\n ''' Encrypt using Two modes EBC or CBC, the mode is choosen randomly'''\n ''' EBC 0\n CBC 1\n '''\n AES_MODE = (get_random_bytes(1)[0]) % 2\n if AES_MODE == 0: # EBC Mode\n print('AES_EBC mode')\n ciphering = AES.new(key, AES.MODE_ECB)\n cipher_text = ciphering.encrypt(plain_text)\n return cipher_text\n print('AES_CBC mode')\n ciphering = AES.new(key, AES.MODE_CBC, iv=iv)\n cipher_text = ciphering.encrypt(plain_text)\n return cipher_text\n\ndef decryption_oracle(cipher_text,key):\n ciphering = AES.new(key, AES.MODE_ECB)\n return ciphering.decrypt(cipher_text)\n\ndef AES_128_ECB(plain_text, random_key):\n ''' Encrypt using AES-ECB with random key and append some unknown string to the key '''\n unknown_string = 'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg' \\\n 'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq' \\\n 'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg' \\\n 'YnkK'\n plain_text += b64decode(unknown_string)\n plain_text = PKCS(plain_text, 16)\n ''' Encrypt using Two modes EBC'''\n # print('AES_EBC mode')\n ciphering = AES.new(random_key, AES.MODE_ECB)\n cipher_text = ciphering.encrypt(plain_text)\n return cipher_text\n\ndef get_blocksize(func):\n ''' You can determine the block size by getting the cipher text\n from encrypt function by passing 1 additional byte each time\n and monitor the length of the cipher text. when it increments\n the increment is the block size\n '''\n ''' future work it should take the function name as a parameter\n to test many functions not only the AES_128\n '''\n test_text = b'A'\n key = get_random_bytes(16)\n initial_length = len(AES_128_ECB(test_text, key))\n final_length = initial_length\n while True:\n if final_length != initial_length:\n return final_length - initial_length\n test_text += b'A'\n final_length = len(AES_128_ECB(test_text, key))\n\ndef get_unknowstringsize(func):\n ''' To determine the unknown string appended to plain text\n by the encrypt function. Increment the size of the test\n text by one. 
When the size of cipher text increment by\n 1 block size return the difference between the inital\n length and the counter\n '''\n ''' future work it should take the function name as a parameter\n to test many functions not only the AES_128\n '''\n key = get_random_bytes(16)\n test_text = b'A'\n initial_length = len(AES_128_ECB(test_text, key))\n final_length = initial_length\n i = 1\n while True:\n if final_length != initial_length:\n return initial_length - i + 1\n test_text += b'A'\n i += 1\n final_length = len(AES_128_ECB(test_text, key))\n\ndef is_AES_ECB_mode(func):\n ''' This function will test against the AES ECB mode\n if it find match it will return true\n '''\n key = get_random_bytes(16)\n blocksize = get_blocksize(AES_128_ECB(b' ', key))\n blocksize_minus1 = PKCS(b'A', blocksize * 2, b'A')\n cipher_text = AES_128_ECB(blocksize_minus1, key)\n if find_match(cipher_text, blocksize)[0]:\n print(\"AES-ECB is used\")\n return True\n return False\n\ndef find_match(cipher, KeyLength=16):\n found = False\n ''' if it is a single row bytes'''\n if str(type(cipher[0])) == '':\n cipher_array = [b'']\n cipher_array[0] = cipher\n cipher = cipher_array\n for k in range(int(len(cipher))):\n for l in range(int(len(cipher[0]) / KeyLength)):\n block1 = cipher[k][(l) * KeyLength:(l + 1) * KeyLength]\n # print(len(block1),block1)\n count_match = 0\n # match_location = [(,)]\n for i in range(int(len(cipher))):\n for j in range(int(len(cipher[i]) / KeyLength)):\n # if hamming_distance(block1,cipher_text[i][j*keylength:(j+1)*keylength]) == 0:\n if block1 == cipher[i][j * KeyLength:(j + 1) * KeyLength]:\n if j != l:\n # print(\"Find Match! in line\",i,'Block',j)\n count_match += 1\n found = True\n return found, count_match\n\ndef AES_byte_swap_attack():\n key = get_random_bytes(16)\n random_text = b'adsf'\n blocksize = get_blocksize(AES_128_ECB(random_text, key))\n attack_result = b''\n extend = int(get_unknowstringsize(AES_128_ECB(random_text,key))/blocksize)+1\n found_byte = b''\n print(is_AES_ECB_mode(AES_128_ECB(random_text,key)))\n if is_AES_ECB_mode(AES_128_ECB(random_text,key)):\n print('AES_ECB Brute force attack will be implemented')\n else:\n exit()\n for j in range(1,get_unknowstringsize(AES_128_ECB(random_text,key))):\n for i in range(256):\n test_block = PKCS(b'A',extend*blocksize-j,b'A')\n test_cipher = AES_128_ECB(test_block,key)\n cipher = AES_128_ECB(test_block+found_byte+i.to_bytes(1,'big'),key)\n print(found_byte+i.to_bytes(1,'big'))\n working_block = int(get_unknowstringsize(AES_128_ECB(random_text,key))/blocksize)\n if test_cipher[(working_block)*blocksize:(working_block+1)*blocksize] == cipher[(working_block)*blocksize:(working_block+1)*blocksize]:\n found_byte += i.to_bytes(1,'big')\n break\n print('The found unknown string by brute force attack is:')\n print(found_byte.decode('utf-8'))\n\ndef parsing(input):\n print('parsing')\n parse1 = input.split('&')\n parse = ['']*len(parse1)\n for i in range(len(parse1)):\n parse[i] = parse1[i].replace('=',': ')\n return parse\n\ndef reverse_parse(input):\n output = ''\n for i in range(len(input)):\n output += input[i].replace(': ','=') + '&'\n return output[:(len(output)-1)]\n\ndef profile_for(input):\n before_parse = 'email='+input+'&uid=10&role=user'\n parse = parsing(before_parse)\n parse_bytes = b''\n blocksize = 16\n key = get_random_bytes(16)\n for i in range(len(parse)):\n parse_bytes += PKCS(parse[i].encode().replace(b': ',b''),(int(len(parse[i].encode().replace(b': ',b''))/blocksize)+1)*blocksize)\n 
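# each '&'-separated field is stripped of its ': ' marker and zero-padded to a whole block\n 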
#print((int(len(parse[i].encode().replace(b': ',b''))/blocksize)+1)*blocksize)\n attack = AES_128_ECB(PKCS(b'roleadmin',blocksize),key)[:blocksize]\n print(parse_bytes,len(parse_bytes))\n\n return AES_128_ECB(parse_bytes,key),key,attack\n\ntest = 'foo=bar&baz=qux&zap=zazzle'\ny = profile_for('foo@bar.com')\nblocksize = 16\nx = y[0][:3*blocksize]+y[2]+y[0][4*blocksize:]\nj = decryption_oracle(x,y[1])\nz = j[:2*blocksize].rstrip(b'\\x00').replace(b'email',b'email=')+b'&'+j[2*blocksize:3*blocksize].rstrip(b'\\x00').replace(b'uid',b'uid=')+b'&'+j[3*blocksize:4*blocksize].rstrip(b'\\x00').replace(b'role',b'role=')\n\nprint(z)\nexit()\n","sub_path":"Set2_13.py","file_name":"Set2_13.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"58939334","text":"\"\"\"Extract text from pdf file.\"\"\"\nimport PyPDF2\nimport os\n\nPDF_EXTENSION = '.pdf'\n\ndef PDFRipper(file_name, first_page=1, last_page=None):\n \"\"\"Extract and return text data from .pdf file.\n \n Args:\n file_name: A .pdf file. File to have text extracted.\n first_page: An int. The first page to read from the file.\n Otherwise, use 0 as first page.\n last_page: An int. The last page to read in the file to \n to. Otherwise, None and read to end of file.\n Returns:\n A string. The text data from pages specified.\n Raises:\n ValueError: If the passed in file does not exist.\n If the file is not of type '.pdf'.\n \"\"\"\n if not os.path.isfile(file_name):\n raise IOError('The file_name specified, %s, does not exist.' %\n file_name)\n if not file_name.endswith(PDF_EXTENSION):\n raise IOError('The file_name specified, %s, is not a .pdf file.' %\n file_name)\n text_data = ''\n with open(file_name, 'rb') as f:\n read_pdf = PyPDF2.PdfFileReader(f)\n num_pages = read_pdf.getNumPages() - 1\n if not last_page:\n last_page = num_pages\n if last_page > num_pages:\n last_page = num_pages\n for page in range(first_page - 1, last_page + 1):\n page_read = read_pdf.getPage(page)\n page_content = page_read.extractText()\n text_data += page_content.encode('utf-8')\n print ('Read Page: %d' % page)\n return text_data\n","sub_path":"pdf_ripper.py","file_name":"pdf_ripper.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"583261041","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 3/23/18 4:32 PM\n# @Author : Miracle Young\n# @File : app.py\n\nfrom lib.logger import StreamFileLogger\nfrom lib.excel import Excel\nfrom utils import settings\n\n_sflogger = StreamFileLogger(settings.LOG_FILE, __file__).get_logger()\n\n_sflogger.info(1)\n_sflogger.debug(2)\n\ntry:\n a\nexcept Exception as e:\n _sflogger.error('Failed', exc_info=True)\n\n\n\nexcel_name = settings.SRC_DATA['a.xlsx']\n\n\nexcel = Excel(excel_name)\n\nexcel.get_column_names('CAPS Industry KPIs New', 'A1', 'F28')\n# get all sheetname\nexcel.read_excel_by_pos('CAPS Industry KPIs New', 'A1', 'F28')\n\n#\n\n\n\n","sub_path":"views/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"612706147","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom opendlv_ros.msg import ActuationRequest\n\ncmd_pub = rospy.Publisher(\"opendlv_cmd\", ActuationRequest, queue_size=10)\n\ndef cmd_callback(cmd):\n opendlv_cmd = ActuationRequest()\n 
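# translate the ROS Twist: angular.z drives the steering request, linear.x the acceleration request\n 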
opendlv_cmd.delta_req = cmd.angular.z\n opendlv_cmd.ax_req = cmd.linear.x\n opendlv_cmd.header.stamp = rospy.Time.now()\n cmd_pub.publish(opendlv_cmd)\n\ndef cmd_translator(): \n rospy.init_node(\"cmd_translator\", anonymous=True)\n rospy.Subscriber(\"cmd_vel\", Twist, cmd_callback)\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n \nif __name__ == '__main__':\n try:\n cmd_translator()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/cmd_translator_node.py","file_name":"cmd_translator_node.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"543459126","text":"from django.forms import ModelForm\nfrom .models import Book, Author\nfrom django import forms\n\n\nclass CreateBookForm(ModelForm):\n class Meta:\n model = Book\n fields = [\n 'title',\n 'description',\n 'image',\n 'author_name'\n ]\n widgets = {\n 'title': forms.TextInput(attrs={\n 'rows': 2,\n 'placeholder': 'Название'\n }),\n 'description': forms.Textarea(attrs={\n 'placeholder': 'Описание',\n 'rows': 5\n }),\n 'author_name': forms.SelectMultiple(attrs={\n 'multiple': 'multiple',\n 'size': 6,\n\n })\n }\n\n\nclass CreateAuthorForm(ModelForm):\n class Meta:\n model = Author\n fields = [\n 'name',\n 'image'\n ]\n","sub_path":"Dediluk/mybooks/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"649161813","text":"import socket, sys, os\n\n# Utilities functions\n###################################################################################################\n\ndef getRequestInfo():\n url = sys.argv[1]\n urlSplits = url.split(\"/\")\n\n host = urlSplits[2]\n\n if len(urlSplits) == 3:\n \tresource = \"/\"\n else:\n \tresource = \"/\" + urlSplits[3]\n\n return { \"host\": host, \"resource\": resource }\n\ndef getSocket(host, port):\n soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n soc.connect((host, port))\n return soc\n\ndef getIndexCaseInSensitive(source, target):\n sourceUpper = source.upper()\n targetUpper = target.upper()\n if (targetUpper in sourceUpper):\n return sourceUpper.index(targetUpper)\n else:\n return -1\n\n# State machine functions\n###################################################################################################\n\ndef startStateMachine(initialState):\n currentState = 0\n\n while (True):\n if (not currentState): currentState = initialState\n currentState = invokeState(currentState)\n if (not currentState): break\n\ndef invokeState(state):\n stateName = state[\"name\"]\n if (stateName == \"seekStart\"):\n return stateSeekStart(state)\n elif (stateName == \"seekEnd\"):\n return stateSeekEnd(state)\n elif (stateName == \"sendToRtvm\"):\n return stateSendToRtvm(state)\n elif (stateName == \"receiveFromRtvm\"):\n return stateReceiveFromRtvm(state)\n\ndef stateSeekStart(state):\n resourceResponse = resourceSocket.recv(1024).decode(\"UTF-8\")\n startIndex = getIndexCaseInSensitive(resourceResponse, \"\")\n\n if (startIndex < 0):\n return { \"name\": \"seekStart\" }\n else:\n resourceFragment = resourceResponse[startIndex:]\n endIndex = getIndexCaseInSensitive(resourceFragment, \"\")\n if (endIndex >= 0):\n return { \"name\": \"sendToRtvm\", \"resourceContent\": resourceFragment[:endIndex + 7] }\n else:\n return { \"name\": \"seekEnd\", \"resourceFragment\": resourceFragment }\n\ndef stateSeekEnd(state):\n resourceFragment = 
state[\"resourceFragment\"]\n resourceResponse = resourceSocket.recv(1024).decode(\"UTF-8\")\n resourceFragment = resourceFragment + resourceResponse\n\n endIndex = getIndexCaseInSensitive(resourceFragment, \"\")\n if (endIndex >= 0):\n return { \"name\": \"sendToRtvm\", \"resourceContent\": resourceFragment[:endIndex + 7] }\n else:\n return { \"name\": \"seekEnd\", \"resourceFragment\": resourceFragment }\n\ndef stateSendToRtvm(state):\n resourceContent = state[\"resourceContent\"]\n rtvmSocket.send(resourceContent.encode(\"UTF-8\"))\n return { \"name\": \"receiveFromRtvm\" }\n\ndef stateReceiveFromRtvm(state):\n rtvmResponse = rtvmSocket.recv(1024).decode(\"ASCII\")\n responseEndedIndex = getIndexCaseInSensitive(rtvmResponse, \"COMP173\")\n\n if (responseEndedIndex >= 0):\n print(rtvmResponse[:responseEndedIndex], end='')\n return 0 # We're done; Exit from the state machine.\n else:\n print(rtvmResponse, end='')\n return state\n\n# Main program\n###################################################################################################\n\n# Create RTVM socker\nrtvmSocket = getSocket(\"rtvm.cs.camosun.bc.ca\", 10010)\nrtvmResponse = rtvmSocket.recv(1024)\n\n# Exit if RTVM is not READY\nif (rtvmResponse != b'READY'):\n print(\"RTVM NOT READY!!!\")\n print(rtvmResponse)\n sys.exit(0)\n\n# Parse command arguments for request info\nrequestInfo = getRequestInfo()\n\n# Open resource socket from request info\nresourceSocket = getSocket(requestInfo[\"host\"], 80)\nresourceSocket.send((\"GET \" + requestInfo[\"resource\"] + \" HTTP/1.1\\n\" + \"Host: \" + requestInfo[\"host\"] + \"\\n\\n\").encode(\"UTF-8\"))\n\n# Process resource with RTVM via a state machine\nstartStateMachine({ \"name\": \"seekStart\" })\n\n# CLeanup\nresourceSocket.close()\nrtvmSocket.close()\nsys.exit(0)\n","sub_path":"lab3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"282857162","text":"from .models import Task\nfrom users.models import Person\nimport csv\n\n#it's used to download tasks from file to model!\n\ndef loading():\n with open(\"tasks/text_dimensionality_reduction/snli_1.0_dev.csv\", newline='') as csvFile:\n rowNumber = 0\n reader = csv.reader(csvFile, delimiter=',')\n for row in reader:\n # check if row isnt empty\n if len(row) != 0:\n currentSentence = row[6] # number of column from which we want to extract text\n print(currentSentence)\n\n created = Task.objects.get_or_create(\n task_name = currentSentence,\n assigned_employee = Person.objects.get(first_name = \"temp\"),\n )\n rowNumber += 1\n if rowNumber == 100:\n break","sub_path":"tasks/load_sentences_to_model.py","file_name":"load_sentences_to_model.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"608245104","text":"import sys\r\nimport heapq\r\nimport random\r\nimport numpy as np \r\nsys.path.insert(0, 'C:\\\\Users\\\\Isabella\\\\Documents\\\\Elementos de Física\\\\Collisions\\\\disk')\r\nsys.path.insert(0, 'C:\\\\Users\\\\Isabella\\\\Documents\\\\Elementos de Física\\\\Collisions\\\\event')\r\n\r\nimport disk as dk\r\nimport event as ev\r\n\r\n\"\"\"\r\nDEFINA EL TAMAÑO DEL CONTENEDOR.\r\n\"\"\"\r\n\r\nLX = 100\r\nLY = 100\r\n\r\nclass System:\r\n\r\n \"\"\"\r\n Representa al sistema de discos. 
La lista de discos es representada por su atributo particles.\r\n El atributo minpq es un min binary heap que guarda todos los tiempos de colisiones distintos a -1\r\n (infinito) de todas las interacciones posibles entre los discos y entre cada disco con el muro vertical\r\n y horizontal.\r\n \"\"\"\r\n\r\n def __init__(self, time, disks):\r\n self.TIME_MAX = time\r\n self.time_sim = 0\r\n\r\n self.minpq = []\r\n self.events = []\r\n self.particles = disks\r\n self.lista_grande = []\r\n self.N = len(disks)\r\n for disco in self.particles:\r\n self.lista_grande.append([[],[]])\r\n\r\n self.momentos_x = []\r\n self.momentos_y = []\r\n self.temperaturas = []\r\n self.pressures = []\r\n self.cumulative_pressure = 0\r\n self.free_time = []\r\n self.free_time_val = 0\r\n self.res_mean_vel_2 = []\r\n self.l1 = 0\r\n self.l2 = 0\r\n\r\n def create_events(self, list, list_pairs):\r\n if len(list) == 0:\r\n self.events = list_pairs\r\n return list_pairs\r\n else:\r\n i = list[0]\r\n list = list[1:]\r\n if len(list) != 0:\r\n for j in list:\r\n list_pairs.append([i,j])\r\n list_pairs.append([None, i])\r\n list_pairs.append([i, None])\r\n else:\r\n list_pairs.append([None, i])\r\n list_pairs.append([i, None])\r\n return self.create_events(list, list_pairs)\r\n\r\n def initialize(self):\r\n self.set_random_velocities()\r\n self.set_random_positions()\r\n\r\n def set_random_velocities(self):\r\n for disco in self.particles:\r\n disco.vx = random.uniform(-5, 5)\r\n disco.vy = random.uniform(-5, 5)\r\n\r\n def set_velocities(self, in_kinetic):\r\n vxSum = 0\r\n vySum = 0\r\n for disco in self.particles:\r\n disco.vx = np.random.random() - 0.5\r\n disco.vy = np.random.random() - 0.5\r\n vxSum += disco.vx\r\n vySum += disco.vy\r\n vxCM = vxSum / len(self.particles)\r\n vyCM = vySum / len(self.partciles)\r\n for disco in self.particles:\r\n disco.vx -= vxCM\r\n disco.vy -= vyCM\r\n v2Sum = 0\r\n for disco in self.particles:\r\n v2Sum += disco.vx**2 + disco.vy**2\r\n k_energy_per_particle = (0.5) * v2Sum / len(self.particles)\r\n rescale = np.sqrt(in_kinetic / k_energy_per_particle)\r\n for disco in self.particles:\r\n disco.vx *= rescale\r\n disco.vy *= rescale\r\n\r\n def set_random_positions(self):\r\n self.particles[0].x = LX/2\r\n self.particles[0].y = LY/2\r\n\r\n for idx, idish in enumerate(self.particles[1:], start = 1):\r\n irad, overlap = idish.RADIUS, True\r\n\r\n while overlap:\r\n jdx, overlap = 0, False\r\n dicex = (LX - 2.0 * irad) * np.random.random() + irad\r\n dicey = (LY - 2.0 * irad) * np.random.random() + irad\r\n tmp_pos = np.array([dicex, dicey])\r\n\r\n while jdx < idx and not overlap:\r\n jdish = self.particles[jdx]\r\n otro = np.array([jdish.x, jdish.y])\r\n metric = np.linalg.norm(tmp_pos - otro)\r\n\r\n if metric <= irad + jdish.RADIUS:\r\n overlap = True\r\n jdx += 1\r\n idish.x, idish.y = tmp_pos[0], tmp_pos[1]\r\n\r\n \"\"\"\r\n Para la red rectangular:\r\n -n es el número de discos por fila, es decir, el número de columnas.\r\n -m es el número de discos por columnas, es decir, el número de filas.\r\n \"\"\"\r\n\r\n def set_rect_red(self, n, m):\r\n step_x = LX/n\r\n step_y = LY/m\r\n center = (step_x/2, step_y/2)\r\n if center[0] < self.particles[0].RADIUS or center[1] < self.particles[0].RADIUS:\r\n print(\"No puedo hacer la malla! 
#Falta corregir este caso\")\r\n return\r\n else:\r\n k = 0\r\n for i in range(n):\r\n for j in range(m):\r\n self.particles[k].x = center[0] + ((i) * step_x)\r\n self.particles[k].y = center[1] + ((j) * step_y)\r\n k += 1\r\n for disco in self.particles:\r\n print(disco)\r\n\r\n def build_binary_heap(self):\r\n for pair in self.events:\r\n evn = ev.Event(pair[0], pair[1])\r\n evn.calculate_time()\r\n if evn.time != np.inf:\r\n heapq.heappush(self.minpq, evn)\r\n\r\n def valid(self, evn):\r\n\r\n if evn.disk_a != None and evn.disk_b != None:\r\n if evn.TOTAL_COLLS == evn.disk_a.disk_colls + evn.disk_a.wall_colls + evn.disk_b.disk_colls + evn.disk_b.wall_colls:\r\n evn.valid = True\r\n else:\r\n evn.valid = False\r\n elif evn.disk_a == None and evn.disk_b != None:\r\n if evn.TOTAL_COLLS == evn.disk_b.disk_colls + evn.disk_b.wall_colls:\r\n evn.valid = True\r\n else:\r\n evn.valid = False\r\n else:\r\n if evn.TOTAL_COLLS == evn.disk_a.disk_colls + evn.disk_a.wall_colls:\r\n evn.valid = True\r\n else:\r\n evn.valif = False\r\n\r\n def res_collision(self, evn):\r\n if evn.disk_a != None and evn.disk_b != None: #DISCO CON DISCO\r\n Vij = [evn.disk_a.vx - evn.disk_b.vx , evn.disk_a.vy - evn.disk_b.vy] #Vector Vi - Vj\r\n Vji = [evn.disk_b.vx - evn.disk_a.vx , evn.disk_b.vy - evn.disk_a.vy]\r\n Rij = [evn.disk_a.x - evn.disk_b.x , evn.disk_a.y - evn.disk_b.y] #Vector Ri - Rj\r\n Rji = [evn.disk_b.x - evn.disk_a.x , evn.disk_b.y - evn.disk_a.y]\r\n Vij_Rij = Vij[0]*Rij[0] + Vij[1]*Rij[1] #Producto punto\r\n Vji_Rji = Vji[0]*Rji[0] + Vji[1]*Rji[1]\r\n ci = (2*evn.disk_a.MASS)/(evn.disk_a.MASS + evn.disk_b.MASS)\r\n cj = (2*evn.disk_b.MASS)/(evn.disk_a.MASS + evn.disk_b.MASS)\r\n evn.disk_a.vx = (-(cj)/(evn.disk_a.RADIUS + evn.disk_b.RADIUS)**2)*Vij_Rij*Rij[0] + evn.disk_a.vx\r\n evn.disk_a.vy = (-(cj)/(evn.disk_a.RADIUS + evn.disk_b.RADIUS)**2)*Vij_Rij*Rij[1] + evn.disk_a.vy\r\n evn.disk_b.vx = (-(ci)/(evn.disk_a.RADIUS + evn.disk_b.RADIUS)**2)*Vji_Rji*Rji[0] + evn.disk_b.vx\r\n evn.disk_b.vy = (-(ci)/(evn.disk_a.RADIUS + evn.disk_b.RADIUS)**2)*Vji_Rji*Rji[1] + evn.disk_b.vy\r\n evn.disk_a.disk_colls += 1\r\n evn.disk_b.disk_colls += 1\r\n\r\n elif evn.disk_a == None and evn.disk_b != None: #DISCO CON MURO VERTICAL\r\n evn.disk_b.vx = -1*evn.disk_b.vx\r\n evn.disk_b.wall_colls += 1\r\n\r\n else: #DISCO CON MURO HORIZONTAL\r\n evn.disk_a.vy = -1*evn.disk_a.vy\r\n evn.disk_a.wall_colls += 1\r\n\r\n def new_colls(self, evn):\r\n if evn.disk_a != None and evn.disk_b != None:\r\n for disco in self.particles:\r\n if disco != evn.disk_a and disco != evn.disk_b:\r\n ev1 = ev.Event(evn.disk_a, disco)\r\n ev1.calculate_time()\r\n ev1.time += self.time_sim\r\n\r\n ev2 = ev.Event(evn.disk_b, disco)\r\n ev2.calculate_time()\r\n ev2.time += self.time_sim\r\n\r\n if ev1.time != np.inf:\r\n heapq.heappush(self.minpq, ev1)\r\n if ev2.time != np.inf:\r\n heapq.heappush(self.minpq, ev2)\r\n\r\n ev1_vert = ev.Event(None, evn.disk_a)\r\n ev1_vert.calculate_time()\r\n ev1_vert.time += self.time_sim\r\n\r\n ev1_horz = ev.Event(evn.disk_a, None)\r\n ev1_horz.calculate_time()\r\n ev1_horz.time += self.time_sim\r\n\r\n ev2_vert = ev.Event(None, evn.disk_b)\r\n ev2_vert.calculate_time()\r\n ev2_vert.time += self.time_sim\r\n\r\n ev2_horz = ev.Event(evn.disk_b, None)\r\n ev2_horz.calculate_time()\r\n ev2_horz.time += self.time_sim\r\n\r\n if ev1_vert.time != np.inf:\r\n heapq.heappush(self.minpq, ev1_vert)\r\n if ev1_horz.time != np.inf:\r\n heapq.heappush(self.minpq, ev1_horz)\r\n if ev2_vert.time != np.inf:\r\n 
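# an infinite collision time means the event never happens, so only finite events are queued\r\n 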
heapq.heappush(self.minpq, ev2_vert)\r\n if ev2_horz.time != np.inf:\r\n heapq.heappush(self.minpq, ev2_horz)\r\n\r\n elif evn.disk_a == None and evn.disk_b != None: #DISCO CON MURO VERTICAL\r\n for disco in self.particles:\r\n if disco != evn.disk_b:\r\n ev1 = ev.Event(evn.disk_b, disco)\r\n ev1.calculate_time()\r\n ev1.time += self.time_sim\r\n\r\n if ev1.time != np.inf:\r\n heapq.heappush(self.minpq, ev1)\r\n\r\n ev1_vert = ev.Event(None, evn.disk_b)\r\n ev1_vert.calculate_time()\r\n ev1_vert.time += self.time_sim\r\n\r\n ev1_horz = ev.Event(evn.disk_b, None)\r\n ev1_horz.calculate_time()\r\n ev1_horz.time += self.time_sim\r\n\r\n if ev1_vert.time != np.inf:\r\n heapq.heappush(self.minpq, ev1_vert)\r\n if ev1_horz.time != np.inf:\r\n heapq.heappush(self.minpq, ev1_horz)\r\n\r\n else: #DISCO CON MURO HORIZONTAL\r\n for disco in self.particles:\r\n if disco != evn.disk_a:\r\n ev1 = ev.Event(evn.disk_a, disco)\r\n ev1.calculate_time()\r\n ev1.time += self.time_sim\r\n\r\n if ev1.time != np.inf:\r\n heapq.heappush(self.minpq, ev1)\r\n\r\n ev1_vert = ev.Event(None, evn.disk_a)\r\n ev1_vert.calculate_time()\r\n ev1_vert.time += self.time_sim\r\n\r\n ev1_horz = ev.Event(evn.disk_a, None)\r\n ev1_horz.calculate_time()\r\n ev1_horz.time += self.time_sim\r\n\r\n if ev1_vert.time != np.inf:\r\n heapq.heappush(self.minpq, ev1_vert)\r\n if ev1_horz.time != np.inf:\r\n heapq.heappush(self.minpq, ev1_horz)\r\n\r\n def fill_list(self):\r\n i = 0\r\n for disco in self.particles:\r\n x, y = disco.get_state()\r\n self.lista_grande[i][0].append(x)\r\n self.lista_grande[i][1].append(y)\r\n i += 1\r\n\r\n def move_particles(self, deltat):\r\n for disco in self.particles:\r\n disco.move(deltat)\r\n\r\n def momentum(self):\r\n Sum = np.array([0., 0.])\r\n for disco in self.particles:\r\n m = disco.MASS\r\n v = np.array([disco.vx, disco.vy])\r\n Sum += m*v\r\n return Sum / len(self.particles)\r\n\r\n def momentum_2part(self, evn):\r\n if evn.disk_a != None and evn.disk_b != None:\r\n va = np.array([evn.disk_a.vx, evn.disk_a.vy])\r\n vb = np.array([evn.disk_b.vx, evn.disk_b.vy])\r\n return (evn.disk_a.MASS * va + evn.disk_b.MASS * vb)\r\n elif evn.disk_a == None and evn.disk_b != None:\r\n vb = np.array([evn.disk_b.vx, evn.disk_b.vy])\r\n return evn.disk_b.MASS * vb\r\n else:\r\n va = np.array([evn.disk_a.vx, evn.disk_a.vy])\r\n return evn.disk_a.MASS * va\r\n\r\n \"\"\"\r\n La temperatura se maneja en kB por T.\r\n \"\"\"\r\n def temperatura(self):\r\n Sum = 0\r\n for disco in self.particles:\r\n v = disco.vx**2 + disco.vy**2\r\n Sum += (disco.MASS/2)*v\r\n return Sum / len(self.particles)\r\n\r\n def cum_pre(self, evn, vel_bef):\r\n vi_p = np.array([evn.disk_a.vx, evn.disk_a.vy])\r\n vj_p = np.array([evn.disk_b.vx, evn.disk_b.vy])\r\n vi = np.array(vel_bef[0])\r\n vj = np.array(vel_bef[1])\r\n delta_v = vi_p - vi - (vj_p - vj)\r\n rij = np.array([evn.disk_a.x - evn.disk_b.x , evn.disk_a.y - evn.disk_b.y])\r\n self.cumulative_pressure += (evn.disk_a.MASS/2) * np.dot(delta_v, rij)\r\n\r\n def free_t(self):\r\n sum = 0\r\n for disco in self.particles:\r\n sum += disco.wall_colls + disco.disk_colls\r\n res = self.time_sim / sum\r\n self.free_time.append(res)\r\n #print(res)\r\n\r\n def mean_vel_2(self):\r\n sum = 0\r\n for disco in self.particles:\r\n v = np.array([disco.vx, disco.vy])\r\n sum += np.dot(v,v)\r\n res = sum / self.N\r\n self.res_mean_vel_2.append(res)\r\n #print(res)\r\n\r\n def main_loop(self):\r\n run = True\r\n self.fill_list()\r\n\r\n mtum = self.momentum()\r\n 
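# record the per-particle mean momentum components as a conservation check\r\n 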
self.momentos_x.append(mtum[0])\r\n        self.momentos_y.append(mtum[1])\r\n        self.temperaturas.append(self.temperatura())\r\n\r\n        while(run):\r\n\r\n            if len(self.minpq) == 0:\r\n                break\r\n            if self.time_sim >= self.TIME_MAX:\r\n                break\r\n\r\n            evn = heapq.heappop(self.minpq)\r\n            self.valid(evn)\r\n\r\n            if evn.time > self.time_sim and evn.valid:\r\n                self.move_particles(evn.time - self.time_sim)\r\n                self.time_sim = evn.time\r\n                vel_before = evn.get_velocities()\r\n                self.res_collision(evn)\r\n                self.free_t()\r\n\r\n                self.mean_vel_2()\r\n                if evn.CLASS == 0:\r\n                    self.cum_pre(evn, vel_before)\r\n                \r\n                self.new_colls(evn)\r\n                self.fill_list()\r\n\r\n                mtum = self.momentum()\r\n                self.momentos_x.append(mtum[0])\r\n                self.momentos_y.append(mtum[1])\r\n                temp = self.temperatura()\r\n                print(temp)\r\n                self.temperaturas.append(temp)\r\n                if evn.CLASS == 0:\r\n                    p = (len(self.particles)*temp/(LX*LY)) + (self.cumulative_pressure/(LX*LY*self.time_sim))\r\n                    self.pressures.append(p)\r\n\r\n        self.free_time_val = self.free_time[-1]\r\n        #self.l1 = self.free_time_val * np.sqrt(sum(self.res_mean_vel_2) / self.time_sim)\r\n        self.l2 = self.free_time_val * np.sqrt(self.res_mean_vel_2[-1])\r\n","sub_path":"collisions/system/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":14572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"406474425","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView, TemplateView\nfrom iip_smr_web_app import views\n\n\nadmin.autodiscover()\n\nurlpatterns = [\n\n    url(r'^admin/', include(admin.site.urls)),\n    url( r'^login/$', views.login, name=u'login_url' ),\n    url( r'^logout/$', views.logout, name=u'logout_url' ),\n    url( r'^old_search/$', views.old_results, name=u'oldsearch_url' ),\n    url( r'^api/$', views.api_wrapper, name=u\"api_wrapper\"),\n    url( r'^viewinscr/(?P<inscrid>.*)/$', views.viewinscr, name='inscription_url'),\n    url( r'^view_xml/(?P<inscrid>.*)/$', views.view_xml, name=u'xml_url' ),\n\n    # url( r'^info/(?P<page>.*)/$', views.info, name=u'info_url' ),\n    url( r'^info/$', views.version, name='version_url' ),\n\n    url( r'^about/(?P<page>.*)/$', views.info, name=u'info_url' ),\n\n    url( r'^resources/(?P<page>.*)/$', views.resources_general, name=u'resources_general_url' ),\n\n    url( r'^edit_info/$', views.edit_info, name=u'edit_info_url' ),\n    url( r'^results/$', views.results, name=u'results_url' ),\n\n    ##NEW\n    url(r'^about/$', views.why_inscription, name=u'about_url' ),\n    url(r'^about/why_inscription/$', views.why_inscription, name=u'why_inscription_url' ),\n    url(r'^about/project_description/$', views.project_description, name=u'project_description_url' ),\n    url(r'^about/documentation/$', views.documentation, name=u'documentation_url' ),\n    url(r'^about/api/$', views.api, name=u'api_url' ),\n    url(r'^about/funding/$', views.funding, name=u'funding_url' ),\n    url(r'^about/team/$', views.team, name=u'team_url' ),\n    url(r'^about/copyright/$', views.copyright, name=u'copyright_url' ),\n    url(r'^index/$', views.index, name=u'index_url' ),\n    url(r'^contact/$', views.contact, name=u'contact_url' ),\n    url(r'^mapsearch/$', views.results, name=u'mapsearch_url' ),\n    url(r'^mapsearch/load_layers/$', views.load_layers, name='load_layers'),\n    url(r'^resources/$', views.bibliography, name=u'resources_url' ),\n    url(r'^resources/bibliography/$', views.bibliography, name=u'bibliography_url' ),\n    url(r'^resources/timeline/$', views.timeline, name=u'timeline_url' ),\n    
url(r'^resources/guide_to_searching/$', views.guide_to_searching, name=u'guide_to_searching_url' ),\n    url(r'^resources/glossary/$', views.glossary, name=u'glossary_url' ),\n    url(r'^resources/conventional_transcription_symbols/$', views.conventional_transcription_symbols, name=u'conventional_transcription_symbols_url'),\n    url(r'^stories/$', views.stories, name=u'stories_url' ),\n    url( r'^stories/(?P<story>.*)/$', views.individual_story, name=u'test_url' ),\n\n    url( r'^$', RedirectView.as_view(pattern_name='index_url') ),\n\n    ]\n","sub_path":"iip_smr_config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"483641702","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom math import sqrt\n\n\nclass Labelmapper:\n\n    def __init__(self, config):\n\n        self.config = config\n        sigma = config.transform_params.sigma\n        thre = config.transform_params.paf_thre\n        self.double_sigma2 = 2 * sigma * sigma\n        self.thre = thre\n\n        # cached common parameters which are the same for all iterations and all pictures\n\n        stride = self.config.stride\n        width = self.config.width//stride\n        height = self.config.height//stride\n\n        # these are the coordinates of the centers of the bigger grid\n        self.grid_x = np.arange(width)*stride + stride/2-0.5\n        self.grid_y = np.arange(height)*stride + stride/2-0.5\n\n        self.Y, self.X = np.mgrid[0:self.config.height:stride, 0:self.config.width:stride]\n\n        # TODO: check it again\n        # basically we should use center of grid, but in this place classic implementation uses left-top point.\n        # self.X = self.X + stride / 2 - 0.5\n        # self.Y = self.Y + stride / 2 - 0.5\n\n    def create_labels(self, joints, mask):\n        # number of layers is paf+heat+1\n        maps = np.zeros(self.config.parts_shape, dtype=np.float)\n\n        # put heatmaps\n        for i in range(self.config.num_parts):\n            visible = joints[:, i, 2] < 2\n            self.put_gaussian_maps(maps, i, joints[visible, i, 0:2])\n\n        sl = slice(self.config.heat_start, self.config.heat_start + self.config.heat_layers)\n        maps[:, :, self.config.bkg_start] = 1. 
- np.amax(maps[:, :, sl], axis=2)\n\n        # put pafs\n        for (i, (fr, to)) in enumerate(self.config.limbs_conn):\n            visible_from = joints[:, fr, 2] < 2\n            visible_to = joints[:, to, 2] < 2\n            visible = visible_from & visible_to\n\n            layer_x, layer_y = (self.config.paf_start + i*2, self.config.paf_start + i*2 + 1)\n            self.put_vector_maps(maps, layer_x, layer_y, joints[visible, fr, 0:2], joints[visible, to, 0:2])\n\n        maps *= mask\n\n        return maps\n\n    def put_gaussian_maps(self, maps, layer, joints):\n\n        # exp(a+b) = exp(a)*exp(b), so the 2D Gaussian can be built as the outer product of two 1D exponentials\n        for i in range(joints.shape[0]):\n            exp_x = np.exp(-(self.grid_x-joints[i, 0])**2/self.double_sigma2)\n            exp_y = np.exp(-(self.grid_y-joints[i, 1])**2/self.double_sigma2)\n            exp = np.outer(exp_y, exp_x)\n            maps[:, :, self.config.heat_start + layer] = np.maximum(maps[:, :, self.config.heat_start + layer], exp)\n\n    def put_vector_maps(self, maps, layer_x, layer_y, joint_from, joint_to):\n\n        count = np.zeros(maps.shape[:-1], dtype=np.int)\n\n        for i in range(joint_from.shape[0]):\n            (x1, y1) = joint_from[i]\n            (x2, y2) = joint_to[i]\n\n            dx = x2-x1\n            dy = y2-y1\n            l2_norm = sqrt(dx**2 + dy**2)\n\n            if l2_norm < 1e-8:\n                continue\n\n            dx = dx / l2_norm\n            dy = dy / l2_norm\n\n            min_sx, max_sx = (x1, x2) if x1 < x2 else (x2, x1)\n            min_sy, max_sy = (y1, y2) if y1 < y2 else (y2, y1)\n\n            min_sx = int(round((min_sx - self.thre) / self.config.stride))\n            min_sy = int(round((min_sy - self.thre) / self.config.stride))\n            max_sx = int(round((max_sx + self.thre) / self.config.stride))\n            max_sy = int(round((max_sy + self.thre) / self.config.stride))\n\n            # check PAF off screen. do not really need to do it with max > grid size\n            if max_sy < 0 or max_sx < 0:\n                continue\n\n            min_sx = 0 if min_sx < 0 else min_sx\n            min_sy = 0 if min_sy < 0 else min_sy\n\n            # TODO: check it again\n            slice_x = slice(min_sx, max_sx)  # + 1     this mask is not only a speed-up, it actually crops the paf; copied from the original code
\n            slice_y = slice(min_sy, max_sy)  # + 1     int g_y = min_y; g_y < max_y; g_y++ -- note strict <\n\n            dist = distances(self.X[slice_y, slice_x], self.Y[slice_y, slice_x], x1, y1, x2, y2)\n            dist = dist <= self.thre\n\n            # TODO: averaging by pafs mentioned in the paper but never worked in C++ augmentation code\n            maps[slice_y, slice_x, layer_x][dist] = (dist * dx)[dist]  # += dist * dx\n            maps[slice_y, slice_x, layer_y][dist] = (dist * dy)[dist]  # += dist * dy\n            count[slice_y, slice_x][dist] += 1\n\n        # TODO: averaging by pafs mentioned in the paper but never worked in C++ augmentation code\n        # heatmaps[:, :, layerX][count > 0] /= count[count > 0]\n        # heatmaps[:, :, layerY][count > 0] /= count[count > 0]\n\n\ndef distances(x, y, x1, y1, x2, y2):\n    \"\"\"\n    calculates the distance from a point (x,y) to the line (x1,y1) -> (x2,y2)\n    how: dot(V-perp, (p-(x,y))) / l2_norm\n    \"\"\"\n    # this is the vector of V-perpendicular\n    dx = (y2-y1)\n    dy = (x1-x2)\n    l2_norm = sqrt(dx**2 + dy**2)\n    dist = dx*(x-x1) + dy*(y-y1)\n    dist /= l2_norm\n\n    return np.abs(dist)\n\n\nif __name__ == \"__main__\":\n    # test the distance from (1,2) to the line (1,1) -> (2,1)\n    print(distances(1, 2, 1, 1, 2, 1))\n","sub_path":"openpose/server/labelmapper.py","file_name":"labelmapper.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"384730562","text":"#import pygame\nfrom .gobj import Gobj\nfrom .GridWalker import GridWalker\nGON = 1\nTAKEN = 32 #bad acronym for processed\ndimen = 30\n\nclass EdgeDetector(object):\t\n\tdef __init__(self, hgrid, gdim, cellDim):\n\t\tsuper(EdgeDetector, self).__init__()\n\t\tself.vgrid = hgrid\n\t\tself.gdim = gdim\t\t\n\t\tself.cellDim = cellDim\n\t\tself.gobjs = []\n\n\tdef processGrid(self, grid):\t\t\n\t\twalker = GridWalker(grid, self.gdim, self.cellDim)\n\t\twhile True:\n\t\t\tr = next(walker.getNextEdgeSet())\t\t\t\n\t\t\tif (r == False):\n\t\t\t\tbreak\n\t\t\tedges = r\n\t\t\tgobj = Gobj(edges)\n\t\t\tif (gobj.compOuterEdge()):\n\t\t\t\t#was successful\n\t\t\t\twalker.cleanTaken()\t\t\n\t\t\t\tself.gobjs.append(gobj)\n\t\t\t\t#print(\"Added obj with \", len(gobj.pts), \"edges\")\n\t\t\telse:\n\t\t\t\t#print(\"splitting\")\t\n\t\t\t\tedges = gobj.edges # get remaining edges which coat the hole/s\n\t\t\t\tedges = [edge for edge in edges if edge[2]] #get only vertical edges\n\t\t\t\tedges = sorted(edges, key = lambda edge: edge[0]) #sort by edge.first.x ASC\n\t\t\t\tx = edges[0][0][0] #will split at this\n\t\t\t\tx /= self.cellDim\n\n\t\t\t\tleft = walker.onlyTaken() #get a grid with only the current gobj\n\t\t\t\tright = left.__copy__()\n\n\t\t\t\tgwLeft = GridWalker(left, self.gdim, self.cellDim) #these grid walkers aren't actually used, only used to clear stuff\n\t\t\t\tgwLeft.clearRight(x)\n\t\t\t\tself.processGrid(gwLeft.grid)\n\n\t\t\t\tgwRight = GridWalker(right, self.gdim, self.cellDim)\n\t\t\t\tgwRight.clearLeft(x)\n\t\t\t\tself.processGrid(gwRight.grid)\n\n\tdef calcCorners(self):\t\t\t\t\t\t\n\t\tself.processGrid(self.vgrid)\t\t\n\n\n\tdef quadify(self):\n\t\tfor g in self.gobjs:\n\t\t\tg.quadify()\t\t\n\n\tdef getQuads(self):\n\t\tqs = []\n\t\tfor g in self.gobjs:\n\t\t\tfor q in g.quads:\n\t\t\t\tqs.append(q)\n\t\treturn qs\n\t\t\t\n\t# def draw(self, wso):\n\t# \tcolor = pygame.Color(50, 150, 250)\n\t# \twhite = pygame.Color(255, 255, 255)\n\t# \tfor g in self.gobjs:\n\t# \t\tg.draw(wso, color, 
white)\n\t\n\n\n\n\n\n\n\n","sub_path":"edgeDetector.py","file_name":"edgeDetector.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"368071107","text":"from typing import List\n\nfrom model.Žmogus import Žmogus\n\n\nclass Pora:\n žmonių_skaičius: int = 2\n žmonės: List[Žmogus]\n\n def mock(self):\n self.žmonės = [Žmogus() for _ in range(2)]\n return self\n","sub_path":"ZP18-1/hotel/model/Pora.py","file_name":"Pora.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"126926901","text":"from django.shortcuts import render\nfrom .models import OrderItem\nfrom .forms import OrderCreateForm\nfrom Cart.cart import Cart\n\ndef order_create(request):\n template = '.html'\n cart = Cart(request)\n if request.method == 'POST':\n form = OrderCreateForm(request.POST)\n if form.is_valid():\n order = form.save()\n for item in cart:\n OrderItem.objects.create(order=order,\n item=item['item'],\n unit_price=item['unit_price'],\n quantity=item['quantity'])\n cart.clear()\n return render(request, template, {'order': order})\n else:\n form = OrderCreateForm()\n\n return render(request, template, {'cart': cart, 'form': form})","sub_path":"GreenStand/Orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"200368700","text":"import pytest\n\nfrom api.base.settings.defaults import API_BASE\nfrom osf_tests.factories import (\n AuthUserFactory,\n CollectionProviderFactory,\n)\n\n\n@pytest.mark.django_db\nclass TestCollectionProviderList:\n\n @pytest.fixture()\n def url(self, request):\n return '/{}providers/collections/'.format(API_BASE)\n\n @pytest.fixture()\n def user(self):\n return AuthUserFactory()\n\n @pytest.fixture()\n def provider_one(self):\n return CollectionProviderFactory(name='Sockarxiv')\n\n @pytest.fixture()\n def provider_two(self):\n provider = CollectionProviderFactory(name='Spotarxiv')\n provider.allow_submissions = False\n provider.domain = 'https://www.spotarxiv.com'\n provider.description = 'spots not dots'\n provider.domain_redirect_enabled = True\n provider._id = 'spot'\n provider.save()\n return provider\n\n def test_provider_list(\n self, app, url, user, provider_one, provider_two):\n # Test length and not auth\n res = app.get(url)\n assert res.status_code == 200\n assert len(res.json['data']) == 2\n\n # Test length and auth\n res = app.get(url, auth=user.auth)\n assert res.status_code == 200\n assert len(res.json['data']) == 2\n\n @pytest.mark.parametrize('filter_type,filter_value', [\n ('allow_submissions', True),\n ('description', 'spots%20not%20dots'),\n ('domain', 'https://www.spotarxiv.com'),\n ('domain_redirect_enabled', True),\n ('id', 'spot'),\n ('name', 'Spotarxiv'),\n ])\n def test_provider_list_filtering(\n self, filter_type, filter_value, app, url,\n provider_one, provider_two):\n res = app.get('{}?filter[{}]={}'.format(\n url, filter_type, filter_value))\n assert res.status_code == 200\n assert len(res.json['data']) == 1\n","sub_path":"api_tests/providers/collections/views/test_collection_provider_list.py","file_name":"test_collection_provider_list.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"272157118","text":"import os\nfrom dataclasses import 
dataclass, asdict\nimport json\nimport pathlib\nfrom typing import (\n List,\n)\n\nfrom paths import build_model_directory, build_training_info_path\n\n\n@dataclass\nclass BatchInfo:\n batch_num: int\n self_play_start_time: float\n self_play_end_time: float\n training_start_time: float\n training_end_time: float\n assessment_start_time: float\n assessment_end_time: float\n generation_self_play: int\n generation_trained: int\n assessed_awr: float\n\n # These are updated after the fact\n num_games: int = None\n num_positions: int = None\n total_mcts_considerations: int = None\n self_play_cpu_time: int = None\n\n def marshall(self):\n return asdict(self)\n\n @classmethod\n def unmarshall(cls, data):\n return cls(**data)\n\n def start_time(self):\n return self.self_play_start_time\n\n def end_time(self):\n return self.assessment_end_time\n\n\n@dataclass\nclass TrainingInfo:\n environment: str\n species: str\n batches: List[BatchInfo]\n\n def current_self_play_generation(self):\n '''\n Find the highest generation that passed gating.\n '''\n for batch in self.batches[::-1]:\n if batch.generation_trained is None:\n continue\n return batch.generation_trained\n return 1\n\n @classmethod\n def load(cls, environment, species):\n training_info_path = build_training_info_path(environment, species)\n if not os.path.exists(training_info_path):\n return cls(\n environment=environment,\n species=species,\n batches=[],\n )\n data = json.loads(open(training_info_path, 'r').read())\n return cls.unmarshall(data)\n\n def finalize_batch(\n self,\n self_play_start_time,\n self_play_end_time,\n training_start_time,\n training_end_time,\n assessment_start_time,\n assessment_end_time,\n generation_self_play,\n generation_trained,\n assessed_awr,\n ):\n self.batches.append(\n BatchInfo(\n len(self.batches) + 1,\n self_play_start_time,\n self_play_end_time,\n training_start_time,\n training_end_time,\n assessment_start_time,\n assessment_end_time,\n generation_self_play,\n generation_trained,\n assessed_awr,\n )\n )\n self.save()\n\n def marshall(self):\n return asdict(self)\n\n @classmethod\n def unmarshall(cls, data):\n data[\"batches\"] = [BatchInfo.unmarshall(x) for x in data[\"batches\"]]\n return cls(**data)\n\n def save(self):\n data = self.marshall()\n training_info_path = build_training_info_path(self.environment, self.species)\n with open(training_info_path, 'w') as f:\n f.write(json.dumps(data))\n print(\"Saved training info to\", training_info_path)\n\n\ndef setup_filesystem(environment, species, generation):\n model_dir = build_model_directory(environment, species, generation)\n pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)\n","sub_path":"training_info.py","file_name":"training_info.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"59043428","text":"import logging\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom src.storage_broker.utils import config\nfrom src.storage_broker.utils import metrics\n\nlogger = logging.getLogger(config.APP_NAME)\n\ns3 = boto3.client(\n \"s3\",\n endpoint_url=config.S3_ENDPOINT_URL,\n aws_access_key_id=config.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY,\n)\n\n\n@metrics.storage_copy_time.time()\ndef copy(key, src, dest, new_key, size, service):\n metrics.payload_size.labels(service=service).observe(size)\n copy_src = {\"Bucket\": src, \"Key\": key}\n try:\n s3.copy(copy_src, dest, new_key)\n logger.info(\"Request ID [%s] 
moved to [%s]\", new_key, dest)\n metrics.storage_copy_success.labels(bucket=dest).inc()\n except Exception:\n logger.exception(\"Unable to move %s to %s bucket\", key, dest)\n metrics.storage_copy_error.labels(bucket=dest).inc()\n\n@metrics.storage_key_check_time.time()\ndef check_key(bucket, request_id):\n try:\n s3.head_object(Bucket=bucket, Key=request_id)\n return None\n except ClientError as e:\n return e\n\n@metrics.presigned_url_gen_time.time()\ndef get_url(bucket, request_id, expiry):\n try:\n return s3.generate_presigned_url(\"get_object\", Params={\"Bucket\": bucket, \"Key\": request_id}, ExpiresIn=expiry)\n except ClientError:\n raise\n","sub_path":"src/storage_broker/storage/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"91216847","text":"# coding: utf-8\n'''\n此文件用以下载数据集\n\n手写数字识别:\n http://yann.lecun.com/exdb/mnist/\n 训练集:\n train-images-idx3-ubyte.gz, train-labels-idx1-ubyte.gz\n 测试集:\n t10k-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz\n\n\n'''\nimport gzip\nimport struct\nimport logging\nimport os\nimport errno\n\ntry:\n import numpy as np\nexcept ImportError:\n # in rare cases numpy may be not installed\n pass\ntry:\n import requests\nexcept ImportError:\n # in rare cases requests may be not installed\n pass\n\nlogger = logging.getLogger('dataset_download')\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.INFO)\n\ndef download(url, fname=None, dirname=\"data/\", overwrite=False):\n \"\"\"Download an given URL\n Parameters\n ----------\n url : str\n URL to download\n fname : str, optional\n filename of the downloaded file. If None, then will guess a filename\n from url.\n dirname : str, optional\n output directory name. If None, then guess from fname or use the current\n directory\n overwrite : bool, optional\n Default is false, which means skipping download if the local file\n exists. 
If true, then download the url to overwrite the local file if\n exists.\n Returns\n -------\n str\n The filename of the downloaded file\n \"\"\"\n if fname is None:\n fname = url.split('/')[-1]\n\n if dirname is None:\n dirname = os.path.dirname(fname)\n else:\n fname = os.path.join(dirname, fname)\n\n if not overwrite and os.path.exists(fname):\n logger.info(\"%s exists, skipping download\", fname)\n return fname\n\n if dirname != \"\":\n if not os.path.exists(dirname):\n try:\n logger.info('create directory %s', dirname)\n os.makedirs(dirname)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise OSError('failed to create ' + dirname)\n\n r = requests.get(url, stream=True)\n assert r.status_code == 200, \"failed to open %s\" % url\n with open(fname, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n logger.info(\"downloaded %s into %s successfully\", url, fname)\n return fname\n\ndef read_label(data, struct_format=\">II\", dtype=np.int8):\n struct.unpack(struct_format, data.read(8))\n label = np.fromstring(data.read(), dtype=dtype)\n\n return label\n\ndef read_image(data, image_size, image_num, struct_format=\">IIII\", dtype=np.uint8, normalize=True):\n _, _, rows, cols = struct.unpack(struct_format, data.read(16))\n image = np.fromstring(data.read(), dtype=dtype).reshape(image_num, rows, cols)\n image = image.reshape(image.shape[0], 1, image_size[0], image_size[1])\n if normalize:\n image = image.astype(np.float32) / 255\n return image\n\ndef mnist():\n def read_data(label_url, image_url):\n with gzip.open(download(label_url)) as flbl:\n label = read_label(flbl)\n with gzip.open(download(image_url), 'rb') as fimg:\n image = read_image(fimg, (28, 28), len(label))\n return (label, image)\n\n path = 'http://yann.lecun.com/exdb/mnist/'\n (train_lbl, train_img) = read_data(\n path + 'train-labels-idx1-ubyte.gz', path + 'train-images-idx3-ubyte.gz')\n (test_lbl, test_img) = read_data(\n path + 't10k-labels-idx1-ubyte.gz', path + 't10k-images-idx3-ubyte.gz')\n return {'train_data': train_img, 'train_label': train_lbl,\n 'test_data': test_img, 'test_label': test_lbl}\n\nif __name__ == '__main__':\n mnist()\n","sub_path":"longling/dataset_download.py","file_name":"dataset_download.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"64270617","text":"''' Search two highest maximums. 
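Keeps the two largest values in a single pass and returns [second_max, max]; e.g. two_maximums([3, 1, 4, 1, 5]) -> [4, 5].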
'''\n\n\ndef two_maximums(numbers):\n    arrayOfNumbers = [ int(item) for item in numbers ]\n    firstMaximum = secondMaximum = min(arrayOfNumbers)\n    for item in arrayOfNumbers:\n        if item >= firstMaximum:\n            secondMaximum = firstMaximum\n            firstMaximum = item\n        elif item > secondMaximum:\n            secondMaximum = item\n    return [secondMaximum, firstMaximum]\n\ndef two_max(numbers):\n    return sorted(numbers)[-2:]\n\ntwo_maximums = lambda numbers:sorted(numbers)[-2:]\n","sub_path":"2HighestMaximums.py","file_name":"2HighestMaximums.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"481150874","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . import views\nfrom .views import ListBlogView,PostCreateAPIView,PostDeleteAPIView,PostUpdateAPIView\n\napp_name = \"article\"\n\nurlpatterns = [\n    path('dashboard/',views.dashboard,name = \"dashboard\"),\n    path('addArticle/',views.addArticle,name = \"addArticle\"),\n    path('article/',views.detail,name = \"detail\"),\n    path('update/',views.updateArticle,name = \"update\"),\n    path('delete/',views.deleteArticle,name = \"delete\"),\n    path('',views.articles,name = \"articles\"),\n    path('gal/',views.galary,name = \"gal\"),\n    path('preview/',views.preview,name = \"preview\"),\n\n    path('comment/',views.addComment,name = \"comment\"),\n    path('blog/', ListBlogView.as_view(), name=\"blog\"),\n    path('create/', PostCreateAPIView.as_view(), name='create'),\n    path('deletes/', PostDeleteAPIView.as_view(), name='Del'),\n\n    path('Updates/', PostUpdateAPIView.as_view(), name='Up'),\n]\n","sub_path":"article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"556087735","text":"# *_* coding:utf-8 *_*\r\n\r\n# Development team: China software development team\r\n# Developer: Administrator\r\n# Created: 2019/3/23 5:16\r\n# File name: weatherSpider\r\n# IDE: PyCharm\r\n\r\n\r\nimport tkinter\r\nimport tkinter.messagebox\r\nfrom tkinter import ttk\r\nimport requests\r\n# from PIL import ImageTk as itk\r\nfrom selenium import webdriver\r\n# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport re\r\n\r\n\r\n'''\r\nGet the name of the local city.\r\n'''\r\n\r\n\r\ndef get_local_city():\r\n    chrome_options = Options()\r\n    chrome_options.add_argument('--headless')\r\n    chrome_options.add_argument('--disable-gpu')\r\n    #\r\n    # swap in a desktop user-agent header\r\n    chrome_options.add_argument(\r\n        'user-agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\"')\r\n\r\n    driver = webdriver.Chrome(\r\n        executable_path='./chromedriver.exe', chrome_options=chrome_options)\r\n    driver.get(\"http://www.weather.com.cn\")\r\n    text = driver.page_source\r\n\r\n    result = re.findall(\r\n        '(.*?)', text, re.S)  # the HTML tags around this group were lost when the markup was stripped from the source\r\n    driver.close()\r\n    return result[0]\r\n\r\n\r\nclass MyFrame(tkinter.Frame):\r\n    def __init__(self, default_city):\r\n        self.root = tkinter.Tk()\r\n\r\n        self.root.title(\"天气查询\")\r\n        self.root.geometry('1200x700+400+220')\r\n        # replace the default application icon\r\n        self.root.iconbitmap('camero.ico')\r\n\r\n        bg = tkinter.Canvas(self.root, width=1200, height=600, bg='white')\r\n        # self.img = itk.PhotoImage(file=\"bg.jpg\")\r\n        bg.place(x=100, y=40)\r\n        # bg.create_image(0, 0, anchor=tkinter.NW, image=self.img)\r\n\r\n        self.city = tkinter.Entry(\r\n            self.root, width=16, font=(\"仿宋\", 18, \"normal\"))\r\n        
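# city entry box; pre-filled below with the caller-supplied default city (typically the one returned by get_local_city())\r\n        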
self.city.place(x=200, y=60)\r\n        self.city.insert(0, default_city)\r\n\r\n        citylabel = tkinter.Label(\r\n            self.root, text='查询城市', font=(\"仿宋\", 18, \"normal\"))\r\n        citylabel.place(x=80, y=60)\r\n\r\n        # search button\r\n        chaxun = tkinter.Button(self.root, width=10, height=3, text=\"查询\",bg='#00CCFF', bd=5, font=\"bold\", command=self.search)\r\n        # chaxun.bind(\"<Button-1>\", self.search)\r\n        chaxun.place(x=800, y=50)\r\n\r\n        # clear button\r\n        clearbtn = tkinter.Button(self.root, width=10, height=3, text=\"清除\",bg='#00CCFF', bd=5, font=\"bold\", command=self.clear)\r\n        # clearbtn.bind(\"<Button-1>\", self.clear)\r\n        clearbtn.place(x=950, y=50)\r\n\r\n        poslabel = tkinter.Label(\r\n            self.root, text='选择位置', font=(\"仿宋\", 18, \"normal\"))\r\n        poslabel.place(x=80, y=100)\r\n\r\n        comvalue = tkinter.StringVar()  # text variable owned by the window; create a fresh value\r\n        self.comboxlist = ttk.Combobox(self.root, width=30, height=18, font=(\"仿宋\", 18, \"normal\"),\r\n                                       textvariable=comvalue)  # initialize\r\n        self.comboxlist[\"values\"] = (\"1\", \"2\", \"3\")\r\n        self.comboxlist.current(0)  # select the first entry\r\n        # bind the selection event (fired when a drop-down item is chosen)\r\n        self.comboxlist.bind(\"<<ComboboxSelected>>\", self.choose)\r\n        self.comboxlist.place(x=200, y=100)\r\n\r\n        self.result = tkinter.Listbox(\r\n            self.root, height=18, width=65, font=(\"仿宋\", 20, \"normal\"))  # listbox that displays the weather\r\n        self.result.place(x=125, y=150)\r\n\r\n        self.citys = []\r\n\r\n        self.headers = {\r\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\r\n            'Cookie': '__guid=182823328.3322839646442213000.1543932524694.901; vjuids=1858d43b6.167798cbdb7.0.8c4d7463d5c5d; vjlast=1543932526.1543932526.30; userNewsPort0=1; f_city=%E5%B9%B3%E9%A1%B6%E5%B1%B1%7C101180501%7C; Hm_lvt_080dabacb001ad3dc8b9b9049b36d43b=1543932526,1543932551,1543932579; Wa_lvt_1=1547464114,1547464115,1547880054,1547983123; defaultCty=101181001; defaultCtyName=%u5546%u4E18; monitor_count=6; Wa_lpvt_1=1547983809'\r\n        }\r\n\r\n        # kick off a query for the local city\r\n        if (default_city != ''):\r\n            self.tianqiforecast(default_city)\r\n\r\n    def tianqiforecast(self, searchcity):\r\n\r\n        city = searchcity\r\n        url = 'http://toy1.weather.com.cn/search?cityname=' + \\\r\n            city + '&callback=success_jsonpCallback&_=1548048506469'\r\n        response = requests.get(url, headers=self.headers)\r\n        html1 = response.content.decode('utf-8')\r\n        self.citys = re.findall(\r\n            '\"ref\":\"(.*?)~.*?~(.*?)~.*?~(.*?)~.*?~.*?~.*?~.*?~(.*?)\"', html1, re.S)\r\n        if (len(self.citys) == 0):\r\n            a = \"出错了,未查找到该城市\"\r\n            self.result.insert(tkinter.END, a)\r\n            return\r\n        # show the common query locations for the current city\r\n        plist = []\r\n        for i in range(0, len(self.citys)):\r\n            # print(i + 1, ':%14s ' % \"\".join(citys[i]))\r\n            plist.append(self.citys[i][1])\r\n        pos = tuple(plist)\r\n        self.comboxlist[\"values\"] = pos\r\n        self.comboxlist.current(0)\r\n        if len(self.citys) != 0:\r\n            self.query(0)\r\n\r\n    def search(self):\r\n        mycity = self.city.get()\r\n        if (mycity != ''):\r\n            self.clear()\r\n            self.tianqiforecast(mycity)\r\n\r\n    def query(self, choose):\r\n        if (len(self.citys[choose][0]) == 9):\r\n            if (self.citys[choose][0][0] != '1' or self.citys[choose][0][1] != '0' or self.citys[choose][0][2] != '1'):\r\n                # station code does not start with \"101\": query international weather\r\n\r\n                url2 = 'http://www.weather.com.cn/weathern/' + \\\r\n                    self.citys[choose][0] + '.shtml'\r\n                responseweather = requests.get(url2, headers=self.headers)\r\n                html2 = responseweather.content.decode('utf-8')\r\n\r\n                weather = re.findall(\r\n                    '
(.*?)(.*?)<.*?', html2, re.S)  # the HTML tags inside these scraping patterns were lost when the markup was stripped from the source\r\n                temp_weather = re.findall(\r\n                    '(.*?).*?title=\"(.*?)\".*?title=\"(.*?)\".*?(.*?)',\r\n                    html2, re.S)\r\n                if len(temp_weather) < 7:\r\n                    # today\r\n                    today1 = re.findall(\r\n                        '(.*?)\\n',\r\n                        html2, re.S)\r\n                    today = re.findall('(.*?).*?(.*?)', today1[0],\r\n                                       re.S)\r\n                    print(today)\r\n                    # the following 6 days\r\n                    weather.append(temp_weather)\r\n                else:\r\n                    weather.append(temp_weather)\r\n\r\n                Hightempture = re.findall(\r\n                    '