diff --git "a/3941.jsonl" "b/3941.jsonl" new file mode 100644--- /dev/null +++ "b/3941.jsonl" @@ -0,0 +1,666 @@ +{"seq_id":"43246768","text":"from rest_framework.decorators import action\r\nfrom rest_framework.viewsets import mixins as mx\r\nfrom rest_framework import viewsets\r\nfrom rest_framework.response import Response\r\nfrom django.db import transaction\r\nfrom django.utils.decorators import method_decorator\r\n\r\nfrom drf_yasg import openapi\r\nfrom drf_yasg.utils import swagger_auto_schema, no_body\r\n\r\nfrom grader import serializers\r\nfrom grader import models\r\nfrom grader.filters import ProblemFilter\r\n\r\nfrom grader.tasks.grade_celery import grade_code\r\n\r\n\r\nclass GetProblemViewSet(mx.ListModelMixin,\r\n mx.RetrieveModelMixin,\r\n viewsets.GenericViewSet):\r\n serializer_class = serializers.ProblemSerializer\r\n queryset = models.Problem.objects.all()\r\n ordering = ['-id']\r\n filterset_class = ProblemFilter\r\n lookup_url_kwarg = 'id'\r\n\r\n def get_queryset(self):\r\n qs = super(GetProblemViewSet, self).get_queryset()\r\n if self.action == 'testcase':\r\n qs = qs.filter(**{\r\n self.lookup_field: self.kwargs[self.lookup_url_kwarg],\r\n })\r\n return qs\r\n\r\n @action(detail=False, methods=['GET'],\r\n url_path=r'(?P[0-9]+)/testcase',\r\n url_name='testcase',\r\n lookup_field='problem_id',\r\n lookup_url_kwarg='id',\r\n filterset_class=None,\r\n queryset=models.TestCase.objects.all(),\r\n serializer_class=serializers.TestCaseSerializer)\r\n def testcase(self, request, *args, **kwargs):\r\n return self.list(request, *args, **kwargs)\r\n\r\n @action(detail=True, methods=['GET'],\r\n url_path=r'checker',\r\n url_name='checker',\r\n lookup_field='problem_id',\r\n lookup_url_kwarg='id',\r\n filterset_class=None,\r\n queryset=models.Checker.objects.all(),\r\n serializer_class=serializers.CheckerSerializer)\r\n def checker(self, request, *args, **kwargs):\r\n return self.retrieve(request, *args, **kwargs)\r\n\r\n\r\n@method_decorator(name='create', decorator=swagger_auto_schema(\r\n operation_description=\"\"\"\r\n 문제 생성 요청.\r\n + language_id - 언어 ID => 특정 언어에 한정된 문제인 경우에만 작성,\r\n + name - 문제이름,\r\n + contents - 문제 pdf 파일 경로 => 추후 텍스트 작성 가능,\r\n + template - 템플릿 코드,\r\n + time - 제한시간(ms)\r\n + memory - 제한메모리(MB)\r\n + problem_type - 문제타입 => [S,C,F]\r\n \\t+ Solution(S) - 입력에 대한 출력이 고정된 문제\r\n \\t+ Checker(C) - 입력에 대한 출력이 고정되지 않은 문제\r\n \\t+ Follow(F) - 주어진 예제를 똑같이 따라하는 문제\r\n + testcase - order(채점 순서), input(입력), output(출력)\r\n 1개 이상 리스트 형태로 작성. 문제타입에 따라 작성방법이 다름.\r\n \\t+ S - 일반적으로 입력과 출력을 모두 작성. 단순 출력 문제의 경우 출력만 작성. \r\n \\t+ C - 입력만 작성\r\n \\t+ F - 출력만 작성\r\n + checker - 문제타입이 'C'인 경우만 작성.\r\n + categories - 문제 태그. 
리스트 형태로 작성.\r\n \"\"\"\r\n))\r\nclass ProblemViewSet(mx.CreateModelMixin,\r\n mx.UpdateModelMixin,\r\n mx.DestroyModelMixin,\r\n viewsets.GenericViewSet):\r\n serializer_class = serializers.ProblemCreateSerializer\r\n queryset = models.Problem.objects.all()\r\n ordering = ['-id']\r\n\r\n def create(self, request, *args, **kwargs):\r\n serializer = self.get_serializer(data=request.data)\r\n serializer.is_valid(raise_exception=True)\r\n\r\n categories = serializer.validated_data.pop('categories')\r\n\r\n checker = None\r\n testcase = serializer.validated_data.pop('testcase_set')\r\n if 'checker' in serializer.validated_data:\r\n checker = serializer.validated_data.pop('checker')\r\n\r\n with transaction.atomic():\r\n instance = models.Problem.objects.create(**serializer.validated_data)\r\n instance.categories.add(*categories)\r\n\r\n if checker:\r\n checker.setdefault('problem', instance)\r\n models.Checker.objects.create(**checker)\r\n\r\n for case in testcase:\r\n case.setdefault('problem', instance)\r\n models.TestCase.objects.create(**case)\r\n\r\n serializer = self.serializer_class(instance)\r\n return Response(serializer.data)\r\n\r\n def update(self, request, *args, **kwargs):\r\n serializer = self.get_serializer(data=request.data)\r\n serializer.is_valid(raise_exception=True)\r\n\r\n categories = serializer.validated_data.pop('categories')\r\n\r\n checker = None\r\n testcase = serializer.validated_data.pop('testcase_set')\r\n if 'checker' in serializer.validated_data:\r\n checker = serializer.validated_data.pop('checker')\r\n\r\n with transaction.atomic():\r\n models.Problem.objects.filter(id=kwargs['pk']).update(**serializer.validated_data)\r\n if checker:\r\n models.Checker.objects.filter(problem_id=kwargs['pk']).update(**checker)\r\n\r\n models.TestCase.objects.filter(problem_id=kwargs['pk']).delete()\r\n for case in testcase:\r\n case.setdefault('problem_id', kwargs['pk'])\r\n models.TestCase.objects.create(**case)\r\n\r\n instance = models.Problem.objects.get(id=kwargs['pk'])\r\n instance.categories.clear()\r\n instance.categories.add(*categories)\r\n\r\n\r\n serializer = self.serializer_class(instance)\r\n return Response(serializer.data)\r\n","sub_path":"grader/views/api/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"422397530","text":"'''melhore o desafio 61 perguntando se ele quer mostrar mais termo'''\nprint('=' * 20)\nprint('10 TERMOS DE UMA PA')\nprint('=' * 20)\nt1 = int(input('Primeiro termo :'))\nr1 = int(input('Qual a razão:'))\ntermo = t1\nc = 1\ntotal = 0\nmais = 10\nwhile mais != 0:\n total = total + mais\n while c <= total:\n termo += r1\n c += 1\n print('{} - '.format(termo), end=' =>')\n print('PAUSA !!!')\n mais = int(input('Deseja adionar mais quantos ? 
'))\nprint('Progreção finalizada com {} termos '.format(total))\n\n\n","sub_path":"ex062.py","file_name":"ex062.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257742050","text":"import cPickle as pickle\n\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nMediumFn = \"pickles/Medium_Distance_Histogram.pickle\"\nLargeFn = 'pickles/Large_Distance_Histogram.pickle'\n\n\ndef main():\n \n \n MediumHistogram= pickle.load(open(MediumFn,'rb'))\n LargeHistogram = pickle.load(open(LargeFn,'rb'))\n \n x = range(len(MediumHistogram))\n y = MediumHistogram\n\n fig = plt.figure()\n\n axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)\n axes.bar(x,y,align='center',width=0.5)\n axes.set_xlabel('Distances')\n axes.set_ylabel('Count')\n axes.set_title('Distance Distribution Medium Network');\n fig.savefig(\"diagrams/Medium_Distance_Distribution.png\")\n\n x = range(len(LargeHistogram))\n y = LargeHistogram\n\n fig = plt.figure()\n\n axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)\n axes.bar(x,y,align='center',width=0.5)\n axes.set_xlabel('Distances')\n axes.set_ylabel('Count')\n axes.set_title('Distance Distribution Large Network');\n fig.savefig(\"diagrams/Large_Distance_Distribution.png\")\n\n\nmain()","sub_path":"snacs_assignment_1_Q_2_5_plot.py","file_name":"snacs_assignment_1_Q_2_5_plot.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"390864701","text":"# -----------------------------------\r\n# TRAINING OUR MODEL\r\n# -----------------------------------\r\n\r\n# import the necessary packages\r\nimport constants\r\nimport h5py\r\nimport numpy as np\r\nfrom sklearn.externals import joblib\r\nimport glob\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n\r\ndef svc_param_selection(X, y, nfolds, svc):\r\n Cs = [0.001, 0.01, 0.1, 1, 10]\r\n gammas = [0.001, 0.01, 0.1, 1, 10]\r\n param_grid = {'C': Cs, 'gamma': gammas}\r\n grid_search = GridSearchCV(svc, param_grid, cv=nfolds, verbose=10)\r\n grid_search.fit(X, y)\r\n # grid_search.best_params_\r\n with open('best_svm_params.txt', 'a') as file:\r\n file.write(\"score : {}\\nC : {}\\nKernel : {}\\nGamma : {}\".format(grid_search.best_score_,\r\n grid_search.best_estimator_.C,\r\n grid_search.best_estimator_.kernel,\r\n grid_search.best_estimator_.gamma))\r\n return grid_search\r\n\r\n\r\n# variables to hold the results and names\r\nresults = []\r\nnames = []\r\nscoring = \"accuracy\"\r\n\r\n# import the feature vector and trained labels\r\nh5f_data = h5py.File('output/data.h5', 'r')\r\nh5f_label = h5py.File('output/labels.h5', 'r')\r\n\r\nglobal_features_string = h5f_data['dataset_1']\r\nglobal_labels_string = h5f_label['dataset_1']\r\n\r\nglobal_features = np.array(global_features_string)\r\nglobal_labels = np.array(global_labels_string)\r\n\r\nh5f_data.close()\r\nh5f_label.close()\r\n\r\n# verify the shape of the feature vector and labels\r\nprint(\"[STATUS] features shape: {}\".format(global_features.shape))\r\nprint(\"[STATUS] labels shape: {}\".format(global_labels.shape))\r\n\r\nprint(\"[STATUS] training started...\")\r\n\r\n# split the training and testing data\r\n(trainDataGlobal, testDataGlobal, 
trainLabelsGlobal, testLabelsGlobal) = train_test_split(\r\n np.array(global_features),\r\n np.array(\r\n global_labels),\r\n test_size=constants.test_size,\r\n random_state=constants.seed)\r\n\r\n\r\n# save test data for calc accuracy\r\nh5f_data = h5py.File('output/test_data.h5', 'w')\r\nh5f_data.create_dataset('dataset_1', data=np.array(testDataGlobal))\r\n\r\nprint(\"[STATUS] splitted train and test data...\")\r\nprint(\"Train data : {}\".format(trainDataGlobal.shape))\r\nprint(\"Test data : {}\".format(testDataGlobal.shape))\r\nprint(\"Train labels: {}\".format(trainLabelsGlobal.shape))\r\nprint(\"Test labels : {}\".format(testLabelsGlobal.shape))\r\n\r\n# -----------------------------------\r\n# TESTING OUR MODEL\r\n# -----------------------------------\r\n\r\n# to visualize results\r\n\r\nfilename = './svmmodel.joblib.pkl'\r\n\r\nsvc = SVC(kernel='rbf', verbose=True)\r\nclf = svc_param_selection(trainDataGlobal, trainLabelsGlobal, 5, svc)\r\n\r\njoblib.dump(clf, filename, compress=9)\r\n","sub_path":"code/classification/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"562349716","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\npath_base = \"/home/behnam/workspace/OpenCVProjects\"\n# image_path = \"/images/Unequalized_Hawkes_Bay_NZ.jpg\"\nimage_path = \"/images/00001.png\"\n\n\nimage = cv2.imread(path_base+image_path, cv2.IMREAD_GRAYSCALE)\n\nimage_equalize_hist = cv2.equalizeHist(image)\n\n\n# create a CLAHE object (Arguments are optional).\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\nimage_clahe = clahe.apply(image)\n\n\nplt.figure(figsize=(18, 6))\n\nplt.subplot(2, 3, 1)\nplt.imshow(image, cmap=\"gray\")\nplt.title(\"Original Image\")\n\n\nplt.subplot(2, 3, 2)\nplt.imshow(image_equalize_hist, cmap=\"gray\")\nplt.title(\"Equalized Image\")\n\n\nplt.subplot(2, 3, 3)\nplt.imshow(image_clahe, cmap='gray')\nplt.title(\"image_clahe\")\n\n\nplt.subplot(2, 3, 4)\nplt.hist(image.ravel(), 256, [0, 256])\nplt.title(\"image hist\")\n\n\nplt.subplot(2, 3, 5)\nplt.hist(image_equalize_hist.ravel(), 256, [0, 256])\nplt.title(\"image_equalize_hist\")\n\nplt.subplot(2, 3, 6)\nplt.hist(image_clahe.ravel(), 256, [0, 256])\nplt.title(\"image_clahe\")\n\nplt.tight_layout()\nplt.show()\n","sub_path":"scripts/contrast_limited_adaptive_histogram_equalization.py","file_name":"contrast_limited_adaptive_histogram_equalization.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"223849687","text":"import os\nimport time\nimport pylab as plt\nimport numpy as np\nfrom numpy.random import randn\nimport brian2 as b2\nfrom brian2.equations.equations import Equations\nimport logging\n# logger = logging.getLogger('ftpuploader')\n\n\n'''\nconnect three inhibitory exponential LIF cells in feed forward loop\nwith alpha shape conductance base synapse\nedges : ([0,1], [0,2], [1,2])\n'''\n\nb2.seed(2)\nnp.random.seed(2)\n\n\ndef simulate():\n\n common_params = { # Parameters common to all neurons.\n 'C': 100*b2.pfarad,\n 'tau_m': 10*b2.ms,\n 'EL': -60*b2.mV,\n 'DeltaT': 2*b2.mV,\n 'Vreset': -65, # *b2.mV\n 'VTmean': -50*b2.mV,\n 'VTsd': 2*b2.mV\n }\n common_params['gL'] = common_params['C'] / common_params['tau_m']\n\n param_I_syn = {\n \"Erev_i\": -80.0*b2.mV,\n \"Tau_i\": 3.0*b2.ms,\n \"w_i\": 1.5, # ! 
*b2.nsiemens 0.1\n \"p_i\": 1.0,\n }\n\n I_cell_params = {'Ncells': num_I_cells,\n 'IXmean': 150.*b2.pA,\n 'IXsd': 20*b2.pA}\n\n eqs = \"\"\"\n VT : volt\n IX : amp\n I_syn_i = g_syn_i * (Erev_i - vm): amp\n Im = IX + \n gL * (EL - vm) + \n gL * DeltaT * exp((vm - VT) / DeltaT) : amp\n \n ds_i/dt = -s_i / Tau_i : siemens \n dg_syn_i/dt = (s_i - g_syn_i) / Tau_i : siemens \n dvm/dt = (Im + I_syn_i) / C : volt\n \"\"\"\n\n I_cells = b2.NeuronGroup(I_cell_params['Ncells'],\n model=eqs,\n dt=dt0,\n method=integration_method,\n threshold=\"vm > 0.*mV\",\n reset=\"vm={}*mV\".format(common_params['Vreset']),\n refractory=\"vm > 0.*mV\",\n namespace={**common_params,\n **param_I_syn,\n })\n\n cII = b2.Synapses(I_cells, I_cells,\n on_pre='s_i += {}*nsiemens'.format(\n param_I_syn[\"w_i\"]),\n dt=dt0,\n method=integration_method,\n namespace={**common_params,\n **param_I_syn,\n })\n adj = np.array([[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0]])\n cols, rows = np.nonzero(adj)\n cII.connect(i=rows, j=cols) \n \n\n # Initialise random parameters.\n I_cells.VT = [common_params['VTmean']] * I_cell_params[\"Ncells\"]\n I_cells.IX = (randn(len(I_cells)) *\n I_cell_params['IXsd'] + I_cell_params['IXmean'])\n\n I_cells.vm = randn(len(I_cells)) * 10 * b2.mV - 60 * b2.mV\n\n spike_monitor_I = b2.SpikeMonitor(I_cells)\n\n state_monitor_I = None\n if record_volrages:\n state_monitor_I = b2.StateMonitor(I_cells,\n [\"vm\", \"g_syn_i\", \"I_syn_i\"],\n record=True,\n dt=dt0)\n\n net = b2.Network(I_cells)\n if record_volrages:\n net.add(state_monitor_I)\n net.add(cII)\n net.add(spike_monitor_I)\n # Randomise initial membrane potentials.\n\n print('Simulation running...')\n start_time = time.time()\n b2.run(sim_duration*b2.ms)\n duration = time.time() - start_time\n print('Simulation time:', duration, 'seconds')\n\n return spike_monitor_I, state_monitor_I\n\n\ndef plot(spike_monitor,\n state_monitor,\n plot_voltages=False):\n\n fig, ax = plt.subplots(4, figsize=(10, 5), sharex=True)\n\n ax[0].plot(spike_monitor.t/b2.ms, spike_monitor.i, '.k', ms=3)\n\n if plot_voltages:\n for i in range(num_I_cells):\n ax[1].plot(state_monitor.t/b2.ms,\n state_monitor.vm[i]/b2.mV, label=str(i+1))\n # ax[2].plot(state_monitor_E.t/b2.ms,\n # state_monitor_E.I_syn_e[1]/b2.amp,\n # lw=1, color=\"r\")\n ax[2].plot(state_monitor.t/b2.ms,\n state_monitor.g_syn_i[2]/b2.nsiemens,\n lw=1, color=\"b\", ls=\"--\")\n ax[3].plot(state_monitor.t/b2.ms,\n state_monitor.I_syn_i[2]/b2.pA,\n lw=1, color=\"b\", ls=\"--\")\n ax[2].set_ylabel(r\"$g_{syn}$(nS)\")\n ax[3].set_ylabel(r\"$I_{syn}$(pA)\")\n\n ax[0].set_ylabel(\"I cells\")\n ax[1].legend(loc=\"upper right\")\n ax[1].set_ylabel(\"Voltages I\")\n ax[-1].set_xlabel(\"time (ms)\")\n plt.savefig(\"data/I_cell.png\", dpi=150)\n plt.show()\n\n\nif __name__ == \"__main__\":\n\n num_I_cells = 3\n dt0 = 0.1*b2.ms\n\n sim_duration = 200\n state = \"beta\"\n\n integration_method = \"rk2\"\n record_volrages = True\n plot_voltages = record_volrages\n\n sp_mon, st_mon = simulate()\n plot(sp_mon, st_mon, plot_voltages)\n","sub_path":"Brain2/three_cells_i.py","file_name":"three_cells_i.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"111105330","text":"# cofing: utf-8\n\nimport os\nimport time\nimport re\nimport json\nimport random\nimport pprint\nfrom slackclient import SlackClient\n\nslack_token = os.environ[\"SLACK_API_TOKEN\"]\nsc = SlackClient(slack_token)\n\npat_ns_res = re.compile(r\"nosetting 
respond\",re.IGNORECASE)\npat_ns_rnd = re.compile(r\"nosetting randomres\",re.IGNORECASE)\npat_ns_SC = re.compile(r\"nosetting show Channels\",re.IGNORECASE)\npat_ns_SR = re.compile(r\"nosetting show responses\",re.IGNORECASE)\npat_ns_AC = re.compile(r\"nosetting addThisChannel\",re.IGNORECASE)\npat_ns_DC = re.compile(r\"nosetting disableThisChannel\",re.IGNORECASE)\npat_ns_help = re.compile(r\"nosetting help\",re.IGNORECASE)\npat_space = re.compile(r\"^\\s+\")\npat_space2 = re.compile(r\"\\s+$\")\n\ndef postMsg(msg, channel,unfurl=True):\n sc.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=msg,\n icon_emoji=\":mawarunos:\",\n unfurl_links=unfurl,\n username=\"nosponse\"\n )\ndef responseMsg(rtm, msg):\n postMsg(msg, channel=rtm[\"channel\"])\n\ndef postRandMsg(rtm, lis):\n postMsg(random.choice(lis), channel=rtm[\"channel\"])\n\ndef response(rtm):\n for er in enableResponses.keys():\n if rtm[\"text\"] == er:\n if isinstance(enableResponses[er], str):\n responseMsg(rtm, enableResponses[er])\n else:\n postRandMsg(rtm, enableResponses[er])\n\ndef addRespond(rtm):\n if pat_ns_res.match(rtm[\"text\"]):\n string = pat_ns_res.sub(\"\",rtm[\"text\"],count=1)\n list = re.split(r\" to \", string ,maxsplit=1, flags=re.IGNORECASE)\n if len(list) != 2:\n responseMsg(rtm,\"Error!\")\n return\n res = list[0]\n mes = list[1]\n res = pat_space.sub(\"\",res)\n mes = pat_space.sub(\"\",mes)\n res = pat_space2.sub(\"\",res)\n mes = pat_space2.sub(\"\",mes)\n if mes == \"\":\n responseMsg(rtm,\"Error!\")\n return\n if res == \"\":\n for resp in enableResponses.keys():\n if resp == mes:\n del enableResponses[resp]\n responseMsg(rtm,\"Deleted the response!\")\n response_file = open(\"responses.json\", \"w\",encoding=\"utf-8\")\n json.dump(enableResponses, response_file, indent=4)\n response_file.close()\n return\n responseMsg(rtm,\"Error!\")\n return\n enableResponses[mes] = res\n response_file = open(\"responses.json\", \"w\",encoding=\"utf-8\")\n json.dump(enableResponses, response_file, indent=4)\n response_file.close()\n responseMsg(rtm,\"Success!\")\n\ndef addRandrespond(rtm):\n if pat_ns_rnd.match(rtm[\"text\"]):\n string = pat_ns_rnd.sub(\"\",rtm[\"text\"],count=1)\n list = re.split(r\"\\n\", string)\n if len(list) <=2:\n responseMsg(rtm,\"Error!\")\n return\n for li in range(len(list)):\n list[li] = pat_space.sub(\"\",list[li])\n list[li] = pat_space2.sub(\"\",list[li])\n if list[li] == \"\":\n responseMsg(rtm,\"Error!\")\n return\n mes = list.pop(0)\n res = list\n enableResponses[mes] = res\n response_file = open(\"responses.json\", \"w\",encoding=\"utf-8\")\n json.dump(enableResponses, response_file, indent=4)\n response_file.close()\n responseMsg(rtm,\"Success!\")\n \ndef showDetails(rtm):\n if pat_ns_SC.match(rtm[\"text\"]):\n ch_link = []\n for chs in enableChannels.keys():\n ch_link.append(\"<#\" + chs +\"|\"+ enableChannels[chs]+\">\")\n responseMsg(rtm,ch_link)\n if pat_ns_SR.match(rtm[\"text\"]):\n res = pprint.pformat(enableResponses, indent=4)\n postMsg(escape_uid(res), rtm[\"channel\"], unfurl=False)\n\ndef addChannel(rtm, inCh):\n if pat_ns_AC.match(rtm[\"text\"]):\n enableChannels[rtm[\"channel\"]] = get_channel_name(rtm[\"channel\"])\n channel_file = open(\"enable_channels.json\", \"w\",encoding=\"utf-8\")\n json.dump(enableChannels, channel_file, indent=4)\n channel_file.close()\n if inCh:\n responseMsg(rtm,\"Updated!\")\n else:\n responseMsg(rtm,\"Success!\")\n\ndef disChannel(rtm):\n if pat_ns_DC.match(rtm[\"text\"]):\n del enableChannels[rtm[\"channel\"]]\n channel_file = 
open(\"enable_channels.json\", \"w\",encoding=\"utf-8\")\n json.dump(enableChannels, channel_file, indent=4)\n channel_file.close()\n responseMsg(rtm,\"Success!\")\n\ndef showhelp(rtm):\n if pat_ns_help.match(rtm[\"text\"]):\n responseMsg(rtm,\"`nosetting respond A to B` : BにAと返す反応を追加します。\\n\"+\"`nosetting randomres A \\\\n B\\\\n C\\\\n ...` : Aに対してB,C...をランダムに返す反応を追加します。\\n\"+\"`nosetting addThisChannel` : そのチャンネルでこのbotを有効化します。\\n\"+\"`nosetting disableThisChannel` : そのチャンネルでこのbotを無効化します。\\n\"+\"`nosetting show Channels` : このbotが有効なチャンネルを表示します。\\n\"+\"`nosetting show responses` : 設定されている反応を表示します。\")\n\ndef get_channel_name(channelid):\n channelname = \"\"\n ch_list = sc.api_call(\"channels.list\")\n if ch_list[\"ok\"]:\n for channel in ch_list[\"channels\"]:\n if channel[\"id\"] == channelid:\n channelname = channel[\"name\"]\n return channelname\n\ndef get_channel_id(channelname):\n channelid = \"\"\n ch_list = sc.api_call(\"channels.list\")\n if ch_list[\"ok\"]:\n for channel in ch_list[\"channels\"]:\n if channel[\"name\"] == channelname:\n channelid = channel[\"id\"]\n return channelid\n\ndef get_user_name(userid):\n username = \"\"\n u_list = sc.api_call(\"users.list\")\n if u_list[\"ok\"]:\n for user in u_list[\"members\"]:\n if user[\"id\"] == userid:\n username = user[\"name\"]\n return username\n\ndef escape_uid(text):\n res = text.replace(\"!\",\"!\")\n for found in re.findall(r\"<@(.*?)>\", text):\n res = res.replace(found, get_user_name(found))\n res = res.replace(\"@\",\"@\")\n return res\n\nchannel_file = open(\"enable_channels.json\", \"r\", encoding=\"utf-8\")\nenableChannels = json.load(channel_file)\nchannel_file.close()\n\nresponse_file = open(\"responses.json\", \"r\", encoding=\"utf-8\")\nenableResponses = json.load(response_file)\nresponse_file.close()\n\nif sc.rtm_connect():\n for ch in enableChannels.keys():\n enableChannels[ch] = get_channel_name(ch)\n while True:\n for rtm in sc.rtm_read():\n print(rtm)\n if rtm[\"type\"] == \"message\":\n if \"subtype\" not in rtm and \"text\" in rtm:\n inCh = False\n for ec in enableChannels.keys():\n if rtm[\"channel\"] == ec:\n inCh = True\n break\n if inCh:\n response(rtm)\n addRespond(rtm)\n disChannel(rtm)\n showDetails(rtm)\n showhelp(rtm)\n addChannel(rtm, inCh)\n addRandrespond(rtm)\n else:\n addChannel(rtm, inCh)\n time.sleep(0.5)\nelse:\n print(\"Connection Failed\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"387862309","text":"class UnionFind5(object):\n # private\n def __init__(self, n):\n self.__parent = [x for x in range(n)] # at first, every id is different, every value belongs to their own group\n self.__rank = [ 1 for x in range(n)] # this rank is used to record the height of each group\n self.__count = n\n\n # find the root of p\n def find(self, p):\n if p>=0 and p self.__rank[qRoot]:\n self.__parent[qRoot] = pRoot\n else:\n self.__parent[pRoot] = qRoot\n rank[qRoot] += 1","sub_path":"unionFind/unionFind5.py","file_name":"unionFind5.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"54370550","text":"#this is a test scraper it just scraps the link\nfrom pymongo import MongoClient\nimport requests\nimport urllib2\nfrom bs4 import BeautifulSoup\n#import BeautifulSoup\nfrom datetime import datetime\nurl = \"http://stackoverflow.com\"\npage = urllib2.urlopen(url)\n#r = 
requests.get('http://swapi.co/api/people/1')\nsoup = BeautifulSoup(page)\ni = 1\nclient = MongoClient()\ndb = client.test\nprint(soup)\nid1 = datetime.now()\nr = requests.get('http://swapi.co/api/people/1')\nall_links = soup.find_all(\"a\")\nlinks = []\npids = []\nfor link in all_links:\n\tprint(link.get(\"href\"))\n\tid = datetime.now()\n\ttry:\n\t\tdb.testdb.insert_one({\"id\": id,\"pid\": id1 ,\"link\":str(link)})\n\texcept :\n\t\tprint(\"error while encoding\")\n\tpids.append(id)\n\tlinks.append(link)\n\n\n#this function scarps all the links in a web page and then does the same thing for all the links it gives an id to each link and a pid to the parent link from which it was scraped.\ndef crawl(links,pid):\n\tfor link,pid in zip(links,pids):\n\t\tlinks2 = []\n\t\tpid2 = []\n\t\tpage = urllib2.urlopen(url)\n\t\tsoup = BeautifulSoup(page)\n\t\tall_links = soup.find_all(\"a\")\n\t\tfor link in all_links:\n\t\t\tid = datetime.now()\n\t\t\tpid2.append(id)\n\t\t\ttry:\n\t\t\t\tdb.testdb.insert_one({\"id\": id,\"pid\": pid ,\"link\":str(link)})\n\t\t\texcept:\n\t\t\t\tprint(\"error while encoding\")\n\t\t\tprint(link.get(\"href\"))\n\t\t\tlinks2.append(link)\n\t\tcrawl(links2,pid2)\ncrawl(links,pids)\n\t\t\n'''\nwhile r.status_code == 200:\n\tr = requests.get('http://swapi.co/api/people/'+ str(i))\n\tdoc = json.loads(r.content)\n\t#es.index(index='sw', doc_type='people', id=i, body=json.loads(r.content))\n\tresult = db.stars.insert_one(json.loads(r.content))\n\ti=i+1\n'''\n","sub_path":"basic_test_scrapper.py","file_name":"basic_test_scrapper.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"104557815","text":"#\r\n# Copyright 2013 TeamSWAP\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n\r\nimport sys\r\nimport atexit\r\nfrom datetime import datetime\r\nfrom threading import Lock\r\n\r\nfrom const import *\r\n\r\nDEBUG_TO_FILE = True\r\n\r\nthreadLock = Lock()\r\nredirector = None\r\n\r\nclass LogRedirector:\r\n def __init__(self, tag):\r\n self.fileName = 'debug-%s.log'%tag\r\n self.fileLock = Lock()\r\n if DEBUG_TO_FILE:\r\n f = open(self.fileName, 'w')\r\n f.close()\r\n self.stdOut = sys.stdout\r\n self.stdErr = sys.stderr\r\n sys.stdout = self\r\n sys.stderr = self\r\n self.closed = False\r\n\r\n def write(self, text):\r\n time = datetime.now()\r\n timeTxt = \"[%02d:%02d:%02d] \"%(time.hour, time.minute, time.second)\r\n text = timeTxt + text\r\n if text.endswith(\"\\n\"):\r\n text = text[:-1].replace(\"\\n\", \"\\n\" + timeTxt) + \"\\n\"\r\n else:\r\n text = text.replace(\"\\n\", \"\\n\" + timeTxt)\r\n if DEBUG_TO_FILE:\r\n with self.fileLock:\r\n f = open(self.fileName, 'a+')\r\n f.write(text)\r\n f.close()\r\n self.stdOut.write(text)\r\n\r\n def close(self):\r\n sys.stdout = self.stdOut\r\n sys.stderr = self.stdErr\r\n self.closed = True\r\n\r\n def __del__(self):\r\n if not self.closed:\r\n self.close()\r\n\r\ndef setupLogging(tag):\r\n global 
redirector\r\n redirector = LogRedirector(tag)\r\n atexit.register(redirector.close)\r\n\r\ndef prnt(*text):\r\n text = ' '.join(map(lambda x:str(x), text))\r\n with threadLock:\r\n redirector.write(text + '\\n')\r\n","sub_path":"src/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184016488","text":"# MIT License\n#\n# Copyright (c) 2017 Matthias Rost, Alexander Elvers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n__author__ = \"Matthias Rost, Alexander Elvers (mrost / aelvers inet.tu-berlin.de)\"\n\nimport copy\n\nfrom evaluation.abstract_data_extractor import AbstractDataExtractor\n\n\nclass IncrementalDataExtractor(AbstractDataExtractor):\n def __init__(self):\n super().__init__()\n self.probing_points = set()\n\n def extract_data_from_experiment_manager(self, exp_mgr):\n for scenario_key, algorithm_dict in exp_mgr.scenario_solutions.items():\n if scenario_key in self.scenario_keys:\n raise Exception(f\"Already have stored the data for the scenario key {scenario_key}\")\n\n topology_name = scenario_key[3]\n if topology_name not in self.topology_name_to_size:\n self.topology_name_to_size[topology_name] = exp_mgr.suitable_substrates.names_to_nodes[topology_name]\n\n if scenario_key[5] not in self.probing_points:\n self.probing_points.add(scenario_key[5])\n\n self.extracted_solution_data[scenario_key] = {}\n\n self.scenario_keys.add(scenario_key)\n for algorithm_id, algorithm_result in algorithm_dict.items():\n self.algorithms_keys.add(algorithm_id)\n\n self.extracted_solution_data[scenario_key][algorithm_id] = algorithm_result\n\n def check_completeness(self, other_scenario_keys=None):\n\n print(\"Starting check of completeness..\")\n\n flattened_scenario_keys = [tuple(a[:5]) for a in self.scenario_keys]\n print(flattened_scenario_keys)\n flattened_scenario_keys = set(flattened_scenario_keys)\n\n if other_scenario_keys is not None:\n copy_of_scenario_keys = copy.deepcopy(flattened_scenario_keys)\n\n for other_scenario_key in flattened_scenario_keys:\n if other_scenario_key not in copy_of_scenario_keys:\n raise Exception(f\"DataExtractor has no information for scenario key {other_scenario_key}\")\n\n copy_of_scenario_keys.remove(other_scenario_key)\n\n if len(copy_of_scenario_keys) > 0:\n for copy_key in copy_of_scenario_keys:\n raise Exception(f\"Scenario keys was found in data but not in the given set of scenario_keys\"\n f\" (overall 
{copy_key} many unknown items)\")\n\n for x in flattened_scenario_keys:\n for probing_point in self.probing_points:\n if tuple(x[:5]) + (probing_point,) not in self.scenario_keys:\n raise Exception(\"invalid probing points\")\n\n print(\"\\tsets of scenario keys are identical! \")\n\n for scenario_key in self.scenario_keys:\n for algorithm_id in self.algorithms_keys:\n if algorithm_id not in self.extracted_solution_data[scenario_key]:\n raise Exception(f\"Missing information for algorithm {algorithm_id} for scenario key {scenario_key}\"\n f\" (overall {len(self.algorithms_keys)} many algorithms were detected)\")\n\n print(\"\\ta single result for each algorithm! \")\n\n def print_it(self):\n print(\"Printing data extractor contents...\")\n for scenario_key in self.scenario_keys:\n print(f\"\\t{scenario_key}..\")\n for algorithm_id in self.algorithms_keys:\n extracted_data = self.extracted_solution_data[scenario_key][algorithm_id]\n print(f\"\\t\\t{extracted_data}\")\n","sub_path":"src/evaluation/incremental_data_extractor.py","file_name":"incremental_data_extractor.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392236785","text":"import pygame\nfrom math import radians, sin, cos, floor\nBLACK = (0,0,0)\ncolor2 = (240, 30, 20)\n\ndef drawAxis(c):\n pygame.draw.line(screen, color, [c[0]/2, 0], [c[0]/2, c[1]] , 2)\n pygame.draw.line(screen, color, [0, c[1]/2], [c[0], c[1]/2] , 2)\n\n\ndef transLine(p, c):\n \"\"\" This function receives the line \"p\" (array, point coord) and the weidht and height \n of the screen \"c\" (array too), then, it returns the point transformed\"\"\"\n return ([c[0]/2 +p[0], c[1]/2 -p[1]])\n\n\ndef rotateCounterCl(p, angle):\n \"\"\" p is the point to rotate, and angle is the angle of rotationg in degrees \"\"\"\n \n radAngle = radians(angle)\n return [floor(p[0] * cos(radAngle)-p[1] * sin(radAngle)), floor(p[0] * sin(radAngle) + p[1] * cos(radAngle))]\n\n\n\nif __name__ == '__main__':\n pygame.init()\n\n screenWH =[900, 600]\n center =[450, 300]\n point3 = [100, 200]\n point4= [50, 250]\n screen = pygame.display.set_mode(screenWH)\n pygame.display.flip()\n end = True\n color = (250, 0, 250)\n posList = []\n n = 3\n\n while end:\n drawAxis(screenWH)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n end = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n e = list(event.pos)\n posList.append(event.pos) \n \n \n #Dibujar el poligono\n if event.type == pygame.KEYDOWN:\n\n pygame.draw.polygon(screen, color2, posList )\n pygame.display.flip()\n \n \n\n \n ","sub_path":"DrawForms/dibujarpoligono.py","file_name":"dibujarpoligono.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"451042574","text":"#!/usr/bin/python3\nimport requests\n\ndef hack():\n headers = {\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'en-GB,en;q=0.9,fr;q=0.8,en-US;q=0.7,es;q=0.6',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Content-Length': '71',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Cookie': '',\n 'Host': '158.69.76.135',\n 'Origin': 'http://158.69.76.135',\n 'Referer': 'http://158.69.76.135/level2.php',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 
10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n }\n\n data = {\n 'id':'2483',\n 'holdthedoor':'Submit',\n 'key':''\n }\n\n URL = \"http://158.69.76.135/level2.php\"\n response = requests.get(URL)\n jar = response.cookies\n cks = response.cookies['HoldTheDoor']\n data['key'] = cks\n full_cks = \"PHPSESSID=knufqho189hue3dala10rdvp04; HoldTheDoor=\" + cks\n headers['Cookie'] = full_cks\n page = requests.post(URL, data=data, headers=headers, cookies=jar)\n print(page)\n\ndef main():\n for i in range(0, 1011):\n hack()\n print(\"Page hacked successfully!\\nForm number: {}\\n\".format(i + 1))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"level_2/lvl2.py","file_name":"lvl2.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435555832","text":"import os,shutil\nrenamelist=open('C:\\\\Python27\\\\rename_AGSTier2.txt')\n\ntargetname=renamelist.readlines()\nfor p in targetname:\n\tp=p.strip()\n\tif not os.path.exists(os.path.join('C:\\\\temp\\\\SEDIT-3677',p)):\n os.makedirs(os.path.join('C:\\\\temp\\\\SEDIT-3677',p))\n\t#shutil.copy(os.path.join('C:\\\\temp\\\\SEDIT-3677',\"sample.xlsx\"),os.path.join('C:\\\\temp\\\\SEDIT-3677',p))\n\t#os.rename(os.path.join('D:\\\\Q3_Blurb\\\\BlukPublishing\\\\ToBePublish',\"sample.csv\"),os.path.join('D:\\\\Q3_Blurb\\\\BlukPublishing\\\\ToBePublish',p))\n\nrenamelist.close()\n","sub_path":"CreatPlaceholder4AGSTier2.py","file_name":"CreatPlaceholder4AGSTier2.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557799409","text":"import bson\nfrom dateutil.parser import parse\n\ndef test_groups(as_user, as_admin, data_builder):\n # Cannot find a non-existant group\n r = as_admin.get('/groups/non-existent')\n assert r.status_code == 404\n\n group = data_builder.create_group()\n user_id = data_builder.create_user(_id='test-user@user.com')\n r = as_admin.post('/groups/' + group + '/permissions', json={'_id': 'user@user.com', 'access': 'admin'})\n assert r.ok\n\n # Able to find new group\n r = as_user.get('/groups/' + group)\n assert r.ok\n initial_modified = r.json()['modified']\n created = r.json()['created']\n\n # Test that POST group with same id doesn't update created\n r = as_admin.post('/groups', json={'_id': group})\n assert r.ok\n r = as_admin.get('/groups/' + group)\n assert r.ok\n first_modified = r.json()['modified']\n d1 = parse(initial_modified)\n d2 = parse(first_modified)\n assert d2 >= d1\n\n assert r.json()['created'] == created\n\n # Test to make sure that list of roles nor name exists in a newly created group\n r = as_admin.get('/groups/' + group)\n assert r.json().get('roles', 'No Roles') == 'No Roles'\n assert r.json().get('name', 'No Name') == 'No Name'\n\n # Able to change group label\n group_label = 'New group label'\n r = as_user.put('/groups/' + group, json={'label': group_label})\n assert r.ok\n\n # Get the group again to compare timestamps\n r = as_user.get('/groups/' + group)\n assert r.ok\n second_modified = r.json()['modified']\n d1 = parse(first_modified)\n d2 = parse(second_modified)\n assert d2 > d1\n\n # Try adding a tag with a slash\n tag_name = 'Grey/2'\n r = as_user.post('/groups/' + group + '/tags', json={'value': tag_name})\n assert r.status_code == 400\n\n # Add a tag to the group\n tag_name = 'Grey2'\n r = as_user.post('/groups/' + group + '/tags', json={'value': tag_name})\n assert 
r.ok\n\n # Get the group again to compare timestamps for the Add tag test groups\n r = as_user.get('/groups/' + group)\n assert r.ok\n third_modified = r.json()['modified']\n d3 = parse(third_modified)\n assert d3 > d2\n\n # Try editting the tag so that it includes a slash\n new_tag_name = 'B/rown'\n r = as_user.put('/groups/' + group + '/tags/' + tag_name, json={'value': new_tag_name})\n assert r.status_code == 400\n\n # Edit the tag\n new_tag_name = 'Brown'\n r = as_user.put('/groups/' + group + '/tags/' + tag_name, json={'value': new_tag_name})\n assert r.ok\n\n # Get the group again to compare timestamps for the Edit tag test groups\n r = as_user.get('/groups/' + group)\n assert r.ok\n fourth_modified = r.json()['modified']\n d4 = parse(fourth_modified)\n assert d4 > d3\n\n # Delete the tag\n r = as_user.delete('/groups/' + group + '/tags/' + new_tag_name)\n assert r.ok\n\n # Get the group again to compare timestamps for the Delete tag test groups\n r = as_user.get('/groups/' + group)\n assert r.ok\n fith_modified = r.json()['modified']\n d5 = parse(fith_modified)\n assert d5 > d4\n\n # Add a permission to the group\n user = {'access': 'rw', '_id': user_id}\n r = as_user.post('/groups/' + group + '/permissions', json=user)\n assert r.ok\n\n # Get the group again to compare timestamps for the Add permission test groups\n r = as_user.get('/groups/' + group)\n assert r.ok\n six_modified = r.json()['modified']\n d6 = parse(six_modified)\n assert d6 > d5\n\n # Edit a permission in the group\n user = {'access': 'ro', '_id': user_id}\n r = as_user.put('/groups/' + group + '/permissions/' + user['_id'], json=user)\n assert r.ok\n\n # Get all permissions for each group\n r = as_admin.get('/users/admin@user.com/groups')\n assert r.ok\n assert r.json()[0].get(\"permissions\")[0].get(\"_id\") == \"admin@user.com\"\n\n # Get the group again to compare timestamps for the Edit permission test groups\n r = as_user.get('/groups/' + group)\n assert r.ok\n seven_modified = r.json()['modified']\n d7 = parse(seven_modified)\n assert d7 > d6\n\n # Delete a permission in the group\n r = as_user.delete('/groups/' + group + '/permissions/' + user['_id'])\n assert r.ok\n\n # Get the group again to compare timestamps for the Edit permission test groups\n r = as_user.get('/groups/' + group)\n assert r.ok\n eight_modified = r.json()['modified']\n d8 = parse(eight_modified)\n assert d8 > d7\n\n group2 = data_builder.create_group()\n r = as_admin.post('/groups/' + group2 + '/permissions', json={'access':'admin','_id':'user@user.com'})\n assert r.ok\n\n # Test User can get group2\n r = as_user.get('/groups/' + group2)\n assert r.ok\n\n # Test that group2 shows up in group list for user\n r = as_user.get('/groups')\n assert r.ok\n assert len(r.json()) == 2\n\n assert r.json()[0].get('permissions', []) != []\n r = as_admin.get('/groups')\n assert r.ok\n assert len(r.json()) > 1\n\n # Empty put request should 400\n r = as_admin.put('/groups/' + group, json={})\n assert r.status_code == 400\n\n r = as_admin.get('/groups/' + group)\n assert r.ok\n assert r.json()['label'] == group_label\n\n # Test join=projects\n project = data_builder.create_project(group=group2)\n r = as_admin.get('/groups', params={'join': 'projects'})\n assert r.ok\n for group in r.json():\n if group[\"_id\"] == group2:\n assert group.get(\"projects\")[0].get(\"_id\") == project\n\ndef test_groups_blacklist(as_admin):\n r = as_admin.post('/groups', json={'_id': 'unknown', 'label': 'Unknown group'})\n assert r.status_code == 400\n\n r = 
as_admin.post('/groups', json={'_id': 'site', 'label': 'Site group'})\n assert r.status_code == 400\n\ndef test_groups_upsert(as_admin, data_builder):\n group_id = data_builder.create_group(label='Original Label')\n\n r = as_admin.get('/groups/' + group_id)\n assert r.ok\n original_group = r.json()\n\n r = as_admin.post('/groups', json={'_id': group_id, 'label': 'Fubar'})\n assert r.status_code == 202\n assert r.json() == {'_id': group_id}\n\n r = as_admin.get('/groups/' + group_id)\n assert r.ok\n updated_group = r.json()\n\n assert original_group['label'] == updated_group['label']\n assert original_group['created'] == updated_group['created']\n assert original_group['modified'] == updated_group['modified']\n\ndef test_groups_editions(as_admin):\n # Test adding empty editions\n group_id = str(bson.ObjectId())\n r = as_admin.post('/groups', json={'_id': group_id, 'label': 'empty_editions'})\n assert r.ok\n r = as_admin.get('/groups/' + group_id)\n group = r.json()\n # By default lab edition is False\n assert group['editions']['lab'] == False\n\ndef test_groups_editions_feature(as_admin, data_builder, as_user, with_site_settings, api_db):\n group = data_builder.create_group()\n\n r = as_user.put('/groups/'+ group, json={'editions': {'lab': True}})\n assert r.status_code == 403\n\n # Default group has no providers, and we cant change them, as tested in other test cases\n r = as_admin.put('/groups/'+ group, json={'editions': {'lab': True}})\n assert r.status_code == 422\n\n provider = api_db.providers.find_one({'label': 'Static Compute'})\n provider = str(provider['_id'])\n\n api_db.groups.update_one({'_id': group}, {'$unset': {'providers' : 1}})\n r = as_admin.put('/groups/'+ group, json={\n 'editions': {\n 'lab': True\n },\n 'providers': {\n 'compute': provider\n }\n })\n assert r.status_code == 422\n\n r = as_admin.put('/groups/'+ group, json={\n 'editions': {\n 'lab': True\n },\n 'providers': {\n 'storage': 'deadbeefdeadbeefdeadbeef'\n }\n })\n assert r.status_code == 422\n\n # Can enable with valid providers\n r = as_admin.put('/groups/'+ group, json={\n 'editions': {\n 'lab': True\n },\n 'providers': {\n 'compute': provider,\n 'storage': 'deadbeefdeadbeefdeadbeef'\n }\n })\n assert r.status_code == 200\n\n r = as_admin.put('/groups/'+ group, json={\n 'editions': {\n 'lab': False\n },\n })\n assert r.status_code == 200\n assert as_admin.get('/groups/' + group).json()['editions']['lab'] == False\n\n # Can be enabled if providers are already set\n r = as_admin.put('/groups/'+ group, json={\n 'editions': {\n 'lab': True\n }\n })\n assert r.status_code == 200\n assert as_admin.get('/groups/' + group).json()['editions']['lab'] == True\n\n\n # Now add a couple projects with lab true can verify they are disabled on group disable\n r = as_admin.post('/projects', json={'label': 'test-lab-1', 'group': group, 'editions': {'lab': True}})\n assert r.ok\n project1 = r.json()['_id']\n\n r = as_admin.post('/projects', json={'label': 'test-lab-2', 'group': group, 'editions': {'lab': True}})\n assert r.ok\n project2 = r.json()['_id']\n\n assert as_admin.get('/projects/' + project1).json()['editions']['lab'] == True\n assert as_admin.get('/projects/' + project2).json()['editions']['lab'] == True\n r = as_admin.get('/projects/' + project1)\n r = as_admin.put('/groups/'+ group, json={\n 'editions': {\n 'lab': False\n }\n })\n assert r.status_code == 200\n assert as_admin.get('/groups/' + group).json()['editions']['lab'] == False\n assert as_admin.get('/projects/' + project1).json()['editions']['lab'] == 
False\n assert as_admin.get('/projects/' + project2).json()['editions']['lab'] == False\n\n\n # Cleanup\n assert as_admin.delete('/projects/' + project1).ok\n assert as_admin.delete('/projects/' + project2).ok\n\n\ndef test_group_project_access(as_admin, as_user, data_builder):\n group_id = data_builder.create_group()\n project_id = data_builder.create_project(group=group_id)\n\n # Test user without permissions to project can't access group\n r = as_user.get('/groups/' + group_id)\n assert r.status_code == 403\n\n # Add user to project permissions but not group\n user_id = as_user.get('/users/self').json()['_id']\n r = as_admin.post('/projects/' + project_id + '/permissions',\n json={'_id': user_id, 'access': 'ro'})\n assert r.ok\n\n # Test that user can access group directly\n r = as_user.get('/groups/' + group_id)\n assert r.ok\n assert group_id == r.json()['_id']\n\n # Test that user cannot write to group\n r = as_user.put('/groups/' + group_id, json={'label': 'NewGroup'})\n assert r.status_code == 403\n\n","sub_path":"tests/integration_tests/python/test_groups.py","file_name":"test_groups.py","file_ext":"py","file_size_in_byte":10604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"95558556","text":"a=[(10, 20 , 30) , (40 , 50 , 60) , (70 , 80 , 90)]\r\nprint([t[:-1] + (100,) for t in a])\r\n\r\nletter = \"tHis iS mE\"\r\nprint(letter.swapcase())\r\n\r\nstring = 'ABCDCDC'\r\nsubstring = 'CDC'\r\ncount = string.count(substring)\r\nprint(count)","sub_path":"NumTwo.py","file_name":"NumTwo.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"531189240","text":"from operator import itemgetter\nimport itertools\nimport datetime\nimport json\n\n\ndef _load_document(folder,user):\n\treturn open(folder+'/'+user+'.json')\n\ndef _build_data(games , pref , timeU ):\n\tdata = []\n\tfor game in games:\n\t\tif game['perf'] == pref:\n\t\t\tif timeU:\n\t\t\t\tD = float(game['date']) / 1000.\n\t\t\telse:\n\t\t\t\tD = datetime.datetime.fromtimestamp(float(game['date'])/1000.)\n\t\t\tG = game['ranking']\n\t\t\tdata.append((D,G))\n\treturn data\n\t\ndef _divide_list(l,n):\n\treturn [l[i:i+n] for i in range(0, len(l), n) ]\n\ndef _find_value(lst,value):\n\t\n\tif value == 'max':\n\t\treturn max(lst,key=itemgetter(1))\n\telif value == 'min':\n\t\treturn min(lst,key=itemgetter(1))\n\telif value == 'avg':\n\t\tdata = [(dt , rat) for dt,rat in lst] # THIS REDUNDANT BECAUSE SOMETIMES GET A GROUPBY OBJECT AND NEED CONVERT A LIST\n\t\treturn (data[0][0] , sum(rat for dt,rat in data) / float(len(data)))\n\t\n\treturn lst # the first value\n\ndef _groupby(data, group, value):\n\n\tdata = sorted(data, key=lambda x:x[0].date()) # IS NECESARY ORDER \n\t\n\tif group == 'month':\n\t\tdata = [_find_value(group,value) for dt, group in itertools.groupby(data, key=lambda x:x[0].month or x[0].year)] \n\telif group == 'day':\n\t\tdata = [_find_value(group,value) for dt, group in itertools.groupby(data, key=lambda x:x[0].date())] \n\treturn data\n\ndef load_data_timestamp(folder,user, pref , batch=5 , value = 'avg'):\n\n\tgames = json.load(_load_document(folder , user))\n\tdata = _build_data(games,pref,True)\n\tdata = _divide_list(data , batch)\n\tdata = [_find_value(x,value) for x in data]\n\n\treturn data\n\ndef load_data_datetime(folder,user, pref , group , value = 'avg'):\n\n\tgames = json.load(_load_document(folder , user))\n\tdata = _build_data(games,pref,False)\n\tdata = 
_groupby(data , group , value)\n\n\treturn data\n\n","sub_path":"libs/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386701530","text":"import zmq\nimport os\nimport time\nimport sys\nfrom math import ceil\nfrom os import listdir\nimport hashlib\n\ndef encrypt_string(hash_string):\n sha_signature = \\\n hashlib.sha256(hash_string.encode()).hexdigest()\n return sha_signature\n\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP) # REP -> Reply\nsocket.bind(\"tcp://*:5555\")\nenvio=1024*1024*10\ncontador=0\n\n\ndef subirarchivos(nombrearchivo): \n global contador\n contador=contador+1\n print(contador)\n filename, file_extension = os.path.splitext(nombrearchivo)\n socket.send(b\"subearchivo\")\n tamañoarchivo = socket.recv().decode()\n filenameencriptado= encrypt_string(filename+tamañoarchivo+str(contador))\n socket.send(b\"procediendo a subir archivo\")\n for i in range (0,ceil(int(tamañoarchivo)/envio)):\n archivito = socket.recv()\n with open(filenameencriptado+file_extension, 'ab') as archivo:\n archivo.write(archivito)\n socket.send(b\"ready\")\n socket.recv()\n socket.send(b\"El nombre para descargar el archivo \"+filename.encode()+file_extension.encode()+ b\" es \"+filenameencriptado.encode()+file_extension.encode())\n\n\n\ndef bajararchivos(nombrearchivobajar):\n filename, file_extension = os.path.splitext(nombrearchivobajar)\n size= os.path.getsize(nombrearchivobajar)\n print(ceil(size/envio))\n\n socket.send(b\"bajararchivos\")\n message=socket.recv()\n print(message.decode())\n socket.send(str(size).encode())\n\n with open (nombrearchivobajar, 'rb') as archivoenviado:\n for i in range(0,ceil(size/envio)): \n message = socket.recv()\n archivoenviado.seek(i*envio)\n t=archivoenviado.read(envio)\n print(\"hola server\")\n socket.send(t)\n \n\ndef archivosservidor(ruta = '.'):\n socket.send(b\"listar\")\n socket.recv()\n archivos=listdir(ruta)\n for archivitos in archivos:\n socket.send(archivitos.encode())\n socket.recv()\n socket.send(b\"\")\n\n\nif __name__ == \"__main__\":\n\n while True:\n funcion, archivo= socket.recv_multipart()\n if funcion == b\"subirarchivos\":\n subirarchivos(archivo.decode())\n\n if funcion == b\"bajararchivos\":\n bajararchivos(archivo.decode())\n\n if funcion == b\"listar\":\n archivosservidor()\n\n\"\"\"\n\n\"\"\"\n\n\n\n\n\n","sub_path":"clase 2/serverarchivos.py","file_name":"serverarchivos.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15968092","text":"#!/usr/bin/env python\n\n#import os\nfrom molecule import Molecule\nfrom module import Module,Property,SubModule\n\n#considering that the only change to be done in this file is regarding the plane and the symmetry\n#nothing else is been taken from the mol and the scf.inp files\ndef write_london(template, scf, molecule):\n printable = ''\n wave_function = scf.contains(template.wave_function.name)\n if wave_function:\n scf_submodule = wave_function.submodules.get(template.scf.name)\n if scf_submodule:\n atomst = scf_submodule.properties.pop(template.atomst.name)\n\n for module in scf.modules:\n if module.name != template.visual.name:\n printable += module.__str__()\n\n nmr = SubModule('*NMR')\n nmr.add_property(template, '.LONDON')\n nmr.add_property(template, '.DOEPRN')\n nmr.add_property(template, '.INTFLG', ['1 1 0'])#calculating just large-large large-small\n\n 
newModule = Module('**PROPERTIES')\n newModule.add_property(template, '.' + molecule.magnetic_field)\n newModule.submodules.update({'*NMR':nmr})\n\n printable += newModule.__str__()\n printable += '*END OF\\n'\n\n if atomst:\n scf_submodule.properties.update({atomst.name:atomst})\n\n return printable\n","sub_path":"london.py","file_name":"london.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"395107628","text":"import math\n\nFRAME_COUNT_THRESHOLD = 6\n\ndef get_car_center(car):\n x1 = car[0][0]\n y1 = car[0][1]\n x2 = car[1][0]\n y2 = car[1][1]\n return (int((x1+x2)/2), int((y1+y2)/2))\n\ndef check_is_same_car(car1, car2):\n car1_center = car1[1]\n car2_center = car2[1]\n car1_x = car1_center[0]\n car1_y = car1_center[1]\n car2_x = car2_center[0]\n car2_y = car2_center[1]\n delta = 64/2 # Half of smallest window size that we check for cars\n\n if (abs(car1_x - car2_x) < delta) and (abs(car1_y - car2_y) < delta):\n return True\n\n car1_window = car1[0]\n car2_window = car2[0] \n\n # check for overlap\n if (car1_window[0][0] <= car2_window[0][0]) and (car1_window[1][0] >= car2_window[1][0]) and (car1_window[0][1] <= car2_window[0][1]) and (car1_window[1][1] >= car2_window[1][1]):\n # car 1 superset of car 2\n return True\n\n if (car2_window[0][0] <= car1_window[0][0]) and (car2_window[1][0] >= car1_window[1][0]) and (car2_window[0][1] <= car1_window[0][1]) and (car2_window[1][1] >= car1_window[1][1]):\n # car 2 superset of car 1\n return True\n\n return False\n\n\nclass Cars:\n def __init__(self):\n self.car_list = []\n self.cars_tracked = []\n\n def match_cars_with_tracked(self, new_car):\n match = False\n car = None\n for known_car in self.cars_tracked:\n if check_is_same_car(new_car, known_car):\n known_car[4] = 1\n print(\"Found match\")\n car = known_car\n match = True\n break;\n return match, car\n \n def updateCarsTracked(self, car_list):\n self.car_list = car_list\n if len(self.cars_tracked) == 0:\n if len(self.car_list) > 0:\n print(\"Adding {} cars to empty tracked list\".format(len(self.car_list)))\n self.cars_tracked = self.car_list\n else:\n for car in self.cars_tracked:\n # mark nothing is tracked\n car[4] = 0\n for car in self.cars_tracked:\n # mark nothing is tracked\n if car[4]:\n print(\"What happened !!!!!!!!!\")\n new_cars_tracked = []\n # Match the cars in new frame with tracked cars\n for car in car_list:\n match, known_car = self.match_cars_with_tracked(car)\n if match:\n car[4] = 1\n car[3] = 0\n car[2] = known_car[2] + 1\n print(\"Update match\")\n new_cars_tracked.append(car.copy())\n else:\n print(\"Add new car\")\n new_cars_tracked.append(car.copy())\n \n # go over tracked cars that were not matched and see to include them or retire them.\n for known_car in self.cars_tracked:\n if not known_car[4]:\n if (known_car[3] >= FRAME_COUNT_THRESHOLD) or (known_car[3] > known_car[2]):\n print(\"Retiring car\")\n else:\n print(\"Add previously tracked car\")\n known_car[3] += 1\n new_cars_tracked.append(known_car.copy())\n self.cars_tracked = new_cars_tracked\n \n # return cars windows to be drawn\n cars_to_draw = []\n for car in self.cars_tracked:\n cars_to_draw.append(car[0])\n print(\"Returning {} cars to draw\".format(len(cars_to_draw)))\n\n return cars_to_draw\n\n def updateNewCars(self, cars):\n car_list = []\n for car in cars:\n center = get_car_center(car)\n # format is window, center, hits, miss, tracked in current round\n car_list.append([car, center, 1, 0, 0])\n 
cars_to_draw = self.updateCarsTracked(car_list)\n return cars_to_draw\n","sub_path":"cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257976256","text":"import csv\nimport sys\nfrom math import *\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfrom mpl_toolkits.axes_grid1 import host_subplot\nimport mpl_toolkits.axisartist as AA\n\n\t#### Qui vanno i dati \t####\ndataG1 = np.genfromtxt(\"../dati/scope_1.csv\", delimiter=',')\ndataG2 = np.genfromtxt(\"../dati/scope_8.csv\", delimiter=',') #o 5, o 7\ndataG3 = np.genfromtxt(\"../dati/scope_3.csv\", delimiter=',')\nr=10\nT=1E3\n\nt1\t= T*dataG1[2:,0]\nV1\t= dataG1[2:,1]\n\nt2\t= T*dataG2[2:,0]-0.20\nV2\t= dataG2[2:,1]\n\nt3\t= T*dataG3[2:,0]\nV3\t= dataG3[2:,1]\n\n\t####\t####\t####\t####\n\n# Creo un grafico la dimensione è in pollici\nfig1 = plt.figure(figsize=(14, 5.5))\n# Titolo del grafico\nfig1.suptitle(\"Oscillatore a ponte di Wien\", y=0.97, fontsize=15)\n\n######\n# GRAFICO 1\n#f1 = fig1.add_subplot(1, 3, 1)\nf1 = host_subplot(131, axes_class=AA.Axes)\n\ng1 = f1.errorbar(\tx=t1,\ty=V1,\t\tfmt='-', c='0.5')\n\nf1.grid(True)\nf1.set_ylim((-14, 14))\nf1.set_xlim((-87,70))\n\nf1.text(-97, 0, u'd.d.p. [$V$]', size=14, va='center', ha='center',rotation='90')\nf1.set_xlabel(u'Tempo [$ms$]', labelpad=0, fontsize=14)\n\nf1.legend((g1, ), (r'$V_{out}$', ), 'lower center', prop={'size': 13})\n\n######\n# GRAFICO 2\n#f2 = fig1.add_subplot(1, 3, 2)\nf2 = host_subplot(132, axes_class=AA.Axes)\n\ng2 = f2.errorbar(\tx=t2,\ty=V2,\t\tfmt='-', c='black')\n\nf2.grid(True)\nf2.axis[\"left\"].major_ticklabels.set_visible(False)\nf2.set_ylim((-14, 14))\nf2.set_xlim((-1.8,1.8))\n\nf2.set_xlabel(u'Tempo [$ms$]', labelpad=0, fontsize=14)\n\nf2.legend((g2, ), (r'$V_{out}$', ), 'lower center', prop={'size': 13})\n \n######\n# GRAFICO 3\n#f3 = fig1.add_subplot(1, 3, 3)\nf3 = host_subplot(133, axes_class=AA.Axes)\n\ng3 = f3.errorbar(\tx=t3-640,\ty=V3,\t\tfmt='-', c='0.5')\n\nf3.grid(True)\nf3.axis[\"left\"].major_ticklabels.set_visible(False)\nf3b = f3.twin()\nf3b.axis[\"top\"].major_ticklabels.set_visible(False)\nf3.set_ylim((-14, 14))\nf3.set_xlim((-27,87))\n\nf3.text(94.5, 0, u'd.d.p. 
[$V$]', size=14, va='center', ha='center',rotation='-90')\nf3.set_xlabel(u'Tempo [$ms$]', labelpad=0, fontsize=14)\n\nf3.legend((g3, ), (r'$V_{out}$', ), 'lower center', prop={'size': 13})\n \n######\n\n# questo imposta i bordi del grafico\nfig1.subplots_adjust(left=0.04, right=0.96, top=0.93, bottom=0.09, hspace=0.05, wspace=0.03)\n# mostra grafico\nplt.show()\n","sub_path":"E08/analisi/grey_wien.py","file_name":"grey_wien.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"430790921","text":"import pandas as pd\nimport os, sys, glob, time\nimport requests, json\nimport csv\nfrom flask import Flask\n\n# Global Variables\nurl = \"https://rickandmortyapi.com/api/character/?Species=Human&status=alive&origin=earth\"\ncsv_file = \"ricknmorty.csv\"\napp = Flask(__name__)\n\ndef download_json(url):\n try:\n response = requests.get(url)\n json_data = response.json()\n except ValueError as e:\n print(e)\n print(\"Error downloading json\")\n sys.exit(1)\n return json_data\n\n# Check if a json is valid or not\ndef validate_json(response):\n try:\n json.dumps(response)\n except ValueError as e:\n print(\"Json response is not valid!\")\n sys.exit(1)\n return True\n\ndef convert_json_to_csv(response):\n try:\n data = json.dumps(response)\n df = pd.DataFrame(response[\"results\"], columns=[\"name\", \"image\", \"location\"])\n cols = ['location']\n df[cols] = df[cols].applymap(lambda x: x['name'])\n new = pd.concat([df], ignore_index=True)\n print(new)\n new.to_csv(csv_file)\n except ValueError as e:\n print(e)\n sys.exit(1)\n\ndef convert_csv_to_json(csv_file):\n try:\n df = pd.read_csv(csv_file)\n return df.to_json()\n except ValueError as e:\n print(e)\n print(\"Couldn't convert csv to a valid json. Exiting...\")\n sys.exit(1)\n\ndef main():\n try:\n response = download_json(url)\n validate_json(response)\n convert_json_to_csv(response)\n except ValueError as e:\n print(\"Couldn't fetch a valid json response and convert is to CSV. Exiting...\")\n sys.exit(1)\n\n@app.route('/healthcheck')\ndef healthcheck():\n return \"Healthy!\\n\"\n\n@app.route('/get_results')\ndef get_results():\n return convert_csv_to_json(csv_file)\n\n# Main loop function\nif __name__ == \"__main__\":\n print(\"Staring to parse endpoint's response to CSV file.\")\n main()\n app.run(port=5000, debug=True, host='0.0.0.0')\n","sub_path":"ricknmorty.py","file_name":"ricknmorty.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"127690764","text":"# -*- encoding: utf-8 -*-\n# Copyright (C) 2017 José M. Miotto\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later versionp.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\"\"\"\nThis is a package for calculation of Levy stable distributions\n(probability density function and cumulative density function) and for\nfitting these distributions to data.\n\nIt operates by interpolating values from a table, as direct computation \nof these distributions requires a lengthy numerical integration. This\ninterpolation scheme allows fast fitting of data by Maximum Likelihood .\n\nDoes not support alpha values less than 0.5.\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom scipy.special import gamma\nfrom builtins import range\n\n__version__ = \"0.6\"\n\n# Some constants of the program.\n# Dimensions: 0 - x, 1 - alpha, 2 - beta\nsize = (200, 76, 101) # size of the grid (xs, alpha, beta)\n_lower = np.array([-np.pi / 2 * 0.999, 0.5, -1.0]) # lower limit of parameters\n_upper = np.array([np.pi / 2 * 0.999, 2.0, 1.0]) # upper limit of parameters\n\npar_bounds = ((_lower[1], _upper[1]), (_lower[2], _upper[2]), (None, None), (0.0, None)) # parameter bounds for fit.\npar_names = ['alpha', 'beta', 'mu', 'sigma'] # names of the parameters\ndefault = [1.5, 0.0, 0.0, 1.0] # default values of the parameters for fit.\ndefault = {par_names[i]: default[i] for i in range(4)}\n\"\"\" f_bounds function only useful if minimizing with fmin \"\"\"\nf_bounds = {\n 'alpha': lambda x: _reflect(x, *par_bounds[0]),\n 'beta': lambda x: _reflect(x, *par_bounds[1]),\n 'mu': lambda x: x,\n 'sigma': lambda x: x\n}\n\n\ndef _reflect(x, lower, upper):\n \"\"\" Makes the parameters to be inside the bounds \"\"\"\n while 1:\n if x < lower:\n x = lower - (x - lower)\n elif x > upper:\n x = upper - (x - upper)\n else:\n return x\n\n\ndef _interpolate(points, grid, lower, upper):\n \"\"\" Perform multi-dimensional Catmull-Rom cubic interpolation. 
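    Each point is mapped into grid coordinates and blended with the Catmull-Rom weights -0.5*t^3 + t^2 - 0.5*t, 1.5*t^3 - 2.5*t^2 + 1, -1.5*t^3 + 2*t^2 + 0.5*t and 0.5*t^3 - 0.5*t^2 (the four entries of the weighters list below).\n    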
\"\"\"\n point_shape = np.shape(points)[:-1]\n points = np.reshape(points, (np.multiply.reduce(point_shape), np.shape(points)[-1]))\n\n grid_shape = np.array(np.shape(grid))\n dims = len(grid_shape)\n points = (points - lower) * ((grid_shape - 1) / (upper - lower))\n floors = np.floor(points).astype('int')\n\n offsets = points - floors\n offsets2 = offsets * offsets\n offsets3 = offsets2 * offsets\n weighters = [\n -0.5 * offsets3 + offsets2 - 0.5 * offsets,\n 1.5 * offsets3 - 2.5 * offsets2 + 1.0,\n -1.5 * offsets3 + 2 * offsets2 + 0.5 * offsets,\n 0.5 * offsets3 - 0.5 * offsets2,\n ]\n\n ravel_grid = np.ravel(grid)\n\n result = np.zeros(np.shape(points)[:-1], 'float64')\n for i in range(1 << (dims * 2)):\n weights = np.ones(np.shape(points)[:-1], 'float64')\n ravel_offset = 0\n for j in range(dims):\n n = (i >> (j * 2)) % 4\n ravel_offset = ravel_offset * grid_shape[j] + \\\n np.maximum(0, np.minimum(grid_shape[j] - 1, floors[:, j] + (n - 1)))\n weights *= weighters[n][:, j]\n\n result += weights * np.take(ravel_grid, ravel_offset)\n\n return np.reshape(result, point_shape)\n\n\nclass Parameters(object):\n \"\"\" This class is a wrap for the parameters;\n it works such that if we fit fixing one or more parameters, the optimization only acts on the other.\n The key thing here is the setter.\n \"\"\"\n\n def __init__(self, **kwargs):\n self._x = np.array([default[k] if kwargs[k] is None else kwargs[k] for k in par_names])\n self.variables = [i for i, k in enumerate(par_names) if kwargs[k] is None]\n self.fixed = [i for i, k in enumerate(par_names) if kwargs[k] is not None]\n self.fixed_values = [kwargs[k] for i, k in enumerate(par_names) if kwargs[k] is not None]\n\n def get_all(self):\n return self._x\n\n def __str__(self):\n return self.x.__str__()\n\n @property\n def x(self):\n return self._x[self.variables]\n\n @x.setter\n def x(self, value):\n for j, i in enumerate(self.variables):\n # If the fmin function is used to optimize, use this line:\n # self._x[i] = f_bounds[par_names[i]](value[j])\n # If the minimize function is used to optimize, use this line:\n self._x[i] = f_bounds[par_names[i]](value.x[j])\n\n\ndef _phi(alpha, beta):\n \"\"\" Common function. 
\"\"\"\n return beta * np.tan(np.pi * alpha / 2.0)\n\n\ndef _calculate_levy(x, alpha, beta, cdf=False):\n \"\"\" Calculation of Levy stable distribution via numerical integration.\n This is used in the creation of the lookup table.\n Notice that to compute it in a 'true' x, the tangent must be applied.\n Example: levy(2, 1.5, 0) = _calculate_levy(np.tan(2), 1.5, 0)\n \"0\" parameterization as per http://academic2.americanp.edu/~jpnolan/stable/stable.html\n Note: fails for alpha=1.0 (so make sure alpha=1.0 isn't exactly on the interpolation grid)\n \"\"\"\n from scipy import integrate\n\n beta = -beta\n C = _phi(alpha, beta)\n\n def func_cos(u):\n ua = u ** alpha\n # if ua > 700.0: return 0.0\n return np.exp(-ua) * np.cos(C * ua - C * u)\n\n def func_sin(u):\n ua = u ** alpha\n # if ua > 700.0: return 0.0\n return np.exp(-ua) * np.sin(C * ua - C * u)\n\n if cdf:\n # Cumulative density function\n return (integrate.quad(\n lambda u: u and func_cos(u) / u or 0.0, 0.0, np.Inf, weight=\"sin\", wvar=x, limlst=1000)[0]\n + integrate.quad(\n lambda u: u and func_sin(u) / u or 0.0, 0.0, np.Inf, weight=\"cos\", wvar=x, limlst=1000)[0]\n ) / np.pi + 0.5\n else:\n # Probability density function\n return (integrate.quad(\n func_cos, 0.0, np.Inf, weight=\"cos\", wvar=x, limlst=1000)[0]\n - integrate.quad(\n func_sin, 0.0, np.Inf, weight=\"sin\", wvar=x, limlst=1000)[0]\n ) / np.pi\n\n\ndef _approximate(x, alpha, beta, cdf=False):\n mask = (x > 0)\n values = np.sin(np.pi * alpha / 2.0) * gamma(alpha) / np.pi * np.power(np.abs(x), -alpha - 1.0)\n values[mask] *= (1.0 + beta)\n values[~mask] *= (1.0 - beta)\n if cdf:\n return 1.0 - values\n else:\n return values * alpha\n\n\ndef _make_dist_data_file():\n \"\"\" Generates the lookup tables, writes it to .npz files. \"\"\"\n\n xs, alphas, betas = [np.linspace(_lower[i], _upper[i], size[i], endpoint=True) for i in [0, 1, 2]]\n ts = np.tan(xs)\n print(\"Generating levy_data.py ...\")\n\n pdf = np.zeros(size, 'float64')\n for i, alpha in enumerate(alphas):\n for j, beta in enumerate(betas):\n print(\"Calculating alpha={:.2f}, beta={:.2f}\".format(alpha, beta))\n pdf[:, i, j] = [_calculate_levy(t, alpha, beta, False) for t in ts]\n np.savez('pdf.npz', pdf)\n\n cdf = np.zeros(size, 'float64')\n for i, alpha in enumerate(alphas):\n for j, beta in enumerate(betas):\n print(\"Calculating alpha={:.2f}, beta={:.2f}\".format(alpha, beta))\n cdf[:, i, j] = [_calculate_levy(t, alpha, beta, True) for t in ts]\n np.savez('cdf.npz', cdf)\n\n\ndef _int_levy(x, alpha, beta, cdf=False):\n \"\"\" Interpolate densities of the Levy stable distribution specified by alpha and beta.\n\n Specify cdf=True to obtain the *cumulative* density function.\n\n Note: may sometimes return slightly negative values, due to numerical inaccuracies.\n \"\"\"\n points = np.empty(np.shape(x) + (3,), 'float64')\n points[..., 0] = np.arctan(x)\n points[..., 1] = alpha\n points[..., 2] = np.abs(beta)\n\n if cdf:\n what = np.load('cdf.npz')['arr_0']\n else:\n what = np.load('pdf.npz')['arr_0']\n return _interpolate(points, what, _lower, _upper)\n\n\ndef _get_closest_approx(alpha, beta):\n x0, x1, n = -50.0, 10000.0 - 50.0, 100000\n dx = (x1 - x0) / n\n x = np.linspace(x0, x1, num=n, endpoint=True)\n y = 1.0 - _int_levy(x, alpha, beta, cdf=True)\n z = 1.0 - _approximate(x, alpha, beta, cdf=True)\n mask = (10.0 < x) & (x < 500.0)\n return 10.0 + dx * np.argmin((np.log(z[mask]) - np.log(y[mask])) ** 2.0)\n\n\ndef _make_limit_data_file():\n limits = np.zeros(size[1:], 'float64')\n alphas, betas = 
[np.linspace(_lower[i], _upper[i], size[i], endpoint=True) for i in [1, 2]]\n\n print(\"Generating levy_approx_data.py ...\")\n\n for i, alpha in enumerate(alphas):\n for j, beta in enumerate(betas):\n limits[i, j] = _get_closest_approx(alpha, beta)\n print(\"Calculating alpha={:.2f}, beta={:.2f}, limit={:.2f}\".format(alpha, beta, limits[i, j]))\n\n np.savez('limits.npz', limits)\n\n\ndef change_par(alpha, beta, mu, sigma, par_input, par_output):\n if par_input == par_output:\n return mu\n elif (par_input == 0) and (par_output == 1):\n return mu - sigma * _phi(alpha, beta)\n elif (par_input == 1) and (par_output == 0):\n return mu + sigma * _phi(alpha, beta)\n\n\ndef levy(x, alpha, beta, mu=0.0, sigma=1.0, cdf=False, par=0):\n \"\"\"\n Levy with the tail replaced by the analytical approximation.\n Also, mu, sigma are parameters that shift and rescale the distribution.\n Parametrization can be chosen according to Nolan, par={0,1}.\n \"\"\"\n\n loc = change_par(alpha, beta, mu, sigma, par, 0)\n\n if cdf:\n what = np.load('cdf.npz')['arr_0']\n else:\n what = np.load('pdf.npz')['arr_0']\n limits = np.load('limits.npz')['arr_0']\n\n xr = (x - loc) / sigma\n alpha_index = int((alpha -_lower[1]) / (_upper[1] - _lower[1]) * (size[1] - 1))\n beta_index = int((beta - _lower[2]) / (_upper[2] - _lower[2]) * (size[2] - 1))\n try:\n l = limits[alpha_index, beta_index]\n except IndexError:\n print(alpha, alpha_index)\n print(beta, beta_index)\n raise\n mask = (np.abs(xr) < l)\n z = xr[mask]\n\n points = np.empty(np.shape(z) + (3,), 'float64')\n points[..., 0] = np.arctan(z)\n points[..., 1] = alpha\n points[..., 2] = beta\n\n interpolated = _interpolate(points, what, _lower, _upper)\n approximated = _approximate(xr[~mask], alpha, beta, cdf)\n\n res = np.empty(np.shape(xr), 'float64')\n res[mask] = interpolated\n res[~mask] = approximated\n if cdf is False:\n res /= sigma\n return res\n\n\ndef neglog_levy(x, alpha, beta, mu, sigma, par=0):\n \"\"\"\n Interpolate negative log densities of the Levy stable distribution specified by alpha and beta.\n Small/negative densities are capped at 1e-100 to preserve sanity.\n \"\"\"\n return -np.log(np.maximum(1e-100, levy(x, alpha, beta, mu, sigma, par=par)))\n\n\ndef fit_levy(x, alpha=None, beta=None, mu=None, sigma=None, par=0):\n \"\"\"\n Estimate parameters of Levy stable distribution given data x, using the Maximum Likelihood method.\n\n By default, searches all possible Levy stable distributions.\n However you may restrict the search by specifying the values of one or more parameters.\n Parametrization can be chosen according to Nolan, par={0,1}.\n \n Examples:\n \n levy(x) -- Fit a stable distribution to x\n\n levy(x, beta=0.0) -- Fit a symmetric stable distribution to x\n\n levy(x, beta=0.0, mu=0.0) -- Fit a symmetric distribution centered on zero to x\n\n levy(x, alpha=1.0, beta=0.0) -- Fit a Cauchy distribution to x\n\n Returns a tuple of (alpha, beta, mu, sigma, negative log density)\n \"\"\"\n\n # The parametrization is changed to par=0. 
At the end, the parametrization will change to par.\n if mu is not None:\n loc = change_par(alpha, beta, mu, sigma, par, 0)\n elif mu is None:\n loc = mu\n\n from scipy import optimize\n\n kwargs = {'alpha': alpha, 'beta': beta, 'mu': loc, 'sigma': sigma}\n parameters = Parameters(**kwargs)\n\n def neglog_density(param):\n p = np.zeros(4)\n p[parameters.variables] = param\n p[parameters.fixed] = parameters.fixed_values\n alpha, beta, mu, sigma = p\n return np.sum(neglog_levy(x, alpha, beta, mu, sigma))\n\n # parameters.x = optimize.fmin(neglog_density, parameters.x, disp=0)\n parameters.x = optimize.minimize(neglog_density, parameters.x, method='L-BFGS-B', bounds=par_bounds)\n alpha, beta, loc, sigma = parameters.get_all()\n mu = change_par(alpha, beta, loc, sigma, 0, par)\n\n return alpha, beta, mu, sigma, neglog_density(parameters.x)\n\n\ndef random(alpha, beta, mu=0.0, sigma=1.0, shape=(), par=0):\n \"\"\"\n Generate random values sampled from an alpha-stable distribution.\n Parametrization can be chosen according to Nolan, par={0,1}.\n \"\"\"\n\n loc = change_par(alpha, beta, mu, sigma, par, 0)\n if alpha == 2:\n return np.random.standard_normal(shape) * np.sqrt(2.0)\n\n # Fails for alpha exactly equal to 1.0\n # but works fine for alpha infinitesimally greater or less than 1.0 \n radius = 1e-15 # <<< this number is *very* small\n if np.absolute(alpha - 1.0) < radius:\n # So doing this will make almost exactly no difference at all\n alpha = 1.0 + radius\n\n r1 = np.random.random(shape)\n r2 = np.random.random(shape)\n pi = np.pi\n\n a = 1.0 - alpha\n b = r1 - 0.5\n c = a * b * pi\n e = _phi(alpha, beta)\n f = (-(np.cos(c) + e * np.sin(c)) / (np.log(r2) * np.cos(b * pi))) ** (a / alpha)\n g = np.tan(pi * b / 2.0)\n h = np.tan(c / 2.0)\n i = 1.0 - g ** 2.0\n j = f * (2.0 * (g - h) * (g * h + 1.0) - (h * i - 2.0 * g) * e * 2.0 * h)\n k = j / (i * (h ** 2.0 + 1.0)) + e * (f - 1.0)\n\n return loc + sigma * k\n\n\nif __name__ == \"__main__\":\n if \"build\" in sys.argv[1:]:\n _make_dist_data_file()\n _make_limit_data_file()\n\n print(\"Testing fit_levy.\")\n\n print(\"1000 points, result should be (1.5, 0.5, 0.0, 1.0).\")\n result = fit_levy(random(1.5, 0.5, 0.0, 1.0, 1000))\n print('alpha={:.2f}, beta={:.2f}, mu_0={:.2f}, sigma={:.2f}, neglog={:.2f}'.format(*result))\n\n","sub_path":"analysis of time series/levy.py","file_name":"levy.py","file_ext":"py","file_size_in_byte":14449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486251770","text":"#!/usr/bin/python3\n'''\n Starts a Flask web application\n'''\nfrom models import storage\nfrom models.state import State\nfrom os import getenv\nfrom flask import Flask, render_template\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route(\"/states\")\n@app.route(\"/states_list\")\ndef states_list():\n \"\"\"\n display HTML with States\n \"\"\"\n if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n states = storage.all(\"State\")\n else:\n states = storage.all(State)\n states = states.values()\n\n return render_template(\"7-states_list.html\", states=states)\n\n\n@app.route(\"/cities_by_states\")\ndef cities_by_states():\n \"\"\"\n display HTML with cities in States\n \"\"\"\n if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n states = storage.all(\"State\").values()\n else:\n states = storage.all(State).values()\n\n return render_template(\"8-cities_by_states.html\", states=states)\n\n\n@app.route(\"/states/\")\ndef states_id(id):\n \"\"\"\n display HTML with states id\n \"\"\"\n 
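    # e.g. GET /states/<id> with a hypothetical State.id falls through to the loop below and renders 9-states.html\n    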
single_state = None\n\n if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n states = storage.all(\"State\").values()\n else:\n states = storage.all(State).values()\n\n for state in states:\n if state.id == id:\n single_state = state\n states = single_state\n\n return render_template(\"9-states.html\", states=states)\n\n\n@app.teardown_appcontext\ndef tear_down(exception):\n \"\"\"\n tear down\n \"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"326348895","text":"# -*- coding: utf-8 -*-\n# Author: zhangxinlei\n# Time: 2021/7/31 12:03\nimport FbxCommon\n\n\ndef PrintAllNode(fbxNode):\n\tfor i in range(fbxNode.GetChildCount()):\n\t\tchildFbxNode = fbxNode.GetChild(i)\n\t\tprint(fbxNode.GetName(), childFbxNode.GetName())\n\t\tfor j in range(childFbxNode.GetMaterialCount()):\n\t\t\tmaterial = childFbxNode.GetMaterial(j)\n\t\t\tprint(material.GetName())\n\t\tPrintAllNode(childFbxNode)\n\n\nif __name__ == '__main__':\n\tfilename = \"G:/Projects/S6/design/DATA/SceneEditor/Resources/Model/Cube.FBX\"\n\tfbxManager, fbxScene = FbxCommon.InitializeSdkObjects()\n\tresult = FbxCommon.LoadScene(fbxManager, fbxScene, filename)\n\tprint(result)\n\n\tfbxRoot = fbxScene.GetRootNode()\n\tPrintAllNode(fbxRoot)\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"24579685","text":"import logging\n\nfrom environs import Env\nimport telegram\n\nenv = Env()\nenv.read_env()\n\n\nclass TelegramLogsHandler(logging.Handler):\n def __init__(self, chat_id):\n super().__init__()\n tg_bot = telegram.Bot(token=env.str('BUG_REPORTING_BOT_TOKEN'))\n self.chat_id = chat_id\n self.tg_bot = tg_bot\n\n def emit(self, record):\n log_entry = self.format(record)\n self.tg_bot.send_message(chat_id=self.chat_id, text=log_entry)\n","sub_path":"tg_logs_handler.py","file_name":"tg_logs_handler.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72792571","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\n 时间:2018-1-23 18:15:15\r\n 作者:Ficko\r\n 版本:\r\n 1.0 可以爬取页面的第二篇微博,并正确print\r\n 1.1 get_content() 添加了initialize;忽略评论可以带图的功能;更新了原创和转发的判断方法\r\n 2.0 可以识别是否有置顶,并自动选择爬取哪一篇\r\n 3.0 ————————————————\r\n 4.0 完善保存至本地文档\r\n 5.0 增加持续监控的能力,察觉变化并保存\r\n 5.1 将 driver 改为无头浏览器(Firefox内核),添加了定时状态打印功能\r\n 6.0 可以部署在服务器上\r\n 6.1 优化 爬取文件和 log文件 的存储\r\n 6.2 记录出错信息,方便维护\r\n 6.3 添加虚拟显示 virtual display,避免在服务器上出错\r\n 6.4 为 webdriver 添加 Desired 数值,避免在服务器上出错\r\n\"\"\"\r\n\r\nfrom selenium import webdriver\r\nfrom time import sleep\r\nfrom datetime import datetime\r\nfrom selenium.webdriver.firefox.options import Options\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom pyvirtualdisplay import Display\r\nimport sys, os, traceback\r\n\r\n# Variations.\r\n# 引用需要声明全局变量。\r\n# ___ER___ https://weibo.cn/508637358\r\nsecond_domain = '508637358'\r\nweibo_name = '___ER___'\r\n\r\ndef save_to_log(info):\r\n # 将 info 保存到日志文件中\r\n is_weibosave()\r\n m = open('weibosave/weiboPY_running_log_{}.txt'.format(format_weibo_py_running_log_time()), 'a', encoding='utf-8')\r\n m.write(info)\r\n m.close()\r\n\r\n\r\ndef is_weibosave():\r\n # 判断是否存在文件夹 
weibosave,没有就创建一个\r\n if os.path.exists('weibosave'):\r\n pass\r\n else:\r\n os.mkdir('weibosave')\r\n\r\n\r\ndef format_time_now():\r\n ct = datetime.now()\r\n time_now = '{}/{}/{}_{}:{}:{}'.format(ct.year, ct.month, ct.day, ct.hour, ct.minute, ct.second)\r\n return time_now\r\n\r\n\r\ndef format_weibo_save_time():\r\n ct = datetime.now()\r\n date_now = '{}_{}_{}'.format(ct.year, ct.month, ct.day)\r\n return date_now\r\n\r\n\r\ndef format_weibo_py_running_log_time():\r\n ct = datetime.now()\r\n hour_now = '{}_{}_{}@{}00'.format(ct.year, ct.month, ct.day, ct.hour)\r\n return hour_now\r\n\r\n\r\ndef get_content():\r\n global driver\r\n \"\"\"\r\n 有2种类型:\r\n 一种是原创,一种是转发\r\n 原创只有一个 div 标签,转发有两个 div 标签\r\n \"\"\"\r\n\r\n # 判断原创还是转发\r\n \"\"\"\r\n 开始使用的方法是:判断 div 元素的个数,一个是原创,多个是转发;\r\n 但如果原创发了图,会被误认为是转发,然后报错。\r\n 因此转为查找「转发了」字眼来进行判断。\r\n \"\"\"\r\n head_text = driver.find_element_by_id(\"M_\").find_element_by_xpath(\"div/span[1]\").text.strip()[:3]\r\n if head_text != '转发了':\r\n content = driver.find_element_by_class_name(\"ctt\").text\r\n content_time = driver.find_element_by_class_name(\"ct\").text\r\n else:\r\n orig_poster = driver.find_element_by_id(\"M_\").find_element_by_xpath(\"div[1]/span[1]/a\").text\r\n orig_content = driver.find_element_by_id(\"M_\").find_element_by_xpath(\"div[1]/span[2]\").text\r\n repo = ''.join(driver.find_element_by_id(\"M_\").find_element_by_xpath(\"div[last()]\").text.split(' ')[:-2])\r\n '''\r\n 这里的repo编写思路:由于微博奇葩的格式,导致直接 find_element 会在后面有两个后缀:「x月x日」和「关注他」;\r\n 由于后缀之间有两个空格,因此用两个空格分割字符,然后将末尾两个抛去;\r\n list --> str 的方法:''.join(list)。这里默认为空。\r\n ============\r\n 忽略转发的图片,使用 last() 直接选取最后的转发内容,忽略中间的 div 元素\r\n '''\r\n content = '@{}:{}\\r\\n{}:{}'.format(weibo_name, repo, orig_poster, orig_content)\r\n content_time = driver.find_element_by_id(\"M_\").find_element_by_xpath(\"div[last()]/span[2]\").text\r\n\r\n local_time = format_time_now()\r\n\r\n # 返回三个值:本机抓取时间,发布时间,内容\r\n return local_time, content_time, content\r\n\r\n\r\ndef is_element_exist_by_xpath(xpath):\r\n \"\"\"\r\n 利用 XPath 确定元素是否存在\r\n \"\"\"\r\n is_exist = False\r\n s = driver.find_elements_by_xpath(xpath)\r\n if len(s) > 0:\r\n is_exist = True\r\n\r\n return is_exist\r\n\r\n\r\ndef is_stick():\r\n s = False\r\n \"\"\"\r\n 得到主页后,判断是否存在置顶微博。如果存在,返回 True,否则 False\r\n \"\"\"\r\n if is_element_exist_by_xpath(\"//span[text()='置顶']\"):\r\n s = True\r\n\r\n return s\r\n\r\n\r\ndef getlatestposturl():\r\n global driver\r\n \"\"\"\r\n 收藏的网址示例:http://weibo.cn/fav/addFav/FEX5UojD7?rl=0&st=b53681\r\n 先用「/」分割,然后取最后一位,再取前9位\r\n \"\"\"\r\n # 根据是否有置顶微博,选择抓取的 list 的位置\r\n if is_stick():\r\n n = 1\r\n else:\r\n n = 0\r\n\r\n add_fav_url = driver.find_elements_by_xpath(\"//div[starts-with(@id, 'M_')]\")[n].find_element_by_link_text(\r\n \"收藏\").get_attribute('href')\r\n postid = add_fav_url.split('/')[-1][:9]\r\n full_post_url = 'https://weibo.cn/comment/' + postid\r\n return full_post_url, postid\r\n\r\n\r\ndef loginsina():\r\n global driver\r\n # 对应的是「登陆」\r\n print('LoginSina,正在登陆中……')\r\n username = ''\r\n password = ''\r\n\r\n sleep(3)\r\n driver.find_element_by_id(\"loginName\").click()\r\n driver.find_element_by_id(\"loginName\").send_keys(username)\r\n driver.find_element_by_id(\"loginPassword\").click()\r\n driver.find_element_by_id(\"loginPassword\").send_keys(password)\r\n driver.find_element_by_id(\"loginAction\").click()\r\n\r\n log = '登陆成功!当前时间:{}。'.format(format_time_now())\r\n print(log)\r\n is_weibosave()\r\n save_to_log(log)\r\n sleep(3)\r\n \"\"\"\r\n 输入文本框如果带有placeholder属性,则使用 .clear() 功能会报错:\r\n 
「Element is not currently interactable and may not be manipulated」\r\n 解决方法:直接略过Clear方法。\r\n \"\"\"\r\n\r\n\r\ndef welcomesina():\r\n global driver\r\n # 对应的是「欢迎登陆」\r\n print('WelcomeSina,正在登陆中……')\r\n driver.find_element_by_xpath(\"/html/body/div/div/a[2]\").click()\r\n sleep(4)\r\n\r\n username = ''\r\n password = ''\r\n\r\n sleep(3)\r\n driver.find_element_by_id(\"loginName\").click()\r\n driver.find_element_by_id(\"loginName\").send_keys(username)\r\n driver.find_element_by_id(\"loginPassword\").click()\r\n driver.find_element_by_id(\"loginPassword\").send_keys(password)\r\n driver.find_element_by_id(\"loginAction\").click()\r\n\r\n log = '登陆成功!当前时间:{}。'.format(format_time_now())\r\n print(log)\r\n is_weibosave()\r\n save_to_log(log)\r\n sleep(3)\r\n\r\n\r\ndef get_target_weibo():\r\n driver.get('https://weibo.cn/{}'.format(second_domain))\r\n\r\ndef boot_driver():\r\n global driver\r\n # 添加虚拟显示\r\n display = Display(visible=0, size=(800, 600))\r\n display.start()\r\n\r\n # 以下是在stackoverflow里看到的解决方法\r\n # 我也不知道啥意思,但就是有用\r\n firefox_capabilities = DesiredCapabilities.FIREFOX\r\n firefox_capabilities['marionette'] = True\r\n firefox_capabilities['binary'] = '/usr/bin/firefox'\r\n\r\n options = Options()\r\n options.add_argument('-headless')\r\n driver = webdriver.Firefox(capabilities=firefox_capabilities)\r\n\r\n\r\nif __name__ == '__main__':\r\n global driver\r\n # 获取微博主页内容。\r\n boot_driver()\r\n # 增加出错信息的打印\r\n try:\r\n get_target_weibo()\r\n except Exception:\r\n exc_info = traceback.format_exc()\r\n print(exc_info, format_time_now())\r\n save_to_log('\\n'.join([exc_info, format_time_now()]))\r\n\r\n # 隐式等待时间\r\n driver.implicitly_wait(10)\r\n\r\n old_id = '0' # 初始化\r\n i = 0\r\n j = 0\r\n while True:\r\n driver.get('https://weibo.cn/{}'.format(second_domain))\r\n\r\n # 获取标题,判断页面状态\r\n title = driver.title\r\n if title == '{}的微博'.format(weibo_name):\r\n pass\r\n elif title == '登录 - 新浪微博':\r\n loginsina()\r\n driver.get('https://weibo.cn/{}'.format(second_domain))\r\n elif title == '欢迎登录 - 新浪微博':\r\n welcomesina()\r\n driver.get('https://weibo.cn/{}'.format(second_domain))\r\n else:\r\n sys.exit('无法连接!程序退出。')\r\n\r\n # ==========以下是子页面==========\r\n # 获得最近微博的全文页面\r\n post_id = getlatestposturl()[1]\r\n if post_id == old_id:\r\n j += 1\r\n if j % 10 == 0:\r\n # 防止信息泛滥,每 10 条通报一次\r\n is_weibosave()\r\n log = '正在监控,已动态监测{}次,已捕捉到{}条动态。当前时间:{}。\\r\\n'.format(j, i, format_time_now())\r\n print(log)\r\n save_to_log(log)\r\n\r\n sleep(10)\r\n continue\r\n else:\r\n driver.get(getlatestposturl()[0])\r\n # 获取页面内容\r\n post_info = get_content()\r\n post_content = '抓取时间:{}\\r\\n微博时间:{}\\r\\n{}\\r\\n\\r\\n\\r\\n'.format(post_info[0], post_info[1], post_info[2][1:])\r\n\r\n # 更新 old_id 值\r\n old_id = post_id\r\n\r\n # 写入文件(合并保存)\r\n is_weibosave()\r\n f = open('weibosave/weibo_save_{}.txt'.format(format_weibo_save_time()), 'a', encoding='utf-8')\r\n f.write(post_content)\r\n f.close()\r\n\r\n # 输出状态\r\n i += 1\r\n capture = '捕捉到{}条动态。抓取时间:{}。\\r\\n'.format(i, format_time_now())\r\n print(capture)\r\n save_to_log(capture)\r\n\r\n driver.quit()\r\n","sub_path":"spider_with_headless_Firefox.py","file_name":"spider_with_headless_Firefox.py","file_ext":"py","file_size_in_byte":10079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"489574276","text":"from django import forms\n\n\nclass NoticeForm(forms.Form):\n title = forms.CharField(\n max_length=200,\n label='Write title',\n widget=forms.TextInput(),\n )\n details = forms.CharField(\n 
max_length=2000,\n label='Write details',\n widget=forms.Textarea(\n attrs={\n 'rows': '40',\n }\n ),\n )\n\n def clean(self):\n cleaned_data = super(NoticeForm, self).clean()\n title = cleaned_data.get('title')\n details = cleaned_data.get('details')\n if not title and not details:\n raise forms.ValidationError('You have to write something!')\n","sub_path":"administrator/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607098972","text":"#출처 https://github.com/lovit/soynlp#noun-extractor\n\nfrom soynlp.noun import LRNounExtractor_v2\nfrom soynlp.word import WordExtractor\nimport openpyxl\nfrom soynlp.tokenizer import LTokenizer\n\nimport pandas as pd\n\nimport re, pickle, csv\n\nexception_list = ['있다', '수', '에', '이', '한다', '있습니다', '것으로', '있는', '것', '할', '및', 'the',\n 'http', 'https', 'sunday','joins','co','and', 'kr', '고', '것이다', '한', 'is', 'www', 'for', 'a', 'of',\n 'in', 'on', '중', '더', '대', '통해', '기자', '서울', '뉴시스', '재배포', '금지', '무단', '전재', '연합뉴',\n '뉴7', '이번', '구독', '사진', '밝혔다', '저작권자', '네이버', '하지만', '이런', '그', '것이', '것은', 'pr', 'to',\n 'se', '부산일보', '연합뉴', 'de', 's', 'be', 'with', 'ha', 'en', 'an', 'PR', 'ac', 'ca', 'N', '로', '대한',\n '등', '를', '위해', '말했', '그러나', '대해', '오후', '이데이일', 'edaily', '합니다' ,'위한', '내년', '올해',\n '파이낸셜뉴스', '한국경제TV', '는', '의', '머니투데', '하는', '이는', 'this', 'it', 'The', 'that', 'will', 'as', 'by',\n 'fi', '의', '가', '은', '들', '는', '좀', '잘', '걍', '과', '도', '를', '으로', '자', '에', '와', '한', '하다',\n '등을', '했다', '경우', '을', '또', '등이', '지난', '말했다', '다양', '사용', '머니투데이', '지난해', '며', '게', '때',\n '때문', '만들어', '전자신문', '며', '나타', '지금', '많이', '하고', '같은', '뉴스1', 'news1', '뉴스', '메인에서', 'news',\n '이데일리', '오전', '헤럴드경제', '했습니다', '다른', '그는', '가장', 'her', '노컷뉴스', 'yna', '만나보세요', '오늘', '바로',\n '달라진', '확', '흥', 'YTN', '있도', '전', '없다', '없는', '큰', '어떤', '제보', '국민일보', '1', '2', '3', '4',\n '5', '6', '7', '8', '9', '10', '따라', '있다는', '것을', '오는', '아이뉴스24']\n\nword_dict = {'서비': '서비스', '살처': '살처분', '네트워':'네트제워크', '데이':'데이터', '콘텐':'콘텐츠', '디자':'디자인', 'SK텔레':'SK텔레콤',\n 'LG유플러': 'LG유플러스'}\n\n\ntext_file = 'full_content_only.txt'\n\nprint(\"loading scores_dictionary\")\nwith open('noun_scores_dictionary.pickle', 'rb') as fr:\n scores_dictionary = pickle.load(fr)\n print(\"load complete\")\n\ndef content_to_token(text_file_name):\n print(\"opening file \" + text_file_name)\n with open(text_file_name, 'r', encoding = \"utf-8\") as f:\n lines = f.read().splitlines()\n re.sub(r\"[\\[\\]<>~]\", ' ', lines[0])\n re.sub(r\"['~]\", ' ', lines[0])\n re.sub(r'\"', ' ', lines[0])\n\n text = []\n for line in lines:\n line = re.sub(r\"[\\[\\]<>~]\", ' ', line)\n line = re.sub(r\"['~]\", ' ', line)\n line = re.sub(r'\"', ' ', line)\n line = re.sub('\\\\W', ' ', line)\n text.append(line)\n\n ltokenizer = LTokenizer(scores = scores_dictionary)\n\n print(\"making list of words\")\n words = []\n for sent in text:\n conclude_sent = []\n #flatten을 False로 주어서 [L명사, R조사]형태로 분류하게 만듦.\n pre_list = ltokenizer.tokenize(sent, flatten=False)\n for LR_list in pre_list:\n word = LR_list[0]\n if word in word_dict:\n word = word_dict[word]\n if word not in exception_list:\n conclude_sent.append(word)\n words.append(conclude_sent)\n\n token_file_name = text_file_name[:-4] + '.csv'\n\n f = open(token_file_name, 'w', newline=\"\")\n wr = csv.writer(f)\n for word in words:\n wr.writerow(word)\n f.close()\n\nif __name__ == '__main__':\n content_to_token(text_file)\n\n\n\n\n# 전체로 한 다음에 토픽 보고. 
연도 별로 변화하는 것을 본 다음에,\n# 7에서 하면 토픽 이렇게 이룸 붙이고 12로 뽑으면 토픽 이렇게 나올 것 같\n# 그 달에 나온 단어들 빈도수 보는 것.\n\n","sub_path":"metaverse/tokenization/3_making_token_from_scores.py","file_name":"3_making_token_from_scores.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"457979769","text":"import xml.etree.ElementTree as ET\n\nfrom src.storages.abstract import AbstractStorage\nfrom src.x4_universe.reservations import (\n X4EntityReservationRow,\n X4EntityReservations,\n)\nfrom src.x4_universe.trade import X4EntityTrade, X4EntityTradeRow\n\n\nclass ETTrade():\n \"\"\"Парсер тэга trade в ElementTree.\"\"\"\n\n def __init__(self, component_type: str, code: str, data: ET) -> None:\n self.component_type = component_type\n self.code = code\n\n try:\n self.data = data.find(\"trade\")\n except AttributeError:\n self.data = None\n\n @staticmethod\n def _get_offer_type(trade: ET) -> str:\n \"\"\"Получение типа предложения: покупка или продажа.\"\"\"\n offer_type = \"\"\n\n if trade.attrib.get(\"buyer\"):\n offer_type = \"buyer\"\n\n if trade.attrib.get(\"seller\"):\n offer_type = \"seller\"\n\n return offer_type\n\n def _parse_offers(self, storage: AbstractStorage) -> None:\n \"\"\"Парсинг тэга offers внутри trade.\"\"\"\n try:\n trades = self.data.find(\"offers\").find(\"production\")\n except AttributeError:\n trades = []\n\n if trades:\n storage.save(\n X4EntityTrade(\n entity_code=self.code,\n entity_type=self.component_type,\n rows=[\n X4EntityTradeRow(\n offer_type=self._get_offer_type(trade),\n ware=trade.attrib.get('ware'),\n price=trade.attrib.get('price'),\n amount=trade.attrib.get('amount'),\n desired=trade.attrib.get('desired'),\n )\n for trade in trades\n ],\n )\n )\n\n def _parse_prices(self) -> None:\n \"\"\"Парсинг тэга prices внутри trade.\"\"\"\n pass\n\n def _parse_reservations(self, storage: AbstractStorage) -> None:\n \"\"\"Парсинг тэга reservations внутри trade.\"\"\"\n try:\n reservations = self.data.find(\"reservations\")\n except AttributeError:\n reservations = []\n\n if reservations:\n storage.save(\n X4EntityReservations(\n entity_type=self.component_type,\n entity_code=self.code,\n rows=[\n X4EntityReservationRow(\n ware=reservation.attrib.get('ware'),\n price=reservation.attrib.get('price'),\n amount=reservation.attrib.get('amount'),\n desired=reservation.attrib.get('desired'),\n )\n for reservation in reservations\n ],\n )\n )\n\n def parse(self, storage: AbstractStorage) -> None:\n \"\"\"Парсинг тэга trade в ElementTree.\"\"\"\n self._parse_offers(storage)\n self._parse_prices()\n self._parse_reservations(storage)\n","sub_path":"src/x4_save_processor/component/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"441984384","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/10 下午2:25\n# @Author : czw@rich-f.com\n# @Site : www.rich-f.com\n# @File : permission_views.py\n# @Software: 数据交换管理平台\n# @Function:权限管理主页、权限管理增删改查、权限认证\n\nfrom flask import Blueprint, flash, redirect, render_template, request, url_for, jsonify, Response, session\nfrom richdataxweb.sysadmin.forms import PermissionForm\nfrom richdataxweb.sysadmin.models import Permissionlist, SysUser, RoleList\nfrom richdataxweb.datatables import ColumnDT, DataTables\nimport json\nfrom richdataxweb.database import CRUDMixin\nimport datetime as dt\nfrom flask_login import login_required\nfrom 
.oper_log import operate_log\n\nblueprint = Blueprint('permission', __name__, url_prefix='/sysadmin/permission', static_folder='../static')\n\n\n@blueprint.route('/', methods=['GET'])\n@login_required\ndef home():\n if PermissionVerify(session['user_id']) == 0:\n return redirect(url_for('permission.nopermission'))\n else:\n form = PermissionForm(request.form)\n return render_template('sysadmin/permission_home.html', **locals())\n\n\n@blueprint.route('/add/', methods=['GET', 'POST'])\ndef AddPermission():\n form = PermissionForm()\n if request.method == 'POST':\n form = PermissionForm(request.form)\n if form.validate():\n name = request.form['name']\n url = request.form['url']\n type = request.form['type']\n if type == '':\n type = '0'\n permission = Permissionlist(name=name, url=url, type=type, create_by=session['username'])\n permission.save()\n operate_log(\n SysUser.query.filter_by(username=session['username']).first().sys_org_id,\n session['username'],\n '数据安全支撑系统-权限管理',\n '添加权限:' + request.form['name'],\n request.remote_addr\n )\n name_dict = {'code': '00', 'desc': '添加成功!', 'desc1': '', 'desc2': ''}\n return json.dumps(name_dict)\n else:\n if len(form.name.errors) == 0:\n desc1 = ''\n else:\n desc1 = form.name.errors[0]\n\n if len(form.url.errors) == 0:\n desc2 = ''\n else:\n desc2 = form.url.errors[0]\n\n name_dict = {'code': '01', 'desc1': desc1, 'desc2': desc2}\n return json.dumps(name_dict)\n\n return render_template('sysadmin/permission_add.html', **locals())\n\n\n@blueprint.route('/edit/', methods=['GET', 'POST'])\ndef EditPermission(id):\n form = PermissionForm()\n permission = Permissionlist.query.get(int(id))\n if request.method == 'POST':\n form = PermissionForm(request.form)\n form.setId(permission.id)\n if form.validate():\n name = request.form['name']\n url = request.form['url']\n type = request.form['type']\n permission.name = name\n permission.url = url\n permission.type = type\n permission.update_date = dt.datetime.now()\n permission.update_by = session['username']\n permission.save()\n operate_log(\n SysUser.query.filter_by(username=session['username']).first().sys_org_id,\n session['username'],\n '数据安全支撑系统-权限管理',\n '编辑权限:' + permission.name,\n request.remote_addr\n )\n name_dict = {'code': '00', 'desc': '编辑成功!', 'desc1': '', 'desc2': ''}\n return json.dumps(name_dict)\n else:\n if len(form.name.errors) == 0:\n desc1 = ''\n else:\n desc1 = form.name.errors[0]\n\n if len(form.url.errors) == 0:\n desc2 = ''\n else:\n desc2 = form.url.errors[0]\n name_dict = {'code': '01', 'desc1': desc1, 'desc2': desc2}\n return json.dumps(name_dict)\n\n return render_template('sysadmin/permission_home.html', **locals())\n\n\n@blueprint.route('/edit/data/', methods=['GET', 'POST'])\ndef edit_get_SysOrg(id):\n permission = Permissionlist.query.get(int(id))\n result = {}\n result['name'] = permission.name\n result['url'] = permission.url\n result['type'] = permission.type\n return json.dumps(result)\n\n\n@blueprint.route('/data/', methods=['GET'])\ndef get_permission_data():\n columns = [\n ColumnDT(Permissionlist.id),\n ColumnDT(Permissionlist.name),\n ColumnDT(Permissionlist.url),\n ColumnDT(Permissionlist.type),\n ColumnDT(Permissionlist.type),\n ColumnDT(Permissionlist.id),\n ]\n query = Permissionlist.query\n params = request.args.to_dict()\n rowTable = DataTables(params, query, columns)\n ro = rowTable.output_result()\n for i in range(len(ro['data'])):\n type = ro['data'][i]['4']\n if type == 0:\n ro['data'][i]['4'] = '限制权限'\n elif type == 1:\n ro['data'][i]['4'] = '默认权限'\n return 
json.dumps(rowTable.output_result(), cls=PermissionEncoder)\n\n\nclass PermissionEncoder(json.JSONEncoder):\n def default(self, obj):\n \"\"\"\n json 转换对象\n :param obj:\n :return:\n \"\"\"\n if isinstance(obj, Permissionlist):\n return obj.id\n return json.JSONEncoder.default(self, obj)\n\n\n@blueprint.route('/delete/', methods=['GET', 'POST'])\ndef delete():\n json_data = json.loads(request.values.get('ids'))\n list_id_del = json_data['ids']\n for id in list_id_del:\n sysorg = Permissionlist.query.get(int(id))\n operate_log(\n SysUser.query.filter_by(username=session['username']).first().sys_org_id,\n session['username'],\n '数据安全支撑系统-权限管理',\n '删除权限:' + sysorg.name,\n request.remote_addr\n )\n sysorg.delete()\n name_dict = {'code': '00', 'desc': '删除成功!'}\n return json.dumps(name_dict)\n\n\ndef PermissionVerify(value):\n '''权限认证模块,\n 此模块会先判断用户是否是管理员(is_superuser为True),如果是管理员,则具有所有权限,\n 如果不是管理员则获取request.user和request.path两个参数,判断两个参数是否匹配,匹配则有权限,反之则没有。\n '''\n\n iUser = SysUser.get_by_id(value)\n if not iUser.is_superuser: # 判断用户如果是超级管理员则具有所有权限\n # if not iUser.role: #如果用户无角色,直接返回无权限\n # return HttpResponseRedirect(reverse('sysadmin:permission.no'))\n\n role_permission = RoleList.get_by_id(iUser.role_id)\n role_permission_list = role_permission.permission.all()\n\n matchUrl = []\n for x in role_permission_list:\n if request.path == x.url or request.path.rstrip('/') == x.url: # 精确匹配,判断request.path是否与permission表中的某一条相符\n matchUrl.append(x.url)\n elif request.path.startswith(x.url): # 判断request.path是否以permission表中的某一条url开头\n matchUrl.append(x.url)\n else:\n pass\n if matchUrl == []:\n return 0\n else:\n return matchUrl\n else:\n return 1\n\n\n@blueprint.route('/error', methods=['GET'])\n# @login_required\ndef nopermission():\n \"\"\"List members.\"\"\"\n return render_template('sysadmin/no_permission.html', **locals())\n","sub_path":"richdataxweb/sysadmin/permission_views.py","file_name":"permission_views.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343966946","text":"n=int(input())\nfor i in range(n):\n s=\"\"\n s=input()\n l=list(s)\n l.sort()\n if \"a\" in l:\n res=\"a\"\n for m in range(1,len(l)):\n a=l[m]\n res=res+a\n \n print(res)\n else:\n print(-1)","sub_path":"Code/CodeRecords/2608/48721/310521.py","file_name":"310521.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"193275238","text":"\"\"\"\nfrom https://leetcode.com/problems/palindrome-number/\n\"\"\"\n\n\nclass Solution(object):\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n s = str(x)\n pivot = int(len(s) / 2)\n i = 0 # start at 0. 
If you start at 1, the even case does NOT compare the two core characters, e.g., 2 and 3 in '0230'.\n\n if len(s) <= 1:\n return True\n\n if len(s) % 2 == 0:\n # even (core of 2)\n while (pivot - i - 1) >= 0 and (pivot + i) < len(s):\n if s[pivot - i - 1] != s[pivot + i]:\n break\n i += 1\n if pivot - i - 1 < 0:\n return True\n else:\n # odd (core of 1)\n while (pivot - i) >= 0 and (pivot + i) < len(s):\n if s[pivot - i] != s[pivot + i]:\n break\n i += 1\n\n if pivot - i < 0:\n return True\n\n return False\n","sub_path":"leetcode/python/src/9_palindrome_number.py","file_name":"9_palindrome_number.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"452659638","text":"# coding: utf-8\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\nfrom flask import Flask\nfrom jsonschema import ValidationError\nfrom mock import create_autospec\nimport pytest\n\nfrom reles import DataStore, modificators\nfrom reles.modificators import Processor, _log_access\nfrom .conftest import check_schema, validate\n\n\n@pytest.fixture\ndef test_schema():\n schema = {\n \"type\": \"object\",\n \"x-es-mapping\": {\n \"properties\": {\n \"log\": {\n \"created\": { \"type\": \"date\" },\n \"changed\": { \"type\": \"date\" },\n \"editor\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"customer\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n }\n }\n }\n },\n \"properties\": {\n \"log\": {\n \"type\": \"object\",\n \"x-log-access\": {\n \"created\": \"created\",\n \"updated\": \"changed\",\n \"user\": \"editor\",\n \"customer\": \"customer\"\n },\n \"properties\": {\n \"created\": {\n \"type\": \"string\",\n \"format\": \"date-time\"\n },\n \"changed\": {\n \"type\": \"string\",\n \"format\": \"date-time\"\n },\n \"editor\": {\n \"type\": \"string\",\n \"format\": \"uuid\",\n \"x-fkey\": {\n \"index\": \"auth\",\n \"doc_type\": \"user\"\n }\n },\n \"customer\": {\n \"type\": \"string\",\n \"format\": \"uuid\",\n \"x-fkey\": {\n \"index\": \"auth\",\n \"doc_type\": \"customer\"\n }\n }\n\n }\n },\n \"name\": {\n \"type\": \"string\"\n }\n }\n }\n\n return check_schema(schema)\n\n\n@pytest.fixture\ndef modification_time():\n return 598997700\n\n\n@pytest.yield_fixture\ndef g(monkeypatch, modification_time):\n app = Flask(__name__)\n ctx = app.app_context()\n\n ctx.g.user = {'email': 'john.mcclane@nyc.gov'}\n ctx.g.customer = {'name': 'NYPD'}\n\n monkeypatch.setattr(modificators, 'time', lambda: modification_time)\n\n with ctx:\n yield ctx.g\n\n\n@pytest.fixture\ndef processor(test_schema):\n\n test_processors = {\n 'x-log-access': _log_access\n }\n\n datastore = create_autospec(DataStore)\n\n return Processor(\n test_schema,\n datastore=datastore,\n processors=test_processors\n )\n\n\ndef test_creation_gets_logged(test_schema, processor, g, modification_time):\n name = 'Nakatomi Plaza'\n\n given = validate(\n test_schema,\n {\n 'name': name\n }\n )\n\n processed = processor.process(given)\n\n assert processed == {\n 'log': {\n 'created': modification_time,\n 'editor': g.user['email'],\n 'customer': g.customer['name'],\n },\n 'name': name\n }\n\n\ndef test_update_gets_logged(test_schema, processor, g, modification_time):\n name = 'Dulles International Airport'\n\n given = validate(\n test_schema,\n {\n 'name': name\n }\n )\n\n processed = processor.process(given, id=1)\n\n assert processed == {\n 'log': {\n 'changed': modification_time,\n 'editor': g.user['email'],\n 
'customer': g.customer['name'],\n },\n 'name': name\n }\n\n\ndef test_manual_override_is_rejected(test_schema, processor):\n given = validate(\n test_schema,\n {\n 'name': 'New York',\n 'log': {\n 'editor': 'simon@gruber.net'\n }\n }\n )\n\n with pytest.raises(ValidationError) as exception_info:\n processor.process(given, id=1)\n\n assert 'cannot be overridden manually' in exception_info.value.message\n","sub_path":"tests/modificators/test_log_access.py","file_name":"test_log_access.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"464620736","text":"# = A clone of chemistry simulator. =\r\n## A chemistry self assembly environment for searching parameter\r\nimport datetime\r\nimport math\r\nimport os\r\nimport re\r\nimport sys\r\nimport time\r\n\r\nimport numpy as np\r\nimport powerlaw\r\nimport pygame\r\n\r\nfrom actchem import *\r\nfrom particleChem import *\r\nfrom rl import *\r\n\r\n\r\nclass HChemRule:\r\n def __init__(self, filename):\r\n self.cnt = 0\r\n self.num = None\r\n self.fill = []\r\n self.types = []\r\n self.map = {}\r\n self.color_count = 0\r\n self.colors = []\r\n self.colormap = {}\r\n self.wildcards = ['X', 'Y']\r\n self.wildstates = ['x', 'y']\r\n self.state_max = 10\r\n self.name = []\r\n self.ruleb = {} # Rules for bounded pair\r\n self.ruleu = {} # Rules for unbounded pair\r\n self.rule_texts = []\r\n self.colortable = [\r\n (0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255),\r\n (255, 255, 0), (255, 0, 255), (0, 255, 255),\r\n (255, 127, 0), (255, 0, 127), (127, 255, 0), (127, 0, 255),\r\n (0, 255, 127), (0, 127, 255)\r\n ]\r\n\r\n self.parse(filename)\r\n for t in self.types:\r\n for s in range(0, self.state_max + 1):\r\n self.to_index(t, str(s))\r\n\r\n def gen_color(self, a):\r\n self.color_count += 1\r\n return self.colortable[(self.color_count - 1) % len(self.colortable)]\r\n\r\n def to_index(self, t, n):\r\n term = t + n\r\n if not term in self.map:\r\n self.map[term] = self.cnt\r\n self.colors.append(self.colormap[t])\r\n self.name.append(term)\r\n self.cnt += 1\r\n return self.map[term]\r\n\r\n def is_valid_type(self, term):\r\n if term in self.map:\r\n return True\r\n else:\r\n return False\r\n\r\n def get_index(self, term):\r\n return self.map[term]\r\n\r\n def get_name(self, idx):\r\n return self.name[idx]\r\n\r\n def parse_expr(self, str):\r\n if \"-\" in str:\r\n bnd = True\r\n M0, M1 = str.split(\"-\")\r\n else:\r\n bnd = False\r\n output_reactants = str.split()\r\n if len(output_reactants) < 2:\r\n raise Exception(\"Too few reactants\")\r\n M0 = output_reactants[0]\r\n M1 = output_reactants[1]\r\n p = re.search(r'([a-wzA-Z]+)(\\d+|[xy])', M0)\r\n q = re.search(r'([a-wzA-Z]+)(\\d+|[xy])', M1)\r\n return (p.group(1), p.group(2), q.group(1), q.group(2), bnd)\r\n\r\n def add_rule(self, L0, l0, L1, l1, lbnd, R0, r0, R1, r1, rbnd, prob):\r\n if l0 in self.wildstates and l1 in self.wildstates:\r\n if l0 == l1:\r\n for s in range(self.state_max + 1):\r\n s = str(s)\r\n _l0 = s\r\n _l1 = s\r\n if r0 == l0:\r\n _r0 = s\r\n else:\r\n _r0 = r0\r\n if r1 == l0:\r\n _r1 = s\r\n else:\r\n _r1 = r1\r\n self.add_rule(L0, _l0, L1, _l1, lbnd, R0, _r0, R1, _r1, rbnd, prob)\r\n return\r\n else:\r\n for s0 in range(self.state_max + 1):\r\n for s1 in range(self.state_max + 1):\r\n if l0 == l1 and s0 > s1: continue\r\n s0 = str(s0)\r\n s1 = str(s1)\r\n _l0 = s0\r\n _l1 = s1\r\n if r0 == l0:\r\n _r0 = s0\r\n elif r0 == l1:\r\n _r0 = s1\r\n else:\r\n _r0 = r0\r\n if r1 == l0:\r\n 
_r1 = s0\r\n elif r1 == l1:\r\n _r1 = s1\r\n else:\r\n _r1 = r1\r\n self.add_rule(L0, _l0, L1, _l1, lbnd, R0, _r0, R1, _r1, rbnd, prob)\r\n return\r\n elif l0 in self.wildstates or l1 in self.wildstates:\r\n if l0 in self.wildstates:\r\n for s in range(self.state_max + 1):\r\n s = str(s)\r\n _l0 = s\r\n if r0 == l0:\r\n _r0 = s\r\n else:\r\n _r0 = r0\r\n if r1 == l0:\r\n _r1 = s\r\n else:\r\n _r1 = r1\r\n self.add_rule(L0, _l0, L1, l1, lbnd, R0, _r0, R1, _r1, rbnd, prob)\r\n return\r\n else:\r\n for s in range(self.state_max + 1):\r\n s = str(s)\r\n _l1 = s\r\n if r0 == l1:\r\n _r0 = s\r\n else:\r\n _r0 = r0\r\n if r1 == l1:\r\n _r1 = s\r\n else:\r\n _r1 = r1\r\n self.add_rule(L0, l0, L1, _l1, lbnd, R0, _r0, R1, _r1, rbnd, prob)\r\n return\r\n LL0 = self.to_index(L0, l0)\r\n LL1 = self.to_index(L1, l1)\r\n RR0 = self.to_index(R0, r0)\r\n RR1 = self.to_index(R1, r1)\r\n if lbnd:\r\n if not (LL0, LL1) in self.ruleb:\r\n self.ruleb[(LL0, LL1)] = []\r\n if LL0 != LL1:\r\n self.ruleb[(LL1, LL0)] = []\r\n self.ruleb[(LL0, LL1)].append((RR0, RR1, rbnd, prob))\r\n if LL0 != LL1:\r\n self.ruleb[(LL1, LL0)].append((RR1, RR0, rbnd, prob))\r\n else:\r\n if not (LL0, LL1) in self.ruleu:\r\n self.ruleu[(LL0, LL1)] = []\r\n if LL0 != LL1:\r\n self.ruleu[(LL1, LL0)] = []\r\n self.ruleu[(LL0, LL1)].append((RR0, RR1, rbnd, prob))\r\n if LL0 != LL1:\r\n self.ruleu[(LL1, LL0)].append((RR1, RR0, rbnd, prob))\r\n\r\n def parse_rule(self, line):\r\n try:\r\n lhs, rhs = line.split(\"->\")\r\n except:\r\n return\r\n self.rule_texts.append(line)\r\n prob = 1.0\r\n if \":\" in rhs:\r\n rhs, p = rhs.split(\":\")\r\n prob = eval(p.strip())\r\n try:\r\n L0, l0, L1, l1, lbnd = self.parse_expr(lhs.strip())\r\n R0, r0, R1, r1, rbnd = self.parse_expr(rhs.strip())\r\n except Exception as e:\r\n print(\"Error parsing line:\", line)\r\n exit()\r\n if L0 in self.wildcards and L1 in self.wildcards:\r\n if L0 == L1:\r\n for t in self.types:\r\n _L0 = t\r\n _L1 = t\r\n if R0 == L0:\r\n _R0 = t\r\n else:\r\n _R0 = R0\r\n if R1 == L0:\r\n _R1 = t\r\n else:\r\n _R1 = R1\r\n self.add_rule(_L0, l0, _L1, l1, lbnd, _R0, r0, _R1, r1, rbnd, prob)\r\n else:\r\n for t0 in self.types:\r\n for t1 in self.types:\r\n if l0 == l1 and t0 > t1: continue\r\n _L0 = t0\r\n _L1 = t1\r\n if R0 == L0:\r\n _R0 = t0\r\n elif R0 == L1:\r\n _R0 = t1\r\n else:\r\n _R0 = R0\r\n if R1 == L0:\r\n _R1 = t0\r\n elif R1 == L1:\r\n _R1 = t1\r\n else:\r\n _R1 = R1\r\n self.add_rule(_L0, l0, _L1, l1, lbnd, _R0, r0, _R1, r1, rbnd, prob)\r\n elif L0 in self.wildcards or L1 in self.wildcards:\r\n if L0 in self.wildcards:\r\n for t in self.types:\r\n _L0 = t\r\n if R0 == L0:\r\n _R0 = t\r\n else:\r\n _R0 = R0\r\n if R1 == L0:\r\n _R1 = t\r\n else:\r\n _R1 = R1\r\n self.add_rule(_L0, l0, L1, l1, lbnd, _R0, r0, _R1, r1, rbnd, prob)\r\n else:\r\n for t in self.types:\r\n _L1 = t\r\n if R0 == L1:\r\n _R0 = t\r\n else:\r\n _R0 = R0\r\n if R1 == L1:\r\n _R1 = t\r\n else:\r\n _R1 = R1\r\n self.add_rule(L0, l0, _L1, l1, lbnd, _R0, r0, _R1, r1, rbnd, prob)\r\n else:\r\n self.add_rule(L0, l0, L1, l1, lbnd, R0, r0, R1, r1, rbnd, prob)\r\n\r\n def add_type(self, t):\r\n self.types.append(t)\r\n self.colormap[t] = self.gen_color(t)\r\n\r\n def setup_types(self, str):\r\n lhs, rhs = str.split(\":\")\r\n for t in rhs.split(\",\"):\r\n self.add_type(t.strip())\r\n\r\n def setup_fill(self, str):\r\n lhs, rhs = str.split(\":\")\r\n for decl in rhs.split(\",\"):\r\n t, p = decl.strip().split(\" \")\r\n self.fill.append((t, eval(p)))\r\n\r\n def parse(self, filename):\r\n f = open(filename, 
\"r\")\r\n while True:\r\n line = f.readline()\r\n if not line: break\r\n if line[0] == '#': continue\r\n line = line.strip()\r\n if line.find(\"type\") == 0:\r\n self.setup_types(line)\r\n elif line.find(\"number of particles\") == 0:\r\n self.num = int(line.split(\":\")[1].strip())\r\n elif line.find(\"state max\") == 0:\r\n self.state_max = int(line.split(\":\")[1].strip())\r\n elif line.find(\"fill\") == 0:\r\n self.setup_fill(line)\r\n elif \"->\" in line:\r\n self.parse_rule(line)\r\n f.close()\r\n\r\n def check(self, L0, L1, bound):\r\n possible_reactions = []\r\n if bound:\r\n if (L0, L1) in self.ruleb:\r\n possible_reactions += self.ruleb[(L0, L1)]\r\n else:\r\n if (L0, L1) in self.ruleu:\r\n possible_reactions += self.ruleu[(L0, L1)]\r\n if len(possible_reactions) == 0:\r\n return None\r\n return possible_reactions\r\n\r\n\r\nclass HChem:\r\n # n : number of particles\r\n # r : radious of particles\r\n # v0 : initial velocity of particles\r\n # dt : duration of one frame\r\n # k : strength of bonds\r\n # w,h : width and height of the universe\r\n # seed: random seed\r\n def __init__(self, rules, particles_filename=None, n=1000, r=10, v0=None, dt=0.1,\r\n width=1200, height=700, bucket_size=None, seed=None):\r\n self.rule = HChemRule(rules)\r\n if seed: np.random.seed(seed)\r\n if v0 == None: v0 = r\r\n if bucket_size == None: bucket_size = 2 * r\r\n\r\n self.n = n\r\n if self.rule.num is not None:\r\n self.n = self.rule.num\r\n self.r = r\r\n self.dt = dt\r\n self.w = width\r\n self.h = height\r\n self.speed = 1\r\n self.show_applied_rules = False\r\n self.randomize_rule_order = False\r\n self.state_max = self.rule.state_max\r\n self.R = 0 # The R hipotesis probe\r\n self.p = 0 # the p-value of the statistical distribution\r\n\r\n # Initialize positions of particles\r\n self.pos = np.zeros((n, 2))\r\n self.pos[:, 0] = np.random.uniform(r, width - r, n)\r\n self.pos[:, 1] = np.random.uniform(r, height - r, n)\r\n\r\n ##Create a set of q agents per state\r\n\r\n\r\n self.q_agents = [QLearningAgent(ParticleTypeMDP(self, i), Ne=5, Rplus=2,\r\n alpha=lambda n: 60. / (59 + n)) for i in range(self.n)]\r\n self.visited_agents = []\r\n self.chain_lengths = None\r\n\r\n # Initialize velocities of particles\r\n # N.B. this discards the velocities for particles files loaded on the command line\r\n direction = np.random.uniform(0, 2 * np.pi, n)\r\n self.vel = np.zeros((n, 2))\r\n self.vel[:, 0] = v0 * np.cos(direction)\r\n self.vel[:, 1] = v0 * np.sin(direction)\r\n\r\n # Initialize types\r\n self.types = np.zeros(n, dtype=int)\r\n for k in range(self.n):\r\n p = np.random.uniform(0, 1)\r\n q = 0\r\n for (t, r) in self.rule.fill:\r\n q += r\r\n if p < q:\r\n self.types[k] = self.rule.get_index(t)\r\n break\r\n self.stypes = np.zeros(n, dtype=object) # n number of particles\r\n for k in range(self.n):\r\n self.stypes[k] = self.rule.get_name(self.types[k])\r\n\r\n # self.bonds[i] == list of indexes of particles which is bound to i.\r\n self.bonds = np.zeros(n, dtype=object)\r\n for i in range(n): self.bonds[i] = []\r\n\r\n # Initialize buckets for compute_collision detection\r\n self.bucket_size = bucket_size\r\n self.nbx = int(math.ceil(float(width) / bucket_size))\r\n self.nby = int(math.ceil(float(height) / bucket_size))\r\n self.buckets = np.zeros((self.nbx, self.nby), dtype=object)\r\n\r\n if particles_filename:\r\n self.load_particles(particles_filename)\r\n # Randomize velocities of particles\r\n # N.B. 
this discards the velocities for particles files loaded on the command line\r\n direction = np.random.uniform(0, 2 * np.pi, self.n)\r\n self.vel = np.zeros((self.n, 2))\r\n self.vel[:, 0] = v0 * np.cos(direction)\r\n self.vel[:, 1] = v0 * np.sin(direction)\r\n print(\"Randomizing velocites. If want stored velocities, reload the particles file.\")\r\n\r\n def bucket_index(self, x):\r\n return (min(max(int(x[0] / self.bucket_size), 0), self.nbx - 1),\r\n min(max(int(x[1] / self.bucket_size), 0), self.nby - 1))\r\n\r\n def init_bucket(self):\r\n for i in range(self.nbx):\r\n for j in range(self.nby):\r\n self.buckets[i, j] = []\r\n for k in range(self.n):\r\n i, j = self.bucket_index(self.pos[k, :])\r\n self.buckets[i, j].append(k)\r\n\r\n def add_impulse_from_walls(self):\r\n r = self.r\r\n for k in range(self.n):\r\n x = self.pos[k, 0]\r\n y = self.pos[k, 1]\r\n vx = self.vel[k, 0]\r\n vy = self.vel[k, 1]\r\n if (x < r and vx < 0) or (x > self.w - r and vx > 0):\r\n self.vel[k, 0] += -2 * self.vel[k, 0]\r\n self.vel[k, 1] += 0\r\n if (y < r and vy < 0) or (y > self.h - r and vy > 0):\r\n self.vel[k, 0] += 0\r\n self.vel[k, 1] += -2 * self.vel[k, 1]\r\n\r\n def update_state_of_particle_pair(self, k, l):\r\n mdp = self.q_agents[k].mdp\r\n mdp.other_index = l\r\n run_single_trial(self.q_agents[k], mdp)\r\n #run_single_trial(self.q_agents[l], ParticleMDP(self, l,k))\r\n if l in self.bonds[k]:\r\n return True\r\n return False\r\n # is_bond =False\r\n #\r\n # if l in self.bonds[k]:\r\n # is_bond = True\r\n # type_k = self.types[k]\r\n # if not self.visited_agents[type_k]:\r\n # mdp = self.q_agents[type_k].mdp\r\n # mdp.other_index = l\r\n # self.visited_agents[type_k] = True\r\n # run_single_trial(self.q_agents[type_k], mdp)\r\n #\r\n # type_l = self.types[l]\r\n # if not self.visited_agents[type_l]:\r\n # mdp = self.q_agents[type_l].mdp\r\n # mdp.other_index = k\r\n # self.visited_agents[type_l] = True\r\n # run_single_trial(self.q_agents[type_l], mdp)\r\n # LL0 = type_k\r\n # LL1 = type_l\r\n # RR0 = type_k\r\n # RR1 = type_l\r\n # if l in self.bonds[k] and not is_bond:\r\n # if not (LL0, LL1) in self.ruleb:\r\n # self.ruleb[(LL0, LL1)] = []\r\n # if LL0 != LL1:\r\n # self.ruleb[(LL1, LL0)] = []\r\n # self.ruleb[(LL0, LL1)].append((RR0, RR1, rbnd, 1))\r\n # if LL0 != LL1:\r\n # self.ruleb[(LL1, LL0)].append((RR1, RR0, rbnd, 1))\r\n # else:\r\n # if not (LL0, LL1) in self.ruleu:\r\n # self.ruleu[(LL0, LL1)] = []\r\n # if LL0 != LL1:\r\n # self.ruleu[(LL1, LL0)] = []\r\n # self.ruleu[(LL0, LL1)].append((RR0, RR1, rbnd, prob))\r\n # if LL0 != LL1:\r\n # self.ruleu[(LL1, LL0)].append((RR1, RR0, rbnd, prob))\r\n # if l in self.bonds[k]:\r\n # # bound pair\r\n # rules = self.rule.check(self.types[k], self.types[l], True)\r\n #\r\n # if rules:\r\n # if self.randomize_rule_order:\r\n # np.random.shuffle(rules)\r\n # for r in rules:\r\n # p = r[3]\r\n # if np.random.uniform(0, 1) < p:\r\n # if self.show_applied_rules:\r\n # print(\"apply:\", )\r\n # print(self.rule.get_name(\r\n # self.types[k]), ) #### TODO: Ingresar aqui el q learning por cada accion\r\n # print(\"-\", )\r\n # print(self.rule.get_name(self.types[l]), )\r\n # print(\" -> \", )\r\n # print(self.rule.get_name(r[0]), )\r\n # if r[2]:\r\n # print(\"-\", )\r\n # else:\r\n # print(\" \", )\r\n # print(self.rule.get_name(r[1]))\r\n # self.types[k] = r[0]\r\n # self.types[l] = r[1]\r\n # if not r[2]:\r\n # self.bonds[k].remove(l)\r\n # self.bonds[l].remove(k)\r\n # return False\r\n # return True\r\n # return True\r\n # else:\r\n # # unbound 
pair\r\n # rules = self.rule.check(self.types[k], self.types[l], False)\r\n # if rules:\r\n # if self.randomize_rule_order:\r\n # np.random.shuffle(rules)\r\n # for r in rules:\r\n # p = r[3]\r\n # if np.random.uniform(0, 1) < p:\r\n # if self.show_applied_rules:\r\n # print(\"apply:\", )\r\n # print(self.rule.get_name(self.types[k]), )\r\n # print(\" \", )\r\n # print(self.rule.get_name(self.types[l]), )\r\n # print(\" -> \", )\r\n # print(self.rule.get_name(r[0]), )\r\n # if r[2]:\r\n # print(\"-\", )\r\n # else:\r\n # print(\" \", )\r\n # print(self.rule.get_name(r[1]))\r\n # self.types[k] = r[0]\r\n # self.types[l] = r[1]\r\n # if r[2]:\r\n # self.bonds[k].append(l)\r\n # self.bonds[l].append(k)\r\n # return True\r\n # return False\r\n # return False\r\n\r\n def add_impulse_between_unbound_pair(self, k, l, rx, rv, d2):\r\n if self.update_state_of_particle_pair(k, l):\r\n return\r\n d = math.sqrt(d2)\r\n n = rx / d\r\n ldt = -n.dot(rv)\r\n self.vel[k, :] += ldt * n\r\n self.vel[l, :] -= ldt * n\r\n\r\n def add_impulse_between_bound_pair(self, k, l, rx, rv, d2):\r\n d = math.sqrt(d2)\r\n n = rx / d\r\n c = rx.dot(rv)\r\n # ldt = -(2*c + 3*(d2-4*self.r*self.r))/(8*d2)\r\n # self.vel[k,:] += 2*ldt*rx\r\n # self.vel[l,:] -= 2*ldt*rx\r\n if (d < 2 * self.r and c < 0) or (d > 2 * self.r and c > 0):\r\n ldt = -n.dot(rv)\r\n self.vel[k, :] += ldt * n\r\n self.vel[l, :] -= ldt * n\r\n\r\n def add_impulse_between_particles_sub(self, k, i, j):\r\n if i < 0 or j < 0 or i >= self.nbx or j >= self.nby: return\r\n for l in self.buckets[i, j]:\r\n if k >= l: continue\r\n rx = self.pos[k, :] - self.pos[l, :]\r\n rv = self.vel[k, :] - self.vel[l, :]\r\n if rx.dot(rv) >= 0: continue\r\n d2 = np.sum(rx * rx)\r\n if d2 > 4 * self.r * self.r: continue\r\n self.add_impulse_between_unbound_pair(k, l, rx, rv, d2)\r\n\r\n def add_impulse_between_particles(self):\r\n r = self.r\r\n\r\n # add impulse between unbound pairs\r\n for k in range(self.n):\r\n i, j = self.bucket_index(self.pos[k, :])\r\n self.add_impulse_between_particles_sub(k, i - 1, j)\r\n self.add_impulse_between_particles_sub(k, i - 1, j - 1)\r\n self.add_impulse_between_particles_sub(k, i - 1, j + 1)\r\n self.add_impulse_between_particles_sub(k, i, j - 1)\r\n self.add_impulse_between_particles_sub(k, i, j)\r\n self.add_impulse_between_particles_sub(k, i, j + 1)\r\n self.add_impulse_between_particles_sub(k, i + 1, j - 1)\r\n self.add_impulse_between_particles_sub(k, i + 1, j)\r\n self.add_impulse_between_particles_sub(k, i + 1, j + 1)\r\n\r\n def add_impulse_between_bound_particles(self):\r\n # add impulse between bound pairs\r\n for k in range(self.n):\r\n for l in self.bonds[k]:\r\n if k >= l: continue\r\n rx = self.pos[k, :] - self.pos[l, :]\r\n rv = self.vel[k, :] - self.vel[l, :]\r\n d2 = np.sum(rx * rx)\r\n self.add_impulse_between_bound_pair(k, l, rx, rv, d2)\r\n\r\n def compute_impulse(self):\r\n self.init_bucket()\r\n self.add_impulse_from_walls()\r\n self.add_impulse_between_particles()\r\n self.add_impulse_between_bound_particles()\r\n self.pos += self.vel * self.dt\r\n\r\n def change_speed(self, delta):\r\n self.speed += delta;\r\n if self.speed < 0:\r\n self.speed = 1\r\n\r\n def update(self):\r\n # Update position\r\n for k in range(self.speed):\r\n #self.visited_agents = [False] * self.state_max\r\n self.compute_impulse()\r\n self.chain_lengths = self.calculate_chain_lengths()\r\n unique, counts = np.unique(self.chain_lengths, return_counts=True)\r\n freq = dict(zip(unique, counts))\r\n if self.chain_lengths is not None and len(freq) 
> 2:\r\n results = powerlaw.Fit(self.chain_lengths)\r\n self.R, self.p = results.distribution_compare('power_law', 'lognormal')\r\n\r\n def total_energy(self):\r\n return np.sum(self.vel * self.vel) / 2\r\n\r\n def save(self, fname, type):\r\n if type == \"particles\":\r\n self.save_particles(fname)\r\n else:\r\n self.save_rules(fname)\r\n\r\n def save_particles(self, fname):\r\n try:\r\n with open(fname, \"w\") as f:\r\n f.write(repr(self.n));\r\n f.write(\"\\n\")\r\n f.write(repr(self.dt));\r\n f.write(\"\\n\")\r\n for k in range(self.n):\r\n f.write(self.stypes[k]);\r\n f.write(\",\")\r\n f.write(repr(self.pos[k, 0]));\r\n f.write(\",\")\r\n f.write(repr(self.pos[k, 1]));\r\n f.write(\",\")\r\n f.write(repr(self.vel[k, 0]));\r\n f.write(\",\")\r\n f.write(repr(self.vel[k, 1]));\r\n f.write(\"\\n\")\r\n for k in range(self.n):\r\n bonds = \" \".join(map(str, self.bonds[k]))\r\n f.write(bonds);\r\n f.write(\"\\n\")\r\n except Exception as e:\r\n print(\"Error:\", str(e))\r\n\r\n def load_particles(self, fname):\r\n try:\r\n with open(fname, \"r\") as f:\r\n n = int(f.readline())\r\n dt = float(f.readline())\r\n pos = []\r\n vel = []\r\n types = []\r\n bonds = []\r\n for k in range(n):\r\n line = f.readline()\r\n t, p0, p1, v0, v1 = line.strip().split(\",\")\r\n pos.append((float(p0), float(p1)))\r\n vel.append((float(v0), float(v1)))\r\n if not self.rule.is_valid_type(t):\r\n print(\"Unknown type on line:\", line)\r\n return\r\n types.append(t)\r\n for k in range(n):\r\n bonds.append(map(int, f.readline().strip().split()))\r\n self.n = n\r\n self.dt = dt\r\n self.pos = np.array(pos)\r\n self.vel = np.array(vel)\r\n self.stypes = np.array(types, dtype=object)\r\n self.types = np.array(map(lambda t: self.rule.get_index(t), types), dtype=int)\r\n self.bonds = bonds\r\n\r\n except Exception as e:\r\n print(\"Error:\", str(e))\r\n\r\n def load(self, fname, type):\r\n if type == \"particles\":\r\n self.load_particles(fname)\r\n else:\r\n self.load_rules(fname)\r\n\r\n def start_record(self):\r\n date = datetime.date.today()\r\n now = time.time()\r\n dirname = str(date) + str(now)\r\n os.mkdir(dirname)\r\n self.record_dir = dirname\r\n\r\n def record(self, iteration):\r\n self.save_particles(\"%s/iteration-%d.dat\" % (self.record_dir, iteration))\r\n\r\n def calculate_chain_lengths(self):\r\n chain_lengths = []\r\n visited_bonds = []\r\n\r\n for x in range(len(self.bonds)):\r\n bond = self.bonds[x]\r\n if x not in visited_bonds and len(bond) > 0:\r\n current_chain = self.look_deep_chain(x, [])\r\n visited_bonds.extend(current_chain)\r\n if len(current_chain) > 1:\r\n chain_lengths.append(len(current_chain))\r\n\r\n return chain_lengths\r\n\r\n def look_deep_chain(self, particle, current_chain=[]):\r\n current_chain.append(particle)\r\n for pair in self.bonds[particle]:\r\n if pair not in current_chain:\r\n current_chain = self.look_deep_chain(pair, current_chain)\r\n return current_chain\r\n\r\n\r\nclass HChemViewer:\r\n RED = (255, 0, 0)\r\n BLUE = (0, 0, 255)\r\n WHITE = (255, 255, 255)\r\n BLACK = (0, 0, 0)\r\n INFO = [\r\n # \"(P) play/pause, (F) stepwise, (R) record, (Q) quit, (T) show/hide particle types, (up) Speed up, (down) Speed down\",\r\n # \"(drag) move particle, (shift + drag) bind/unbind particles, (double click) chanppppppppsdpsdfospdfpsfopppppppppppppppolerwqertyuilokpppppppge type and state of a particle\"\r\n ]\r\n\r\n def __init__(self, sim, w=None, h=None):\r\n if w == None: w = sim.w\r\n if h == None: h = sim.h\r\n\r\n self.sim = sim\r\n pygame.init()\r\n self.screen = 
pygame.display.set_mode((w, h),\r\n pygame.DOUBLEBUF) # | pygame.FULLSCREEN | pygame.HWSURFACE\r\n # )\r\n pygame.display.set_caption(\"Artificial Chemistry - Chain Simulator\")\r\n self.fontsize = 18\r\n self.font = pygame.font.SysFont(None, self.fontsize)\r\n # info_texts = self.INFO + sim.rule.rule_texts\r\n info_texts = self.INFO # prefer not to see rules at the moment\r\n self.info = map(lambda text: self.font.render(text, False, self.BLUE),\r\n info_texts)\r\n\r\n self.speed = 10\r\n\r\n # For events\r\n self.record = False\r\n self.shift = False\r\n self.play = True\r\n self.stepwise = False\r\n self.dragged = False\r\n self.which_dragged = None\r\n self.moving = False\r\n self.binding = False\r\n self.display_types = True\r\n self.prev_lclick = time.time()\r\n\r\n def get_clicked(self):\r\n for k in range(self.sim.n):\r\n d2 = np.sum((self.sim.pos[k, :] - pygame.mouse.get_pos()) ** 2)\r\n if d2 < self.sim.r ** 2:\r\n return k\r\n break\r\n return None\r\n\r\n def ask_particle(self):\r\n return raw_input(\"Enter particle type and state: \")\r\n\r\n def ask_file(self, title):\r\n filename = raw_input(\"Enter filename: \")\r\n if \".dat\" in filename:\r\n savetype = \"particles\"\r\n else:\r\n savetype = \"rules\"\r\n return filename, savetype\r\n\r\n def check_event(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n key = pygame.key.get_pressed()\r\n if key[pygame.K_RSHIFT] or key[pygame.K_LSHIFT]:\r\n self.shift = True\r\n if key[pygame.K_r]:\r\n self.record = not self.record\r\n if self.record:\r\n self.sim.start_record()\r\n if key[pygame.K_q]:\r\n sys.exit()\r\n if key[pygame.K_p]:\r\n self.play = not self.play\r\n if key[pygame.K_UP]:\r\n self.sim.change_speed(1)\r\n if key[pygame.K_DOWN]:\r\n self.sim.change_speed(-1)\r\n if key[pygame.K_s]:\r\n fname, type = self.ask_file(\"Save configuration\")\r\n if fname: self.sim.save(fname, type)\r\n if key[pygame.K_l]:\r\n fname, type = self.ask_file(\"Load configuration\")\r\n if fname: self.sim.load(fname, type)\r\n if key[pygame.K_t]:\r\n self.display_types = not self.display_types\r\n if key[pygame.K_f]:\r\n self.stepwise = True\r\n self.play = True\r\n if event.type == pygame.KEYUP:\r\n key = pygame.key.get_pressed()\r\n if not key[pygame.K_RSHIFT] and not key[pygame.K_LSHIFT]:\r\n self.shift = False\r\n if not self.dragged and event.type == pygame.MOUSEBUTTONDOWN:\r\n self.play = False\r\n which_clicked = self.get_clicked()\r\n clicked = not (which_clicked == None)\r\n\r\n # Detect double click\r\n t = time.time()\r\n double_click = False\r\n if t - self.prev_lclick < 1.0 / 3:\r\n double_click = True\r\n self.prev_lclick = t\r\n\r\n if clicked and double_click:\r\n t = self.ask_particle()\r\n try:\r\n self.sim.stypes[which_clicked] = t\r\n self.sim.types[which_clicked] = self.sim.rule.get_index(t)\r\n except:\r\n pass\r\n elif clicked:\r\n self.dragged = True\r\n self.which_dragged = which_clicked\r\n if not self.shift:\r\n self.moving = True\r\n elif self.shift:\r\n self.binding = True\r\n elif self.dragged and event.type == pygame.MOUSEMOTION:\r\n if self.moving:\r\n self.sim.pos[self.which_dragged, :] = pygame.mouse.get_pos()\r\n elif self.dragged and event.type == pygame.MOUSEBUTTONUP:\r\n if self.binding:\r\n which_clicked = self.get_clicked()\r\n clicked = not (which_clicked == None)\r\n if clicked and self.which_dragged != which_clicked and \\\r\n not (which_clicked in self.sim.bonds[self.which_dragged]):\r\n self.sim.bonds[self.which_dragged].append(which_clicked)\r\n 
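                        # mirror the new bond on the other particle so the adjacency lists stay symmetric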
self.sim.bonds[which_clicked].append(self.which_dragged)\r\n elif clicked and self.which_dragged != which_clicked and \\\r\n (which_clicked in self.sim.bonds[self.which_dragged]):\r\n self.sim.bonds[self.which_dragged].remove(which_clicked)\r\n self.sim.bonds[which_clicked].remove(self.which_dragged)\r\n self.moving = False\r\n self.binding = False\r\n self.dragged = False\r\n self.which_dragged = None\r\n elif event.type == pygame.QUIT:\r\n sys.exit()\r\n\r\n def loop(self, iterations=float('inf')):\r\n iteration = 0\r\n screen = self.screen\r\n sim = self.sim\r\n while iteration * sim.dt < iterations:\r\n\r\n n = sim.n\r\n r = sim.r\r\n if self.play:\r\n iteration += 1\r\n sim.update()\r\n if self.record:\r\n sim.record(iteration)\r\n\r\n if self.stepwise:\r\n self.play = False\r\n self.stepwise = False\r\n\r\n pos = sim.pos\r\n\r\n screen.fill(self.WHITE)\r\n # Draw particles\r\n for k in range(n):\r\n pygame.draw.circle(screen, sim.rule.colors[sim.types[k]],\r\n (int(pos[k, 0]), int(pos[k, 1])), r, 1)\r\n\r\n if self.display_types:\r\n for k in range(sim.n):\r\n t = sim.rule.get_name(sim.types[k])\r\n t = t[1:] # DEBUG: just draw the state\r\n text = self.font.render(t, False, self.BLACK)\r\n rect = text.get_rect()\r\n rect.centerx = pos[k, 0]\r\n rect.centery = pos[k, 1]\r\n self.screen.blit(text, rect)\r\n\r\n # Draw bonds\r\n for k in range(n):\r\n for l in sim.bonds[k]:\r\n if k >= l: continue\r\n pygame.draw.line(screen, self.BLACK, pos[k, :], pos[l, :])\r\n y = 10\r\n\r\n # update chain longs\r\n # if iteration * sim.dt % 10 == 0 :\r\n #chains = sim.calculate_chain_lengths()\r\n\r\n\r\n # results = powerlaw.Fit(chains)\r\n # print(results.power_law.alpha)\r\n # print(results.power_law.xmin)\r\n #R, p = results.distribution_compare('power_law', 'lognormal')\r\n text = self.font.render(\"chains: \" + str(sim.chain_lengths), False, self.BLUE)\r\n self.screen.blit(text, (10, y + 2 * self.fontsize))\r\n # text = self.font.render(\"Ratio: \" + str(R) + \" P-value: \" + str(p), False, self.BLUE)\r\n # self.screen.blit(text, (10, y + 3 * self.fontsize))\r\n # Other info\r\n if self.binding:\r\n pygame.draw.line(screen, self.BLACK,\r\n pos[self.which_dragged, :], pygame.mouse.get_pos())\r\n\r\n for i in self.info:\r\n self.screen.blit(i, (10, y))\r\n y += self.fontsize\r\n text = self.font.render(\r\n \"time = \" + str(iteration * sim.dt),\r\n False, self.BLUE)\r\n self.screen.blit(text, (10, y))\r\n energy = sim.total_energy()\r\n text = self.font.render(\r\n \"energy = \" + str(energy),\r\n False, self.BLUE)\r\n self.screen.blit(text, (10, y + self.fontsize))\r\n\r\n self.check_event()\r\n pygame.display.update()\r\n\r\n\r\nclass ChemMDP(MDP):\r\n def __init__(self, terminals, state, rules_file, gamma=.9):\r\n MDP.__init__(self, state, actlist=chem_actions,\r\n terminals=terminals, gamma=gamma)\r\n self.rules = HChemRule(rules_file)\r\n self.reward = -0.04\r\n self.sim_steps = 10\r\n\r\n def R(self, state):\r\n \"Return a numeric reward for this state.\"\r\n sim = HChem(self.rules, state)\r\n chain_lenghts = sim.calculate_chain_lengths()\r\n results = powerlaw.Fit(chain_lenghts)\r\n R, p = results.distribution_compare('power_law', 'lognormal')\r\n reward = self.reward\r\n if R > 0:\r\n reward = p\r\n return reward\r\n\r\n def T(self, state, action):\r\n if action is None:\r\n return [(0.0, state)]\r\n else:\r\n return [(0.7, self.go(state, action)),\r\n (0.1, self.go(state, add_bond_rule())),\r\n (0.1, self.go(state, add_unbond_rule())),\r\n (0.1, self.go(state, 
add_particles()))]\r\n\r\n    def go(self, state, action):\r\n        \"\"\"Return the state that results from going in this action.\"\"\"\r\n        rules = self.rules\r\n        # TODO: apply the chosen action here before running the simulation\r\n
        sim = HChem(rules, state)  # run the model here and measure the simulation loop\r\n        viewer = HChemViewer(sim)\r\n        viewer.loop(self.sim_steps)\r\n        state1 = r\"particles/exp1\" + str(time.time()) + \".dat\"\r\n        sim.save(state1, \"particles\")\r\n        return state1\r\n\r\n\r\n
class ParticleMDP(MDP):\r\n    '''Gets a state from the particle and computes the T transition; the terminal state\r\n    is reached when the power-law fit of the chain-length distribution gives p < 0.05.'''\r\n\r\n
    def __init__(self, simulator, my_index=None, its_index=None, gamma=.9):\r\n        self.sim = simulator\r\n        self.index = my_index\r\n        self.other_index = its_index\r\n        self.default_reward = -0.04\r\n        self.terminal = 10.0\r\n
        MDP.__init__(self, -0.04, actlist=particle_actions,\r\n                     terminals=[self.terminal], gamma=gamma)\r\n        self.main_states = ['single', 'double']\r\n\r\n
    def R(self, state):\r\n        \"\"\"Return a numeric reward for this state.\"\"\"\r\n        sim = self.sim\r\n        if sim.R > 0 and 0.05 > sim.p > 0.0:\r\n            return self.terminal\r\n        else:\r\n            self.reward[state] = self.reward[state] - self.default_reward + sim.p\r\n            return self.reward[state]\r\n\r\n
    def T(self, state, action):\r\n        # else:\r\n        #     costs = []\r\n        #     for act in particle_actions:\r\n        #         if act != action:\r\n        #             costs.append((0.1, self.default_reward))\r\n        #     costs.append((0.7, self.go(state, action)))\r\n        return [(0.7, self.go(state, action))]\r\n\r\n
    def go(self, state, action):\r\n        \"\"\"Return the state that results from going in this action.\"\"\"\r\n        action(self.sim, self.index, self.other_index)  # the reward is not known until the next iteration\r\n        return self.default_reward + self.R(state)\r\n\r\n
class ParticleTypeMDP(MDP):\r\n    '''Gets a state from the particle and computes the T transition; the terminal state\r\n    is reached when the power-law fit of the chain-length distribution gives p < 0.05.'''\r\n\r\n
    def __init__(self, simulator, my_index, gamma=.9):\r\n        self.sim = simulator\r\n        self.index = my_index\r\n        self.default_reward = -0.04\r\n        self.terminal = 10.0\r\n        self.last_state = None\r\n        self.action_reward = {}\r\n        self.other_index = None\r\n\r\n
        MDP.__init__(self, -0.04, actlist=particle_actions,\r\n                     terminals=[self.terminal], gamma=gamma)\r\n\r\n        self.reward[self.last_state] = self.default_reward\r\n        for action in particle_actions:\r\n            self.action_reward[action] = self.default_reward\r\n\r\n
    def R(self, state):\r\n        \"\"\"Return a numeric reward for this state.\"\"\"\r\n        sim = self.sim\r\n        if sim.R > 0 and 0.05 > sim.p > 0.0:\r\n            self.last_state = state\r\n            return self.terminal\r\n        else:\r\n            if state not in self.reward:\r\n                self.reward[state] = 0.0\r\n            self.reward[self.last_state] = self.reward[self.last_state] + sim.p\r\n            self.reward[state] += 0.4 * self.reward[self.last_state]\r\n            return self.reward[state]\r\n\r\n
    def T(self, state, action):\r\n        # else:\r\n        #     costs = []\r\n        #     for act in particle_actions:\r\n        #         if act != action:\r\n        #             costs.append((0.1, self.default_reward))\r\n        #     costs.append((0.7, self.go(state, action)))\r\n
        cost = []\r\n        for act in particle_actions:\r\n            if act != action:\r\n                cost.append((0.1, self.action_reward[act]))\r\n        self.action_reward[action] = self.go(state, action)\r\n        cost.append((0.8, self.action_reward[action]))\r\n        return cost\r\n\r\n
    def go(self, state, action):\r\n        \"\"\"Return the state that results from going in this action.\"\"\"\r\n        action(self.sim, self.index, self.other_index)  # the reward is not known until the next iteration\r\n        return self.terminal\r\n\r\n
if __name__ == '__main__':\r\n    if len(sys.argv) == 2:\r\n        sim = HChem(sys.argv[1])\r\n    elif len(sys.argv) == 3:\r\n        sim = HChem(sys.argv[1], sys.argv[2])\r\n    else:\r\n        print(\"Usage: python\", sys.argv[0], \"rules_filename [optional: particles_filename]\")\r\n        exit()\r\n\r\n
    HChemViewer(sim).loop(1000)\r\n    # q_agent = QLearningAgent(chemMDP_env, Ne=5, Rplus=2,\r\n    #                          alpha=lambda n: 60. / (59 + n))  # TODO: define the parameters of this function\r\n    # for i in range(200):\r\n    #     run_single_trial(q_agent, chemMDP_env)\r\n","sub_path":"hchem.py","file_name":"hchem.py","file_ext":"py","file_size_in_byte":41325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"517353363","text":"import os\r\n# 01: create the practice folder (\"黑马文件夹\")\r\n# os.mkdir(\"黑马文件夹\")\r\n# enter the folder and create 10 files named python16期1.txt .. python16期10.txt\r\n# os.chdir(\"黑马文件夹\")\r\n# for i in range(1, 11):\r\n#     f = open(\"python16期%d.txt\" % i, \"w\")\r\n#     f.close()\r\n\r\n
# batch-rename the files\r\n# enter the folder\r\nos.chdir(\"黑马文件夹\")\r\n# read all the file names\r\nname_list = os.listdir()\r\n# loop over them, e.g. python16期6.txt -> 顺义-python16期6.txt\r\nfor old_name in name_list:\r\n    new_name = \"顺义-\" + old_name\r\n    # rename the file\r\n    os.rename(old_name, new_name)\r\n","sub_path":"pythonstage1/day09/05-应用-批量修改文件名.py","file_name":"05-应用-批量修改文件名.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"240829330","text":"from Datathon.Utils.getData import *\r\nfrom Datathon.Utils.getPreparedData import *\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pandas.api.types as ptypes\r\nimport hdbscan\r\n\r\n
df = getTrainingData()\r\nDEPENDENT_VAR = getDependentVariable()\r\nDEPENDENT_VAR in df\r\n\r\nnumericCols = getNumericColumns()\r\ncolsWithLargeMissingValues = df.loc[:,df.isna().sum() > df.shape[0] * 0.6]\r\n\r\n\r\n
# clusterer = hdbscan.HDBSCAN(min_cluster_size=1000)\r\n# #kdf[\"cluster\"]= clusterer.fit_predict(kdf.iloc[:,:10].isnull())\r\n# df[\"cluster\"]= clusterer.fit_predict(colsWithLargeMissingValues.isnull())\r\n# df[\"cluster\"] = df[\"cluster\"].astype(\"category\")\r\n#\r\n# #df.to_csv(\"training_with_cluster.csv\")\r\n# dfc = pd.read_csv(\"training_with_cluster.csv\")\r\n# df[\"cluster\"] = dfc[\"cluster\"]\r\n\r\n\r\n
#numericCols = [c for c in numericCols if c not in [maxCol , minCol]]\r\n\r\ndef getOutliersScore(df, numericCols):\r\n    scaler = StandardScaler()\r\n    #catDf = df.drop(numericCols , axis=1)\r\n    numDf = df.loc[:,numericCols]\r\n    numDf = numDf[(numDf > -3) & (numDf < 3)]\r\n    df[\"outlierScore\"] = numDf.apply(lambda x: x.isna().sum(), axis=1)\r\n    df.loc[:, \"outlierScore\"] = list(scaler.fit_transform( df[[\"outlierScore\"]]))\r\n    return df\r\n    #return numDf.join(catDf)\r\n\r\n
def impute(series):\r\n    #cat = DICTDF[DICTDF[\"Variable Name\"] == series.name][\"Category\"].values[0]\r\n    # if cat.find(\"lab\") > -1 or cat.find(\"vital\") > -1:\r\n    #     imputeVal = df[df[DEPENDENT_VAR] == 1][series.name].mean()\r\n    #     series.fillna(imputeVal , inplace=True)\r\n
    if ptypes.is_numeric_dtype(series):\r\n        # if series.name in colsWithLargeMissingValues.columns:\r\n        #     imputeVal = df[df[DEPENDENT_VAR] == 1][series.name].mean()\r\n        # else:\r\n        imputeVal = series.mean()\r\n        series.fillna(imputeVal , inplace=True)\r\n
    if ptypes.is_categorical_dtype(series):\r\n        #imputeVal = series.value_counts().sort_values().index[0]\r\n        #series.fillna(imputeVal , inplace=True)\r\n        #imputeVal = \"missing\"\r\n        
#series = series.astype(str).fillna(\"missing\").astype(\"category\")\n imputeVal = df[df[DEPENDENT_VAR] == 1][series.name].value_counts().sort_values().index[0]\n series.fillna(imputeVal , inplace=True)\n\n return series\n#\n# def imputeByCluster(series):\n# if ptypes.is_numeric_dtype(series):\n# series.fillna( series.mean(skipna=True) , inplace=True)\n# if ptypes.is_categorical_dtype(series):\n# imputeVal = df[df[DEPENDENT_VAR] == 1][series.name].value_counts().sort_values().index[0]\n# series.fillna(imputeVal , inplace=True)\n#\n# return series\n#\n# def nimpute(fdf):\n# return fdf.apply(imputeByCluster,axis=0)\n\n#df = df.groupby(\"cluster\").apply(nimpute)\n\ndf = df.apply(impute ,axis=0)\n\ndf = diffCols(df , numericCols)\ndf.shape\n\ndf[DEPENDENT_VAR].isna().sum()\n\ndf.isna().sum()[df.isna().sum() > 0].index\n\n# means = df[df[DEPENDENT_VAR] == 1].mean(skipna =True)\n# df[numericCols] = df[numericCols].fillna( means)\n\nfrom sklearn.preprocessing import PowerTransformer\ndf.loc[:,numericCols] = PowerTransformer().fit_transform(df.loc[:,numericCols])\n\n#df[((df[numericCols] > 3) | (df[numericCols] < -3)).any(axis=1)] = df[((df[numericCols] > 3) & (df[numericCols] < -3)).all(axis=1)]\n\n\ndf = getOutliersScore(df, numericCols)\ndf[\"outlierScore\"]\ndf.shape\n\n\nfrom plotnine import *\n\n(ggplot(df)\n + aes(x = 'weight')\n + geom_histogram())\n\n\nfrom sklearn.model_selection import train_test_split\n\n# colsToInclude = ['apache_4a_icu_death_prob',\n# 'apache_4a_hospital_death_prob',\n# 'd1_lactate_min',\n# 'd1_spo2_min',\n# 'd1_sysbp_min',\n# 'd1_lactate_max',\n# 'd1_arterial_ph_min',\n# 'gcs_motor_apache',\n# 'd1_sysbp_noninvasive_min',\n# 'temp_apache',\n# 'd1_heartrate_min',\n# 'apache_2_diagnosis_114.0',\n# 'd1_temp_min',\n# 'd1_bun_max',\n# 'd1_mbp_noninvasive_min',\n# 'gcs_eyes_apache',\n# 'd1_bun_min',\n# 'd1_temp_max',\n# 'd1_mbp_min',\n# 'bun_apache',\n# 'd1_creatinine_max',\n# 'creatinine_apache',\n# 'd1_platelets_min',\n# 'ph_apache',\n# 'd1_heartrate_max',\n# 'bmi',\n# 'd1_hco3_min',\n# 'd1_arterial_ph_max',\n# 'heart_rate_apache',\n# 'age',\n# 'd1_pao2fio2ratio_min',\n# 'd1_wbc_min',\n# 'd1_platelets_max',\n# 'd1_pao2fio2ratio_max',\n# 'd1_resprate_min',\n# 'd1_creatinine_min',\n# 'd1_glucose_min',\n# 'weight',\n# 'd1_hco3_max',\n# 'pre_icu_los_days',\n# 'd1_arterial_pco2_min',\n# 'd1_diasbp_noninvasive_min',\n# 'd1_wbc_max',\n# 'gcs_verbal_apache',\n# 'd1_diasbp_min',\n# 'h1_temp_min',\n# 'd1_sysbp_max',\n# 'h1_resprate_min',\n# 'd1_arterial_po2_min',\n# 'fio2_apache',\n# 'glucose_apache',\n# 'map_apache',\n# 'h1_temp_max',\n# 'wbc_apache',\n# 'd1_sysbp_noninvasive_max',\n# 'h1_sysbp_min',\n# 'd1_resprate_max',\n# 'd1_sodium_max',\n# 'd1_glucose_max',\n# 'd1_albumin_min']\ncolsToInclude = df.columns\n#colsToExclude = [DEPENDENT_VAR , \"encounter_id\" , \"patient_id\" , \"icu_id\" , \"apache_3j_diagnosis\"]\ncolsToExclude = [DEPENDENT_VAR , \"encounter_id\" , \"patient_id\"]\ncolsToExclude += [ c for c in df.columns if c not in colsToInclude]\n#df[\"intercept\"] = 1\nY = df[DEPENDENT_VAR]\nX = df.drop(columns = colsToExclude)\n\nX.shape\n#from sklearn.preprocessing import OneHotEncoder\n#encoder = OneHotEncoder(drop=\"first\")\ncats = X.apply(ptypes.is_categorical_dtype)\ncats = cats[cats == True]\n\nX = pd.get_dummies(X , columns=list(cats.index) , drop_first=True)\nX.shape[1]\nX_train, X_test, y_train, y_test = train_test_split(X, Y ,test_size=0.33, stratify=Y)\n\ny_train.value_counts(normalize=True)\nfrom sklearn.metrics import accuracy_score , confusion_matrix 
,roc_curve,roc_auc_score ,precision_score, recall_score\nfrom sklearn import metrics\n\nfrom sklearn.linear_model import LogisticRegression\n\n\nlogit = LogisticRegression(max_iter = 10000)\nlogit.fit(X_train , y_train)\n\n\n\npreds = logit.predict(X_test)\nprobs_y=logit.predict_proba(X_test)[:,1]\n\n# for thresh in np.linspace(0, 1, num=30):\n# preds = np.where(probs_y > thresh,1, 0)\n# #y_test_dum = np.where(y_test == 1,1, 0)\n# #print(preds, y_test_dum)\n# a=accuracy_score(y_test , preds)\n# r=roc_auc_score(y_test , preds)\n# rec=recall_score(y_test , preds)\n# p = precision_score(y_test,preds)\n# print(f\"thresh : {thresh} acc : {a} auc: {r} recall : {rec} precision: {p}\")\n\n\n\nfpr, tpr, thresholds = metrics.roc_curve(y_test , probs_y ,drop_intermediate=False, pos_label=1)\nrocdf = pd.DataFrame({\"fpr\" : fpr , \"tpr\" : tpr , \"thresholds\" : thresholds})\n\n(ggplot(rocdf , aes(x='fpr' , y='tpr', fill='thresholds')) + geom_line())\n\naccuracy_score(y_test , preds)\nroc_auc_score(y_test , probs_y)\n\nrocdf[rocdf[\"fpr\"] < 0.25].sort_values(\"tpr\" , ascending=False)\nimport statsmodels.api as sm\nlogitstat = sm.Logit(y_train.astype(float), X_train.astype(float))\nlogitstat.fit()\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nrf = RandomForestClassifier()\nrf.fit(X_train , y_train)\nroc_auc_score(y_test , rf.predict_proba(X_test)[:,1])\n\n\nimportances = pd.DataFrame({\"imp\" : rf.feature_importances_ , \"feature\" : X_train.columns})\nimportances.sort_values(\"imp\" , ascending=False).head(60)\n\nlist(importances.sort_values(\"imp\" , ascending=False).head(60)[\"feature\"])\n\nimportances[importances[\"feature\"] == \"cluster\"]\n\n\nfrom sklearn.ensemble import AdaBoostClassifier\nada = AdaBoostClassifier(n_estimators=100, random_state=0)\nada.fit(X_train , y_train)\nroc_auc_score(y_test , ada.predict_proba(X_test)[:,1])\n","sub_path":"Datathon/EDA/hydrogen/Modeling/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":7358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530722013","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np \nimport pandas as pd\nimport matplotlib\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom wordcloud import WordCloud, STOPWORDS\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\n\n# In[4]:\n\n\ndata_df = pd.read_csv(\"/Users/VRodrigues/Documents/reddit_wsb.csv\")\n\n\n# In[5]:\n\n\ndata_df.head()\n\n\n# In[6]:\n\n\ndata_df.info()\n\n\n# In[9]:\n\n\ndef missing_data(data):\n total = data.isnull().sum()\n percent = (data.isnull().sum()/data.isnull().count()*100)\n tt = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\n types = []\n for col in data.columns:\n dtype = str(data[col].dtype)\n types.append(dtype)\n tt['Types'] = types\n return(np.transpose(tt))\n\n\n# In[10]:\n\n\nmissing_data(data_df)\n\n\n# In[15]:\n\n\ndef show_wordcloud(data, title=\"\"):\n text = \" \".join(t for t in data.dropna())\n stopwords = set(STOPWORDS)\n stopwords.update([\"t\", \"co\", \"https\", \"amp\", \"U\", \"fuck\", \"fucking\",\"retard\", \"new\", \"will\", \"buy\", \"time\",\"stock\"])\n wordcloud = WordCloud(stopwords=stopwords, scale=4, max_font_size=50, max_words=500,background_color=\"black\").generate(text)\n fig = plt.figure(1, figsize=(16,16))\n plt.axis('off')\n fig.suptitle(title, fontsize=20)\n fig.subplots_adjust(top=2.3)\n plt.imshow(wordcloud, 
interpolation='bilinear')\n plt.show()\n\n\n# In[16]:\n\n\nshow_wordcloud(data_df['title'], title = 'Prevalent words in titles')\n\n\n# In[17]:\n\n\nshow_wordcloud(data_df['body'], title = 'Prevalent words in post bodies')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"wallstreetbets.py","file_name":"wallstreetbets.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"342933961","text":"import numpy as np\nfrom functools import partial\nimport random\n\nfrom .helpers import sqexp_p_vectors\nfrom .qff.embedding import HermiteEmbedding\n\n\n\nINFINITE_KERNELS = ['rbf']\n\n\nclass BO:\n ''' Wrapper over various kernel bayesian optimization methods.'''\n \n def __init__(self, kernel='rbf', m=0, lam=1.0):\n \n self.kernel = kernel\n self.inf_kernel = False\n self.lam = lam\n self.t = 0\n\n if self.kernel not in INFINITE_KERNELS and m<1:\n raise ValueError('Incorrect m provided')\n \n if m == 0:\n self.k_func = partial(sqexp_p_vectors, p=2, scale=1.0)\n self.inf_kernel = True\n \n else:\n self.m = m\n self.feat_func = lambda x: x\n self.inf_kernel = False\n self.rho = 1.0\n \n self.reset()\n\n \n def reset(self):\n ''' Reset the BO parameters '''\n self.t = 0\n if self.inf_kernel:\n self.K_t = None\n self.X_t = None\n \n else:\n self.S_t = self.lam * np.eye(self.m)\n self.u_t = np.zeros((1, self.m))\n self.X_t = None\n \n \n def _update_inf_dim(self, x_t, y_t):\n # rank 1 update of kernel matrices\n\n if self.K_t is None:\n self.K_t = 1.0/(self.k_func(x_t, x_t) + self.lam) * np.ones((1, 1))\n else:\n inp_X = np.tile(x_t, [self.t, 1])\n b = self.k_func(inp_X, self.X_t)\n \n K_22 = np.power(self.k_func(x_t,x_t) + self.lam - np.matmul(b.T, np.matmul(self.K_t, b)), -1)\n K_11 = self.K_t + K_22 * np.matmul(self.K_t, np.matmul(b, np.matmul(b.T, self.K_t.T)))\n K_12 = -K_22 * np.matmul(self.K_t, b)\n K_21 = -K_22 * np.matmul(b.T, self.K_t.T)\n \n K_t_up = np.concatenate((K_11, K_12), axis=1)\n K_t_down = np.concatenate((K_21, K_22), axis=1)\n\n self.K_t = np.concatenate((K_t_up, K_t_down), axis=0)\n \n self.t += 1\n # padding y appropriately\n y_t = y_t * np.ones((1, 1))\n \n if self.X_t is None:\n self.X_t = x_t\n self.y_t = y_t\n else:\n self.X_t = np.concatenate((self.X_t, x_t), axis=0)\n self.y_t = np.concatenate((self.y_t, y_t), axis=0)\n \n def _update_fin_dim(self, x_t, y_t):\n # regular finite-dimensional update\n self.t += 1\n\n phi_t = self.feat_func(x_t)\n y_t = np.expand_dims(y_t, 0)\n\n if self.X_t is None:\n self.X_t = phi_t\n self.y_t = y_t\n else:\n self.X_t = np.concatenate((self.X_t, x_t), axis=0)\n self.y_t = np.concatenate((self.y_t, y_t), axis=0)\n \n self.S_t += np.matmul(phi_t, phi_t.T)\n self.u_t += y_t * phi_t\n \n def update(self, x_t, y_t):\n ''' Update internal parameters with new observations.'''\n if self.inf_kernel:\n self._update_inf_dim(x_t, y_t)\n else:\n self._update_fin_dim(x_t, y_t)\n \n def _params_fin(self):\n # return mu, sigma for finite k\n S_inv = np.linalg.inv(self.S_t)\n mu = np.matmul(S_inv, np.matmul(self.X_t.T, self.y_t))\n\n return mu, S_inv\n \n def _params_inf(self, x):\n # return mu, sigma for infinite dimensional k\n x_rep = np.tile(x, [self.t, 1])\n k_t = self.k_func(self.X_t, x_rep)\n \n mu = np.matmul(k_t.T, np.matmul(self.K_t, self.y_t))\n sigma = np.sqrt(self.k_func(x, x) - np.matmul(k_t.T, np.matmul(self.K_t, k_t)))\n\n return mu, sigma\n \n def get_posterior(self, D_t):\n ''' Compute the posterior mean and variance for each arm in D_t '''\n \n mus, sigmas = 
[], []\r\n        if not self.inf_kernel:\r\n            u, S_inv = self._params_fin()\r\n\r\n        for x in D_t:\r\n            if self.inf_kernel:\r\n                mu, sigma = self._params_inf(x)\r\n            else:\r\n                phi = self.feat_func(x)\r\n                mu = np.dot(phi, u)\r\n                sigma = np.matmul(phi, np.matmul(S_inv, phi.T))\r\n\r\n            mus.append(mu)\r\n            sigmas.append(sigma)\r\n\r\n        return mus, sigmas\r\n\r\n\r\n
class Agent:\r\n\r\n    def __init__(self):\r\n        self.t = 0\r\n\r\n    def select_action(self, *args, **kwargs):\r\n        self.t += 1\r\n\r\n    def update(self, *args, **kwargs):\r\n        pass\r\n\r\n    def reset(self):\r\n        pass\r\n\r\n
class Random(Agent):\r\n\r\n    def __init__(self):\r\n        super(Random, self).__init__()\r\n\r\n    def select_action(self, D_t, *args, **kwargs):\r\n        # randomly select an action\r\n        super(Random, self).select_action()\r\n        return random.choice(D_t)\r\n\r\n\r\n
class GP_UCB(Agent):\r\n    ''' Implements regular GP-UCB with the original kernel. '''\r\n\r\n    def __init__(self, lam=1.0, m=0, B=1.0, kernel='rbf'):\r\n        super(GP_UCB, self).__init__()\r\n        self.kernel = kernel\r\n        self.bo = BO(kernel, m, lam)\r\n        self.delta = 0.01\r\n        self.B = B\r\n\r\n
    def update(self, x_t, y_t):\r\n        self.bo.update(x_t, y_t)\r\n\r\n    def reset(self):\r\n        self.bo.reset()\r\n\r\n
    def get_beta(self, D_t):\r\n        if self.kernel in INFINITE_KERNELS:\r\n            return np.sqrt(2 * np.log(len(D_t)\r\n                           * (self.bo.t ** 2) * (np.pi ** 2) / (6 * self.delta)))\r\n        else:\r\n            # finite dimensional kernel, use Abbasi-Yadkori\r\n            return self.bo.rho * \\\r\n                np.sqrt(self.bo.m * np.log(1 + self.bo.t * (self.B**2) / self.bo.lam)\r\n                        - np.log(self.delta)) + self.bo.lam ** (0.5) * self.B\r\n\r\n
    def select_action(self, D_t, beta_mult):\r\n        if self.bo.t == 0:\r\n            return random.choice(D_t)\r\n\r\n        beta_t = self.get_beta(D_t) * beta_mult\r\n        best_x, max_ucb = None, -np.inf\r\n        mus, sigmas = self.bo.get_posterior(D_t)\r\n        for x, mu, sigma in zip(D_t, mus, sigmas):\r\n            ucb_x = mu + beta_t * sigma\r\n            if max_ucb < ucb_x:\r\n                best_x, max_ucb = x, ucb_x\r\n\r\n        return best_x","sub_path":"private_gp/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"78376758","text":"import logging\r\nimport shutil\r\nimport tempfile\r\nfrom pathlib import Path\r\nfrom typing import List, Optional, Sequence\r\nfrom zipfile import ZipFile\r\n\r\nimport torch\r\n\r\nfrom pybio import spec\r\nfrom pybio.spec.utils import train\r\nfrom tiktorch.server.exemplum import Exemplum\r\n\r\nMODEL_EXTENSIONS = (\".model.yaml\", \".model.yml\")\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n
def guess_model_path(file_names: List[str]) -> Optional[str]:\r\n    for file_name in file_names:\r\n        if file_name.endswith(MODEL_EXTENSIONS):\r\n            return file_name\r\n\r\n    return None\r\n\r\n\r\n
def eval_model_zip(model_zip: ZipFile, devices: Sequence[str], cache_path: Optional[Path] = None):\r\n    temp_path = Path(tempfile.mkdtemp(prefix=\"tiktorch\"))\r\n    if cache_path is None:\r\n        cache_path = temp_path / \"cache\"\r\n\r\n    model_zip.extractall(temp_path)\r\n    spec_file_str = guess_model_path([str(file_name) for file_name in temp_path.glob(\"*\")])\r\n    pybio_model = spec.utils.load_model(spec_file_str, root_path=temp_path, cache_path=cache_path)\r\n\r\n
    devices = [torch.device(d) for d in devices]\r\n    if pybio_model.spec.training is None:\r\n        return Exemplum(pybio_model=pybio_model, _devices=devices)\r\n    else:\r\n        ret = train(pybio_model, _devices=devices)\r\n        assert isinstance(ret, Exemplum)\r\n\r\n
    def _on_error(function, path, exc_info):\r\n        logger.warning(\"Failed to delete temp directory %s\", path)\r\n\r\n    # shutil.rmtree takes the callback via the 'onerror' keyword\r\n    shutil.rmtree(temp_path, onerror=_on_error)\r\n\r\n    return 
ret\n","sub_path":"tiktorch/server/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"305814738","text":"import copy\nimport random\n# Consider using the modules imported above.\n\nclass Hat:\n \"\"\" Class that models balls in a hat probablistically \"\"\"\n\n def __init__(self, **kwargs):\n self.contents = []\n for key, value in kwargs.items():\n for i in range(value):\n self.contents.append(key)\n\n def draw(self, number):\n balls_drawn = []\n\n if number >= len(self.contents):\n return self.contents\n\n # Pick a ball at random and remove from the bag\n for i in range(number):\n ball_picked = random.choice(self.contents)\n balls_drawn.append(ball_picked)\n self.contents.pop(self.contents.index(ball_picked))\n \n return balls_drawn\n\n\ndef experiment(hat, expected_balls, num_balls_drawn, num_experiments):\n \n num_desired_results = 0\n\n for i in range(num_experiments):\n hat_copy = copy.deepcopy(hat)\n\n actual = hat_copy.draw(num_balls_drawn)\n \n # Convert result to dict:\n actual_dict = {ball: actual.count(ball) for ball in set(actual)}\n\n # Compare drawn balls to desired result:\n result = True\n for key, value in expected_balls.items():\n if key not in actual_dict or actual_dict[key] < expected_balls[key]:\n result = False\n break\n\n if result:\n num_desired_results += 1\n\n return num_desired_results/num_experiments","sub_path":"prob_calculator.py","file_name":"prob_calculator.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"370773467","text":"import math\nfrom flask import request\n\n__version__ = '0.0.1'\n\n\nclass Page(object):\n\n def __init__(self, items, page, page_size, total_count):\n self.items = items\n self.previous_page = None\n self.next_page = None\n self.has_previous = page > 1\n if self.has_previous:\n self.previous_page = page - 1\n\n previous_items = (page - 1) * page_size\n self.has_next = previous_items + len(items) < total_count\n if self.has_next:\n self.next_page = page + 1\n\n self.total_count = total_count\n self.total_pages = int(math.ceil(total_count / float(page_size)))\n self.page = page\n self.page_size = page_size\n\n\ndef paginate(query, max_per_page=1000):\n default_per_page = 20\n default_page = 1\n page = request.args.get('page', type=int) or default_page\n page_size = request.args.get('per_page', type=int) or default_per_page\n if page <= 0:\n raise ValueError('page needs to be >= 1')\n if page_size <= 0:\n raise ValueError('page_size needs to be >= 1')\n\n page_size = min(page_size, max_per_page)\n items = query.limit(page_size).offset((page - 1) * page_size).all()\n # We remove the ordering of the query since it doesn't matter for getting a count and\n # might have performance implications as discussed on this Flask-SqlAlchemy issue\n # https://github.com/mitsuhiko/flask-sqlalchemy/issues/100\n total = query.order_by(None).count()\n return Page(items, page, page_size, total)\n","sub_path":"sqlalchemy_pagination/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"128970993","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# author Aistis Jokubauskas aistis.jokubauskas@mif.vu.lt\n\n# -*- coding: utf-8 -*-\n#\n# author Aistis Jokubauskas aistis.jokubauskas@mif.vu.lt\n\nimport 
logging\n\nlogger = logging.getLogger()\n\nclass Solver(object):\n\n def __init__(self, target=None, rules=None, gdb=None):\n self.target = target\n self.rules = rules\n self.gdb = gdb\n self.path = []\n\n self.goal_stack = set()\n\n self.output_indent_level = 0\n self.output_indent_unit = '| '\n self.output_step_counter = 1\n\n def solve(self):\n result, self.path = self._solve_one_target(self.target)\n return result\n\n def _solve_one_target(self, target):\n if target in self.goal_stack:\n self._log('target: loop reason %s in goal stack %s.' %\n (str(target), str(list(self.goal_stack))))\n return False, []\n\n if target in self.gdb: # no need to solve, since its in database\n self._log('target: %s in gdb' % str(target))\n return True, []\n\n rule_list = self._get_rules(target)\n\n if len(rule_list) == 0: # no rules to solve this target\n self._log('target: %s, empty rule list' % target)\n return False, []\n\n self._log('target: %s, rule list: %s' % (str(target), str(rule_list)))\n\n # adding target to goal stack\n self.goal_stack.add(target)\n\n self.output_indent_level += 1\n result, path = self._solve_at_least_one_rule(*rule_list)\n self.output_indent_level -= 1\n\n # adding target to goal stack\n self.goal_stack.remove(target)\n\n if not result:\n self._log('failed to solve ' + str(target) + ' goal')\n else:\n self._log('solved ' + str(target) + ' goal')\n\n return result, path\n\n def _solve_at_least_one_rule(self, *rule_list):\n # self._log('rule list: ' + str(list(rule_list)))\n\n for rule in rule_list:\n self._log('trying rule: %s' % str(rule))\n self.output_indent_level += 1\n\n solved, path = self._solve_all_targets(*rule.condition)\n if solved:\n self.output_indent_level -= 1\n return True, [rule.name] + path\n\n self.output_indent_level -= 1\n\n self._log('failed to solve all rules')\n return False, []\n\n def _solve_all_targets(self, *target_list):\n \"\"\"Returns bool and a list of rule name that where used\"\"\"\n self._log('target list: %s' % str(target_list))\n\n path = []\n self.output_indent_level += 1\n\n for target in target_list:\n solved, sub_path = self._solve_one_target(target)\n if not solved:\n self.output_indent_level -= 1\n return False, []\n path += sub_path\n\n self.output_indent_level -= 1\n return True, path\n\n def _get_rules(self, target):\n \"\"\"Returns a list of rules that can be used to find this target\"\"\"\n rule_list = []\n for rule in self.rules:\n if rule.target == target:\n rule_list.append(rule)\n return rule_list\n\n def _log(self, msg, indent_level=0):\n prefix = self.output_indent_unit * \\\n (self.output_indent_level + indent_level)\n logger.debug('%2d) %s' % (self.output_step_counter, prefix + msg))\n self.output_step_counter += 1\n","sub_path":"backword_chaining_solver.py","file_name":"backword_chaining_solver.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"87727290","text":"import time\nfrom django.db import models\n\nfrom globals.decorators import check_shop\n\n\nclass OrderManager(models.Manager):\n def get_all(self, **kwargs):\n obj = self.filter(**kwargs).order_by('-created')\n return obj\n\n def get(self, **kwargs):\n obj = self.filter(**kwargs).first()\n if not obj:\n raise ValueError('Order does not exist')\n return obj\n\n def create(self, **kwargs):\n if 'shop' not in kwargs or not kwargs['shop']:\n raise ValueError('Shop is required')\n\n check_shop(kwargs['shop'])\n\n obj = self.model(**kwargs)\n obj.save()\n return obj\n\n 
def edit(self, pk, **kwargs):\n obj = self.model.objects.get(pk=pk)\n for (key, value) in kwargs.items():\n setattr(obj, key, value)\n\n obj.modified = time.time()\n obj.save()\n return obj\n\n def delete(self, pk):\n obj = self.model.objects.get(pk=pk)\n obj.delete()\n return obj\n","sub_path":"eshop_server/order/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"463473647","text":"import tensorflow as tf\nimport numpy as np\nimport cv2\nimport time\nimport os\n\n\nclass DataLoader():\n def __init__(self, file_list, args, phase, debug=False):\n print(\"labels; \", args.num_labels)\n time.sleep(3)\n self.file_list = file_list\n self.args = args\n self.phase = phase\n self.file_list, self.landmarks, self.attributes, self.euler_angles = self.gen_data(\n self.file_list, self.args.num_labels)\n self.num_file = len(self.file_list)\n\n if debug:\n n = self.args.batch_size * 10\n file_list = self.file_list[:n]\n landmarks = self.landmarks[:n]\n attributes = self.attributes[:n]\n euler_angles = self.euler_angles[:n]\n\n def gen_data(self, file_list, num_labels):\n with open(file_list, 'r') as f:\n lines = f.readlines()\n filenames, landmarks, attributes, euler_angles = [], [], [], []\n for line in lines:\n line = line.strip().split()\n path = line[0]\n landmark = line[1:num_labels*2+1] # 1:197\n attribute = line[num_labels*2+1:num_labels*2+7] # 197:203\n euler_angle = line[num_labels*2+7:num_labels*2+10] # 203:206\n\n landmark = np.asarray(landmark, dtype=np.float32)\n attribute = np.asarray(attribute, dtype=np.int32)\n euler_angle = np.asarray(euler_angle, dtype=np.float32)\n filenames.append(path)\n landmarks.append(landmark)\n attributes.append(attribute)\n euler_angles.append(euler_angle)\n filenames = np.asarray(filenames, dtype=np.str)\n landmarks = np.asarray(landmarks, dtype=np.float32)\n attributes = np.asarray(attributes, dtype=np.int32)\n euler_angles = np.asarray(euler_angles, dtype=np.float32)\n return (filenames, landmarks, attributes, euler_angles)\n\n def get_dataset(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.file_list, self.landmarks, self.attributes, self.euler_angles))\n\n def _parse_data(filename, landmarks, attributes, euler_angles):\n # filename, landmarks, attributes = data\n file_contents = tf.read_file(filename)\n image = tf.image.decode_png(\n file_contents, channels=self.args.image_channels)\n # print(image.get_shape())\n # image.set_shape((args.image_size, args.image_size, args.image_channels))\n image = tf.image.resize_images(\n image, (self.args.image_size, self.args.image_size), method=0)\n image = tf.cast(image, tf.float32)\n\n image = image / 256.0\n return (image, landmarks, attributes, euler_angles)\n\n dataset = dataset.map(_parse_data)\n dataset = dataset.shuffle(buffer_size=10000)\n return dataset, self.num_file\n\n\nif __name__ == '__main__':\n file_list = 'data/train_data/list.txt'\n num_labels = 98\n filenames, landmarks, attributes = gen_data(file_list, num_labels)\n for i in range(len(filenames)):\n filename = filenames[i]\n landmark = landmarks[i]\n attribute = attributes[i]\n print(attribute)\n img = cv2.imread(filename)\n h, w, _ = img.shape\n landmark = landmark.reshape(-1, 2) * [h, w]\n for (x, y) in landmark.astype(np.int32):\n cv2.circle(img, (x, y), 1, (0, 0, 255))\n cv2.imshow('0', img)\n 
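        # block until a key is pressed so each annotated sample can be inspected before the next one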
cv2.waitKey(0)\n","sub_path":"generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"507865214","text":"import pandas as pd\nimport numpy as np\nimport mdtraj as md\nimport itertools\n\ndef initProteins():\n proteins = pd.DataFrame(index=['FUS','FUS12E','A2','aSyn','ht40','p15PAF'], columns=['labels','eps_factor','wh','L','temp','obs','pH','ionic','expPREs','fasta','path'])\n fasta_FUS = \"\"\"MASNDYTQQATQSYGAYPTQPGQGYSQQSSQPYGQQSYSGYSQSTDTSGYGQSSYSSYGQ\nSQNTGYGTQSTPQGYGSTGGYGSSQSSQSSYGQQSSYPGYGQQPAPSSTSGSYGSSSQSS\nSYGQPQSGSYSQQPSYGGQQQSYGQQQSYNPPQGYGQQNQYNS\"\"\".replace('\\n', '')\n fasta_FUS12E = \"\"\"GMASNDYEQQAEQSYGAYPEQPGQGYEQQSEQPYGQQSYSGYEQSTDTSGYGQSSYSSYGQ\nEQNTGYGEQSTPQGYGSTGGYGSEQSEQSSYGQQSSYPGYGQQPAPSSTSGSYGSSEQSS\nSYGQPQSGSYEQQPSYGGQQQSYGQQQSYNPPQGYGQQNQYNS\"\"\".replace('\\n', '')\n fasta_A2 = \"\"\"GHMGRGGNFGFGDSRGGGGNFGPGPGSNFRGGSDGYGSGRGFGDGYNGYGGGPGG\nGNFGGSPGYGGGRGGYGGGGPGYGNQGGGYGGGYDNYGGGNYGSGNYNDFGNYNQQPSNYGPMKSGNFGGSRNMGG\nPYGGGNYGPGGSGGSGGYGGRSRY\"\"\".replace('\\n', '')\n fasta_aSyn = \"\"\"MDVFMKGLSKAKEGVVAAAEKTKQGVAEAAGKTKEGVLYVGSKTKEGVVHGVATVAEKTK\nEQVTNVGGAVVTGVTAVAQKTVEGAGSIAAATGFVKKDQLGKNEEGAPQEGILEDMPVDPDNEAYEMPSEEGYQDYEPEA\"\"\".replace('\\n', '') \n fasta_ht40 = \"\"\"MAEPRQEFEVMEDHAGTYGLGDRKDQGGYTMHQDQEGDTDAGLKESPLQTPTEDGSEEP\nGSETSDAKSTPTAEDVTAPLVDEGAPGKQAAAQPHTEIPEGTTAEEAGIGDTPSLEDEAAGHVTQARMVSKSKDGTGSDDKK\nAKGADGKTKIATPRGAAPPGQKGQANATRIPAKTPPAPKTPPSSGEPPKSGDRSGYSSPGSPGTPGSRSRTPSLPTPPTREP\nKKVAVVRTPPKSPSSAKSRLQTAPVPMPDLKNVKSKIGSTENLKHQPGGGKVQIINKKLDLSNVQSKCGSKDNIKHVPGGGS\nVQIVYKPVDLSKVTSKCGSLGNIHHKPGGGQVEVKSEKLDFKDRVQSKIGSLDNITHVPGGGNKKIETHKLTFRENAKAKTD\nHGAEIVYKSPVVSGDTSPRHLSNVSSTGSIDMVDSPQLATLADEVSASLAKQGL\"\"\".replace('\\n', '') \n fasta_p15PAF = \"\"\"MVRTKADSVPGTYRKVVAARAPRKVLGSSTSATNSTSVSSRKAENKYAGGNPVCVRPTPK\nWQKGIGEFFRLSPKDSEKENQIPEEAGSSGLGKAKRKACPLQPDHTNDEKE\"\"\".replace('\\n', '') \n proteins.loc['FUS'] = dict(labels=[16, 86, 142],eps_factor=0.2,L=40.5,wh=850,temp=298,obs='rate',pH=5.5,fasta=list(fasta_FUS),ionic=0.15,path='./') \n proteins.loc['FUS12E'] = dict(labels=[16, 86, 142],eps_factor=0.2,L=40.5,wh=850,temp=298,obs='rate',pH=5.5,fasta=list(fasta_FUS12E),ionic=0.15,path='./')\n proteins.loc['A2'] = dict(labels=[99, 143],eps_factor=0.2,L=48,wh=850,temp=298,obs='rate',pH=5.5,fasta=list(fasta_A2),ionic=0.005,path='./')\n proteins.loc['aSyn'] = dict(eps_factor=0.2,temp=283,pH=7.4,ionic=0.150,L=25.5,fasta=list(fasta_aSyn),path='./')\n proteins.loc['ht40'] = dict(eps_factor=0.2,temp=278,pH=6.8,ionic=0.100,L=48.0,fasta=list(fasta_ht40),path='./')\n proteins.loc['p15PAF'] = dict(eps_factor=0.2,temp=298,pH=7.0,ionic=0.150,L=34.0,fasta=list(fasta_p15PAF),path='./')\n return proteins\n\ndef genParamsLJ(df,name,prot):\n fasta = prot.fasta.copy()\n r = df.copy()\n r.loc['X'] = r.loc[fasta[0]]\n r.loc['Z'] = r.loc[fasta[-1]]\n fasta[0] = 'X'\n fasta[-1] = 'Z'\n types = list(np.unique(fasta))\n sigmamap = pd.DataFrame((r.sigmas.values+r.sigmas.values.reshape(-1,1))/2,\n index=r.sigmas.index,columns=r.sigmas.index)\n lambdamap = pd.DataFrame((r.lambdas.values+r.lambdas.values.reshape(-1,1))/2,\n index=r.lambdas.index,columns=r.lambdas.index)\n lj_eps = prot.eps_factor*4.184\n # Generate pairs of amino acid types\n pairs = np.array(list(itertools.combinations_with_replacement(types,2)))\n return pairs, lj_eps, lambdamap, sigmamap, fasta, types\n\ndef genParamsDH(df,name,prot):\n kT = 8.3145*prot.temp*1e-3\n r = 
df.copy()\n # Set the charge on HIS based on the pH of the protein solution\n r.loc['H','q'] = 1. / ( 1 + 10**(prot.pH-6) )\n r.loc['X','q'] = r.loc[prot.fasta[0],'q'] + 1.\n r.loc['Z','q'] = r.loc[prot.fasta[-1],'q'] - 1.\n # Calculate the prefactor for the Yukawa potential\n qq = pd.DataFrame(r.q.values*r.q.values.reshape(-1,1),index=r.q.index,columns=r.q.index)\n fepsw = lambda T : 5321/T+233.76-0.9297*T+0.1417*1e-2*T*T-0.8292*1e-6*T**3\n epsw = fepsw(prot.temp)\n lB = 1.6021766**2/(4*np.pi*8.854188*epsw)*6.022*1000/kT\n yukawa_eps = qq*lB*kT\n # Calculate the inverse of the Debye length\n yukawa_kappa = np.sqrt(8*np.pi*lB*prot.ionic*6.022/10)\n return yukawa_eps, yukawa_kappa\n\ndef genDCD(residues,name,prot,path,run_type,n_chains):\n \"\"\" \n Generates coordinate and trajectory \n in convenient formats for multiple chains\n \"\"\"\n top = md.Topology()\n for _ in range(n_chains):\n chain = top.add_chain()\n for resname in prot.fasta:\n residue = top.add_residue(residues.loc[resname,'three'], chain)\n top.add_atom(residues.loc[resname,'three'], element=md.element.carbon, residue=residue)\n traj = md.load_dcd(path+\"/{:s}.dcd\".format(run_type), top)\n traj.center_coordinates()\n traj.xyz *= 10\n traj.unitcell_lengths *= 10\n traj.xyz += traj.unitcell_lengths[0,0]/2\n traj[:].save_dcd(path+\"/{:s}.dcd\".format(name))\n traj[0].save_pdb(path+\"/{:s}.pdb\".format(name))\n","sub_path":"2021/CG-IDPs-Tesei-et-al/two-chain/code/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"308568722","text":"class ListNode:\n def __init__(self, val):\n self.val = val\n self.next = None\n\ndef merge_sorted_lists(l1, l2):\n dummy = ListNode(-1)\n tail = dummy\n while l1 and l2:\n if l1.val < l2.val:\n tail.next = l1\n tail = tail.next\n l1 = l1.next\n else:\n tail.next = l2\n l2 = l2.next\n tail = tail.next\n tail.next = l1 or l2\n return dummy.next\n\n\nif __name__ == \"__main__\":\n l1 = ListNode(4)\n head = l1\n l1.next = ListNode(7)\n l1 = l1.next\n l1.next = ListNode(9)\n\n\n l2 = ListNode(2)\n head2 = l2\n l2.next = ListNode(10)\n l2 = l2.next\n l2.next = ListNode(54)\n\n res = merge_sorted_lists(head, head2)\n while res:\n print(res.val)\n res = res.next","sub_path":"revise-daily/arjuna-vishwamitra-abhimanyu/epi/linked-lists/1_merge_lists.py","file_name":"1_merge_lists.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"128220730","text":"from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.init as init \nimport torchvision.models as models \nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport scipy.io as sio\nimport time\nfrom collections import OrderedDict\nimport numpy as np\nimport torch.utils.model_zoo as model_zoo\nimport os \nimport cv2\nos.environ['GLOG_minloglevel'] = '2' \n#import caffe\ndir_path = os.path.dirname(os.path.realpath(__file__))\nimport sys\nsys.path.insert(0, \"/home/raaj/openpose_caffe_train/build/op/\")\nimport opcaffe\nimport signal\nexit = 0\ndef signal_handler(sig, frame):\n global exit\n exit = 1\nsignal.signal(signal.SIGINT, signal_handler)\nsys.path.append('/home/raaj/openpose_orig/build/python/')\nfrom openpose import 
pyopenpose as op\n\nfrom models import *\nfrom loader import *\n\n# Parsers\nparser = argparse.ArgumentParser(description='OP')\nparser.add_argument('--ngpu', type=int, default=1,\n help='number of GPUs to use')\nparser.add_argument('--batch', type=int, default=10,\n help='batch size')\nparser.add_argument('--reload', action='store_true')\nargs = parser.parse_args()\n\n# Sample OP Network\nparams = dict()\nparams[\"model_folder\"] = \"/home/raaj/openpose_orig/models/\"\nparams[\"body\"] = 2 # Disable OP Network\nparams[\"upsampling_ratio\"] = 0\nparams[\"model_pose\"] = \"BODY_25B\"\nopWrapper = op.WrapperPython()\nopWrapper.configure(params)\nopWrapper.start()\n\n# Setup Model\nNAME = \"weights\"\nmodel = Model(Body25(\"7x7\"), ngpu=int(args.ngpu)).cuda()\nmodel.train()\n\n# Load weights etc.\niterations = 0\nreload = int(args.reload)\nif not reload:\n state = load_checkpoint(NAME)\n if state != None:\n iterations = state[\"iterations\"]\n model.load_state_dict(state['state_dict'])\n print(\"Loaded Iteration \" + str(iterations))\n\n# # Load Caffe?\n# model.net.load_caffe()\n\n# params = {\n# \"batch_size\" : int(args.batch),\n# \"stride\": 8,\n# \"max_degree_rotations\": \"45.0\",\n# \"crop_size_x\": 368,\n# \"crop_size_y\": 368,\n# \"center_perterb_max\": 40.0,\n# \"center_swap_prob\": 0.0,\n# \"scale_prob\": 1.0,\n# \"scale_mins\": \"0.333333333333\",\n# \"scale_maxs\": \"1.5\",\n# \"target_dist\": 0.600000023842,\n# \"number_max_occlusions\": \"2\",\n# \"sigmas\": \"7.0\",\n# \"models\": \"COCO_25B_23;COCO_25B_17;MPII_25B_16;PT_25B_15\",\n# \"sources\": \"/media/raaj/Storage/openpose_train/dataset/lmdb_coco2017_foot;/media/raaj/Storage/openpose_train/dataset/lmdb_coco;/media/raaj/Storage/openpose_train/dataset/lmdb_mpii;/media/raaj/Storage/openpose_train/dataset/lmdb_pt2_train\",\n# \"probabilities\": \"0.05;0.85;0.05;0.05\",\n# \"source_background\": \"/media/raaj/Storage/openpose_train/dataset/lmdb_background\",\n# \"normalization\": 0,\n# \"add_distance\": 0\n# }\n# myClass = opcaffe.OPCaffe(params)\n\n# Caffe Loader\nWORKER_SIZE = int(args.ngpu)\nBATCH_SIZE = int(args.batch)\nkwargs = {'num_workers': WORKER_SIZE, 'pin_memory': True}\ntrain_loader = torch.utils.data.DataLoader(\n OPLoader(WORKER_SIZE, BATCH_SIZE),\n batch_size=WORKER_SIZE, shuffle=False, **kwargs)\n\n# Loss\nlr = 0.000100\nparameters = [\n {\"params\": model.net.vgg19.parameters(), \"lr\": lr*1},\n {\"params\": model.net.pafA.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafB.parameters(), \"lr\": lr*4},\n {\"params\": model.net.hmA.parameters(), \"lr\": lr*4},\n {\"params\": model.net.hmB.parameters(), \"lr\": lr*4},\n ]\nmseLoss = torch.nn.MSELoss()\noptimizer = optim.Adam(parameters, lr=lr, betas=(0.9, 0.999))\nlr_half_sets = [10, 200000, 300000, 360000, 420000, 480000, 540000, 600000, 700000, 800000]\n\n# # Iterate\n# while 1:\n# iterations += 1\n# batch = opcaffe.Batch()\n# myClass.load(batch)\n\ndef half_lr(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr'] /= 2.\n\n# Iterate\nwhile 1:\n for batch_idx, (data, label) in enumerate(train_loader):\n iterations += 1\n data = data.flatten(0,1)\n label = label.flatten(0,1) \n\n # LR\n if iterations in lr_half_sets:\n print(\"Half LR\")\n half_lr(optimizer) \n\n # Split\n paf_mask = label[:, 0:TOTAL_PAFS].cuda()\n hm_mask = label[:, TOTAL_PAFS:TOTAL_PAFS+TOTAL_HMS].cuda()\n paf_truth = label[:, TOTAL_PAFS+TOTAL_HMS:TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS].cuda()\n hm_truth = label[:, 
TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS:TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS+TOTAL_HMS].cuda()\n imgs = data.cuda()\n\n # Mask\n paf_truth_m = torch.mul(paf_truth, paf_mask)\n hm_truth_m = torch.mul(hm_truth, hm_mask)\n\n # Forward Model\n pafs_pred, hms_pred = model.forward(imgs)\n\n # Multiply with Masks\n loss = 0\n for i in range(0, ITERATIONS):\n paf_pred_m = torch.mul(pafs_pred[i], paf_mask)\n hm_pred_m = torch.mul(hms_pred[i], hm_mask)\n loss += mseLoss(paf_pred_m, paf_truth_m)\n loss += mseLoss(hm_pred_m, hm_truth_m)\n\n # Opt\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Save\n if iterations % 2000 == 0 or exit:\n print(\"Saving\")\n save_checkpoint({\n 'iterations': iterations,\n 'state_dict': model.state_dict(),\n }, NAME)\n if exit: sys.exit()\n print((iterations,loss))\n\n # # OP Test\n # test_index = 0\n # hm_final = hms_pred[ITERATIONS-1][test_index,:,:,:]\n # paf_final = pafs_pred[ITERATIONS-1][test_index,:,:,:]\n # poseHeatMaps = torch.cat([hm_final, paf_final], 0).detach().cpu().numpy().copy()\n # imageToProcess = imgs.detach().cpu().numpy().copy()[test_index,:,:,:]\n # imageToProcess = (cv2.merge([imageToProcess[0,:,:]+0.5, imageToProcess[1,:,:]+0.5, imageToProcess[2,:,:]+0.5])*255).astype(np.uint8)\n # datum = op.Datum()\n # datum.cvInputData = imageToProcess\n # datum.poseNetOutput = poseHeatMaps\n # opWrapper.emplaceAndPop([datum])\n # print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n # cv2.imshow(\"OpenPose 1.4.0 - Tutorial Python API\", datum.cvOutputData)\n # cv2.waitKey(0)\n\n # img_viz = imgs.detach().cpu().numpy().copy()[0,0,:,:]\n # hm_pred_viz = hms_pred[ITERATIONS-1].detach().cpu().numpy().copy()[0,0,:,:]\n # hm_truth_viz = hm_truth_m.cpu().numpy().copy()[0,0,:,:]\n # cv2.imshow(\"hm_pred_viz\", cv2.resize(hm_pred_viz, (0,0), fx=8, fy=8, interpolation = cv2.INTER_CUBIC))\n # cv2.imshow(\"hm_truth_viz\", cv2.resize(hm_truth_viz, (0,0), fx=8, fy=8, interpolation = cv2.INTER_CUBIC))\n # cv2.imshow(\"img\", img_viz+0.5)\n # cv2.waitKey(15)\n\n\n\"\"\"\nTraining of POF?\n\"\"\"","sub_path":"train_raaj.py","file_name":"train_raaj.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"543793906","text":"while True:\n num = input()\n if int(num) == 0: break\n if len(set(num)) == 1:\n print(\"NA\")\n continue\n cnt = 0\n while num != \"6174\":\n cnt += 1\n L = ''.join(sorted(num))\n S = ''.join(reversed(sorted(num)))\n num = str(int(S) - int(L)).zfill(4)\n print(cnt)\n \n","sub_path":"AOJ/0259/0259.py","file_name":"0259.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"455974106","text":"f2 = open(\"text.txt\", \"w\")\r\nf2.write(\"|\" + \" \" * 12 + \"String\" + \" \" * 12 + \"|\" + \" \" * 12 + \"Integer\" + \" \" * 12 + \"|\")\r\n\r\n\r\ndef Delete(n):\r\n file = open('text.txt' , 'r')\r\n spisok = file.readlines()\r\n file.close()\r\n del spisok[n-1]\r\n file = open('text.txt' , 'w')\r\n file.writelines(spisok)\r\n file.close()\r\n\r\n\r\ndef add():\r\n print(\"Enter stop if u want to quit \")\r\n s = 0\r\n while s != \"stop\":\r\n s = input(\"Enter some text or enter a number: \")\r\n for i in range(0, 10):\r\n for j in str(s):\r\n if j == str(i):\r\n s = int(s)\r\n k = 3\r\n if type(s) == str:\r\n if k%2 == 0:\r\n f2.write(\"|\" + \" \" * 12 + s + \" \" * (18 - len(s)) + \"|\")\r\n k += 1\r\n if k%2 == 1:\r\n f2.write(\"\\n\"+\"|\" + \" \" * 12 + s + \" 
\" * (18 - len(s)) + \"|\")\r\n k += 1\r\n if type(s) == int:\r\n f2.write(\" \"*12+str(s) + \" \" * (19 - len(str(s))) + \"|\")\r\n k += 1\r\n lines = 0\r\n for line in open(\"text.txt\"):\r\n lines += 1\r\n f2.close()\r\n Delete(lines)\r\nadd()\r\n\r\n\r\n","sub_path":"I семестр/Алгоритми і структури даних/Лаби 2016-17/Братун/LABA6/Filedit.py","file_name":"Filedit.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"468066067","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0003_auto_20150912_0813'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='AboutInfo',\n ),\n migrations.DeleteModel(\n name='PrivateInfo',\n ),\n migrations.AlterModelOptions(\n name='subsection',\n options={'ordering': ['index']},\n ),\n migrations.AddField(\n model_name='page',\n name='menu_name',\n field=models.CharField(default='', max_length=255, help_text='The heading to include in the top menu bar'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='page',\n name='name',\n field=models.CharField(max_length=255, unique=True, help_text='A unique identifier for this page. Use lowercase, no spaces.'),\n ),\n ]\n","sub_path":"website/migrations/0004_auto_20150912_1022.py","file_name":"0004_auto_20150912_1022.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"258251709","text":"class HamiltonianProblem:\n def __init__(self, adj, N):\n self.adj = adj\n self.N = N\n self.path = []\n\n def printSolution(self):\n [print(i, end='💫') for i in self.path]\n\n def isValid(self, nodeIndex):\n return nodeIndex > 0 and nodeIndex < self.N\n\n def isAdjacent(self, firstNodeIndex, secondNodeIndex):\n return self.adj[firstNodeIndex][secondNodeIndex] == 1\n\n def isVisited(self, nodeIndex):\n return nodeIndex in self.path\n\n def solveHamiltonianPath(self):\n self.path.append(0)\n return self.solve(1)\n\n def solve(self, nodeIndex):\n # last node is adjacent to starting node 0\n if nodeIndex == self.N:\n if self.isAdjacent(nodeIndex - 1, 0):\n return True\n else:\n return False\n\n # For all adjacent and not visited nodes\n for nextNodeIndex in range(self.N):\n print(f\"index: {nodeIndex} next: {nextNodeIndex}\")\n if (self.isValid(nextNodeIndex) and \n self.isAdjacent(nextNodeIndex, nodeIndex) and \n not self.isVisited(nextNodeIndex)):\n self.path.append(nextNodeIndex)\n\n # Solve the subproblem\n if (self.solve(nodeIndex + 1)):\n return True\n\n # Cannot find a solution for the nodeIndex + 1\n # Backtrack by removing last item in the path\n self.path.pop()\n\n \n\nadj = [\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 0]\n]\n \nproblem = HamiltonianProblem(adj, len(adj))\nif problem.solveHamiltonianPath():\n problem.printSolution()\nelse:\n print(\"No hamiltonian path is found 💥\")","sub_path":"algorithims/backtracking/HamiltonianProblem.py","file_name":"HamiltonianProblem.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"414569012","text":"import random\n\ndef print_intro():\n print(\"\"\"Welcome to Camel!\n In your desperation, you have stolen a camel to make your way\n across the Mobi desert.\n The locals want their camel back and are chasing you down\n Survive your desert 
trek and outrun the locals \n    \"\"\")\n\n\ndef main():\n    print_intro()\n\n    oasis = False\n    miles_traveled = 0\n    thirst = 0\n    camel_tiredness = 0\n    drinks_in_canteen = 3\n    distance_from_locals = -20\n    done = False\n    while not done:\n\n        print(\"\"\"\n        A. Drink from the canteen\n        B. Ahead moderate speed.\n        C. Ahead full speed\n        D. Stop for the night\n        E. Status check.\n        Q. Quit \n\n        \"\"\")\n        user_choice = input(\"What is your choice? \" )\n\n        if user_choice == \"Q\":\n            done = True\n        \n        elif user_choice == \"E\":\n            print (\"miles_traveled :\", miles_traveled)\n            print (\"drinks_in_canteen:\", drinks_in_canteen)\n            print (\"the locals are\", distance_from_locals, \"behind you\")\n            print (\"thirst meter:\", thirst)\n        \n        #stop for the night\n        elif user_choice == \"D\":\n            print(\"The camel is happy\") \n            camel_tiredness = 0\n            distance_from_locals -= random.randrange(7, 15)\n        \n        #Ahead full speed\n        elif user_choice == \"C\":\n            miles_traveled += random.randrange(10, 20)\n            print(\"miles traveled:\", miles_traveled)\n            thirst += 1 \n            camel_tiredness += random.randrange(1, 3)\n            distance_from_locals += random.randrange(7, 15)\n            if random.randrange(20) == 0:\n                oasis = True\n        \n        #ahead moderate speed \n        elif user_choice ==\"B\":\n            miles_traveled += random.randrange(5, 12)\n            print(\"miles traveled:\", miles_traveled)\n            camel_tiredness += 1\n        \n        #drink from the canteen\n        elif user_choice ==\"A\":\n            print(\"You take a drink from your canteen\")\n            drinks_in_canteen -= 1\n            thirst = 0\n\n        if miles_traveled == random.randrange(1, 200):\n            drinks_in_canteen = 0\n            thirst = 0\n            camel_tiredness = 0\n            print(\" You have found an oasis\")\n        \n        if thirst > 4 and thirst <= 6:\n            print(\"you are thirsty\")\n        \n        elif thirst > 6:\n            print(\"You died of thirst\")\n            done = True\n        \n        if camel_tiredness > 5 and camel_tiredness <= 8:\n            print(\"Your camel is getting tired\")\n\n        elif camel_tiredness > 8:\n            print(\"Your camel is dead\") \n            done = True\n\n        if distance_from_locals < 15:\n            print(\"The locals are getting close\")\n        \n\n        elif distance_from_locals == 0:\n            print(\"You've been caught by the locals!!\")\n            done = True\n\n        \n        elif oasis == True:\n            print(\"You stumble upon an oasis\")\n            drinks_in_canteen = 3\n            thirst = 0\n            camel_tiredness = 0\n\n        if miles_traveled == 200:\n            print(\"You won the game!!\")\n\n    \n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"lab5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"208194220","text":"#!/usr/bin/env python\r\n# based on Chippy Ruxpin by Next Thing Co 2015\r\n\r\nfrom bottle import run, get, post, request, response, route, redirect, template, static_file\r\nimport socket\r\n\r\nclass WebFramework:\r\n    def __init__(self,bear):\r\n        # self.ip = [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]\r\n        self.ip = '127.0.0.1'\r\n        self.e = 'o'\r\n        self.m = 'c'\r\n\r\n        print( \"---------\")\r\n        print( \"RasPi Ruxpin is ONLINE!\")\r\n        print( \"In your browser, go to http://\" + str(self.ip) + \":8080\")\r\n        print( \"---------\")\r\n\r\n        # Route handlers live inside __init__ so they can close over 'self' and 'bear'\r\n        @get('/public/<path:path>')\r\n        def server_static(path):\r\n            return static_file(path, root='./public')\r\n        \r\n        @get('/')\r\n        def index():\r\n            return template('templates/index', phrases=bear.phrases, e=self.e, m=self.m)\r\n\r\n        @get('/api/bear')\r\n        def apiBearGetStatus():\r\n            return bear.getStatus()\r\n\r\n        @get('/api/bear/<servo>/<action>')\r\n        def apiBearServoAction(servo, action):\r\n            data = { \"bear\": {servo: {\"open\":(action == 'open')}}}\r\n            return bear.update(data)\r\n\r\n        @post('/api/bear')\r\n        def apiBearPostStatus():\r\n            data = request.json\r\n            return bear.update(data)\r\n\r\n        @get('/puppet')\r\n        def puppet():\r\n            self.e = request.query.e or 'o'\r\n            self.m = request.query.m or 'o'\r\n\r\n            data = { \"bear\": {\"eyes\": {\"open\":(self.e == 'o')}, \"mouth\":{\"open\":(self.m == 'o')}}}\r\n            print(data)\r\n            bear.update(data)\r\n            return index()\r\n\r\n        @post('/api/play/<filename>')\r\n        def play(filename):\r\n            bear.play(filename)\r\n            return index()\r\n\r\n        @post('/speak')\r\n        def speak():\r\n            text = request.forms.get('speech')\r\n            if(text != \"\"): bear.talk(text)\r\n            return index()\r\n\r\n        @post('/slack')\r\n        def slack():\r\n            text = request.forms.get('text')\r\n            response.content_type = 'text/plain'\r\n\r\n            if(text == \"list\"):\r\n                phraseList = \"```\\n\"\r\n                for key, value in bear.phrases.items():\r\n                    phrase = (\"%s => %s \\n\" % (key, value))\r\n                    phraseList += phrase\r\n                phraseList += \"```\\n\"\r\n                return phraseList\r\n            else:\r\n                if(text in bear.phrases):\r\n                    bear.phrase( text )\r\n                    return \"RasPi Ruxpin played the phrase: \\\"%s\\\"\" % bear.phrases[text]\r\n                else:\r\n                    bear.talk( text )\r\n                    return \"RasPi Ruxpin said: \\\"%s\\\"\" % text\r\n\r\n        run(host=self.ip, port=8080, debug=True)\r\n","sub_path":"lib/webFramework.py","file_name":"webFramework.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"392156641","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom importlib import import_module\n\n\nsentences = import_module(\"30\").create_sentences('neko.txt.mecab')\n\nseq = []\nseqs = []\nfor sentence in sentences:\n    for word in sentence:\n        if word['pos'] == '名詞':\n            seq.append(word)\n        else:\n            if len(seq) > 1:\n                seqs.append(seq)\n            seq = []\n\nprint(seqs)\n","sub_path":"chapter4/35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"463289276","text":"\n\nclass Person(object):\n    \"\"\"\n    The Person class that provides a superclass for Fellow and Staff subclasses\n    \"\"\"\n    def __init__(self, first_name, second_name, office_name=None, is_allocated=False):\n        \"\"\"\n        The init method of Person. 
Every person created is expected to have a name\n \"\"\"\n self.first_name = first_name\n self.second_name = second_name\n\n # refers to the fact that a person is allocated to an office and if it's a fellow\n # and he wants a living space, he is allocated to one\n self.is_allocated = is_allocated\n\n self.office_name = office_name\n","sub_path":"person/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"415931215","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import classification_report, confusion_matrix,\\\n accuracy_score\nfrom sklearn.utils import shuffle\n\n\ndef load_dataset(wine_dataset_csv):\n wine_dataframe = pd.read_csv(wine_dataset_csv, index_col=False)\n\n return wine_dataframe\n\n\ndef analyse_dataset(processed_data):\n\n data = processed_data\n\n print(data.head())\n\n\ndef naive_bayes_algorithm_with_holdout_validation(wine_dataset):\n\n shuffled_data = wine_dataset.sample(frac=1,\n random_state=42).reset_index(\n drop=True)\n\n label = shuffled_data[\"Class\"].values\n dataset = shuffled_data.iloc[:, : 486].values\n\n X_train, X_test, y_train, y_test = train_test_split(dataset, label,\n test_size=0.20,\n random_state=1)\n\n naive_bayes_classifier = GaussianNB()\n naive_bayes_classifier.fit(X_train, y_train)\n\n y_pred = naive_bayes_classifier.predict(X_test)\n\n accuracy = accuracy_score(y_test, y_pred) * 100\n print(accuracy)\n\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n\n\ndef naive_bayes_algorithm_with_k_fold_validation(wine_dataset):\n\n shuffled_data = wine_dataset.sample(frac=1,\n random_state=42).reset_index(\n drop=True)\n # Extract features and label\n label = shuffled_data[\"Class\"].values\n # dataset = shuffled_data.iloc[:, : 486].values\n # dataset = shuffled_data.iloc[:, : 485].values\n dataset = shuffled_data.iloc[:, : 482].values\n\n # Create classifier\n naive_bayes_classifier = GaussianNB()\n\n # Train model with 10 fold cross validation\n cross_validation_scores = cross_val_score(naive_bayes_classifier,\n dataset, label, cv=10)\n\n print(cross_validation_scores)\n print()\n print(\"Cross validation scores mean: {}%\".format(np.mean(\n cross_validation_scores\n ) * 100))\n\n\ndef main():\n\n # wine_dataset_file = \"drink_and_hold_dataset.csv\"\n\n # tweaked_wine_dataset_file = \\\n # \"drink_and_hold_dataset_with_finish_attribute_deleted.csv\"\n\n tweaked_wine_dataset_file = \\\n \"drink_and_hold_dataset_with_4_attributes_above_35_percent_deleted.csv\"\n\n processed_data_file = load_dataset(tweaked_wine_dataset_file)\n\n # processed_data_file = load_dataset(wine_dataset_file)\n\n # analyse_dataset(processed_data_file)\n\n # naive_bayes_algorithm_with_holdout_validation(processed_data_file)\n\n naive_bayes_algorithm_with_k_fold_validation(processed_data_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"wine_informatics/navive_bayes_algorithm/naive_bayes_algorithm.py","file_name":"naive_bayes_algorithm.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"171671846","text":"# Created by FangZhenfang in 18--6-21\nimport csv\nimport numpy as np\n\nyi_reader = list(csv.reader(open('yi.csv','r')))\nwi_reader = 
list(csv.reader(open('wi.csv','r')))\nyi=[]\nwi=[]\nfor stu in yi_reader:\n yi.append(float(stu[0]))\nfor stu in wi_reader:\n wi.append(float(stu[0]))\nxi=[i for i in range(1,60)]\nc=[[],[],[],[]] # first two coefficient of A,last two is b\nc0=sum(wi)\nfor i in range(30):\n c[0].append(0)\n c[1].append(0)\n c[2].append(0)\n c[3].append(0)\n for j in range(30):\n c[0][i] += wi[j] * xi[i + j] # sum(wi*xi)\n c[1][i] += wi[j] * xi[i + j] * xi[i + j] # sum(wi*xi^2)\n c[2][i] += wi[j] * yi[i + j] # sum(wi*yi)\n c[3][i] += wi[j] * xi[i + j] * yi[i + j] # sum(wi*xi*yi)\na=[[],[]] # a0,a1\nfor i in range(30):\n a[1].append((c[3][i]*c0-c[0][i]*c[2][i])/(c0*c[1][i]-c[0][i]*c[0][i]))\n a[0].append(c[2][i]/c0-c[0][i]*a[1][i]/c0)\nzi=[] # vector b in systerm equation\nfor i in range(30):\n zi.append(a[0][i]+a[1][i]*(30+i))\nA=[] # matrix A in systerm equation\nfor i in range(30):\n A.append([])\n for j in range(30):\n A[i].append(yi[29+i-j])\n# using matrix to caculate the answer\nAm=np.mat(A)\nbv=np.transpose(np.mat(zi))\nx=Am.I*bv\nx=np.transpose(np.mat(x)).tolist()\nb=[]\n\nfor num in x[0]:\n b.append(num)\ny_reader = list(csv.reader(open('754yi.csv','r')))\ny=[]\nfor stu in y_reader:\n y.append(float(stu[0]))\nz=[]\nfor i in range(725):\n z.append(0)\n for j in range(30):\n z[i]+=b[j]*y[29+i-j]\nprint(z)\n","sub_path":"python/HW_of _NA/identify_trend_of_stock_price.py","file_name":"identify_trend_of_stock_price.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"502419036","text":"from django.contrib import admin\nfrom .models import *\nfrom .upload import Upload\nfrom .forms import UploadFileForm\n\n# from .upload import upload\n# class SignUpAdmin(admin.ModelAdmin):\n# \tlist_display = [\"email\", \"timestamp\", \"updated\"]\n# \t# class Meta:\n# \t# \tmodel = SignUp\n# \tform = SignUpForm\n\n\n\n# admin.site.register(SignUp, SignUpAdmin)\n\n# Register your models here.\nclass UploadFileAdmin(admin.ModelAdmin):\n\t# object = UploadImage.uploadname()\n\t# a,b = UploadImage.__str__()\n\t# a,b = \"__str__\".split(\"======================>\")\n\t# list_display = ['Event','Filename'] \n\tlist_display = [\"__str__\",\"event_name\",\"description\"]\n\tform = UploadFileForm\n\t\n\t# class Meta:\n\t# \t#model = SignUp\n\t# form1 = UploadImageForm#can be skipped as well if forms.py is empty but use pass then\n\t# class Meta:\n\t# \tmodel = UploadImage\n\t# dictionary = [\"__str__\"]\n\t# a = dictionary['event_name']\n\t# b = dictionary['image'].split(\"/\")\n\t# def Event(a):\n\t# \treturn a\n\t# def event_name(self,request):\n\t# \treturn request.FILES['image'].name\n\t# def uploaded_by(self,request):\n\t# \treturn request.user.get_username()\n\n\t\n\tdef save_model(self, request, obj, form, change=False):\n # do any pre-save stuff here\n\t\tprint(\"hello before saving\")\n\t\tobj.save()\n\t\tprint(\"Hello\")\n\t\t# global filename\n\t\t# filename = request.FILES['storage'].name\n\t\t# filesize = request.FILES['storage'].size\n\t\t# upload.main()\n\t\tevent_name = request.POST.get('event_name')\n\t\tprint(event_name)\n\t\tsliderImages = request.FILES['sliderImages'].name\n\t\tprint(request.FILES['sliderImages'].name)\n\t\tpanelImages = request.FILES['panelImages'].name\n\t\tstorage = request.FILES['storage'].name\n\t\tthumbnails = request.FILES['thumbnails'].name\n\t\tprint(request)\n\t\t# storage = filename = request.FILES['storage'].name\n\t\tUp = 
Upload()\n\t\tUp.startScript(event_name,sliderImages,panelImages,storage,thumbnails)\n\t\t#list_display = [\"__str__\",\"filename\"]\n\t\t#list_display = [\"__str__\",\"filename\"]\n\t\t# list_display.extend(filename)\n\t\t# list_display = [\"a\",\"b\"]\n\t\t# list_display.extend([\"__str__\",filename])\n\n\t\t# filename = UploadImage\n\t\t# for f in files:\n\t\t\t# if object.endswith('.zip'):\n\t\t\t# \tobj.extractall()\n\t\t\t# \tprint(\"Extracted File Successfully!\")\n\nadmin.site.register(UploadFile, UploadFileAdmin)\n\n# class EventNameAdmin(admin.ModelAdmin):\n# \t\tform2 = EventNameForm\n# \t\tlist_display = [\"__str__\"]\n# admin.site.register(EventName, EventNameAdmin)","sub_path":"upload/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"149526222","text":"import os\nimport sys\n\nstorage_file = os.path.expandvars(\"$DOT_ROOT/scripts/storage/night_mode.txt\")\n\nif not os.path.exists(storage_file):\n open(storage_file, \"w\").write(\"false\")\n\nenabled = open(storage_file, \"r\").read()\n\nif \"toggle\" in sys.argv:\n if enabled == \"true\":\n enabled = \"false\"\n else:\n enabled = \"true\"\n\nif enabled == \"true\":\n os.system(\"redshift -x\")\n os.system(\"redshift -O 4500\")\nelse:\n os.system(\"redshift -x\")\n\nopen(storage_file, \"w\").write(enabled)\n","sub_path":"scripts/night_mode.py","file_name":"night_mode.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336828451","text":"#!/usr/bin/env python3\nimport sys, os\n\n#some necessary configuration:\nsys.path.append(\"/home/simon/projects/hal/build/lib/\") #this is where your hal python lib is located\nos.environ[\"HAL_BASE_PATH\"] = \"/home/simon/projects/hal/build\" # hal base path\nimport hal_py\n\nnetlist_to_read = \"netlist.v\"\ngate_library_path = \"gate_library.lib\"\n\n#initialize HAL\nhal_py.plugin_manager.load_all_plugins()\n\n#read netlist\nnetlist = hal_py.NetlistFactory.load_netlist(netlist_to_read, gate_library_path)\n\nfrom hal_plugins import solve_fsm\n\npl_fsm = hal_py.plugin_manager.get_plugin_instance(\"solve_fsm\")\n\n# UPDATE THE MODULE IDS OR CREATE YOUR OWN LIST OF GATES\nstate_mod = netlist.get_module_by_id(0)\ntransition_mod = netlist.get_module_by_id(0)\n\ntransition_gates = transition_mod.gates\nstate_gates = state_mod.gates\n\ninitial_state = {}\ntimeout = 600000\n\ng = pl_fsm.solve_fsm(netlist, state_gates, transition_gates, initial_state, timeout)\n\n#unload everything hal related\nhal_py.plugin_manager.unload_all_plugins()","sub_path":"plugins/solve_fsm/test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"467152014","text":"\"\"\"Update for DSM6\n\nRevision ID: a41b2b645a3c\nRevises: 26b4c36c11e\nCreate Date: 2016-12-20 18:30:15.449680\n\n\"\"\"\nrevision = 'a41b2b645a3c'\ndown_revision = '26b4c36c11e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n\ndef upgrade():\n op.add_column('version', sa.Column('conf_privilege', sa.Unicode(length=255), nullable=True))\n op.add_column('version', sa.Column('conf_resource', sa.Unicode(length=255), nullable=True))\n\n\ndef downgrade():\n op.drop_column('version', 'conf_resource')\n op.drop_column('version', 
'conf_privilege')\n","sub_path":"migrations/versions/a41b2b645a3c_.py","file_name":"a41b2b645a3c_.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"645822793","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport cgi\nimport os\nimport sys\nimport traceback\nimport wsgiref.handlers\nimport settings\nimport base64\nimport random\nimport math\nimport glob\n\nfrom google.appengine.ext.webapp import template, WSGIApplication\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nfrom google.appengine.ext import zipserve\nfrom google.appengine.api import urlfetch\nfrom base import *\nfrom datetime import datetime, timedelta\nfrom django.utils import simplejson\n\nfrom models.entry import Entry\nfrom models.tag import Tag\nfrom models.blog import g_blog\n\nclass Error404(BaseRequestHandler):\n\n def get(self, slug=None):\n self.render2('views/admin/404.html')\n\nclass setlanguage(BaseRequestHandler):\n\n def get(self):\n lang_code = self.param('language')\n next = self.param('next')\n if not next and os.environ.has_key('HTTP_REFERER'):\n next = os.environ['HTTP_REFERER']\n if not next:\n next = '/'\n from django.utils.translation import check_for_language, activate, to_locale, get_language\n\n if lang_code and check_for_language(lang_code):\n g_blog.language = lang_code\n\n activate(lang_code)\n g_blog.save()\n self.redirect(next)\n\nclass admin_do_action(BaseRequestHandler):\n\n @requires_admin\n def get(self, slug=None):\n try:\n func = getattr(self, 'action_' + slug)\n if func and callable(func):\n func()\n else:\n self.render2('views/admin/error.html', {'message': 'This operate has not defined!'})\n except:\n self.render2('views/admin/error.html', {'message': 'This operate has not defined!'})\n\n @requires_admin\n def post(self, slug=None):\n try:\n func = getattr(self, 'action_' + slug)\n if func and callable(func):\n func()\n else:\n self.render2('views/admin/error.html', {'message': 'This operate has not defined!'})\n except:\n self.render2('views/admin/error.html', {'message': 'This operate has not defined!'})\n\n def action_test(self):\n self.write(os.environ)\n\n def action_cacheclear(self):\n memcache.flush_all()\n self.write('\"Cache cleared successful\"')\n\n def action_updatecomments(self):\n for entry in Entry.all():\n cnt = entry.comments().count()\n if cnt != entry.commentcount:\n entry.commentcount = cnt\n entry.put()\n self.write('\"ok\"')\n\n def action_updatelink(self):\n link_format = self.param('linkfmt')\n\n if link_format:\n link_format = link_format.strip()\n g_blog.link_format = link_format\n g_blog.save()\n for entry in Entry.all():\n vals = {\n 'year': entry.date.year,\n 'month': str(entry.date.month).zfill(2),\n 'day': entry.date.day,\n 'postname': entry.slug,\n 'post_id': entry.post_id,\n }\n\n if entry.slug:\n newlink = link_format % vals\n else:\n newlink = '?p=%(post_id)s' % vals\n\n if entry.link != newlink:\n entry.link = newlink\n entry.put()\n self.write('\"ok\"')\n else:\n self.write('\"Please input url format.\"')\n\n def action_init_blog(self, slug=None):\n\n for com in Comment.all():\n com.delete()\n\n for entry in Entry.all():\n entry.delete()\n\n g_blog.entrycount = 0\n self.write('\"Init has succeed.\"')\n\n def action_update_tags(self, slug=None):\n for tag in Tag.all():\n tag.delete()\n for entry in Entry.all().filter('entrytype =', 'post'):\n if entry.tags:\n for t in entry.tags:\n try:\n logging.info('sss:' + t)\n 
Tag.add(t)\n except:\n traceback.print_exc()\n\n self.write('\"All tags for entry have been updated.\"')\n\nclass admin_status(BaseRequestHandler):\n\n @requires_admin\n def get(self):\n self.render2('views/admin/status.html', {'cache': memcache.get_stats(), 'current': 'status', 'environ'\n : os.environ})\n\nclass WpHandler(BaseRequestHandler):\n\n @requires_admin\n def get(self, tags=None):\n entries = Entry.all().order('-date')\n cates = Category.all()\n tags = Tag.all()\n\n self.response.headers['Content-Type'] = 'binary/octet-stream' # 'application/atom+xml'\n self.render2('views/wordpress.xml', {'entries': entries, 'cates': cates, 'tags': tags})\n\nfrom admins.admin_entries import admin_entry, admin_entries\nfrom admins.admin_sitemap import admin_sitemap\nfrom admins.admin_links import admin_links, admin_link\nfrom admins.admin_setup import admin_setup\nfrom admins.admin_import import admin_import, admin_import_next\nfrom admins.admin_importls import admin_importls, admin_import_nextls\nfrom admins.admin_authors import admin_author, admin_authors\nfrom admins.admin_categories import admin_category, admin_categories\nfrom admins.admin_comments import admin_comments\nfrom admins.admin_tools import admin_tools\n\ndef main():\n webapp.template.register_template_library('filter')\n application = webapp.WSGIApplication([\n ('/admin', admin_setup),\n ('/admin/setup', admin_setup),\n ('/admin/entries/(post|page)', admin_entries),\n ('/admin/links', admin_links),\n ('/admin/categories', admin_categories),\n ('/admin/comments', admin_comments),\n ('/admin/link', admin_link),\n ('/admin/category', admin_category),\n ('/admin/(post|page)', admin_entry),\n ('/admin/status', admin_status),\n ('/admin/authors', admin_authors),\n ('/admin/author', admin_author),\n ('/admin/import', admin_import),\n ('/admin/import_next', admin_import_next),\n ('/admin/importls', admin_importls),\n ('/admin/import_nextls/(.*)/(.*)/(.*)', admin_import_nextls),\n ('/admin/tools', admin_tools),\n ('/admin/sitemap', admin_sitemap),\n ('/admin/export/micolog.xml', WpHandler),\n ('/admin/do/(\\w+)', admin_do_action),\n ('/admin/lang', setlanguage),\n ('.*', Error404),\n ], debug = True)\n wsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == '__main__':\n main()\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"290023782","text":"#=========================================================================\n# MeshRouterCL.py\n#=========================================================================\n\nfrom __future__ import print_function\n\nfrom math import sqrt\nfrom collections import deque\n\nfrom pymtl import *\nfrom pclib.ifcs import InValRdyBundle, OutValRdyBundle, NetMsg\nfrom pclib.cl import InValRdyQueue, OutValRdyQueue\n\n#=========================================================================\n# MeshRouterCL\n#=========================================================================\nclass MeshRouterCL( Model ):\n\n NORTH = 0\n EAST = 1\n SOUTH = 2\n WEST = 3\n TERM = 4\n\n #-----------------------------------------------------------------------\n # __init__\n #-----------------------------------------------------------------------\n def __init__( s, id_, nrouters, nmessages, payload_nbits, nentries ):\n\n s.id_ = id_\n s.xnodes = int( sqrt( nrouters ) )\n s.x = id_ % s.xnodes\n s.y = id_ / s.xnodes\n s.msg_type = NetMsg( nrouters, nmessages, payload_nbits )\n 
s.nentries = nentries\n\n #s.params = [ nrouters, nmessages, payload_nbits, buffering ]\n #---------------------------------------------------------------------\n # Interface\n #---------------------------------------------------------------------\n\n s.in_ = InValRdyBundle [ 5 ]( s.msg_type )\n s.out = OutValRdyBundle[ 5 ]( s.msg_type )\n\n\n #-----------------------------------------------------------------------\n # elaborate_logic\n #-----------------------------------------------------------------------\n def elaborate_logic( s ):\n\n # Instantiate buffers\n\n s.input_buffers = InValRdyQueue [ 5 ]( s.msg_type, s.nentries )\n s.output_regs = OutValRdyQueue[ 5 ]( s.msg_type, 1 )\n s.priorities = [ 0 ] * 5\n\n # Connect\n\n for i in range( 5 ):\n s.connect( s.in_[ i ], s.input_buffers[ i ].in_ )\n s.connect( s.out[ i ], s.output_regs [ i ].out )\n\n # Logic\n\n @s.tick\n def router_logic():\n\n # Xfer data from input ports to input_buffers\n for i in range( 5 ):\n s.input_buffers[ i ].xtick()\n s.output_regs[ i ].xtick()\n\n\n # Arbitration and Crossbar Traversal\n s.winners = []\n for i in range( 5 ):\n\n if not s.output_regs[ i ].is_full():\n data = s.arbitrate( i )\n if data != None:\n s.output_regs[ i ].enq( data )\n\n # Deque winners\n for winner in s.winners:\n winner.deq()\n\n #-----------------------------------------------------------------------\n # route_compute\n #-----------------------------------------------------------------------\n # dimension-ordered (x then y) routing algorithm\n def route_compute( s, dest ):\n\n x_dest = dest.uint() % s.xnodes\n y_dest = dest.uint() / s.xnodes\n\n if x_dest < s.x: return s.WEST\n elif x_dest > s.x: return s.EAST\n elif y_dest < s.y: return s.NORTH\n elif y_dest > s.y: return s.SOUTH\n else:\n assert x_dest == s.x\n assert y_dest == s.y\n return s.TERM\n\n #-----------------------------------------------------------------------\n # arbitrate\n #-----------------------------------------------------------------------\n # round robin arbitration algorithm\n def arbitrate( s, output ):\n first = s.priorities[ output ]\n order = range( 5 )[first:] + range( 5 )[:first]\n #print(\"arbitrating for r:\", s.id_, \"out:\", output, order,)\n for i in order:\n request_q = s.input_buffers[ i ]\n if not request_q.is_empty():\n if s.route_compute( request_q.peek().dest ) == output:\n #print(\"*** i\", i, \"wins! dest\", s.route_compute(request_q[0].dest),)\n s.priorities[ output ] = ( i + 1 ) % 5\n s.winners.append( request_q )\n return request_q.peek()\n #print(\"NO WINNER\")\n\n\n #-----------------------------------------------------------------------\n # line_trace\n #-----------------------------------------------------------------------\n def line_trace( s ):\n\n router_traces = []\n for i in range( 5 ):\n in_str = s.in_[ i ].to_str( s.in_[ i ].msg.payload )\n out_str = s.out[ i ].to_str( s.out[ i ].msg.payload )\n router_traces += ['{} {}'.format( in_str, out_str ) ]\n\n return '|'.join( router_traces )\n\n\n\n","sub_path":"net/simple_mesh/MeshRouterCL.py","file_name":"MeshRouterCL.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"334005801","text":"'''\n Created By: Nick Kane\n Date: 05/20/2017\n Desc: Python3 program\n \n Assignment-1C info:\n - In part B, you had a chance to explore how both the percentage of your salary that you save each month\n and your annual raise affect how long it takes you to save for a down payment. 
This is nice, but suppose\n you want to set a particular goal, e.g. to be able to afford the down payment in three years. How much\n should you save each month to achieve this? In this problem, you are going to write a program to answer\n that quesiton. To simplify things, assume:\n\n 1) Your semi-annual raise is .07 (7%)\n 2) Your investment have an annual return of 0.04 (4%)\n 3) The down payment is 0.25 (25%) of the cost of the housea\n 4) The cost of the house that you are saving for is $1M\n\n - You are going to try and find the best rate of savings to achieve a down payment on a $1M house in 36 months.\n Since hitting this exactly is a challenge, we simply want your savings to be within $100 of the required down\n payment. \n\n - Write a program to calculate the best savings rate, as a function of your starting salary. You should use \n bisection search to help you do this efficiently. You should keep track of the number of steps it takes your\n bisections search to finish. You should be able to reuse some of the code you wrote for part B in this problem.\n\n - Because we are searching for a value that is in principle a float, we are going to limit ourselves to two\n decimals of accuracy (i.e., we may want to save at 7.04% -- or 0.0704 in decimal - but we are not going to worry\n about the different between 7.041% and 7.039%). This means we can search for an integer between 0 and 10000\n (using integer division), and then convert it to a decimal percentage (using float division) to use when we are \n calculating the current_savings after 36 months. By using this range, there are only a finite number of numbers\n that we are searching over, as opposed to the infinite number of decimals between 0 and 1. This range will help\n prevent infinite loops. The reason we use 0 to 10000 is to account for two additional decimal places in the range\n 0% to 100%. Your code should print out a decimal (e.g., 0.0704 for 7.04%)\n\n - Try different inputs for your starting salary, and see how the percentage you need to save changes to reach your\n desired down payment. Also keep in mind it may not be possible to save a down payment in a year and a half for some\n salaries. 
In this case your function should notify the user that it is not possible to save for the down payment in\n 36 months with a print statement.\n \n'''\n\n# debug output\ndebug = False\n\n# define main function\ndef main():\n\n global debug\n\n starting_salary = float(input('Enter in annual salary: '))\n\n # 0 is 0 percent\n min_rate = 0\n\n # 10000 is 100 percent\n max_rate = 10000\n\n # half the min + the max\n portion_saved = ((min_rate + max_rate) / 2)\n\n # static cost is 1M\n total_cost = 1000000\n\n # static raise rate is 0.07 or 7%\n raise_rate = 0.07\n \n down_payment = (total_cost * 0.25)\n annual_return_rate = 0.04\n\n best_rate = 0\n search_count = 0\n\n while (max_rate - min_rate) > 1:\n annual_salary = starting_salary\n monthly_income = (annual_salary / 12)\n monthly_deposit = (monthly_income * (portion_saved / 10000))\n current_savings = 0\n search_count += 1\n\n for month in range(1, 37):\n current_savings += ((current_savings * annual_return_rate) / 12)\n current_savings += monthly_deposit\n \n if debug:\n print('month ', month, 'cs: ', current_savings)\n\n if abs(current_savings - down_payment) < 100:\n # current savings is within 100 \n # found a rate\n min_rate = max_rate\n best_rate = portion_saved\n break;\n elif current_savings > (down_payment + 100):\n # current savings exceeds the amount needed\n break;\n\n if month % 6 == 0:\n annual_salary += (annual_salary * raise_rate)\n monthly_income = (annual_salary / 12)\n monthly_deposit = (monthly_income * (portion_saved / 10000))\n\n\n if debug: \n print('before ....')\n print('rate: ', portion_saved)\n print('min: ', min_rate)\n print('max: ', max_rate)\n print('')\n \n if current_savings < (down_payment - 100):\n # increase min rate\n min_rate = portion_saved\n elif current_savings > (down_payment + 100):\n # increase max rate\n max_rate = portion_saved\n\n portion_saved = ((max_rate + min_rate) // 2)\n\n if debug:\n print('after ....')\n print('rate: ', portion_saved)\n print('min: ', min_rate)\n print('max: ', max_rate)\n print('')\n\n if best_rate > 0:\n # found a rate\n print('Best Rate: ', best_rate / 10000)\n print('Search Count: ', search_count)\n else:\n print('Could not find a rate for 36 months :(')\n\n# invoke main function\nmain()\n\n","sub_path":"Electrical-Engineering-and-Computer-Science/6.0001/assignments/assignment-1/ps1c.py","file_name":"ps1c.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595597690","text":"from flask import Flask\r\nfrom flask import render_template, jsonify, request, redirect, url_for, send_file, session\r\nfrom flask_session import Session\r\nfrom tempfile import mkdtemp\r\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\r\nfrom pathlib import Path\r\n\r\nimport os\r\n\r\n\r\nfrom Yt_P import get_media, zipping, yt_cal\r\n\r\napp = Flask(__name__)\r\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\r\n\r\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\r\napp.config[\"SESSION_PERMANENT\"] = False\r\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\r\nSession(app)\r\n\r\n\r\nfinaldata=[]\r\ntemp=[]\r\nk = [0]\r\n\r\n@app.route('/',methods=[\"GET\", \"POST\"])\r\ndef a():\r\n if request.method == \"POST\":\r\n data = yt_cal(request.form.get(\"in\"))\r\n finaldata.append(data)\r\n return redirect(\"/medi\")\r\n else:\r\n finaldata.clear()\r\n return render_template(\"index.html\")\r\n\r\n@app.route('/medi',methods=[\"GET\", \"POST\"])\r\ndef d():\r\n if 
request.method == \"POST\":\r\n if request.form.get(\"in2\"):\r\n data = yt_cal(request.form.get(\"in2\"))\r\n finaldata[0] = data\r\n return render_template(\"medi.html\", data = data)\r\n elif request.form.get(\"fname\"):\r\n temp.clear()\r\n temp.append(request.form.get(\"fname\"))\r\n k[0] = 0\r\n return redirect('/play')\r\n else:\r\n return render_template(\"medi.html\", data = finaldata[0])\r\n\r\n@app.route('/play',methods=[\"GET\",\"POST\"])\r\ndef e():\r\n if request.method == \"POST\":\r\n if request.form['name']==\"1\":\r\n attempted_url = request.form[\"url\"]\r\n if attempted_url != \"\":\r\n result_id = get_media(attempted_url)\r\n session[\"url\"] = attempted_url\r\n session[\"id\"] = result_id\r\n filename = request.form[\"title\"]\r\n session[\"filename\"] = filename\r\n # return render_template('material-life.html', title = \"Success {}\".format(title))\r\n # return render_template('material-life.html', title = result_id)\r\n return jsonify(data = filename)\r\n else:\r\n data = yt_cal(request.form['name'])\r\n return jsonify(data = data)\r\n elif len(temp)==1 and k[0] == 0:\r\n k[0] = 1\r\n for data in finaldata[0]:\r\n if data['id'] == int(temp[0]):\r\n temp2 = data\r\n return render_template(\"play.html\",data = temp2)\r\n\r\n@app.route(\"/download/\")\r\ndef return_file():\r\n if True:\r\n filename = session.get(\"filename\")\r\n filename_formatted = filename + \".mp3\"\r\n #location = Path(\"media/Audio downloads/{}.mp3\".format(session.get(\"id\")))\r\n #a = format(session.get(\"id\"))\r\n #location = \"C:/Users/91704/OneDrive/Documents/Project/media/Audio downloads/{}.mp3\".format(session.get(\"id\"))\r\n location = \"media/Audio downloads/{}.mp3\".format(session.get(\"id\"))\r\n print(filename_formatted)\r\n return send_file(\r\n location, attachment_filename=filename_formatted, as_attachment=True\r\n )\r\n\r\n\"\"\"def errorhandler(e):\r\n if not isinstance(e, HTTPException):\r\n e = InternalServerError()\r\n return apology(e.name, e.code)\r\n\r\n\r\n# Listen for errors\r\nfor code in default_exceptions:\r\n app.errorhandler(code)(errorhandler)\"\"\"\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50592889","text":"import tempfile\nimport argparse\nimport logging\nimport threading\nimport os\n\nfrom ocs_ci.framework import config\nfrom ocs_ci.ocs.constants import CLEANUP_YAML, TEMPLATE_CLEANUP_DIR\nfrom ocs_ci.utility.utils import run_cmd\nfrom ocs_ci.utility import templating\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef cleanup(cluster_name, cluster_id):\n \"\"\"\n Cleanup existing cluster in AWS\n\n Args:\n cluster_name (str): Name of the cluster\n cluster_id (str): Cluster id to cleanup\n\n \"\"\"\n data = {'cluster_name': cluster_name, 'cluster_id': cluster_id}\n template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR)\n cleanup_template = template.render_template(CLEANUP_YAML, data)\n cleanup_path = tempfile.mkdtemp(prefix='cleanup_')\n cleanup_file = os.path.join(cleanup_path, 'metadata.json')\n with open(cleanup_file, \"w\") as temp:\n temp.write(cleanup_template)\n bin_dir = os.path.expanduser(config.RUN['bin_dir'])\n oc_bin = os.path.join(bin_dir, \"openshift-install\")\n logger.info(f\"cleaning up {cluster_id}\")\n run_cmd(f\"{oc_bin} destroy cluster --dir {cleanup_path} --log-level=debug\")\n\n\ndef main():\n parser = 
argparse.ArgumentParser(description='Cleanup AWS Resource')\n    parser.add_argument(\n        '--cluster',\n        nargs=1,\n        action='append',\n        required=True,\n        help=\"Cluster name tag\"\n    )\n    logging.basicConfig(level=logging.DEBUG)\n    args = parser.parse_args()\n    procs = []\n    for id in args.cluster:\n        cluster_name = id[0].rsplit('-', 1)[0]\n        logger.info(f\"cleaning up {id[0]}\")\n        proc = threading.Thread(target=cleanup, args=(cluster_name, id[0]))\n        proc.start()\n        procs.append(proc)\n    for p in procs:\n        p.join()\n","sub_path":"ocs_ci/cleanup/aws/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"610594515","text":"import threading\nimport time\n\nsem = threading.Semaphore(4)\n\n\ndef gothread():\n    with sem:\n        for i in range(8):\n            print(threading.current_thread().name, i)\n            time.sleep(1)\n\n\nfor i in range(5):\n    threading.Thread(target=gothread).start()\n","sub_path":"huodong/act.py","file_name":"act.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"331808049","text":"#!/usr/bin/python3\n'''\n    Implementation of the State class\n'''\nimport models\nfrom models.base_model import BaseModel, Base\nfrom models.city import City\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.orm import relationship\nimport os\n\n\nclass State(BaseModel, Base):\n    '''\n        Implementation for the State.\n    '''\n    __tablename__ = \"states\"\n\n    if os.getenv(\"HBNB_TYPE_STORAGE\") != \"db\":\n        name = \"\"\n\n        @property\n        def cities(self):\n            '''\n                getter attribute cities that returns the list of City\n            '''\n            temp_list = []\n            for c in models.storage.all(City).values():\n                if c.state_id == self.id:\n                    temp_list.append(c)\n            return temp_list\n\n    else:\n        name = Column(String(128), nullable=False)\n        cities = relationship(\"City\",\n                              backref=\"state\",\n                              cascade=\"all, delete-orphan\")\n","sub_path":"models/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"67324487","text":"from unlock import PygletTextLabel, BellRingTextLabelDecorator\nfrom unlock import PygletWindow, Canvas, UnlockController, AlternatingBinaryStateModel\n\nimport unittest\nimport pyglet\nimport multiprocessing as mp\n\n\nclass PygletTextTests(unittest.TestCase): \n    def testPygletText(self):\n        window = PygletWindow(fullscreen=False, show_fps=True)\n        canvas = Canvas.create(window.width, window.height)\n        model = AlternatingBinaryStateModel()\n        text_label = PygletTextLabel(model, canvas, 'the text', canvas.width / 2.0, canvas.height / 2.0)\n        bell_ring_text_label_decorator = BellRingTextLabelDecorator(text_label) \n        controller = UnlockController(window, [bell_ring_text_label_decorator], canvas)\n        controller.make_active()\n        window.start() \n        window.close()\n        \n        \ndef getSuite():\n    return unittest.makeSuite(PygletTextTests,'test')\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n","sub_path":"unlock/view/test/test_pyglet_text.py","file_name":"test_pyglet_text.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"269881045","text":"import RPi.GPIO as GPIO\n\nclass IR6SensorModule():\n    \n    def __init__(self,pin1, pin2, pin3, 
pin4, pin5, pin6];\n self.sensorReadings = [0,0,0,0,0,0];\n \n GPIO.setmode(GPIO.BCM)\n for ir in self.irsensors:\n GPIO.setup(ir, GPIO.IN);\n\n def readSensor(self):\n res = b'';\n for i in range(len(self.irsensors)):\n self.sensorReadings[i] = GPIO.input(self.irsensors[i]);\n if(self.sensorReadings[i] == 1):\n res = res + '1';\n else:\n res = res + '0';\n return res;\n\n def __del__(self):\n GPIO.cleanup();\n\n\n","sub_path":"scripts/IR6SensorModule.py","file_name":"IR6SensorModule.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69904171","text":"\"\"\"Build an object to represent a Conductor submission.\"\"\"\n\nimport datetime\nimport hou\nfrom conductor.houdini.hda import notifications_ui, types\nfrom conductor.houdini.hda.job import Job\nfrom conductor.native.lib import data_block\n\n\nclass Submission(object):\n \"\"\"class Submission holds all data needed for a submission.\n\n A Submission contains many Jobs, and those Jobs contain\n many Tasks. A Submission can provide the correct args to\n send to Coductor, or it can be used to create a dry run\n to show the user what will happen. A Submission also\n manages a list of environment tokens that the user can\n access as $ variables, similar to Houdini Local\n variables, in order to build strings in the UI such as\n commands and job titles.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"Collect member data from the Houdini UI.\n\n If the submission has been instantiated from a\n conductor::job node, then the submission data will\n be pulled from the submission tab, and the same node\n will be used as the only Job. Both self._node and\n self._jobs will point to the same node. If instead\n it is instantiated from a conductor::submitter\n node, then it will provide top level submission data\n and the Jobs (self._jobs) will built from the\n conductor::submitter's input nodes.\n\n * Generate a timestamp which will be common to all jobs.\n * Get the basename of the file in case its needed as a token.\n * Scene name - see notes on scene name in the Job.\n * Get upload flags and notification data.\n * Get the project.\n\n After _setenv has been called, the Submission level token\n variables are valid and calls to eval string attributes will\n correctly resolve where those tokens have been used. 
This is\n why we eval jobs after the call to _setenv()\n \"\"\"\n self._node = node\n if types.is_job_node(self._node):\n self._nodes = [node]\n else:\n self._nodes = node.inputs()\n\n self._use_timestamped_scene = bool(\n self._node.parm(\"use_timestamped_scene\").eval())\n\n self._timestamp = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n hou.putenv(\"CT_TIMESTAMP\", self._timestamp)\n self._scene = self._node.parm(\"scene_file\").eval()\n\n self._upload = {\n \"local\": bool(self._node.parm(\"local_upload\").eval()),\n \"force\": bool(self._node.parm(\"force_upload\").eval()),\n \"only\": bool(self._node.parm(\"upload_only\").eval())\n }\n\n self._project = self._get_project()\n\n self._tokens = self._setenv()\n\n self._notifications = notifications_ui.get_notifications(self._node)\n\n self._jobs = []\n for node in self._nodes:\n job = Job.create(node, self._tokens, self._scene)\n self._jobs.append(job)\n\n def _get_project(self):\n \"\"\"Get the project name by looking up its ID.\n\n In case the current project is no longer in the list\n of projects, throw an error.\n \"\"\"\n project_id = self._node.parm('project').eval()\n projects = data_block.for_houdini().projects()\n project_names = [project[\"name\"]\n for project in projects if project['id'] == project_id]\n if not project_names:\n raise hou.InvalidInput(\n \"%s %s is an invalid project.\" %\n self._node.name(), project_id)\n return {\n \"id\": project_id,\n \"name\": project_names[0]\n }\n\n def _setenv(self):\n \"\"\"Env tokens are variables to help the user build strings.\n\n The user interface has fields for strings such as\n job title, task command, metadata. The user can use\n these tokens, prefixed with a $ symbol, to build\n those strings. Tokens at the Submission level are\n also available in Job level fields, and likewise\n tokens at the Job level are available in Task level\n fields. However, it makes no sense the other way,\n for example you can't use a chunk token (available\n at Task level) in a Job title because a chunk\n changes for every task.\n\n We use hou.putenv() which basically sets these as\n global env vars in the scene. Unfortunately thats the\n only way because you can only attach variables local\n to nodes through the Houdini devkit.\n\n Once tokens are set, strings using them are expanded\n correctly. In fact we don't need these tokens to be\n stored on the Submission object (or Job or Task) for\n the submission to succeed. The only reason we store\n them is to display them in a dry-run scenario.\n \"\"\"\n tokens = {}\n tokens[\"CT_TIMESTAMP\"] = self._timestamp\n tokens[\"CT_SUBMITTER\"] = self._node.name()\n # tokens[\"CT_HIPBASE\"] = self._file[\"hipbase\"]\n tokens[\"CT_SCENE\"] = self._scene\n tokens[\"CT_PROJECT\"] = self.project_name\n\n for token in tokens:\n hou.putenv(token, tokens[token])\n\n return tokens\n\n def get_args(self):\n \"\"\"Prepare the args for submission to conductor.\n\n This is a list where there is one args object for\n each Conductor job. The project, notifications, and\n upload args are the same for all jobs, so they are\n set here. 
Other args are provided by Job objects and\n updated with these submission level args to form\n complete jobs.\n \"\"\"\n result = []\n submission_args = {}\n\n submission_args[\"local_upload\"] = self._upload[\"local\"]\n submission_args[\"upload_only\"] = self._upload[\"only\"]\n submission_args[\"force\"] = self._upload[\"force\"]\n submission_args[\"project\"] = self.project_name\n\n if self.email_addresses:\n addresses = \", \".join(self.email_addresses)\n submission_args[\"notify\"] = {\"emails\": addresses, \"slack\": []}\n else:\n submission_args[\"notify\"] = None\n\n for job in self._jobs:\n args = job.get_args()\n args.update(submission_args)\n result.append(args)\n return result\n\n @property\n def local_upload(self):\n \"\"\"local_upload.\"\"\"\n return self._upload[\"local\"]\n\n @property\n def force_upload(self):\n \"\"\"force_upload.\"\"\"\n return self._upload[\"force\"]\n\n @property\n def upload_only(self):\n \"\"\"upload_only.\"\"\"\n return self._upload[\"only\"]\n\n @property\n def scene(self):\n \"\"\"scene.\"\"\"\n return self._scene\n\n @property\n def node_name(self):\n \"\"\"node_name.\"\"\"\n return self._node.name()\n\n @property\n def project_id(self):\n \"\"\"project_id.\"\"\"\n return self._project[\"id\"]\n\n @property\n def project_name(self):\n \"\"\"project_name.\"\"\"\n return self._project[\"name\"]\n\n @property\n def filename(self):\n \"\"\"filename.\"\"\"\n return hou.hipFile.name()\n\n @property\n def basename(self):\n \"\"\"basename.\"\"\"\n return hou.hipFile.basename()\n\n @property\n def unsaved(self):\n \"\"\"unsaved.\"\"\"\n return hou.hipFile.hasUnsavedChanges()\n\n @property\n def use_timestamped_scene(self):\n \"\"\"use_timestamped_scene.\"\"\"\n return self._use_timestamped_scene\n\n @property\n def tokens(self):\n \"\"\"tokens.\"\"\"\n return self._tokens\n\n @property\n def jobs(self):\n \"\"\"jobs.\"\"\"\n return self._jobs\n\n def has_notifications(self):\n \"\"\"has_notifications.\"\"\"\n return bool(self._notifications)\n\n @property\n def email_addresses(self):\n \"\"\"email_addresses.\"\"\"\n if not self.has_notifications():\n return []\n return self._notifications[\"email\"][\"addresses\"]\n\n @property\n def email_hooks(self):\n \"\"\"email_hooks.\"\"\"\n if not self.has_notifications():\n return []\n return self._notifications[\"email\"][\"hooks\"]\n","sub_path":"conductor/houdini/hda/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"525594940","text":"\"\"\"Tensorflow local version.\n\nTo start the tensorboard, run:\nlearning/brain/tensorboard/tensorboard.sh --port 2222 --logdir /tmp/sug_logs\n\"\"\"\n\nimport game_pool\nimport tensorflow as tf\nimport logging\nimport logging.handlers\nimport time\nimport importlib\nfrom tensorflow.python.training.summary_io import SummaryWriter\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('model_name', 'game_model',\n 'Default model name, use game_model when no value set.')\nflags.DEFINE_string('output_path', '/tmp/log/',\n 'Tensorflow log will be under /tmp/[output_path]')\nflags.DEFINE_string('exp_name', 'test1', 'The experiment name')\nflags.DEFINE_integer('generator_type', 2, 'Generator type, 1:Model, 2:Random(default)')\n\nlogger = logging.getLogger(FLAGS.exp_name)\n\nhandler = logging.handlers.RotatingFileHandler(\n '%s/log/%s.log' % (FLAGS.output_path, FLAGS.exp_name),\n maxBytes = 1024*1024,\n backupCount = 5)\n\nfmt = 
'%(asctime)s - %(name)s - %(message)s'\nformatter = logging.Formatter(fmt)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(formatter)\nlogger.addHandler(consoleHandler)\n\nlogger.setLevel(logging.DEBUG)\n\n\ndef scalar_summary_detail(scalar_name, scalar_tensor, averages):\n \"\"\"Record the scalar op value, which need to be triggered.\"\"\"\n tf.summary.scalar(scalar_name, scalar_tensor)\n scalar_tensor_log = tf.log(scalar_tensor)\n tf.summary.scalar(scalar_name + '_log', scalar_tensor_log)\n averages = tf.train.ExponentialMovingAverage(0.95)\n average_op = averages.apply([scalar_tensor_log])\n tf.summary.scalar(scalar_name + '_log_average',\n averages.average(scalar_tensor_log))\n # Please note, this op must be triggered explicitly.\n return average_op\n\n\ndef train():\n epoch_size = 32\n batch_size = 128\n decay_iteration = 3000\n total_iteration = decay_iteration * 20 # 8,000,000\n game_model = importlib.import_module(FLAGS.model_name)\n\n \"\"\"Trains the game_model.\"\"\"\n with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n game_model_instance = game_model.GameModel(\"train\", batch_size)\n global_step = tf.Variable(0, trainable=False)\n averages = tf.train.ExponentialMovingAverage(0.95, global_step)\n\n # Get the score of each direction.\n raw_loss = game_model_instance.get_internal_variable().get(\"raw_loss\")\n # Record all summary data.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Track the moving averages of all trainable variables.\n epoch_loss = tf.sqrt(tf.reduce_sum(raw_loss))\n epoch_loss_op = scalar_summary_detail('epoch_loss', epoch_loss, averages)\n with tf.control_dependencies([epoch_loss_op]), tf.name_scope('update'):\n learning_rate = tf.train.exponential_decay(\n 0.1, global_step, epoch_size * decay_iteration, 0.8, staircase=True)\n tf.summary.scalar('learning_rate', learning_rate)\n opt = tf.train.GradientDescentOptimizer(learning_rate)\n grads = opt.compute_gradients(epoch_loss)\n train_op = opt.apply_gradients(grads, global_step=global_step)\n\n for var in game_model_instance.get_internal_variable().values():\n tf.summary.histogram(var.op.name, var)\n\n summary_average_score = tf.placeholder(tf.float32, shape=(), name=\"summary_average_score\")\n tf.summary.scalar(\"average_score\", summary_average_score)\n\n tf.summary.merge_all()\n summary_op = tf.summary.merge_all()\n summary_writer = SummaryWriter('%s/summary/%s/' % (FLAGS.output_path, FLAGS.exp_name), sess.graph)\n\n saver = tf.train.Saver(max_to_keep=100)\n init = tf.global_variables_initializer()\n logger.info(init)\n logger.info(\"model ready\")\n\n # Creates the game object, using the existing model.\n eval_game_obj = game_model.GameModel(\"eval\", 4, params_dict=game_model_instance.get_params_dict())\n pool = game_pool.GamePool(500000, sess, eval_game_obj)\n logger.info(\"gen graph ready\")\n sess.run(init)\n stat_info = pool.get_stat_info()\n pool.generate_training_data()\n for i in xrange(total_iteration):\n total_loss_value = 0.0\n start_time = time.time()\n pool.generate_training_data()\n for p in xrange(epoch_size):\n feed_dict = pool.create_external_feed_dict(game_model_instance)\n # Here is where most of time consuemd.\n # Core training function.\n _, epoch_loss_value, learning_rate_value = \\\n sess.run([train_op, epoch_loss, learning_rate],\n feed_dict=feed_dict)\n total_loss_value = total_loss_value + epoch_loss_value\n logger.info('time:%.3fs 
e_s:%d, e_n:%d lr:%.5f average_loss:%.3f' % (\n time.time() - start_time,\n epoch_size,\n i,\n learning_rate_value,\n total_loss_value / float(epoch_size)))\n\n if i % 300 == 0:\n saver.export_meta_graph('%s/checkpoint/%s/model.meta' % (FLAGS.output_path, FLAGS.exp_name))\n saver.save(sess,\n '%s/checkpoint/%s/model.ckpt' % (FLAGS.output_path, FLAGS.exp_name),\n global_step=global_step)\n logger.info(\"Training model saved\")\n if i % 10 == 0:\n stat_info = pool.get_stat_info()\n feed_dict[summary_average_score] = stat_info[1]\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, i)\n summary_writer.flush()\n stat_info = pool.get_stat_info_string()\n logger.info('[%s] [%s] Summary saved. %s' % (FLAGS.exp_name, game_model_instance.get_name(), stat_info))\n logger.info('[%s] Final stat_info:%s' % (FLAGS.exp_name, stat_info))\n\n\ndef main(unused_argv):\n \"\"\" Generate some debug info and start trainning. \"\"\"\n logger.info('exp_name:%s, generator_type:%d' % (FLAGS.exp_name, FLAGS.generator_type))\n logger.info('output_path:%s' % FLAGS.output_path)\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"game_train.py","file_name":"game_train.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"571427300","text":"from socket import *\nfrom time import ctime\nimport argparse\n\nHOST = ''\nPORT = 666\nBUFFSIZE = 1024\nADDR = (HOST, PORT)\n\ntcpServerSock = socket(AF_INET, SOCK_STREAM)\ntcpServerSock.bind(ADDR)\ntcpServerSock.listen(5)\n\nwhile True:\n\tprint('Waiting for connection...')\n\ttcpClientSock, addr = tcpServerSock.accept()\n\tprint('...connected from:', addr)\n\n\twhile True:\n\t\tdata = tcpClientSock.recv(BUFFSIZE)\n\t\tif not data:\n\t\t\tbreak\n\t\ttcpClientSock.send('[%s] %s' % (ctime(), data))\n\n\ttcpClientSock.close()\ntcpServerSock.close()\n","sub_path":"tcpServer.py","file_name":"tcpServer.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498064808","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.forms import ModelForm\nfrom django import forms\nfrom datetime import date, datetime\nimport datetime\nfrom django.contrib.sites.shortcuts import get_current_site\n\nfrom task.models import TGroup\n\nclass GroupForm(forms.ModelForm):\n class Meta:\n model = TGroup\n exclude = ['id', 'user', 'sort', 'active']\n\n\n#============================================================================\ndef edit_context(request, form, pk, debg):\n groups = TGroup.objects.filter(user = request.user.id).order_by('name')\n context = {'groups': groups, \n 'form': form, \n 'pid': pk,\n 'debg': debg,\n 'title': 'Группы',\n 'site_header': get_current_site(request).name,\n }\n return context\n\n#============================================================================\ndef do_grps(request, pk):\n if (request.method == 'GET'):\n if (pk > 0):\n grp = get_object_or_404(TGroup, pk = pk)\n form = GroupForm(instance = grp)\n else:\n form = GroupForm(initial = {'name': ''})\n context = edit_context(request, form, pk, 'get-0')\n return render(request, 'task/grps.html', context)\n else:\n action = request.POST.get('action', False)\n \n act = 0\n if (action == 'Отменить'):\n act = 1\n else:\n if (action == 'Добавить') or (action == '+'):\n act = 
2\n else:\n if (action == 'Сохранить'):\n act = 3\n else:\n if (action == 'Удалить'):\n act = 4\n else:\n act = 5\n\n if (act > 1):\n form = GroupForm(request.POST)\n\n if not form.is_valid():\n # Ошибки в форме, отобразить её снова\n context = edit_context(request, form, pk, 'post-error')# + str(form.non_field_errors))\n return render(request, 'task/grps.html', context)\n else:\n t = form.save(commit = False)\n\n if (act == 2):\n t.user = request.user\n t.save()\n\n if (act == 3):\n t.id = pk\n t.user = request.user\n t.save()\n\n if (act == 4):\n t = get_object_or_404(TGroup, id = pk)\n t.delete()\n\n if (pk > 0) or (act == 1):\n return HttpResponseRedirect(reverse('task:grps_view'))\n else:\n return render(request, 'task/grps.html', context)\n\n","sub_path":"task/v_grps.py","file_name":"v_grps.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309088251","text":"import pyspeckit\nfrom spectral_cube import SpectralCube\nfrom astropy import units as u\nimport pylab as pl\n\ncube11 = SpectralCube.read('W51_NH3_11.fits').to(u.K)\ncube11 = cube11.with_spectral_unit(u.km/u.s, velocity_convention='radio')\n\nn11cube = pyspeckit.Cube(cube = cube11)\n\ncube22 = SpectralCube.read('W51_NH3_22.fits').to(u.K)\ncube22 = cube22.with_spectral_unit(u.km/u.s, velocity_convention='radio')\n\nn22cube = pyspeckit.Cube(cube = cube22)\n\ncube44 = SpectralCube.read('W51_NH3_44.fits').to(u.K)\ncube44 = cube44.with_spectral_unit(u.km/u.s, velocity_convention='radio')\n\nn44cube = pyspeckit.Cube(cube = cube44)\n\ncube55 = SpectralCube.read('W51_NH3_55.fits').to(u.K)\ncube55 = cube55.with_spectral_unit(u.km/u.s, velocity_convention='radio')\n\nn55cube = pyspeckit.Cube(cube = cube55)\n\n#Extracting Spectrum\nx = 128\ny = 154\n\nsp11 = n11cube.get_spectrum(x,y) \n\nsp22 = n22cube.get_spectrum(x,y) \n\nsp44 = n44cube.get_spectrum(x+25,y+25)\n\nsp55 = n55cube.get_spectrum(x+25,y+25)\n\n#sp11.plotter()\n#sp22.plotter()\n#sp33.plotter() #Plots spectrum\n\nsp11.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['oneone']\nsp22.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['twotwo']\nsp44.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['fourfour']\nsp55.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['fivefive']\n\nsp11.xarr.velocity_convention='radio'\nsp22.xarr.velocity_convention='radio'\nsp44.xarr.velocity_convention='radio'\nsp55.xarr.velocity_convention='radio'\n\ninput_dict = {'oneone':sp11, 'twotwo':sp22, 'fourfour':sp44, 'fivefive':sp55}\n\nspf = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict, dobaseline=False)\n\n\n\npl.show()\n","sub_path":"data_analysis/ammonia_fitting_W51.py","file_name":"ammonia_fitting_W51.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"555709088","text":"###########################################################################\n# This File is the base Hand detection module and needs to be imported in #\n# all image processing py scripts and there we will create a instance of #\n# handsDetector class and use its functions #\n###########################################################################\n\n#### Imports ####\nimport cv2\nimport mediapipe as mp\nimport time\n\n#### Main class we will use in other py files ####\nclass handsDetector():\n\n #### The innit function with standard mediapipe variables ####\n def __init__(self, mode=False, maxHands=2, 
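# --- annotation (not part of the record above) ---
# Sketch: the nested if/else ladder in do_grps() above maps the posted button
# caption to an act code; a dict lookup is an equivalent, flatter form
# (assumption: the captions are exactly the Russian labels checked above).
ACTIONS = {'Отменить': 1, 'Добавить': 2, '+': 2, 'Сохранить': 3, 'Удалить': 4}
assert ACTIONS.get('Сохранить', 5) == 3
assert ACTIONS.get('unknown', 5) == 5   # anything else falls through to 5
# --- end annotation ---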
detectionCon=0.5, trackCon=0.5):\n self.mode = mode\n self.maxHands = maxHands\n self.detectionCon = detectionCon\n self.trackCon = trackCon\n\n self.mpHands = mp.solutions.hands\n self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)\n self.mpDraw = mp.solutions.drawing_utils\n\n #### Finds Hands and draws lines on them ####\n def findHands(self, img, draw=True):\n\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n self.results = self.hands.process(imgRGB)\n if self.results.multi_hand_landmarks:\n for handLms in self.results.multi_hand_landmarks:\n if draw:\n self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)\n return img\n\n #### Find the hand with given hand no and highlits it using blue circles ####\n def findPosition(self, img, handNo=0, draw=True):\n\n lmList = []\n if self.results.multi_hand_landmarks:\n myHand = self.results.multi_hand_landmarks[handNo]\n\n for id, lm in enumerate(myHand.landmark):\n h, w, z = img.shape\n cx, cy = int(lm.x * w), int(lm.y * h)\n lmList.append([id, cx, cy])\n if draw:\n cv2.circle(img, (cx, cy), 5, (255, 255, 0), cv2.FILLED)\n\n return lmList\n\n #### The function to detect the direction the hand is pointing to ####\n def direction_hand(self, img, thres, ind_1, ind_2, flag):\n img = self.findHands(img)\n lmList = self.findPosition(img)\n if len(lmList) != 0:\n x = (lmList[ind_1][1] - lmList[ind_2][1])\n y = (lmList[ind_1][2] - lmList[ind_2][2])\n # print(x, y)\n\n if y > thres:\n return 'd'\n\n elif y < -thres:\n return 'u'\n\n elif x > thres:\n return 'l'\n\n elif x < -thres:\n return 'r'\n\n return flag\n\n #### The function to detect and return the hand no for left and right hand ####\n def left_or_right(self, img):\n img = self.findHands(img, draw=False)\n lmList = self.findPosition(img, draw=False)\n if len(lmList) != 0:\n num_hands = len(self.results.multi_handedness)\n if num_hands != 2:\n # print(num_hands)\n # print(\"I got removed Here\")\n return False, 2, 2\n for id, classification in enumerate(self.results.multi_handedness):\n # print(\"I got here !!!!!!!!!!\")\n if classification.classification[id].label == \"Left\":\n l = classification.classification[id].index\n return True, l, 1 - l\n elif classification.classification[id].label == \"Right\":\n r = classification.classification[id].index\n return True, 1 - r, r\n else:\n print(\"What the hell just happened....heh?\")\n return False, 2, 2\n return False, 2, 2\n \n #### The function to detect whether a finger is open or closed ####\n # tip_num => 8, 12, 16, 20\n def num_fingers(self, img, hand_flag, tip_num):\n\n flag, left, right = self.left_or_right(img)\n if flag:\n if hand_flag == \"l\":\n hand_index = left\n elif hand_flag == \"r\":\n hand_index = right\n else:\n print(\"Please check the hand_flag Parameter\")\n return False\n lmList = self.findPosition(img, draw=False, handNo=hand_index)\n\n if lmList[tip_num][2] < lmList[tip_num - 2][2]:\n return True\n\n return False\n\n\ndef main():\n print(\"Please dont run this file!!!!\")\n print(\"This is a package, it contains functions for your projects\")\n print(\"Download it and import the handsDetector() class in another python project\")\n print(\"Enjoy!!!\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"HandTrackingModule.py","file_name":"HandTrackingModule.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"618765688","text":"from discord.ext import commands\nfrom 
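# --- annotation (not part of the record above) ---
# Usage sketch for the handsDetector class above, following its own docstring
# advice to import it from another script (assumptions: a webcam at index 0
# and the module saved as HandTrackingModule.py, per its sub_path).
import cv2
from HandTrackingModule import handsDetector

detector = handsDetector(maxHands=1)
cap = cv2.VideoCapture(0)
while True:
    ok, img = cap.read()
    if not ok:
        break
    img = detector.findHands(img)          # draws the landmark skeleton
    lmList = detector.findPosition(img)    # [[id, x, y], ...] or []
    if lmList:
        print(lmList[8])                   # landmark 8 = index fingertip
    cv2.imshow('hands', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
# --- end annotation ---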
discord.ext.commands.cooldowns import BucketType\nfrom .utils import config\nfrom .utils import checks\nfrom .utils.config import getPhrase\nimport discord\nimport random\n\n\ndef battling_off(player_id):\n battling = config.get_content('battling') or {}\n\n # Create a new dictionary, exactly the way the last one was setup\n # But don't include any that have the player's ID provided\n battling = {p1: p2 for p1, p2 in battling.items() if not p2 == player_id and not p1 == player_id}\n\n config.save_content('battling', battling)\n\n\ndef user_battling(ctx, player2=None):\n battling = config.get_content('battling')\n\n # If no one is battling, obviously the user is not battling\n if battling is None:\n return False\n # Check if the author is battling\n if ctx.message.author.id in battling.values() or ctx.message.author.id in battling.keys():\n return True\n # Check if the player2 was provided, if they are check if they're in the list\n if player2 and (player2.id in battling.values() or player2.id in battling.keys()):\n return True\n # If neither are found, no one is battling\n return False\n\n\ndef update_battle_records(winner, loser):\n # We're using the Harkness scale to rate\n # http://opnetchessclub.wikidot.com/harkness-rating-system\n battles = config.get_content('battle_records')\n if battles is None:\n battles = {winner.id: \"1-0\", loser.id: \"0-1\"}\n\n # Start ratings at 1000 if they have no rating\n winner_stats = battles.get(winner.id) or {}\n winner_rating = winner_stats.get('rating') or 1000\n\n loser_stats = battles.get(loser.id) or {}\n loser_rating = loser_stats.get('rating') or 1000\n\n # The scale is based off of increments of 25, increasing the change by 1 for each increment\n # That is all this loop does, increment the \"change\" for every increment of 25\n # The change caps off at 300 however, so break once we are over that limit\n difference = abs(winner_rating - loser_rating)\n rating_change = 0\n count = 25\n while count <= difference:\n if count > 300:\n break\n rating_change += 1\n count += 25\n\n # 16 is the base change, increased or decreased based on whoever has the higher current rating\n if winner_rating > loser_rating:\n winner_rating += 16 - rating_change\n loser_rating -= 16 - rating_change\n else:\n winner_rating += 16 + rating_change\n loser_rating -= 16 + rating_change\n\n # Just increase wins/losses for each person, making sure it's at least 0\n winner_wins = winner_stats.get('wins') or 0\n winner_losses = winner_stats.get('losses') or 0\n loser_wins = loser_stats.get('wins') or 0\n loser_losses = loser_stats.get('losses') or 0\n winner_wins += 1\n loser_losses += 1\n\n # Now save the new wins, losses, and ratings\n winner_stats = {'wins': winner_wins, 'losses': winner_losses, 'rating': winner_rating}\n loser_stats = {'wins': loser_wins, 'losses': loser_losses, 'rating': loser_rating}\n battles[winner.id] = winner_stats\n battles[loser.id] = loser_stats\n\n return config.save_content('battle_records', battles)\n\n\nclass Interaction:\n \"\"\"Commands that interact with another user\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(pass_context=True, no_pm=True, invoke_without_command=True)\n @commands.cooldown(1, 60, BucketType.user)\n @checks.custom_perms(send_messages=True)\n async def battle(self, ctx, player2: discord.Member):\n \"\"\"Challenges the mentioned user to a battle\"\"\"\n if len(ctx.message.mentions) == 0:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_NO_USER_MENTIONED\").format(ctx.message.author.mention))\n 
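# --- annotation (not part of the record above) ---
# Worked example of the Harkness-style update in update_battle_records() above,
# as an equivalent closed form: one rating point per full 25-point gap, capped
# at 12 (the increment loop breaks once count exceeds 300).
def rating_delta(winner_rating, loser_rating):
    rating_change = min(abs(winner_rating - loser_rating) // 25, 12)
    if winner_rating > loser_rating:
        return 16 - rating_change   # expected result: smaller swing
    return 16 + rating_change       # upset (or equal ratings): larger swing

assert rating_delta(1000, 1100) == 20   # 1000 beats 1100 -> +20 / -20
assert rating_delta(1100, 1000) == 12   # favourite wins  -> +12 / -12
# Note: the command handlers further down call userBattling() and
# updateBattleRecords(), but the helpers are defined as user_battling() and
# update_battle_records(); only the snake_case names exist in this module.
# --- end annotation ---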
return\n if len(ctx.message.mentions) > 1:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_MULTIPLE_MENTIONS\").format(ctx.message.author.mention, getPhrase(\"INTERACTION:BATTLE\")))\n return\n if ctx.message.author.id == player2.id:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_PREVENT_SUICIDE\").format(ctx.message.author.mention))\n return\n if self.bot.user.id == player2.id:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_GODBOT\").format(ctx.message.author.mention))\n return\n if userBattling(ctx, player2):\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_IN_BATTLE\").format(ctx.message.author.mention, player2.mention))\n return\n\n # Add the author and player provided in a new battle\n battling = config.get_content('battling') or {}\n battling[ctx.message.author.id] = player2.id\n config.save_content('battling', battling)\n\n fmt = getPhrase(\"INTERACTION:BATTLE_REQUEST\")\n # Add a call to turn off battling, if the battle is not accepted/declined in 3 minutes\n config.loop.call_later(180, battling_off, ctx.message.author.id)\n await self.bot.say(fmt.format(ctx.message.author, player2, config.commandPrefix))\n await self.bot.delete_message(ctx.message)\n\n @commands.command(pass_context=True, no_pm=True)\n @checks.custom_perms(send_messages=True)\n async def accept(self, ctx):\n \"\"\"Accepts the battle challenge\"\"\"\n # Ensure that the author is actually in a battle, otherwise they can't accept one\n if not user_battling(ctx):\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_NOT_IN_BATTLE\").format(ctx.message.author.mention))\n return\n\n # This is an extra check to make sure that the author is the one being BATTLED\n # And not the one that started the battle \n battling = config.get_content('battling') or {}\n p1 = [p1_id for p1_id, p2_id in battling.items() if p2_id == ctx.message.author.id]\n if len(p1) == 0:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_NO_BATTLE_REQUEST\").format(ctx.message.author.mention))\n return\n\n battleP1 = discord.utils.find(lambda m: m.id == p1[0], ctx.message.server.members)\n battleP2 = ctx.message.author\n\n # Get a random win message from our list\n fmt = config.battleWins[random.SystemRandom().randint(0, len(config.battleWins) - 1)]\n # Due to our previous check, the ID should only be in the dictionary once, in the current battle we're checking\n battling_off(ctx.message.author.id)\n\n DarkscratchID = '106182485913690112'\n if battleP1.id == DarkscratchID:\n await self.bot.say(getPhrase(\"INTERACTION:BATTLE_GOOBORG_ALWAYS_WINS\").format(battleP1.mention, battleP2.mention))\n updateBattleRecords(battleP1, battleP2)\n if battleP2.id == DarkscratchID:\n await self.bot.say(getPhrase(\"INTERACTION:BATTLE_GOOBORG_ALWAYS_WINS\").format(battleP2.mention, battleP1.mention))\n updateBattleRecords(battleP2, battleP1)\n # Randomize the order of who is printed/sent to the update system\n # All we need to do is change what order the challengers are printed/added as a paramater\n if random.SystemRandom().randint(0, 1):\n await self.bot.say(fmt.format(battleP1.mention, battleP2.mention))\n update_battle_records(battleP1, battleP2)\n else:\n await self.bot.say(fmt.format(battleP2.mention, battleP1.mention))\n update_battle_records(battleP2, battleP1)\n\n await self.bot.delete_message(ctx.message)\n\n @commands.command(pass_context=True, no_pm=True)\n @checks.custom_perms(send_messages=True)\n async def decline(self, ctx):\n \"\"\"Declines the battle challenge\"\"\"\n if not user_battling(ctx):\n await 
self.bot.say(getPhrase(\"INTERACTION:ERROR_NOT_IN_BATTLE\").format(ctx.message.author.mention))\n return\n\n # This is an extra check to make sure that the author is the one being BATTLED\n # And not the one that started the battle \n battling = config.get_content('battling') or {}\n p1 = [p1_id for p1_id, p2_id in battling.items() if p2_id == ctx.message.author.id]\n if len(p1) == 0:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_NO_BATTLE_REQUEST\").format(ctx.message.author.mention))\n return\n\n battleP1 = discord.utils.find(lambda m: m.id == p1[0], ctx.message.server.members)\n battleP2 = ctx.message.author\n\n battling_off(ctx.message.author.id)\n await self.bot.say(getPhrase(\"INTERACTION:BATTLE_DECLINE\").format(battleP2.mention, battleP1.mention))\n await self.bot.delete_message(ctx.message)\n\n @commands.command(pass_context=True, no_pm=True)\n @commands.cooldown(3, 30, BucketType.user)\n @checks.custom_perms(send_messages=True)\n async def boop(self, ctx, boopee: discord.Member):\n \"\"\"Boops the mentioned person\"\"\"\n booper = ctx.message.author\n if len(ctx.message.mentions) == 0:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_NO_USER_MENTIONED\").format(ctx.message.author.mention))\n return\n if len(ctx.message.mentions) > 1:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_MULTIPLE_MENTIONS\").format(ctx.message.author.mention, getPhrase(\"INTERACTION:BOOP\")))\n return\n if boopee.id == booper.id:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_SELF_BOOP\").format(ctx.message.author.mention))\n return\n if boopee.id == self.bot.user.id:\n await self.bot.say(getPhrase(\"INTERACTION:ERROR_BOT_BOOP\").format(ctx.message.author.mention))\n return\n\n boops = config.get_content('boops') or {}\n\n # This is only used to print the amount of times they've booped someone\n # Set to 1 for the first time someone was booped\n amount = 1\n # Get all the booped stats for the author\n booper_boops = boops.get(ctx.message.author.id)\n # If the author does not exist in the dictionary, then he has never booped someone\n # Create a new dictionary with the amount \n if booper_boops is None:\n boops[ctx.message.author.id] = {boopee.id: 1}\n # If the booper has never booped the member provided, still add that user\n # To the dictionary with the amount of 1 to start it off\n elif booper_boops.get(boopee.id) is None:\n booper_boops[boopee.id] = 1\n boops[ctx.message.author.id] = booper_boops\n # Otherwise increment how many times they've booped that user\n else:\n amount = booper_boops.get(boopee.id) + 1\n booper_boops[boopee.id] = amount\n boops[ctx.message.author.id] = booper_boops\n\n config.save_content('boops', boops)\n fmt = getPhrase(\"INTERACTION:BOOPED\")\n await self.bot.say(fmt.format(booper, boopee, amount, \"s\" if amount > 1 else \"\"))\n await self.bot.delete_message(ctx.message)\n\n\ndef setup(bot):\n bot.add_cog(Interaction(bot))\n","sub_path":"cogs/interaction.py","file_name":"interaction.py","file_ext":"py","file_size_in_byte":10928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"4679027","text":"# 행복 왕국의 왕실 정원은 체스판과같은 8 X 8 좌표 평면이다.\n# 왕실 정원의 특정한 한 칸에 나이트가 서 있다. 나이트는 매우 충성스러운 신하로서 매일 무술을 연마한다.\n\n# 나이트는 말을 타고 있기 떄문에 이동을 할 때는 L자 형태로만 이동할수 있으며 정원 밖으로는 나갈 수 없다.\n# 나이트는 특정한 위치에서 다음과 같은 2가지 경우로 이동할 수 있다.\n\n# 1. 수평으로 두 칸 이동한 뒤에 수직으로 한 칸 이동하기\n# 2. 
수직으로 두 칸 이동한 뒤에 수평으로 한 칸 이동하기\n\n# a b c d e f g h O : 색칠 X\n# 1 O X O X O X O X X : 색칠 O\n# 2 O X O X O X O X\n# 3 O X O X O X O X\n# 4 O X O X O X O X\n# 5 O X O X O X O X\n# 6 O X O X O X O X\n# 7 O X O X O X O X\n# 8 O X O X O X O X\n\n# 이처럼 8 X 8 좌표 평면상에서 나이트의 위치가 주어졌을 때 나이트가 이동할 수 있는 경우의 수를 출력하는 프로그램을 작성하시오.\n# 이때 왕실의 정원에서 행 위치를 표현할 때는 1부터 8로 표현하며, 열 위치를 표현할 때는 a부터 h로 표현한다.\n\n# 예를 들어 만약 나이트가 a1에 있을 때 이동할 수 있는 경우의 수는 다음 2가지이다.\n# a1의 위치는 좌표 평면에서 구석의 위치에 해당하며 나이트는 정원의 밖으로는 나갈 수 없기 때문이다.\n\n# 1. 오른쪽으로 두 칸 이동 후 아래로 한 칸 이동하기(c2)\n# 2. 아래로 두 칸 이동 후 오른쪽으로 한 칸 이동하기(b3)\n\n# 또 다른 예로 나이트가 c2에 위치해 있다면 나이트가 이동할 수 있는 경우의 수는 6가지이다. (직접 계산해보시오)\n\n# 입력 조건 : 첫째 줄에 8 X 8 좌표 평면사에서 현재 나이트가 위치한 곳의 좌표를 나타내는 두 문자로 구성된 문자열이 입력된다.\n# 입력 문자는 a1처럼 열과 행으로 이뤄진다.\n\n# 출력 조건 : 첫째 줄에 나이트가 이동할 수 있는 경우의 수를 출력하시오.\n\n# 입력 예시 : a1 출력 예시 : 2\n\n\n# 문제 해설\n\n# 왕실의 나이트 문제는 앞서 다루었던 예제 4-1 '상하좌우' 문제와 유사하다.\n# 나이트가 이동할수 있는 경로를 하나씩 확인하여 이동하면 된다. 다만 8 X 8 좌표 평면을 벗어나지 않도록 꼼꼼하게 검사하는 과정이 필요하다.\n# 나이트는 2가지 경로로 움직일수 있다고 했다.\n\n# 1. 수평으로 두 칸 이동한 뒤에 수직으로 한 칸 이동하기\n# 2. 수직으로 두 칸 이동한 뒤에 수평으로 한 칸 이동하기\n\n# 나이트의 이동 경로를 steps 변수에 넣는다면, 이 2가지 규칙에 따라\n# steps= [(-2, -1), (-1,-2), (1, -2), (2, -1), (2, 1), (1, 2), (-1, 2), (-2, 1)]로 값을 대입할 수 있다.\n# 현재 위치를 기준으로 아래쪽과 오른쪽은 양수의 값을, 위쪽과 왼쪽은 음수의 값을 대입한 결과이다.\n# 이제 나이트의 현재 위치가 주어지면 현재 위치에서 이동 경로를 더한 다음, 8 X 8 좌표 평면에 있는지 확인하면 된다.(이 과정은 반복문으로 처리할수 있다.)\n\n# ---------------------------------------------------------------------------------------------------------------------------\n# 풀이 (Python)\n\n# 현재 나이트의 위치 입력받기\ninput_data = input()\nrow = int(input_data[1]) # 행위 위치 : 두번쨰 위치의 문자를 숫자로 바꾼것이 나이트가 존재하는 행위 위치\n# 열의 위치 : 문자로 들어온 값을 아스키코드 형태로 변환하여 그값과 문자 'a'를 아스키코드 형태로 변환한 값을 뺸 값이 나이트가 위치하는 행위 위치\ncolumn = int(ord(input_data[0])) - int(ord('a')) + 1\n\n# 나이트가 이동할 수 있는 8가지 방향 정의\nsteps = [\n (-2, -1), (-1, -2), (1, -2), (2, -1),\n (2, 1), (1, 2), (-1, 2), (-2, 1)\n]\n\n# 8가지 방향에 대하여 각 위치로 이동이 가능한지 확인\nresult = 0\nfor step in steps:\n # 이동하고자 하는 위치 확인\n next_row = row + step[0]\n next_column = column + step[1]\n # 해당 위치로 이동이 가능하다면 카운트 증가\n if next_row >= 1 and next_row <= 8 and next_column >= 1 and next_column <= 8:\n result += 1\n\n\nprint(result)\n","sub_path":"KYJ/이코테/구현/왕실의 나이트.py","file_name":"왕실의 나이트.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624192809","text":"''' Functions to support the Pylontech US2000 and similar Batteries.\n\n This module provides Classes to communicate over RS-485 to the Pylontech\n Battery. 
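# --- annotation (not part of the record above) ---
# English recap of the (Korean) problem above, with a compact sketch mirroring
# the reference solution: given a knight's position such as 'a1' (column a-h,
# then row 1-8), count the L-shaped moves that stay on the 8x8 board.
def count_moves(pos):
    row = int(pos[1])
    column = ord(pos[0]) - ord('a') + 1
    steps = [(-2, -1), (-1, -2), (1, -2), (2, -1), (2, 1), (1, 2), (-1, 2), (-2, 1)]
    return sum(1 <= row + dr <= 8 and 1 <= column + dc <= 8 for dr, dc in steps)

assert count_moves('a1') == 2   # corner example from the statement
assert count_moves('c2') == 6   # second example from the statement
# --- end annotation ---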
This code is based on the\n \"PYLON low voltage Protocol RS485\", Version 3.3 (2018/08/21)\n\n The RS-485 communication ist based on pyserial.\n As hardware, a simple usb-serial rs485 adapter can be used.\n these adapter are able to receive out of the box, sending is possible\n by enabling the transceive pin using the RTS signal.\n'''\n\nimport serial\nfrom construct import *\nimport time\n\n# message_crc = Struct('message_crc', 'crc'/Int32ul)\n\nCHKSUM_BYTES = 4\nEOI_BYTES = 1\n\n\nmessage_header = 'message_header'/Struct(\n #'soi' / Int8ul, # 1 byte hex value - single character\n 'ver' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'adr' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'cid1' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'cid2' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'length' / Int32ul, # 2 byte hex value => encoded as 4 Characters HEX\n)\n\nmessage_format = 'message_format'/Struct(\n #'soi' / Int8ul, # 1 byte hex value - single character\n 'ver' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'adr' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'cid1' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'cid2' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'length' / Int32ul, # 2 byte hex value => encoded as 4 Characters HEX\n 'cmd_info' / Int16ul, # 1 byte hex value => encoded as 2 Characters HEX\n 'data_info' / Int16ul # 1 byte hex value => encoded as 2 Characters HEX\n)\n\n\n\nclass Rs485Handler():\n ''' Handles the USB to RS485 adapter with TE / Transmit Enable on RTS\n preset for\n - 9600 baud,8n1\n - sending and receiving frames defined by start and end byte\n '''\n sendTime1 = 0\n sendTime2 = 0\n rcvTime1 = 0\n rcvTime2 = 0\n\n def __init__(self, device='/dev/ttyUSB0', baud=9600):\n #try:\n self.ser = serial.Serial(device,\n baudrate=baud,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n rtscts=False,\n dsrdtr=False,\n timeout=10.0,\n inter_byte_timeout=0.02) # open serial port\n #except OSError:\n # print(\"device not found: \" + device)\n # exit(1)\n\n def send(self, data):\n ''' send a Frame of binary data\n :param data: binary data e.g. b'~2002464FC0048520FCB2\\r'\n :return: -\n '''\n print(\"-> \" + data.decode())\n self.ser.rts = True # set TX enable\n self.ser.write(data)\n self.ser.rts = False # reset TX enable = enable Receive\n self.sendTime1 = time.time_ns()\n while self.ser.out_waiting > 0:\n time.sleep(0.001)\n self.sendTime2 = time.time_ns() - self.sendTime1\n\n\n def receive_frame(self, start=b'~', end=b'\\r'):\n ''' receives a frame defined by a stert and end byte\n :param start: the start byte, e.g. b'~'\n :param end: the end byte, e.g. b'\\r'\n :return: the frame as binary data,\n e.g. 
b'~200246000000FDB2\\r'\n returns after the first end byte.\n '''\n char = self.ser.read(1)\n # wait for leading byte / start byte:\n while char != start:\n char = self.ser.read(1)\n self.rcvTime1 = time.time_ns() - self.sendTime1 # just for Timeout hamdling\n # receive all until the trialing byte / end byte:\n data = self.ser.read_until(end)\n # build a complete frame:\n frame = start + data\n # just more timeout handling:\n self.rcvTime2 = time.time_ns() - self.sendTime1 # just for Timeout hamdling\n # just for debugging:\n print(\"\\r <- \" + frame.decode())\n # return the frame\n print(\" times: {:04.3f} {:6.3f} - {:6.3f} \".format(self.sendTime2 / 1000.0, self.rcvTime1 /1000.0, self.rcvTime2 /1000.0))\n return frame\n\n\nclass Pylontech_rs485():\n ''' pylontech rs485 protocol handler '''\n validchars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n\n\n def __init__(self, device='/dev/ttyUSB0'):\n baud = 9600\n self.rs485 = Rs485Handler(device, baud)\n\n def recv(self):\n data = self.rs485.receive_frame(start=b'~', end=b'\\r')\n # check len\n if len(data) < 20:\n # smaller then minimal size\n return None\n start = data.index(b'~')\n if start > 0:\n data = data[start:-1]\n # check prefix and suffix\n index = 0\n while (data[index] != 0x7E) and (data[index] not in self.validchars):\n index += 1\n if (data[index] == 0x7E) and (data[index] in self.validchars):\n data = data[index:len(data)]\n break\n if data[0] != 0x7E: # '~'\n # pefix missing\n return None\n if data[-1] != 0xd: # '\\r'\n # suffix missing\n return None\n data = data[1:-1] # packet stripped, - without prefix, suffix\n packages = data.split(b'\\r~')\n data2 = []\n for package in reversed(packages):\n chksum = self.get_chk_sum(package, len(package))\n chksum_from_pkg = int(package[-4:].decode(), base=16)\n if chksum == chksum_from_pkg:\n #data2.append(package[0:-4])\n data2.append(package)\n else:\n print(\"crc error soll<->ist {:04x} --- {:04x}\".format(chksum, chksum_from_pkg))\n print(package)\n return data2\n\n def get_chk_sum(self, data, size):\n sum = 0\n for byte in data[0:size - CHKSUM_BYTES]:\n sum += byte\n sum = ~sum\n sum &= 0xFFFF\n sum += 1\n return sum\n\n def send(self, data): # data -> b'2002464FC0048520' for b'~2002464FC0048520....\\r' to be sent\n # - checksum and prefix/suffix will be added.\n chksum = self.get_chk_sum(data, len(data)+CHKSUM_BYTES)\n package = (\"~\" + data.decode() + \"{:04X}\".format(chksum) + \"\\r\").encode()\n self.rs485.send(package)\n\n def send1(self):\n #package = b'~21024651C0040000FCC0\\r'\n package = b'~2002464FC0048520....\\r'\n #package = b'~200246040000FDAE\\r'\n chksum = self.get_chk_sum(package[1:-1], len(package)-2)\n data = package[0:-5].decode() + \"{:04X}\".format(chksum) + \"\\r\"\n package = data.encode()\n self.rs485.send(package)\n\n pass\n","sub_path":"pylon/pylontech.py","file_name":"pylontech.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"489939169","text":"import os\nfrom flask import Flask, json, request\nimport mysql.connector\nfrom mysql.connector.errors import IntegrityError\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\nCORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\ndbConn = mysql.connector.connect(\n host=\"35.238.249.95\",\n user=\"root\",\n password=\"root\",\n database=\"a2\"\n)\n\ndbCursor = dbConn.cursor()\n\n\n@app.route(\"/\")\n@cross_origin()\ndef hello_world():\n name = 
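# --- annotation (not part of the record above) ---
# Worked example of the two's-complement checksum in get_chk_sum() above: the
# CRC covers every ASCII byte between the '~' prefix and the 4 trailing hex
# characters. For the captured frame b'~200246000000FDB2\r':
body = b'200246000000'
crc = ((~sum(body)) & 0xFFFF) + 1
assert crc == 0xFDB2    # matches the frame's FDB2 suffix
# and for the commented-out frame b'~200246040000FDAE\r' in send1():
assert (((~sum(b'200246040000')) & 0xFFFF) + 1) == 0xFDAE
# --- end annotation ---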
os.environ.get(\"NAME\", \"World\")\n return \"Hello {}!\".format(name)\n\n\n@app.route(\"/dashboard\", methods=[\"GET\"])\n@cross_origin()\ndef dashboard():\n query = \"SELECT * FROM user_status WHERE status = %s\"\n query_val = (1,)\n try:\n dbCursor.execute(query, query_val)\n result = dbCursor.fetchall()\n print(\"Result\")\n print(result)\n return json.jsonify({\n \"status\": \"true\",\n \"message\": \"Active User List Success!\",\n \"active_users\": result\n })\n except IntegrityError as err:\n return json.jsonify({\n \"status\": \"false\",\n \"message\": err.msg,\n })\n\n\n@app.route(\"/logout\", methods=[\"GET\"])\n@cross_origin()\ndef logout():\n user_info = request.get_json()\n query = \"UPDATE user_status SET status = %s WHERE email = %s\"\n query_val = (0, user_info[\"email\"])\n\n try:\n dbCursor.execute(query, query_val)\n dbConn.commit()\n return json.jsonify({\n \"msg\": \"Logout Successful!\",\n })\n except IntegrityError as e:\n return json.jsonify({\n \"msg\": \"User Logout Failed:\" + e.msg\n })\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n","sub_path":"A2/backend/c3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"270381125","text":"#!/usr/bin/env python3\n\n\nimport kwrapper as kraken\nimport curses, threading, time, pickle\n\nfrom pathlib import Path\n\n\nWINDOW_MODE_MAIN = 0\nWINDOW_MODE_BALANCE = 1\nWINDOW_MODE_TICKER = 2\nWINDOW_MODE_OHLC = 3\n\nKEY_FILE_LOCATIONS = [\n str(Path.home()) + \"/.kraken_key\",\n str(Path.home()) + \"/.config/.kraken_key\",\n str(Path.home()) + \"/.config/ckraken/kraken_key\"]\n\nwindow_curr = WINDOW_MODE_MAIN\nscreen = None\ndata = {}\n\n\ndef window_main():\n global data\n while True:\n screen.addstr(0, 0, 'Active pairs:')\n y, x = 1, 0\n for item in data['p_active']:\n screen.addstr(y, x, item)\n y, x = calcyx(y, x)\n key = screen.getch()\n if key == ord('s'):\n result = search()\n if result is not None and result not in data['p_active']:\n data['p_active'] += [result]\n elif key == ord('q'):\n quit()\n break\n screen.refresh()\n screen.erase()\n\n\ndef window_balance():\n pass\n\n\ndef window_ticker():\n pass\n\n\ndef window_ohlc():\n pass\n\n\nwindow_mode = {\n WINDOW_MODE_MAIN: window_main,\n WINDOW_MODE_BALANCE: window_balance,\n WINDOW_MODE_TICKER: window_ticker,\n WINDOW_MODE_OHLC: window_ohlc}\n\n\ndef calcyx(y, x, xsize=15):\n \"\"\"Utility to draw list of entries on the screen realestate.\n\n Params:\n y, x: current y, x coordinates.\n \n Returns:\n [y, x]: list of new y, x coordinates.\n \"\"\"\n y += 1\n if y >= curses.LINES:\n y = 1\n x += xsize\n if x >= curses.COLS:\n x = 0\n return [y, x]\n\n\ndef search():\n \"\"\"Search for asset pairs.\n \n Returns:\n ksel: the key for the selected asset pair.\n \"\"\"\n search_window = curses.newwin(1, curses.COLS, 0, 0)\n search_window.addstr(0, 0, 'Search:')\n\n curses.curs_set(True)\n\n ss = ''\n draw_search_result(ss)\n while True:\n screen.erase()\n c = search_window.getch()\n screen.addstr(0, curses.COLS-(len(str(c))+1), str(c))\n # select result\n if c == curses.KEY_ENTER or c == 10 or c == 13:\n sel, ksel = navigate_search(ss)\n if sel < 0:\n search_window.move(0, 7+len(ss))\n draw_search_result(ss)\n continue\n else:\n curses.curs_set(False)\n return ksel\n # quit search\n elif c == 27:\n curses.curs_set(False)\n return None\n # search\n else:\n # backspace\n if c == 127:\n 
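# --- annotation (not part of the record above) ---
# Quick client sketch for the Flask service above (assumption: it is running
# locally on its default port 8080).
import requests

r = requests.get('http://localhost:8080/dashboard')
print(r.json())  # {"status": "true", "message": "Active User List Success!", ...}

# Note: /logout is registered as GET yet reads a JSON body via
# request.get_json(), so the client must still attach one.
r = requests.get('http://localhost:8080/logout', json={'email': 'user@example.com'})
print(r.json())
# --- end annotation ---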
ss = ss[:-1]\n\n elif 47 < c < 123:\n ss += chr(c)\n draw_search_result(ss)\n screen.refresh()\n\n\ndef navigate_search(ss):\n \"\"\"Navigate through the search results of a given search string.\n\n Params:\n ss(str): search string to pattern match with\n\n Returns:\n pos: position of the selected pair, or -1 if canceled\n kr: key of the selected pair\n \"\"\"\n curses.curs_set(False)\n pos = 0\n r, kr = draw_search_result(ss, pos)\n while True:\n c = screen.getch()\n # selection\n if c == 10 or c == 13:\n curses.curs_set(True)\n return [pos, kr]\n # back\n elif c == 27 or c == ord('q'):\n curses.curs_set(True)\n return [-1, kr]\n # movement\n elif c == ord('j') or c == curses.KEY_DOWN:\n pos = min(r-1, pos + 1)\n elif c == ord('k') or c == curses.KEY_UP:\n pos = max(0, pos - 1)\n elif c == ord('l') or c == curses.KEY_RIGHT:\n pos = min(r-1, pos + curses.LINES-1)\n elif c == ord('h') or c == curses.KEY_LEFT:\n pos = max(0, pos - curses.LINES+1)\n r, kr = draw_search_result(ss, pos)\n\n\ndef draw_search_result(ss, highlight=-1):\n \"\"\"Search for given string in all valid asset pairs.\n\n Params:\n ss(str): search string to pattern match with\n highlight: index of highlighted result, -1 if none\n\n Returns:\n r: result count\n kr: result asset pair key or -1 if ss not found\n \"\"\"\n y, x ,r = 1, 0, 0\n kr = -1\n for k, v in data['p_available'].items():\n if ss.upper() in v['altname'] and '.d' not in v['altname']:\n if r == highlight:\n kr = k\n screen.addstr(y, x, v['altname'], curses.A_REVERSE)\n else:\n screen.addstr(y, x, v['altname'])\n r += 1\n y, x = calcyx(y, x)\n screen.addstr(curses.LINES-1, curses.COLS-(len(str(r))+1), str(r))\n screen.addstr(0, 0, 'Search:' + ss)\n screen.refresh()\n return [r, kr]\n\n\ndef init():\n global data\n curses.use_default_colors()\n curses.curs_set(False)\n\n # COLORS\n curses.init_color(curses.COLOR_RED, 600, 300, 300)\n curses.init_color(curses.COLOR_GREEN, 300, 600, 300)\n curses.init_pair(1, curses.COLOR_RED, -1)\n curses.init_pair(2, curses.COLOR_GREEN, -1)\n\n # load private kraken api key\n data['key_loaded'] = False\n for loc in KEY_FILE_LOCATIONS:\n if kraken.load_private_key(loc):\n data['key_loaded'] = True\n break\n try:\n data = load('data')\n except:\n data['fiat'] = 'EUR'\n data['p_available'] = kraken.get_asset_pairs()\n if data['key_loaded']:\n data['balance'] = kraken.get_balance()\n data['p_active'] = kraken.get_pairs_from_currencies(data['p_available'], data['balance'], data['fiat'])\n data['ticker'] = kraken.get_ticker(data['p_active'])\n else:\n data['p_active'] = []\n data['balance'] = {}\n data['ticker'] = {}\n save(data, 'data')\n\n\ndef quit():\n global data\n save(data, 'data')\n\n\ndef synchronize(full: bool):\n global data\n if full:\n data['p_available'] = kraken.get_asset_pairs()\n if data['key_loaded']:\n data['balance'] = kraken.get_balance()\n data['ticker'] = kraken.get_ticker(data['p_active'])\n\n \"\"\"Pull data from the kraken API.\"\"\"\n return time.time()\n\n\ndef load(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\n\ndef save(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef main(stdscr):\n global screen\n screen = stdscr\n init()\n window_mode[window_curr]()\n\n\nif __name__ == \"__main__\":\n curses.wrapper(main)\n","sub_path":"ckraken.py","file_name":"ckraken.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"332942954","text":"l1 = 
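# --- annotation (not part of the record above) ---
# Layout sketch for the calcyx() helper above: y walks down the screen and
# wraps into a new 15-character-wide column at the bottom (assumption: a
# 5-line, 40-column screen stands in for curses.LINES/curses.COLS here).
def calcyx(y, x, lines=5, cols=40, xsize=15):
    y += 1
    if y >= lines:
        y, x = 1, x + xsize
    if x >= cols:
        x = 0
    return y, x

y, x = 1, 0
cells = []
for _ in range(9):
    cells.append((y, x))
    y, x = calcyx(y, x)
print(cells)  # fills rows 1-4 at x=0, then continues in the column at x=15
# --- end annotation ---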
[\"bhindi\",\"aalo\",\"gaajar\",\"mooli\"]\r\n\r\n# i = 1\r\n# for item in l1:\r\n# if i%2 == 0:\r\n# print(f\"jarvis please buy {item}\")\r\n# i +=1\r\nfor index,item in enumerate(l1):\r\n if index%2 == 0:\r\n print(f\"jarvis please buy {item}\")","sub_path":"Python Complete course/Simple python/enumerate function.py","file_name":"enumerate function.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"578128101","text":"# -*- coding:utf-8 -*-\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import rnn\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport pandas as pd\r\nimport numpy as np\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import db\r\nimport csv\r\nimport sys\r\nimport os\r\nfrom operator import itemgetter\r\n\r\ncred = credentials.Certificate('firebase-adminsdk.json')\r\nfirebase_admin.initialize_app(cred, {\r\n 'databaseURL' : 'https://total-cascade-210406.firebaseio.com'\r\n})\r\n\r\ncsv_folder_name = \"data\"\r\ncsv_file_name = \"rawData\"\r\n\r\nmodel_folder_name = \"~/Activity_model/model\"\r\nmodel_file_name = \"ActivityRNN\"\r\n\r\nmodel_info_file_name = \"model_info.csv\"\r\nmodel_info_type = [\"Index\", \"Test Accuracy\", \"Batch Size\", \"Epoch\", \"Model Layer\"]\r\nlayer_info = \"(lstm + dropout) * 2\"\r\n\r\n\r\nlr = 1e-3\r\n\r\nbatch_size = tf.placeholder(tf.int32, [], name='batch_size') # tf.int32\r\n# Feature Num #28\r\ninput_size = 6\r\n# Time Series Num #28\r\ntimestep_size = 450\r\n# Hidden Layer Feature Num \r\nhidden_size = 256\r\n# LSTM Layer Num\r\nlayer_num = 2\r\n# Y Class Num #10\r\nclass_num = 6\r\nclass_type = [\"Biking\", \"In Vehicle\", \"Running\", \"Still\", \"Tilting\", \"Walking\", \"Features\"]\r\n\r\n_batch_size = 128\r\n\r\nepoch_num = 2000\r\n\r\n# Training Data / Raw Data\r\ntrain_p = 0.8\r\n\r\nepoch_start = 0\r\niters = 0\r\nperm_X = np.empty\r\nperm_Y = np.empty\r\n\r\ndef connect_firebase():\r\n root = db.reference()\r\n values = root.child('SensorDataSet').get()\r\n data = pd.DataFrame(values).T\r\n\r\n data_index = [[i.split()[0], int(i.split()[1])] for i in list(data)[class_num:]]\r\n sorted_data_index = sorted(data_index, key=itemgetter(1))\r\n sorted_data_index = [i[0] + \" \" + str(i[1]) for i in sorted_data_index]\r\n sorted_data_index = list(data)[:class_num] + sorted_data_index\r\n \r\n data = data.reindex(columns=sorted_data_index)\r\n print (data)\r\n\r\n npdata = np.array(data.values)\r\n print (npdata.shape)\r\n print (npdata[:,0:10])\r\n\r\n return npdata\r\n\r\ndef write_data(raw_data):\r\n global csv_folder_name\r\n global csv_file_name\r\n\r\n # **Raw Data\r\n # make new csv folder\r\n if not os.path.exists(csv_folder_name):\r\n os.mkdir(csv_folder_name)\r\n\r\n csv_path = os.path.join(csv_folder_name, csv_file_name)\r\n i = 0\r\n while os.path.exists(csv_path + str(i) + \".csv\"):\r\n i += 1\r\n\r\n #write raw data to csv\r\n if i != 0:\r\n csv_last_path = csv_path + str(i-1) + \".csv\"\r\n csv_path = csv_path + str(i) + \".csv\"\r\n # write raw data to csv\r\n with open(csv_last_path, 'rt') as infile:\r\n with open(csv_path, 'wt') as outfile:\r\n writer = csv.writer(outfile)\r\n reader = csv.reader(infile)\r\n writer.writerow(next(reader))\r\n for row in reader:\r\n writer.writerow(row)\r\n for item in raw_data:\r\n writer.writerow(item)\r\n else:\r\n csv_path = csv_path + str(i) + \".csv\"\r\n csv_file = open(csv_path,\"w\")\r\n csv_w = 
csv.writer(csv_file)\r\n csv_w.writerow(class_type)\r\n for item in raw_data:\r\n csv_w.writerow(item)\r\n csv_file.close()\r\n\r\n new_data = list(csv.reader(open(csv_path,'r')))\r\n new_data = np.array(new_data[1:]).astype(float)\r\n print (all_data.shape)\r\n\r\n # clean firebase\r\n root = db.reference()\r\n root.child('SensorDataSet').delete()\r\n\r\n return new_data\r\n\r\ndef write_result(sess, frozen_graphdef, tflite_model, test_accuracy):\r\n global model_folder_name\r\n global model_file_name\r\n global model_info_file_name\r\n global _batch_size\r\n global epoch_num\r\n global layer_info\r\n\r\n # **Models\r\n # make new models folder\r\n i = 0\r\n while os.path.exists(model_folder_name + str(i)):\r\n i += 1\r\n model_path = model_folder_name + str(i)\r\n os.makedirs(model_path)\r\n\r\n model_path = os.path.join(model_path, model_file_name)\r\n ckpt_path = model_path + str(i) + \".ckpt\"\r\n tflite_path = model_path + str(i) + \".tflite\"\r\n\r\n # save model\r\n # ckpt\r\n saver = tf.train.Saver()\r\n saver.save(sess, ckpt_path)\r\n # pb\r\n tf.train.write_graph(frozen_graphdef, model_folder_name + str(i),\r\n model_file_name + str(i) + '.pb', as_text=False)\r\n # tflite\r\n open(tflite_path, \"wb\").write(tflite_model)\r\n\r\n # **Write model information to csv\r\n minfo = [i, test_accuracy, _batch_size, epoch_num, layer_info]\r\n if not os.path.exists(model_info_file_name):\r\n model_info_file = open(model_info_file_name, \"w\")\r\n model_info_w = csv.writer(model_info_file)\r\n model_info_w.writerow(model_info_type)\r\n model_info_w.writerow(minfo)\r\n else:\r\n model_info_file = open(model_info_file_name, \"a\")\r\n model_info_w = csv.writer(model_info_file)\r\n model_info_w.writerow(minfo)\r\n\r\ndef canonical_name(x):\r\n return x.name.split(\":\")[0]\r\n\r\ndef next_batch(X_train, Y_train, num, start):\r\n global perm_X\r\n global perm_Y\r\n global iters\r\n if start == 0:\r\n perm = np.random.permutation(X_train.shape[0])\r\n perm_X = X_train[perm, :]\r\n perm_Y = Y_train[perm, :]\r\n batch_X = perm_X[start:start + num, :]\r\n batch_Y = perm_Y[start:start + num, :]\r\n start += num\r\n elif start <= X_train.shape[0] - num:\r\n batch_X = perm_X[start:start + num, :]\r\n batch_Y = perm_Y[start:start + num, :]\r\n start += num\r\n else:\r\n rest_num = X_train.shape[0] - start\r\n new_part_num = num - rest_num\r\n batch_X = np.vstack((perm_X[start:, :], perm_X[:new_part_num, :]))\r\n batch_Y = np.vstack((perm_Y[start:, :], perm_Y[:new_part_num, :]))\r\n perm = np.random.permutation(X_train.shape[0])\r\n perm_X = X_train[perm, :]\r\n perm_Y = Y_train[perm, :]\r\n start = 0\r\n iters += 1\r\n\r\n return batch_X, batch_Y, start\r\n\r\n\r\n# Read raw data\r\nraw_data = connect_firebase()\r\nnew_data = write_data(raw_data)\r\n\r\n# Get train, test data\r\n\r\ndataNum = new_dataset.shape[0]\r\ntrainNum = int(dataNum*train_p)\r\ntrainX = new_dataset[:trainNum, class_num:]\r\ntrainY = new_dataset[:trainNum, :class_num].astype(int)\r\ntestX = new_dataset[trainNum:, class_num:]\r\ntestY = new_dataset[trainNum:,:class_num].astype(int)\r\n\r\nprint (trainX.shape)\r\nprint (trainY.shape)\r\nprint (testX.shape)\r\nprint (testY.shape)\r\nprint (trainX)\r\nprint (trainY)\r\nprint (testX)\r\nprint (testY)\r\n\r\n# set training x, y placeholder\r\nX_train = tf.placeholder(tf.float32, [_batch_size, timestep_size*input_size], name=\"imput_train_x\")\r\nY_train = tf.placeholder(tf.float32, [_batch_size, class_num], name=\"imput_train_y\")\r\n\r\n# set testing x, y placeholder\r\nX_test = 
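# --- annotation (not part of the record above) ---
# Sketch of the auto-increment naming pattern shared by write_data() and
# write_result() above: probe rawData0.csv, rawData1.csv, ... (or model0,
# model1, ...) until a free index is found. Hypothetical helper for clarity:
import os

def next_free(path_prefix, suffix='.csv'):
    i = 0
    while os.path.exists(f'{path_prefix}{i}{suffix}'):
        i += 1
    return f'{path_prefix}{i}{suffix}'

# e.g. next_free(os.path.join('data', 'rawData')) -> 'data/rawData0.csv' on a fresh run
# --- end annotation ---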
tf.placeholder(tf.float32,[None, input_size*timestep_size], name='input_test_x')\r\nY_test = tf.placeholder(tf.float32,[None, class_num], name='input_test_y')\r\nkeep_prob = tf.placeholder(tf.float32, [])\r\nprint (X_train.shape)\r\n\r\n# Training Data: [_batch_size, timestep_size*input_size] ==> [_batch_size, timestep_size, input_size]\r\n# Testing Data: [None, timestep_size*input_size] ==> [None, timestep_size, input_size]\r\n# Build RNN LSTM layer\r\n####################################################################\r\n\r\n# **Step 1: Input Shape = (batch_size, timestep_size, input_size)\r\nX = tf.placeholder(tf.float32, [None, timestep_size, input_size])\r\n\r\n# **Step 2: Run MultiRNN with ((lstm + dropout) * 2)\r\nmlstm_cell = []\r\nfor i in range(layer_num):\r\n lstm_cell = rnn.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)\r\n lstm_cell = rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)\r\n mlstm_cell.append(lstm_cell)\r\nmlstm_cell = tf.contrib.rnn.MultiRNNCell(mlstm_cell,state_is_tuple=True)\r\n\r\n# **Step3: Initiate state with zero\r\ninit_state = mlstm_cell.zero_state(batch_size, dtype=tf.float32)\r\n\r\n# **Step4: Calculate in timeStep\r\noutputs = list()\r\nstate = init_state\r\nwith tf.variable_scope('RNN'):\r\n for timestep in range(timestep_size):\r\n if timestep > 0:\r\n tf.get_variable_scope().reuse_variables()\r\n # Variable \"state\" store the LSTM state\r\n (cell_output, state) = mlstm_cell(X[:, timestep, :], state)\r\n outputs.append(cell_output)\r\nh_state = outputs[-1]\r\n\r\n# h_state is the output of hidden layer\r\n# Weight, Bias, Softmax to predict\r\nW = tf.Variable(tf.truncated_normal([hidden_size, class_num], stddev=0.1), dtype=tf.float32)\r\nbias = tf.Variable(tf.constant(0.1,shape=[class_num]), dtype=tf.float32)\r\nY_pre = tf.nn.softmax(tf.matmul(h_state, W) + bias)\r\n\r\n\r\n# Loss function and accuracy\r\ncross_entropy = -tf.reduce_mean(Y_train * tf.log(Y_pre))\r\ntrain_op = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\r\n\r\ntrain_correct_prediction = tf.equal(tf.argmax(Y_pre,1), tf.argmax(Y_train,1))\r\ntrain_accuracy = tf.reduce_mean(tf.cast(train_correct_prediction, \"float\"))\r\n\r\ntest_correct_prediction = tf.equal(tf.argmax(Y_pre,1), tf.argmax(Y_test,1))\r\ntest_accuracy = tf.reduce_mean(tf.cast(test_correct_prediction, \"float\"))\r\n\r\n####################################################################\r\n\r\n# Set up GPU demand\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5 # maximun alloc gpu50% of MEM\r\nconfig.gpu_options.allow_growth = True\r\n\r\ninit = tf.global_variables_initializer()\r\nout = tf.identity(Y_pre, name=\"output\")\r\n#saver = tf.train.Saver()\r\n\r\nprint (\"Start...\")\r\nwith tf.Session(config=config) as sess:\r\n sess.run(init)\r\n for i in range(epoch_num):\r\n batch_X, batch_Y, epoch_start = next_batch(trainX, trainY, _batch_size, epoch_start)\r\n # print (batch_X.shape)\r\n # print (batch_Y.shape)\r\n # print (epoch_start)\r\n # batch = mnist.train.next_batch(_batch_size)\r\n # reshape_trainX = np.array(batch[0]).reshape(-1, timestep_size, input_size)\r\n # reshape_testX = np.array(mnist.test.images).reshape(-1, timestep_size, input_size)\r\n reshape_trainX = np.array(batch_X).reshape(-1, timestep_size, input_size)\r\n reshape_testX = np.array(testX).reshape(-1, timestep_size, input_size)\r\n if (i+1)%200 == 0:\r\n accuracy = sess.run(train_accuracy, feed_dict={\r\n X_train:batch_X, Y_train: batch_Y, 
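# --- annotation (not part of the record above) ---
# Shape sketch for the reshape done before feeding the LSTM above: each sample
# is a flat vector of timestep_size * input_size values (450 steps x 6 sensor
# channels) that becomes a (batch, timesteps, features) tensor.
import numpy as np

batch = np.zeros((128, 450 * 6))
reshaped = batch.reshape(-1, 450, 6)
print(reshaped.shape)  # (128, 450, 6)
# --- end annotation ---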
keep_prob: 1.0, batch_size: _batch_size, X: reshape_trainX})\r\n #print (\"Iter%d, step %d, training accuracy %g\" % ( mnist.train.epochs_completed, (i+1), accuracy))\r\n print (\"Iter%d, Epoch %d, Training Accuracy %g\" % ( iters, (i+1), accuracy))\r\n sess.run(train_op, feed_dict={X_train: batch_X, Y_train: batch_Y, keep_prob: 0.5, batch_size: _batch_size, X: reshape_trainX})\r\n\r\n # Testing data accuracy\r\n # print (\"Test Accuracy %g\"% sess.run(test_accuracy, feed_dict={\r\n # X_test: mnist.test.images, Y_test: mnist.test.labels, keep_prob: 1.0, batch_size:mnist.test.images.shape[0], X: reshape_testX}))\r\n test_accuracy = sess.run(test_accuracy, feed_dict={\r\n X_test: testX, Y_test: testY, keep_prob: 1.0, batch_size:testX.shape[0], X: reshape_testX})\r\n print (\"Test Accuracy %g\"% test_accuracy)\r\n\r\n #saver.save(sess, \"model/rnn.ckpt\")\r\n\r\n frozen_tensors = [out]\r\n out_tensors = [out]\r\n\r\n frozen_graphdef = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, list(map(canonical_name, frozen_tensors)))\r\n # tf.train.write_graph(frozen_graphdef, \"model\",\r\n # 'rnn.pb', as_text=False)\r\n tflite_model = tf.contrib.lite.toco_convert(frozen_graphdef, [X_train], out_tensors, allow_custom_ops=True)\r\n\r\n #open(\"writer_model.tflite\", \"wb\").write(tflite_model)\r\n\r\n write_result(sess, frozen_graphdef, tflite_model, test_accuracy)","sub_path":"ML/trainSaver.py","file_name":"trainSaver.py","file_ext":"py","file_size_in_byte":11644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"229094226","text":"import os\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport random\nfrom tqdm import tqdm\nimport warnings\nimport json\nimport yaml\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom PIL import Image\nimport cv2\n#from skimage.transform import resize\nfrom kaggle.api.kaggle_api_extended import KaggleApi\n\n# sklearn\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n# PyTorch\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, Subset\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nfrom tensorboardX import SummaryWriter\n\n# 自作モジュール\nfrom dataset import load_dataset, TGSSaltDataset, TGSSaltDataLoader\nfrom models import UNet4, UNet4BottleNeck, UNet4ResNet34, GANimationGenerator, MGVTONResGenerator\nfrom models import DeepLab, DeepLabBottleNeck\nfrom models import PatchGANDiscriminator, MultiscaleDiscriminator, GANimationDiscriminator\nfrom models import ParsingCrossEntropyLoss, LovaszSoftmaxLoss, VGGLoss, VanillaGANLoss, LSGANLoss, HingeGANLoss, ConditionalExpressionLoss\n\nfrom utils import save_checkpoint, load_checkpoint, convert_rle\nfrom utils import board_add_image, board_add_images, save_image_w_norm\nfrom utils import iou_metric, iou_metric_batch\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--exper_name\", default=\"single_model_pytorch\", help=\"実験名\")\n parser.add_argument(\"--dataset_dir\", type=str, default=\"../datasets/competition_data\")\n parser.add_argument(\"--results_dir\", type=str, default=\"results\")\n parser.add_argument(\"--submit_file\", type=str, default=\"submission.csv\")\n parser.add_argument(\"--competition_id\", type=str, 
default=\"tgs-salt-identification-challenge\")\n parser.add_argument(\"--train_mode\", choices=[\"train\", \"test\", \"eval\"], default=\"train\", help=\"\")\n parser.add_argument(\"--model_type_G\", choices=[\"unet4\", \"unet5\", \"unet4bottleneck\", \"unet4resnet34\", \"mgvton\", \"ganimation\", \"deep_lab_v3+\", \"deep_lab_v3+_bottleneck\"], default=\"unet4\", help=\"生成器モデルの種類\")\n parser.add_argument(\"--model_type_D\", choices=[\"patchgan\", \"multiscale\", \"ganimation\"], default=\"patchgan\", help=\"識別器モデルの種類\")\n parser.add_argument('--save_checkpoints_dir', type=str, default=\"checkpoints\", help=\"モデルの保存ディレクトリ\")\n parser.add_argument('--load_checkpoints_path_G', type=str, default=\"\", help=\"生成器モデルの読み込みファイルのパス\")\n parser.add_argument('--load_checkpoints_path_D', type=str, default=\"\", help=\"識別器モデルの読み込みファイルのパス\")\n parser.add_argument('--tensorboard_dir', type=str, default=\"tensorboard\", help=\"TensorBoard のディレクトリ\")\n parser.add_argument(\"--n_epoches\", type=int, default=200, help=\"エポック数\") \n parser.add_argument('--batch_size', type=int, default=32, help=\"バッチサイズ\")\n parser.add_argument('--batch_size_valid', type=int, default=1, help=\"バッチサイズ\")\n parser.add_argument('--batch_size_test', type=int, default=1, help=\"バッチサイズ\")\n parser.add_argument('--lr', type=float, default=0.001, help=\"学習率\")\n parser.add_argument('--beta1', type=float, default=0.5, help=\"学習率の減衰率\")\n parser.add_argument('--beta2', type=float, default=0.999, help=\"学習率の減衰率\")\n parser.add_argument('--image_height_org', type=int, default=101, help=\"入力画像の高さ(pixel単位)\")\n parser.add_argument('--image_width_org', type=int, default=101, help=\"入力画像の幅(pixel単位)\")\n parser.add_argument('--image_height', type=int, default=128, help=\"入力画像の高さ(pixel単位)\")\n parser.add_argument('--image_width', type=int, default=128, help=\"入力画像の幅(pixel単位)\")\n parser.add_argument(\"--n_channels\", type=int, default=1, help=\"チャンネル数\") \n parser.add_argument(\"--n_samplings\", type=int, default=100000, help=\"ラベル数\")\n parser.add_argument('--data_augument', action='store_true')\n parser.add_argument('--depth', action='store_true')\n\n parser.add_argument('--lambda_bce', type=float, default=1.0, help=\"クロスエントロピー損失関数の係数値\")\n parser.add_argument('--lambda_enpropy', type=float, default=1.0, help=\"クロスエントロピー損失関数の係数値\")\n parser.add_argument('--lambda_lovasz_softmax', type=float, default=1.0, help=\"Lovasz Softmax loss の係数値\")\n parser.add_argument('--lambda_l1', type=float, default=0.0, help=\"L1損失関数の係数値\")\n parser.add_argument('--lambda_vgg', type=float, default=0.0, help=\"VGG perceptual loss_G の係数値\")\n parser.add_argument('--lambda_adv', type=float, default=1.0, help=\"Adv loss_G の係数値\")\n parser.add_argument('--adv_loss_type', choices=['vanilla', 'lsgan', 'hinge'], default=\"lsgan\", help=\"GAN Adv loss の種類\")\n parser.add_argument('--lambda_cond', type=float, default=1000.0, help=\"conditional expression loss の係数値\")\n\n parser.add_argument(\"--n_diaplay_step\", type=int, default=100,)\n parser.add_argument('--n_display_valid_step', type=int, default=100, help=\"valid データの tensorboard への表示間隔\")\n parser.add_argument(\"--n_save_epoches\", type=int, default=50,)\n\n parser.add_argument(\"--val_rate\", type=float, default=0.05)\n parser.add_argument(\"--seed\", type=int, default=71)\n parser.add_argument('--device', choices=['cpu', 'gpu'], default=\"gpu\", help=\"使用デバイス (CPU or GPU)\")\n parser.add_argument('--n_workers', type=int, default=4, help=\"CPUの並列化数(0 で並列化なし)\")\n parser.add_argument('--use_cuda_benchmark', 
action='store_true', help=\"torch.backends.cudnn.benchmark の使用有効化\")\n parser.add_argument('--use_cuda_deterministic', action='store_true', help=\"再現性確保のために cuDNN に決定論的振る舞い有効化\")\n parser.add_argument('--submit', action='store_true')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n if( args.model_type_G == \"unet4bottleneck\" ):\n args.depth = True\n if( args.model_type_D == \"ganimation\" ):\n args.depth = True\n\n # 実験名を自動的に変更\n if( args.exper_name == \"single_model_pytorch\" ):\n if( args.train_mode in [\"test\", \"eval\"] ):\n args.exper_name = \"test_\" + args.exper_name\n args.exper_name += \"_\" + args.model_type_G\n if( args.data_augument ):\n args.exper_name += \"_da\"\n if( args.depth ):\n args.exper_name += \"_depth\"\n\n args.exper_name += \"_ep\" + str(args.n_epoches)\n args.exper_name += \"_b\" + str(args.batch_size)\n args.exper_name += \"_lr{}\".format(args.lr)\n args.exper_name += \"_bce{}\".format(args.lambda_bce)\n args.exper_name += \"_enpropy{}\".format(args.lambda_enpropy)\n args.exper_name += \"_lovasz{}\".format(args.lambda_lovasz_softmax)\n args.exper_name += \"_l1{}\".format(args.lambda_l1)\n args.exper_name += \"_vgg{}\".format(args.lambda_vgg)\n args.exper_name += \"_adv{}_{}\".format(args.adv_loss_type, args.lambda_adv)\n if( args.model_type_D == \"ganimation\" ):\n args.exper_name += \"_cond{}\".format(args.lambda_cond)\n\n if( args.debug ):\n for key, value in vars(args).items():\n print('%s: %s' % (str(key), str(value)))\n\n if not os.path.isdir(args.results_dir):\n os.mkdir(args.results_dir)\n if not os.path.isdir( os.path.join(args.results_dir, args.exper_name) ):\n os.mkdir(os.path.join(args.results_dir, args.exper_name))\n if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, \"test\") ):\n os.mkdir(os.path.join(args.results_dir, args.exper_name, \"test\"))\n if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, \"test\", \"images\") ):\n os.mkdir(os.path.join(args.results_dir, args.exper_name, \"test\", \"images\"))\n if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, \"test\", \"masks\") ):\n os.mkdir(os.path.join(args.results_dir, args.exper_name, \"test\", \"masks\"))\n if( args.train_mode in [\"train\"] ):\n if not( os.path.exists(args.save_checkpoints_dir) ):\n os.mkdir(args.save_checkpoints_dir)\n if not( os.path.exists(os.path.join(args.save_checkpoints_dir, args.exper_name)) ):\n os.mkdir( os.path.join(args.save_checkpoints_dir, args.exper_name) )\n\n # 警告非表示\n warnings.simplefilter('ignore', DeprecationWarning)\n\n # 実行 Device の設定\n if( args.device == \"gpu\" ):\n use_cuda = torch.cuda.is_available()\n if( use_cuda == True ):\n device = torch.device( \"cuda\" )\n #torch.cuda.set_device(args.gpu_ids[0])\n print( \"実行デバイス :\", device)\n print( \"GPU名 :\", torch.cuda.get_device_name(device))\n print(\"torch.cuda.current_device() =\", torch.cuda.current_device())\n else:\n print( \"can't using gpu.\" )\n device = torch.device( \"cpu\" )\n print( \"実行デバイス :\", device)\n else:\n device = torch.device( \"cpu\" )\n print( \"実行デバイス :\", device)\n\n # seed 値の固定\n if( args.use_cuda_deterministic ):\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n # for visualation\n if( args.train_mode == \"train\" ):\n board_train = SummaryWriter( log_dir = 
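# --- annotation (not part of the record above) ---
# Sketch: with the CLI defaults above (train mode, unet4 G, patchgan D, no
# --data_augument / --depth), the auto-built experiment name resolves to:
#   single_model_pytorch_unet4_ep200_b32_lr0.001_bce1.0_enpropy1.0_lovasz1.0_l10.0_vgg0.0_advlsgan_1.0
# which then names the results/, checkpoints/ and tensorboard/ subfolders.
# --- end annotation ---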
os.path.join(args.tensorboard_dir, args.exper_name) )\n board_valid = SummaryWriter( log_dir = os.path.join(args.tensorboard_dir, args.exper_name + \"_valid\") )\n\n board_test = SummaryWriter( log_dir = os.path.join(args.tensorboard_dir, args.exper_name + \"_test\") )\n\n #================================\n # データセットの読み込み\n #================================ \n df_submission = pd.read_csv( os.path.join(args.dataset_dir, \"sample_submission.csv\" ) )\n\n # 学習用データセットとテスト用データセットの設定\n ds_train = TGSSaltDataset( args, args.dataset_dir, datamode = \"train\", data_augument = args.data_augument, debug = args.debug )\n ds_test = TGSSaltDataset( args, args.dataset_dir, datamode = \"test\", data_augument = False, debug = args.debug )\n\n dloader_train = torch.utils.data.DataLoader(ds_train, batch_size=args.batch_size, shuffle=True, num_workers = args.n_workers, pin_memory = True )\n dloader_test = torch.utils.data.DataLoader(ds_test, batch_size=args.batch_size_test, shuffle=False, num_workers = args.n_workers, pin_memory = True )\n \n #================================\n # 前処理\n #================================\n index = np.arange(len(ds_train))\n train_index, valid_index = train_test_split( index, test_size=args.val_rate, random_state=args.seed )\n if( args.debug ):\n print( \"train_index.shape : \", train_index.shape )\n print( \"valid_index.shape : \", valid_index.shape )\n print( \"train_index[0:10] : \", train_index[0:10] )\n print( \"valid_index[0:10] : \", valid_index[0:10] )\n\n dloader_train = torch.utils.data.DataLoader(Subset(ds_train, train_index), batch_size=args.batch_size, shuffle=True, num_workers = args.n_workers, pin_memory = True )\n dloader_valid = torch.utils.data.DataLoader(Subset(ds_train, valid_index), batch_size=args.batch_size_valid, shuffle=False, num_workers = args.n_workers, pin_memory = True )\n\n #================================\n # モデルの構造を定義する。\n #================================\n # 生成器\n if( args.model_type_G == \"unet4\" ):\n if( args.depth ):\n model_G = UNet4( n_in_channels = args.n_channels + 1, n_out_channels = args.n_channels, n_fmaps = 32,).to( device )\n else:\n model_G = UNet4( n_in_channels = args.n_channels, n_out_channels = args.n_channels, n_fmaps = 32,).to( device )\n elif( args.model_type_G == \"unet4bottleneck\" ):\n model_G = UNet4BottleNeck( n_in_channels = args.n_channels, n_out_channels = args.n_channels, n_fmaps = 32,).to( device )\n elif( args.model_type_G == \"unet4resnet34\" ):\n model_G = UNet4ResNet34( n_in_channels = args.n_channels, n_out_channels = args.n_channels, n_fmaps = 64,).to( device )\n elif( args.model_type_G == \"mgvton\" ):\n if( args.depth ):\n model_G = MGVTONResGenerator( input_nc = args.n_channels + 1, output_nc = args.n_channels, padding_type='zero', affine=False ).to( device )\n else:\n model_G = MGVTONResGenerator( input_nc = args.n_channels, output_nc = args.n_channels, padding_type='zero', affine=False ).to( device )\n #model_G = MGVTONResGenerator( input_nc = args.n_channels, output_nc = args.n_channels, padding_type='reflect', affine=True ).to( device )\n elif( args.model_type_G == \"ganimation\" ):\n if( args.depth ):\n model_G = GANimationGenerator( input_nc = args.n_channels + 1, output_nc = args.n_channels, conv_dim = 32 ).to( device )\n else:\n model_G = GANimationGenerator( input_nc = args.n_channels, output_nc = args.n_channels, conv_dim = 32 ).to( device )\n elif( args.model_type_G == \"deep_lab_v3+\" ):\n if( args.depth ):\n model_G = DeepLab( num_classes = args.n_channels, backbone = \"resnet\", 
n_in_channels = args.n_channels + 1, output_stride = 16, sync_bn = False, freeze_bn = False, pretrained_backbone=True ).to( device )\n else:\n model_G = DeepLab( num_classes = args.n_channels, backbone = \"resnet\", n_in_channels = args.n_channels, output_stride = 16, sync_bn = False, freeze_bn = False, pretrained_backbone=True ).to( device )\n elif( args.model_type_G == \"deep_lab_v3+_bottleneck\" ):\n model_G = DeepLabBottleNeck( num_classes = args.n_channels, backbone = \"resnet\", n_in_channels = args.n_channels, output_stride = 16, n_bottleneck_channels = 1, sync_bn = False, freeze_bn = False, pretrained_backbone=True ).to( device )\n\n # 識別器\n if( args.model_type_D == \"patchgan\" ):\n model_D = PatchGANDiscriminator( n_in_channels = args.n_channels, n_fmaps = 32 ).to( device )\n elif( args.model_type_D == \"multiscale\" ):\n model_D = MultiscaleDiscriminator( n_in_channels = args.n_channels, n_fmaps = 32, n_dis = 3 ).to( device )\n elif( args.model_type_D == \"ganimation\" ):\n model_D = GANimationDiscriminator( n_in_channels = args.n_channels, n_fmaps = 32, feat_dim = 1 ).to( device )\n\n if( args.debug ):\n print( \"model_G :\\n\", model_G )\n print( \"model_D :\\n\", model_D )\n\n # モデルを読み込む\n if not args.load_checkpoints_path_G == '' and os.path.exists(args.load_checkpoints_path_G):\n load_checkpoint(model_G, device, args.load_checkpoints_path_G )\n if not args.load_checkpoints_path_D == '' and os.path.exists(args.load_checkpoints_path_D):\n load_checkpoint(model_D, device, args.load_checkpoints_path_D )\n\n #================================\n # optimizer_G の設定\n #================================\n optimizer_G = optim.Adam(\n params = model_G.parameters(),\n lr = args.lr, betas = (args.beta1,args.beta2)\n )\n\n optimizer_D = optim.Adam(\n params = model_D.parameters(),\n lr = args.lr, betas = (args.beta1,args.beta2)\n )\n\n #================================\n # loss_G 関数の設定\n #================================\n loss_l1_fn = nn.L1Loss()\n loss_vgg_fn = VGGLoss(device, args.n_channels)\n loss_entropy_fn = ParsingCrossEntropyLoss()\n loss_bce_fn = nn.BCEWithLogitsLoss()\n loss_lovasz_softmax_fn = LovaszSoftmaxLoss()\n\n if( args.adv_loss_type == \"vanilla\" ):\n loss_adv_fn = VanillaGANLoss(device)\n elif( args.adv_loss_type == \"lsgan\" ):\n loss_adv_fn = LSGANLoss(device)\n elif( args.adv_loss_type == \"hinge\" ):\n loss_adv_fn = HingeGANLoss(device)\n\n if( args.model_type_D == \"ganimation\" ):\n loss_cond_fn = ConditionalExpressionLoss()\n\n #================================\n # モデルの学習\n #================================ \n if( args.train_mode == \"train\" ):\n print(\"Starting Training Loop...\")\n n_print = 1\n step = 0\n for epoch in tqdm( range(args.n_epoches), desc = \"Epoches\" ):\n #=====================================\n # 学習用データの処理\n #=====================================\n for iter, inputs in enumerate( tqdm( dloader_train, desc = \"minbatch iters\" ) ):\n model_G.train() \n model_D.train() \n\n # 一番最後のミニバッチループで、バッチサイズに満たない場合は無視する(後の計算で、shape の不一致をおこすため)\n if inputs[\"image\"].shape[0] != args.batch_size:\n break\n\n # ミニバッチデータを GPU へ転送\n image_name = inputs[\"image_name\"]\n image = inputs[\"image\"].to(device)\n mask = inputs[\"mask\"].to(device)\n depth = inputs[\"depth\"].to(device)\n if( args.debug and n_print > 0):\n print( \"image.shape : \", image.shape )\n print( \"mask.shape : \", mask.shape )\n print( \"depth.shape : \", depth.shape )\n\n #====================================================\n # 学習処理\n 
#====================================================\n #----------------------------------------------------\n # 生成器 の forword 処理\n #----------------------------------------------------\n # 学習用データをモデルに流し込む\n if( args.model_type_G == \"unet4bottleneck\" ):\n output, output_mask, output_none_act = model_G( image, depth )\n elif( args.model_type_G == \"deep_lab_v3+_bottleneck\" ):\n output, output_mask, output_none_act = model_G( image, depth )\n else:\n if( args.depth ):\n depth = depth.expand(depth.shape[0], depth.shape[1], image.shape[2], image.shape[3] )\n concat = torch.cat( [image, depth], dim=1)\n output, output_mask, output_none_act = model_G( concat )\n else:\n output, output_mask, output_none_act = model_G( image )\n\n if( args.debug and n_print > 0 ):\n print( \"output.shape :\", output.shape )\n \n #----------------------------------------------------\n # 識別器の更新処理\n #----------------------------------------------------\n # 無効化していた識別器 D のネットワークの勾配計算を有効化。\n for param in model_D.parameters():\n param.requires_grad = True\n\n # 学習用データをモデルに流し込む\n if( args.model_type_D == \"ganimation\" ):\n d_real, d_real_depth = model_D( mask )\n d_fake, d_fake_depth = model_D( output.detach() )\n if( args.debug and n_print > 0 ):\n print( \"d_real.shape :\", d_real.shape )\n print( \"d_fake.shape :\", d_fake.shape )\n print( \"d_real_depth.shape :\", d_real_depth.shape )\n print( \"d_fake_depth.shape :\", d_fake_depth.shape )\n else:\n d_real = model_D( mask )\n d_fake = model_D( output.detach() )\n if( args.debug and n_print > 0 ):\n print( \"d_real.shape :\", d_real.shape )\n print( \"d_fake.shape :\", d_fake.shape )\n\n # 損失関数を計算する\n loss_D, loss_D_real, loss_D_fake = loss_adv_fn.forward_D( d_real, d_fake )\n if( args.model_type_D == \"ganimation\" ):\n loss_D_cond_depth = loss_cond_fn( d_real_depth, depth[:,:,0,0] ) + loss_cond_fn( d_fake_depth, depth[:,:,0,0] )\n loss_D = loss_D + args.lambda_cond * loss_D_cond_depth\n\n # ネットワークの更新処理\n optimizer_D.zero_grad()\n loss_D.backward(retain_graph=True)\n optimizer_D.step()\n\n # 無効化していた識別器 D のネットワークの勾配計算を有効化。\n for param in model_D.parameters():\n param.requires_grad = False\n\n #----------------------------------------------------\n # 生成器の更新処理\n #----------------------------------------------------\n # 損失関数を計算する\n loss_l1 = loss_l1_fn( output, mask )\n loss_vgg = loss_vgg_fn( output, mask )\n loss_entropy = loss_entropy_fn( output, mask )\n loss_bce = loss_bce_fn( output, mask )\n loss_lovasz_softmax = loss_lovasz_softmax_fn( mask, output_none_act )\n loss_adv = loss_adv_fn.forward_G( d_fake )\n loss_G = args.lambda_l1 * loss_l1 + args.lambda_vgg * loss_vgg + args.lambda_enpropy * loss_entropy + args.lambda_bce * loss_bce + args.lambda_lovasz_softmax * loss_lovasz_softmax + args.lambda_adv * loss_adv\n if( args.model_type_D == \"ganimation\" ):\n loss_G_cond_depth = loss_cond_fn( d_real_depth, depth[:,:,0,0] ) + loss_cond_fn( d_fake_depth, depth[:,:,0,0] )\n loss_G = loss_G + args.lambda_cond * loss_G_cond_depth\n\n # ネットワークの更新処理\n optimizer_G.zero_grad()\n loss_G.backward()\n optimizer_G.step()\n\n #====================================================\n # 学習過程の表示\n #====================================================\n if( step == 0 or ( step % args.n_diaplay_step == 0 ) ):\n board_train.add_scalar('G/loss_G', loss_G.item(), step)\n board_train.add_scalar('G/loss_l1', loss_l1.item(), step)\n board_train.add_scalar('G/loss_vgg', loss_vgg.item(), step)\n board_train.add_scalar('G/loss_entropy', loss_entropy.item(), step)\n 
board_train.add_scalar('G/loss_bce', loss_bce.item(), step)\n board_train.add_scalar('G/loss_lovasz_softmax', loss_lovasz_softmax.item(), step)\n board_train.add_scalar('G/loss_adv', loss_adv.item(), step)\n if( args.model_type_D == \"ganimation\" ):\n board_train.add_scalar('G/loss_G_cond_depth', loss_G_cond_depth.item(), step)\n\n board_train.add_scalar('D/loss_D', loss_D.item(), step)\n board_train.add_scalar('D/loss_D_real', loss_D_real.item(), step)\n board_train.add_scalar('D/loss_D_fake', loss_D_fake.item(), step)\n if( args.model_type_D == \"ganimation\" ):\n board_train.add_scalar('D/loss_D_cond_depth', loss_D_cond_depth.item(), step)\n\n print( \"step={}, loss_G={:.5f}, loss_l1={:.5f}, loss_vgg={:.5f}, loss_entropy={:.5f}, loss_bce={:.5f}, loss_lovasz_softmax={:.5f}, loss_adv={:.5f}\".format(step, loss_G, loss_l1, loss_vgg, loss_entropy, loss_bce, loss_lovasz_softmax, loss_adv) )\n print( \"step={}, loss_D={:.5f}, loss_D_real={:.5f}, loss_D_fake={:.5f}\".format(step, loss_D.item(), loss_D_real.item(), loss_D_fake.item()) )\n if( args.model_type_D == \"ganimation\" ):\n print( \"step={}, loss_G_cond_depth={:.5f}\".format(step, loss_G_cond_depth,) )\n print( \"step={}, loss_D_cond_depth={:.5f}\".format(step, loss_D_cond_depth,) )\n\n visuals = [\n [image, mask, output],\n ]\n board_add_images(board_train, 'train', visuals, step+1)\n\n #=====================================\n # 検証用データの処理\n #=====================================\n if( step == 0 or ( step % args.n_display_valid_step == 0 ) ):\n loss_G_total = 0\n loss_l1_total, loss_vgg_total = 0, 0\n loss_entropy_total, loss_bce_total, loss_lovasz_softmax_total = 0, 0, 0\n loss_adv_total = 0\n loss_G_cond_depth_total = 0\n loss_D_total, loss_D_real_total, loss_D_fake_total, loss_D_cond_depth_total = 0, 0, 0, 0\n n_valid_loop = 0\n for iter, inputs in enumerate(dloader_valid):\n model_G.eval() \n model_D.eval() \n\n # 一番最後のミニバッチループで、バッチサイズに満たない場合は無視する(後の計算で、shape の不一致をおこすため)\n if inputs[\"image\"].shape[0] != args.batch_size_valid:\n break\n\n # ミニバッチデータを GPU へ転送\n image_name = inputs[\"image_name\"]\n image = inputs[\"image\"].to(device)\n mask = inputs[\"mask\"].to(device)\n depth = inputs[\"depth\"].to(device)\n\n #====================================================\n # 推論処理\n #====================================================\n # 生成器\n with torch.no_grad():\n if( args.model_type_G == \"unet4bottleneck\" ):\n output, output_mask, output_none_act = model_G( image, depth )\n elif( args.model_type_G == \"deep_lab_v3+_bottleneck\" ):\n output, output_mask, output_none_act = model_G( image, depth )\n else:\n if( args.depth ):\n depth = depth.expand(depth.shape[0], depth.shape[1], image.shape[2], image.shape[3] )\n concat = torch.cat( [image, depth], dim=1)\n output, output_mask, output_none_act = model_G( concat )\n else:\n output, output_mask, output_none_act = model_G( image )\n\n # 識別器\n with torch.no_grad():\n if( args.model_type_D == \"ganimation\" ):\n d_real, d_real_depth = model_D( mask )\n d_fake, d_fake_depth = model_D( output.detach() )\n else:\n d_real = model_D( mask )\n d_fake = model_D( output.detach() )\n\n #----------------------------------------------------\n # 損失関数の計算\n #----------------------------------------------------\n # 生成器\n loss_l1 = loss_l1_fn( output, mask )\n loss_vgg = loss_vgg_fn( output, mask )\n loss_entropy = loss_entropy_fn( output, mask )\n loss_bce = loss_bce_fn( output, mask )\n loss_lovasz_softmax = loss_lovasz_softmax_fn( mask, output_none_act )\n loss_adv = loss_adv_fn.forward_G( d_fake 
)\n loss_G = args.lambda_l1 * loss_l1 + args.lambda_vgg * loss_vgg + args.lambda_enpropy * loss_entropy + args.lambda_bce * loss_bce + args.lambda_lovasz_softmax * loss_lovasz_softmax + args.lambda_adv * loss_adv\n\n if( args.model_type_D == \"ganimation\" ):\n loss_G_cond_depth = loss_cond_fn( d_real_depth, depth[:,:,0,0] ) + loss_cond_fn( d_fake_depth, depth[:,:,0,0] )\n loss_G = loss_G + args.lambda_cond * loss_G_cond_depth\n\n # 識別器\n loss_D, loss_D_real, loss_D_fake = loss_adv_fn.forward_D( d_real, d_fake )\n if( args.model_type_D == \"ganimation\" ):\n loss_D_cond_depth = loss_cond_fn( d_real_depth, depth[:,:,0,0] ) + loss_cond_fn( d_fake_depth, depth[:,:,0,0] )\n loss_D = loss_D + args.lambda_cond * loss_D_cond_depth\n\n # total\n loss_G_total += loss_G\n loss_l1_total += loss_l1\n loss_vgg_total += loss_vgg\n loss_entropy_total += loss_entropy\n loss_bce_total += loss_bce\n loss_lovasz_softmax_total += loss_lovasz_softmax\n loss_adv_total += loss_adv\n\n if( args.model_type_D == \"ganimation\" ):\n loss_G_cond_depth_total += loss_G_cond_depth\n loss_D_total += loss_D\n loss_D_real_total += loss_D_real\n loss_D_fake_total += loss_D_fake\n if( args.model_type_D == \"ganimation\" ):\n loss_D_cond_depth_total += loss_D_cond_depth\n\n # \n if( iter <= args.batch_size ):\n visuals = [\n [image, mask, output],\n ]\n board_add_images(board_valid, 'valid/{}'.format(iter), visuals, step+1)\n\n n_valid_loop += 1\n\n #----------------------------------------------------\n # 表示処理\n #----------------------------------------------------\n # 生成器\n board_valid.add_scalar('G/loss_G', loss_G_total.item()/n_valid_loop, step)\n board_valid.add_scalar('G/loss_l1', loss_l1_total.item()/n_valid_loop, step)\n board_valid.add_scalar('G/loss_vgg', loss_vgg_total.item()/n_valid_loop, step)\n board_valid.add_scalar('G/loss_entropy', loss_entropy_total.item()/n_valid_loop, step)\n board_valid.add_scalar('G/loss_bce', loss_bce_total.item()/n_valid_loop, step)\n board_valid.add_scalar('G/loss_lovasz_softmax', loss_lovasz_softmax_total.item()/n_valid_loop, step)\n board_valid.add_scalar('G/loss_adv', loss_adv_total.item()/n_valid_loop, step)\n if( args.model_type_D == \"ganimation\" ):\n board_valid.add_scalar('G/loss_G_cond_depth', loss_G_cond_depth_total.item()/n_valid_loop, step)\n\n # 識別器\n board_valid.add_scalar('D/loss_D', loss_D_total.item()/n_valid_loop, step)\n board_valid.add_scalar('D/loss_D_real', loss_D_real_total.item()/n_valid_loop, step)\n board_valid.add_scalar('D/loss_D_fake', loss_D_fake_total.item()/n_valid_loop, step)\n if( args.model_type_D == \"ganimation\" ):\n board_valid.add_scalar('D/loss_D_cond_depth', loss_D_cond_depth_total.item()/n_valid_loop, step)\n\n step += 1\n n_print -= 1\n\n #====================================================\n # モデルの保存\n #====================================================\n if( epoch % args.n_save_epoches == 0 ):\n save_checkpoint( model_G, device, os.path.join(args.save_checkpoints_dir, args.exper_name, 'model_ep%03d.pth' % (epoch)) )\n save_checkpoint( model_G, device, os.path.join(args.save_checkpoints_dir, args.exper_name, 'model_final.pth') )\n print( \"saved checkpoints\" )\n \n save_checkpoint( model_G, device, os.path.join(args.save_checkpoints_dir, args.exper_name, 'model_final.pth') )\n print(\"Finished Training Loop.\")\n\n #================================\n # 学習用データでの推論処理\n #================================\n print(\"Starting eval Valid Loop...\")\n y_pred_valid = []\n y_pred_valid_mask = []\n model_G.eval()\n for step, inputs in enumerate( 
tqdm( dloader_valid, desc = \"Samplings\" ) ):\n if inputs[\"image\"].shape[0] != args.batch_size_valid:\n break\n\n image = inputs[\"image\"].to(device)\n mask = inputs[\"mask\"].to(device)\n depth = inputs[\"depth\"].to(device)\n\n # 生成器 G の 推論処理\n with torch.no_grad():\n if( args.model_type_G in [\"unet4bottleneck\"] ):\n output, output_mask, output_none_act = model_G( image, depth )\n else:\n if( args.depth ):\n depth = depth.expand(depth.shape[0], depth.shape[1], image.shape[2], image.shape[3] )\n concat = torch.cat( [image, depth], dim=1)\n output, output_mask, output_none_act = model_G( concat )\n else:\n output, output_mask, output_none_act = model_G( image )\n\n y_pred_valid.append( output[0].detach().cpu().numpy() )\n y_pred_valid_mask.append( mask[0].detach().cpu().numpy() )\n\n y_pred_valid = np.array( y_pred_valid )\n y_pred_valid_mask = np.array( y_pred_valid_mask )\n if( args.debug ):\n print( \"type(y_pred_valid) : \", type(y_pred_valid) )\n print( \"y_pred_valid.shape : \", y_pred_valid.shape )\n\n #================================\n # テスト用データでの推論処理\n #================================\n print(\"Starting eval Test Loop...\")\n y_pred_test = []\n test_image_names = []\n model_G.eval()\n for step, inputs in enumerate( tqdm( dloader_test, desc = \"Samplings\" ) ):\n if inputs[\"image\"].shape[0] != args.batch_size_test:\n break\n\n image_name = inputs[\"image_name\"]\n test_image_names.append(image_name[0])\n image = inputs[\"image\"].to(device)\n depth = inputs[\"depth\"].to(device)\n\n # 生成器 G の 推論処理\n with torch.no_grad():\n if( args.model_type_G in [\"unet4bottleneck\"] ):\n output, output_mask, output_none_act = model_G( image, depth )\n else:\n if( args.depth ):\n depth = depth.expand(depth.shape[0], depth.shape[1], image.shape[2], image.shape[3] )\n concat = torch.cat( [image, depth], dim=1)\n output, output_mask, output_none_act = model_G( concat )\n else:\n output, output_mask, output_none_act = model_G( image )\n\n y_pred_test.append( output[0].detach().cpu().numpy() )\n\n n_display_images = 50\n if( step <= n_display_images ):\n visuals = [\n [image, output],\n ]\n board_add_images(board_test, 'test/{}'.format(step), visuals, -1 )\n\n save_image_w_norm( image, os.path.join( args.results_dir, args.exper_name, \"test\", \"images\", image_name[0] ) )\n save_image_w_norm( output, os.path.join( args.results_dir, args.exper_name, \"test\", \"masks\", image_name[0] ) )\n\n if( step >= args.n_samplings ):\n break\n\n y_pred_test = np.array( y_pred_test )\n if( args.debug ):\n print( \"type(y_pred_test) : \", type(y_pred_test) )\n print( \"y_pred_test.shape : \", y_pred_test.shape )\n\n #================================\n # 可視化処理\n #================================\n # 最適なマスクスレッショルド値での IoU スコアの計算\n thresholds = np.linspace(-0.95, 0.95, 100) # IoU スコアの低い結果を除外するためのスレッショルド(-1.0 ~ 1.0 は生成マスク画像のピクセル値に対応)\n ious = np.array( [iou_metric_batch(y_pred_valid_mask, np.int32(y_pred_valid > threshold)) for threshold in thresholds] )\n\n threshold_best_index = np.argmax(ious[9:-10]) + 9\n iou_best = ious[threshold_best_index]\n threshold_best = thresholds[threshold_best_index]\n print( \"iou_best = {:0.4f} \".format(iou_best) )\n print( \"threshold_best = {:0.4f} \".format(threshold_best) )\n\n fig, axs = plt.subplots()\n axs.plot(thresholds, ious)\n axs.plot(threshold_best, iou_best, \"xr\", label=\"Best threshold\")\n plt.xlabel(\"Threshold (mask pixel value)\")\n plt.ylabel(\"IoU\")\n plt.title(\"Threshold vs IoU ({}, {})\".format(threshold_best, iou_best))\n plt.grid()\n 
plt.legend()\n plt.savefig( os.path.join(args.results_dir, args.exper_name, \"IoU_mask_threshold.png\"), dpi = 300, bbox_inches = 'tight' )\n\n #================================\n # Kaggle API での submit\n #================================\n # RLE [Run Length Encoding] 形式で提出のため生成画像を元の画像サイズに変換\n y_pred_test_org = np.zeros( (len(y_pred_test), args.image_height_org, args.image_width_org), dtype=np.float32 )\n for i in range(len(y_pred_test)):\n y_pred_test_org[i] = cv2.resize( y_pred_test[i,0,:,:].squeeze(), (args.image_height_org, args.image_width_org), interpolation = cv2.INTER_NEAREST )\n #y_pred_test_org[i] = resize( y_pred_test[i,0,:,:].squeeze(), (args.image_height_org, args.image_width_org), mode='constant', preserve_range=True )\n\n # 提出用データに値を設定\n #threshold_best = 0.0\n y_sub = { name.split(\".png\")[0] : convert_rle(np.round(y_pred_test_org[i] > threshold_best)) for i,name in enumerate(test_image_names) }\n df_submission = pd.DataFrame.from_dict( y_sub, orient='index' )\n df_submission.index.names = ['id']\n df_submission.columns = ['rle_mask']\n df_submission.to_csv( os.path.join(args.results_dir, args.exper_name, args.submit_file) )\n\n if( args.submit ):\n # Kaggle-API で submit\n api = KaggleApi()\n api.authenticate()\n api.competition_submit( os.path.join(args.results_dir, args.exper_name, args.submit_file), args.exper_name, args.competition_id)\n os.system('kaggle competitions submissions -c {}'.format(args.competition_id) )\n ","sub_path":"tgs-salt-identification-challenge/pytorch_version_deep_lab_v3+/single_models.py","file_name":"single_models.py","file_ext":"py","file_size_in_byte":38968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306904489","text":"import numpy as np\nimport qutip\nfrom qutip import qip\nfrom random import random\nfrom bisect import bisect\nimport matplotlib.pyplot as plt\n \nclass qasm:\n def __init__(self,filename): # The function used when the object is created.\n self.filename=filename\n self.qasm_file=[]\n self.qasm_instructions=[]\n self.qasm_instruction_line=[]\n self.last_qubit_line_index=int(0)\n self.number_of_qubits=1\n self.probability_vector=([0,1])\n self.state=np.array([0,1])\n self.gate_dict=self.operator_dict_default()\n self.measurement_hist=[]\n pass\n \n def load_qasm_file(self):\n '''\n Reads in a .qasm file. filename a string with the filename, eg. 
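A note on the single_models.py training script that ends above: its most reusable piece is the mask-threshold sweep, which binarizes the real-valued predictions at each candidate cutoff and keeps the one that maximizes validation IoU. A minimal self-contained sketch of the same idea — the iou helper here is a stand-in for the script's iou_metric_batch, and the arrays are synthetic placeholders, not real TGS data:

import numpy as np

def iou(y_true, y_pred, eps=1e-7):
    # Intersection-over-union over a whole batch of binary masks.
    inter = np.logical_and(y_true, y_pred).sum()
    union = np.logical_or(y_true, y_pred).sum()
    return inter / (union + eps)

def best_threshold(y_true, y_score, thresholds=None):
    # Binarize the scores at each candidate cutoff and keep the best one.
    if thresholds is None:
        thresholds = np.linspace(-0.95, 0.95, 100)  # same range as the script
    ious = np.array([iou(y_true, y_score > t) for t in thresholds])
    best = int(np.argmax(ious))
    return thresholds[best], ious[best]

rng = np.random.default_rng(0)
y_true = rng.random((8, 101, 101)) > 0.5            # placeholder ground-truth masks
y_score = np.where(y_true, 0.6, -0.6) + rng.normal(0.0, 0.3, y_true.shape)
t_best, iou_best = best_threshold(y_true, y_score)
print("threshold_best = {:.3f}, iou_best = {:.3f}".format(t_best, iou_best))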
'test1.qasm'\n ---\n Return: List of strings with all characters in the file\n '''\n qasm_file=[]\n f=open(self.filename,'r')\n for line in f:\n qasm_file.append(line)\n self.qasm_file=qasm_file\n\n def get_filtered_qasm(self):\n '''\n In: List of strings with comments\n ---\n Return: list of strings without comments\n '''\n qasm_instructions=[]\n m=0\n for tt, line in enumerate(self.qasm_file):\n if line[0]=='#' or line.split()==[]:\n m+=1\n pass\n else:\n qasm_instructions.append('')\n for char in line:\n if not char=='#':\n qasm_instructions[tt-m]+=char\n else:\n break\n self.qasm_instructions=qasm_instructions\n \n def read_instruction_line(self,line_index):\n \"\"\"\n In: instructions \n read the first line\n recognize the operation and qubit\n ---\n Out: list with operation as first entry and list of qubit(s) as second entry\n \"\"\"\n instructions=self.qasm_instructions\n if instructions!=[]:\n instruction_line=str(instructions[line_index])\n instruction = instruction_line.split()\n operator = instruction[0]\n qubits = instruction[1].split(\",\") \n self.qasm_instruction_line=[operator, qubits]\n \n def read_qubits_string(self,qubits):\n \"\"\"\n In: list with qubit names\n ----\n Out: returns a list with qubit numbers starting at 0.\n \"\"\"\n qubit_numbers=[]\n for i in range(len(qubits)):\n name=qubits[i]\n number=name[1::]\n qubit_numbers.append(int(number))\n self.qubit_numbers=qubit_numbers\n #return qubit_numbers\n \n def run_algorithm(self):\n \"\"\"\n load the file, filter the instructions\n run the instructions line by line\n Out: final qubit state.\n \"\"\"\n self.load_qasm_file()\n self.get_filtered_qasm()\n self.get_number_of_qubits()\n self.create_qubits()\n for line_index,line in enumerate(self.qasm_instructions[self.last_qubit_line_index::]):\n self.read_instruction_line(line_index+self.last_qubit_line_index)\n self.run_instruction_line()\n return(self.state)\n \n def run_instruction_line(self):\n instruction=self.qasm_instruction_line\n self.read_qubits_string(instruction[1])\n qubits=self.qubit_numbers\n gate_matrix=self.gate_dict.get(instruction[0])\n matrix=[]\n if gate_matrix==None:\n if instruction[0]=='cnot':\n control=qubits[0]\n target=qubits[1]\n matrix=self.create_cnot_gate(control,target)\n self.act_gate_on_state(matrix)\n elif instruction[0]=='measure':\n self.do_measurement()\n else:\n raise NameError ('Gate ' + instruction[0] + ' not defined')\n else:\n for qubit_number in range(len(qubits)):\n matrix=self.create_single_qubit_gate(instruction[0],qubit_number)\n print(matrix)\n self.act_gate_on_state(matrix)\n\n def get_number_of_qubits(self):\n '''\n In: filtered quasm instructions\n ---\n Out: integer with number of needed qubits\n '''\n num_qubits=0\n last_qubit_line_index=0\n for line_index, line in enumerate(self.qasm_instructions):\n if 'qubit' in line:\n num_qubits+=1\n last_qubit_line_index=line_index\n self.number_of_qubits=num_qubits\n self.last_qubit_line_index=last_qubit_line_index+1\n \n def create_qubits(self):\n self.state=np.zeros(2**self.number_of_qubits) # construct the state vector\n self.state[0]=1 #start in the ground state\n\n def create_single_qubit_gate(self,gate,qubit_number):\n '''\n In: which gate is to be applied, qubit_number: which qubit is acted on (first qubit is 0), number_of_qubits: total number of qubits\n ---\n Out: Process matrix with dimension 2**number_of_qubits x 2**number_of_qubits\n '''\n single_qubit_gate=self.gate_dict[gate]\n if qubit_number==0:\n matrix=single_qubit_gate\n else:\n 
matrix=self.gate_dict['i']\n for tt in range(1,self.number_of_qubits):\n if tt==qubit_number:\n matrix=np.kron(matrix,single_qubit_gate)\n else:\n matrix=np.kron(matrix,self.gate_dict['i'])\n return matrix\n\n def create_cnot_gate(self,control,target):\n '''\n In: control qubit, starting with 0, target qubit, starting with 0, number of qubits\n ---\n Out: Matrix for the gate\n '''\n return qip.cnot(N=self.number_of_qubits, control=control, target=target).full()\n \n def do_measurement(self,number_of_measurements=10000):\n self.get_probability_vector()\n result=0\n hist=np.zeros(len(self.probability_vector))\n for i in range(number_of_measurements):\n result=int(self.measure())\n #add a count to the histogram\n hist[result]+=1.\n hist=hist/number_of_measurements\n self.measurement_hist=hist\n self.plot_measurement_hist()\n \n def plot_measurement_hist(self):\n plt.hist(self.measurement_hist)\n plt.xlabel(self.measurement_hist_xlabel())\n plt.show()\n \n def measurement_hist_xlabel(self):\n #just take binary of the index\n label=[]\n for i in range(len(self.probability_vector)):\n label.append(\"{0:b}\".format(i))\n return label\n \n def act_gate_on_state(self,matrix):\n '''\n In: Matrix that should be acted on the qubits\n ---\n Out: - updates the state of the qubit\n '''\n old_state=self.state\n new_state=np.dot(matrix,old_state)\n self.state=new_state\n\n def operator_dict_default(self):\n \"\"\"\n In: none\n Creates a dictionary with default operations\n Out: none\n \"\"\"\n operator_dict={}\n # add hadamard\n operator_dict.update({'h':1/np.sqrt(2)*np.array([[1,1],[1,-1]])})\n # add identity\n operator_dict.update({'i':np.array([[1,0],[0,1]])})\n # add identity\n operator_dict.update({'nop':np.array([[1,0],[0,1]])})\n # add Pauli X\n operator_dict.update({'x':np.array([[0,1],[1,0]])})\n # add Pauli Y\n operator_dict.update({'y':np.array([[0,-1j],[1j,0]])})\n # add Pauli Z\n operator_dict.update({'z':np.array([[1,0],[0,-1]])})\n return operator_dict\n\n def get_probability_vector(self):\n self.probability_vector=self.state**2\n \n \n def measure(self):\n weights=self.probability_vector\n values=range(2**self.number_of_qubits)\n total = 0\n cum_weights = []\n for w in weights:\n total += w\n cum_weights.append(total)\n x = random() * total\n i = bisect(cum_weights, x)\n return values[i]\n\n","sub_path":"quasm_module.py","file_name":"quasm_module.py","file_ext":"py","file_size_in_byte":8018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"180216536","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom .models import *\nfrom django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm\nfrom django import forms\nfrom django.contrib.auth import login, logout, authenticate\nfrom .forms import *\nimport hashlib \nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, get_list_or_404\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes\nfrom django.conf import settings\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.core.exceptions import SuspiciousOperation\nimport os\n\n\ndef homepage(request):\n\tlogged_in = False\n\tcourse_count = 
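One caveat in the quasm_module.py simulator that ends above: get_probability_vector squares the state vector directly (self.state**2), which is only correct for real amplitudes, yet the module's own Pauli-Y gate produces complex ones. Below is a sketch of the Born rule that also covers the complex case — a suggested fix, not what the module currently implements:

import numpy as np

def born_probabilities(state):
    # Born rule: p_i = |amplitude_i|**2; np.abs handles complex amplitudes.
    probs = np.abs(state) ** 2
    return probs / probs.sum()  # renormalize to guard against numerical drift

state = np.array([1.0, 1.0j]) / np.sqrt(2)  # (|0> + i|1>) / sqrt(2)
print(born_probabilities(state))            # -> [0.5 0.5]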
range(0,Course.objects.count(),2)\n\n\tif request.user.is_authenticated:\n\t\tlogged_in = True\n\n\tparams = {\"notes\": Note.objects.order_by('-note_whenPublished')[:6],\n\t\t\t \"tutorials\": Tutorial.objects.all,\n\t\t\t \"chapters\": Chapter.objects.all,\n\t\t\t \"courses\": Course.objects.all,\n\t\t\t \"logged_in\": logged_in,\n\t\t\t \"range\": course_count }\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/index.html\",\n\t\t\t\t context = params )\n\n\n#Session Handling\ndef register(request):\n\t#send home if already logged in\n\tif request.user.is_authenticated:\n\t\treturn redirect(\"/\")\n\n\terr_msgs = None\n\tif request.method == \"POST\":\n\t\tinfo = NewUserForm(request.POST)\n\t\tif info.is_valid():\n\t\t\temail = info.cleaned_data.get('email')\n\t\t\tif User.objects.filter(email = email).exists():\n\t\t\t\tinfo.add_error('email', 'account with email already exists')\n\t\t\t\terr_msgs = info.errors.items\n\t\t\telse:\n\t\t\t\tuser = info.save()\n\t\t\t\tuser.is_active = False\n\t\t\t\tuser.save()\n\t\t\t\thashed = make_hash(user)\n\t\t\t\tsend_mail(\"Notesite sign up confirmation\",\n\t\t\t\t \t\t f\"Thank you {user.first_name} for registering with Notesite as {user.username}. Please click on the link below to activate your account \\n\\n {request.META['HTTP_HOST']}/users/validate/{user.username}/{hashed}\",\n\t\t\t\t \t\t settings.EMAIL_HOST_USER,\n\t\t\t\t \t\t [user.email,])\n\t\t\t\treturn redirect(\"/preconfirm\")\n\t\telse:\n\t\t\terr_msgs = info.errors.items\n\n\tcreate_form = NewUserForm\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/register.html\",\n\t\t\t\t context = {\"create_form\": create_form,\n\t\t\t\t \t\t\t \"err_msgs\": err_msgs})\n\n#TODO: not be lazy and make a proper digest\ndef make_hash(user):\n\thash = hashlib.sha256(str.encode(user.username+user.password)).hexdigest()\n\treturn hash\n\ndef activate_user(request, username, token):\n\tuser = get_object_or_404(User, username = username)\n\tvalid_token = make_hash(user)\n\n\tif not user.is_active and valid_token == token:\n\t\tuser.is_active = True\n\t\tuser.save()\n\t\tmessages.success(request, f\"User confirmation successful. 
Please login to proceed\")\n\t\treturn redirect('/login')\n\telse:\n\t\treturn HttpResponse(\"The link is incorrect or the user is already validated\")\n\ndef preconfirm(request):\n\n\tif request.user.is_active:\n\t\tmessages.success(request, f\"Account already activated\")\n\t\treturn redirect('/')\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/preconfirm.html\",\n\t\t\t\t context = {\"logged_in\": False})\n\ndef login_user(request):\n\tif request.user.is_authenticated:\n\t\treturn redirect(\"/\")\n\terr_msgs = None\n\tif request.method == \"POST\":\n\t\tinfo = AuthenticationForm(request, request.POST)\n\t\tif info.is_valid():\n\t\t\tusername = info.cleaned_data.get('username')\n\t\t\tpassword = info.cleaned_data.get('password')\n\t\t\tuser = authenticate(username = username, password = password)\n\t\t\tif user and user.is_active:\n\t\t\t\tmessages.success(request, f\"Logged in\")\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect(\"/\")\n\n\t\terr_msgs = info.errors.items \n\n\tlogin_form = AuthenticationForm()\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/login.html\",\n\t\t\t\t context = {\"login_form\":login_form,\n\t\t\t\t \t\t\t \"err_msgs\": err_msgs})\n\ndef logout_user(request):\n\tif request.user.is_authenticated:\n\t\tlogout(request)\n\t\tmessages.info(request, \"Logged out successfully\")\n\treturn redirect(\"/\")\n\ndef direct_path(request, var):\n\tlogged_in = False\n\tif request.user.is_authenticated:\n\t\tlogged_in = True\n\tcourse = get_object_or_404(Course, course_code = var)\n\tchapters = Chapter.objects.filter(chapter_course = course)\n\n\treturn render(request = request,\n\t\t template_name = \"main/chapters.html\",\n\t\t context = {\"chapters\": chapters,\n\t\t \t\t\t \"course\": var,\n\t\t \t\t\t \"logged_in\": logged_in})\n\n\ndef indirect_path(request, var, var2):\n\n\tlogged_in = False\n\tif request.user.is_authenticated:\n\t\tlogged_in = True\n\tcourse = get_object_or_404(Course, course_code = var)\n\tchapter = get_object_or_404(Chapter, chapter_course = course, chapter_name = var2)\n\tnotes_list = Note.objects.filter(note_chapter = chapter)\n\tfaves = []\n\tnotes = {}\n\tfor note in notes_list:\n\t\tfcount = Favourite.objects.filter(fav_note = note).count()\n\t\tnotes[note] = fcount\n\n\t#sorts by favourites\n\tnotes = {k: v for k, v in sorted(notes.items(), key=lambda x: x[1], reverse = True)}\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/notes.html\",\n\t\t\t\t context = {\"notes\": notes,\n\t\t\t\t \t\t\t \"logged_in\":logged_in,\n\t\t\t\t \t\t\t \"chapter\":chapter,\n\t\t\t\t \t\t\t \"course\":course})\n\n\ndef show(request, var, var2, var3):\n\tlogged_in = False\n\tuser = None\n\tliked = False\n\tif request.user.is_authenticated:\n\t\tlogged_in = True\n\t\tuser = request.user\n\n\tcourse = get_object_or_404(Course, course_code = var)\n\tchapter = get_object_or_404(Chapter, chapter_course = course, chapter_name = var2)\n\tnote = get_object_or_404(Note, note_chapter = chapter, id = var3)\n\n\tif user:\n\t\tliked = Favourite.objects.filter(fav_user = user, fav_note = note)\n\ttotal_likes = len(Favourite.objects.filter(fav_note = note))\n\n\tif request.method == 'POST':\n\t\tif request.POST.get('like'):\n\t\t\tif user:\n\t\t\t\tis_liked = Favourite.objects.filter(fav_user = user, fav_note = note)\n\t\t\t\tif is_liked:\n\t\t\t\t\tis_liked.delete()\n\t\t\t\t\tmessages.success(request, f\"Removed from favourites\")\n\t\t\t\telse:\n\t\t\t\t\tnew_fav = Favourite.objects.create(fav_user = 
user,fav_note = note)\n\t\t\t\t\tnew_fav.save()\n\t\t\t\t\tmessages.success(request, f\"Added to favourites\")\n\n\t\t\t\treturn redirect(\".\")\n\t\t\telse:\n\t\t\t\traise PermissionDenied\n\t\telse:\n\t\t\tif not request.user.username == note.note_author:\n\t\t\t\traise PermissionDenied\n\t\t\telse:\n\t\t\t\t#remove referenced file\n\t\t\t\tif os.path.isfile(os.path.join(settings.MEDIA_ROOT, note.note_fileurl.split('/')[-1])):\n\t\t\t\t\tos.remove(os.path.join(settings.MEDIA_ROOT, note.note_fileurl.split('/')[-1]))\n\t\t\t\tnote.delete()\n\t\t\t\tmessages.success(request, f\"Note Deleted\")\n\t\t\t\treturn redirect(\"/\")\n\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/show.html\",\n\t\t\t\t context = {\"course\": course,\n\t\t\t\t \t\t\t \"chapter\": chapter,\n\t\t\t\t \t\t\t \"note\": note,\n\t\t\t\t \t\t\t \"file_type\": note.note_fileurl.split('.')[-1],\n\t\t\t\t \t\t\t \"liked\": liked,\n\t\t\t\t \t\t\t \"total_likes\":total_likes,\n\t\t\t\t \t\t\t \"logged_in\": logged_in})\n\ndef open_document(request, var, var2, var3):\n\treturn HttpResponse(200)\n\ndef create(request):\n\terr_msgs = None\n\tpermitted_extensions = ['png','jpg','jpeg','pdf','odf','docx','doc','ppt','txt']\n\tillegal_file_extension = False\n\n\tif not request.user.is_authenticated:\n\t\traise PermissionDenied\n\n\tlogged_in = True\n\tif request.method == \"POST\":\n\t\tdata = NoteForm(request.POST)\n\t\tnote = data.save(commit = False)\n\t\tnote.note_author = request.user\n\t\tchapter = data.cleaned_data.get('note_chapter')\n\t\t\n\t\trequest_file = request.FILES['document'] if 'document' in request.FILES else None\n\t\tif request_file:\n\t\t\tif request_file.name.split('.')[-1] in permitted_extensions and request_file.size <= 5242880:\n\n\t\t\t\t#save attatched file\n\t\t\t\tfs = FileSystemStorage()\n\t\t\t\tfile = fs.save(request_file.name, request_file)\n\t\t\t\tfileurl = fs.url(file)\n\n\t\t\t\tnote.note_fileurl = fileurl\n\t\t\telse:\n\t\t\t\tillegal_file_extension = True\n\n\t\tif data.is_valid() and not illegal_file_extension:\n\t\t\tnote.save()\n\t\t\tmessages.success(request, f\"Note published\")\n\t\t\treturn redirect(f\"/{chapter.chapter_course.course_code}/{chapter.chapter_name}/{note.id}\")\n\t\t\n\t\t#todo notification about publishing\n\t\telse:\n\t\t\terr_msgs = data.errors.items\n\n\n\tnew_form = NoteForm\n\treturn render(request = request,\n\t\t template_name = \"main/new.html\",\n\t\t context = {\"new_form\": new_form,\n\t\t \t\t\t \"logged_in\": logged_in,\n\t\t \t\t\t \"err_msgs\": err_msgs,\n\t\t \t\t\t \"illegal_extension\": illegal_file_extension})\n\ndef view_user(request, uname):\n\tif not request.user.is_authenticated:\n\t\tpass\n\n\tquery_user = get_object_or_404(User, username = uname) \n\n\tcurr_user = request.user\n\tuser_is_authenticated = False\n\tif curr_user.username == uname:\n\t\tuser_is_authenticated = True\n\n\tnotes_list = Note.objects.filter(note_author = uname)\n\tfaves = []\n\tnotes = {}\n\tfor note in notes_list:\n\t\tfcount = Favourite.objects.filter(fav_note = note).count()\n\t\tnotes[note] = fcount\n\n\ttotal_notes = len(notes.keys())\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/user.html\",\n\t\t\t\t context = {\"logged_in\": request.user.is_authenticated,\n\t\t\t\t \t\t\t \"query_user\": query_user,\n\t\t\t\t \t\t\t \"total_notes\": total_notes,\n\t\t\t\t \t\t\t \"curr_user_is_auth\": user_is_authenticated,\n\t\t\t\t \t\t\t \"notes\": notes})\n\n\ndef view_favourites(request):\n\tif not 
request.user.is_authenticated:\n\t\traise PermissionDenied\n\n\tlogged_in = True\n\tuser = request.user\n\tfaves = Favourite.objects.filter(fav_user = user)\n\tnotes = {}\n\tfor fav in faves:\n\t\tlikes_for_note = Favourite.objects.filter(fav_note = fav.fav_note).count()\n\t\tnotes[fav.fav_note] = likes_for_note\n\n\tnote_count = len(notes)\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/favourites.html\",\n\t\t\t\t context = {\"logged_in\": logged_in,\n\t\t\t\t \t\t\t \"notes\": notes,\n\t\t\t\t \t\t\t \"faves\": faves,\n\t\t\t\t \t\t\t \"note_count\": note_count,\n\t\t\t\t \t\t\t \"user\": user})\n\n\ndef user_settings(request, uname):\n\tif request.user.username != uname:\n\t\traise PermissionDenied\n\n\tuser = request.user\n\n\tif request.method == \"POST\":\n\t\taction = request.POST.copy().get('act')\n\t\tif action == \"chname\":\n\t\t\treturn redirect(\"./changename\")\n\t\telif action == \"chpass\":\n\t\t\treturn redirect(\"./changepass\")\n\t\telif action == \"deac\":\n\t\t\treturn deactivate_account(request, user)\n\t\telse:\n\t\t\treturn HttpResponseBadRequest(\"400 Bad Request\")\n\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/user_settings.html\",\n\t\t\t\t context = {\"user\": user,\n\t\t\t\t \t\t\t \"logged_in\": user.is_authenticated,\n\t\t\t\t \t\t\t \"type\": None})\n\n\ndef change_name(request, uname):\n\tif request.user.username != uname :\n\t\traise PermissionDenied\n\n\tuser = request.user\n\tif request.method == \"POST\" and (request.POST.get('fname') or request.POST.get('lname')):\n\t\tdata = request.POST.get('fname'), request.POST.get('lname') #tuple 0,1 fname,lname\n\t\tuser.first_name = data[0]\n\t\tuser.last_name = data[1]\n\t\tuser.save()\n\t\tmessages.success(request, f\"Name changed\")\n\t\treturn redirect(f\"/users/{user.username}/settings/\")\n\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/user_settings.html\",\n\t\t\t\t context = {\"user\": user,\n\t\t\t\t \t\t\t \"logged_in\": user.is_authenticated,\n\t\t\t\t \t\t\t \"type\": \"chname\"})\n\n\ndef change_pass(request, uname):\n\tif request.user.username != uname:\n\t\traise PermissionDenied\n\terr_msgs = None\n\tuser = request.user\n\tif request.method == \"POST\":\n\t\tinfo = PasswordChangeForm(request.user, request.POST)\n\n\t\tif info.is_valid():\n\t\t\tuser = info.save()\n\t\t\tupdate_session_auth_hash(request, user) #keep user logged in after passchange\n\t\t\tmessages.success(request, f\"Password Updated\")\n\t\t\treturn redirect(f\"/users/{user.username}/settings/\")\n\t\telse:\n\t\t\terr_msgs = info.errors.items\n\n\tchange_form = PasswordChangeForm(request.user)\n\treturn render(request = request,\n\t\t\t\t template_name = \"main/user_settings.html\",\n\t\t\t\t context = {\"user\": user,\n\t\t\t\t \t\t\t \"logged_in\": user.is_authenticated,\n\t\t\t\t \t\t\t \"type\": \"chpass\",\n\t\t\t\t \t\t\t \"change_form\": change_form, \n\t\t\t\t \t\t\t \"err_msgs\": err_msgs})\n\n\ndef deactivate_account(request, user):\n\tif request.user.username != user.username:\n\t\traise PermissionDenied\n\n\tuser.is_active = False\n\tlogout(request)\n\tmessages.success(request, f\"Account {user.username} deactivated\")\n\tuser.save()\n\treturn redirect(\"/\")\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"553676584","text":"# story arc grapher. 
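The main/views.py module that ends above rolls its own activation digest (make_hash, flagged by its own TODO). Django's default_token_generator — already imported at the top of that file — is the conventional replacement. A sketch of that pattern, assuming the same User model; make_activation_link and activation_token_is_valid are hypothetical helper names, not part of the original module:

from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode

def make_activation_link(user):
    # The token is derived from the user's password hash and last-login
    # timestamp, so it is invalidated automatically once the account changes.
    uid = urlsafe_base64_encode(force_bytes(user.pk))
    token = default_token_generator.make_token(user)
    return "/users/validate/{}/{}".format(uid, token)

def activation_token_is_valid(user, token):
    return default_token_generator.check_token(user, token)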
Allows you to plot out a character's mental state on each story beat,\n# use graphing abilities to observe their changes overtime, and how their changes interact\n'''\nTurn this into a Video Game Narrative Design Tool:\nThis could eventually be useful in videogame design by adding parameters for production time projections, creating possible asset sheets, and other game-relevant info.\n'''\n\n\nimport math\nimport json\nimport logging\nimport os\nfrom model import UUID\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\nfrom PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot\nfrom PyQt5.QtWidgets import QFileDialog\n#import pyqtgraph as pyqtgraph\n\nactions = ['heal', 'wound', 'kill', 'betray', 'defend', 'immobilize', 'condemn', 'beg']\nemotions = ['happy', 'sad', 'furious', 'motivated', 'disgusted']\nenergy_level_list = ['manic', 'excited', 'neutral', 'sluggish', 'lethargic']\nrational_level_list = ['logical', 'thoughtful', 'neutral', 'irrational', 'idiotic']\ncaution_level_list = ['reckless', 'confident', 'neutral', 'reserved', 'risk averse']\nentries = []\n\nlogging.basicConfig(filename='log.log', filemode='w', level=logging.DEBUG)\n\nui_file = \"mainwindow.ui\"\nadd_char_ui_file = \"add_character_window.ui\"\nedit_char_ui_file = \"character_details_window.ui\"\nadd_entry_ui_file = \"add_entry_window.ui\"\nUi_MainWindow, QtBaseClass = uic.loadUiType(ui_file)\nUi_AddCharacterWindow, QtBaseClass = uic.loadUiType(add_char_ui_file)\nUi_EditCharacterWindow, QtBaseClass = uic.loadUiType(edit_char_ui_file)\nUi_AddEntryWindow, QtBaseClass = uic.loadUiType(add_entry_ui_file)\n\nclass View(QtWidgets.QMainWindow, Ui_MainWindow):\n '''\n App Name: Storiograph\n - This Graph will keep track of the arc as you plot it\n -In the model-controller-view system, this is a view object.\n - Add character arcs\n - Change the level of each point\n - Add a \"tension\" arc that tracks overall story development\n\n '''\n def __init__(self):\n QtWidgets.QMainWindow.__init__(self)\n Ui_MainWindow.__init__(self)\n self.setupUi(self)\n self.tabWidget = EntryPerCharWidget(view=self)\n self.tabHolder.addWidget(self.tabWidget)\n button_palette = QtGui.QPalette()\n button_palette.setColor(QtGui.QPalette.Active, QtGui.QPalette.Button, QtGui.QColor(134, 235, 152))\n button_palette.setCurrentColorGroup(QtGui.QPalette.Disabled)\n button_palette.setColor(QtGui.QPalette.Disabled, QtGui.QPalette.Button, QtGui.QColor(209, 209, 209))\n self.applyEditsBtn.setPalette(button_palette)\n\n discard_palette = QtGui.QPalette()\n discard_palette.setColor(QtGui.QPalette.Active, QtGui.QPalette.Button, QtGui.QColor(219, 37, 67))\n discard_palette.setCurrentColorGroup(QtGui.QPalette.Disabled)\n discard_palette.setColor(QtGui.QPalette.Disabled, QtGui.QPalette.Button, QtGui.QColor(209, 209, 209))\n self.discardEditsBtn.setPalette(discard_palette)\n def set_controller(self, controller):\n self.controller = controller\n\n def connect_signals(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n def new(self):\n '''\n start a new project\n '''\n\n def clear(self):\n '''\n remove all content from the view\n '''\n pass\n\n def default_view(self):\n '''\n the standard view of the first character and one scale, if either exist\n '''\n\n pass\n\n def refresh_view(self, data):\n '''\n refresh all widgets with up to date information\n\n '''\n self.refresh_character_view(data)\n self.refresh_beat_view(data)\n self.refresh_synopsis_view(data)\n\n def refresh_character_view(self, data):\n\n #take all items\n # for x in range(0, self.characterList.count() + 1):\n # 
self.characterList.takeItem(x)\n # print(self.characterList.count())\n self.characterList.clear()\n name_list = []\n for key, value in data['characters'].items():\n name_list.append((value['name'], key))\n sorted_name_list = sorted(name_list, key=lambda x: x[0])\n for name_uuid in sorted_name_list:\n temp_widget = CharListWidgetItem(name_uuid[0], uuid=name_uuid[1])\n self.characterList.addItem(temp_widget)\n\n def refresh_beat_view(self, data):\n if len(data[\"beats\"]) > 1:\n self.plotSlider.setMaximum(len(data[\"beats\"])-1)\n else:\n self.plotSlider.setMaximum(0)\n\n def graph_styling(self, grid_x=True, grid_y=True, **kwargs):\n '''\n set the formatting of the graph\n '''\n self.graphWidget.setBackground(kwargs['bg'])\n self.graphWidget.setLabel('left', kwargs['left_label'])\n self.graphWidget.setLabel('bottom', kwargs['bottom_label'])\n self.graphWidget.addLegend()\n self.graphWidget.showGrid(x=grid_x, y=grid_y)\n pass\n\n def graph_data(self, character_names=[], y_axis_per_character=[]):\n '''\n refresh the graph with data about a character\n Controller will gather data from model and insert into this func.\n How should controller handle that?\n '''\n len_per_char = [len(char) for char in y_axis_per_character]\n x_len = max(len_per_char)\n x_axis = [i+1 for i in range(0, x_len)]\n max_per_char = [max(char) for char in y_axis_per_character]\n y_len = max(max_per_char)\n\n self.graphWidget.setXRange(0,x_len, padding=1)\n self.graphWidget.setYRange(0, y_len, padding=1)\n\n for char in y_axis_per_character:\n name = y_axis_per_character.index()\n self.graphWidget.plot(x_axis, char, ) # x, y, legend, color\n pass\n\n def refresh_synopsis_view(self, data):\n '''\n show what is happening before, now , and next in the story\n based on the position of the plotSlider\n data = the data variable of a model.StoryObject\n '''\n # prep vars\n if len(data[\"beats\"]) == 0:\n self.prevNameLabel.setText(\"...\")\n self.prevTextEdit.setPlainText(\"\")\n self.nextNameLabel.setText(\"...\")\n self.nextTextEdit.setPlainText(\"\")\n self.nowNameLabel.setText(\"...\")\n self.nowTextEdit.setPlainText(\"\")\n self.tabWidget.refresh_view()\n return\n prev_entry = True\n next_entry = True\n slider_val = self.plotSlider.value()\n # check if the plot slider is at max or min position\n # so we know whether prev or next text exists in data\n if slider_val == 0:\n prev_entry = False\n else:\n prev_entry_val = slider_val -1\n\n # slider value starts at 1, beats len starts at 1\n if slider_val == len(data[\"beats\"])-1:\n next_entry = False\n else:\n next_entry_val = slider_val + 1\n\n # edit the text widgets based on availability of data\n if prev_entry:\n self.prevNameLabel.setText(data[\"beats\"][prev_entry_val][\"name\"])\n self.prevTextEdit.setPlainText(data[\"beats\"][prev_entry_val][\"synopsis\"])\n else:\n self.prevNameLabel.setText(\"...\")\n self.prevTextEdit.setPlainText(\"\")\n if next_entry:\n self.nextNameLabel.setText(data[\"beats\"][next_entry_val][\"name\"])\n self.nextTextEdit.setPlainText(data[\"beats\"][next_entry_val][\"synopsis\"])\n else:\n self.nextNameLabel.setText(\"...\")\n self.nextTextEdit.setPlainText(\"\")\n self.nowNameLabel.setText(data[\"beats\"][slider_val][\"name\"])\n self.nowTextEdit.setPlainText(data[\"beats\"][slider_val][\"synopsis\"])\n self.tabWidget.refresh_view(data, slider_val, editable = False)\n\n def insert_beat_at_cursor_window(self, data=None, beat_num=int):\n self.insert_beat_window = AddBeatView(view=self, data=data, beat_num=beat_num)\n 
self.insert_beat_window.send_entry_data.connect(self.controller.insert_beat_in_model)\n self.insert_beat_window.canceled.connect(self.controller.cancel)\n self.insert_beat_window.show()\n return self.insert_beat_window\n\n def add_beat_to_end_window(self, data=None, beat_num=int):\n self.beat_window = AddBeatView(view=self, data=data, beat_num=beat_num)\n self.beat_window.send_entry_data.connect(self.controller.add_beat_to_end_model)\n self.beat_window.canceled.connect(self.controller.cancel)\n self.beat_window.show()\n return self.beat_window\n\n def edit_entry_window(self, entry_int):\n pass\n\n def new_scale_window(self):\n pass\n\n def character_details_window(self, character):\n '''\n show character details/ allow editing.\n\n '''\n\n def add_character_window(self):\n '''\n load a pop up window that lets you edit a character or add a new one,\n depending on if you passed in data or not\n returns the window object.\n '''\n self.char_window = AddCharacterView(view = self)\n self.char_window.send_character_data.connect(self.controller.add_character_to_model)\n self.char_window.canceled.connect(self.controller.cancel)\n self.char_window.show()\n return self.char_window\n\n def edit_character_window(self, data):\n self.edit_char_window = EditCharacterView(view = self, char_data= data)\n self.edit_char_window.send_character_data.connect(self.controller.edit_character_in_model)\n self.edit_char_window.canceled_signal.connect(self.controller.cancel)\n self.edit_char_window.show()\n return self.edit_char_window\n\n def character_beat_wizard(self, data, char_uuid):\n result = self.character_wizard_prompt()\n # handle the result of the wizard prompt when opening the beat wizard\n\n def character_wizard_prompt(self, data):\n \"\"\"\n learn if the user wants to:\n - edit every empty beat of a character\n - edit a custom selection of beats\n - edit every beat\n :return:\n result to pass directly into character_beat_wizard_window\n \"\"\"\n if hasattr(self, \"character_wizard_prompt_window\"):\n self.character_wizard_prompt_window.refresh_view()\n self.character_wizard_prompt_window.show()\n def save_as_window(self):\n print(\"saved!\")\n save_path, _filter = QFileDialog.getSaveFileName(self,\"Story Project\", \"./save_files\", \"Story Arcs (*.json)\")\n if save_path:\n logging.debug(\"Save Story Path: {}\".format(save_path))\n return save_path\n\n def load_window(self):\n print(\"load\")\n load_data, _filter = QFileDialog.getOpenFileName(self, \"Story Project\", \"./save_files\", \"Story Arcs (*.json)\")\n try:\n ext = os.path.splitext(load_data)[1]\n assert ext == '.json'\n logging.debug(\"Load Story Path: {}\".format(load_data))\n return load_data\n\n except AssertionError:\n logging.debug(\"File type for save data to load must be .json\")\n return None\n pass\n\nclass AddCharacterView(QtWidgets.QWidget, Ui_AddCharacterWindow):\n \n send_character_data = pyqtSignal(dict)\n canceled = pyqtSignal(QtWidgets.QWidget)\n\n def __init__(self, view=None):\n QtWidgets.QWidget.__init__(self)\n Ui_AddCharacterWindow.__init__(self)\n self.setupUi(self)\n self.view = view\n self.connect_signals()\n\n def connect_signals(self):\n self.addBtn.clicked.connect(self.create_character)\n self.cancelBtn.clicked.connect(self.cancel)\n\n def create_character(self):\n self.data = {}\n self.data['name']= self.nameLineEdit.text()\n self.data['age'] = self.ageLineEdit.text()\n self.data['desc'] = self.descTextEdit.toPlainText()\n self.send_character_data.emit(self.data)\n\n def cancel(self):\n self.canceled.emit(self)\n\n 
def refresh_view(self):\n self.nameLineEdit.setText(\"\")\n self.ageLineEdit.setText(\"\")\n self.descTextEdit.clear()\n\n\nclass EditCharacterView(QtWidgets.QWidget, Ui_EditCharacterWindow):\n send_character_data = pyqtSignal(dict)\n canceled_signal = pyqtSignal(QtWidgets.QWidget)\n\n def __init__(self, view = None, char_data = dict):\n QtWidgets.QWidget.__init__(self)\n Ui_AddCharacterWindow.__init__(self)\n self.view = view\n self.setupUi(self)\n self.connect_signals()\n self.fill_data(char_data)\n self.data = char_data\n\n def connect_signals(self):\n self.addBtn.clicked.connect(self.edit_character)\n self.cancelBtn.clicked.connect(self.cancel)\n\n def fill_data(self, data):\n self.nameLineEdit.setText(data['name'])\n self.ageLineEdit.setText(data['age'])\n self.descTextEdit.setPlainText(data['desc'])\n\n def edit_character(self):\n self.data['name'] = self.nameLineEdit.text()\n self.data['age'] = self.ageLineEdit.text()\n self.data['desc'] = self.descTextEdit.toPlainText()\n self.send_character_data.emit(self.data)\n print(\"set point\")\n\n def cancel(self):\n self.canceled_signal.emit(self)\n\n def refresh_view(self, char_data):\n self.nameLineEdit.setText(\"\")\n self.ageLineEdit.setText(\"\")\n self.descTextEdit.clear()\n self.fill_data(char_data)\n\n\nclass AddBeatView(QtWidgets.QWidget, Ui_AddEntryWindow):\n send_entry_data = pyqtSignal(dict, int)\n canceled = pyqtSignal(QtWidgets.QWidget)\n\n def __init__(self, view=None, data=None, beat_num=int):\n QtWidgets.QWidget.__init__(self)\n Ui_AddEntryWindow.__init__(self)\n self.setupUi(self)\n self.view = view\n self.beat_num = beat_num\n self.create_character_widgets()\n if data:\n self.refresh_synopsis_view(data=data, beat_num=beat_num)\n self.tabWidget.clear_all_tab_text()\n self.connect_signals()\n\n\n def connect_signals(self):\n self.addEntryBtn.clicked.connect(self.create_entry)\n self.cancelBtn.clicked.connect(self.cancel)\n\n def create_character_widgets(self):\n self.tabWidget = EntryPerCharWidget(view=self.view, data=self.view.controller.model.data, beat_num=self.beat_num)\n self.tabHolder.addWidget(self.tabWidget)\n\n def create_entry(self):\n \"\"\"\n pull data from the widgets and then format it for the model.\n emit signal and send data.\n \"\"\"\n text_per_character = []\n for index in range(0, self.tabWidget.tabWidget.count()):\n tab = self.tabWidget.tabWidget.widget(index)\n temp_text = tab.entryTextEdit.toPlainText()\n text_per_character.append((tab.uuid, temp_text))\n self.data = {}\n self.data[\"name\"] = self.nameLineEdit.text()\n self.data[\"synopsis\"] = self.synopsisTextEdit.toPlainText()\n self.data[\"uuid\"] = UUID()\n self.data[\"characters\"]= {}\n for uuid, text in text_per_character:\n self.data[\"characters\"][str(uuid)] = {\"uuid\":uuid, \"scale_list\":[0], \"notes_list\":[text]}\n self.send_entry_data.emit(self.data, self.beat_num)\n\n def cancel(self):\n self.canceled.emit(self)\n\n def refresh_synopsis_view(self, data, beat_num):\n '''\n show what is happening before, now , and next in the story\n based on the position of the plotSlider\n data = the data variable of a model.StoryObject\n beat_num =\n '''\n # prep vars\n if len(data[\"beats\"]) == 0:\n self.prevNameLabel.setText(\"...\")\n self.prevTextEdit.setPlainText(\"\")\n self.nextNameLabel.setText(\"...\")\n self.nextTextEdit.setPlainText(\"\")\n\n return\n prev_entry = True\n next_entry = True\n slider_val = beat_num\n # check if the plot slider is at max or min position\n # so we know whether prev or next text exists in data\n if 
slider_val == 0:\n prev_entry = False\n else:\n prev_entry_val = slider_val - 1\n\n # slider value starts at 0, beats len starts at 1\n if slider_val == len(data[\"beats\"]):\n next_entry = False\n else:\n next_entry_val = slider_val\n\n # edit the text widgets based on availability of data\n if prev_entry:\n self.prevNameLabel.setText(data[\"beats\"][prev_entry_val][\"name\"])\n self.prevTextEdit.setPlainText(data[\"beats\"][prev_entry_val][\"synopsis\"])\n else:\n self.prevNameLabel.setText(\"...\")\n self.prevTextEdit.setPlainText(\"\")\n if next_entry:\n self.nextNameLabel.setText(data[\"beats\"][next_entry_val][\"name\"])\n self.nextTextEdit.setPlainText(data[\"beats\"][next_entry_val][\"synopsis\"])\n else:\n self.nextNameLabel.setText(\"...\")\n self.nextTextEdit.setPlainText(\"\")\n\n def refresh_view(self, data, beat_num):\n self.nameLineEdit.setText(\"\")\n self.synopsisTextEdit.clear()\n self.tabWidget.refresh_view(data=data, beat_num=beat_num)\n self.tabWidget.clear_all_tab_text()\n self.refresh_synopsis_view(data, beat_num)\n\n\n\n\nclass CharListWidgetItem(QtWidgets.QListWidgetItem):\n \"\"\"\n a custom version of QListWidgetItem so that it stores the uuid for the character with the item\n \"\"\"\n def __init__(self, *args, uuid='', **kwargs ):\n super().__init__(*args, **kwargs)\n self.uuid = uuid\n\nclass EntryPerCharWidget(QtWidgets.QWidget):\n \"\"\"\n This creates a tabbed view of the specific entry for each character.\n A separate implementation of this will make one tab per entry for a single character.\n\n \"\"\"\n def __init__(self, *args, view=None, data=None, beat_num=0, editable=True, **kwargs):\n \"\"\"\n data = the entire JSON data contained in the model\n beat_num = the integer for the entry to display\n\n \"\"\"\n super().__init__(*args, **kwargs)\n self.view = view\n self.beat_num = beat_num\n self.layout = QtWidgets.QVBoxLayout(self)\n self.tabWidget = QtWidgets.QTabWidget()\n self.layout.addWidget(self.tabWidget)\n self.setLayout(self.layout)\n self.tabs = []\n self.is_dirty = False\n if data:\n self.create_character_tabs(data, editable)\n\n def create_character_tabs(self, data=None, editable=True):\n \"\"\"\n create all the tabs in the tabwidget\n iterates through the entries in a beat and adds a tab for each one it finds\n\n sets everything to self, so it can be easily referenced\n \"\"\"\n self.tabs = []\n\n if len(data[\"beats\"]) > self.beat_num:\n for char_uuid, char_data in data['beats'][self.beat_num]['characters'].items():\n name = data[\"characters\"][str(char_uuid)][\"name\"]\n self.tabs.append((self.create_character_tab(entry_character_data = char_data, editable=editable), name, char_uuid))\n else:\n for char_uuid, char_data in data[\"characters\"].items():\n name = char_data[\"name\"]\n self.tabs.append((self.create_character_tab(char_uuid = char_data[\"uuid\"], editable=editable), name, char_data[\"uuid\"]))\n for tab in self.tabs:\n self.tabWidget.addTab(tab[0], tab[1])\n tab[0].entryTextEdit.textChanged.connect(self.set_dirty_flag)\n\n def create_character_tab(self, entry_character_data=None, char_uuid=str, editable=True):\n \"\"\"\n create a character tab, and return the reference to the tab\n Each character tab contains the character's name\n and the entry text for the specified entry_num\n \"\"\"\n tab = QtWidgets.QWidget()\n tab.layout = QtWidgets.QVBoxLayout(tab)\n tab.entryTextEdit = QtWidgets.QPlainTextEdit()\n tab.entryTextEdit.setEnabled(editable)\n tab.layout.addWidget(tab.entryTextEdit)\n if entry_character_data:\n tab.uuid = 
entry_character_data[\"uuid\"]\n tab.entryTextEdit.setPlainText(entry_character_data[\"notes_list\"][0])\n else:\n tab.uuid = char_uuid\n tab.entryTextEdit.setPlainText(\"\")\n tab.setLayout(tab.layout)\n return tab\n\n def refresh_view(self, data=None, beat_num=0, editable=True):\n #self.layout = QtWidgets.QVBoxLayout(self)\n #self.tabWidget = QtWidgets.QTabWidget()\n #self.tabs = []\n self.beat_num = beat_num\n self.tabWidget.clear()\n if data:\n self.create_character_tabs(data, editable=editable)\n\n def clear_all_tab_text(self):\n for tab in self.tabs:\n tab[0].entryTextEdit.clear()\n\n def refresh_edit_state(self, editable=False):\n for tab in self.tabs:\n tab[0].entryTextEdit.setEnabled(editable)\n\n def set_dirty_flag(self):\n self.is_dirty = True\n\n","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":20897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"609628903","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass MountPointMap(Model):\n \"\"\"The share mount point.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :param share_id: Required. ID of the share mounted to the role VM.\n :type share_id: str\n :ivar role_id: ID of the role to which share is mounted.\n :vartype role_id: str\n :ivar mount_point: Mount point for the share.\n :vartype mount_point: str\n :ivar role_type: Role type. 
Possible values include: 'IOT', 'ASA',\n 'Functions', 'Cognitive'\n :vartype role_type: str or ~azure.mgmt.edgegateway.models.RoleTypes\n \"\"\"\n\n _validation = {\n 'share_id': {'required': True},\n 'role_id': {'readonly': True},\n 'mount_point': {'readonly': True},\n 'role_type': {'readonly': True},\n }\n\n _attribute_map = {\n 'share_id': {'key': 'shareId', 'type': 'str'},\n 'role_id': {'key': 'roleId', 'type': 'str'},\n 'mount_point': {'key': 'mountPoint', 'type': 'str'},\n 'role_type': {'key': 'roleType', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(MountPointMap, self).__init__(**kwargs)\n self.share_id = kwargs.get('share_id', None)\n self.role_id = None\n self.mount_point = None\n self.role_type = None\n","sub_path":"sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/mount_point_map.py","file_name":"mount_point_map.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607263794","text":"#!/usr/bin/env python3\n\n'''\nThe MIT License (MIT)\n\nCopyright (c) 2015 Peter Dahlberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
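# A toy illustration of the msrest "readonly" convention that MountPointMap
# relies on above: attributes flagged {'readonly': True} in _validation are
# only populated from server responses and are skipped when building a
# request body.  This serializer is a hypothetical sketch, not msrest itself.
class ToyModel:
    _validation = {'share_id': {'required': True}, 'role_id': {'readonly': True}}
    _attribute_map = {'share_id': {'key': 'shareId'}, 'role_id': {'key': 'roleId'}}

    def __init__(self, share_id):
        self.share_id = share_id
        self.role_id = None  # filled in by the service, never by the client

def serialize(obj):
    # keep only client-settable attributes in the outgoing payload
    return {spec['key']: getattr(obj, name)
            for name, spec in obj._attribute_map.items()
            if not obj._validation.get(name, {}).get('readonly')}

print(serialize(ToyModel(share_id='share-1')))  # {'shareId': 'share-1'}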
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n'''\n\n\"\"\"\nCheck if username is a member, if yes print the members group\n\"\"\"\n\nMYSQL_CONFFILE = \"/etc/mysql-users-credentials\" \n\n\nimport sys\nimport os\nimport mysql.connector\nimport ast\n\ndef groups_from_mysql(mysql_config, username):\n connection = mysql.connector.connect(**mysql_config)\n query = (\"select \\\"members\\\" \" # only one group for now\n \"from users as u \"\n \"where u.maylogin = 1 \"\n \"and u.name = %s \"\n \"and u.cmember is not NULL;\"\n )\n cursor = connection.cursor()\n cursor.execute(query, (username, ))\n result = cursor.fetchone()\n cursor.close()\n connection.close()\n return result[0] if result else \"\"\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"only supply a username\", file=sys.stderr)\n sys.exit(1)\n username = sys.argv[1]\n\n with open(MYSQL_CONFFILE) as file:\n mysql_config = ast.literal_eval(file.read())\n mysql_config[\"raise_on_warnings\"] = True\n\n print(groups_from_mysql(mysql_config, username))\n \n\n","sub_path":"git/gitolite_mysql_group_lookup.py","file_name":"gitolite_mysql_group_lookup.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"377374254","text":"import math\nN = 32\n\nJ = 500\n\ndef convert(digits, base):\n res = 0\n for i in range(len(digits) - 1, -1, -1):\n if (digits[i] == 1):\n res += base ** (len(digits) - 1 - i)\n return res\n\ndef getDivisor(num):\n #for i in range(2, round(math.sqrt(num)) + 1):\n for i in range(2, 1000):\n if (num % i == 0):\n return i\n return num\n\ndef iterateDigits(digits):\n for i in range(len(digits) - 1, -1, -1):\n if digits[i] == 0:\n digits[i] = 1\n break\n else:\n digits[i] = 0\n digits[-1] = 1\n\ndigits = [int(x) for x in str((10 ** (N - 1)) + 1)]\nprint(\"Case #1:\")\nfor j in range(0, J):\n divisors = []\n while True:\n for base in range(2, 11):\n num = convert(digits, base)\n #print(\"converting digits \" + str(\"\".join([str(i) for i in digits])) + \" to base \" + str(base) + \" is eq to num \" + str(num))\n divisor = getDivisor(num)\n if divisor == num:\n break\n divisors.append(divisor)\n if (len(divisors) == 9):\n break\n iterateDigits(digits)\n divisors.clear()\n outstr = \"\".join([str(i) for i in digits])\n for base in range(2, 11):\n outstr += \" \" + str(divisors[base - 2])\n print(outstr)\n #print(convert(digits, 10) // divisors[8])\n iterateDigits(digits)\n","sub_path":"codes/CodeJamCrawler/16_0_3/mherzberg/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595577069","text":"import os\nimport re\nimport glob\n\nclass Market1501DataCrawler:\n def __init__(self,data_folder=\"Market1501\", train_folder=\"bounding_box_train\", test_folder=\"bounding_box_test\", query_folder=\"query\", **kwargs):\n self.metadata = {}\n\n self.data_folder = data_folder\n self.train_folder = os.path.join(self.data_folder, train_folder)\n self.test_folder = os.path.join(self.data_folder, test_folder)\n self.query_folder = os.path.join(self.data_folder, query_folder)\n\n self.logger = kwargs.get(\"logger\")\n\n self.__verify(self.data_folder)\n self.__verify(self.train_folder)\n 
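# A standalone sanity check of convert() from C.py above: a list of 0/1
# digits is read as a numeral in the given base.  The sample digits are
# hypothetical.
def convert(digits, base):
    res = 0
    for i in range(len(digits) - 1, -1, -1):
        if digits[i] == 1:
            res += base ** (len(digits) - 1 - i)
    return res

assert convert([1, 0, 0, 1, 1], 2) == 19       # binary 10011
assert convert([1, 0, 0, 1, 1], 10) == 10011   # the same digits read in base 10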
self.__verify(self.test_folder)\n self.__verify(self.query_folder)\n\n self.crawl()\n\n def __verify(self,folder):\n if not os.path.exists(folder):\n raise IOError(\"Folder {data_folder} does not exist\".format(data_folder=folder))\n else:\n self.logger.info(\"Found {data_folder}\".format(data_folder = folder))\n\n def crawl(self,):\n self.metadata[\"train\"], self.metadata[\"test\"], self.metadata[\"query\"] = {}, {}, {}\n self.metadata[\"train\"][\"crawl\"], self.metadata[\"train\"][\"pids\"], self.metadata[\"train\"][\"cids\"], self.metadata[\"train\"][\"imgs\"] = self.__crawl(self.train_folder, reset_labels=True)\n self.metadata[\"test\"][\"crawl\"], self.metadata[\"test\"][\"pids\"], self.metadata[\"test\"][\"cids\"], self.metadata[\"test\"][\"imgs\"] = self.__crawl(self.test_folder)\n self.metadata[\"query\"][\"crawl\"], self.metadata[\"query\"][\"pids\"], self.metadata[\"query\"][\"cids\"], self.metadata[\"query\"][\"imgs\"] = self.__crawl(self.query_folder)\n\n self.logger.info(\"Train\\tPIDS: {:6d}\\tCIDS: {:6d}\\tIMGS: {:8d}\".format(self.metadata[\"train\"][\"pids\"], self.metadata[\"train\"][\"cids\"], self.metadata[\"train\"][\"imgs\"]))\n self.logger.info(\"Test \\tPIDS: {:6d}\\tCIDS: {:6d}\\tIMGS: {:8d}\".format(self.metadata[\"test\"][\"pids\"], self.metadata[\"test\"][\"cids\"], self.metadata[\"test\"][\"imgs\"]))\n self.logger.info(\"Query\\tPIDS: {:6d}\\tCIDS: {:6d}\\tIMGS: {:8d}\".format(self.metadata[\"query\"][\"pids\"], self.metadata[\"query\"][\"cids\"], self.metadata[\"query\"][\"imgs\"]))\n\n def __crawl(self,folder, reset_labels=False):\n imgs = glob.glob(os.path.join(folder, \"*.jpg\"))\n _re = re.compile(r'([\\d]+)_[a-z]([\\d]+)')\n pid_labeler = 0\n pid_tracker, cid_tracker = {}, {}\n crawler = []\n pid_counter, cid_counter, img_counter = 0, 0, 0\n for img in imgs:\n pid, cid = map(int, _re.search(img).groups()) # _re.search lol\n if pid < 0: continue # ignore junk\n if cid < 0: continue # ignore junk\n if pid not in pid_tracker:\n pid_tracker[pid] = pid_labeler if reset_labels else pid\n pid_labeler += 1\n if cid not in cid_tracker:\n cid_tracker[cid] = cid-1\n crawler.append((img, pid_tracker[pid], cid-1)) # cids start at 1 in data\n return crawler, len(pid_tracker), len(cid_tracker), len(crawler)","sub_path":"crawlers/Market1501DataCrawler.py","file_name":"Market1501DataCrawler.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257741259","text":"from Tkinter import *\r\n\r\n\r\n\r\ndef Test():\r\n\tprint('This works')\r\n\r\ntop = Tk()\r\n\r\nL1 = Label(top, text = 'Type Message:')\r\nL1.pack( side = LEFT )\r\n\r\nMessage = Entry( top )\r\n\r\nMessage.pack()\r\n\r\nSend = Button( top , command = Test, text = 'Send!')\r\n\r\nSend.pack(side = RIGHT)\r\ntop.mainloop()","sub_path":"GUI1.py","file_name":"GUI1.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"476917676","text":"#!/usr/bin/env python3\nimport tkinter as tk\nimport random\n\nclass GuessHex:\n def __init__(self, parent):\n self.parent = parent\n parent.title(\"GuessHex\")\n parent.minsize(660, 250)\n parent.resizable(False, False)\n self.color_label = tk.Label(parent, text=\"Which colour matches the Hex code below?\")\n self.color_label.place(x=190,y=135)\n self.hex_label = tk.Label(parent, text=\"\")\n self.hex_label.place(x=285,y=170)\n self.winlose = tk.Label(parent, text=\"\")\n 
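# The filename parsing used by Market1501DataCrawler.__crawl() above, checked
# on a hypothetical Market-1501 style name: person id 2, camera id 1.
import re

_re = re.compile(r'([\d]+)_[a-z]([\d]+)')
pid, cid = map(int, _re.search('0002_c1s1_000451_03.jpg').groups())
print(pid, cid)  # 2 1   (the crawler later stores cid - 1, so cameras start at 0)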
self.winlose.place(x=220, y=200)\n self.canvas = tk.Canvas(parent, width=600,height=80)\n self.canvas.place(x=30,y=30)\n self.col = {}\n self.color = {}\n for i in range(0,6):\n self.color['col'+str(i)] = self.randomhex()\n self.col['col'+str(i)] = self.canvas.create_oval((15+(i*100)), 20, (75+(i*100)), 80, width=0, fill=self.color['col'+str(i)], tags=\"col\"+str(i))\n self.canvas.tag_bind(\"col\"+str(i), \"\", self.checkwin)\n self.rwin = random.randint(0,5)\n self.hex_label.configure(text=self.color['col'+str(self.rwin)])\n\n def checkwin(self, event):\n self.canvas.config(state = 'disabled')\n if self.color[\"col\"+str((event.widget.find_closest(event.x, event.y)[0]-1))] == self.hex_label.cget(\"text\"):\n self.winlose.configure(text=\"Congratulations. You got it right!\")\n else:\n self.winlose.configure(text=\"Sorry. Better luck next time!\")\n self.canvas.itemconfig(self.col['col'+str(self.rwin)], width=2, outline='red')\n self.parent.after(2000, self.regenerate)\n\n def regenerate(self):\n for i in range(0,6):\n self.color['col'+str(i)] = self.randomhex()\n self.canvas.itemconfig(self.col['col'+str(i)],fill=self.color['col'+str(i)],width=0)\n self.rwin = random.randint(0,5)\n self.hex_label.configure(text=self.color['col'+str(self.rwin)])\n self.winlose.configure(text=\"\")\n self.canvas.config(state = 'normal')\n\n def randomhex(self):\n r = lambda: random.randint(0,255)\n return '#%02X%02X%02X' % (r(),r(),r())\n\n\ndef main():\n root = tk.Tk()\n thegui = GuessHex(root)\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"guesshex.py","file_name":"guesshex.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"225044056","text":"import numpy as np\ndef add_virtual_node(batched_fixed_order_graph_dataset):\n ds = batched_fixed_order_graph_dataset\n for i, G in enumerate(ds):\n G.add_node('virtual')\n if ds.vertex_dim > 0:\n G.node['virtual']['data'] = np.zeros([G.graph['batch_size'], ds.vertex_dim])\n for u in range(G.order):\n G.add_edge('virtual', u)\n if ds.edge_dim > 0:\n G.edge['virtual'][u]['data'] = np.zeros([G.graph['batch_size'], ds.edge_dim])\n return None\n\ndef add_target_nodes(graph_dataset):\n targets = graph_dataset.target_names\n for i, G in enumerate(graph_dataset):\n try:\n bs = G.graph['batch_size']\n except KeyError:\n bs = 1\n\n order = len(G.nodes())\n for target in targets:\n G.add_node(target.name)\n if graph_dataset.vertex_dim > 0:\n G.node[target.name]['data'] = np.zeros([bs, graph_dataset.vertex_dim])\n for target in targets:\n for u in G.nodes():\n G.add_edge(target.name, u)\n if graph_dataset.edge_dim > 0:\n G.edge[target.name][u]['data'] = np.zeros([bs, graph_dataset.edge_dim])\n return None\n","sub_path":"old_implementation/data_utils/add_virtual_node.py","file_name":"add_virtual_node.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"312785367","text":"from __future__ import print_function\nimport random\n\nSUITS = ['s', 'h', 'c', 'd']\nVALUES = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n\nclass Card():\n\tdef __init__(self, s, v):\n\t\tself.suit = s\n\t\tself.value = v\n\n\tdef __str__(self):\n\t\t'''\n\t\tprint (self.suit, self.value, sep='')\n\n\t\t'''\n\t\t\n\t\tnum = self.value\n\t\tsue = self.suit\n\t\tif num == 'A':\n\t\t\tnum = 'Ace'\n\t\telif num == 'J':\n\t\t\tnum = 'Jack'\n\t\telif num == 'Q':\n\t\t\tnum = 
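# The colour generator from GuessHex.randomhex() above, standalone: three
# random bytes formatted as an HTML hex colour string.
import random

r = lambda: random.randint(0, 255)
print('#%02X%02X%02X' % (r(), r(), r()))  # e.g. '#3FA81C'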
'Queen'\n\t\telif num == 'K':\n\t\t\tnum = 'King'\n\n\t\tif sue == 's':\n\t\t\tsue = 'Spades'\n\t\telif sue == 'h':\n\t\t\tsue = 'Hearts'\n\t\telif sue == 'c':\n\t\t\tsue = 'Clubs'\n\t\telse:\n\t\t\tsue = 'Diamonds'\n\n\t\tface = num + ' of ' + sue\n\t\t\n\t\treturn face\n\n\ndef makeDeck():\n\td = []\n\tfor s in SUITS:\n\t\tfor v in VALUES:\n\t\t\td.append(Card(s,v))\n\n\trandom.shuffle(d)\n\t\n\treturn d\n\n'''\t\nprint (SUITS)\n\nc = Card(SUITS[2], VALUES[10])\nprint(c)\n\ndeck = makeDeck()\n\nfor i in range(10):\n\tprint(deck[i], ' '*(18-len(str(deck[i]))), end='')\n'''\n\n","sub_path":"deckTemplate.py","file_name":"deckTemplate.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"273855699","text":"\"\"\"trello\n\nRevision ID: 83a3d27de55e\nRevises: 39d4e9812288\nCreate Date: 2019-10-18 12:38:18.620251\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '83a3d27de55e'\ndown_revision = '39d4e9812288'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('trello_api_access_token', sa.String(length=128), nullable=True))\n op.add_column('users', sa.Column('trello_api_key', sa.String(length=128), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('users', 'trello_api_key')\n op.drop_column('users', 'trello_api_access_token')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/83a3d27de55e_trello.py","file_name":"83a3d27de55e_trello.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"236303938","text":"\"\"\"Module to manage scheduling, spawning and syncing.\n\"\"\"\n\nfrom jinja2 import Template\n\nimport os\nimport utils\nimport yaml\n\nFLAVOR_MAPPING = {\n \"satisfied\": 6,\n \"intermediate\": 4,\n \"greedy\": 3\n}\n\n\n\nclass Syncer(object):\n \"\"\"Syncer module.\"\"\"\n def __init__(self, worker_num, sync_flavor, branch_path):\n self.workers_per_source = FLAVOR_MAPPING[sync_flavor]\n self.worker_num = worker_num\n self.sync_flavor = sync_flavor\n self.branch_path = branch_path\n with open('config.yml', 'r') as config_file:\n config = yaml.load(config_file)\n self.all_sources = config['sources']\n self.all_workers = config['workers']\n self.ansible_dir = config['ansible_dir']\n if not os.path.exists(self.ansible_dir):\n os.mkdir(self.ansible_dir)\n\n\n def _get_num_of_sources(self, worker_num):\n return worker_num/self.workers_per_source\n\n\n def _spawn_sources(self, source_num):\n \"\"\"spawning a list of names of source machines for syncing.\"\"\"\n return self.all_sources[0:source_num]\n\n\n def map_sources(self, source_list):\n mapping = {}\n redundant_num = self.worker_num % len(source_list)\n if redundant_num:\n redundant_workers = self.all_workers[-redundant_num:]\n workers = self.all_workers[:-redundant_num]\n else:\n redundant_workers = []\n workers = self.all_workers\n start = 0\n for source in source_list:\n mapping[source] = workers[start:start+self.workers_per_source]\n if redundant_workers:\n mapping[source].append(redundant_workers.pop())\n start = start+self.workers_per_source\n return mapping\n\n\n def _sync(self, source_worker_mapping):\n inventory_file = os.path.join(self.ansible_dir, 
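# The deck-building idea behind makeDeck() above, restated with plain tuples
# so it runs without the Card class: one card per (suit, value) pair, then a
# shuffle and a five-card deal.
import random

SUITS = ['s', 'h', 'c', 'd']
VALUES = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
deck = [(s, v) for s in SUITS for v in VALUES]
random.shuffle(deck)
print(len(deck), deck[:5])  # 52, followed by the top five cards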
'inventory.yml')\n vars_file = os.path.join(self.ansible_dir, 'group_vars/all')\n self._generate_inventories(source_worker_mapping, inventory_file)\n self._generate_vars(source_worker_mapping, vars_file)\n os.system(\"cp -rf roles %s\" % self.ansible_dir)\n os.system(\"cp sync.yml %s\" % self.ansible_dir)\n cmd = \"ANSIBLE_CONFIG=%s ansible-playbook -i %s sync.yml\" % (os.path.join(self.ansible_dir),\n inventory_file)\n\n# with open(os.path.join(self.ansible_dir, 'run.log')) as logfile:\n# subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)\n\n\n def _generate_inventories(self, source_worker_mapping, inventory_file):\n source_dict = {'sources': source_worker_mapping.keys()}\n utils.render_jinja_templates('templates/inventory.yml.j2', source_dict, inventory_file)\n\n\n def _generate_vars(self, source_worker_mapping, ansible_vars):\n vars_dir = os.path.join(self.ansible_dir, 'group_vars')\n if not os.path.exists(vars_dir):\n os.mkdir(vars_dir)\n mapping = {'source_worker_mapping': source_worker_mapping}\n mapping['branch_path'] = self.branch_path\n utils.render_jinja_templates('templates/vars.j2', mapping, ansible_vars)\n\n\n def sync(self):\n source_num = self._get_num_of_sources(self.worker_num)\n source_list = self._spawn_sources(source_num)\n source_worker_mapping = self.map_sources(source_list)\n self._sync(source_worker_mapping)\n","sub_path":"syncer.py","file_name":"syncer.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"291788541","text":"from collections import Counter\n\n\nphrase = 'How many times does each word show up in this sentence word times each each word'\n\nwrods = phrase.split()\n\nc = Counter(wrods)\n\nprint(c.elements())\nprint(c.most_common(3))","sub_path":"09-counter.py","file_name":"09-counter.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"340103885","text":"from flask import Flask, redirect, url_for, session,render_template,flash\nfrom flask_oauthlib.client import OAuth, OAuthException\nfrom flask_login import LoginManager, login_user,current_user,login_required\nimport random,datetime,json\nfrom forms import PhoneNumberForm,TokenForm,CourseForm\nfrom functools import wraps\nfrom db_classes import User, Course, Exam, db, delete_user\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py') #set as envar in local windows environment.\ndb.init_app(app)\n\noauth = OAuth()\noauth.init_app(app)\n\nlm = LoginManager(app)\nlm.login_view = \"new_user\"\n\nDEFAULT_NOTIF = 'dayof'\n\ngoogle = oauth.remote_app(\n 'google',\n app_key = 'GOOGLE',\n request_token_params={\n 'scope': 'email'\n },\n base_url='https://www.googleapis.com/oauth2/v1/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://accounts.google.com/o/oauth2/token',\n authorize_url='https://accounts.google.com/o/oauth2/auth',\n)\n\n\ndef verified_users(func):\n @wraps(func)\n def verify(*args,**kwargs):\n if not current_user.verified:\n return redirect(url_for('register',_external=True))\n return func(*args,**kwargs)\n return verify\n\n\n\ndef update_token(user):\n token = random.randrange(10000, 99999) #generate 5 digit number w/o leading 0\n now = datetime.datetime.now()\n user.token = token\n\n '''\n This is the part where you use the plivo api to send a token to the user's phoen number.\n Actually we could always just send them emails but that would be pretty 
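# The worker-to-source chunking behind Syncer.map_sources() above, restated
# as a standalone function with hypothetical host names: each source gets
# per_source workers, and any remainder is spread one extra worker per source.
def map_sources(workers, sources, per_source):
    mapping = {}
    leftover = workers[len(sources) * per_source:]
    for i, source in enumerate(sources):
        mapping[source] = workers[i * per_source:(i + 1) * per_source]
        if leftover:
            mapping[source].append(leftover.pop())
    return mapping

print(map_sources(['w1', 'w2', 'w3', 'w4', 'w5'], ['s1', 's2'], 2))
# {'s1': ['w1', 'w2', 'w5'], 's2': ['w3', 'w4']}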
lame. LAME.\n '''\n\n user.token_date = now\n db.session.commit()\n\n\n\n@lm.user_loader\ndef load_user(id):\n return User.query.filter_by(social_id=id).first()\n\n@app.route('/')\ndef new_user():\n if not current_user.is_anonymous:\n return redirect(url_for('home',_external=True))\n return render_template('welcome.html')\n\n\n@app.route('/login')\ndef login():\n callback = url_for('authorized',_external = True)\n return google.authorize(callback=callback)\n\n@app.route('/login/authorized')\n@google.authorized_handler\ndef authorized(resp):\n if resp is None:\n return redirect(url_for('new_user',_external=True))\n\n if isinstance(resp, OAuthException):\n return 'Error: %s' % resp\n\n session['google_token'] = (resp['access_token'], '')\n me = google.get('userinfo')\n social_id = me.data['id']\n username = me.data['name']\n email = me.data['email']\n\n if social_id is None:\n flash('Authentication failed.')\n return redirect(url_for('new_user',_external=True))\n\n user = User.query.filter_by(social_id=social_id).first()\n if not user:\n user = User.make_unique(username,email,social_id)\n db.session.add(user)\n db.session.commit()\n\n login_user(user, True)\n return redirect(url_for('register',_external=True)) #home is for logged in users\n\n@app.route('/home', methods=('GET','POST'))\n@login_required\n@verified_users\ndef home():\n buildings = {}\n buildings_file = app.config['BUILDINGS_JSON']\n with open(buildings_file,'r') as f:\n buildings = json.load(f)\n form = CourseForm()\n delform = PhoneNumberForm()\n return render_template('home.html', exams=current_user.exams, form=form, delform=delform, buildings=buildings)\n\n@app.route('/enroll',methods=('GET','POST'))\n@login_required\n@verified_users\ndef enroll():\n if not current_user.verified:\n return redirect(url_for('register',_external=True))\n\n form = CourseForm()\n if not form.validate_on_submit():\n flash(\"Invalid Course name provided\", \"warning\")\n return redirect(url_for('home',_external=True))\n\n print(form.data['course'])\n course = Course.query.filter_by(name=form.data['course']).first()\n print(course)\n notif = Exam.query.filter_by(course = course, user= current_user).first()\n if course and not notif:\n exam = Exam.make_unique(course,DEFAULT_NOTIF,current_user)\n db.session.add(exam)\n db.session.commit()\n flash(\"You have successfully registered for this course.\", \"success\")\n elif not course:\n flash(\"Course not found. Please enter the full course code as it appears on ACORN.\", \"warning\")\n elif notif and course:\n flash(\"You are already registered in %s\" % course.name, \"warning\")\n\n return redirect(url_for('home',_external=True))\n\n@app.route('/unenroll/',methods=('GET','POST'))\n@login_required\n@verified_users\ndef unenroll(course_code):\n\n if len(course_code) > 9 or len(course_code) < 6:\n flash(\"Invalid course code\", \"warning\")\n return redirect(url_for('home',_external=True))\n\n course = Course.query.filter_by(name=course_code).first()\n notif = Exam.query.filter_by(course = course, user= current_user)\n if not course :\n flash(\"Course not found. 
Please screenshot this and scold francis because something is borked.\",\"warning\")\n elif not notif.all():\n flash(\"You are not registered in %s\" % course.name, \"warning\")\n elif course and notif.all():\n notif.delete()\n db.session.commit()\n flash(\"You have been removed from the exam list.\", \"success\")\n\n return redirect(url_for('home',_external=True))\n\n\n\n@app.route('/delete', methods=('GET','POST'))\n@login_required\n@verified_users\ndef delete():\n form = PhoneNumberForm()\n if form.validate_on_submit():\n number = form.data['phone']\n print('Stored: %r Provided: %r' % (current_user.phone,number))\n if number == current_user.phone:\n delete_user(current_user)\n flash(\"Your account has been deleted.\" , \"success\")\n return redirect(url_for('home',_external=True))\n\n flash(\"This is not your phone number. Deletion aborted.\", \"danger\")\n return redirect(url_for('home',_external=True))\n\n\n@app.route('/register',methods=('GET','POST'))\n@login_required\ndef register():\n '''\n Checks to see if the user has verified their number (and redirects them away)\n :return: Forms to enter phone number, then token\n '''\n if current_user.verified:\n return redirect(url_for('home',_external=True))\n\n elif current_user.phone:\n token_form = TokenForm()\n if token_form.validate_on_submit():\n token = str(token_form.data['token'])\n if current_user.check_token(token):\n current_user.verified = True\n db.session.commit()\n return redirect(url_for('register',_external=True))\n else:\n flash('Please check your token.', \"info\")\n\n return render_template('token.html', form = token_form)\n else:\n form = PhoneNumberForm()\n if form.validate_on_submit():\n phoneNum = form.data['phone']\n if User.query.filter_by(phone = phoneNum, verified = True).all():\n flash(\"Number already in use.\", \"danger\")\n return redirect(url_for('register',_external=True))\n current_user.phone = phoneNum\n update_token(current_user) #db committed in this method for us\n return current_user.token + \" This is your token. 
Remember it or smth idc.\"\n return render_template('register.html', form=form)\n\n\n@app.route('/resend')\n@login_required\ndef resend_token():\n '''\n If the user has already verified their account, send them back to the homepage.\n '''\n if current_user.verified or not current_user.phone:\n return redirect(url_for('home',_external=True))\n\n delta = datetime.datetime.now() - current_user.token_date\n min_required = datetime.timedelta(minutes = 2)\n\n if min_required < delta:\n update_token(current_user)\n flash('A new token has been sent to %s' % current_user.phone, \"success\")\n else:\n flash('Please wait %s seconds before requesting a new token' % str((min_required-delta).seconds), \"warning\")\n\n flash('Your token is %s' % current_user.token)\n return redirect(url_for('register',_external=True))\n\n\n@google.tokengetter\ndef get_google_oauth_token():\n return session.get('google_token')\n\n\nif __name__ == '__main__':\n #make_users(db)\n #test_dup()\n #import_data()\n app.run(debug=True)","sub_path":"exams.py","file_name":"exams.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"643369960","text":"class Car(object):\n\t\"\"\"docstring for Car\"\"\"\n\tspeed = 0\n\tdef __init__(self, name = 'General', model = 'GM', vehicle_type = 'None'):\n\t\tself.name= name\n\t\tself.model = model\n\t\tself.vehicle_type = vehicle_type\n\t\tself.speed = speed\n\n\t\tif self.name in ['Porshe', 'Koenigsegg']:\n\t\t\tself.num_of_doors = 2\n\t\telse:\n\t\t\tself.num_of_doors = 4\n\n\t\tif self.vehicle_type == 'trailer':\n\t\t\tself.num_of_wheels = 8\n\t\telse:\n\t\t\tself.num_of_wheels = 4\n\n\tdef is_saloon(self):\n\t\tif self.vehicle_type is not 'trailer':\n\t\t\tself.vehicle_type == 'saloon'\n\t\t\treturn True\n\t\treturn False\n\n\tdef drive(self, moving_speed):\n\t\tif moving_speed == 3:\n\t\t\tself.speed = 1000\n\t\telif moving_speed == 7:\n\t\t\tself.speed = 77\n\t\treturn self\n\t\t","sub_path":"carclass.py","file_name":"carclass.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"320081420","text":"from __future__ import print_function, division, unicode_literals, absolute_import\nimport os\nimport re\nimport fnmatch\nfrom itertools import cycle\nfrom collections import defaultdict, OrderedDict\nimport numpy as np\nimport numexpr as ne\nfrom scipy.stats import norm, binned_statistic\nfrom mpi4py import MPI\nimport time\n\nfrom .base import BaseValidationTest, TestResult\nfrom .plotting import plt\nfrom .parallel import send_to_master\n\n#import lsst\n#import lsst.analysis\nimport lsst.analysis.tools\n\nfrom lsst.analysis.tools.actions.scalar import MedianAction\nfrom lsst.analysis.tools.actions.vector import SnSelector, MagColumnNanoJansky, MagDiff\nfrom lsst.analysis.tools.interfaces import AnalysisMetric\nfrom lsst.analysis.tools.analysisPlots.analysisPlots import WPerpPSFPlot\nfrom lsst.analysis.tools.tasks.base import _StandinPlotInfo\n\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n\n__all__ = ['TestMetric','DemoMetric']\n\n\nclass DemoMetric(AnalysisMetric):\n def setDefaults(self):\n super().setDefaults()\n\n # select on high signal to noise obejcts\n # add in a signal to noise selector\n \n \n #self.prep.selectors.snSelector = SnSelector()\n\n # set what key the selector should use when deciding SNR\n #self.prep.selectors.snSelector.fluxType = \"psfFlux\"\n\n # 
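# The token lifecycle behind update_token()/resend_token() above, standalone:
# a fresh 5-digit token (no leading zero) and the two-minute resend window.
import datetime
import random

token = random.randrange(10000, 99999)
issued = datetime.datetime.now()

def may_resend(issued, now=None, window_minutes=2):
    now = now or datetime.datetime.now()
    return (now - issued) > datetime.timedelta(minutes=window_minutes)

print(token, may_resend(issued))  # e.g. 48213 False (just issued)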
select what threshold value is desireable for the selector\n #self.prep.selectors.snSelector.threshold = 100\n\n # the final name in the qualification is used as a key to insert\n # the calculation into KeyedData\n self.process.buildActions.mags = MagColumnNanoJansky(vectorKey=\"psfFlux\")\n self.process.calculateActions.medianValueName = MedianAction(vectorKey=\"mags\")\n\n # tell the metic what the units are for the quantity\n #self.produce.units = {\"medianValueName\": \"Jy\"}\n self.produce.units = {\"medianValueName\": \"mag\"}\n\n # Rename the quanity prior to producing the Metric\n # (useful for resuable workflows that set a name toward the end of computation)\n self.produce.newNames = {\"medianValueName\": \"DemoMetric\"}\n\n\nclass TestMetric(BaseValidationTest):\n \"\"\"\n Check flux values and magnitudes\n \"\"\"\n\n def __init__(self, **kwargs):\n self.catalog_filters = kwargs.get('catalog_filters', [])\n self.lgndtitle_fontsize = kwargs.get('lgndtitle_fontsize', 12)\n self.truncate_cat_name = kwargs.get('truncate_cat_name', False)\n self.no_version = kwargs.get('no_version', False)\n self.title_size = kwargs.get('title_size', 'small')\n self.font_size = kwargs.get('font_size', 12)\n self.legend_size = kwargs.get('legend_size', 'x-small')\n self.ra = kwargs.get('ra')\n self.dec = kwargs.get('dec')\n self.compare = kwargs.get('compare',False)\n self.models = kwargs.get('models')\n self.bands = kwargs.get('bands')\n self.mag_lim = kwargs.get('mag_lim')\n self.snr_lim = kwargs.get('snr_lim',0)\n\n\n if not any((\n self.catalog_filters,\n )):\n raise ValueError('you need to specify catalog_filters for these checks, add a good flag if unsure')\n\n self.enable_individual_summary = bool(kwargs.get('enable_individual_summary', True))\n self.enable_aggregated_summary = bool(kwargs.get('enable_aggregated_summary', False))\n self.always_show_plot = bool(kwargs.get('always_show_plot', True))\n\n self.nbins = int(kwargs.get('nbins', 20))\n self.prop_cycle = None\n\n self.current_catalog_name = None\n self.current_failed_count = None\n self._aggregated_header = list()\n self._aggregated_table = list()\n self._individual_header = list()\n self._individual_table = list()\n\n super(TestMetric, self).__init__(**kwargs)\n\n def record_result(self, results, quantity_name=None, more_info=None, failed=None, individual_only=False):\n if isinstance(results, dict):\n self.current_failed_count += sum(1 for v in results.values() if v[1] == 'fail')\n elif failed:\n self.current_failed_count += 1\n\n if self.enable_individual_summary:\n if quantity_name is None:\n self._individual_header.append(self.format_result_header(results, failed))\n else:\n self._individual_table.append(self.format_result_row(results, quantity_name, more_info))\n\n if self.enable_aggregated_summary and not individual_only:\n if quantity_name is None:\n results = '{} {}'.format(self.current_catalog_name, results) if self.current_catalog_name else results\n self._aggregated_header.append(self.format_result_header(results, failed))\n else:\n quantity_name = '{} {}'.format(self.current_catalog_name, quantity_name) if self.current_catalog_name else quantity_name\n self._aggregated_table.append(self.format_result_row(results, quantity_name, more_info))\n\n def format_result_row(self, results, quantity_name, more_info):\n more_info = 'title=\"{}\"'.format(more_info) if more_info else ''\n output = ['', '{0}'.format(quantity_name, more_info)]\n output.append('')\n return ''.join(output)\n\n @staticmethod\n def format_result_header(results, 
failed=False):\n return '{0}'.format(results, 'class=\"fail\"' if failed else '')\n\n\n\n def generate_summary(self, output_dir, aggregated=False):\n if aggregated:\n if not self.enable_aggregated_summary:\n return\n header = self._aggregated_header\n table = self._aggregated_table\n else:\n if not self.enable_individual_summary:\n return\n header = self._individual_header\n table = self._individual_table\n\n with open(os.path.join(output_dir, 'SUMMARY.html'), 'w') as f:\n f.write('\\n')\n\n f.write('
<ul>\n')\n for line in header:\n f.write('<li>')\n f.write(line)\n f.write('</li>\n')\n f.write('</ul><br>\n')\n\n f.write('<table>\n')\n f.write('<tr><th>Quantity</th></tr>\n')\n for line in table:\n f.write(line)\n f.write('</table>\n')\n f.write('</body></html>
\\n')\n\n if not aggregated:\n self._individual_header.clear()\n self._individual_table.clear()\n\n def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):\n\n all_quantities = sorted(map(str, catalog_instance.list_all_quantities(True)))\n\n self.prop_cycle = cycle(iter(plt.rcParams['axes.prop_cycle']))\n self.current_catalog_name = catalog_name\n self.current_failed_count = 0\n galaxy_count = None\n quantity_hashes = defaultdict(set)\n\n if rank==0:\n self.record_result('Running flux and magnitude test on {} {}'.format(\n catalog_name,\n getattr(catalog_instance, 'version', ''),\n individual_only=True,\n ))\n\n if self.truncate_cat_name:\n catalog_name = catalog_name.partition(\"_\")[0]\n version = getattr(catalog_instance, 'version', '') if not self.no_version else ''\n\n # create filter labels\n filters=[]\n for i, filt in enumerate(self.catalog_filters):\n filters = filt['filters']\n\n print(filters)\n lgnd_loc_dflt ='best'\n\n label_tot=[]\n plots_tot=[]\n\n # doing everything together this time so we can combine flags\n quantities = []\n print(type(self.models))\n print(type(self.bands))\n print(self.models[0])\n for band in self.bands:\n print(band)\n for model in self.models:\n print(model)\n quantities.append(band+'_'+model+'Flux'); quantities.append(band+'_'+model+'FluxErr'); quantities.append(band+'_'+model+'_flag') #fluxes\n #quantities.append('mag_'+band + '_'+model); quantities.append('magerr_'+band+'_'+model); quantities.append('snr_'+band+'_'+model); #mags\n quantities.append('g_psfFlux_flag')\n quantities.append('r_psfFlux_flag')\n quantities.append('i_psfFlux_flag')\n quantities.append('g_pixelFlags_saturatedCenter')\n quantities.append('r_pixelFlags_saturatedCenter')\n quantities.append('i_pixelFlags_saturatedCenter')\n quantities.append('g_extendedness_flag')\n quantities.append('r_extendedness_flag')\n quantities.append('i_extendedness_flag')\n quantities.append('xy_flag')\n quantities.append('detect_isPatchInner')\n quantities.append('detect_isDeblendedSource')\n quantities.append('g_psfFlux')\n quantities.append('r_psfFlux')\n quantities.append('i_psfFlux')\n quantities.append('g_psfFluxErr')\n quantities.append('r_psfFluxErr')\n quantities.append('i_psfFluxErr')\n quantities.append('g_extendedness')\n quantities.append('r_extendedness')\n quantities.append('i_extendedness')\n #quantities.append('ebv')\n\n quantities = tuple(quantities)\n # note that snr is defined on flux directly and not on magnitudes\n\n # reading in the data \n if len(filters) > 0:\n catalog_data = catalog_instance.get_quantities(quantities,filters=filters,return_iterator=False)\n else:\n catalog_data = catalog_instance.get_quantities(quantities,return_iterator=False)\n a = time.time()\n\n \n data_rank={}\n recvbuf={}\n for quantity in quantities:\n data_rank[quantity] = catalog_data[quantity]\n print(len(data_rank[quantity]))\n if ('flag' in quantity) or ('Flag' in quantity) or ('detect' in quantity):\n recvbuf[quantity] = send_to_master(data_rank[quantity],'bool')\n else:\n recvbuf[quantity] = send_to_master(data_rank[quantity],'double')\n\n if rank==0:\n print(len(recvbuf[quantity]))\n\n\n if rank==0:\n recvbuf['ebv']= np.ones_like(recvbuf['r_cModelFlux'])* 0.01\n\n wPerpAction = WPerpPSFPlot()\n wPerpAction.populatePrepFromProcess()\n stage1 = wPerpAction.prep(recvbuf)\n stage2 = wPerpAction.process(stage1)\n plot = wPerpAction.produce(stage2, plotInfo=_StandinPlotInfo())\n plt.savefig(output_dir+\"stellarLocusTest.png\")\n plt.close()\n\n if rank==0:\n 
self.generate_summary(output_dir)\n else: \n self.current_failed_count=0\n \n self.metric=0\n self.current_failed_count = comm.bcast(self.current_failed_count, root=0)\n self.metric = comm.bcast(self.metric, root=0)\n\n return TestResult(passed=(self.current_failed_count == 0), score=self.metric)\n\n def conclude_test(self, output_dir):\n self.generate_summary(output_dir, aggregated=True)\n","sub_path":"descqa/srv_analysistools_wperp.py","file_name":"srv_analysistools_wperp.py","file_ext":"py","file_size_in_byte":11299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"560938787","text":"################################################\n#\n# Write Video file from Webcam or another Video\n#\n################################################\n\n# Import module\nimport cv2\nimport platform # platform used to specify codec based on OS\n\n# Read webcam using VideoCapture\n# and assign cap object\nwebcam_device = 0\ncap = cv2.VideoCapture(webcam_device)\n\n# Check if camera opened successfully\nif (cap.isOpened() == False):\n print(\"Error opening video stream or file\")\n\n# Obtain default resolutions of the frame\n# Convert the resolutions from float to integer\nframe_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nframe_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n# Define the codec and create VideoWriter object.\n# WINDoWS -- *'DIVX'\n# Linux -- *'XVID'\nif platform.system() == 'Windows':\n os_codec = 'DIVX'\nelse:\n os_codec = 'XVID'\noutavi = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(\n 'M', 'J', 'P', 'G'), 20, (frame_width, frame_height))\noutmp4 = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(\n *os_codec), 20, (frame_width, frame_height))\n\n# Read until video is completed or stopped\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Write the frame into the file\n outavi.write(frame)\n outmp4.write(frame)\n\n # Showing video\n cv2.imshow('Video Stream', frame)\n\n # Press 'ESC' to stop the video\n if cv2.waitKey(1) & 0xFF == 27:\n break\n\n# When everything done, release the VideoCapture and VideoWriter objects\ncap.release()\noutavi.release()\noutmp4.release()\n","sub_path":"Computer Vision 1 Introduction (Python)/week2-Video IO and GUI/.ipynb_checkpoints/1_2_WriteVideo-checkpoint.py","file_name":"1_2_WriteVideo-checkpoint.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653079081","text":"import time\nimport logging as log\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator\nfrom common import process_params, mult_list\n\nclass NeuralNetwork(BaseEstimator):\n \"\"\"\n Super class for neural nets.\n Has functionality for making ordinary nnets.\n \"\"\"\n def __init__(self, coefs, intercepts, layers):\n \"\"\"\n initialization for fully connected, standard nnets\n\n @param layers: list of nodes at each layer\n \"\"\"\n self.coefs = coefs\n self.intercepts = intercepts\n self.var_list = coefs + intercepts\n self.layers = layers\n\n @staticmethod\n def get_init_rand_bound_tanh(shape):\n # Used for tanh\n # Use the initialization method recommended by Glorot et al.\n return np.sqrt(6. / np.sum(shape))\n\n @staticmethod\n def get_init_rand_bound_sigmoid(shape):\n # Use the initialization method recommended by Glorot et al.\n return np.sqrt(2. 
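# The Glorot-style bound from NeuralNetwork.get_init_rand_bound_tanh() above,
# evaluated for a hypothetical 784 -> 128 dense layer: the weights are then
# drawn uniformly from [-bound, bound].
import numpy as np

bound = np.sqrt(6. / np.sum([784, 128]))
print(round(float(bound), 4))  # 0.0811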
/ np.sum(shape))\n\n @staticmethod\n def create_tf_var(shape):\n bound = NeuralNetwork.get_init_rand_bound_tanh(shape)\n return tf.Variable(tf.random_uniform(shape, minval=-bound, maxval=bound))\n\n @staticmethod\n def create_full_nnet(layer_sizes, input_layer, act_func=tf.nn.tanh, output_act_func=None, dropout_rate=0):\n \"\"\"\n @param input_layer: input layer (tensor)\n @param layer_sizes: size of each layer (input to output)\n \"\"\"\n print(layer_sizes)\n coefs = []\n intercepts = []\n layers = []\n n_layers = len(layer_sizes)\n for i in range(n_layers - 1):\n fan_in = layer_sizes[i]\n fan_out = layer_sizes[i + 1]\n if fan_out.startswith(\"conv\"):\n # Build a convolutional layer\n # Ex: conv3~3~1~32:1~1~1~1\n W_size, strides = fan_out.replace(\"conv\", \"\").split(\":\")\n strides = process_params(strides, int, split_str=\"~\")\n W_size = process_params(W_size, int, split_str=\"~\")\n b_size = [W_size[-1]]\n W = NeuralNetwork.create_tf_var(W_size)\n b = NeuralNetwork.create_tf_var(b_size)\n layer = tf.nn.conv2d(input_layer, W, strides=strides, padding='VALID') + b\n elif fan_out == \"pool\":\n # We're doing a pooling layer...\n # Ex notation: pool\n W = None\n b = None\n layer = tf.nn.max_pool(input_layer, [1,2,2,1], strides=[1,1,1,1], padding='VALID')\n elif (fan_in.startswith(\"conv\") or fan_in == \"pool\") and not fan_out.startswith(\"conv\"):\n # Transitioning to conv/pool from a dense layer, require flattening\n fan_in = int(mult_list(input_layer.shape[1:]))\n fan_out = int(fan_out)\n input_layer = tf.reshape(input_layer, [-1, fan_in])\n W_size = [fan_in, fan_out]\n b_size = [1,fan_out]\n W = NeuralNetwork.create_tf_var(W_size)\n b = NeuralNetwork.create_tf_var(b_size)\n layer = tf.add(tf.matmul(input_layer, W), b)\n else:\n # Create a dense layer\n if dropout_rate > 0:\n input_layer = tf.nn.dropout(input_layer, keep_prob=1 - dropout_rate)\n fan_in = int(fan_in)\n fan_out = int(fan_out)\n W_size = [fan_in, fan_out]\n b_size = [1,fan_out]\n W = NeuralNetwork.create_tf_var(W_size)\n b = NeuralNetwork.create_tf_var(b_size)\n layer = tf.add(tf.matmul(input_layer, W), b)\n if i < n_layers - 2:\n # if not last layer, add activation\n layer = act_func(layer)\n else:\n # is the layer layer\n if output_act_func is not None:\n layer = output_act_func(layer)\n input_layer = layer\n if W is not None:\n coefs.append(W)\n if b is not None:\n intercepts.append(b)\n layers.append(layer)\n\n return NeuralNetwork(coefs, intercepts, layers)\n\n @staticmethod\n def create_full_nnet_dense(layer_sizes, input_layer, act_func=tf.nn.tanh, output_act_func=None):\n \"\"\"\n @param input_layer: input layer (tensor)\n @param layer_sizes: size of each layer (input to output)\n \"\"\"\n coefs = []\n intercepts = []\n layers = []\n n_layers = len(layer_sizes)\n for i in range(n_layers - 1):\n fan_in = layer_sizes[i]\n fan_out = layer_sizes[i + 1]\n W_size = [fan_in, fan_out]\n b_size = [1,fan_out]\n W = NeuralNetwork.create_tf_var(W_size)\n b = NeuralNetwork.create_tf_var(b_size)\n layer = tf.add(tf.matmul(input_layer, W), b)\n if i < n_layers - 2:\n # if not last layer, add activation\n layer = act_func(layer)\n else:\n # is the layer layer\n if output_act_func is not None:\n layer = output_act_func(layer)\n input_layer = layer\n coefs.append(W)\n intercepts.append(b)\n layers.append(layer)\n\n return NeuralNetwork(coefs, intercepts, 
layers)\n","sub_path":"neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601841522","text":"\n# I first made a list of all the names of my family and friends, and I\n# assigned it to friendsandfamilymembers, making it a list variable.\nfriendsandfamilynames = [\"Brenda\",\"Kimberly\",\"Jennifer\",\"Armando\",\"Alicia\",\"Mariana\",\"Angy\",\"Lidia\",\"Elizabeth\",\"Nancy\"]\n# I assigned il to the value of 0, which makes it into an integer variable. This comes in handy later.\nil = 0\n# I made the list assigned to friendsandfamilynames get sorted, which just means that \n# the names will now be in an alphabetical order, which would make the list alphabetical.\n# So, \"Alicia, Armando, Angy, Brenda, Elizabeth, Jennifer, Kimberly, Lidia, Mariana, Nancy.\"\nfriendsandfamilynames.sort()\n# This would make the list assigned to friendsandfamilynames be\n# now in reverse, so \"Nancy, Mariana, Lidia, Kimberly, Jennifer, Elizabeth, Brenda, Angy, Armando, Alicia.\" \nfriendsandfamilynames.reverse()\n# This will print,\"These are the 5 friends and family members that I spend the most time with:\"\n# \" \"\nprint(\"These are the 5 friends and family members that I spend the most time with:\")\nprint(\" \")\n\n# This is going to make a variable called current_name. Whatever it has been set to \n# will be how many words there are on the list of the variable friendsandfamilynames.\nfor current_name in friendsandfamilynames:\n# This will make a variable called il, which will be 1 plus the past value of the variable il, which will change\n# depending on how many words there were on the list. So, the value of il here will be 1, then 2, \n# then 3, etc.\n il= 1 + il\n# This is assigning current_name to the string version of its past self.\n current_name = str(current_name)\n# The first time, this will print, \"1. Nancy\"\n# It will continue to do so but with different il and current_name values.\n print(str(il) + \". 
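# The layer-spec convention parsed inside create_full_nnet() above: a string
# such as "conv3~3~1~32:1~1~1~1" packs the filter shape and the strides.
# This standalone parse mirrors the process_params(..., split_str="~") calls
# without needing the common module.
spec = "conv3~3~1~32:1~1~1~1"
w_part, s_part = spec.replace("conv", "").split(":")
W_size = [int(x) for x in w_part.split("~")]   # [3, 3, 1, 32]
strides = [int(x) for x in s_part.split("~")]  # [1, 1, 1, 1]
print(W_size, strides)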
\" + str(current_name))\n\n","sub_path":"skgrfbkj.py","file_name":"skgrfbkj.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"32882571","text":"import wx\nimport inspect, traceback\nimport imp, importlib\nimport subprocess\nfrom functools import reduce\nfrom os.path import dirname, basename\nfrom pprint import pprint as pp\nimport os, sys, codecs, json\nfrom include.log_conf import info, debug, error\n#from include.log_tools import ex\nfrom include.common import home\nimport dialog.ErrDlg as ED\nfrom locale import getdefaultlocale, setlocale, LC_ALL\ndefault_fullscreen_style = wx.FULLSCREEN_NOSTATUSBAR | wx.FULLSCREEN_NOBORDER | wx.FULLSCREEN_NOCAPTION\n\ne=sys.exit\n\nclass dpt(object):\n\tpreview\t= 2**1\n\tedit\t= 2**2\n\n\n\ndef deepgetattr(obj, attr):\n\tout=None\n\ttry:\n\t\treduce(getattr, attr.split('.'), obj)\n\texcept AttributeError:\n\t\tpass\n\treturn out\n\n########################################################################\nclass exlog(object):\n\t\n\t#----------------------------------------------------------------------\n\tdef __init__(self, fn):\n\t\tself.fn = fn\n\t\t\n\tdef format_stacktrace(self):\n\t\tparts = [\"Traceback (most recent call last):\\n\"]\n\t\tparts.extend(traceback.format_stack(limit=50)[:-2])\n\t\tparts.extend(traceback.format_exception(*sys.exc_info())[1:])\n\t\treturn \"\".join(parts)\n\t#----------------------------------------------------------------------\n\tdef __call__(self,evt):\n\t\ttry:\n\t\t\tself.fn(self, evt)\n\t\texcept Exception as e:\n\t\t\tprint(self.format_stacktrace())\n\t\t\t\n\ndef format_stacktrace():\n\tparts = [\"Traceback (most recent call last):\\n\"]\n\tparts.extend(traceback.format_stack(limit=50)[:-2])\n\tparts.extend(traceback.format_exception(*sys.exc_info())[1:])\n\treturn \"\".join(parts)\n\t\t\ndef exceptionLogger(func, mname=''):\n\tdef logger_func(*args, **kw):\n\t\ttry:\n\t\t\tif not kw:\n\t\t\t\treturn func(*args)\n\t\t\treturn func(*args, **kw)\n\t\texcept Exception:\n\t\t\tfname=func.__name__\t\t\t\n\t\t\tstacktrace = format_stacktrace().replace('\"\"', '\"%s\"' % mname)\n\t\t\terror(stacktrace)\n\n\t\t\tif 1:\n\t\t\t\t#import dialog.StacktraceDlg as ED\n\t\t\t\timport dialog.ErrDlg as ED\n\t\t\t\tED.show(stacktrace)\n\t\t\tif 0:\n\t\t\t\tdlg = wx.MessageDialog(win, stacktrace,\n\t\t\t\t\t'Cannot open file',\n\t\t\t\t\twx.OK | wx.ICON_INFORMATION\n\t\t\t\t\t#wx.YES_NO | wx.NO_DEFAULT | wx.CANCEL | wx.ICON_INFORMATION\n\t\t\t\t\t)\n\t\t\t\tdlg.ShowModal()\n\t\t\t\tdlg.Destroy()\n\t\t\n\tlogger_func.__name__ = func.__name__\n\tlogger_func.__doc__ = func.__doc__\n\tif hasattr(func, '__dict__'):\n\t\tlogger_func.__dict__.update(func.__dict__)\n\treturn logger_func \n\t\ndef evt_stacktrace(mname):\n\t\"\"\"\n\tA decorator that will catch and log any exceptions that may occur\n\tto the named logger.\n\t\"\"\"\n\timport functools\n\treturn functools.partial(exceptionLogger, mname=mname) \n\n\n\t\nclass fstring:\n\tdef __init__(self, payload):\n\t\tself.payload = payload\t\t\n\tdef __str__(self):\n\t\t\n\t\tvars = inspect.currentframe().f_back.f_globals.copy()\n\t\tvars.update(inspect.currentframe().f_back.f_locals)\n\t\t#pp(list(vars.keys()))\n\t\t#pp(self.payload)\n\t\t#pp(vars.get('page_load'))\n\t\treturn self.payload.format(**vars)\n\n\t\t\t\nclass fstring2:\n\tdef __init__(self, payload, relpath=None):\n\t\tself.payload = payload\n\t\tself.relpath=relpath\n\tdef __str__(self):\n\t\t\n\t\tvars = 
inspect.currentframe().f_back.f_globals.copy()\n\t\tvars.update(inspect.currentframe().f_back.f_locals)\n\t\tif not self.payload.format(**vars):\n\t\t\treturn self.payload\n\t\tif self.relpath:\n\t\t\t\n\t\t\treturn os.path.relpath(self.payload.format(**vars), self.relpath) \n\t\telse:\n\t\t\treturn self.payload.format(**vars)\n\t\t\n\t\n#error must be grouped in one message\ndef ex(win=None,_exit=False):\n\n\tstacktrace = format_stacktrace()\n\terror(stacktrace)\n\tif 1:\n\t\tED.show(stacktrace)\n\tif 0 and _exit:\n\t\te(1)\n#error must be grouped in one message\ndef exm(msg, win=None,_exit=False):\n\ttry:\n\t\traise Exception (msg)\n\texcept:\n\t\n\t\tstacktrace = format_stacktrace()\n\t\terror(stacktrace)\n\t\tif 1:\n\t\t\tED.show(stacktrace)\n\t\tif _exit:\n\t\t\te(1)\ndef asrt(cond, msg=''):\n\ttry:\n\t\tassert cond, msg\n\texcept:\n\t\tex(msg, _exit=True)\n\t\t\n\n\t\t\t\ndef import_module_2(filepath):\n\tmod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1]) \n\tassert os.path.isfile(filepath), 'File %s does not exists.' % filepath\n\tif file_ext.lower() == '.py':\n\t\tpy_mod = imp.load_source(mod_name, filepath)\n\telif file_ext.lower() == '.pyc':\n\t\tpy_mod = imp.load_compiled(mod_name, filepath)\n\treturn py_mod\ndef load_script(script_path):\n\treturn import_module_3(script_path)\ndef import_module_33(file_path):\n\tif 0:\n\t\tcdir=dirname(file_path)\n\t\t#print(9999999,cdir)\n\t\tos.chdir(cdir)\n\tbn=basename(file_path)\n\tmod_name,file_ext = os.path.splitext(os.path.split(file_path)[-1])\n\tspec = importlib.util.spec_from_file_location(mod_name, bn)\n\tmodule = importlib.util.module_from_spec(spec)\n\ttry:\n\t\tspec.loader.exec_module(module)\n\texcept Exception as err:\n\t\tex(str(err))\n\t\n\tif 1:\n\t\tsys.modules[mod_name] = module\n\treturn module\n\t\ndef import_module_3(file_path):\n\tif 0:\n\t\tcdir=dirname(file_path)\n\t\t#print(9999999,cdir)\n\t\tos.chdir(cdir)\n\tbn=basename(file_path)\n\tmod_name,file_ext = os.path.splitext(os.path.split(file_path)[-1])\n\tspec = importlib.util.spec_from_file_location(mod_name, file_path)\n\tmodule = importlib.util.module_from_spec(spec)\n\ttry:\n\t\tspec.loader.exec_module(module)\n\texcept Exception as err:\n\t\tex(str(err))\n\t\n\tif 1:\n\t\tsys.modules[mod_name] = module\n\treturn module\n\t\nclass dict2(dict): \n\t\"\"\" Scikit Learn's container object \n\n\tDictionary-like object that exposes its keys as attributes. 
\n\t>>> b = Bunch(a=1, b=2) \n\t>>> b['b'] \n\t2 \n\t>>> b.b \n\t2 \n\t>>> b.c = 6 \n\t>>> b['c'] \n\t6 \n\t\"\"\" \n\n\tdef __init__(self, **kwargs): \n\t\tsuper(dict2, self).__init__(kwargs) \n\n\tdef __setattr__(self, key, value): \n\t\tself[key] = value \n\n\tdef __dir__(self): \n\t\treturn self.keys() \n\n\tdef __getattr__(self, key): \n\t\ttry: \n\t\t\treturn self[key] \n\t\texcept KeyError: \n\t\t\traise AttributeError(key) \n\n\tdef __setstate__(self, state): \n\t\tpass \n\ndef open_settings(filename):\n\t#filename=r'c:\\Python35-64\\apps\\BatchStatusBrowser\\cfg\\batch_status.cfg'\n\tconf = wx.FileConfig(localFilename = filename)\n\t#print(conf)\n\t\n\tdef create_entry(entry_name, entry_value):\n\t\tif not conf.HasEntry(entry_name):\n\t\t\tif isinstance(entry_value, (str, bytes)):\n\t\t\t\tconf.Write(entry_name, entry_value)\n\t\t\telif isinstance(entry_value, int):\n\t\t\t\tconf.WriteInt(entry_name, entry_value)\n\t\t\telif isinstance(entry_value, bool):\n\t\t\t\tconf.WriteBool(entry_name, entry_value)\n\t\t\telse:\n\t\t\t\tconf.Write(entry_name, repr(entry_value))\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\tflag_flush = False\n\t#print(getdefaultlocale())\n\tif create_entry('Language/Catalog', getdefaultlocale()[0]):\n\t\tflag_flush = True\n\tif create_entry('GUI/load_default_perspective_on_start', True):\n\t\tflag_flush = True\n\tif create_entry('GUI/save_default_perspective_on_exit', True):\n\t\tflag_flush = True\n\tif create_entry('GUI/perspective', ''):\n\t\tflag_flush = True\n\tif create_entry('GUI/load_default_state_on_start', True):\n\t\tflag_flush = True\n\tif create_entry('GUI/save_default_state_on_exit', True):\n\t\tflag_flush = True\n\tif create_entry('GUI/fullscreen_style', default_fullscreen_style):\n\t\tflag_flush = True\n\tif create_entry('GUI/centre_on_screen', repr((False, wx.BOTH))):\n\t\tflag_flush = True\n\tif create_entry('GUI/default_open_path', '.'):\n\t\tflag_flush = True\n\tif flag_flush:\n\t\tconf.Flush()\n\t\n\treturn conf\n\n\n\t\ndef load_config(config_path, verify_version=True):\n\n\twith codecs.open(config_path, encoding=\"utf-8\") as stream:\n\t\tconfig = json.load(stream)\n\n\treturn config\n\t\ndef load_layout_config0(config_path, verify_version=True):\n\t\n\tls=load_config(config_path, verify_version)\n\t\n\t\t\ndef deep_update(from_dict, to_dict):\n\tfor (key, value) in from_dict.items():\n\t\tif key in to_dict.keys() and \\\n\t\t\t\tisinstance(to_dict[key], dict) and \\\n\t\t\t\tisinstance(value, dict):\n\t\t\tdeep_update(value, to_dict[key])\n\t\telse:\n\t\t\tto_dict[key] = value\t\n\n\n\nDEFAULT_PERSPECTIVE = \"Default Perspective\"\n_platformNames = [\"wxMSW\", \"wxGTK\", \"wxMac\"]\n\n\t\ndef GetDocFile():\n\n\tdocFile = os.path.join(GetDataDir(), \"docs\", \"TrunkDocs.pkl\")\n\n\treturn docFile\n\t\ndef MakeDocDirs():\n\n\tdocDir = os.path.join(GetDataDir(), \"docs\")\n\tif not os.path.exists(docDir):\n\t\tos.makedirs(docDir)\n\n\tfor plat in _platformNames:\n\t\timageDir = os.path.join(docDir, \"images\", plat)\n\t\tif not os.path.exists(imageDir):\n\t\t\tos.makedirs(imageDir)\t\ndef GetDocImagesDir():\n\n\tMakeDocDirs()\n\treturn os.path.join(GetDataDir(), \"docs\", \"images\")\t\ndef GetDataDir():\n\t\"\"\"\n\tReturn the standard location on this platform for application data\n\t\"\"\"\n\tsp = wx.StandardPaths.Get()\n\treturn sp.GetUserDataDir()\n\t\ndef GetConfig():\n\tif not os.path.exists(GetDataDir()):\n\t\tos.makedirs(GetDataDir())\n\n\tconfig = wx.FileConfig(\n\t\tlocalFilename=os.path.join(GetDataDir(), 
\"options\"))\n\treturn config\n\t\n\n\t","sub_path":"include/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72941828","text":"import tensorflow as tf\nimport cv2\nimport numpy as np\nfrom tensorflow.contrib.layers import flatten\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\n\ndef pre_process(data):\n imgs = []\n for img in data:\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n norm = gray /255*0.8+0.1\n imgs.append(norm)\n return imgs\n\ndef show_images(n_classes, X_train, y_train):\n N_samples = []\n plt.figure(figsize=(20, 11))\n for k in range(0, n_classes):\n im = X_train[y_train == k]\n plt.subplot(5, 10, k + 1)\n plt.imshow(im[1, :, :, :])\n plt.title(k)\n plt.axis('off')\n N_samples.append(len(im))\n plt.show()\n plt.figure(figsize=(12, 4))\n plt.bar(range(0, n_classes), N_samples)\n plt.show()\n\ndef shuffle_train(X_train, y_train):\n return shuffle(X_train, y_train)\n\ndef evaluate(X_data, y_data, BATCH_SIZE, logits, one_hot_y, x, y):\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\n accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142076714","text":"from cspmodel import Problem, Variable\nfrom cspconstraint import AllDiff, LinEq\n\nDIGITS = [x for x in range(10)]\nNZ_DIGITS = [x for x in range(1, 10)]\n\n\nclass Cryptarithm(Problem):\n def __init__(self):\n letters = [Variable(c, DIGITS) for c in 'WOUR'] + [Variable(c, NZ_DIGITS) for c in 'TF']\n reports = [Variable('X' + str(i + 1), [0, 1]) for i in range(3)]\n constraints = [\n AllDiff(letters),\n LinEq([letters[1], letters[3], reports[0]], [2, -1, -10, 0]),\n LinEq([reports[0], letters[0], letters[2], reports[1]], [1, 2, -1, -10, 0]),\n LinEq([reports[1], letters[4], letters[1], reports[2]], [1, 2, -1, -10, 0]),\n LinEq([reports[2], letters[5]], [1, -1, 0])\n ]\n super().__init__('Cryptarithm', letters + reports, constraints)\n\n\nc = Cryptarithm()\nprint(c)\nprint(c.solve())\n","sub_path":"Constraint_and_optimisation_problems/CSP_represention/cryptarithm.py","file_name":"cryptarithm.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"420620524","text":"import json\nimport httpx\n\nfrom graia.saya import Saya, Channel\nfrom graia.application.group import Group, Member\nfrom graia.application import GraiaMiraiApplication\nfrom graia.application.event.messages import GroupMessage\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nfrom graia.application.message.parser.literature import Literature\nfrom graia.application.message.elements.internal import MessageChain, Plain, At\n\nfrom util.limit import member_limit_check\nfrom util.RestControl import rest_control\nfrom util.UserBlock import black_list_block\nfrom config import yaml_data, 
group_data, sendmsg\n\nsaya = Saya.current()\nchannel = Channel.current()\n\n\n@channel.use(ListenerSchema(listening_events=[GroupMessage],\n inline_dispatchers=[Literature(\"你在说什么\")],\n headless_decorators=[rest_control(), member_limit_check(30), black_list_block()]))\nasync def what_are_you_saying(app: GraiaMiraiApplication, group: Group, member: Member, message: MessageChain): # 你在说什么\n\n if yaml_data['Saya']['CyberBlacktalk']['Disabled']:\n return await sendmsg(app=app, group=group)\n elif 'CyberBlacktalk' in group_data[group.id]['DisabledFunc']:\n return await sendmsg(app=app, group=group)\n\n saying = message.asDisplay().split(\" \", 1)\n if len(saying) != 2:\n return await app.sendGroupMessage(group, MessageChain.create([Plain(f\"用法:你在说什么 <需要翻译的简写>\")]))\n api_url = \"https://lab.magiconch.com/api/nbnhhsh/guess\"\n api_data = {\"text\": saying[1]}\n api_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/51.0.2704.103 Safari/537.36\"}\n translation = httpx.post(api_url,\n json=api_data,\n headers=api_headers)\n ta = translation.text\n tb = json.loads(ta)\n if len(tb) == 0:\n return await app.sendGroupMessage(group, MessageChain.create([Plain(f\"用法:你在说什么 <需要翻译的简写>\")]))\n\n msg = [At(member.id)]\n for dict in tb:\n if \"trans\" in dict and len(dict[\"trans\"]) != 0:\n name = dict[\"name\"]\n tc = dict[\"trans\"]\n msg.append(Plain(f\"\\n===================\\n{name} 可能是:\\n > \" + \"\\n > \".join(tc)))\n elif \"inputting\" in dict and len(dict[\"inputting\"]) != 0:\n name = dict[\"name\"]\n tc = dict[\"inputting\"]\n msg.append(Plain(f\"\\n===================\\n{name} 可能是:\\n > \" + \"\\n > \".join(tc)))\n else:\n msg.append(Plain(f\"未收录该条目\"))\n\n await app.sendGroupMessage(group, MessageChain.create(msg))\n","sub_path":"saya/CyberBlacktalk.py","file_name":"CyberBlacktalk.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"89793690","text":"\n\nfrom xai.brain.wordbase.nouns._foolery import _FOOLERY\n\n#calss header\nclass _FOOLERIES(_FOOLERY, ):\n\tdef __init__(self,): \n\t\t_FOOLERY.__init__(self)\n\t\tself.name = \"FOOLERIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"foolery\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fooleries.py","file_name":"_fooleries.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"198927860","text":"import pyautogui as pag\nimport time\nimport random\n\ndef autoexit1(x, y):\n time.sleep(random.uniform(3.5, 4.5))\n pag.moveTo(316+x, 213+y)\n pag.drag(-100, 0, 1.5)\n time.sleep(random.uniform(3.5, 4.5))\n #자동 사냥 해제\n\n\np_list = pag.locateAllOnScreen(\"C:\\\\Users\\\\ODIN\\\\ODIN\\\\LineageW\\\\braveportion.png\", confidence=0.79)\np_list = list(p_list)\np_cneter = pag.center(p_list[0])\nf1 = [478, 122]\nf2 = [618, 327]\n\n\n\nif len(p_list) == 0:\n print(\"이미지없음\")\nelse :\n for p in p_list:\n ctr = pag.center(p)\n if ctr[0] >= f1[0] and ctr[0] <= f2[0] and ctr[1] >= f1[1] and ctr[1] < f2[1]:\n print(pag.center(p))\n pag.click(p, None, 1, 0.5)\n\n\n\n","sub_path":"image_click copy.py","file_name":"image_click copy.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392380036","text":"import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport 
torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import StepLR\n\nimport os\nimport time\nimport math\nimport random\nimport argparse\n\nfrom dsn import *\nfrom utils import *\n#from utils import progress_bar\n\nparser = argparse.ArgumentParser(description=\"Feature Mearusement\")\nparser.add_argument(\"--lr\",default=0.05, type=float, help=\"learning rate\")\nparser.add_argument(\"--resume\", \"-r\", action=\"store_true\", help=\"resume from checkpoint\")\nargs = parser.parse_args()\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nbest_acc = 0 # best validation accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\nprint(\"-------start data preparation----------\")\nss_num = 4\n\ndataset_dir = '/media/jinzhuo/wjz/Data/MASS/ss4/'\n\nstart_time = time.time()\nif os.path.isfile('../dsn_data/ss'+str(ss_num)+'_train.pt') and os.path.isfile('../dsn_data/ss'+str(ss_num)+'_test.pt'):\n print('loader exist')\n trainloader = torch.load('../dsn_data/ss'+str(ss_num)+'_train.pt')\n valloader = torch.load('../dsn_data/ss'+str(ss_num)+'_test.pt')\nelse:\n print('loader dont exist')\n files = os.listdir(dataset_dir)\n l = list(range(len(files)))\n random.shuffle(l)\n s = math.ceil(len(files)*0.8)\n train_files = [files[i] for i in l[:s]]\n val_files = [files[i] for i in l[s:]]\n trainloader = make_dataloader(dataset_dir, train_files, batch_size=128, shuffle=True, num_workers=0)\n valloader = make_dataloader(dataset_dir, val_files, batch_size=128, shuffle=True, num_workers=0)\n torch.save(trainloader, '../dsn_data/ss'+str(ss_num)+'_train.pt')\n torch.save(valloader, '../dsn_data/ss'+str(ss_num)+'_test.pt')\n'''\ntrainloader = torch.load('../../dsn_data/fpz_cz_tr_loader.pt')\nvalloader = torch.load('../../dsn_data/fpz_cz_val_loader.pt')\n'''\nprint(\"-------%s seconds for data preparation----------\" % (time.time() - start_time))\n\nprint(\"building model...\")\nnet = DeepSleepNet()\nnet = net.to(device)\nif device == \"cuda\":\n net = nn.DataParallel(net)\n cudnn.benchmark = True\n\nif args.resume:\n # load checkpoint\n print(\"resuming from checkpoint\")\n assert os.path.isdir(\"checkpoint\"), \"Error: no checkpoint directory found\"\n checkpoint = torch.load(\"./checkpoint/ckpt.pth\")\n net.load_state_dict(checkpoint[\"net\"])\n best_acc = checkpoint[\"acc\"]\n start_epoch = checkpoint[\"epoch\"]\n print(\"best acc: \", best_acc)\n optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)\nelse:\n optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n\ncriterion = nn.CrossEntropyLoss()\nlr_scheduler = StepLR(optimizer, step_size=50, gamma=0.1)\n\n# Training\ndef train(epoch):\n print('train epoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device, dtype=torch.float), targets.to(device, dtype=torch.long) # RuntimeError: Expected object of scalar type Long but got scalar type Byte for argument #2 'target' in call to _thnn_nll_loss_forward\n print(inputs.size(), targets.size())\n optimizer.zero_grad()\n outputs = net(inputs)\n targets = torch.max(targets, 1)[1]\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % 
(train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n# Validation\ndef val(epoch):\n print('val epoch: %d' % epoch)\n global best_acc\n net.eval()\n val_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(valloader):\n inputs, targets = inputs.to(device, dtype=torch.float), targets.to(device, dtype=torch.long) # RuntimeError: Expected object of scalar type Long but got scalar type Byte for argument #2 'target' in call to _thnn_nll_loss_forward\n outputs = net(inputs)\n targets = torch.max(targets, 1)[1] # solve issue \"RuntimeError multi target not supported at /pytorch/.../ClassNLLCrit..\"\n loss = criterion(outputs, targets)\n\n val_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(valloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (val_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n # Save checkpoint.\n acc = 100.*correct/total\n if acc > best_acc:\n print('Saving..')\n print(acc)\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt'+str(acc)+'.pth')\n best_acc = acc\n\nfor epoch in range(start_epoch, start_epoch+200):\n train(epoch)\n val(epoch)\n lr_scheduler.step()\n\nprint(\"best acc: \", best_acc)\n","sub_path":"dsn/trainval.py","file_name":"trainval.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"526926644","text":"from django import forms\nfrom django.forms.extras.widgets import SelectDateWidget\nfrom django.conf import settings\nimport json\n\n\nclass EmptyChoiceField(forms.ChoiceField):\n\tdef __init__(self, choices=(), empty_label=None, required=True, widget=None, label=None, initial=None, help_text=None, *args, **kwargs):\n\n\t\t# prepend an empty label if it exists (and field is not required!)\n\t\tif not required and empty_label is not None:\n\t\t\tchoices = tuple([(u'', empty_label)] + list(choices))\n\n\t\tsuper(EmptyChoiceField, self).__init__(choices=choices, required=required, widget=widget, label=label, initial=initial, help_text=help_text, *args, **kwargs) \n\nclass FormFields:\n\n\tdef get_user_list_field(self):\n\n\t\tfilter_params = {\"is_active\": \"True\"}\n\t\tusers_response = json.loads(settings.USER_SERVICE.list(\"user\", filter_params=filter_params).content)\n\t\tusers = EmptyChoiceField(choices=[(user['id'], \"{0} {1}\" .format(user['first_name'], user['last_name'])) for user in users_response], \n\t\twidget=forms.Select(attrs={'class':'form-control'}), required=False, empty_label=\"Show All\")\n\n\t\treturn users\n\n\tdef get_projects_list_field(self):\n\n\t\tfilter_params = {\"ordering\": \"title\"}\n\t\tprojects_response = json.loads(settings.PROJECT_SERVICE.list(\"project\", filter_params=filter_params).content)\n\t\tprojects = EmptyChoiceField(choices=[(x['pk'], x['title']) for x in projects_response],\n\t\twidget=forms.Select(attrs={'class':'form-control'}), required=False, empty_label=\"Show All\")\n\n\t\treturn projects\n\n\nclass ProjectSearchForm(forms.Form):\n\t\n\tform_fields = FormFields()\n\n\n\tstatus_options = ((\"True\", 'Active'), (\"False\", 'Inactive'))\n\tbillable_options = ((\"True\", 'Is Billable'), (\"False\", 'Not Billable'))\n\n\tsearch_status = EmptyChoiceField(choices=status_options, initial = 
\"True\",\n\t\twidget=forms.Select(attrs={'class':'form-control'}), required=False, empty_label=\"Show All\")\n\tsearch_billable = EmptyChoiceField(choices=billable_options,\n\t\twidget=forms.Select(attrs={'class':'form-control'}), required=False, empty_label=\"Show All\")\n\tprojects = form_fields.get_projects_list_field()\n\tusers = form_fields.get_user_list_field()\n\nclass EntrySearchForm(forms.Form):\n\t\n\tform_fields = FormFields()\n\n\tstatus_options = (('Open', 'Open'),('Submitted', 'Submitted'),('Declined', 'Declined'),('Approved', 'Approved'),('Deleted', 'Deleted'),('Invoicable', 'Invoicable'))\n\tdate_range_options = ((1, 'Today'), (2, 'This Week'), (3, 'This Month'), (5, 'Last Month'), (4, 'This Year'))\n\n\tsearch_status = EmptyChoiceField(choices=status_options, \n\t\twidget=forms.Select(attrs={'class':'form-control'}), required=False, empty_label=\"Show All\")\n\tsearch_date_range = EmptyChoiceField(choices=date_range_options, initial=3,\n\t\twidget=forms.Select(attrs={'class':'form-control'}), required=False, empty_label=\"Show All\")\n\tusers = form_fields.get_user_list_field()\n\tprojects = form_fields.get_projects_list_field()\n\n\n\nclass AddProjectResourceForm(forms.Form):\n\n\tform_fields = FormFields()\n\n\tusers = form_fields.get_user_list_field()\n\tselected_project = forms.CharField(widget=forms.TextInput(attrs={'class':'hidden'}))\n\tstart_date = forms.DateField(widget=SelectDateWidget(attrs={'class':'form-control'}))\n\tend_date = forms.DateField(widget=SelectDateWidget(attrs={'class':'form-control'}))\n\trate = forms.CharField(error_messages={'required': 'Please enter a rate'}, widget=forms.TextInput(attrs={'class':'form-control'}))\n\n\nclass AddProjectForm(forms.Form):\n\n\ttitle = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))\n\tdescription = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))\n\tstart_date = forms.DateField(widget=SelectDateWidget(attrs={'class':'form-control'}))\n\tend_date = forms.DateField(widget=SelectDateWidget(attrs={'class':'form-control'}))\n\tis_billable = forms.BooleanField(required=False)\n\tis_active = forms.BooleanField(required=False)\n\n\n\n\n","sub_path":"invoice/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287100039","text":"import glob\nimport sys\nimport os\n\ndst_w, dst_h = 1920, 1088\n\nimg_files = []\nimgdir = 'C:/Users/lli108/Downloads/tmp2/dog8'\nfor file in glob.glob(imgdir + \"/*.png\"):\n img_files.append(file)\nfor file in glob.glob(imgdir + \"/*.jpg\"):\n img_files.append(file)\nfor file in glob.glob(imgdir + \"/*.bmp\"):\n img_files.append(file)\nprint(len(img_files), 'images found')\n\noutdir = './out'\nos.makedirs(outdir, exist_ok=True)\n\nvf_filter = \"scale=%d:%d:force_original_aspect_ratio=decrease,pad=%d:%d:(ow-iw)/2:(oh-ih)/2\" % (dst_w, dst_h, dst_w, dst_h)\nvf_filter = ' \"' + vf_filter + '\" '\nfor i, file in enumerate(img_files):\n outfile = ' ' + outdir + '/' + str(i).zfill(4) + '.bmp'\n ffmpeg_cmdline = 'ffmpeg -y -i ' + file + ' -vf ' + vf_filter + outfile + ' 2>tmp.log'\n os.system(ffmpeg_cmdline)\n print(os.path.split(file)[1], 'converted')\n\n#os.system('ffmpeg -y -i ./out/%04d.bmp -pix_fmt yuv420p -vcodec libx264 -preset slow out.264')\nos.system('ffmpeg -y -i ./out/%04d.bmp -pix_fmt yuv420p -vcodec libx265 -preset slow 
out.265')\n\nprint('done')\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"95118089","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Test #06.\"\"\"\n\nimport sys\nfrom PyQt4 import QtGui\n\nclass Center(QtGui.QWidget):\n \"\"\"Widget in the center of the screen.\"\"\"\n def __init__(self, parent = None):\n QtGui.QWidget.__init__(self, parent)\n\n self.setWindowTitle('center')\n self.resize(250, 150)\n self.center()\n\n def center(self):\n \"\"\"Move widget in the center.\"\"\"\n screen = QtGui.QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move((screen.width() - size.width()) / 2,\n (screen.height() - size.height()) / 2)\n\ndef main():\n \"\"\"Test #06.\"\"\"\n app = QtGui.QApplication(sys.argv)\n center = Center()\n center.show()\n sys.exit(app.exec_())\n","sub_path":"tests/t_06.py","file_name":"t_06.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"94990035","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nSET_DPI = 100\nFIGSIZE = (18, 10)\n\n\ndef plot_channel(table, channel, filename, figsize=FIGSIZE, dpi=SET_DPI):\n plt.figure(figsize=figsize, dpi=dpi)\n plt.title(channel)\n plt.plot(table.index.values, table[channel], '-')\n plt.savefig(filename + '_' + channel + '_time.png', dpi=dpi)\n plt.close()\n\n\ndef plot_trigger(time, trigger, outfile, options, figsize=FIGSIZE, dpi=SET_DPI):\n def time2ntr(x):\n return x / options.tr\n\n def ntr2time(x):\n return x * options.tr\n\n thrline = np.ones(time.shape) * options.thr\n fig = plt.figure(figsize=figsize, dpi=dpi)\n subplot = fig.add_subplot(211)\n subplot.set_title('trigger and time')\n subplot.set_ylim([-0.2, options.thr * 10])\n subplot.plot(time, trigger, '-', time, thrline, 'r-.', time, time, '-')\n subplot = fig.add_subplot(223)\n subplot.set_xlim([-options.tr * 4, options.tr * 4])\n subplot.set_ylim([-0.2, options.thr * 3])\n subplot.secondary_xaxis('top', functions=(time2ntr, ntr2time))\n subplot.plot(time, trigger, '-', time, time, '-')\n subplot = fig.add_subplot(224)\n subplot.set_xlim([options.tr * (options.num_timepoints_expected - 4),\n options.tr * (options.num_timepoints_expected + 4)])\n subplot.set_ylim([-0.2, options.thr * 3])\n subplot.secondary_xaxis('top', functions=(time2ntr, ntr2time))\n subplot.plot(time, trigger, '-', time, time, '-')\n plt.savefig(outfile + '_trigger_time.png', dpi=dpi)\n plt.close()\n","sub_path":"phys2bids/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412121556","text":"import os\nimport inspect\nimport importlib\nfrom aiohttp.web import View\nfrom lib.handler_lib import BaseHandler\nfrom lib.applog import app_log\ninfo, debug, error, warn = app_log.info, app_log.debug, app_log.error, app_log.warning\n\n\ndef is_package(path):\n \"\"\"判断一个路径是否是一个包\n \"\"\"\n return os.path.isdir(path) and os.path.exists(os.path.join(path, '__init__.py'))\n\n\ndef setup_routes(app):\n \"\"\"从model目录导入基于BaseHandler的处理类,加入路由\n \"\"\"\n base_path = os.path.abspath(os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n os.path.pardir),\n 'model'))\n debug('import base_path %s', base_path)\n for _dirpath, _dirnames, _filenames in os.walk(base_path):\n 
l_ignore_dir = list(filter(lambda x: x.startswith('.'), _dirnames))  # materialize before removing, a lazy filter would skip entries while _dirnames is mutated\n        for _dir in l_ignore_dir:\n            info('remove %s', _dir)\n            _dirnames.remove(_dir)\n\n        if is_package(_dirpath) and _filenames:\n            # 得到包名\n            pkg_name = _dirpath[len(base_path) + len(os.sep):].replace('/', '.') or 'model'\n            _filenames = filter(lambda x: x.endswith('_handler.py'), _filenames)\n            #得到handler结尾的py文件列表,依次作为模块导入\n            for _file in _filenames:\n                try:\n                    mod = importlib.import_module('%s.%s' % (pkg_name, os.path.splitext(_file)[0]), pkg_name)\n                except Exception as e:\n                    warn('import error %s %s', '%s.%s' % (pkg_name, os.path.splitext(_file)[0]), e)\n                else:\n                    # 导入模块中的处理类\n                    # 模块中可导入的处理类必须满足如下条件:\n                    # 不以_开头 and 必须是类 and ((不是BaseHandler and 是BaseHandler的子类) or (不是View and 是View的子类))\n                    for _class in (x for x in map(lambda x: getattr(mod, x),\n                                                  filter(lambda x: not x.startswith('_'), dir(mod))) if inspect.isclass(x) and x is not BaseHandler and issubclass(x, View) and x is not View):\n                        info('add_router %s %s', repr(_class.PATH), _class)\n                        app.router.add_route('*', _class.PATH, _class)\n","sub_path":"lib/load_handler.py","file_name":"load_handler.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"284144921","text":"class Solution:\n    def reverseWords(self, s: str) -> str:\n        def reverse(w):\n            n = len(w); nw = [0] * n\n            for i in range(n):\n                nw[i] = w[n - 1 - i]\n            return ''.join(nw)\n        \n        tokens = s.split()\n        ns = []\n\n        for token in tokens:\n            ns.append(reverse(token))\n        \n        return ' '.join(ns)\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.reverseWords(\"Let's take LeetCode contest\"))\n","sub_path":"leetcode/easy/reverse-words-in-a-string-iii.py","file_name":"reverse-words-in-a-string-iii.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"531771150","text":"\"\"\"Django settings for oc project.\"\"\"\nimport os\nimport socket\n\nfrom django.utils.translation import ugettext_lazy as _\n\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# ----- Base configuration ----- #\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nOPENSHIFT_REPO_DIR = os.environ.get('OPENSHIFT_REPO_DIR', '')\n\nON_OPENSHIFT = OPENSHIFT_REPO_DIR != ''\n\nif ON_OPENSHIFT:\n    DATA_DIR = os.environ['OPENSHIFT_DATA_DIR']\n    LOG_DIR = os.environ['OPENSHIFT_LOG_DIR']\n    DEBUG = False\n    TEMPLATE_DEBUG = False\n    SECRET_KEY = os.environ['OPENSHIFT_SECRET_TOKEN']\n    ALLOWED_HOSTS = [os.environ['OPENSHIFT_APP_DNS'], socket.gethostname()]\nelse:\n    DATA_DIR = BASE_DIR\n    LOG_DIR = os.path.join(os.path.join(BASE_DIR, 'logs'))\n    DEBUG = True\n    TEMPLATE_DEBUG = True\n    SECRET_KEY = '+*^#b1@rvl_t!3xrb2tz!vuaho9t+ieou)fmm1*i3!9$=nc6#g'\n    ALLOWED_HOSTS = []\n\nDEBUG = DEBUG or 'DEBUG' in os.environ\nif ON_OPENSHIFT and DEBUG:\n    print(\"*** Warning - Debug mode is on ***\")\n# ----- END Base configuration ----- #\n\n\n# ----- App configuration ----- #\nINSTALLED_APPS = (\n    'modeltranslation',\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    # Required by Django Admin\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'django_extensions',\n    'rest_framework',\n    'ordered_model',\n    'compressor',\n    'main',\n    'ropeelements',\n    'siebert',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.locale.LocaleMiddleware',\n    
'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n # Required by Django Admin\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'main.urls'\n\nWSGI_APPLICATION = 'wsgi.application'\n\n# Angular's $resource provider removes trailing slashes\nAPPEND_SLASH = False\n# ----- END App configuration ----- #\n\n\n# ----- REST framework ----- #\nREST_FRAMEWORK = {\n 'DATE_FORMAT': None,\n 'DATETIME_FORMAT': None,\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n # 'rest_framework.parsers.FormParser',\n # 'rest_framework.parsers.MultiPartParser'\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n ),\n}\n\n\n# ----- Database configuration ----- #\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(DATA_DIR, 'db.sqlite3'),\n }\n}\n\n\n# ----- Internationalization ----- #\nLANGUAGE_CODE = 'de'\n\nLANGUAGES = (\n ('en', _('English')),\n ('de', _('German')),\n)\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'ropeelements', 'locale'),\n)\n\nMODELTRANSLATION_AUTO_POPULATE = True\n\nTIME_ZONE = 'Europe/Vienna'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n# ----- END Internationalization ----- #\n\n\n# ----- Static files ----- #\nSTATIC_URL = '/static/'\n\nif ON_OPENSHIFT:\n STATIC_ROOT = os.path.join(OPENSHIFT_REPO_DIR, 'wsgi', 'static')\nelse:\n STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL.strip('/'))\n\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'bower_components'),\n)\n\n# List classes that know how to find static files in various locations\nSTATICFILES_FINDERS = (\n 'main.staticfiles.FileSystemFinder',\n 'main.staticfiles.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# (Recursive) glob patterns for files to be collected into STATIC_ROOT\n# by the ``collectstatic`` command.\n# Since js and css files are precompressed they need not be included.\nCOLLECT_STATIC_FILES = [\n 'bootstrap/fonts/?*.*',\n 'jquery-ui/themes/smoothness/theme.css',\n 'jquery-ui/themes/smoothness/images/?*.*',\n]\n\nCOLLECT_STATIC_APP_FILES = [\n 'main/images/?*.*',\n 'ropeelements/images/?*.*',\n 'siebert/images/?*.*',\n 'modeltranslation/**/?*.*',\n 'ordered_model/?*.*',\n 'admin/**/?*.*',\n]\n# ----- END Static files ----- #\n\n\n# ----- django-compresor ----- #\n# JS and CSS files are compressed only if DEBUG is False, whereas the less\n# precompiler is always applied.\n_lessc_cmd = os.path.join(BASE_DIR, 'node_modules', 'less', 'bin', 'lessc')\n\n_less_paths = (\n os.path.join(BASE_DIR, 'ropeelements', 'static', 'ropeelements',\n 'less'),\n os.path.join(BASE_DIR, 'siebert', 'static', 'siebert', 'less'),\n os.path.join(BASE_DIR, 'bower_components', 'bootstrap', 'less'),\n)\n\n_lessc_options = '--include-path=' + os.pathsep.join(_less_paths)\n\nCOMPRESS_PRECOMPILERS = (\n ('text/less',\n '{0} {1} {{infile}} {{outfile}}'.format(_lessc_cmd, _lessc_options)),\n)\n\nCOMPRESS_CSS_FILTERS = (\n # Normalize URLs in url() statements\n 
'compressor.filters.css_default.CssAbsoluteFilter',\n 'compressor.filters.cssmin.CSSMinFilter',\n)\n\nCOMPRESS_OFFLINE = True if ON_OPENSHIFT else False\n# ----- END django-compresor ----- #\n\n\n# ----- Media files ----- #\nMEDIA_URL = '/static/media/'\n\nif ON_OPENSHIFT:\n MEDIA_ROOT = os.path.join(DATA_DIR, 'media')\nelse:\n MEDIA_ROOT = os.path.join(BASE_DIR, *MEDIA_URL.strip('/').split('/'))\n SERVE_MEDIA = True\n\n\n# ----- Logging ----- #\nif not ON_OPENSHIFT and not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n\nLOGGING = {\n 'version': 1,\n # The default is True, which would disable gunicorn loggers\n # 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n 'datefmt': \"%d/%b/%Y %H:%M:%S\"\n },\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'django.utils.log.NullHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n },\n 'logfile': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'django.log'),\n 'maxBytes': 500000,\n 'backupCount': 9,\n 'formatter': 'standard',\n },\n }\n}\n\nif ON_OPENSHIFT:\n LOGGING['loggers'] = {\n 'django': {\n 'handlers': ['logfile'],\n 'propagate': True,\n 'level': 'WARNING',\n }\n }\nelse:\n LOGGING['handlers']['db_logfile'] = {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'db.log'),\n 'maxBytes': 500000,\n }\n LOGGING['loggers'] = {\n 'django': {\n 'handlers': ['logfile', 'console'],\n 'propagate': True,\n 'level': 'WARNING',\n },\n 'django.db': {\n 'handlers': ['db_logfile'],\n 'propagate': False,\n 'level': 'DEBUG',\n },\n }\n# ----- END Logging ----- #\n","sub_path":"main/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120186626","text":"import requests\nfrom string import ascii_uppercase\n\ndef gamestart(roomname, playercode):\n\n getURL = 'http://aicontest.handong.edu/api/sessions/' + roomname + '/get/'\n params = {'playerid' : playercode}\n requests.get(getURL, params=params)\n\ndef placestone(playercode, s1, s2='' ):\n\n if playercode[-1] is 'b':\n postURL = 'http://aicontest.handong.edu/api/black-session/' + playercode + '/p1_post/'\n else:\n postURL = 'http://aicontest.handong.edu/api/white-session/' + playercode + '/p2_post/'\n\n data = { 'room' : playercode, 's1' : s1, 's2' : s2 }\n requests.post(postURL, data=data)\n\ndef getstatus(roomname, playercode):\n\n getURL = 'http://aicontest.handong.edu/api/sessions/' + roomname + '/get/'\n params = {'playerid' : playercode}\n get_data = requests.get(getURL, params=params).json()\n\n stone = []\n status = {}\n black = []\n white = []\n\n for i in ascii_uppercase[:-7] :\n for j in range(1,20):\n stone.append(i+str(j))\n for s in stone:\n status.setdefault(s,'empty')\n\n for get in get_data:\n data = get['x'] + str(get['y'])\n if get['color'] == 'black':\n black.append(data)\n if get['color'] == 'white':\n white.append(data)\n status.update({data : get['color']})\n\n turn = get_data[-1]['color']\n if turn == 'black':\n turn = 'white'\n elif turn == 'white':\n turn = 'black'\n else:\n turn = 'Notready'\n\n status.update({'black' : black , 'white': white, 'turn' : turn})\n\n return 
status\n\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107648852","text":"'''File Handling\nThe key function for working with files in Python is the open() function.\n\nThe open() function takes two parameters; filename, and mode.\n\nThere are four different methods (modes) for opening a file:\n\n\"r\" - Read - Default value. Opens a file for reading, error if the file does not exist\n\n\"a\" - Append - Opens a file for appending, creates the file if it does not exist\n\n\"w\" - Write - Opens a file for writing, creates the file if it does not exist\n\n\"x\" - Create - Creates the specified file, returns an error if the file exists\n\nIn addition you can specify if the file should be handled as binary or text mode\n\n\"t\" - Text - Default value. Text mode\n\n\"b\" - Binary - Binary mode (e.g. images)'''\n\ndef Write_file():\n # 1-\n Wfile = open('TestFile.txt', 'w') # Make a new file in output mode ('w' is write)\n Wfile.write('''Four people died after a fire broke out in a shopping mall\n in South Korea. The fire started in a children´s play area inside the mall.\n No children were inside the play area at the time.''') # Write strings of characters to it\n Wfile.close() # Close to flush output buffers to disk\n\n\n # 2-\n AppendFile = open('TestFile.txt', 'a') # lzmtha any bktb bs ma yms74 el 2dym\n AppendFile.write('\\nhi mego') # enma ydef 3aleh\n AppendFile.close()\n\ndef Read_File():\n\t\t\t\t\t\t\t#1-Reaf file for first time\n\n\tRfile = open('TestFile.txt', 'r')# 'r' (read) is the default processing mode\n\ttext=Rfile.read()# Read entire file into a string\n\tprint(text)\t# print interprets control characters\n\t# -----------------------\n\t\t\t\t\t\t\t#2-Read again\n\n\tprint(text) # if i would to read the same file again i can't don't tell me why\n\ttext.seek(0,0) # i don't know but what i know if i would to do this i can use method \"seek(0,0)\"\n\tprint(text) # that's mean i never read this file so read it\n\n\tRfile.close()\n\t# -----------------------\n\t\t\t\t\t\t\t# 3-loops (for) Read again\n\n\tRdata = open('data.txt')\n\tfor line in Rdata: print(line)\n\n\tRdata.close()\n\ndef Read_and_Readline():\n\tRfile = open('TestFile.txt', 'r')\n\n\tprint(Rfile.read()) # da bekr2 el txt\n\tprint(Rfile.read(5)) # da bekr2 2ol 5 7rof mn el txt\n\tprint(Rfile.readline()) # da bekr2 el str el 2ol mn el txt\n\n\t# -----------------------\n\tRfile.seek(0, 0)#read again\n\t# -----------------------\n\n\n\tprint(Rfile.read(30)) # this number means read first char in file to char number 30\n\tprint(Rfile.read(28)) # if i will do this again python will start from my end =(31)\n\n\t# -----------------------\n\tRfile.seek(0, 0) # so if i would to read the same line again you should to use 'seek(0,0)'\n\t# -----------------------\n\n\tprint(Rfile.read(30))\n\n\tRfile.close()\n\ndef tell_me_how_many_charcters_in_file():\n\tRfile = open('TestFile.txt', 'r')\n\n\tprint(Rfile.tell()) # how many charcters in this file\n\n\tRfile.close()\n\ndef delete_file():\n\timport os\n\tif os.path.exists(\"my_file.txt\"):\n\t\tos.remove(\"my_file.txt\")\n\telse:\n\t\tprint(\"The file does not exists\")\n\n\n#-------------------------------#\ndef Binary_Bytes_Files(*rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr):\n\timport struct\n\tpacked = struct.pack('>i4sh', 7, b'spam', 8) # Create packed binary data\n\tpacked \t\t\t\t\t\t\t\t\t\t\t# 10 bytes, not objects or 
text\n\t#>>> b'\\x00\\x00\\x00\\x07spam\\x00\\x08'\n\n\tfile = open('data.bin', 'wb') \t\t\t\t# Open binary output file\n\tfile.write(packed)#10 \t\t\t\t\t\t\t# Write packed binary data\n\tfile.close()\n\n\t'''Reading binary data back is essentially symmetric; not all programs need to tread so\n\tdeeply into the low-level realm of bytes, but binary files make this easy in Python:'''\n\n\tdata = open('data.bin', 'rb').read() # Open/read binary data file\n\tdata \t\t\t\t\t\t\t\t\t# 10 bytes, unaltered\n\t#b'\\x00\\x00\\x00\\x07spam\\x00\\x08'\n\tdata[4:8]#b'spam' \t\t\t\t\t\t# Slice bytes in the middle\n\tlist(data)\t\t\t\t\t\t\t # A sequence of 8-bit bytes\n\t#[0, 0, 0, 7, 115, 112, 97, 109, 0, 8]\n\tstruct.unpack('>i4sh', data)#(7, b'spam', 8) # Unpack into objects again\n\ndef Unicode_Text_Files():\n\tfile = open('unidata.txt', 'w', encoding = 'utf-8')\n\tS = 'sp\\xc4m'\n\tfile.write(S)\n\tfile.colse()\n\n\t#----------------------------------\n\t#do this\n\n\ttext = open('unidata.txt', encoding = 'utf-8').read() # Read/decode UTF-8 text\n\t#'spÄm'\n\traw = open('unidata.txt', 'rb').read()\n\t#b'sp\\xc3\\x84m'\n\n\t# ----------------------------------\n\t# or this that's the same\n\n\ttext.encode('utf-8') # Manual encode to bytes\n\t#b'sp\\xc3\\x84m'\n\traw.decode('utf-8') # Manual decode to str\n\t#'spÄm'\n\n\t#----------------------------\n\timport codecs\n\tcodecs.open('unidata.txt', encoding = 'utf8').read() # 2.X: read/decode text\n\t#>>> u'sp\\xc4m'\n\topen('unidata.txt', 'rb').read() # 2.X: read raw bytes\n\t#>>> 'sp\\xc3\\x84m'\n\topen('unidata.txt').read() # 2.X: raw/undecoded too #>>> 'sp\\xc3\\x84m'\n\n\n\n","sub_path":"Learning_python/11.Files.py","file_name":"11.Files.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"584232754","text":"# parameters.py\nfrom Tools import *\nimport INIreader as ini\n\n\ndef loadParameters(self):\n\n # Project version\n try:\n projectVersion = str(common.projectVersion)\n except BaseException:\n projectVersion = \"Unknown\"\n print(\"\\nProject version \" + projectVersion)\n\n print (\"random number seed (1 to get it from the clock) :\", common.mySeed)\n if common.mySeed == 1:\n random.seed()\n else:\n random.seed(common.mySeed)\n\n self.nAgents = common.nAgents\n print(\"Employing\",self.nAgents,\"bland agent(s)\")\n\n #self.worldXSize= input(\"X size of the world? \")\n self.worldXSize = 50\n print(\"X size of the world? \", self.worldXSize)\n\n #self.worldYSize= input(\"Y size of the world? \")\n self.worldYSize = 50\n print(\"Y size of the world? 
\", self.worldYSize)\n\n self.nCycles = common.nCycles\n print(\"Number of cycles (0 = exit) :\", self.nCycles)\n","sub_path":"6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/basicConfParser/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"400626987","text":"from Muzisyen import Muzisyen\nfrom Gitar import Gitar\nfrom Bateri import Bateri\n\n\ngitar = Gitar()\ngitar.Marka = \"Yamaha\"\ngitar.Aciklama = \"Pahalı\"\nsound = gitar.Cal()\n\nmuzisyen = Muzisyen()\nmuzisyen.Adi = \"Murat\"\nmuzisyen.Soyadi = \"Vuranok\"\nmuzisyen.Enstruman = gitar\n\nresult = \"\"\"\nAdı : {}\nSoyadı : {}\nEnstruman Sesi : {}\nMarka : {}\nAçıklama : {}\n\"\"\".format(muzisyen.Adi,\n muzisyen.Soyadi,\n sound,\n muzisyen.Enstruman.Marka,\n muzisyen.Enstruman.Aciklama)\nprint(result)\n\n\n\n","sub_path":"2) OOP/Lesson 6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"221832459","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Wlashback 1.0\nWlashback is module for backup local projects and MySQL database\nfor python versions over 3.\n\nYou should create settings file like a wlashback.cfg\nand set path\n\n@author: Gordeev Andrey \n\"\"\"\n\nimport os\nimport time\nimport tempfile\nimport tarfile\nfrom shutil import copyfile\nfrom configparser import ConfigParser\n\n\n__all__ = ['backup', ]\n\n\n# timestamp template\nts_now = '%H:%M %d.%m.%Y'\nts_directory = '%d%m%Y_%H%M'\ntimestamp = lambda option: time.strftime(option)\n\nrelated_keys = {\n 'settings': {'key': 'wlashconf'},\n 'database': {'key': 'database',\n 'ms': 'dbms',\n 'parameters': [\n 'name',\n 'user',\n 'password',\n 'host',\n 'port'], },\n 'output': {'key': 'backup_output', },\n 'input': {'key': 'backup_input', },\n}\n\n\nclass DataBase(object):\n def __init__(self, conf, *args, **kwargs):\n self.conf = conf\n\n def handler(self):\n if self.conf.get('dbms') == 'mysql':\n return MySQLHandler(self.conf)\n\n\nclass MySQLHandler(object):\n\n def __init__(self, conf):\n self.dump_code = '-1'\n for k, v in conf.items():\n setattr(self, k, v)\n\n def get_dump(self, path):\n path += '.sql'\n command = ' '.join(\n ['mysqldump',\n '-u {username}'.format(username=getattr(self, 'user')),\n\n ('-p \\'{passwd}\\''.format(passwd=self.password)) if\n hasattr(self, 'password') else '',\n\n ('-h {host}'.format(host=self.host)) if\n hasattr(self, 'host') else '',\n\n ('-P {port}'.format(port=self.port)) if\n hasattr(self, 'port') else '',\n\n '{name}'.format(name=getattr(self, 'name')),\n '> %s' % path])\n\n self.dump_code = os.system(command)\n return path\n\n\nclass Directory(object):\n def __init__(self, path, *args, **kwargs):\n self._path = path[:-1] if path[-1] == '/' else path\n\n @property\n def path(self):\n return self._path\n\n @property\n def name(self):\n return os.path.basename(self.path)\n\n\nclass InputDirectory(Directory):\n pass\n\n\nclass OutputDirectory(Directory):\n _full_path = None\n\n @property\n def full_path(self):\n return self._full_path\n\n @full_path.setter\n def full_path(self, path):\n self._full_path = path\n\n\nclass Section(object):\n status_success = 'success'\n status_fail = 'fail'\n def __init__(self, name, input_dirs=None, output_dirs=None, db=None):\n self._status = self.status_fail\n self.name = name\n self._inputs = list()\n self._outputs = list()\n self._db = list()\n\n 
mapper = lambda conf: _field.append(_class(conf))\n a = ((input_dirs, self._inputs, InputDirectory),\n (output_dirs, self._outputs, OutputDirectory),\n (db, self._db, DataBase), )\n for _arg, _field, _class in a:\n list(map(mapper, _arg))\n\n @property\n def status(self):\n return self._status\n\n @status.setter\n def status(self, status):\n self._status = status\n\n @property\n def outputs(self):\n return self._outputs\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def db(self):\n for db in self._db:\n yield db.handler()\n\n\nclass Wlashback(object):\n settings_key = related_keys['settings']['key']\n output_key = related_keys['output']['key']\n input_key = related_keys['input']['key']\n db_key = related_keys['database']['key']\n db_params = related_keys['database']['parameters']\n dbms_key = related_keys['database']['ms']\n\n def __init__(self, config):\n self.start_time = timestamp(ts_now)\n self.finish_time = timestamp(ts_now)\n self.start_time_dir = timestamp(ts_directory)\n self.config = config\n self._settings = None\n self._sections = list()\n\n def status(self):\n return {'start': self.start_time,\n 'finish': self.finish_time,\n 'sections': {str(x): x.status for x in self.sections}, }\n\n @property\n def sections(self):\n return self._sections\n\n @sections.setter\n def sections(self, section):\n self._sections.append(section)\n\n def parse(self):\n for section in self.config.sections():\n init = {'db': [],\n 'out_dirs': '',\n 'in_dirs': '', }\n curr_section = self.config[section]\n if curr_section.name == self.settings_key:\n self._settings = curr_section.items()\n continue\n if curr_section.get(self.output_key):\n for key, config in curr_section.items():\n if key == self.output_key:\n init['out_dirs'] = \\\n [x for x in curr_section.get(key).split()]\n elif key == self.input_key:\n init['in_dirs'] = \\\n [x for x in curr_section.get(key).split()]\n elif key.startswith(self.db_key):\n # TODO: catch the split() exception\n rawdb = config.split()\n dbconf = {self.dbms_key: key.split('__')[1]}\n\n for cset in rawdb:\n name, value = cset.split('=')\n if name in self.db_params:\n dbconf[name] = value\n\n init['db'].\\\n append(dbconf)\n section = Section(name=curr_section.name,\n input_dirs=init['in_dirs'],\n output_dirs=init['out_dirs'],\n db=init['db'])\n self.sections = section\n else:\n raise AttributeError('Check fields in config file')\n\n def backup(self, section):\n for dout in section.outputs:\n dout.full_path = os.path.join(\n dout.path,\n self.start_time_dir)\n if not os.path.exists(dout.full_path):\n os.makedirs(dout.full_path)\n\n with tempfile.TemporaryDirectory(prefix='wlb_') as wb_container:\n for din in section.inputs:\n tar_name = os.path.join(wb_container, din.name)\n with tarfile.open(tar_name, 'w:gz') as tar:\n tar.add(din.path,\n arcname=os.path.basename(din.path),\n recursive=True)\n for dout in section.outputs:\n copyfile(tar_name,\n os.path.join(dout.full_path,\n 'source.tar.gz'))\n\n with tempfile.TemporaryDirectory(\n prefix='database_', dir=wb_container) as db_dir:\n for db in section.db:\n tar_name = os.path.join(wb_container, db.name)\n with tarfile.open(tar_name, 'w:gz') as tar:\n sql_path = db.get_dump(os.path.join(db_dir, db.name))\n tar.add(sql_path, arcname=db.name, recursive=True)\n for dout in section.outputs:\n copyfile(\n tar_name,\n os.path.join(\n dout.full_path,\n 'database__{}.tar.gz'.format(db.name)))\n\n section.status = Section.status_success\n\n def action(self):\n self.parse()\n list(map(self.backup, 
self.sections))\n self.finish_time = timestamp(ts_now)\n\n\ndef backup(config_path):\n config = ConfigParser()\n if config.read(config_path):\n wb = Wlashback(config)\n wb.action()\n return wb.status()\n else:\n raise IOError('missing .cfg file!')\n","sub_path":"wlashback/wlashback.py","file_name":"wlashback.py","file_ext":"py","file_size_in_byte":8015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"342959412","text":"import matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom ipywidgets import interact, IntSlider, FloatSlider\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\n\n\ndef get_data(irises = [\"iris setosa\",\"iris virginica\"]):\n \n iris_dict = {0:\"iris setosa\",1:\"iris versicolor\",2:\"iris virginica\"}\n iris = datasets.load_iris()\n X = iris.data # we only take the first two features.\n y = np.array([iris_dict[iris] for iris in iris.target])\n return X[np.isin(y,irises)], y[np.isin(y,irises)]\n\ndef print_data(X,Y,columns):\n dict_param = {\"Длина чашелистника\":0, \"Ширина чашелистника\":1,\"Длина лепестка\":2,\"Ширина лепестка\":3}\n N = np.array([dict_param[par] for par in columns]) \n df = pd.DataFrame(np.hstack((X[:,N], Y.reshape(-1,1))), columns=columns+['Вид Ириса'])\n display(df)\n\n\ndef create_base_plot():\n plt.rcParams.update({'font.size': 15})\n plt.figure(figsize=(8, 6), dpi=100)\n plt.grid()\n \n\ndef plot_sign():\n plt.figure(figsize=(8, 4), dpi=100)\n plt.plot([-22, 0, 0, 22], [-1, -1, 1, 1])\n plt.xlabel(\"t\", fontsize=17)\n plt.ylabel(\"sign(t)\", fontsize=17)\n plt.grid()\n plt.ylim([-2, 2])\n plt.xlim([-21, 21])\n plt.show()\n\ndef plot_data_with_gip(X,Y):\n\n teta_0 = FloatSlider(min=-1.8, max=0, step=0.3, value=-0.92533091, description='theta_0: ')\n teta_1 = FloatSlider(min=1, max=3, step=0.25, value=1.25, description='theta_1: ')\n teta_2 = FloatSlider(min=-5, max=-1, step=0.5, value=-2.5, description='theta_2: ')\n\n @interact(teta_0=teta_0, teta_1=teta_1, teta_2=teta_2)\n def _plot_data_with_gip(teta_0,teta_1,teta_2):\n create_base_plot()\n plt.scatter(X[:,0][Y=='iris setosa'], X[:,1][Y=='iris setosa'], label='iris setosa', c='g')\n plt.scatter(X[:,0][Y=='iris virginica'], X[:,1][Y=='iris virginica'], label='iris virginica', c='b')\n plt.plot(X[:,0], (teta_0 +teta_1*X[:,0])/-teta_2)\n plt.xlabel(\"Длина чашелистника, см\", fontsize=17)\n plt.ylabel(\"Ширина чашелистника, см\", fontsize=17)\n plt.legend(prop={'size': 12})\n plt.xlim([4, 8])\n plt.ylim([2, 4.5])\n plt.show()\n\n\ndef plot_data(X,Y,columns):\n dict_param = {\"Длина чашелистника\":0, \"Ширина чашелистника\":1,\"Длина лепестка\":2,\"Ширина лепестка\":3}\n N = [dict_param[par] for par in columns]\n create_base_plot()\n irises = set(Y)\n for iris in irises:\n plt.scatter(X[:,N[0]][Y==iris], X[:,N[1]][Y==iris], label=iris)\n plt.xlabel(f\"{columns[0]}, см\", fontsize=17)\n plt.ylabel(f\"{columns[1]}, см\", fontsize=17)\n plt.legend(prop={'size': 12})\n plt.show()\n \n \ndef plot_data_moon(X,Y):\n create_base_plot()\n irises = set(Y)\n for iris in irises:\n plt.scatter(X[:,0][Y==iris], X[:,1][Y==iris], label=iris)\n plt.xlabel(f\"Признак 1\", fontsize=17)\n plt.ylabel(f\"Признак 2\", fontsize=17)\n plt.legend(prop={'size': 12})\n plt.show() \n\ndef plot_decision_regions(X, y,columns, classifier):\n # настроить генератор маркеров и палитру\n dict_param = {\"Длина чашелистника\":0, \"Ширина чашелистника\":1,\"Длина лепестка\":2,\"Ширина лепестка\":3}\n dict_ = {0:\"iris setosa\",1:\"iris 
versicolor\",2:\"iris virginica\"}\n N = [dict_param[par] for par in columns]\n create_base_plot()\n markers = ('s','x','o', '^','v')\n colors = ('red', 'blue', 'lightgreen','gray','суаn')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n # вывести поверхность решения\n x1_min, x1_max = X[:,N[0]].min() - 1, X[:,N[0]].max() + 1\n x2_min, x2_max = X[:,N[1]].min() - 1, X[:,N[1]].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min,x1_max,0.02),\n np.arange(x2_min,x2_max,0.02))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(0, xx1.max())\n plt.ylim(0, xx2.max())\n # показать все образцы\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(X[y==cl,N[0]], X[y==cl, N[1]],alpha=0.8, color=cmap(idx),marker=markers[idx], label=dict_[cl])\n \n plt.xlabel(f\"{columns[0]}, см\", fontsize=17)\n plt.ylabel(f\"{columns[1]}, см\", fontsize=17)\n plt.legend(prop={'size': 12})\n plt.show()\n\n\ndef plot_indent():\n plt.figure(figsize=(8, 4), dpi=100)\n plt.plot([-22, 0, 0, 22], [1, 1, 0, 0], label=\"$f(M_i) = [M_i<0]$\")\n plt.xlabel(\"$M_i$\", fontsize=17)\n plt.ylabel(\"$f(M_i)$\", fontsize=17)\n plt.grid()\n plt.legend()\n plt.ylim([-1, 2])\n plt.xlim([-21, 21])\n plt.show()\n\ndef plot_indent_with_maj():\n plt.figure(figsize=(8, 4), dpi=100)\n x = np.linspace(-10, 8.0,100)\n y1 = (1 - x)**2\n y2 = np.array(list(map(lambda x: max(0, 1-x), x)))\n y3 = np.e**(-x)\n y4 = np.log(1 + y3)\n plt.plot([-22, 0, 0, 22], [1, 1, 0, 0], label=\"$[M_i<0]$\")\n plt.plot(x,y1, label=\"$(1 - M_i)^2$\")\n plt.plot(x,y2, label=\"$max(0,1 - M_i)$\")\n plt.plot(x,y3, label=\"$e^{-M_i}$\")\n plt.plot(x,y4, label=\"$ln(1 + e^{-M_i})$\")\n plt.xlabel(\"$M_i$\", fontsize=17)\n plt.ylabel(\"$g(M_i)$\", fontsize=17)\n plt.grid()\n plt.legend()\n plt.ylim([-1, 7])\n plt.xlim([-7, 8])\n plt.show()\n\n\ndef plot_finaly_trained_model(X, kind_iris, theta):\n create_base_plot()\n teta_0, teta_1, teta_2 = theta\n plt.scatter(X[:,0][kind_iris=='iris setosa'], X[:,1][kind_iris=='iris setosa'], label='iris setosa', c='g')\n plt.scatter(X[:,0][kind_iris=='iris virginica'], X[:,1][kind_iris=='iris virginica'], label='iris virginica', c='b')\n plt.plot(X[:,0], (teta_0 +teta_1*X[:,0])/-teta_2)\n #plt.arrow(7,(teta_0 +teta_1*7)/-teta_2, teta_1/3,teta_2/3,head_width=0.2,width=0.01, label='$\\Theta$')\n plt.legend(prop={'size': 12})\n plt.xlim([4, 8])\n plt.ylim([2, 4.5])\n plt.show()\n\ndef create_data(X):\n X_ones = np.ones(X.shape[0])\n return np.column_stack([X_ones, X]) \n\ndef poly(X,n=1):\n X1 = X.copy()\n for i in range(1,n):\n X1 = np.hstack((X1,X**(i+1)))\n return X1\n \n \ndef plot_decision_regions_softmax(X, y,columns, classifier , test_idx=None, resolution=0.02):\n # настроить генератор маркеров и палитру\n create_base_plot()\n dict_param = {\"Длина чашелистника\":0, \"Ширина чашелистника\":1,\"Длина лепестка\":2,\"Ширина лепестка\":3}\n dict_ = {0:\"iris setosa\",1:\"iris versicolor\",2:\"iris virginica\"}\n N = [dict_param[par] for par in columns]\n markers = ('s','x','o', '^','v')\n colors = ('red', 'blue', 'lightgreen','gray','суаn')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n # вывести поверхность решения\n x1_min, x1_max = X[:,N[0]].min() - 1, X[:,N[0]].max() + 1\n x2_min, x2_max = X[:,N[1]].min() - 1, X[:,N[1]].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),\n np.arange(x2_min,x2_max,resolution))\n X_field = create_data(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = 
classifier.predict(X_field)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n # показать все образцы\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(X[y==cl,N[0]], X[y==cl, N[1]],alpha=0.8, color=cmap(idx),marker=markers[idx], label=dict_[cl])\n \n plt.xlabel(f\"{columns[0]}, см\", fontsize=17)\n plt.ylabel(f\"{columns[1]}, см\", fontsize=17)\n plt.legend(prop={'size': 12})\n plt.show()\n\n \n","sub_path":"mts/lectures/5_classification/classification_helper_mult.py","file_name":"classification_helper_mult.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"397232772","text":"import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\n#Download CIFAR-10\nfrom torchvision import datasets\nfrom torchvision import transforms\ndata_path = '../data-unversioned/p1ch7/'\ncifar10 = datasets.CIFAR10(data_path, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4915, 0.4823, 0.4468),(0.2470, 0.2435, 0.2616))]))\ncifar10_val = datasets.CIFAR10(data_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4915, 0.4823, 0.4468),(0.2470, 0.2435, 0.2616))]))\n\n#cifar10_tensor = datasets.CIFAR10(data_path, train=True, download=False, transform=transforms.ToTensor())\n\n# Limit number of classes (Build Dataset)\nlabel_map = {6: 0, 7:1, 8:2, 9: 3}\nclass_names = ['frog', 'horse', 'ship', 'truck']\ncifar4 = [(img, label_map[label])\n for img, label in cifar10\n if label in [6,7,8,9]]\ncifar4_val = [(img, label_map[label])\n for img, label in cifar10_val\n if label in [6,7,8,9]]\n\n#Normalize\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nn_out = 4;\n\n#Net\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)\n self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1)\n self.fc1 = nn.Linear(8 * 8 * 8, 32)\n self.fc2 = nn.Linear(32, n_out)\n \n def forward(self, x):\n out = F.max_pool2d(torch.tanh(self.conv1(x)), 2)\n out = F.max_pool2d(torch.tanh(self.conv2(out)), 2)\n out = out.view(-1, 8 * 8 * 8)\n out = torch.tanh(self.fc1(out))\n out = self.fc2(out)\n return out\n\n# Model\nmodel = Net()\n\n#Calculate loss\nloss=nn.NLLLoss()\n\n#Test output\n#img, label = cifar2[0]\n#out = model(img.view(-1).unsqueeze(0))\n#print(loss(out, torch.tensor([label])))\n\n#Train\nimport torch.optim as optim\nimport datetime\ndevice = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))\nprint(f\"Training on device {device}\")\n\ntrain_loader = torch.utils.data.DataLoader(cifar4, batch_size=64, shuffle=True)\nlearning_rate = 1e-2\noptimizer = optim.SGD(model.parameters(), lr=learning_rate)\nloss_fn = nn.CrossEntropyLoss()\ntotal_loss=np.array([])\nn_epochs = 200\n\ndef training_loop(n_epochs, optimizer, model, loss_fn, train_loader, total_loss):\n for epoch in range(1, n_epochs+1):\n loss_train=0.0\n for imgs, labels in train_loader:\n imgs=imgs.to(device=device)\n labels=labels.to(device=device)\n outputs = model(imgs)\n loss = loss_fn(outputs, labels)\n\n l2_lambda = 0.001\n l2_norm = sum(p.pow(2.0).sum()\n for p in model.parameters())\n loss = loss+l2_lambda*l2_norm\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_train += loss.item()\n total_loss = np.append(total_loss, 
float(loss_train/len(train_loader)))\n if epoch == 1 or epoch %10 == 0:\n #total_loss = np.append(total_loss, float(loss_train/len(train_loader)))\n print('{} Epoch: {}, Loss {}'.format(datetime.datetime.now(), epoch, float(loss_train/len(train_loader))))\n return total_loss\n\nmodel = Net().to(device=device)\nstart = time.time()\ntotal_loss = training_loop(n_epochs, optimizer, model, loss_fn, train_loader,total_loss)\nfinish = time.time()\nprint('Training time: %f' % (finish - start))\n\n#Validate\nimport collections\nval_loader = torch.utils.data.DataLoader(cifar4_val, batch_size=64, shuffle=False)\n\nall_acc_dict = collections.OrderedDict()\n\ndef validate(model, train_loader, val_loader):\n accdict={}\n for name, loader in [(\"train\", train_loader), (\"val\", val_loader)]:\n correct = 0\n total = 0\n \n with torch.no_grad():\n for imgs, labels in loader:\n imgs = imgs.to(device=device)\n labels = labels.to(device=device)\n outputs = model(imgs)\n _, predicted = torch.max(outputs, dim=1)\n total += labels.shape[0]\n correct += int((predicted == labels).sum())\n\n print(\"Accuracy {}: {:.2f}\".format(name, correct/total))\n accdict[name] = correct/total\n return accdict\n\nnumel_list = [p.numel()\n for p in model.parameters()\n if p.requires_grad == True]\nprint(sum(numel_list), numel_list)\n\nall_acc_dict[\"baseline\"] = validate(model, train_loader, val_loader)\n#plot loss\nplt.plot(range(len(total_loss)),total_loss,color=\"blue\")\nplt.title(\"Model Loss\")\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.show()\nplt.savefig('problem2a_loss.png')\n","sub_path":"HW4/problem2a.py","file_name":"problem2a.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"397084444","text":"import re\nimport time\nfrom .messages import Chat\nfrom .messages import Log as report\n\nclass Connection():\n irc_header_pattern = r\"^:\\w+!\\w+@\\w+\\.tmi\\.twitch\\.tv PRIVMSG #\\w+ :\"\n\n def __init__(self, connect_to, socket, log = print, sleep = time.sleep):\n self.socket = socket\n self.log = log\n self.sleep = sleep\n self.keep_IRC_running = True\n self.seconds_per_message = 1 / 120\n self.host = connect_to['irc_url']\n self.port = connect_to['irc_port']\n self.auth = connect_to['oauth_token']\n self.name = connect_to['bot_name']\n self.chan = connect_to['channel']\n self.irc_header = re.compile(Connection.irc_header_pattern)\n self.make_initial_twitch_connection()\n self.last_response = ('bot', 'No Messages Recieved')\n\n def send(self, message):\n irc_id = f':{self.name}!{self.name}@{self.name}.tmi.twitch.tv'\n answer = f'{irc_id} PRIVMSG #{self.chan} :{message}\\r\\n'\n encoded_answer = answer.encode(\"utf-8\")\n self.socket.send(encoded_answer)\n\n def scan_for_messages(self):\n while self.keep_IRC_running:\n self.scan()\n\n def scan(self):\n self.sleep(self.seconds_per_message)\n try:\n raw_response = self.socket.recv(1024).decode(\"utf-8\")\n self.last_response = self.report(raw_response)\n except:\n self.last_response = self.last_response\n\n def report(self, response):\n if Connection.its_a_ping(response):\n return self.send_a_pong()\n username = re.search(r\"\\w+\", response).group(0)\n if username == self.name:\n return self.last_response\n response_body = self.irc_header.sub(\"\", response)\n return self.log_response(username, response_body)\n\n def log_response(self, username, response_body):\n self.log(report.connect_response(username, response_body))\n return (username, response_body)\n\n def 
its_a_ping(response):\n return response == \"PING :tmi.twitch.tv\\r\\n\"\n\n def send_a_pong(self):\n self.socket.send(\"PONG :tmi.twitch.tv\\r\\n\".encode(\"utf-8\"))\n self.log(report.connect_pong)\n return self.last_response\n\n def make_initial_twitch_connection(self):\n try:\n self.connect()\n self.send_creds()\n self.send_botname()\n self.join_channel()\n self.send_hello()\n self.socket.setblocking(0)\n self.log(report.connect_complete)\n except:\n self.log(report.connect_failure)\n self.keep_IRC_running = False\n\n def connect(self):\n self.log(report.connect_loading)\n self.socket.connect((self.host, self.port))\n\n def send_creds(self):\n self.log(report.connect_pass)\n self.socket.send(f\"PASS {self.auth}\\r\\n\".encode(\"utf-8\"))\n\n def send_botname(self):\n self.log(report.connect_nick)\n self.socket.send(f\"NICK {self.name}\\r\\n\".encode(\"utf-8\"))\n\n def join_channel(self):\n self.log(report.connect_join)\n self.socket.send(f\"JOIN #{self.chan}\\r\\n\".encode(\"utf-8\"))\n\n def send_hello(self):\n self.sleep(1)\n self.log(report.connect_hi)\n self.send(Chat.good_morning)\n","sub_path":"src/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"1818309","text":"load(\"//internal:common.bzl\", \"ProtoCompileInfo\")\n\nRustProtoLibInfo = provider(fields = {\n \"name\": \"rule name\",\n \"lib\": \"lib.rs file\",\n})\n\ndef _strip_extension(f):\n return f.basename[:-len(f.extension) - 1]\n\ndef _rust_proto_lib_impl(ctx):\n \"\"\"Generate a lib.rs file for the crates.\"\"\"\n compilation = ctx.attr.compilation[ProtoCompileInfo]\n lib_rs = ctx.actions.declare_file(\"%s/lib.rs\" % compilation.label.name)\n\n # Add externs\n content = [\"extern crate protobuf;\"]\n if ctx.attr.grpc:\n content.append(\"extern crate grpcio;\")\n content.append(\"extern crate futures;\")\n content.append(\"\") # Newline\n\n # List each output\n srcs = [f for files in compilation.output_files.values() for f in files]\n for f in srcs:\n content.append(\"pub mod %s;\" % _strip_extension(f))\n content.append(\"pub use %s::*;\" % _strip_extension(f))\n\n # Write file\n ctx.actions.write(\n lib_rs,\n \"\\n\".join(content) + \"\\n\",\n False,\n )\n\n return [RustProtoLibInfo(\n name = ctx.label.name,\n lib = lib_rs,\n ), DefaultInfo(\n files = depset([lib_rs]),\n )]\n\nrust_proto_lib = rule(\n implementation = _rust_proto_lib_impl,\n attrs = {\n \"compilation\": attr.label(\n providers = [ProtoCompileInfo],\n mandatory = True,\n ),\n \"grpc\": attr.bool(\n mandatory = True,\n ),\n },\n output_to_genfiles = True,\n)\n","sub_path":"rust/rust_proto_lib.bzl","file_name":"rust_proto_lib.bzl","file_ext":"bzl","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323362959","text":"# Copyright 2020 Palantir Technologies\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport json\nimport 
+{"seq_id":"323362959","text":"# Copyright 2020 Palantir Technologies\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport json\nimport uuid\nimport random\nimport string\nimport logging\nimport configparser\nfrom typing import Optional\nfrom fastapi import FastAPI, Response, Request, status\nfrom mangum import Mangum\nfrom pydantic import BaseModel\nfrom datetime import datetime\nimport requests\n\napp = FastAPI()\nweb_handler = Mangum(app)\n\npreshared_key = os.environ.get('PRESHARED_KEY')\nif preshared_key is None:\n    print(\"No preshared key! Be careful!\")\n\nwebhook_url = os.environ.get('SLACK_WEBHOOK')\nif webhook_url is None:\n    print(\"No slack webhook defined, logging only mode\")\n\nclass AlertModel(BaseModel):\n    alertUrl: str\n    allAssociatedUsernames: str\n    psk: str\n    referrer: Optional[str] = None\n    alertTimestamp: int\n    alertType: str\n    suspectedUsername: Optional[str] = 'null'\n    suspectedHost: Optional[str] = 'null'\n    clientId: str\n\n\n###############################################################################\n# Status endpoint. Used to test connection\n#\n# curl -X GET http://localhost:8000/status\n#\n###############################################################################\n@app.get(\"/status\")\ndef health_check():\n    return {\"status\": \"healthy\"}\n\n###############################################################################\n# Alerting endpoint\n#\n# curl -X POST http://localhost:8000/alert --data '{\"allAssociatedUsernames\":\"bob\",\"alertUrl\":\"https://www.grubhub.com\",\"psk\":\"foobar\",\"referrer\":\"https://www.google.com\",\"alertType\":\"reuse\",\"suspectedUsername\":\"testuser\",\"suspectedHost\":\"testhost\",\"alertTimestamp\":1611703424585,\"clientId\":\"foo\"}'\n#\n###############################################################################\n@app.post(\"/alert\")\ndef alert(alert: AlertModel, request: Request, response: Response):\n    logging.info(\"Received a credential reuse alert!\")\n\n    if (preshared_key):\n        if (alert.psk != preshared_key):\n            logging.info(f\"Alert did not include correct pre-shared key! Correct key: {preshared_key}. Provided key: {alert.psk}\")\n            response.status_code = 400\n            return {\"status\": \"Incorrect PSK\"}\n\n    logging_message = f\"src_ip={request.client.host} \"\n\n    for key in alert:\n        if (key[0] == \"alertTimestamp\"):\n            key = (key[0], friendly_timestamp(key[1]))\n        if (key[0] != \"psk\"):\n            logging_message += f\"{key[0]}={key[1]} \"\n\n    logging.info(logging_message)\n\n    if (alert.alertType == \"reuse\"):\n        friendly_message = f\"A user with associated usernames {alert.allAssociatedUsernames} reused their password on {alert.alertUrl}!\"\n    elif (alert.alertType == \"domhash\"):\n        friendly_message = f\"{alert.alertUrl} triggered a dom hash alert for a user with associated usernames {alert.allAssociatedUsernames}.\"\n    elif (alert.alertType == \"userreport\"):\n        friendly_message = f\"A user with associated usernames {alert.allAssociatedUsernames} reported {alert.alertUrl} as a phishing page.\"\n    elif (alert.alertType == \"falsepositive\"):\n        friendly_message = f\"A user with associated usernames {alert.allAssociatedUsernames} reported a false positive alert on {alert.alertUrl}.\"\n    elif (alert.alertType == \"personalpassword\"):\n        friendly_message = f\"A user with associated usernames {alert.allAssociatedUsernames} reported that PhishCatch alerted on a personal password at {alert.alertUrl}.\"\n    else:\n        logging.error(\"Invalid alert type\")\n        friendly_message = f\"A user with associated usernames {alert.allAssociatedUsernames} fired an unknown alert on {alert.alertUrl}! Referrer: {alert.referrer}. Is the server up to date?\"\n\n    if alert.suspectedUsername != 'null' and alert.suspectedHost != 'null':\n        friendly_message += f\" Suspected account for this leak: {alert.suspectedUsername} from {alert.suspectedHost}.\"\n    friendly_message += f\" Referrer: {alert.referrer}. Timestamp: {alert.alertTimestamp}. Client ID: {alert.clientId}.\"\n    friendly_message += f\" Request IP: {request.client.host}\"\n\n    logging.info(friendly_message)\n\n    try:\n        slack_alert_handler(friendly_message)\n    except Exception as error:\n        logging.error(error)\n        response.status_code = 500\n        return {\"status\": \"Couldn't send slack alert\"}\n\n    return {\"status\": \"alert success\"}\n\ndef friendly_timestamp(timestamp):\n    return datetime.fromtimestamp(timestamp / 1000).isoformat()\n\ndef slack_alert_handler(message: str):\n    if webhook_url is None:\n        return\n\n    logging.info(\"Attempting to send slack alert!\")\n\n    logging.info(message)\n    send_slack_alert(username='AlertingBot!', message=message, emoji=':robot_face:')\n\ndef send_slack_alert(username: str, message: str, emoji: str):\n    logging.info(\"Sending slack alert\")\n\n    data = {\n        'text': message,\n        'username': username,\n        'icon_emoji': emoji\n    }\n\n    response = requests.post(\n        str(webhook_url),\n        data=json.dumps(data),\n        headers={'Content-Type': 'application/json'}\n    )\n\n    logging.info('Slack response: ' + str(response.text))\n    logging.info('Slack response code: ' + str(response.status_code))\n","sub_path":"server/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"434980711","text":"import os\nimport nltk\nimport speech_recognition as sr\nfrom nltk.parse.stanford import StanfordParser\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tree import *\nfrom conf import JAR_DIR\n# from nltk.parse.corenlp import stanford\n\nos.environ['STANFORD_PARSER'] = JAR_DIR\nos.environ['STANFORD_MODELS'] = JAR_DIR\nos.environ['JAVAHOME'] = \"C:/Program Files/Java/jdk-14.0.2/bin/java.exe\"\nnltk.download('wordnet')\n\ndef filter_stop_words(words):\n    stopwords_set = set(['a', 'an','am', 'the','for', 'is','be','to'])\n    # stopwords_set = set(stopwords.words(\"english\"))\n    words = list(filter(lambda x: x not in stopwords_set, words))\n    return words\n\n\ndef lemmatize_tokens(token_list):\n    lemmatizer = WordNetLemmatizer()\n    lemmatized_words = []\n    for token in token_list:\n        token = lemmatizer.lemmatize(token)\n        lemmatized_words.append(lemmatizer.lemmatize(token,pos=\"v\"))\n\n    return lemmatized_words\n\n\ndef label_parse_subtrees(parent_tree):\n    tree_traversal_flag = {}\n\n    for sub_tree in parent_tree.subtrees():\n        tree_traversal_flag[sub_tree.treeposition()] = 0\n    return tree_traversal_flag\n\n\ndef handle_noun_clause(i, tree_traversal_flag, modified_parse_tree, sub_tree):\n    # if clause is Noun clause and not traversed then insert them in new tree first\n    if tree_traversal_flag[sub_tree.treeposition()] == 0 and tree_traversal_flag[sub_tree.parent().treeposition()] == 0:\n        tree_traversal_flag[sub_tree.treeposition()] = 1\n        modified_parse_tree.insert(i, sub_tree)\n        i = i + 1\n    return i, modified_parse_tree\n\n\ndef handle_verb_prop_clause(i, tree_traversal_flag, modified_parse_tree, sub_tree):\n    # if clause is Verb clause or Proportion clause recursively check for Noun clause\n    for child_sub_tree in sub_tree.subtrees():\n        if child_sub_tree.label() == \"NP\" or child_sub_tree.label() == 'PRP':\n            if tree_traversal_flag[child_sub_tree.treeposition()] == 0 and tree_traversal_flag[child_sub_tree.parent().treeposition()] == 0:\n                tree_traversal_flag[child_sub_tree.treeposition()] = 1\n                modified_parse_tree.insert(i, child_sub_tree)\n                i = i + 1\n    return i, modified_parse_tree\n\n\ndef modify_tree_structure(parent_tree):\n    # Mark all subtrees position as 0\n    tree_traversal_flag = label_parse_subtrees(parent_tree)\n    # Initialize new parse tree\n    modified_parse_tree = Tree('ROOT', [])\n    i = 0\n    for sub_tree in parent_tree.subtrees():\n        if sub_tree.label() == \"NP\":\n            i, modified_parse_tree = handle_noun_clause(i, tree_traversal_flag, modified_parse_tree, sub_tree)\n        if sub_tree.label() == \"VP\" or sub_tree.label() == \"PRP\":\n            i, modified_parse_tree = handle_verb_prop_clause(i, tree_traversal_flag, modified_parse_tree, sub_tree)\n\n    # recursively check for omitted clauses to be inserted in tree\n    for sub_tree in parent_tree.subtrees():\n        for child_sub_tree in sub_tree.subtrees():\n            if len(child_sub_tree.leaves()) == 1:  # check if subtree leads to some word\n                if tree_traversal_flag[child_sub_tree.treeposition()] == 0 and tree_traversal_flag[child_sub_tree.parent().treeposition()] == 0:\n                    tree_traversal_flag[child_sub_tree.treeposition()] = 1\n                    modified_parse_tree.insert(i, child_sub_tree)\n                    i = i + 1\n\n    return modified_parse_tree\n\ndef convert_eng_to_isl(input_string):\n\n    if len(list(input_string.split(' '))) == 1:\n        return list(input_string.split(' '))\n\n    # Initializing stanford parser\n    parser = StanfordParser()\n\n    # Generates all possible parse trees sort by probability for the sentence\n    possible_parse_tree_list = [tree for tree in parser.parse(input_string.split())]\n\n    # Get most probable parse tree\n    parse_tree = possible_parse_tree_list[0]\n    #print(parse_tree,\"______________________\")\n\n    # Convert into tree data structure\n    parent_tree = ParentedTree.convert(parse_tree)\n\n    #print(parent_tree,\"______________________\")\n\n    modified_parse_tree = modify_tree_structure(parent_tree)\n\n    parsed_sent = modified_parse_tree.leaves()\n    return parsed_sent\n\ndef pre_process(sentence):\n    words = list(sentence.split())\n    f = open('words.txt', 'r')\n    eligible_words = f.read()\n    f.close()\n    final_string = \"\"\n\n    for word in words:\n        if word not in eligible_words:\n            for letter in word:\n                final_string += \" \" + letter\n        else:\n            final_string += \" \" + word\n\n    return final_string\n\n# DRIVER CODE\n\ndef isl(text):\n    input_string = text.capitalize()\n    # input_string = input_string.lower()\n    isl_parsed_token_list = convert_eng_to_isl(input_string)\n    #print(isl_parsed_token_list,\"_______________________\")\n    # lemmatize tokens\n    lemmatized_isl_token_list = lemmatize_tokens(isl_parsed_token_list)\n\n    #print(lemmatized_isl_token_list,\"_______________________\")\n    # remove stop words\n    filtered_isl_token_list = filter_stop_words(lemmatized_isl_token_list)\n\n    #print(filtered_isl_token_list,\"_______________________\")\n    isl_text_string = \"\"\n\n    for token in filtered_isl_token_list:\n        isl_text_string += token\n        isl_text_string += \" \"\n\n    isl_text_string = isl_text_string.lower()\n    print(\"ISL:{\"+isl_text_string+\"}\")\n    return isl_text_string\n\n","sub_path":"speech_to_isl.py","file_name":"speech_to_isl.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"34951594","text":"def fib(n):\n    a1 = 0\n    a2 = 1\n\n    if n < 1:\n        raise Exception(\"invalid argument\")\n    elif n == 1:\n        return a1\n    elif n == 2:\n        return a2\n\n    res = 0\n    for i in range(2, 
n):\n res = a1 + a2\n a1 = a2\n a2 = res\n return res\n\n\ndef main():\n print(fib(9))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"15_1_5_fib_DP.py","file_name":"15_1_5_fib_DP.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283641952","text":"# -*- coding:utf-8 -*-\n\n\"\"\"Realize command line interface.\"\"\"\n\nimport argparse\nimport os\n\nDEFAULT_PATH = os.getcwd()\n\n\ndef get_args():\n \"\"\"Return command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"\"\"'Page loader' is a written\n in Python utility which downloads requested web page with local\n resources.\"\"\")\n parser.add_argument(\n '-o',\n '--output',\n default=DEFAULT_PATH,\n help='set output path (default: current directory)',\n )\n parser.add_argument(\n 'url',\n help='set requested web page in full format (with schema)',\n )\n return parser.parse_args()\n","sub_path":"page_loader/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"458630900","text":"import re\n\n\nclass data_check:\n \"\"\"\n 文本格式、类型检查\n \"\"\"\n\n def __init__(self, data_dict_):\n self.data_dict = data_dict_\n self.flag_format_birth_date = False\n self.flag_format_death_date = False\n self.flag_value_birth_year = False # 出生时间的年份限制\n self.flag_value_birth_month = False # 出生时间的月份限制\n self.flag_value_birth_day = False # 出生时间的日限制\n self.flag_value_death_year = False # 去世时间的年份限制\n self.flag_value_death_month = False # 去世时间的月份限制\n self.flag_value_death_day = False # 去世时间的日限制\n self.flag_format_person_weight_id = False\n self.flag_format_person_category_id = False\n self.flag_age_greater_than_100 = False # 年龄大于100的标记\n self.flag_age_less_than_0 = False # 年龄小于0的标记,即出生、死亡日期不符合逻辑\n self.flag_age_greater_than_0_and_less_than_10 = False # 年龄在0到10之间\n self.flag_age_diff_greater_than_2 = False # 根据死亡时间与出生时间得出的年龄与实际给出的age字段值相差大于2的\n self.flag_age_noexist = False # 有出生和死亡时间但age为空\n\n self.flag_is_digital_age = False # 数字类型检测\n self.flag_is_digital_weight_id = False\n self.flag_is_digital_category_id = False\n\n self.flag_introduction_noexist = False # 简介空值检测\n self.flag_all_name_noexist = False # 姓名空值检测\n\n self.time_of_birth = self.data_dict[\"time_of_birth\"]\n self.time_of_death = self.data_dict[\"time_of_death\"]\n self.introduction = self.data_dict[\"introduction\"]\n self.all_name = self.data_dict[\"all_name\"]\n\n def is_digital_age(self, ):\n \"\"\"\n :param age: 数字检测\n :return:\n \"\"\"\n if self.data_dict[\"age\"]:\n age = str(self.data_dict[\"age\"])\n # if age.startswith(\"-\"):\n # self.flag_is_digital_age = True\n # else:\n age_sub = re.sub(r'\\d', \"\", age) # \\d匹配任意数字,将其替换为空\n if age_sub:\n self.flag_is_digital_age = True\n\n return self.flag_is_digital_age\n\n def is_digital_weight_id(self, weight_id=\"1\"):\n \"\"\"\n :param weight_id: 数字检测\n :return:\n \"\"\"\n weight_id = str(self.data_dict[\"person_weight_id\"])\n weight_id_sub = re.sub(r'\\d', \"\", weight_id) # \\d匹配任意数字,将其替换为空\n if weight_id_sub:\n self.flag_is_digital_weight_id = True\n return self.flag_is_digital_weight_id\n\n def is_digital_category_id(self, category_id=\"2\"):\n \"\"\"\n :param category_id: 数字检测\n :return:\n \"\"\"\n category_id = str(self.data_dict[\"person_category_id\"])\n category_id_sub = re.sub(r'\\d', \"\", category_id) # \\d匹配任意数字,将其替换为空\n if category_id_sub:\n self.flag_is_digital_category_id = True\n 
return self.flag_is_digital_category_id\n\n def format_check_birth_date(self):\n \"\"\"\n 出生日期格式检查\n :return:\n \"\"\"\n if self.time_of_birth:\n time_of_birth = self.time_of_birth.replace(\"-\", \"\")\n time_of_birth_sub = re.sub(r'\\d', \"\", time_of_birth) # \\d匹配任意数字,将其替换为空\n if time_of_birth_sub:\n self.flag_format_birth_date = True\n print(\"出生日期时间格式错误\")\n else:\n find_split_list = re.findall(\"-\", self.time_of_birth)\n if len(find_split_list) != 2 and len(find_split_list) != 3:\n self.flag_format_birth_date = True\n print(\"出生日期时间格式错误\")\n else:\n date_split = self.time_of_birth.split(\"-\")\n date_len = len(date_split)\n if date_len == 4:\n year = date_split[1]\n mon = date_split[2]\n day = date_split[3]\n if len(mon) != 2 or len(day) != 2 or len(year) == 0:\n print(\"出生日期时间格式错误\")\n self.flag_format_birth_date = True\n elif date_len == 3:\n year = date_split[0]\n mon = date_split[1]\n day = date_split[2]\n if len(mon) != 2 or len(day) != 2 or len(year) == 0:\n print(\"出生日期时间格式错误\")\n self.flag_format_birth_date = True\n else:\n print(\"出生日期时间格式错误\")\n self.flag_format_birth_date = True\n\n return self.flag_format_birth_date\n\n def format_check_death_date(self):\n \"\"\"\n 死亡日期格式检查\n :return:\n \"\"\"\n if self.time_of_death:\n time_of_death = self.time_of_death.replace(\"-\", \"\")\n time_of_death_sub = re.sub(r'\\d', \"\", time_of_death) # \\d匹配任意数字,将其替换为空\n if time_of_death_sub:\n self.flag_format_death_date = True\n print(\"死亡日期时间格式错误\")\n else:\n find_split_list = re.findall(\"-\", self.time_of_death)\n if len(find_split_list) != 2 and len(find_split_list) != 3:\n self.flag_format_death_date = True\n print(\"死亡日期时间格式错误\")\n else:\n date_split = self.time_of_death.split(\"-\")\n date_len = len(date_split)\n if date_len == 4:\n year = date_split[1]\n mon = date_split[2]\n day = date_split[3]\n if len(mon) != 2 or len(day) != 2 or len(year) == 0:\n print(\"死亡日期时间格式错误\")\n self.flag_format_death_date = True\n elif date_len == 3:\n year = date_split[0]\n mon = date_split[1]\n day = date_split[2]\n if len(mon) != 2 or len(day) != 2 or len(year) == 0:\n print(\"死亡日期时间格式错误\")\n self.flag_format_death_date = True\n else:\n print(\"死亡日期时间格式错误\")\n self.flag_format_death_date = True\n return self.flag_format_death_date\n\n def check_birthday_year_month_day(self):\n \"\"\"\n 1、限制:日<=31、月<=12、年!=0xx\n 一年有12个月,其中1月、3月、5月、7月、8月、10月、12月为31天;\n 4月、6月、9月、11月为30天;\n 2���为28天(闰年为29天)。\n 2、月份是00时,日必是00\n :return:\n \"\"\"\n if self.time_of_birth:\n time_of_birth_split = self.time_of_birth.split(\"-\")\n # if str(self.time_of_birth).startswith(\"-\"): # 公元前 出生\n year = time_of_birth_split[-3]\n month = time_of_birth_split[-2]\n day = time_of_birth_split[-1]\n\n if year.startswith(\"0\"):\n self.flag_value_birth_year = True\n print(\"出生时间-年份错误!不能为0xx\")\n if month == \"00\":\n if day != \"00\":\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!请改为00\")\n else:\n if not str(month).startswith(\"0\") and int(month) > 12:\n print(\"出生时间-月份错误!大于12了\")\n self.flag_value_birth_month = True\n\n if day != \"00\":\n if month == \"01\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n elif month == \"02\":\n if not str(day).startswith(\"0\") and int(day) > 29: # 粗限制2月份天数,暂时不做闰年判断限制2月天数\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于29了\")\n elif month == \"03\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n elif month == \"04\":\n if not 
str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于30了\")\n elif month == \"05\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n elif month == \"06\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于30了\")\n elif month == \"07\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n elif month == \"08\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n elif month == \"09\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于30了\")\n elif month == \"10\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n elif month == \"11\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于30了\")\n elif month == \"12\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_birth_day = True\n print(\"出生时间-日错误!大于31了\")\n else:\n print(\"出生时间-月份格式错误,不在设定 00-12 范围!\")\n self.flag_value_birth_month = True\n return self.flag_value_birth_year, self.flag_value_birth_month, self.flag_value_birth_day\n\n def check_death_year_month_day(self):\n \"\"\"\n 1、限制:日<=31、月<=12、年!=0xx\n 一年有12个月,其中1月、3月、5月、7月、8月、10月、12月为31天;\n 4月、6月、9月、11月为30天;\n 2月为28天(闰年为29天)。\n 2、月份是00时,日必是00\n :return:\n \"\"\"\n if self.time_of_death:\n time_of_death_split = self.time_of_death.split(\"-\")\n # if str(self.time_of_death).startswith(\"-\"): # 公元前 出生\n year = time_of_death_split[-3]\n month = time_of_death_split[-2]\n day = time_of_death_split[-1]\n if year.startswith(\"0\"):\n self.flag_value_death_year = True\n print(\"死亡时间-年份错误!不能为0xx\")\n if month == \"00\":\n if day != \"00\":\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!请改为00\")\n else:\n if not str(month).startswith(\"0\") and int(month) > 12:\n print(\"死亡时间-月份错误!大于12了\")\n self.flag_value_death_month = True\n\n if day != \"00\":\n if month == \"01\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n elif month == \"02\":\n if not str(day).startswith(\"0\") and int(day) > 29: # 粗限制2月份天数,暂时不做闰年判断限制2月天数\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于29了\")\n elif month == \"03\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n elif month == \"04\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于30了\")\n elif month == \"05\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n elif month == \"06\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于30了\")\n elif month == \"07\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n elif month == \"08\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n elif month == \"09\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于30了\")\n elif month == \"10\":\n if not 
str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n elif month == \"11\":\n if not str(day).startswith(\"0\") and int(day) > 30:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于30了\")\n elif month == \"12\":\n if not str(day).startswith(\"0\") and int(day) > 31:\n self.flag_value_death_day = True\n print(\"死亡时间-日错误!大于31了\")\n else:\n print(\"死亡时间-月份格式错误,不在设定 00-12 范围!\")\n self.flag_value_death_month = True\n return self.flag_value_death_year, self.flag_value_death_month, self.flag_value_death_day\n\n def value_check_birthday_death_date(self):\n\n \"\"\"\n Type_error:\n 1、出生时间在死亡时间之后;\n 2、死亡时间与出生时间相差>100岁;\n 3、根据死亡时间与出生时间得出的年龄与实际给出的age字段值相差大于2的;\n 4、有出生和死亡时间但age为空的;\n 5、年龄age<10;\n \"\"\"\n\n if self.time_of_birth and self.time_of_death:\n if str(self.time_of_death).startswith(\"-\"): # 公元前 去世\n time_of_death_ = self.time_of_death[1:-1]\n year_death = time_of_death_.split(\"-\")[0]\n time_of_birth_ = self.time_of_birth[1:-1]\n year_birth = time_of_birth_.split(\"-\")[0]\n age = int(year_birth) - int(year_death)\n else: # 公元后 去世\n if str(self.time_of_birth).startswith(\"-\"): # 公元前 出生\n time_of_birth_ = self.time_of_birth[1:-1]\n year_birth = time_of_birth_.split(\"-\")[0]\n year_death = self.time_of_death.split(\"-\")[0]\n age = int(year_birth) + int(year_death)\n else: # 公元后 出生\n year_birth = self.time_of_birth.split(\"-\")[0]\n year_death = self.time_of_death.split(\"-\")[0]\n age = int(year_death) - int(year_birth)\n if int(age) > 100:\n self.flag_age_greater_than_100 = True\n # print(age)\n elif int(age) <= 0:\n self.flag_age_less_than_0 = True\n elif int(age) > 0 and int(age) <= 10:\n self.flag_age_greater_than_0_and_less_than_10 = True\n\n # 计算年龄差值\n if self.data_dict[\"age\"]:\n age_diff = abs(int(age) - int(self.data_dict[\"age\"]))\n if age_diff > 2:\n print(\"basic_info_error_根据死亡与出生时间计算年龄与现存age字段值相差大于2\")\n self.flag_age_diff_greater_than_2 = True\n else:\n print(\"ERROR! 
有出生和死亡时间但age为空\")\n self.flag_age_noexist = True\n\n return self.flag_age_greater_than_100, self.flag_age_less_than_0, \\\n self.flag_age_greater_than_0_and_less_than_10, \\\n self.flag_age_diff_greater_than_2, self.flag_age_noexist\n\n def value_check_introduction(self):\n \"\"\"\n 限制:简介不能为空!\n :return:\n \"\"\"\n\n if not self.introduction:\n self.flag_introduction_noexist = True\n return self.flag_introduction_noexist\n\n def value_check_all_name(self):\n \"\"\"\n 限制:姓名不能为空!\n :return:\n \"\"\"\n\n if not self.all_name:\n self.flag_all_name_noexist = True\n return self.flag_all_name_noexist\n\n def is_leap_year(self, year):\n \"\"\"\n 闰年判断\n :param year:\n :return:\n \"\"\"\n # year = int(input(\"输入一个年份: \"))\n if (year % 4) == 0:\n if (year % 100) == 0:\n if (year % 400) == 0:\n print(\"{0} 是闰年\".format(year)) # 整百年能被400整除的是闰年\n return True\n else:\n print(\"{0} 不是闰年\".format(year))\n return False\n else:\n print(\"{0} 是闰年\".format(year)) # 非整百年能被4整除的为闰年\n return True\n else:\n print(\"{0} 不是闰年\".format(year))\n return False\n","sub_path":"version1.0/data_check.py","file_name":"data_check.py","file_ext":"py","file_size_in_byte":19659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"94092399","text":"\n# coding: utf-8\n\n# In[7]:\n\n\nimport pandas as pd\n\na = [1, 2, 3]\nb = [3, 4, 5]\nc = [5, 6, 7]\nd = [7, 8, 9]\n\ndf2 = pd.DataFrame([a, b, c, d], index=[\"a\", \"b\", \"c\", \"d\"], columns = [\"x\", \"y\", \"z\"])\ndf2.info()\n\n\n# In[9]:\n\n\nimport os\nos.chdir(\"D:\\GreyAtom\\Datasets\")\n\n\n# In[10]:\n\n\nweather = pd.read_csv(\"weather_small_2012_1.csv\")\nweather.info()\n\n\n# In[11]:\n\n\nweather['Temp (C)'].unique()\n\n\n# In[12]:\n\n\nweather['Temp (C)'].nunique()\n\n\n# In[13]:\n\n\nweather = pd.read_csv(\"weather_small_2012.csv\")\nweather.info()\n\n\n# In[14]:\n\n\nweather.head()\n\n\n# In[16]:\n\n\nweather[\"Dew Point Temp (C)\"].value_counts()\n\n\n# In[19]:\n\n\nipl = pd.read_csv(\"ipl_matches_small.csv\")\nipl.head()\n\n\n# In[24]:\n\n\nipl[[\"toss_winner\", \"toss_decision\"]][1:11]\n\n\n# In[26]:\n\n\nweather.head()\n\n\n# In[29]:\n\n\nv1 = weather.loc[(weather[\"Wind Spd (km/h)\"] > 24) & (weather[\"Visibility (km)\"] > 25)] \nv1\n\n\n# In[30]:\n\n\nv1 = weather.loc[(weather[\"Wind Spd (km/h)\"] > 24) | (weather[\"Visibility (km)\"] > 25)] \nv1\n\n","sub_path":"Python - Pandas - Recap - weather_small_2012_1 Dataset.py","file_name":"Python - Pandas - Recap - weather_small_2012_1 Dataset.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"241858662","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport time\r\nimport re\r\nimport csv\r\nimport webbrowser\r\nimport math \r\nimport pandas as pd\r\n\r\nLARGE_FONT = (\"Verdana\", 12)\r\nNORM_FONT = (\"Verdana\", 10)\r\nSMALL_FONT = (\"Verdana\", 8)\r\n\r\n\r\ndef wuxiaScript(link, chapter_number):\r\n url = link + str(chapter_number)\r\n #url = \"https://wuxiaworld.com/novel/overgeared/og-chapter-\" + chapterNumber\r\n try:\r\n data_extracted = requests.get(url)\r\n except:\r\n return False, chapter_number, url\r\n if data_extracted.status_code != 200:\r\n return False, chapter_number, url\r\n soup = BeautifulSoup(data_extracted.text, 'html.parser')\r\n title = soup.select(\r\n \"#content-container > div.section > div > div.panel.panel-default > div:nth-child(1) > div.caption.clearfix > div:nth-child(3) > 
h4\")\r\n if str(title) == \"[]\":\r\n return False, chapter_number, url\r\n if re.search(\"\\\\(Teaser\\\\)\", str(title)) is not None:\r\n return False, chapter_number, url\r\n next_chapter_number = int(chapter_number) + 1\r\n return True, next_chapter_number, url\r\n\r\n\r\ndef mangadexScript(link, chapter_number):\r\n try:\r\n data_extracted = requests.get(link)\r\n except:\r\n return False, chapter_number, link\r\n if data_extracted.status_code != 200:\r\n return False, chapter_number, link\r\n soup = BeautifulSoup(data_extracted.text, 'html.parser')\r\n\r\n row_number = 2\r\n while(True):\r\n flag = \"\"\r\n try:\r\n flag = soup.select(\"#content > div.edit.tab-content > div > div:nth-child(\" + str(row_number) + \") > div > div > div.chapter-list-flag.col-auto.text-center.order-lg-4 > span\")\r\n except: \r\n return False, chapter_number, link\r\n\r\n if str(flag) == \"[]\":\r\n return False, chapter_number, link\r\n\r\n if row_number > 30 or (re.search(\"title=\\\"English\\\"\", str(flag)) is not None):\r\n break\r\n row_number += 1\r\n \r\n \r\n last_chapter_element = soup.select(\r\n \"#content > div.edit.tab-content > div > div:nth-child(\" + str(row_number) + \") > div > div > div.col.col-lg-5.row.no-gutters.align-items-center.flex-nowrap.text-truncate.pr-1.order-lg-2 > a\"\r\n ) #content > div.edit.tab-content > div > div:nth-child(3)> div > div > div.chapter-list-flag.col-auto.text-center.order-lg-4 > span\r\n\r\n last_chapter_element_trimmed = re.search(\"Ch\\. \\d{1,4}(\\.\\d)?\", str(last_chapter_element))\r\n last_chapter_number = str(last_chapter_element_trimmed.group(0))[4:]\r\n\r\n if last_chapter_number != str(chapter_number):\r\n url = \"https://mangadex.org\" + str(re.search(\"href=\\\"/chapter/\\d{1,}\\\"\", str(last_chapter_element)).group(0)[6:-1])\r\n return True, last_chapter_number, url\r\n else:\r\n return False, chapter_number, link\r\n \r\n\r\ndef leviatanScript(link, chapter_number):\r\n try:\r\n data_extracted = requests.get(link)\r\n except:\r\n return False, chapter_number, link\r\n if data_extracted.status_code != 200:\r\n return False, chapter_number, link\r\n soup = BeautifulSoup(data_extracted.text, 'html.parser')\r\n\r\n last_chapter_element = \"\"\r\n\r\n try:\r\n last_chapter_element = soup.select(\"#content > div > div.row > div.col-lg-9.col-md-8.col-xs-12.text-muted > div.row.py-2 > div > div.card.p-4 > div > div:nth-child(1) > div > a.item-author.text-color\")\r\n except:\r\n return False, chapter_number, link\r\n\r\n last_chapter_element_trimmed = re.search(\"Chapter \\d{1,4}\", str(last_chapter_element))\r\n last_chapter_number = str(last_chapter_element_trimmed.group(0))[8:]\r\n\r\n if last_chapter_number != str(chapter_number):\r\n url = str(re.search(\"href=\\\".*\\\"\", str(last_chapter_element)).group(0)[6:-1])\r\n return True, last_chapter_number, url\r\n else:\r\n return False, chapter_number, link\r\n\r\n\r\ndef mangakakalotScript(link, chapter_number):\r\n try:\r\n data_extracted = requests.get(link)\r\n except:\r\n return False, chapter_number, link\r\n if data_extracted.status_code != 200:\r\n return False, chapter_number, link\r\n soup = BeautifulSoup(data_extracted.text, 'html.parser')\r\n\r\n last_chapter_element = \"\"\r\n\r\n try:\r\n last_chapter_element = soup.select(\"body > div.body-site > div.container.container-main > div.container-main-left > div.panel-story-chapter-list > ul > li:nth-child(1) > a\")\r\n except:\r\n return False, chapter_number, link\r\n\r\n last_chapter_element_trimmed = re.search(\"Chapter 
\\d{1,4}(\\.\\d)?\", str(last_chapter_element))\r\n last_chapter_number = str(last_chapter_element_trimmed.group(0))[8:]\r\n if last_chapter_number != str(chapter_number):\r\n url = str(re.search(\"href=\\\"[^\\\"]*\\\"\", str(last_chapter_element)).group(0)[6:-1])\r\n return True, last_chapter_number, url\r\n else:\r\n return False, chapter_number, link\r\n\r\n\r\ndef webtoonsScript(link, chapter_number):\r\n try:\r\n data_extracted = requests.get(link)\r\n except:\r\n return False, chapter_number, link\r\n if data_extracted.status_code != 200:\r\n return False, chapter_number, link\r\n soup = BeautifulSoup(data_extracted.text, 'html.parser')\r\n\r\n last_chapter_element = \"\"\r\n last_chapter_url_element = \"\"\r\n\r\n try:\r\n last_chapter_element = soup.select(\"#_listUl > li:nth-child(1) > a > span.tx\")\r\n last_chapter_url_element = soup.select(\"#_listUl > li:nth-child(1) > a\")\r\n except:\r\n return False, chapter_number, link\r\n print(last_chapter_element)\r\n\r\n last_chapter_element_trimmed = re.search(\"#\\d{1,4}\", str(last_chapter_element))\r\n last_chapter_number = str(last_chapter_element_trimmed.group(0))[1:]\r\n if last_chapter_number != str(chapter_number):\r\n url = str(re.search(\"href=\\\"[^\\\"]*\\\"\", str(last_chapter_url_element)).group(0)[6:-1]).replace(\"amp;\", \"\")\r\n return True, last_chapter_number, url\r\n else:\r\n return False, chapter_number, link\r\n\r\n\r\n\r\ndef openBrowser(link, popup):\r\n webbrowser.open(link)\r\n popup.destroy()\r\n\r\n\r\ndef popupmsg(msg, link, count):\r\n popup = tk.Tk()\r\n popup.wm_title(\"New Chapter\")\r\n if count > 0:\r\n msg += \" (+\" + str(count) + \")\"\r\n label = ttk.Label(popup, text=msg, font=NORM_FONT)\r\n label.pack(side=\"top\", fill=\"x\", pady=10)\r\n B1 = ttk.Button(popup, text=\"Okay\", command=popup.destroy)\r\n B1.pack(side=\"left\")\r\n B2 = ttk.Button(popup, text=\"Go to website\", command= lambda: openBrowser(link, popup))\r\n B2.pack(side=\"right\")\r\n popup.mainloop()\r\n\r\n\r\ncsv_intervals_info = pd.read_csv(\"timeIntervals.csv\")\r\nstartUpTime = csv_intervals_info.values[0][0]\r\ntimeInterval = csv_intervals_info.values[0][1]\r\n\r\ntime.sleep(int(startUpTime))\r\nwhile True:\r\n\r\n csv_info = pd.read_csv(\"info.csv\", dtype={'nextChapter': object})\r\n csv_header = csv_info.columns\r\n\r\n rowNumber = 0\r\n for row in csv_info.values:\r\n #get csv info\r\n name = row[0]\r\n website = row[1]\r\n link = row[2]\r\n chapter = int(row[4]) if math.modf(float(row[4]))[0] == 0 else row[4]\r\n msg = str(row[3])\r\n tracking = str(row[5]) == \"True\"\r\n if(not tracking):\r\n continue\r\n is_new_chapter = False\r\n atleast_one_new_chapter = False\r\n start_chapter = chapter\r\n new_chapter = -1\r\n msg_new_chapter = -1\r\n true_link = \"\"\r\n msg_link = \"\"\r\n print(name)\r\n\r\n count = -1\r\n #check if there is a new manga\r\n while(True):\r\n if website == \"wuxia\":\r\n is_new_chapter, new_chapter, true_link = wuxiaScript(link, chapter)\r\n elif website == \"mangadex\":\r\n is_new_chapter, new_chapter, true_link = mangadexScript(link, chapter)\r\n elif website == \"leviatan\":\r\n is_new_chapter, new_chapter, true_link = leviatanScript(link, chapter)\r\n elif website == \"mangakakalot\":\r\n is_new_chapter, new_chapter, true_link = mangakakalotScript(link, chapter)\r\n elif website == \"webtoons\":\r\n is_new_chapter, new_chapter, true_link = webtoonsScript(link, chapter)\r\n else:\r\n continue\r\n\r\n if(not is_new_chapter):\r\n break\r\n \r\n chapter = new_chapter\r\n count += 1\r\n if 
count == 0:\r\n msg_new_chapter = new_chapter\r\n msg_link = true_link\r\n atleast_one_new_chapter = True \r\n if website == \"mangadex\" or website == \"leviatan\" or website == \"mangakakalot\":\r\n count = float(new_chapter) - float(start_chapter)\r\n \r\n if(atleast_one_new_chapter):\r\n if math.modf(float(new_chapter))[0] == 0:\r\n csv_info[\"nextChapter\"][rowNumber] = str(int(new_chapter))\r\n else:\r\n csv_info[\"nextChapter\"][rowNumber] = str(new_chapter)\r\n \r\n\r\n #message\r\n if atleast_one_new_chapter:\r\n if website == \"wuxia\":\r\n msg_new_chapter -= 1\r\n msg = msg.replace(\"###\", str(msg_new_chapter), -1)\r\n popupmsg(msg, msg_link, int(count - 1) if math.modf(count)[0] == 0 else count - 1)\r\n\r\n rowNumber += 1\r\n\r\n\r\n csv_info.to_csv(\"info.csv\", index=False)\r\n time.sleep(int(timeInterval))\r\n","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":9364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"109721016","text":"import json\nimport random\n\nimport ara.models as m\nfrom ara.models import db\n\n\ndef ansible_run(complete=True, gather_facts=True, ara_record=False):\n '''Simulate a simple Ansible run by creating the\n expected database objects. This roughly approximates the\n following playbook:\n\n - hosts: host-\n gather_facts: true\n tasks:\n - test-action:\n when: not ara_record\n - ara_record:\n key: 'test key'\n value: 'test value'\n when: ara_record\n\n Where `` is a random integer generated each time this\n function is called.\n\n Set the `complete` parameter to `False` to simulate an\n aborted Ansible run.\n Set the `gathered_facts` parameter to `False` to simulate a run with no\n facts gathered.\n Set the `ara_record` parameter to `True` to simulate a run with an\n ara_record task.\n '''\n\n playbook = m.Playbook(path='testing.yml')\n play = m.Play(playbook=playbook, name='test play')\n host = m.Host(name='host-%04d' % random.randint(0, 9999),\n playbook=playbook)\n\n if ara_record:\n task = m.Task(play=play, playbook=playbook, action='ara_record')\n msg = 'Data recorded in ARA for this playbook.'\n else:\n task = m.Task(play=play, playbook=playbook, action='test-action')\n msg = 'This is a test'\n\n result = m.TaskResult(task=task, status='ok', host=host, result=msg)\n\n ctx = dict(\n playbook=playbook,\n play=play,\n task=task,\n host=host,\n result=result)\n\n if gather_facts:\n facts = m.HostFacts(host=host, values='{\"fact\": \"value\"}')\n ctx['facts'] = facts\n\n if ara_record:\n data = m.Data(playbook=playbook, key='test key', value='test value')\n ctx['data'] = data\n\n for obj in ctx.values():\n if hasattr(obj, 'start'):\n obj.start()\n db.session.add(obj)\n\n db.session.commit()\n\n if complete:\n stats = m.Stats(playbook=playbook, host=host)\n ctx['stats'] = stats\n db.session.add(stats)\n ctx['playbook'].complete = True\n\n for obj in ctx.values():\n if hasattr(obj, 'stop'):\n obj.stop()\n\n db.session.commit()\n\n return ctx\n","sub_path":"tests/unit/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"554785222","text":"\"\"\" routes \"\"\"\nfrom ptah import config\nfrom pyramid.config.util import make_predicates\nfrom pyramid.request import route_request_iface\nfrom pyramid.urldispatch import RoutesMapper\nfrom pyramid.interfaces import IRequest, IRouteRequest, IRoutesMapper\n\n\ndef register_route(name, 
pattern=None, factory=None, header=None,\n traverse=None, pregenerator=None, use_global_views=False,\n xhr=False, request_method=None,\n path_info=None, request_param=None,\n accept=None, custom_predicates=()):\n\n # these are route predicates; if they do not match, the next route\n # in the routelist will be tried\n ignored, predicates, ignored = make_predicates(\n xhr=xhr,\n request_method=request_method,\n path_info=path_info,\n request_param=request_param,\n header=header,\n accept=accept,\n traverse=traverse,\n custom=custom_predicates)\n\n info = config.DirectiveInfo()\n info.attach(\n config.Action(\n register_route_impl,\n (name,pattern,factory,predicates,pregenerator,use_global_views),\n discriminator = ('ptah.view:route', name),\n order = 1))\n\n\ndef register_route_impl(cfg, name, pattern, factory,\n predicates, pregenerator, use_global_views):\n request_iface = cfg.registry.queryUtility(IRouteRequest, name=name)\n if request_iface is None:\n if use_global_views:\n bases = (IRequest,)\n else:\n bases = ()\n request_iface = route_request_iface(name, bases)\n cfg.registry.registerUtility(request_iface, IRouteRequest, name=name)\n\n mapper = cfg.registry.queryUtility(IRoutesMapper)\n if mapper is None:\n mapper = RoutesMapper()\n cfg.registry.registerUtility(mapper, IRoutesMapper)\n\n return mapper.connect(name, pattern, factory, predicates=predicates,\n pregenerator=pregenerator, static=False)\n","sub_path":"ptah/view/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"212515546","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef compute_sum_of_hourglass( arr, i, j):\n # i, j is upper-left anchor point of hourglass\n\n\n height, width = len(arr), len( arr[0] )\n\n sum_value = 0\n\n # boundary checking\n if i < 0 or j < 0 or (i+2) > (height-1) or (j+2) > (width-1):\n return None\n\n \n sum_value += sum( arr[ i ][ j:j+3 ] )\n sum_value += sum( arr[ i+2 ][ j:j+3 ] )\n sum_value += arr[ i+1 ][ j+1 ]\n\n return sum_value\n\n\n# Complete the hourglassSum function below.\ndef hourglassSum(arr):\n\n hourglass = []\n\n for i in range( len(arr) - 1):\n for j in range( len(arr[0]) - 1) :\n\n result = compute_sum_of_hourglass(arr, i, j)\n\n if result is not None:\n hourglass.append( result )\n \n #print( hourglass )\n return max( hourglass )\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n arr = []\n\n for _ in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n\n result = hourglassSum(arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"Data Structures/Array/2D Array - DS/2D_array_ds.py","file_name":"2D_array_ds.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"289159756","text":"# -*- coding: utf-8 -*-\nfrom smart_core.rest import RestfulController\nfrom smart_core.middleware import get_current_user\nfrom smart_finance.twitterbot.models import User\nfrom datetime import datetime\nfrom django.db.models import Sum\n\n\n@RestfulController.register('api/twitterbot\\-user')\nclass UserController(RestfulController):\n\n _model = User\n\n @RestfulController.expose('instance/?$')\n def instance(self, request):\n rst = {}\n\n user, create = User.objects.get_or_create(user=request.user)\n\n if create:\n rst.update(created=True, success=True)\n\n 
rst.update(self.model_to_dict(user))\n\n return rst\n\n def query(self):\n query = RestfulController.query(self)\n return query.filter(user=get_current_user())\n\n def model_to_dict(self, ins):\n rst = super(UserController, self).model_to_dict(ins)\n\n rst.update(\n twitter_screen_name=ins.twitter_screen_name,\n twitter_id=int(ins.twitter_id or 0),\n user=ins.user.pk if ins.user else None,\n user_str=str(ins.user) if ins.user else None,\n )\n\n return rst\n","sub_path":"src/smart_finance/twitterbot/api/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"520551359","text":"# Copyright (c) 2019 Remi Salmon\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\n\ndef sigmoid(x):\n return(1.0/(1.0+np.exp(-x)))\n\ndef d_sigmoid(x):\n f = sigmoid(x)\n return(f*(1.0-f))\n\ndef relu(x):\n return((x > 0)*x)\n\ndef d_relu(x):\n return((x > 0)*1.0)\n\ndef linear(x):\n return(x)\n\ndef d_linear(x):\n return(1.0)\n\ndef softmax(x):\n x = x-x.max()\n return(np.exp(x)/(np.sum(np.exp(x), axis = 0)))\n\ndef d_cost_MSE(y_hat, y):\n return(y_hat-y) # df/dy_hat of f = MSE(y_hat) = (1/2)*np.power(y_hat-y, 2)\n\ndef d_costactivation_BCE_sigmoid(y_hat, y):\n return(y_hat-y) # df/dy_hat of f = BCE(sigmoid()) with BCE(y_hat) = -(y*np.log(y_hat)+(1-y)*np.log(1-y_hat)))\n\ndef d_costactivation_CE_softmax(y_hat, y):\n return(y_hat-y) # df/dy_hat of f = CE(softmax()) with CE(y_hat) = -sum(y*np.log(y_hat)))\n\ndef nnnn_accuracy(Y_hat, Y, one_hot = True): # compute nnnn accuracy\n accuracy = 0\n\n n = Y.shape[1]\n\n for i in range(n):\n if one_hot: # classification accuracy\n if np.all((Y_hat[:, i] > 0.5)*1 == Y[:, i]):\n accuracy += 1\n else: # regression accuracy\n accuracy += 1-np.linalg.norm(Y_hat[:, i]-Y[:, i])/np.linalg.norm(Y[:, i])\n\n accuracy = accuracy/n\n\n return(accuracy)\n\ndef nnnn_init(nnnn_structure): # initialize nnnn weights and biases gradient matrices\n w = {}\n b = {}\n\n for i in np.arange(1, len(nnnn_structure)):\n w[i] = np.random.randn(nnnn_structure[i]['nodes'], nnnn_structure[i-1]['nodes'])*np.sqrt(2/(nnnn_structure[i]['nodes']+nnnn_structure[i-1]['nodes']))\n b[i] = np.zeros((nnnn_structure[i]['nodes'], 1))\n\n return(w, b)\n\ndef nnnn_forward(x, w, b, nnnn_structure): # compute nnnn output\n z_hist = {}\n a_hist = {}\n\n a = x\n a_hist[0] = a\n\n for i in np.arange(1, len(nnnn_structure)):\n activation = nnnn_structure[i]['activation']\n\n 
z = np.dot(w[i], a)+b[i]\n a = activation(z)\n\n z_hist[i] = z\n a_hist[i] = a\n\n return(a, z_hist, a_hist)\n\ndef nnnn_grad(x, y, w, b, nnnn_structure, nnnn_cost): # compute nnnn output + weights and biases gradient matrices\n y_hat, z_hist, a_hist = nnnn_forward(x, w, b, nnnn_structure)\n\n dw = {}\n db = {}\n\n for i in reversed(np.arange(1, len(nnnn_structure))):\n if i == len(nnnn_structure)-1:\n if nnnn_structure[i]['activation'] == softmax:\n delta = d_costactivation_CE_softmax(y_hat, y)\n\n elif nnnn_structure[i]['activation'] == sigmoid:\n if nnnn_cost == 'BCE':\n delta = d_costactivation_BCE_sigmoid(y_hat, y)\n\n elif nnnn_cost == 'MSE':\n delta = d_cost_MSE(y_hat, y)*d_sigmoid(z_hist[i])\n\n elif nnnn_structure[i]['activation'] == relu:\n delta = d_cost_MSE(y_hat, y)*d_relu(z_hist[i])\n\n elif nnnn_structure[i]['activation'] == linear:\n delta = d_cost_MSE(y_hat, y)*d_linear(z_hist[i])\n\n else:\n if nnnn_structure[i]['activation'] == sigmoid:\n d_activation = d_sigmoid\n\n elif nnnn_structure[i]['activation'] == relu:\n d_activation = d_relu\n\n elif nnnn_structure[i]['activation'] == linear:\n d_activation = d_linear\n\n delta = np.dot(w[i+1].T, delta)*d_activation(z_hist[i])\n\n dw[i] = np.dot(delta, a_hist[i-1].T)\n db[i] = delta\n\n return(y_hat, dw, db)\n\ndef nnnn_train(X, Y, alpha, iterations, w, b, nnnn_structure, nnnn_cost = None): # train nnnn with X = [nb_dimensions, nb_samples] Y = [nb_classes, nb_samples], nnnn_structure = [size_layer1, ...]\n Y_hat = np.zeros(Y.shape)\n\n accuracy_hist = np.zeros(iterations)\n\n one_hot = True if nnnn_structure[-1]['activation'] in [softmax, sigmoid] else False\n\n n = Y.shape[1]\n\n for i in range(iterations):\n for j in np.random.permutation(n):\n x = X[:, j].reshape((-1, 1)) # reshape because NumPy\n y = Y[:, j].reshape((-1, 1)) # reshape because NumPy\n\n y_hat, dw, db = nnnn_grad(x, y, w, b, nnnn_structure, nnnn_cost)\n\n Y_hat[:, j] = y_hat.reshape((1, -1)) # reshape because NumPy\n\n for k in np.arange(1, len(nnnn_structure)):\n w[k] = w[k]-alpha*dw[k]\n b[k] = b[k]-alpha*db[k]\n\n accuracy_hist[i] = nnnn_accuracy(Y_hat, Y, one_hot)\n\n print('iteration '+str(i+1)+'/'+str(iterations)+', accuracy = '+str(accuracy_hist[i]))\n\n return(w, b, accuracy_hist)\n\ndef nnnn_test(x, w, b, nnnn_structure): # compute nnnn output\n a = x\n\n for i in np.arange(1, len(nnnn_structure)):\n activation = nnnn_structure[i]['activation']\n\n z = np.dot(w[i], a)+b[i]\n a = activation(z)\n\n return(a)\n","sub_path":"nnnn.py","file_name":"nnnn.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"183516719","text":"#!/usr/bin/env python3\n\n\"\"\"Fetch results from Terra\n\"\"\"\n\nimport argparse\nimport csv\nimport collections\nimport concurrent.futures\nimport contextlib\nimport copy\nimport functools\nimport glob\nimport gzip\nimport io\nimport json\nimport logging\nimport multiprocessing\nimport operator\nimport os\nimport os.path\nimport pathlib\nimport random\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\n#print(fiss.meth_list(args=argparse.Namespace()))\nimport firecloud.api as fapi\n\n# * Utils\n\n_log = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s')\n\nMAX_INT32 = (2 ** 31)-1\n\ndef dump_file(fname, value):\n \"\"\"store string in file\"\"\"\n with open(fname, 'w') as out:\n out.write(str(value))\n\ndef 
_pretty_print_json(json_val, sort_keys=True):\n \"\"\"Return a pretty-printed version of a dict converted to json, as a string.\"\"\"\n return json.dumps(json_val, indent=4, separators=(',', ': '), sort_keys=sort_keys)\n\ndef _write_json(fname, json_val):\n dump_file(fname=fname, value=_pretty_print_json(json_val))\n\ndef _load_dict_sorted(d):\n return collections.OrderedDict(sorted(d.items()))\n\ndef _json_loads(s):\n return json.loads(s.strip(), object_hook=_load_dict_sorted, object_pairs_hook=collections.OrderedDict)\n\ndef _json_loadf(fname):\n return _json_loads(slurp_file(fname))\n\ndef slurp_file(fname, maxSizeMb=50):\n \"\"\"Read entire file into one string. If file is gzipped, uncompress it on-the-fly. If file is larger\n than `maxSizeMb` megabytes, throw an error; this is to encourage proper use of iterators for reading\n large files. If `maxSizeMb` is None or 0, file size is unlimited.\"\"\"\n fileSize = os.path.getsize(fname)\n if maxSizeMb and fileSize > maxSizeMb*1024*1024:\n raise RuntimeError('Tried to slurp large file {} (size={}); are you sure? Increase `maxSizeMb` param if yes'.\n format(fname, fileSize))\n with open_or_gzopen(fname) as f:\n return f.read()\n\ndef open_or_gzopen(fname, *opts, **kwargs):\n mode = 'r'\n open_opts = list(opts)\n assert type(mode) == str, \"open mode must be of type str\"\n\n # 'U' mode is deprecated in py3 and may be unsupported in future versions,\n # so use newline=None when 'U' is specified\n if len(open_opts) > 0:\n mode = open_opts[0]\n if sys.version_info[0] == 3:\n if 'U' in mode:\n if 'newline' not in kwargs:\n kwargs['newline'] = None\n open_opts[0] = mode.replace(\"U\",\"\")\n\n # if this is a gzip file\n if fname.endswith('.gz'):\n # if text read mode is desired (by spec or default)\n if ('b' not in mode) and (len(open_opts)==0 or 'r' in mode):\n # if python 2\n if sys.version_info[0] == 2:\n # gzip.open() under py2 does not support universal newlines\n # so we need to wrap it with something that does\n # By ignoring errors in BufferedReader, errors should be handled by TextIoWrapper\n return io.TextIOWrapper(io.BufferedReader(gzip.open(fname)))\n\n # if 't' for text mode is not explicitly included,\n # replace \"U\" with \"t\" since under gzip \"rb\" is the\n # default and \"U\" depends on \"rt\"\n gz_mode = str(mode).replace(\"U\",\"\" if \"t\" in mode else \"t\")\n gz_opts = [gz_mode]+list(opts)[1:]\n return gzip.open(fname, *gz_opts, **kwargs)\n else:\n return open(fname, *open_opts, **kwargs)\n\ndef available_cpu_count():\n \"\"\"\n Return the number of available virtual or physical CPUs on this system.\n The number of available CPUs can be smaller than the total number of CPUs\n when the cpuset(7) mechanism is in use, as is the case on some cluster\n systems.\n\n Adapted from http://stackoverflow.com/a/1006301/715090\n \"\"\"\n\n cgroup_cpus = MAX_INT32\n try:\n def get_cpu_val(name):\n return float(slurp_file('/sys/fs/cgroup/cpu/cpu.'+name).strip())\n cfs_quota = get_cpu_val('cfs_quota_us')\n if cfs_quota > 0:\n cfs_period = get_cpu_val('cfs_period_us')\n _log.debug('cfs_quota %s, cfs_period %s', cfs_quota, cfs_period)\n cgroup_cpus = max(1, int(cfs_quota / cfs_period))\n except Exception as e:\n pass\n\n proc_cpus = MAX_INT32\n try:\n with open('/proc/self/status') as f:\n status = f.read()\n m = re.search(r'(?m)^Cpus_allowed:\\s*(.*)$', status)\n if m:\n res = bin(int(m.group(1).replace(',', ''), 16)).count('1')\n if res > 0:\n proc_cpus = res\n except IOError:\n pass\n\n _log.debug('cgroup_cpus %d, proc_cpus %d, 
multiprocessing cpus %d',\n cgroup_cpus, proc_cpus, multiprocessing.cpu_count())\n return min(cgroup_cpus, proc_cpus, multiprocessing.cpu_count())\n\ndef execute(action, **kw):\n succeeded = False\n try:\n _log.debug('Running command: %s', action)\n subprocess.check_call(action, shell=True, **kw)\n succeeded = True\n finally:\n _log.debug('Returned from running command: succeeded=%s, command=%s', succeeded, action)\n\nSEL_NAMESPACE='um1-encode-y2s1'\nSEL_WORKSPACE='selection-sim'\nTERRA_METHOD_NAME='test-cosi2-method-01'\nTERRA_CONFIG_NAME='dockstore-tool-cms2'\nTERRA_GS_BUCKET='fc-21baddbc-5142-4983-a26e-7d85a72c830b'\nGITHUB_REPO='dockstore-tools-cms2'\n\n","sub_path":"terra/fetch-results.py","file_name":"fetch-results.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"75446322","text":"from django.contrib import admin\nfrom customers.models import Customer,Contact\n\nclass ContactInline(admin.TabularInline):\n\tmodel=Contact\n\textra=3\n\nclass CustomerAdmin(admin.ModelAdmin):\n\n\tinlines=[ContactInline]\n\tlist_display=(\n\t\t'customer_name',\n\t\t'customer_tel',\n\t\t'customer_fox',\n\t\t'customer_address',\n\t\t'customer_email',\n\t\t'customer_bank',\n\t\t'customer_account',\n\t\t'customer_taxid',\n\t\t'customer_documents1',\n\t\t'customer_documents2',\n\t\t'customer_documents3',\n\t\t)\n\tlist_filter=['pub_date']\n\tsearch_fields=['name']\n\nadmin.site.register(Customer,CustomerAdmin)\n# Register your models here.\n","sub_path":"mol/customers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"250336137","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport random\n\n\ndef disable_urllib3_warning():\n \"\"\"\n https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning\n InsecurePlatformWarning 警告的临时解决方案\n \"\"\"\n try:\n import requests.packages.urllib3\n requests.packages.urllib3.disable_warnings()\n except Exception:\n pass\n\n\ndef generate_timestamp():\n \"\"\"生成 timestamp\n :return: timestamp string\n \"\"\"\n return int(time.time())\n\n\ndef generate_nonce():\n \"\"\"生成 nonce\n :return: nonce string\n \"\"\"\n return random.randrange(1000000000, 2000000000)\n\n\ndef convert_ext_to_mime(extension):\n \"\"\"将扩展名转换为 MIME 格式\n :return: mime string\n \"\"\"\n table = {\n 'jpg': 'image/jpeg',\n 'jpeg': 'image/jpeg',\n 'amr': 'audio/amr',\n 'mp3': 'audio/mpeg',\n 'mp4': 'video/mp4',\n }\n\n if extension in table:\n return table[extension]\n raise ValueError(\"Invalid extension in MIME table\")\n\n\ndef is_allowed_extension(extension, type='upload_media'):\n \"\"\"检查扩展名是否是可以上传到服务器\n :return: True if ok\n \"\"\"\n table = ('jpg', 'jpeg', 'amr', 'mp3', 'mp4')\n\n if extension in table:\n return True\n return False\n","sub_path":"wechat_sdk/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199467976","text":"# 문제6\n# 숨겨진 카드의 수를 맞추는 게임입니다.\n# 1-100까지의 임의의 수를 가진 카드를 한 장 숨기고 이 카드의 수를 맞추는 게임입니다.\n# 아래의 화면과 같이 카드 속의 수가 57인 경우를 보면 수를 맞추는 사람이 40이라고 입력하면 \"더 높게\",\n# 다시 75이라고 입력하면 \"더 낮게\" 라는 식으로 범위를 좁혀가며 수를 맞추고 있습니다.\n# 게임을 반복하기 위해 y/n이라고 묻고 n인 경우 종료됩니다.\n\nfrom random import *\nimport sys\n\nresult = randint(1,100)\nmin,max,cnt = 1,100,1\nwhile True:\n print('{0}~{1}'.format(min,max))\n num = input( '{0} >'.format(cnt))\n if 
not num.isdigit():\n        print('Please enter a number')\n        continue\n    num = int(num)\n    if result != num and result > num:\n        print('Higher')\n        min = num\n    elif result != num and result < num:\n        print('Lower')\n        max = num\n    else:\n        print('Correct!')\n        if 'y' == input('Try again? (y/n)'):\n            result = randint(1, 100)\n            min,max,cnt = 1,100,1\n            continue\n        sys.exit(0)\n    cnt = cnt + 1\n","sub_path":"prom06.py","file_name":"prom06.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"648811757","text":"class Settings:\r\n    def __init__(self):\r\n\r\n        # Toggle button: if True, crawl the playlists from search results;\r\n        # if False, crawl a single playlist\r\n\r\n        self.toggle = True\r\n        # Set search keywords\r\n        self.search_keyword = '中文说唱'\r\n        # Set result limit\r\n        self.result_limit = 50\r\n\r\n        # Set playlist url, only when crawling one playlist\r\n        self.playlist_url = 'https://music.163.com/#/discover/toplist?id=991319590'\r\n        self.playlist_title = '中文说唱'\r\n        # Set the strength of word segmentation filtering:\r\n        self.more = 'm'\r\n        # '' keeps everything, 'm' removes modal particles, 'e' removes English and modal particles,\r\n        # 'en' removes high-frequency English\r\n        # Print ranking\r\n        self.word_rank = True\r\n        self.num = 100\r\n\r\n        if self.toggle:\r\n            self.csv_fname = self.search_keyword\r\n        else:\r\n            self.csv_fname = self.playlist_title\r\n\r\n","sub_path":"6settings.py","file_name":"6settings.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"212817452","text":"# algorithm to determine if string has all unique characters\n\n\ndef is_unique(value: str) -> bool:  # this will be O(n^2)\n    # simple implementation, go through each char, and see if it's in the whole string\n    for char in value:\n        counter = 0\n        for test_char in value:\n            if test_char == char:\n                counter += 1\n            if counter > 1:\n                return False\n    return True\n\ndef is_unique3(value):\n    # with a hash table, this will be O(n)\n    hash_table = {}\n    for char in value:\n        if char in hash_table:\n            return False\n        hash_table[char] = char\n    return True\n\ndef is_unique2(value: str) -> bool:\n    # do a sort, then compare each element with the previous; if equal, not unique\n    # this will be O(n log n)\n    sorted_str = sorted(value)\n    for index in range(len(sorted_str)-1):\n\n        if sorted_str[index] == sorted_str[index+1]:\n            return False\n\n    return True\n\ndef main():\n    input_string = 'abcdefga'\n    print(is_unique(input_string))\n    print(is_unique2(input_string))\n    print(is_unique3(input_string))\n\n\nif __name__ == '__main__':\n    main()","sub_path":"cracking_code_interview/1string/1.1.py","file_name":"1.1.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"322676100","text":"# NOTE: the original mixed time() and timer() without importing either and had a\n# stray module-level return; assuming timeit's default_timer was intended for both.\nfrom timeit import default_timer as timer\n\n\ndef spiralize(n):\n    if n < 1 or n % 2 == 0:\n        return None\n    elif n == 1:\n        return 1\n    else:\n        numbers = [1]\n        numbers_needed = 2 * n - 1\n        increment = 2\n        while len(numbers) < numbers_needed:\n            increment = int(len(numbers) * 0.5 + 1.5)\n            for p in range(4):\n                numbers.append(numbers[-1] + increment)\n        return sum(numbers)\n\n\nif __name__ == \"__main__\":\n    start = timer()\n    ans = spiralize(501)\n    elapsed_time = (timer() - start) * 500\n    if ans:\n        print(\"Found Sum\")\n    else:\n        print(\"No answer found\")\n","sub_path":"spiral/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"146816923","text":"#! /usr/bin/env python3\nnode_name = \"HEIDENHAIN_ND287\"\n\nimport rclpy\nimport ogameasure\nimport time\nimport sys\nimport random\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import String\n\nclass ND287(object):\n    def __init__(self):\n        self.node = rclpy.create_node(node_name)\n\n        self.node.declare_parameter(\"~az_usbport\")\n        self.node.declare_parameter(\"~el_usbport\")\n        az_port = self.node.get_parameter(\"~az_usbport\").get_parameter_value().string_value\n        el_port = self.node.get_parameter(\"~el_usbport\").get_parameter_value().string_value\n\n        self.encorder_az = ogameasure.HEIDENHAIN.ND287(az_port)\n        self.encorder_el = ogameasure.HEIDENHAIN.ND287(el_port)\n\n        topic_name_az = '/dev/'+node_name+az_port\n        topic_name_az = topic_name_az.replace('_','/').replace('.','_')\n        topic_name_el = '/dev/'+node_name+el_port\n        topic_name_el = topic_name_el.replace('_','/').replace('.','_')\n\n        self.pub_az = self.node.create_publisher(Float64, topic_name_az, 1)\n        self.pub_el = self.node.create_publisher(Float64, topic_name_el, 1)\n        self.az = self.get_az()\n#        self.az = self.get_az_simu()\n\n        # loop (used instead of a thread)\n        self.node.create_timer(0.01,self.publish_az)\n        self.node.create_timer(0.01,self.publish_el)\n\n\n    def get_az(self):\n        _az = self.encorder_az.output_position_display_value()\n        az = float(_az.strip(b\"\\x02\\x00\\r\\n\").decode())\n        return az\n\n    def get_el(self):\n        _el = self.encorder_el.output_position_display_value()\n        el = float(_el.strip(b\"\\x02\\x00\\r\\n\").decode())\n        return el\n\n\n    def get_az_simu(self):\n        az = 180+random.random()\n        return az\n\n    def get_el_simu(self):\n        el = 45+random.random()\n        return el\n\n\n    def publish_el(self):\n        el = self.get_el()\n#        el = self.get_el_simu()\n        msg = Float64()\n        msg.data = float(el)\n        self.pub_el.publish(msg)\n\n\n    def publish_az(self):\n        count = 0\n        az = self.az\n        az2 = self.get_az()\n        # az2 = self.get_az_simu()\n        hensa = az2-az\n        if hensa > 100:  # 0->360\n            count = count - 1\n        elif hensa < -100:  # 360->0\n            count = count + 1\n        azaz = az2 + count*360\n        msg = Float64()\n        msg.data = float(azaz)\n        self.pub_az.publish(msg)\n        self.az = az2\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n    encorder = ND287()\n    rclpy.spin(encorder.node)\n    encorder.node.destroy_node()\n    rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"ros2pkg_ogameasure/HEIDENHAIN/ND287.py","file_name":"ND287.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"314435982","text":"\"\"\"\nconnect.py\n\nConnect two disks with a transversal, and return the graph which it forms as an immersion.\n\nBy connect we mean: take two disks, insert edges between corresponding vertices.\n\"\"\"\n\nimport collections\nimport copy\nimport logging\nimport pprint\n\nimport graph\n\nVS = collections.namedtuple(\"VertexSide\", \"vertex side\")\nLHE = collections.namedtuple(\"LabeledHalfEdge\", \"side base index\")\nTV = collections.namedtuple(\"TypeValue\", [\"type\", \"value\"])\n\ndef connect(dwt1, dwt2, twist):\n    g = graph.Graph()\n\n    half_edges = []\n\n    assert dwt1.circum == dwt2.circum, \"? 
%s %s\" % (dwt1, dwt2)\n if dwt1 == dwt2:\n dwt2 = copy.deepcopy(dwt2)\n assert dwt1 != dwt2\n\n dwt1.twist = 0\n dwt2.twist = twist\n dwts = {1: dwt1, 2: dwt2}\n\n for side, dwt in dwts.items():\n for tv in dwt.vertices():\n g.add_node(VS(tv, side))\n for tv2 in dwt.neighbors(tv):\n if tv2.type in [\"boundary\", \"interior\"]:\n if tv.value < tv2.value:\n g.add_edge(VS(tv, side), VS(tv2, side))\n if tv.value == tv2.value:\n g.add_edge(VS(tv, side), VS(tv2, side))\n else:\n assert tv2.type == \"half\", \"Weird type type = \" + repr(tv2.type)\n half_edges.append(LHE(side, tv, tv2.value))\n\n # sanity checks \n unique_half_edges = set((lhe.side, lhe.index) for lhe in half_edges)\n assert len(unique_half_edges) == len(half_edges), \"Half edges contain dups.\"\n del unique_half_edges\n \n while len(half_edges) > 0:\n lhe1 = half_edges.pop()\n\n side1 = lhe1.side\n side2 = 3 - side1\n tv1 = lhe1.base\n count, i = search(dwts[side1], dwts[side2], lhe1.index)\n\n # it might be on side1 or side2 based on how many hops.\n match_side = side1\n if count % 2 == 1:\n match_side = side2\n\n [lhe2] = [l for l in half_edges if l.side == match_side and l.index == i]\n tv2 = lhe2.base\n assert lhe2 in half_edges, \"Connects to non-existing half-edge.\"\n half_edges.remove(lhe2)\n logging.debug(\">>> vertex being inserted = \" + str((lhe1, lhe2)))\n g.add_edge(VS(tv1, side1), VS(tv2, match_side))\n\n return g\n\n\ndef search(dwt1, dwt2, i1):\n flag, i2 = dwt2.next(i1)\n if flag == \"exit\":\n count, result = search(dwt2, dwt1, i2)\n return count+1, result\n if flag.startswith(\"vertex\"):\n return 1, i2\n assert False\n\n\ndef compatible(dwt1, dwt2, twist):\n dwt1.twist = 0\n dwt2.twist = twist\n for i in range(dwt1.circum):\n if dwt1.is_deg2(i) and dwt2.is_deg2(i):\n return False\n return True\n\ndef fuse(dwt1, dwt2, twist):\n if not compatible(dwt1, dwt2, twist):\n return None\n\n g = graph.Graph()\n\n half_edges = []\n\n dwt1.twist = 0\n dwt2.twist = twist\n dwts = {1: dwt1, 2: dwt2}\n\n edges = []\n half_edges = []\n boundary = []\n\n for side, dwt in dwts.items():\n for j in dwt.pd.interior_vertices:\n v = TV(\"interior\", j)\n for w in dwt.i_neighbors(v):\n if w.type == \"interior\":\n if v < w:\n g.add_edge(VS(v, side), VS(w, side))\n elif w.type == \"half\":\n half_edges.append(LHE(side, v, w.value))\n else:\n assert False\n\n for i in range(dwt1.circum):\n for side, dwt in dwts.items():\n if dwt.is_deg2(i):\n boundary.append(i)\n half_edges.append(LHE(side, TV(\"boundary\", i), i))\n\n assert len(set(boundary)) == len(boundary)\n\n # walk around the fused boundary.\n for i in range(len(boundary)):\n i1 = boundary[i]\n i2 = boundary[(i+1) % len(boundary)]\n g.add_edge(VS(TV(\"boundary\", i1), -1), VS(TV(\"boundary\", i2), -1))\n\n\n total_hops = 0\n while len(half_edges) > 0:\n lhe1 = half_edges.pop()\n\n side1 = lhe1.side\n side2 = 3 - side1\n tv1 = lhe1.base\n count, i = search(dwts[side1], dwts[side2], lhe1.index)\n\n total_hops += count\n\n # it might be on side1 or side2 based on how many hops.\n match_side = side1\n if count % 2 == 1:\n match_side = side2\n if len([l for l in half_edges if l.index == i]) != 1:\n print(\"he\", half_edges)\n print([l for l in half_edges if l.index == i])\n quit()\n\n [lhe2] = [l for l in half_edges if l.index == i]\n tv2 = lhe2.base\n assert lhe2 in half_edges, \"Connects to non-existing half-edge.\"\n half_edges.remove(lhe2)\n logging.debug(\">>> vertex being inserted = \" + str((lhe1, lhe2)))\n\n if tv1.type == \"boundary\":\n side1 = -1\n if tv2.type == 
\"boundary\":\n match_side = -1\n g.add_edge(VS(tv1, side1), VS(tv2, match_side))\n \n return g\n\n\n","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"389290538","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\nimport os\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nimport glob\nimport plot_lib as pl\n\n# Read voltage data.\n#start_time = datetime(2022, 7, 22, 11, 0, 0).timestamp() # Measurement start time in JST\n#datetime_start = datetime(2023, 2, 7, 12, 0, 0) # Measurement start time in JST\n\n## Input File Data\n# Multiple File Reading\n\nt1_s, offset_uV = pl.get_volt(glob.glob('log60.*.csv'))\n\n# Read temperature data\nt2_s,temperature_degC = pl.get_temp(glob.glob('52186B01.*.csv'))\n\n# Index Slice for Cut off extra data\nt1_l, t2_l = pl.multiple_slices(t1_s, t2_s, 60)\n\ndata_C=pl.join_slices(temperature_degC,t2_l)\n#print(f'# Deley(s):{abs(t1_s[t1_l][0]-t2_s[t2_l][0])}')\n\n#-------------------------------------------\n\nt1_c = pl.join_slices(t1_s,t1_l)\nfig = plt.figure(figsize=(14.4, 7.2))\nfig.suptitle('NI-9252 Offset Voltage vs Temperature(Least Squares Fitting)('+str(datetime.fromtimestamp(round(t1_c[0])))+')')\n\nprint('# Ch, mean(uV), rate(uV/C)')\nfor i in range(0,4):\n data_V=pl.join_slices(offset_uV[i],t1_l)\n ax1 = fig.add_subplot(2, 2, i+1)\n ax1.plot(data_C, data_V,'b.')\n if i == 2 or i == 3:\n ax1.set_xlabel('Temperature [deg C]')\n else:\n ax1.set_xlabel('')\n ax1.set_ylabel('Offset Voltage [uV]', color = 'blue')\n# ax1.set_xlim(14, 40)\n# ax1.set_ylim(floor_uV[i], floor_uV[i] + 100)\n parameter0 = [0.,0.]\n result = optimize.leastsq(pl.fit_func,parameter0,args=(data_C,data_V))\n a_fit=result[0][0]\n b_fit=result[0][1]\n print(i,data_V.mean(),a_fit)\n ax1.set_title(f'Ch {i+1}' )\n ax1.plot(data_C,a_fit*data_C+b_fit,'r-', label='fitted line')\n ax1.text(ax1.get_xlim()[0],ax1.get_ylim()[-1],f'[Rate: {a_fit:.3f} (uV/C)]')\n \nfig.savefig('plot_fit.png')\nplt.show()\n","sub_path":"opt/pyplot/plot_fit4ch.py","file_name":"plot_fit4ch.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163171448","text":"import os\nimport random\nimport pytest\nimport pathlib\nimport tempfile\nimport numpy as np\nimport pandas as pd\n\nfrom cytominer_eval.transform import metric_melt\nfrom cytominer_eval.operations import precision_recall\n\nrandom.seed(123)\ntmpdir = tempfile.gettempdir()\n\n# Load CRISPR dataset\nexample_file = \"SQ00014610_normalized_feature_select.csv.gz\"\nexample_file = pathlib.Path(\n \"{file}/../../example_data/gene/{eg}\".format(\n file=os.path.dirname(__file__), eg=example_file\n )\n)\n\ndf = pd.read_csv(example_file)\n\nmeta_features = [\n x for x in df.columns if (x.startswith(\"Metadata_\") or x.startswith(\"Image_\"))\n]\nfeatures = df.drop(meta_features, axis=\"columns\").columns.tolist()\n\nsimilarity_melted_df = metric_melt(\n df=df,\n features=features,\n metadata_features=meta_features,\n similarity_metric=\"pearson\",\n eval_metric=\"precision_recall\",\n)\n\nreplicate_groups = [\"Metadata_gene_name\", \"Metadata_cell_line\"]\n\n\ndef test_precision_recall():\n result = precision_recall(\n similarity_melted_df=similarity_melted_df,\n replicate_groups=replicate_groups,\n k=10,\n )\n\n assert 
len(result.k.unique()) == 1\n assert result.k.unique()[0] == 10\n\n # ITGAV has a really strong profile\n assert (\n result.sort_values(by=\"recall\", ascending=False)\n .reset_index(drop=True)\n .iloc[0, :]\n .Metadata_gene_name\n == \"ITGAV\"\n )\n\n assert all(x in result.columns for x in replicate_groups)\n","sub_path":"cytominer_eval/tests/test_operations/test_precision_recall.py","file_name":"test_precision_recall.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"230343151","text":"import os\nimport json\nimport pickle\nimport joblib\nimport pandas as pd\nfrom flask import Flask, jsonify, request\nfrom peewee import (\n SqliteDatabase, PostgresqlDatabase, Model, IntegerField,\n FloatField, TextField, IntegrityError\n)\nfrom playhouse.shortcuts import model_to_dict\n\n\n########################################\n# Begin database stuff\n\nDB = SqliteDatabase('predictions.db')\n\n\nclass Prediction(Model):\n observation_id = IntegerField(unique=True)\n observation = TextField()\n proba = FloatField()\n true_class = IntegerField(null=True)\n\n class Meta:\n database = DB\n\n\nDB.create_tables([Prediction], safe=True)\n\n# End database stuff\n########################################\n\n########################################\n# Unpickle the previously-trained model\n\n\nwith open(os.path.join('data', 'baseline_model_columns.json')) as fh:\n columns = json.load(fh)\n\n\nwith open(os.path.join('data', 'baseline_model_pipeline.pickle'), 'rb') as fh:\n pipeline = joblib.load(fh)\n\n\nwith open(os.path.join('data', 'baseline_model_dtypes.pickle'), 'rb') as fh:\n dtypes = pickle.load(fh)\n\n\n# End model un-pickling\n########################################\n\n########################################\n# Input validation functions\n\n\ndef check_request(request):\n \"\"\"\n Validates that our request is well formatted\n \n Returns:\n - assertion value: True if request is ok, False otherwise\n - error message: empty if request is ok, False otherwise\n \"\"\"\n \n if \"id\" not in request:\n error = \"Field `id` missing from request: {}\".format(request)\n return False, error\n \n if \"observation\" not in request:\n error = \"Field `observation` missing from request: {}\".format(request)\n return False, error\n \n return True, \"\"\n\n\n\ndef check_valid_column(observation):\n \"\"\"\n Validates that our observation only has valid columns\n \n Returns:\n - assertion value: True if all provided columns are valid, False otherwise\n - error message: empty if all provided columns are valid, False otherwise\n \"\"\"\n \n valid_columns = {\n \"SubjectRaceCode\",\n \"SubjectSexCode\",\n \"SubjectEthnicityCode\",\n \"StatuteReason\", \n \"InterventionReasonCode\", \n \"ResidentIndicator\", \n \"SearchAuthorizationCode\",\n \"SubjectAge\",\n \"hour\",\n \"day_of_week\",\n }\n \n keys = set(observation.keys())\n \n if len(valid_columns - keys) > 0: \n missing = valid_columns - keys\n error = \"Missing columns: {}\".format(missing)\n return False, error\n \n if len(keys - valid_columns) > 0: \n extra = keys - valid_columns\n error = \"Unrecognized columns provided: {}\".format(extra)\n return False, error \n\n return True, \"\"\n\n\n\ndef check_categorical_values(observation):\n \"\"\"\n Validates that all categorical fields are in the observation and values are valid\n \n Returns:\n - assertion value: True if all provided categorical columns contain valid values, \n False otherwise\n - error message: empty 
if all provided columns are valid, False otherwise\n \"\"\"\n \n valid_category_map = {\n \"InterventionReasonCode\": [\"V\", \"E\", \"I\"],\n \"SubjectRaceCode\": [\"W\", \"B\", \"A\", \"I\"],\n \"SubjectSexCode\": [\"M\", \"F\"],\n \"SubjectEthnicityCode\": [\"H\", \"M\", \"N\"],\n \"SearchAuthorizationCode\": [\"O\", \"I\", \"C\", \"N\"],\n \"ResidentIndicator\": [True, False],\n \"StatuteReason\": [\n 'Stop Sign', 'Other', 'Speed Related', 'Cell Phone', 'Traffic Control Signal', 'Defective Lights', \n 'Moving Violation', 'Registration', 'Display of Plates', 'Equipment Violation', 'Window Tint', \n 'Suspended License', 'Seatbelt', 'Other/Error', 'STC Violation', 'Administrative Offense', 'Unlicensed Operation'], \n \"day_of_week\": [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n }\n \n for key, valid_categories in valid_category_map.items():\n if key in observation:\n value = observation[key]\n if value not in valid_categories:\n error = \"Invalid value provided for {}: {}. Allowed values are: {}\".format(\n key, value, \",\".join([\"'{}'\".format(v) for v in valid_categories]))\n return False, error\n else:\n error = \"Categorical field {} missing\"\n return False, error\n\n return True, \"\"\n\n\ndef check_hour(observation):\n \"\"\"\n Validates that observation contains valid hour value \n \n Returns:\n - assertion value: True if hour is valid, False otherwise\n - error message: empty if hour is valid, False otherwise\n \"\"\"\n \n hour = observation.get(\"hour\")\n \n if not hour:\n error = \"Field `hour` missing\"\n return False, error\n\n if not isinstance(hour, int):\n error = \"Field `hour` is not an integer\"\n return False, error\n \n if hour < 0 or hour > 24:\n error = \"Field `hour` is not between 0 and 24\"\n return False, error\n\n return True, \"\"\n\n\ndef check_age(observation):\n \"\"\"\n Validates that observation contains valid hour value \n \n Returns:\n - assertion value: True if hour is valid, False otherwise\n - error message: empty if hour is valid, False otherwise\n \"\"\"\n \n age = observation.get(\"SubjectAge\")\n \n if not age: \n error = \"Field `SubjectAge` missing\"\n return False, error\n\n if not isinstance(age, int):\n error = \"Field `SubjectAge` is not an integer\"\n return False, error\n \n if age < 10 or age > 100:\n error = \"Field `SubjectAge` is not between 10 and 100\"\n return False, error\n\n return True, \"\"\n\n\n# End input validation functions\n########################################\n\n########################################\n# Begin webserver stuff\n\napp = Flask(__name__)\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n obs_dict = request.get_json()\n \n request_ok, error = check_request(obs_dict)\n if not request_ok:\n response = {'error': error}\n return jsonify(response)\n\n _id = obs_dict['id']\n observation = obs_dict['observation']\n\n columns_ok, error = check_valid_column(observation)\n if not columns_ok:\n response = {'error': error}\n return jsonify(response)\n\n categories_ok, error = check_categorical_values(observation)\n if not categories_ok:\n response = {'error': error}\n return jsonify(response)\n\n hour_ok, error = check_hour(observation)\n if not hour_ok:\n response = {'error': error}\n return jsonify(response)\n\n age_ok, error = check_age(observation)\n if not age_ok:\n response = {'error': error}\n return jsonify(response)\n\n obs = pd.DataFrame([observation], columns=columns).astype(dtypes)\n proba = pipeline.predict_proba(obs)[0, 1]\n prediction = 
pipeline.predict(obs)[0]\n response = {'prediction': bool(prediction), 'proba': proba}\n p = Prediction(\n observation_id=_id,\n proba=proba,\n observation=request.data,\n )\n try:\n p.save()\n except IntegrityError:\n error_msg = \"ERROR: Observation ID: '{}' already exists\".format(_id)\n response[\"error\"] = error_msg\n print(error_msg)\n DB.rollback()\n return jsonify(response)\n\n \n@app.route('/update', methods=['POST'])\ndef update():\n obs = request.get_json()\n try:\n p = Prediction.get(Prediction.observation_id == obs['id'])\n p.true_class = obs['true_class']\n p.save()\n return jsonify(model_to_dict(p))\n except Prediction.DoesNotExist:\n error_msg = 'Observation ID: \"{}\" does not exist'.format(obs['id'])\n return jsonify({'error': error_msg})\n\n\n \nif __name__ == \"__main__\":\n app.run()\n","sub_path":"S06 - DS in the Real World/BLU14 - Deployment in Real World/protected_server.py","file_name":"protected_server.py","file_ext":"py","file_size_in_byte":7930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"383473743","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import Superhero\n\n# Create your views here.\n\ndef index(request):\n all_heroes = Superhero.objects.all()\n context = {\n 'all_heroes' : all_heroes\n }\n return render(request, 'superheroes/index.html', context)\n\ndef detail(request, hero_id):\n single_hero = Superhero.objects.get(pk = hero_id)\n context = {\n 'single_hero' : single_hero\n }\n return render(request, 'superheroes/details.html', context)\n\ndef create(request):\n if request.method == \"POST\":\n name = request.POST.get('name')\n alter_ego = request.POST.get('alter_ego')\n primary = request.POST.get('primary')\n secondary = request.POST.get('secondary')\n catch_phrase = request.POST.get('catch_phrase')\n new_hero = Superhero(name=name, alter_ego=alter_ego, primary_ability=primary, secondary_ability=secondary, catch_phrase=catch_phrase)\n new_hero.save()\n return HttpResponseRedirect(reverse('superheroes:index'))\n else:\n return render(request, 'superheroes/create.html')\n\ndef edit(request, hero_id):\n hero = Superhero.objects.get(pk=hero_id)\n if request.method == \"POST\":\n hero.name = request.POST.get('name')\n hero.alter_ego = request.POST.get('alter_ego')\n hero.primary_ability = request.POST.get('primary')\n hero.secondary_ability = request.POST.get('secondary')\n hero.catch_phrase = request.POST.get('catch_phrase')\n hero.save()\n return HttpResponseRedirect(reverse('superheroes:detail', args=[hero.pk]))\n else:\n context = {\n 'hero': hero\n }\n return render(request, 'superheroes/edit.html', context)\n\ndef delete(request, hero_id):\n hero = Superhero.objects.get(pk=hero_id)\n context ={\n 'hero': hero\n }\n obj = get_object_or_404(Superhero, id = hero.id)\n if request.method ==\"GET\":\n obj.delete()\n return HttpResponseRedirect(\"/\")\n \n return render(request, \"superheroes:index\", context)","sub_path":"super_hero_project/superhero_project/superheroes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"563938371","text":"from odoo import models, fields, api\n\n\nclass StockPicking(models.Model):\n _inherit = 'stock.picking'\n\n delivery_date = fields.Datetime('Fecha de entrega')\n\n shipping_number = fields.Integer('Número 
Embarque')\n\n shipping_id = fields.Many2one(\n 'custom.shipment',\n 'Embarque'\n )\n\n contract_correlative = fields.Integer('corr')\n\n contract_correlative_view = fields.Char(\n 'N° Orden',\n compute='_get_correlative_text'\n )\n\n consignee_id = fields.Many2one(\n 'res.partner',\n 'Consignatario',\n domain=[('customer', '=', True)]\n )\n\n notify_ids = fields.Many2many(\n 'res.partner',\n domain=[('customer', '=', True)]\n )\n\n agent_id = fields.Many2one(\n 'res.partner',\n 'Agente',\n domain=[('is_agent', '=', True), ('commission', '>', 0)]\n )\n\n total_commission = fields.Float(\n 'Valor Comisión',\n compute='_compute_total_commission'\n )\n\n charging_mode = fields.Selection(\n [\n ('piso', 'A Piso'),\n ('slip_sheet', 'Slip Sheet'),\n ('palet', 'Paletizado')\n ],\n 'Modo de Carga'\n )\n\n booking_number = fields.Char('N° Booking')\n\n bl_number = fields.Char('N° BL')\n\n client_label = fields.Boolean('Etiqueta Cliente', default=False)\n\n container_number = fields.Char('N° Contenedor')\n\n freight_value = fields.Float('Valor Flete')\n\n safe_value = fields.Float('Valor Seguro')\n\n total_value = fields.Float(\n 'Valor Total',\n compute='_compute_total_value',\n store=True\n )\n\n value_per_kilogram = fields.Float(\n 'Valor por kilo',\n compute='_compute_value_per_kilogram',\n store=True\n )\n\n remarks = fields.Text('Comentarios')\n\n container_type = fields.Many2one(\n 'custom.container.type',\n 'Tipo de contenedor'\n )\n\n @api.model\n @api.depends('freight_value', 'safe_value')\n def _compute_total_value(self):\n print('')\n # cambiar amount_total\n # data = self.amount_total - self.freight_value - self.safe_value\n # self.total_value = data\n\n @api.model\n @api.depends('total_value')\n def _compute_value_per_kilogram(self):\n print('')\n # qty_total = 0\n # for line in self.order_line:\n # qty_total = qty_total + line.product_uom_qty\n # if qty_total > 0:\n # self.value_per_kilogram = self.total_value / qty_total\n\n @api.model\n @api.depends('agent_id')\n def _compute_total_commission(self):\n print('')\n # cambiar amount_total\n # self.total_commission = (self.agent_id.commission / 100) * self.amount_total\n\n @api.model\n # @api.depends('contract_id')\n def _get_correlative_text(self):\n print('')\n # if self.contract_id:\n # if self.contract_correlative == 0:\n # existing = self.contract_id.sale_order_ids.search([('name', '=', self.name)])\n # if existing:\n # self.contract_correlative = existing.contract_correlative\n # if self.contract_correlative == 0:\n # self.contract_correlative = len(self.contract_id.sale_order_ids)\n # else:\n # self.contract_correlative = 0\n # if self.contract_id.name and self.contract_correlative and self.contract_id.container_number:\n # self.contract_correlative_view = '{}-{}/{}'.format(\n # self.contract_id.name,\n # self.contract_correlative,\n # self.contract_id.container_number\n # )\n # else:\n # self.contract_correlative_view = ''\n","sub_path":"dimabe_export_order/models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"436112765","text":"from PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel\nimport sys\n\n\nclass Window(QMainWindow):\n\n# for the window size and icons.\n def __init__(self):\n super().__init__()\n\n self.title = \"PyQt5 Widget Positioning\"\n self.top = 100\n self.left = 100\n self.width = 600\n self.height = 500\n 
self.setWindowIcon(QtGui.QIcon(\"icon.png\"))\n self.InitWindow()\n\n def InitWindow(self):\n\n\n ### can move any widget by using this method\n self.label1 = QLabel(\"Please\", self)\n self.label1.move(50, 50)\n\n self.label2 = QLabel(\"Subscribe\", self)\n self.label2.move(100, 100)\n\n self.label3 = QLabel(\"My\", self)\n self.label3.move(150, 150)\n\n self.label4 = QLabel(\"Channel\", self)\n self.label4.move(200, 200)\n self.setWindowTitle(self.title)\n self.setGeometry(self.top, self.left, self.width, self.height)\n self.show()\n\nApp = QApplication(sys.argv)\nwindow = Window()\nsys.exit(App.exec())","sub_path":"PyQt5_gui/WidgetPositioning.py","file_name":"WidgetPositioning.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"129923498","text":"import requests\nurl = 'http://m.ip138.com/ip.asp?ip='\nurl1 = input('Enter a url: ')\ntry:\n r = requests.get(url + url1)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n print(r.text[-500:])\nexcept:\n print('Fail.')\n\n#BIT:202.204.80.112\n#BJFU 202.204.112.87\n","sub_path":"ch1ex5_GetIP.py","file_name":"ch1ex5_GetIP.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"150271632","text":"import operator\nimport six\n\ntry:\n from triton import AST_NODE as TAstN\n triton_available = True\nexcept ImportError:\n triton_available = False\n\nfrom arybo.lib import MBA, MBAVariable, flatten\n\ndef _get_mba(n,use_esf):\n mba = MBA(n)\n mba.use_esf = use_esf\n return mba\n\ndef triton2arybo(e, use_esf=False):\n ''' Convert a subset of Triton's AST into Arybo's representation\n\n Args:\n e: Triton AST\n use_esf: use ESFs when creating the final expression\n\n Returns:\n An :class:`arybo.lib.MBAVariable` object\n '''\n\n children_ = e.getChilds()\n children = (triton2arybo(c,use_esf) for c in children_)\n reversed_children = (triton2arybo(c,use_esf) for c in reversed(children_))\n\n Ty = e.getKind()\n if Ty == TAstN.ZX:\n n = next(children)\n v = next(children)\n n += v.nbits\n if n == v.nbits:\n return v\n return v.zext(n)\n if Ty == TAstN.SX:\n n = next(children)\n v = next(children)\n n += v.nbits\n if n == v.nbits:\n return v\n return v.sext(n)\n if Ty == TAstN.DECIMAL:\n return e.getValue()\n if Ty == TAstN.BV:\n cst = next(children)\n nbits = next(children)\n return _get_mba(nbits,use_esf).from_cst(cst)\n if Ty == TAstN.EXTRACT:\n last = next(children)\n first = next(children)\n v = next(children)\n return v[first:last+1]\n if Ty == TAstN.CONCAT:\n return flatten(reversed_children)\n if Ty == TAstN.VARIABLE:\n name = e.getValue()\n return _get_mba(e.getBitvectorSize(),use_esf).var(name)\n\n # Logical/arithmetic shifts\n shifts = {\n TAstN.BVLSHR: operator.rshift,\n TAstN.BVSHL: operator.lshift,\n TAstN.BVROL: lambda x,n: x.rol(n),\n TAstN.BVROR: lambda x,n: x.ror(n)\n }\n shift = shifts.get(Ty, None)\n if not shift is None:\n n = next(children)\n v = next(children)\n if isinstance(n, MBAVariable):\n n = n.to_cst()\n if not isinstance(n, six.integer_types):\n raise ValueError(\"arithmetic/logical shifts by a symbolic value isn't supported yet.\") \n return shift(v,n)\n\n # Unary op\n unops = {\n TAstN.BVNOT: lambda x: ~x,\n TAstN.BVNEG: operator.neg\n }\n unop = unops.get(Ty, None)\n if unop != None:\n return unop(next(children))\n\n # Binary ops\n # Division is a special case because we only support division by a known\n # integer\n if Ty == 
TAstN.BVUDIV:\n        a = next(children)\n        n = next(children)\n        if isinstance(n, MBAVariable):\n            n = n.to_cst()\n        if not isinstance(n, six.integer_types):\n            raise ValueError(\"unsigned division is only supported by a known integer!\")\n        return a.udiv(n)\n\n    binops = {\n        TAstN.BVADD: operator.add,\n        TAstN.BVSUB: operator.sub,\n        TAstN.BVAND: operator.and_,\n        TAstN.BVOR: operator.or_,\n        TAstN.BVXOR: operator.xor,\n        TAstN.BVMUL: operator.mul,\n        TAstN.BVNAND: lambda x,y: ~(x&y),\n        TAstN.BVNOR: lambda x,y: ~(x|y),\n        TAstN.BVXNOR: lambda x,y: ~(x^y),\n    }\n    binop = binops[Ty]\n    # 'reduce' is not a builtin in Python 3, so fold the children explicitly.\n    out = next(children)\n    for child in children:\n        out = binop(out, child)\n    return out\n\n","sub_path":"arybo/tools/triton_.py","file_name":"triton_.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"174757525","text":"from periscope.models.subject import SubjectProperty\n\n\nage = SubjectProperty(\"Age\") # for baboon study\nage_class = SubjectProperty(\"Age Class\")\nsex = SubjectProperty(\"Sex\")\nweight = SubjectProperty(\"Weight\")\nspecies = SubjectProperty(\"Species\")\nmedical_conditions = SubjectProperty(\"Medical Condition\")\nvaccination_background = SubjectProperty(\"Vaccination Background\")\nbp_challenges = SubjectProperty(\"Bordatella Pertussis Challenges\")\nmedication_background = SubjectProperty(\"Medication Background\")\nadverse_effects = SubjectProperty(\"Adverse Effects\")\n\nclinical_site = SubjectProperty(\"Clinical Site\")\nsubgroup = SubjectProperty(\"Subgroup\")\nstudy = SubjectProperty(\"Study\")\nspecies = SubjectProperty(\"Species\")\n","sub_path":"periscope/domain/subject.py","file_name":"subject.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"1342385","text":"import psycopg2\nimport time\nimport os\n\n'''connection = psycopg2.connect(user = \"isneqpdtrrraup\",\n                                 password = \"d6f221a93bf2e038e37179cf4e0d4a18c321241340b0bf4368439fa212fc9b06\",\n                                 host = \"ec2-176-34-183-20.eu-west-1.compute.amazonaws.com\",\n                                 port = \"5432\",\n                                 database = \"delcjv8nidnnch\")'''\n\n#connection = psycopg2.connect(\"postgres://isneqpdtrrraup:d6f221a93bf2e038e37179cf4e0d4a18c321241340b0bf4368439fa212fc9b06@ec2-176-34-183-20.eu-west-1.compute.amazonaws.com:5432/delcjv8nidnnch\")\n\nconnection = psycopg2.connect(os.environ['DATABASE_URL'])\n\ncursor = connection.cursor()\n\ncursor.execute(\"SELECT version();\")\nrecord = cursor.fetchone()\nprint(\"You are connected to - \", record,\"\\n\")\n\n\n'''repeater = int(input(\"Want to repeat this process how many times? \"))\ntimer = int(input(\"How many seconds between each repeat? 
\"))'''\n\nfor y in range(100): \n print(\"\\n\")\n tableToSync = '''SELECT * FROM test.valuestosync'''\n tableSynced = '''SELECT * FROM test.valuessynced'''\n \n cursor.execute(tableToSync)\n toInsert = cursor.fetchall()\n \n print(y + 1,\": Inserting in Sync table \\n\", toInsert)\n for x in toInsert:\n stringToInsert = \"Insert into test.valuessynced(valord, sync) values (\" + str(x[0]) + \", 's')\"\n cursor.execute(stringToInsert)\n stringToExe = \"Update test.valuestosync set sync = 's' WHERE valorc = \" + str(x[0])\n cursor.execute(stringToExe)\n connection.commit()\n \n cursor.execute(tableToSync + \" WHERE sync = 's'\")\n toDelete = cursor.fetchall()\n \n print(y + 1,\": Deleting To Sync table \\n\", toDelete)\n for x in toDelete:\n stringToDel = \"DELETE FROM test.valuestosync where valorc = \" + str(x[0])\n cursor.execute(stringToDel)\n connection.commit()\n \n if(y is not 99):\n time.sleep(20)\n","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597599709","text":"# -*- coding: utf-8 -*-\n# quickstarted Options:\n# sqlalchemy: True\n# auth: sqlalchemy\n# mako: True\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\n\nclassifiers = [\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n]\n\ntestpkgs = [\n 'WebTest >= 1.2.3',\n 'nose',\n 'coverage',\n 'gearbox',\n]\n\ninstall_requires = [\n 'TurboGears2==2.3.7',\n 'Genshi',\n 'Mako',\n 'zope.sqlalchemy >= 0.4',\n 'sqlalchemy',\n 'alembic',\n 'repoze.who',\n 'who_ldap>=3.2.1',\n 'python-ldap-test==0.2.1',\n 'unicode-slugify==0.1.3',\n 'pytz==2014.7',\n 'rq==0.7.1',\n 'filedepot>=0.5.0',\n 'preview-generator',\n]\n\nsetup(\n name='tracim',\n version='1.0.0',\n description=(\n 'Tracim is a plateform software designed to improve '\n 'traceability and productivity in collaborative work.'\n ),\n author='Damien ACCORSI',\n author_email='damien.accorsi@free.fr',\n url='https://github.com/tracim/tracim',\n packages=find_packages(exclude=['ez_setup']),\n install_requires=install_requires,\n include_package_data=True,\n test_suite='nose.collector',\n tests_require=testpkgs,\n package_data={\n 'tracim': [\n 'i18n/*/LC_MESSAGES/*.mo',\n 'templates/*/*',\n 'public/*/*',\n ]\n },\n message_extractors={\n 'tracim': [\n ('**.py', 'python', None),\n ('templates/**.mak', 'mako', {'input_encoding': 'utf-8'}),\n ('public/**', 'ignore', None)\n ]\n },\n entry_points={\n 'paste.app_factory': [\n 'main = tracim.config.middleware:make_app'\n ],\n 'gearbox.plugins': [\n 'turbogears-devtools = tg.devtools'\n ],\n 'gearbox.commands': [\n 'ldap_server = tracim.command.ldap_test_server:LDAPTestServerCommand',\n 'user_create = tracim.command.user:CreateUserCommand',\n 'user_update = tracim.command.user:UpdateUserCommand',\n 'mail sender = tracim.command.mail:MailSenderCommend',\n ]\n },\n dependency_links=[\n 'http://github.com/algoo/preview-generator/tarball/master#egg=preview_generator-1.0',\n ],\n zip_safe=False,\n)\n","sub_path":"tracim/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"357838055","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"MDMS-szymonzaczek\",\n version=\"0.1\",\n author=\"Szymon Zaczek\",\n author_email=\"szymon.zaczek@edu.p.lodz.pl\",\n description=long_description,\n long_description='An interface to one of the most popular Molecular Dynamics codes - Amber - which aids user in '\n 'preparing and running their own simulations.',\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/szymonzaczek/MDMS\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: Linux\",\n ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50186477","text":"\"\"\"\n\n Author: Hieu Dang\n Date: 10/03/2019\n File Name: greekTerm.py\n Purpose: demonstrate using dictionaries\n\n\"\"\"\n#Declaration\ngeek = {\"1st\": \"first\", \"2nd\": \"second\", \"3rd\": \"third\", \"4th\": \"fourth\", \"5th\":\"fifth\"}\nchoice = None\n\n#MAIN MODULE\n#INPUT\n\n\n#PROCESSING\nwhile choice != \"0\":\n print(\n \"\"\"\n Geek Translator\n 0 - Quit\n 1 - Look up a Geek Term\n 2 - Add a Geek Term\n 3 - Redefine a Geek Term\n 4 - Delete a Geek Term\n\n \"\"\"\n )\n\n choice = input(\"Choice: \")\n print()\n \n #Exit\n if choice == \"0\":\n print(\"Good bye\")\n\n #Get a definition\n elif choice == \"1\":\n term = input(\"What term do you want me to translate? \")\n if term in geek:\n definition = geek[term]\n print(\"\\n\", term, \"means\", definition)\n else:\n print(\"\\nSorry, I don't know the term\")\n\n #add a term-definition pair\n elif choice == \"2\":\n term = input(\"What term do you want me to add? \")\n if term not in geek:\n definition = input(\"\\nWhat's the definition? \")\n geek[term] = definition #add a pair in the dictionary\n print(\"\\n\", term, \"has been added.\")\n else:\n print(\"\\nThat term already exist! Try redefining it!\")\n \n #redefine an existing term\n elif choice == \"3\":\n term = input(\"\\nWhat term do you want to redefine? \")\n if term in geek:\n definition = input(\"\\nWhat is the new definition? \")\n geek[term] = definition\n print(\"\\n\", term, \"has been redefined!\")\n else:\n print(\"\\\\nThat term doesn't exist! Try adding it!\")\n \n #delete a pair in dictionary\n elif choice == \"4\":\n term = input(\"\\nWhat term do you want to delete? \")\n if term in geek:\n del geek[term]\n print(\"\\nOkay! 
I deleted\", term)\n else:\n print(\"\\nI can't do that!\", term, \"doesn't exist in the dictionay.\")\n \n #some unknown choice\n else:\n print(\"\\nSorry, but\", choice, \"isn't a valid choice.\")\n\ninput(\"\\nPress the enter key to exit!\")\n\n\n#OUTPUT\n","sub_path":"Programming/Python/Practice/greekTerm.py","file_name":"greekTerm.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"534902897","text":"#\n# @lc app=leetcode.cn id=1021 lang=python\n#\n# [1021] 删除最外层的括号\n#\n\n# @lc code=start\nclass Solution(object):\n def removeOuterParentheses(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n ans = ''\n stack = []\n for c in S:\n if c == \"(\":\n if stack:\n ans += c\n stack.append(c)\n else:\n stack.pop()\n if stack:\n ans += c\n return ans\n\n# @lc code=end\n\n","sub_path":"Week_02/1021.删除最外层的括号.py","file_name":"1021.删除最外层的括号.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271807759","text":"\"\"\"\n 给定两个字符串 text1 和 text2,返回这两个字符串的最长公共子序列。\n 一个字符串的 子序列 是指这样一个新的字符串:它是由原字符串在不改变字符的相对顺序的情况下删除某些字符(也可以不删除任何字符)后组成的新字符串。\n 例如,\"ace\" 是 \"abcde\" 的子序列,但 \"aec\" 不是 \"abcde\" 的子序列。两个字符串的「公共子序列」是这两个字符串所共同拥有的子序列。\n 若这两个字符串没有公共子序列,则返回 0。\n\n 示例 1:\n 输入:text1 = \"abcde\", text2 = \"ace\"\n 输出:3\n 解释:最长公共子序列是 \"ace\",它的长度为 3。\n\n 示例 2:\n 输入:text1 = \"abc\", text2 = \"abc\"\n 输出:3\n 解释:最长公共子序列是 \"abc\",它的长度为 3。\n\n 示例 3:\n 输入:text1 = \"abc\", text2 = \"def\"\n 输出:0\n 解释:两个字符串没有公共子序列,返回 0。\n\"\"\"\nfrom functools import lru_cache\n\n\nclass Solution:\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n pass\n\n @classmethod\n def recursive(cls, text1: str, text2: str) -> int:\n \"\"\"\n 从头开始算\n i 代表text1 的下标\n j 代表text2 的下标\n\n 如果 text1[i] == text2[j] 则结果+1\n 否则取 i+1, j 和 i, j+1 这两个结果的最大值。\n\n \"\"\"\n text1_len = len(text1)\n text2_len = len(text2)\n\n @lru_cache(None)\n def helper(i: int, j: int) -> int:\n if i >= text1_len or j >= text2_len:\n return 0\n if text1[i] == text2[j]:\n return helper(i + 1, j + 1) + 1\n return max(helper(i, j + 1), helper(i + 1, j))\n\n return helper(0, 0)\n\n @classmethod\n def dp(cls, text1: str, text2: str) -> int:\n \"\"\"\n i j 不代表下标\n dp[i][j] 代表 text1到i之前 text2到j之前 的最大子序列长度。\n\n dp[i][j] = dp[i-1][j-1] + 1 if text1[i] == text2[j]\n or\n dp[i][j] = max(dp[i-1][j], dp[i][j-1]) if text1[i] != text2[j]\n\n \"\"\"\n text1_len = len(text1)\n text2_len = len(text2)\n\n # 多增加1位 处理边界问题\n dp = [[0 for i in range(text2_len + 1)] for j in range(text1_len + 1)]\n\n for i in range(text1_len + 1):\n for j in range(text2_len + 1):\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1]) if text1[i - 1] != text2[j - 1] else dp[i - 1][j - 1] + 1\n return dp[text1_len][text2_len]\n\nif __name__ == '__main__':\n print(Solution().dp(\"abcde\", \"ace\"))\n","sub_path":"Week_08/G20200343030545/LeetCode_1143_545.py","file_name":"LeetCode_1143_545.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80263332","text":"\"\"\"\n \"mvn\" = multi-variate Normal.\n Module to handle everything pertaining to multivariate normal distributions.\n This includes functionality involving full covariance structure, diagonal\n covariance structure and circulant covariance structure at the moment.\n\"\"\"\n\nimport autograd.numpy as np\nfrom autograd.scipy.linalg import cholesky as chol\nfrom 
autograd.scipy.linalg import solve_triangular as solve_tri\nfrom common import fft, ifft, sqrabs, log2pi\nimport cov\nimport matplotlib.pyplot as plt\nimport autograd.numpy.random as rng\nfrom autograd.numpy.random import multivariate_normal as rmvn\n\n\n\n# Computes the KL-divergence between a pair of multivariate Gaussians with\n# diagonal covariance matrices.\n#\n# Inputs:\n# mu0 - mean of first distribution. (D)\n# mu1 - mean of second distribution. (D)\n# s20 - diagonal of covariance matrix for first distribution. (D)\n# s21 - diagonal of covariance matrix for second distribution. (D)\n#\n# Outputs:\n# the KL-divergence D_KL(N(mu0, Sigma0) || N(mu1, Sigma1)), where Sigma0, Sigma1\n# are the diagonal covariance matrices specified by s20, s21. (scalar)\n#\ndef DKLDiagNormal(mu0, mu1, s20, s21):\n if s20.shape[0] != s21.shape[0]:\n raise ValueError('s20 and s21 must have the same shape.')\n ratio = s20 / s21\n return 0.5 * (np.sum(ratio) + np.sum(sqrabs(mu1 - mu0) / s21) -\\\n mu0.shape[0] - np.sum(np.log(ratio)))\n\n\n\n# Computes the KL-divergence between two normal distributions with full\n# covariance structure.\n#\n# Inputs:\n# mu0 - mean of first distribution. (D)\n# mu1 - mean of second distribution. (D)\n# Sigma0 - covariance matrix of first distribution. (D,D)\n# Sigma1 - covariance matrix of second distribution. (D,D)\n#\n# Outputs:\n# the KL-divergence D_KL(N(mu0, Sigma0) || N(mu1, Sigma1)). (scalar)\n#\ndef DKLFullNormal(mu0, mu1, Sigma0, Sigma1):\n\n # Compute the cholesky factorisations relied on throughout.\n L0, L1 = chol(Sigma0, lower=True), chol(Sigma1, lower=True)\n\n # Compute the trace term tr(\\inv{Sigma1} Sigma0). Decompose this using chol as\n # tr(\\inv{L1} L0 (\\inv(L1) L0)^T) from cheapest and most stable computation.\n out = np.sum(solve_tri(L1, L0, lower=True)**2)\n\n # Compute quadratic form term (mu0 - mu1)^T \\inv(Sigma1) (mu0 - mu1) using the\n # usual trick with cholesky.\n out += np.sum(solve_tri(L1, mu0 - mu1, lower=True) ** 2)\n\n # Compute determinant term using cholesky. Can just sum over the log of the\n # diagonals.\n out += 2 * np.sum(np.log(np.diag(L1) / np.diag(L0)))\n\n # Subtract dimensionality and halve.\n out = 0.5 * (out - mu0.shape[0])\n return out\n\n\n\n# Computes the entropy of an uncorrelated multivariate Gaussian.\n#\n# Inputs:\n# s2 - vector containing diagonal of covariance matrix. (D)\n#\n# Outputs:\n# entropy of the multivariate normal distribution with diagonal covariance s2.\ndef HDiagNormal(s2):\n return 0.5 * (s2.shape[0] * np.log(2 * np.pi) + np.sum(np.log(s2)))\n\n\n\n# Compute the entropy with full covariance.\n#\n# Inputs:\n# Sigma - covariance matrix. (D,D)\n#\n# Outputs:\n# entropy of the multivariate normal distribution with covariance Sigma.\n#\ndef HFullNormal(Sigma):\n D = Sigma.shape[0]\n L = chol(Sigma + 1e-12 * np.eye(D))\n return 0.5 * D * log2pi() + np.sum(np.log(np.diag(L)))\n\n\n\n# Computes the log pdf of an uncorrelated multivariate Gaussian.\n#\n# Inputs:\n# x - observation. (N)\n# mu - mean. (N)\n# s2 - diagonal of covariance matrix. (N)\n# N - number of dimensions distributed over. (scalar)\n#\n# Outputs:\n# log probability density at x. 
(scalar)\n#\ndef lpdfDiagNormal(x, mu, s2, N):\n    if s2.shape[0] == N:\n        return -0.5 * (N * log2pi() + np.sum(np.log(s2)) +\\\n            np.sum(sqrabs(x - mu) / s2))\n    elif s2.shape[0] == 1:\n        return -0.5 * (N * log2pi() + N * np.log(s2) +\\\n            np.sum(sqrabs(x - mu) / s2))\n    else:\n        raise ValueError('s2.shape[0] != (N or 1)')\n\n\n\n# Computes the log-probability of a multi-variate Gaussian.\n#\n# Inputs:\n# x - observation. (N)\n# mu - mean. (N)\n# Sigma - covariance matrix. (N,N)\n# N - number of dimensions distributed over. (scalar)\n#\n# Outputs:\n# log probability density at x. (scalar)\ndef lpdfFullNormal(x, mu, Sigma, N):\n    # Lower Cholesky factor of the jittered covariance; chol and solve_tri are\n    # the autograd.scipy.linalg helpers imported at the top of the module.\n    L = chol(Sigma + 1e-12 * np.eye(N), lower=True) + 1e-12 * np.eye(N)\n    delta = x - mu\n    alpha = solve_tri(L, delta, lower=True)\n    out_np = -0.5 * (N * log2pi() + 2 * np.sum(np.log(np.diag(L))) +\\\n        np.dot(alpha, alpha))\n    return out_np\n\n\ndef lpdfFullNormalSc(x, mu, Sigma, N):\n    # SciPy-based cross-check of lpdfFullNormal; imported locally since the\n    # rest of the module only depends on autograd.\n    import scipy as sp\n    import scipy.linalg\n    L = sp.linalg.cholesky(Sigma + 1e-12 * np.eye(N), lower=True) + 1e-12 * np.eye(N)\n    delta = x - mu\n    alpha = sp.linalg.solve_triangular(L, delta, lower=True)\n    out_sc = -0.5 * (N * log2pi() + 2 * np.sum(np.log(np.diag(L))) +\\\n        np.dot(alpha, alpha))\n    return out_sc\n\n\n\n# Draw from a Normal distribution with full covariance structure.\n#\n# Inputs:\n# mu - mean (D)\n# Sigma - covariance matrix (D,D)\n# N - the number of samples to draw. Defaults to 1.\n#\n# Outputs:\n# x - sample from N(mu, Sigma)\n#\ndef rndFullNormal(mu, Sigma, N=1):\n    out = mu + np.dot(rng.randn(N, mu.shape[0]), chol(Sigma))\n    if N > 1:\n        return out\n    else:\n        return out.flatten()\n\n# Draw from a multivariate normal whose covariance is circulant.\n#\n# Inputs:\n# mu - mean.\n# Gamma - eigenvalues of circulant.\n#\n# Output:\n# x ~ N(mu, FT Gamma FT*)\n#\ndef rndCircNormal(mu, Gamma):\n    N = Gamma.shape[0]\n    return np.real(mu + fft(np.sqrt(Gamma) *(rng.randn(N) + 1j * rng.randn(N))))\n\n\n\n# Perform unit tests.\ndef main():\n\n    rng.seed(15485863)\n\n    # Generate some simple GP data.\n    N = 100\n    X = np.arange(N) / np.float64(N)\n    pars = cov.init_eq(s2=1.0, l2=0.01)\n    fcov = cov.circ_factory(cov.eq, 1.0, pars)\n    mu0 = np.zeros(N)\n    s2n0, s2n1 = 1e-3, 1e-1\n    Sigma0 = fcov(X, X) + np.eye(N) * s2n0\n    Sigma1 = fcov(X, X) + np.eye(N) * s2n1\n    mu0 = rmvn(np.zeros(N), Sigma0)\n    mu1 = rmvn(mu0, Sigma1)\n\n    # Test out the KL-divergence computations.\n    def test_kl_divergence():\n        if DKLFullNormal(mu0, mu0, Sigma0, Sigma0) > 1e-9:\n            print('Test 1 failed: KL-divergence between distribution and itself > 0.')\n        if DKLFullNormal(mu1, mu1, Sigma1, Sigma1) > 1e-9:\n            print('Test 2 failed: KL-divergence between distribution and itself > 0.')\n        delta = np.abs(DKLFullNormal(mu0, mu1, s2n0*np.eye(N), s2n1*np.eye(N)) -\\\n            DKLDiagNormal(mu0, mu1, s2n0*np.ones(N), s2n1*np.ones(N)))\n        if delta > 1e-9:\n            print('Test 3 failed: KL-divergence different between diag and full.')\n            print('Delta is ' + str(delta))\n    #test_kl_divergence()\n\n    # Test the multivariate normal generator.\n    x = rndFullNormal(np.zeros(N), Sigma0, N=5)\n    print(x)\n    y = rndFullNormal(mu0, Sigma1, N=5)\n    plt.plot(x.T, 'b', y.T, 'r')\n    plt.show()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"exp/circgp/mvn.py","file_name":"mvn.py","file_ext":"py","file_size_in_byte":6565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"276613194","text":"import requests \r\nfrom pathlib import Path \r\nimport sys\r\n#path = Path('/vol/v3/ben_ak/raster_files/dem/')\r\n\r\ndef get_dem(path): \r\n\tfor i in range(52,73): #iterate through rows (lat)53,73\r\n\t\tfor j 
in range(128,181): #iterate through cols (lon)128,181\r\n\t\t\tprint(i,j)\r\n\t\t\timage_url = f\"https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/2/TIFF/n{i}w{j}/USGS_2_n{i}w{j}.tif\"\r\n\t\t\t \r\n\t\t\t# URL of the image to be downloaded is defined as image_url \r\n\t\t\t# req = requests.head(image_url,allow_redirects=True)\r\n\t\t\t# header = req.headers\r\n\t\t\t# content_type = header.get('content-type')\r\n\t\t\t# if content_type.lower() =='application/xml': \r\n\t\t\t# \tprint('its a file,downloading...')\r\n\t\t\tr = requests.get(image_url) # create HTTP response object \r\n\t\t \r\n\t\t\t# send a HTTP request to the server and save \r\n\t\t\t# the HTTP response in a response object called r \r\n\t\t\twith open(path/f\"USGS_2_n{i}w{j}.tif\",'wb') as f: \r\n\r\n\t\t\t \r\n\t\t\t # Saving received content as a png file in \r\n\t\t\t # binary format \r\n\t\t\t \r\n\t\t\t # write the contents of the response (r.content) \r\n\t\t\t # to a new file in binary mode. \r\n\t\t\t f.write(r.content) \r\n\t\t\t# else: \r\n\t\t\t# \tprint('its not a file, skipping...')\r\n\t\t\t#print(req.headers['content-length'])\t\r\n\t\t\t\r\n\r\ndef main(): \r\n\t# # script param\r\n\tscript = sys.argv[0]\r\n\t# first command line param\r\n\tpath = sys.argv[1]\r\n\tout_path = Path(path)\r\n\tget_dem(out_path)\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"get_dem_data.py","file_name":"get_dem_data.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"267937866","text":"import requests\nimport json\n\ndef sort_hanzi(hanzi, hanzi_object):\n\n\tdefinitions = []\n\tpinyin_number = []\n\tpinyin_plain = []\n\tpinyin_accent = []\n\ttones = []\n\tenglish = []\n\teng = \"\"\n\n\n\t# CHECK FOR LENGTH - Y: HOMONYMS, HETERONYMS | N: WORDS, SENTENCES\n\tif len(hanzi) == 1:\n\n\n\n\n\t\t# CHECK FOR NUMBER OF DEFINITIONS - Y: HOMONYMS | N: HETERONYMS\n\t\tif len(hanzi_object['definitions'].keys()) == 1:\n\t\t\t# CONVERT ONE DEFINITION KEY TO STRING\n\t\t\tfor p in hanzi_object['definitions'].keys():\n\t\t\t\tpinyin_number = p\n\n\t\t\tpinyin_accent = hanzi_object[\"definitions\"][pinyin_number][\"pinyin\"]\n\t\t\tpinyin_plain = pinyin_number[:-1]\n\t\t\ttones = pinyin_number[-1]\n\n\t\t\tenglish = \", \".join(hanzi_object['definitions'][pinyin_number]['translations'])\n\n\t\t\tprint(\"Hanzi: {}\\nNumber: {}\\nAccent: {}\\nPlain: {}\\nTone: {}\\nEnglish: {}\".format(hanzi, pinyin_number, pinyin_accent, pinyin_plain, tones, english))\n\n\n\n\n\t\telse:\n\t\t\t# NOT FINISHED!!! 
_____________\n\t\t\t# CONVERT THE DEFINITIONS KEYS TO LIST OF STRINGS\n\t\t\tfor p in hanzi_object['definitions'].keys():\n\t\t\t\tpinyin_number.append(p)\n\t\t\t\tpinyin_plain.append(p[:-1])\n\t\t\t\ttones.append(p[-1])\n\n\t\t\tfor definition in pinyin_number:\n\t\t\t\tpinyin_accent.append(hanzi_object[\"definitions\"][definition][\"pinyin\"])\n\t\t\t\tenglish.append(\", \".join(hanzi_object[\"definitions\"][definition][\"translations\"]))\n\n\n\n\n\n\t\t\tfor i in range(len(pinyin_number)):\n\t\t\t\teng += \"{}: {}\\n\".format(pinyin_accent[i], english[i])\n\t\t\t\t# DELETE THE LAST \\n\n\t\t\teng = eng[:-1]\n\n\t\t\tpinyin_number = \", \".join(pinyin_number)\n\t\t\tpinyin_plain = \", \".join(pinyin_plain)\n\t\t\tpinyin_accent = \", \".join(pinyin_accent)\n\t\t\ttones = \", \".join(tones)\n\n\t\t\tprint(pinyin_number)\n\t\t\tprint(pinyin_plain)\n\t\t\tprint(pinyin_accent)\n\t\t\tprint(tones)\n\t\t\tprint(eng)\n\n\n\n\n\n\telse:\n\t\tpass\n\ndef main():\n\thanzi = \"没\"\n\n\tresponse = requests.get(\"https://api.pinyin.pepe.asia/hanzi/{}\".format(hanzi))\n\thanzi_object = response.text[1:-1]\n\thanzi_object = json.loads(hanzi_object)\n\n\tsort_hanzi(hanzi, hanzi_object)\n\n\t#print(hanzi_object)\n\nmain()\n","sub_path":"chinese_flashcards.py","file_name":"chinese_flashcards.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"255940034","text":"'''\nRequirements:\nnltk, numpy, re, sklearn, bs4, keras\n\n- FrequencyExtractor\n- DocumentsSimilarity\n- TextPreprocessor\n- WordToIndexTransformer\n'''\n\nfrom sklearn.base import TransformerMixin, BaseEstimator\nimport nltk\nfrom nltk.corpus.reader.wordnet import NOUN, VERB, ADJ, ADV\nimport re\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import pairwise_distances\n\nfrom keras.preprocessing import sequence\n\n\nclass FrequencyExtractor(BaseEstimator, TransformerMixin):\n '''Feature extractor\n Compute the ratio of:\n - words with repeating letters,\n - words with uppercase,\n - words with first capital,\n - parts of speech: 'NOUN', 'VERB', 'ADJ', 'ADV', 'PRON', 'DET',\n 'ADP', 'NUM', 'CONJ', 'PRT', '.', 'X'\n to all words in a document\n # and\n # - a number of sentences to the maximal number in the corpus\n '''\n\n def __init__(self):\n pass\n\n def words_tokenize(self, X):\n '''Split texts into words removing punctuation'''\n X_tokenized = [list(filter(None, re.split('[^\\\\w\\'*]', doc))) for doc in X]\n return np.array(X_tokenized)\n\n def sentences_tokenize(self, X):\n '''Split texts into sentences'''\n X_tokenized = [nltk.sent_tokenize(doc) for doc in X]\n return np.array(X_tokenized)\n\n def fit(self, X, y=None):\n # X_sent_tokenized = self.sentences_tokenize(X)\n # self.max_sentences_no = np.max(list(map(len, X_sent_tokenized)))\n return self\n\n def _compute_matching_words_rate(self, regex, X):\n '''Compute the ratio of matching words to all words in a tweet'''\n matching_words_count = np.array([len(re.findall(regex, doc)) for doc in X])\n\n X_tokenized = self.words_tokenize(X)\n words_count = np.array(list(map(len, X_tokenized)))\n words_count[words_count == 0] = 1\n\n matching_words_rate = matching_words_count / words_count\n return matching_words_rate\n\n def compute_words_with_repeating_letters_rate(self, X):\n regex = '\\\\b\\\\w*(([a-zA-Z])\\\\2{2,})\\\\w*\\\\b'\n matching_words_rate = self._compute_matching_words_rate(regex, X)\n 
return matching_words_rate\n\n def compute_words_with_uppercase_rate(self, X):\n regex = '\\\\b[A-Z]{2,}\\\\b'\n matching_words_rate = self._compute_matching_words_rate(regex, X)\n return matching_words_rate\n\n def compute_words_with_first_capital_rate(self, X):\n regex = '\\\\b[A-Z][a-z]+\\\\b'\n matching_words_rate = self._compute_matching_words_rate(regex, X)\n return matching_words_rate\n\n def compute_parts_of_speech_rates(self, X):\n TAG_LIST = ['NOUN', 'VERB', 'ADJ', 'ADV', 'PRON', 'DET',\n 'ADP', 'NUM', 'CONJ', 'PRT', '.', 'X']\n\n X_tokenized = [nltk.word_tokenize(doc) for doc in X]\n X_tagged = nltk.pos_tag_sents(X_tokenized, tagset='universal')\n freqdist_gen = (nltk.FreqDist(tag for _, tag in doc_tagged)\n for doc_tagged in X_tagged)\n\n parts_of_speech_rates = np.array([[freqdist.get(tag, 0) for tag in TAG_LIST]\n for freqdist in freqdist_gen])\n words_count = np.tile(parts_of_speech_rates.sum(axis=1), (len(TAG_LIST), 1)).T\n parts_of_speech_rates = parts_of_speech_rates / words_count\n return parts_of_speech_rates\n\n def compute_number_of_sentences_rate(self, X):\n X_sent_tokenized = self.sentences_tokenize(X)\n sentences_count = list(map(len, X_sent_tokenized))\n no_of_sentences_rate = np.array(sentences_count) / self.max_sentences_no\n return no_of_sentences_rate\n\n def transform(self, X):\n '''Transform documents to the matrix of rates'''\n\n repeating_letters_rate = self.compute_words_with_repeating_letters_rate(X)\n uppercase_rate = self.compute_words_with_uppercase_rate(X)\n first_capital_rate = self.compute_words_with_first_capital_rate(X)\n parts_of_speech_rates = self.compute_parts_of_speech_rates(X)\n # no_of_sentences_rate = self.compute_number_of_sentences_rate(X)\n\n X_transformed = np.c_[repeating_letters_rate,\n uppercase_rate,\n first_capital_rate,\n parts_of_speech_rates,\n # no_of_sentences_rate\n ]\n return X_transformed\n\n\nclass DocumentsSimilarity(BaseEstimator, TransformerMixin):\n '''Feature extractor\n Compute the similarity of data to\n documents labeled as pos_label consisting of one sentence\n Parameters:\n :param pos_label: integer\n the class to compare\n :param preprocessor: transformer\n the fitted transformer to preprocess texts\n '''\n\n def __init__(self, pos_label=1, preprocessor=None):\n self.pos_label = pos_label\n self.preprocessor = preprocessor\n\n def text_preprocess(self, X):\n return self.preprocessor.transform(X)\n\n def get_docs_one_sent(self, X, y):\n '''Get documents labeled as pos_label consisting of one sentence'''\n X_one_sent = np.array([doc for i, doc in enumerate(X)\n if len(nltk.sent_tokenize(doc)) == 1 and y[i] == self.pos_label])\n return X_one_sent\n\n def fit(self, X, y):\n '''Find the document term matrix for\n documents labeled as pos_label consisting of one sentence'''\n\n X_one_sent = self.get_docs_one_sent(X, y)\n if self.preprocessor:\n X_one_sent = self.text_preprocess(X_one_sent)\n\n token_pattern = '(?u)\\\\b[a-z][a-z\\'*]+\\\\b'\n self.vectorizer = CountVectorizer(token_pattern=token_pattern, min_df=2)\n self.vectorizer.fit(X_one_sent)\n self.base_sent_dtm = self.vectorizer.transform(X_one_sent)\n return self\n\n def sentences_tokenize(self, X):\n '''Split texts into sentences'''\n X_sent_tokenized = [nltk.sent_tokenize(doc) for doc in X]\n return np.array(X_sent_tokenized)\n\n def compute_similarities(self, X_sent_tokenized):\n sent_dtms = [[self.vectorizer.transform([sent]) for sent in doc_sent_tokenized]\n for doc_sent_tokenized in X_sent_tokenized]\n\n similarities = [np.max([np.max((1 - 
pairwise_distances(sent_dtm, self.base_sent_dtm,\n metric='cosine')))\n for sent_dtm in doc])\n for doc in sent_dtms]\n return np.array(similarities)\n\n def transform(self, X):\n '''Transform documents to the array of similarities'''\n\n X_sent_tokenized = self.sentences_tokenize(X)\n if self.preprocessor:\n X_sent_tokenized = [self.text_preprocess(doc_sent_tokenized)\n for doc_sent_tokenized in X_sent_tokenized]\n\n X_transformed = self.compute_similarities(X_sent_tokenized)\n return X_transformed\n\n\nclass TextPreprocessor(BaseEstimator, TransformerMixin):\n '''Text preprocessor\n Normalize documents\n Parameters:\n :param stopwords: list\n list of stopwords\n :param process: string\n * 'stem' - stemming,\n * 'lem' - lemmatization,\n * '' - nothing\n '''\n \n def __init__(self, stopwords=[], process=''):\n self.stopwords = stopwords\n self.process = process\n\n def fit(self, X=None, y=None):\n if self.process == 'lem':\n train_tagged_corpus = nltk.corpus.brown.tagged_sents()\n tagger = nltk.DefaultTagger('X')\n for n in range(1, 4):\n tagger = nltk.NgramTagger(n, train_tagged_corpus, backoff=tagger)\n self._tagger = tagger\n return self\n\n def normalize_question_marks(self, X):\n '''Replace:\n * ? --> onequestionmark\n * ??... --> manyquestionmarks\n '''\n regex = '(? oneexclamationmark\n * !!... --> manyexclamationmarks\n '''\n regex = '(? onedot\n * ..(...) --> manydots\n '''\n regex = '(? quotationmark\n '''\n regex = '\"'\n X_normalized = [re.sub(regex, ' quotationmark ', doc) for doc in X]\n return np.array(X_normalized)\n\n def normalize_emoticons(self, X):\n '''Replace:\n * e.g. :) --> emoticonhappyface\n * e.g. :( --> emoticonsadface\n * <3... --> emoticonheart\n '''\n regex = '[:;=8x]-?[)D\\\\]}>*]'\n X_normalized = [re.sub(regex, ' emoticonhappyface ', doc) for doc in X]\n\n regex = '[:;=8x]\\'?-?[/(x#|\\\\[{<]'\n X_normalized = [re.sub(regex, ' emoticonsadface ', doc) for doc in X_normalized]\n\n regex = '<3+'\n X_normalized = [re.sub(regex, ' emoticonheart ', doc) for doc in X_normalized]\n return np.array(X_normalized)\n \n def normalize_repeating_letters(self, X):\n '''Replace:\n * letters repeated three or more times --> one letter\n '''\n regex = '(([a-z*])\\\\2{2,})'\n X_normalized = [re.sub(regex, '\\\\2', doc, flags=re.IGNORECASE) for doc in X]\n return np.array(X_normalized)\n \n def normalize_laugh(self, X):\n '''Replace:\n * e.g. hehehe --> haha\n '''\n regex = '(b?w?a?(ha|he)\\\\2{1,}h?)'\n X_normalized = [re.sub(regex, ' haha ', doc, flags=re.IGNORECASE) for doc in X]\n return np.array(X_normalized)\n\n def _replace_matches(self, doc, matches):\n doc_transformed = doc\n for match in matches:\n doc_transformed = doc_transformed.replace(match, ' ' + match.replace(' ', '') + ' ', 1)\n return doc_transformed\n\n def join_scattered_letters(self, X):\n '''Replace:\n * e.g. n e v e r --> never\n '''\n regex = '([^\\\\w](?:[a-z] ){4,}(?:[a-z]\\\\b)?)'\n matches_gen = (re.findall(regex, doc, flags=re.IGNORECASE) for doc in X)\n X_normalized = [self._replace_matches(doc, matches)\n for doc, matches in zip(X, matches_gen)]\n return np.array(X_normalized)\n\n def translate_shortcuts(self, X):\n '''Replace:\n * e.g. u --> you\n # * e.g. 
u --> shortyou\n '''\n short2full_dict = {'\\\\bu\\\\b': 'you',\n '\\\\br\\\\b': 'are',\n '\\\\sm\\\\s': 'am',\n '\\\\bb/c\\\\b': 'because',\n '[^\\\\w:;/]c\\\\b': 'see'\n }\n X_translated = X.copy()\n for k, v in short2full_dict.items():\n # X_translated = [re.sub(k, ' short' + v + ' ', doc, flags=re.IGNORECASE)\n X_translated = [re.sub(k, ' ' + v + ' ', doc, flags=re.IGNORECASE)\n for doc in X_translated]\n return np.array(X_translated)\n\n def normalize_urls(self, X):\n '''Replace:\n * e.g. http://xxx.pl --> addressurl\n '''\n regex = '\\\\w+://\\\\S+'\n X_normalized = [re.sub(regex, ' addressurl ', doc) for doc in X]\n return np.array(X_normalized)\n\n def normalize_emails(self, X):\n '''Replace:\n * e.g. xxx@xxx.com --> addressemail\n '''\n regex = '\\\\b(\\\\w[\\\\w.-]+@[\\\\w-][\\\\w.-]*\\\\w(?:\\\\.[a-zA-Z]{1,4}))\\\\b'\n X_normalized = [re.sub(regex, ' addressemail ', doc) for doc in X]\n return np.array(X_normalized)\n\n def translate_html_symbols(self, X):\n '''Replace:\n * e.g. < --> <\n '''\n extra_dict = {'&;': '\\''}\n X_translated = X.copy()\n for k, v in extra_dict.items():\n X_translated = [re.sub(k, v, doc) for doc in X_translated]\n\n X_translated = [BeautifulSoup(doc, 'html.parser').get_text()\n if doc not in {'.', '\\\\', '/'} else doc\n for doc in X_translated]\n return np.array(X_translated)\n\n def normalize(self, X):\n # addresses\n X_normalized = self.normalize_urls(X)\n X_normalized = self.normalize_emails(X_normalized)\n\n # html symbols\n X_normalized = self.translate_html_symbols(X_normalized)\n\n # words\n X_normalized = self.normalize_repeating_letters(X_normalized)\n X_normalized = self.normalize_laugh(X_normalized)\n X_normalized = self.join_scattered_letters(X_normalized)\n X_normalized = self.translate_shortcuts(X_normalized)\n\n # punctuation\n X_normalized = self.normalize_emoticons(X_normalized)\n X_normalized = self.normalize_question_marks(X_normalized)\n X_normalized = self.normalize_exclamation_marks(X_normalized)\n X_normalized = self.normalize_dots(X_normalized)\n X_normalized = self.normalize_quotation_marks(X_normalized)\n\n # convert to lowercase\n X_normalized = [doc.lower() for doc in X_normalized]\n return np.array(X_normalized)\n\n def tokenize(self, X):\n '''Split texts into words removing punctuation'''\n X_tokenized = [list(filter(None, re.split('[^\\\\w\\'*]', doc))) for doc in X]\n return np.array(X_tokenized)\n\n def remove_stopwords(self, X_tokenized):\n stopwords_set = set(self.stopwords)\n X_no_stopwords = [[token for token in doc_tokenized if token not in stopwords_set]\n for doc_tokenized in X_tokenized]\n return X_no_stopwords\n\n def stem(self, X_tokenized):\n stemmer = nltk.PorterStemmer()\n X_stemmed = [[stemmer.stem(token) for token in doc_tokenized]\n for doc_tokenized in X_tokenized]\n return X_stemmed\n\n def lemmatize(self, X_tokenized):\n pos_dict = {'J': ADJ,\n 'R': ADV,\n 'N': NOUN,\n 'V': VERB\n }\n\n X_tagged = [self._tagger.tag(doc_tokenized) for doc_tokenized in X_tokenized]\n\n lemmatizer = nltk.stem.WordNetLemmatizer()\n X_lemmatized = [[lemmatizer.lemmatize(token, pos=pos_dict[pos[0]])\n if pos[0] in pos_dict else token for token, pos in doc_tagged]\n for doc_tagged in X_tagged]\n return X_lemmatized\n\n def transform(self, X):\n '''Transform documents to the normalized documents'''\n X_normalized = self.normalize(X)\n\n X_tokenized = self.tokenize(X_normalized)\n\n if self.stopwords:\n X_tokenized = self.remove_stopwords(X_tokenized)\n\n if self.process == 'stem':\n X_tokenized = self.stem(X_tokenized)\n 
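# --- Illustrative sketch (added; not part of the original file) ---
# The stemming branch above relies on NLTK's PorterStemmer; shown here in
# isolation on a few already-normalised tokens.
import nltk
stemmer = nltk.PorterStemmer()
print([stemmer.stem(t) for t in ['running', 'happily', 'haha', 'tweets']])
# -> ['run', 'happili', 'haha', 'tweet']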
elif self.process == 'lem':\n X_tokenized = self.lemmatize(X_tokenized)\n\n X_preprocessed = [' '.join(doc_tokenized) for doc_tokenized in X_tokenized]\n return np.array(X_preprocessed)\n\n\nclass WordToIndexTransformer(BaseEstimator, TransformerMixin):\n '''Transformer\n Transform words to indices\n Parameters:\n :param preprocessor: transformer\n the fitted transformer to preprocess texts\n Attributes:\n :param index_to_word_: list\n the list of unique words\n :param unique_words_no_: integer\n the number of unique words\n :param word_to_index_: dictionary\n the dictionary in the form ``{word: index}``\n '''\n\n def __init__(self, preprocessor):\n self.preprocessor = preprocessor\n self.maxlen = 280\n\n def fit(self, X, y=None):\n '''Assign indices to words'''\n\n X_preprocessed = self.preprocessor.fit_transform(X)\n X_tokenized = [doc.split(' ') for doc in X_preprocessed]\n\n all_words = [token for doc_tokenized in X_tokenized for token in doc_tokenized]\n unique_words = np.unique(all_words)\n\n self.index_to_word_ = unique_words.tolist()\n self.unique_words_no_ = len(self.index_to_word_)\n self.word_to_index_ = {w: i for i, w in enumerate(self.index_to_word_)}\n return self\n\n def transform(self, X):\n '''Transform documents to the matrix of indices'''\n\n X_preprocessed = self.preprocessor.transform(X)\n X_tokenized = [doc.split(' ') for doc in X_preprocessed]\n X_transformed = [[self.word_to_index_.get(token, self.unique_words_no_)\n for token in doc_tokenized]\n for doc_tokenized in X_tokenized]\n X_transformed = sequence.pad_sequences(X_transformed, maxlen=self.maxlen)\n return X_transformed\n","sub_path":"mytextpreprocessing.py","file_name":"mytextpreprocessing.py","file_ext":"py","file_size_in_byte":17346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565602235","text":"#!/usr/bin/env python\n\n# Warn for use of `--interactive` inside Makefiles (#11468).\n#\n# Encourage the use of `$(TEST_HC_OPTS_INTERACTIVE)` instead of\n# `$(TEST_HC_OPTS) --interactive -ignore-dot-ghci -v0`. 
It's too easy to\n# forget one of those flags when adding a new test.\n\nimport sys\nimport os\nimport json\nimport re\n\npath = sys.argv[1]\nwarnings = []\nif os.path.isfile(path):\n with open(path) as f:\n for lineno, line in enumerate(f):\n if '--interactive' in line:\n warning = {\n 'severity': 'warning',\n 'message': 'Use `$(TEST_HC_OPTS_INTERACTIVE)` instead of `--interactive -ignore-dot-ghci -v0`',\n 'line': lineno+1,\n }\n warnings.append(warning)\n\nprint(json.dumps(warnings))\n","sub_path":".arc-linters/check-makefiles.py","file_name":"check-makefiles.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"194559034","text":"import wikipedia\n\nfrom apps.bot.classes.Exceptions import PWarning\nfrom apps.bot.classes.bots.VkBot import VkBot\nfrom apps.bot.classes.common.CommonCommand import CommonCommand\n\nwikipedia.set_lang(\"ru\")\n\n\nclass Wikipedia(CommonCommand):\n name = \"вики\"\n names = [\"википедия\"]\n help_text = \"поиск информации в википедии\"\n help_texts = [\n \"(фраза) - поиск информации в википедии\",\n \"р - рандомная статья в википедии\"\n ]\n args = 1\n\n def start(self):\n self.bot.set_activity(self.event.peer_id)\n\n is_random = False\n if self.event.args[0].lower() in [\"рандом\", \"р\"]:\n is_random = True\n search_query = wikipedia.random()\n else:\n search_query = self.event.original_args\n try:\n page = wikipedia.page(search_query)\n if page.summary != '':\n msg = f\"{page.original_title}\\n\\n{page.summary}\\n\\nПодробнее: {page.url}\"\n else:\n msg = f\"{page.original_title}\\n\\n{page.content}\\n\\nПодробнее: {page.url}\"\n output = {'msg': msg, 'attachments': [page.url]}\n if page.images:\n attachments = self.bot.upload_photos(page.images, 3)\n if isinstance(self.bot, VkBot):\n output['attachments'] += attachments\n else:\n if len(attachments) > 1:\n self.bot.parse_and_send_msgs(self.event.peer_id, {'msg': msg, 'attachments': attachments})\n if is_random:\n output['keyboard'] = self.bot.get_inline_keyboard(self.name, args={\"random\": \"р\"})\n return output\n except wikipedia.DisambiguationError as e:\n options = set(e.options)\n msg = \"Нашел сразу несколько. 
Уточните\\n\"\n msg += \"\\n\".join([x for x in options])\n raise PWarning(msg)\n except wikipedia.PageError:\n msg = \"Не нашёл такой страницы\\n\"\n search = wikipedia.search(self.event.original_args)\n if len(search) == 0:\n msg += \"Результат поиска ничего не дал\"\n else:\n msg += \"Я нашел возможные варианты:\\n\"\n search = list(map(lambda x: f\"- {x}\", search))\n msg += \"\\n\".join(search)\n return msg\n\n# Если он серит в консоль, то\n# lib/wikipedia/wikipedia.py:389\n# lis = BeautifulSoup(html, 'html.parser').find_all('li')\n","sub_path":"apps/bot/commands/Wikipedia.py","file_name":"Wikipedia.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602859208","text":"from flask import Flask,jsonify,request\nfrom models.bilibili_item import BiliBiliVideoItem\nfrom spiders.index import start_crawl\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\n\n@app.route('/')\ndef help():\n return jsonify({'help':\n {'/crawl':'crawl videos',\n '/get':'get video from db'}\n })\n\n\n@app.route('/crawl',methods=['GET'])\ndef crawl():\n text = request.args.get('name',default='蔡徐坤',type=str)\n page = request.args.get('page',default=5,type=int)\n count = start_crawl(text,page)\n return jsonify({'ret':'ok, now have {} videos in the db'.format(count)})\n\n\n@app.route('/get',methods=['GET'])\ndef video():\n page=request.args.get('page',default=0,type=int)\n size=request.args.get('size',default=10,type=int)\n try:\n res,count = BiliBiliVideoItem.query_items(page,size)\n return jsonify({'page':page,'size':size,'total_count':count,'count':len(res),'res':res})\n except Exception as e:\n print(e)\n return jsonify({'error':'error happened'})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=5000,debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"298262054","text":"import pandas as pd\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport plotly.graph_objs as go\n\napp = dash.Dash(__name__)\nserver = app.server\n\ndf = pd.read_csv('https://raw.githubusercontent.com/balakrishnans0214/Dashdropdown/master/All.csv')\nnames = df.columns[1:-1]\navailable_indicators = df['Sector'].unique()\n\napp.layout = html.Div(\n [\n html.Div([\n dcc.Dropdown(\n id='drop',\n options=[{'label': i, 'value': i} for i in available_indicators],\n value='Energy',\n style={'width': '50%'}\n ),\n dcc.Dropdown(\n id='ddl_x',\n options=[{'label': i, 'value': i} for i in names],\n value='Price',\n style={'width': '50%'}\n ),\n dcc.Dropdown(\n id='ddl_y',\n options=[{'label': i, 'value': i} for i in names],\n value='Market Cap',\n style={'width': '50%'}\n ),\n ], style={'width': '100%', 'display': 'inline-block'}),\n html.Div([\n dcc.Graph(id='graph1')\n ], style={'width': '100%', 'display': 'inline-block'})\n ]\n)\n\n\n@app.callback(\n Output(component_id='graph1', component_property='figure'),\n [\n Input(component_id='drop', component_property='value'),\n Input(component_id='ddl_x', component_property='value'),\n Input(component_id='ddl_y', component_property='value')\n ]\n)\ndef update_output(drop, ddl_x_value, ddl_y_value):\n dff = df[df.Sector.str.contains(drop)]\n\n figure = {\n 'data': [\n go.Scatter(\n x=dff[ddl_x_value],\n y=dff[ddl_y_value],\n )\n ],\n 'layout':\n go.Layout(\n 
height=350,\n hovermode='closest',\n title=go.layout.Title(text='Dash Interactive Data Visualization', xref='paper', x=0)\n )\n\n }\n return figure\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, port=8080)\n","sub_path":"newapp.py","file_name":"newapp.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"589243173","text":"'''\r\n\r\n \\\\SCRIPTERS//\r\n *************\r\n [1] Sameer Kumar\r\n [2] Parastou Yaghmai\r\n [3] Ekaagar Singh Hara\r\n\r\n Authorship/Authority of different files are as folllowing :-\r\n ************************************************************\r\n 1. Source Code :: Sameer Kumar\r\n 2. Pre-processing Code (R) :: Parastou Yaghmai\r\n 3. Baselines :: Parastou Yaghmai\r\n 4. Human Annotation :: Parastou Yaghmai, Ekaagar Singh Hara, Sameer Kumar\r\n 5. Origins :: Ekaagar Singh Hara\r\n 6. Installs :: Sameer Kumar & Parastou Yaghmai\r\n 7. Word Cloud :: Parastou Yaghmai & Ekaagar Singh Hara\r\n\r\n\r\n Program Title :: Topic Modelling & Visualization on Election Tweet\r\n *************\r\n Problem :: To identify the purpose & Topic of Millions of tweet which were released at the time of 2016 American Elections.\r\n ******* We identify the topics which were talked about in the huge set of tweets sent from 946 handles. It will help us understand \r\n the whether there were any influence by these tweets into the 2016 US election.\r\n\r\n Program Run :: The program is executed in Python with importing the Semi-clean dataset created in R. \r\n Original File Name :: IRAA.csv\r\n Semi-cleaned File Name :: Semicleantweet.csv\r\n Cleaned File Name :: cleaneddata1.csv\r\n\r\n Steps involved in execution of the program & knowledge mining according to it :-\r\n ********************************************************************************\r\n [i] The dataset is imported as csv file and attribute dropping is implemented.\r\n \r\n [ii] Data pre-processing is done on the imported dataset thereby removing punctuations. The texts are converted into\r\n lower cases. Stopwords are removed using english stopword library. Lemmatization and Stemming worked on to convert the\r\n words into their canonical form. Frequent words are removed in R script by idetifying the stopwords separately.\r\n \r\n [iii] The cleaned and processed data gets converted into a dictionary. Tokenized dataset comes into play. Dictionary\r\n consists of every token(word) wwith its unique ID & the count of appearing in the document.\r\n \r\n [iv] LDA modelling technique using Uni-grams is applied onto the processed dictionary Bag of Words. The Optimum number \r\n of appropriate topics to be modelled is calculated by calculating the coherence scores for each model processed from 1 to 15.\r\n The value is selected based on the plot of coherence score and number of topics. The optimum vaue is elected based on the drop &\r\n increase in the graph along with good coherence score. The model is selected with topic# = 14 and saved into a text file for later use.\r\n The LDA model selected is also saved in order to use the model for any other imports being performed on the data set.\r\n \r\n [v] Furthermore, the model is applied back to the tweet set to map each topic to the most probable topic number(topic#). 
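# --- Illustrative sketch (added; not part of the original script) ---
# Step [i] in miniature: read the R-exported CSV and drop its two leading
# bookkeeping columns in one call (the path is shortened here for illustration).
import pandas as pd
df = pd.read_csv("Semicleantweet.csv", header=None, encoding='Latin-1')
df = df.drop(columns=[0, 1])  # equivalent to the two chained drops used below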
The resulting \r\n auto-mated mapping/annotatons are exported to a csv file.\r\n \r\n [vi] The annotated csv file is compared with the 3 different human/manual annotated files and accuracy is calculated for approx 300-350 tweets.\r\n Three diffferent manual annotations were done for minimizing the error percentage and to calculated precised accuracy. \r\n\r\nNote* :: \r\n********\r\n Human annotation used for accuracy calculation of the model built as the paradign used is unsupervised modelling technique. \r\n 300-400 approx. tweets were compared based on human and auto-mated annotations and accuracy was calculated to be 85%. \r\n'''\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nPackages/Module:\r\n****************\r\n\r\n Modules/Packages list required for the installation & execution of various \r\n python functions for carrying out the necessary programs. These include nltk,\r\n gensim, panda, numpy etc.\r\n \r\n*Note* :: Some of the modules/packages are listed as not used because\r\n the commands are commented due to limited usage or used\r\n only at special times and places in order to check for \r\n particualr result.\r\n \r\n'''\r\n\r\nimport pandas as pd\r\nimport re\r\nimport nltk\r\nfrom nltk import word_tokenize\r\nfrom nltk.util import ngrams\r\nfrom collections import Counter\r\nfrom nltk.collocations import *\r\nfrom nltk import FreqDist\r\nimport string\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nimport csv\r\nimport gensim\r\nfrom gensim.corpora import Dictionary\r\nfrom gensim.models import CoherenceModel, LdaModel\r\nfrom gensim import corpora, models\r\nfrom gensim.utils import simple_preprocess\r\nfrom gensim.parsing.preprocessing import STOPWORDS\r\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\r\nfrom nltk.stem.porter import *\r\nimport numpy as np\r\nfrom pprint import pprint\r\nimport matplotlib.pyplot as plt\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nData Input and Attribute Removal:\r\n*********************************\r\n\r\n This section deals with the read IN function for the code.\r\n Input CSV file is read here and two un-necessary columns are \r\n being dropped so that only the required attribute is available \r\n for running the appropriate model. The file is imported from R \r\n therefore it automatically saves extra columns at the time of \r\n writing it to CSV. The dataset used for final results consisted\r\n of 1 Million tweets to process. \r\n \r\n'''\r\n\r\ndata=pd.read_csv(\"C:/Users/csame/Desktop/AIT 690/russian-troll-tweets/Semicleantweet.csv\",header=None,encoding='Latin-1')\r\ndata.drop(data.columns[[0]], axis=1, inplace=True)\r\ndata.drop(data.columns[[0]], axis=1, inplace=True)\r\n#data.head()\r\n#print(data)\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nData Cleaning & Stopwords Removal:\r\n**********************************\r\n\r\n Word lemmatizer imported and used. Removal of stopwords from the dataset\r\n thereby comparing it with the english stopwords bag of words which contains\r\n the most frequent stopwords that need to be removed. 
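# --- Illustrative sketch (added; not part of the original script) ---
# Effect of the English stopword filter on one tokenised tweet.
import nltk
stops = set(nltk.corpus.stopwords.words('english'))
tokens = ['this', 'is', 'the', 'election', 'everyone', 'talks', 'about']
print([t for t in tokens if t not in stops])  # -> ['election', 'everyone', 'talks']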
We compare the attribute\r\n with the english list of words and remove all those that gives a match.\r\n Punctuations are also removed from the dataset so that only the required\r\n words to model them into BoW(Bag of Words) remain and unnecessary characters, words, \r\n punctuations are removed completely.\r\n \r\n'''\r\n\r\nps=nltk.WordNetLemmatizer()\r\nstopwords = nltk.corpus.stopwords.words('english')\r\ndata.columns = ['content']\r\ndata_needed=pd.DataFrame({'content':data['content']})\r\ndef rem_stopwords(text):\r\n text= \"\".join([word for word in text if word not in string.punctuation])\r\n tokens= re.split('\\W+',text)\r\n text=\" \".join([ps.lemmatize(word) for word in tokens if word not in stopwords])\r\n return text\r\ndata_needed['cleaned_content'] = data_needed['content'].apply(lambda x: rem_stopwords(str(x).lower()))#\r\nprint(data_needed['cleaned_content'])\r\ndata_clean = data_needed[['cleaned_content']].copy()\r\ndata_clean.to_csv('C:/Users/csame/Desktop/AIT 690/russian-troll-tweets/cleaneddata1.csv', encoding = 'Latin-1')\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nData Lemmatizing, Stemming & Corpus Formation:\r\n********************************************** \r\n\r\n Steps invloved to read, convert and preprocess the file and to \r\n Generate Dictionary of the processed documents:\r\n \r\n 1. The data file written to csv after cleaning/removal of stopwords\r\n is imported here again. The file contains a lot of special\r\n characters and symbols which were important for visualization\r\n purpose but were read as float. Due to this, the pre-processing map\r\n was not able to execute without errors, which termed the file as float \r\n and not string for operations to be performed on. Therefore, the file \r\n was read again as CSV but with one modification which is, the complete\r\n file is being converted into String format and then read IN for further\r\n execution as all the characters and words are considered to be String.\r\n\r\n 2. Document of the dataframe is created and document is processed \r\n with lemmatizer and stemmer thereby cleaning the data a bit more \r\n for trimming it for similar words and so on. Snowball Stemmer is\r\n utilized for the stemming of the document.\r\n \r\n 3. After the Stemming and Lemmatizing of the document,\r\n the processed document is converted into a dictionary\r\n using the gensim library/module. \r\n \r\n 4. Applying a few filters on the dictionary and converting\r\n it into \"Bag Of Words\" format and saving that for future\r\n model building. Doc2bow method has been applied to carry out the \r\n procedure. 
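# --- Illustrative sketch (added; not part of the original script) ---
# Steps 3-4 in miniature: a gensim Dictionary maps each token to an id, and
# doc2bow turns a tokenised document into sparse (token_id, count) pairs.
from gensim.corpora import Dictionary
docs = [['vote', 'trump', 'vote'], ['vote', 'clinton']]
d = Dictionary(docs)
print(d.token2id)           # e.g. {'trump': 0, 'vote': 1, 'clinton': 2}
print(d.doc2bow(docs[0]))   # -> [(0, 1), (1, 2)]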
\r\n \r\n'''\r\n\r\ndata= []\r\nwith open('C:/Users/csame/Desktop/AIT 690/russian-troll-tweets/cleaneddata1.csv','r+') as reader_file:\r\n for read_ln in reader_file.read().split('\\n')[:-1]:\r\n x = []\r\n for a in read_ln.split(','):\r\n try:\r\n x.append(int(a))\r\n except ValueError as ve:\r\n x.append(a.encode('utf-8').decode('utf-8'))\r\n data.append(x)\r\ndata = pd.DataFrame(data[1:],columns = ['index','cleaned_content'])\r\n\r\n#print(data.dtypes)\r\n#print(data)\r\n\r\ndata1 = data[['cleaned_content']]\r\ndata1['index'] = data1.index\r\ndocuments = data1\r\n\r\n#print(documents)\r\n\r\n#len(documents)\r\n\r\nnp.random.seed(2018)\r\nnltk.download('wordnet')\r\nprint(WordNetLemmatizer().lemmatize('went', pos='v'))\r\n\r\nstemmer = SnowballStemmer('english')\r\ndef lemmatize_stemming(text):\r\n return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))\r\n\r\ndef preprocess(text):\r\n result = []\r\n for token in gensim.utils.simple_preprocess(text):\r\n if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:\r\n result.append(lemmatize_stemming(token))\r\n return result\r\n\r\n\r\nprocessed_docs = (documents['cleaned_content']).map(preprocess)\r\nprocessed_docs[:15]\r\n\r\ndictionary = gensim.corpora.Dictionary(processed_docs)\r\n\r\ndictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)\r\nbow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nRough/Verification Code:\r\n************************\r\n\r\n For verifying the execution of various commands performed \r\n on the data set, there are usually few sets of commands to\r\n execute and check if the output is correct or verifying one\r\n small part of the dataset and code it to give the result.\r\n These are the few executale commands which were used in the code \r\n to check for few results based upon our need at particular times.\r\n These are being commented so that it doesnt affect the running of\r\n the original code. \r\n \r\n'''\r\n\r\n#doc_sample = documents[(documents['index']) == 123].values[0][0]\r\n#print('original document: ')\r\n\r\n#words = []\r\n\r\n#for word in doc_sample.split(' '):\r\n# words.append(word)\r\n#print(words)\r\n\r\n#print('\\n\\n tokenized and lemmatized document: ')\r\n#print(preprocess(doc_sample))\r\n\r\n#count = 0\r\n#for k, v in dictionary.iteritems():\r\n# print(k, v)\r\n# count += 1\r\n# if count > 40:\r\n# break\r\n\r\n#bow_doc_145900 = bow_corpus[145900]\r\n#\r\n#for i in range(len(bow_doc_145900)):\r\n# print(\"Word {} (\\\"{}\\\") appears {} time.\".format(bow_doc_145900[i][0], \r\n# dictionary[bow_doc_145900[i][0]], \r\n# bow_doc_145900[i][1]))\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nLatent Dirichlet Allocation :: Topic Modelling\r\n**********************************************\r\n\r\nStep taken into consideration for carrying out LDA modelling\r\non the obtained document file:\r\n \r\n 1. Choosing the size/limit of the model running or topic\r\n selection list thereby setting the upper and lower\r\n parameter of execution of the model run. The code\r\n executes for 14 values.\r\n \r\n 2. Generating Different models based on the limits given\r\n and formulating several Coherence values for them. 
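# --- Illustrative sketch (added; not part of the original script) ---
# c_v coherence in isolation: score a fixed set of topic words against a
# tokenised corpus; the selection loop below does this once per LDA model.
# The toy texts/topics here are made up purely to show the call shape.
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
texts = [['vote', 'poll', 'win'], ['vote', 'news'], ['poll', 'win']]
cm = CoherenceModel(topics=[['vote', 'poll'], ['news', 'win']],
                    texts=texts, dictionary=Dictionary(texts), coherence='c_v')
print(cm.get_coherence())  # higher means more semantically consistent topics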
\r\n 14 different sets of Topic sets are created from\r\n running the model loop.\r\n \r\n 3. Comparing the obtained coherence values in order to\r\n select the most appropriate model and the topics\r\n modelled is the next task. For that purpose, the\r\n plot between coherence value and model number is \r\n obtained. The graph depicts value of coherence and model\r\n number and exhibits a technique to choose the optimum K.\r\n \r\n 4. Checking the obtained result of the optimum model and\r\n saving it to .txt* format for future importing if used\r\n anywhere else for the same tweet. \r\n \r\n 5. Writing and saving the Topics formulated into a text file\r\n to use it futher for manual annotations of the tweet set and \r\n to use it anytime later. \r\n\r\nSummary:\r\n*******\r\n Formulating the model based on conherence score and selecting optimum value of Topic number based on the plot\r\n between coherence score and topics. Saving the Apporpriate LDA model and topic file for future uses. \r\n \r\n'''\r\n\r\nlimit=15 \r\nstart=1 \r\nstep=1\r\ncoherence_values = []\r\nmodel_list = []\r\nfor num_topics in range(start, limit, step):\r\n #lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=num_topics, id2word=dictionary, passes=2, workers=2)\r\n lda_model = LdaModel(corpus=bow_corpus, num_topics=num_topics, id2word=dictionary)\r\n model_list.append(lda_model)\r\n coherencemodel = CoherenceModel(model=lda_model, texts=processed_docs, dictionary=dictionary, coherence='c_v')\r\n coherence_values.append(coherencemodel.get_coherence())\r\n \r\n#print(coherence_values) #Cross-Verifying the Coherence value\r\n#print(*coherence_values, sep = \"\\n\") \r\n\r\nlimit=15; start=1; step=1;\r\nx = range(start, limit, step)\r\nplt.xlabel(\"Topic_Count\",)\r\nplt.ylabel(\"Model_coherence_value\")\r\nplt.plot(x,coherence_values,'red')\r\n\r\n#model_list[13]\r\n#model_list[13].save(\"C:/Users/csame/Desktop/AIT 690/russian-troll-tweets/Optimum_LDA_Model.txt\") \r\n\r\n#print(model_list[13].show_topics())\r\n\r\n\r\n\r\ntopics = lda_model.print_topics(num_words=10) #Verifying and Printing the Optimum K-values\r\nfor i,topic in topics:\r\n print(\"Topic Number:\", i , \"(\", topic, \")\")\r\n print(\"\\n\")\r\n \r\n#with open('C:/Users/csame/Desktop/AIT 690/russian-troll-tweets/TopicsSelected.txt', 'w') as f:\r\n# for item in model_list[13].show_topics():\r\n# f.write(\"%s\\n\" % item)\r\n#model_list[13].show_topics().(\"C:/Users/csame/Desktop/AIT 690/russian-troll-tweets/TopicsSelected.csv\")\r\n \r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n'''\r\nMapping of topics & Sorting them to dataframe:\r\n**********************************************\r\n\r\n The topics are formulated and used to annotate the original tweet\r\n file for topic modelling. The topic attribute or dataframe is \r\n obtained and then mapped onto the original file for accuracy generation\r\n via comparison with manual annotations. 
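# --- Illustrative sketch (added; not part of the original script) ---
# A programmatic alternative to eyeballing the coherence plot above: take the
# model with the highest score (model_list/coherence_values come from the loop,
# and k = index + 1 because the loop starts at num_topics=1).
import numpy as np
best = int(np.argmax(coherence_values))
print('best k =', best + 1, 'coherence =', coherence_values[best])
best_model = model_list[best]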
\r\n The Resulting dataframe is saved and written to a csv for carrying out the\r\n mapping of manual annotations and later formualizing accuracy of the model.\r\n\r\n'''\r\n#Sorting and assigning the Topic#\r\nlda_new=lda_model[bow_corpus]\t\t\t\t\t\t\t\t\t\t\t\t\t\r\ni=0\r\nsorted_lda = []\r\nwhile i= 1 )\n assert( all(isinstance(item, int) for item in linear_layer_sizes) == True )\n assert( all(isinstance(item, float) for item in linear_layer_dropouts) == True )\n\n # TODO: Check both these paths\n full_file_list = [file for file in os.listdir(training_data_path) if file.endswith(\"csv\")]\n random.shuffle(full_file_list)\n\n print('Total number of files: ', len(full_file_list))\n\n #TODO: This is a random number that has no meaning\n file_list = random.sample(full_file_list, 350)\n #file_list = full_file_list\n\n if not os.path.isfile(os.path.join(training_data_path, \"headers.txt\")):\n error_message = 'Path: ' + training_data_path + ' does not contain `headers.txt`'\n raise FileNotFoundError(error_message)\n\n header_file = os.path.join(training_data_path, \"headers.txt\")\n\n training_list, validation_list = train_test_split(file_list, test_size=0.25, random_state=424242)\n\n print('Logs in training dataset: ', len(training_list))\n print('Logs in validation dataset: ', len(validation_list))\n\n #TODO: Need to be able to take any input target - for now hardcoding to 'Sonic'\n training_dataset = structured_data_loader.StructuredDataset(training_data_path, header_file, training_list, ['Sonic'])\n validation_dataset = structured_data_loader.StructuredDataset(training_data_path, header_file, validation_list, ['Sonic'])\n\n y, x = training_dataset[0]\n continuous_features = len(x)\n print('Number of continuous features: ', continuous_features)\n\n training_loader = data.DataLoader(training_dataset, batch_size=batch_size, num_workers=12, shuffle=True, drop_last=True)\n validation_loader = data.DataLoader(validation_dataset, batch_size=batch_size, num_workers=8, shuffle=False, drop_last=False)\n\n print('Number of minibatches in training: ', len(training_loader))\n print('Number of minibatches in validation: ', len(validation_loader))\n\n model_name = 'SonicModel_' + '_'.join([str(i) for i in linear_layer_sizes]) + '_' +\\\n '_'.join([str(i) for i in linear_layer_dropouts]) + \\\n str(continuous_features) + '.pkl'\n\n model_path = os.path.join(model_save_location, model_name)\n\n if visualization:\n vis = visdom.Visdom()\n\n tr_win = vis.line(X=np.zeros(1,), Y=np.zeros(1,), \n opts=dict(xlabel='Minibatches', \n ylabel='Training loss',\n title='Training loss',\n legend=['Loss']))\n\n val_win = vis.line(X=np.zeros(1,), Y=np.zeros(1,),\n opts=dict(xlabel='Minibatches',\n ylabel='Validation loss',\n title='Validation loss',\n legend=['Loss']))\n\n embedding_sizes = None\n n_continuous_variables = continuous_features\n embedding_dropout = None\n output_size = 1\n use_batchnorm = True\n\n print('Hidden layer sizes: ', linear_layer_sizes)\n print('Hidden layer dropouts: ', linear_layer_dropouts)\n model = structured_models.StructuredModel(embedding_sizes, n_continuous_variables, embedding_dropout, output_size, linear_layer_sizes, linear_layer_dropouts)\n model = nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))\n model.cuda()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.99, weight_decay=5e-4)\n # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=8, verbose=True)\n\n best_val_loss = 10000000000.0\n\n for epoch 
in range(n_epoch):\n\n model.train()\n \n for i, (y, x) in tqdm(enumerate(training_loader)):\n \n features = x.float().cuda()\n\n y_expected = y.float().cuda()\n y_predicted = model(features)\n loss = torch.norm(y_predicted-y_expected) / np.sqrt(batch_size)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n #TODO: Enable visdom visualization\n if visualization and ((i+1) % 200 == 0):\n vis.line(X=torch.ones((1,)) * (i + len(training_loader) * epoch), Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(), win=tr_win, update='append')\n\n if (i+1) % 1000 == 0:\n print(\"Epoch [%d/%d], Minibatch: %d, Training loss: %.4f\" %(epoch+1, n_epoch, i, loss.data[0]))\n \n model.eval()\n \n val_batch_count = 0\n val_loss = 0.0\n \n for j, (y, x) in tqdm(enumerate(validation_loader)):\n\n with torch.no_grad():\n val_batch_count += 1\n features = x.float().cuda(non_blocking=True)\n\n y_expected = y.float().cuda(non_blocking=True)\n y_predicted = model(features)\n\n batch_loss = torch.norm(y_predicted-y_expected) / np.sqrt(batch_size)\n\n val_loss += batch_loss\n\n #TODO: Enable visdom visualization\n if visualization and ((j+1) % 200 == 0):\n vis.line(X=torch.ones((1,)) * (j + len(validation_loader) * epoch), Y=torch.Tensor([batch_loss.data[0]]).unsqueeze(0).cpu(), win=val_win, update='append')\n\n if (j+1) % 1000 == 0:\n print(\"Epoch [%d/%d], Minibatch: %d, Validation loss: %.4f\" %(epoch+1, n_epoch, j, batch_loss.data[0]))\n\n val_loss = val_loss / float(val_batch_count)\n \n # scheduler.step(val_loss.cpu().data[0])\n \n if val_loss.cpu().data[0] <= best_val_loss:\n print(\"Best validation error: %.4f, Current validation error: %.4f\" %(best_val_loss, val_loss.cpu().data[0])) \n best_val_loss = val_loss.cpu().data[0]\n \n # TODO: Enable restarting - needs to write more than just the state\n state = {'epoch': epoch+1,\n 'model_state': model.state_dict(),\n 'optimizer_state': optimizer.state_dict(),}\n \n print('Saving model at: ', model_path)\n torch.save(state, model_path)\n\nif __name__ == '__main__':\n help_string = \"PyTorch LogNet training\"\n\n parser = argparse.ArgumentParser(description=help_string)\n\n parser.add_argument('-t', '--training-path', type=str, metavar='DIR', help='Path where the training data exists', required=True)\n parser.add_argument('-m', '--model-location', type=str, metavar='DIR', help='Path where the trained model needs to be stored', required=True)\n\n parser.add_argument('-bs', '--batch-size', type=int, metavar='N', help='Batch size (default: 20)', default=20, required=False)\n parser.add_argument('-epoch', '--epoch-count', type=int, metavar='N', help='Number of epochs (default: 40)', default=40, required=False)\n parser.add_argument('-lr', '--learning-rate', type=float, metavar='LR', help='Initial learning rate (default: 0.005)', default=5e-3, required=False)\n parser.add_argument('--visdom', type=bool, help='Enable realtime visualization (default: True)', default=True, required=False)\n\n parser.add_argument('-hl', '--hidden-layers', type=int, nargs='+', metavar='LIST', default=[100, 50, 25],\n help='Size of linear layers (default: [100, 50, 25])', required=False)\n parser.add_argument('-do', '--dropout-layers', type=float, nargs='+', metavar='LIST', default=[0.01, 0.005, 0.005],\n help='Dropout of linear layers (default: [0.01, 0.005, 0.005])', required=False)\n\n args = parser.parse_args()\n\n main(args)\n\n 
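# --- Illustrative sketch (added; not part of the original trainer) ---
# The objective used in the loops above, torch.norm(residual)/sqrt(batch_size),
# is the per-batch root-mean-square error (the model's output size is 1 here):
import torch
y_pred = torch.tensor([1.0, 2.0, 3.0, 4.0])
y_true = torch.tensor([1.0, 2.0, 2.0, 4.0])
print(torch.norm(y_pred - y_true) / (len(y_pred) ** 0.5))  # tensor(0.5000)
# identical to torch.sqrt(torch.mean((y_pred - y_true) ** 2))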
sys.exit(1)\n\n","sub_path":"LogNet-master/LogNet-master/examples/regression_train.py","file_name":"regression_train.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"200108083","text":"from data_loader import DataLoader\nfrom gensim.models import KeyedVectors\nimport numpy as np\nimport pickle\nimport random\nfrom utils import make_emb_for_tweet, make_tweet_mat_emb, get_topics_for_tweets\n\ndef extract_timestamp(created_at):\n if created_at.startswith('20'):\n date, time = created_at.split()\n return date, time\n pieces = created_at.split()\n month_name, day, year = pieces[1].lower(), pieces[2], pieces[-1]\n if month_name == 'jan':\n month = '01'\n elif month_name == 'feb':\n month = '02'\n elif month_name == 'mar':\n month = '03'\n elif month_name == 'apr':\n month = '04'\n elif month_name == 'may':\n month = '05'\n elif month_name == 'jun':\n month = '06'\n elif month_name == 'jul':\n month = '07'\n elif month_name == 'aug':\n month = '08'\n elif month_name == 'sep':\n month = '09'\n elif month_name == 'oct':\n month = '10'\n elif month_name == 'nov':\n month = '11'\n else:\n assert(month_name == 'dec')\n month = '12'\n date = '{}-{}-{}'.format(year, month, day)\n time = pieces[3]\n return date, time\n\ndef sort_tids_by_timestamp(tids, dl):\n time_tid_tuples = []\n for tid in tids:\n date, time = extract_timestamp(dl.get_tweet(tid)['created_at'])\n time_tid_tuples.append((date, time, tid))\n sorted_tids = sorted(time_tid_tuples, key=lambda x: (x[0], x[1]))\n return sorted_tids\n\ndef next_day(date):\n y0, m0, d0 = [int(tok) for tok in date.split('-')]\n y1, m1, d1 = y0, m0, d0\n if d0 == 28 and m0 == 4 and y0 % 4 != 0: # 4/28/yy to 5/1/yy in non-leap year\n d1 = 1\n m1 += 1\n elif d0 == 29 and m0 == 4: # 4/29/yy to 5/1/yy in leap year\n d1 = 1\n m1 += 1\n elif d0 == 30 and m0 in {2, 4, 6, 9, 11}: # -/30/yy to -/1/yy\n d1 = 1\n m1 += 1\n elif d0 == 31 and m0 in {1, 3, 5, 7, 8, 10}: # -/31/yy to -/1/yy\n d1 = 1\n m1 += 1\n elif d0 == 31 and m0 == 12: # new year's\n d1 = 1\n m1 = 1\n y1 += 1\n else: # just a normal increment\n d1 += 1\n y1, m1, d1 = str(y1), str(m1), str(d1)\n if len(m1) == 1:\n m1 = '0' + m1\n if len(d1) == 1:\n d1 = '0' + d1\n if len(y1) == 2:\n y1 = '20' + y1\n return '{}-{}-{}'.format(y1, m1, d1)\n\ndef find_continuous_intervals(sorted_dates):\n intervals = []\n i = 0\n while i < len(sorted_dates):\n j = i\n while j < len(sorted_dates)-1 and \\\n sorted_dates[j+1] in {sorted_dates[j], next_day(sorted_dates[j])}: # either same day or next day\n j += 1\n intervals.append((i, j))\n i = j+1\n return intervals\n\ndef make_emb_seq_for_user(uid, dl, wv):\n tids = dl.get_tweets_by_user(uid)\n sorted_time_tid_tups = sort_tids_by_timestamp(tids, dl)\n embs = []\n dates = []\n for date, time, tid in sorted_time_tid_tups:\n emb = make_emb_for_tweet(wv, dl.get_tweet(tid)['tokens'])\n if emb is not None: # there is at least one word vec for this tweet's tokens\n embs.append(emb)\n dates.append(date)\n intervals = find_continuous_intervals(dates)\n lengths = [i[1]-i[0]+1 for i in intervals]\n return np.array(embs), lengths\n\ndef get_all_emb_seqs(uids, dl, wv):\n all_embs = []\n all_lengths = []\n for uid in uids:\n embs, lengths = make_emb_seq_for_user(uid, dl, wv)\n all_embs.append(embs)\n all_lengths.append(lengths)\n X = np.concatenate(all_embs, axis=0)\n L = np.concatenate(all_lengths, axis=0)\n return X, L\n\ndef make_topic_seq_for_user(uid, dl, wv, mix):\n tids = 
dl.get_tweets_by_user(uid)\n sorted_time_tid_tups = sort_tids_by_timestamp(tids, dl)\n dates = []\n for date, time, tid in sorted_time_tid_tups:\n emb = make_emb_for_tweet(wv, dl.get_tweet(tid)['tokens'])\n if emb is not None: # there is at least one word vec for this tweet's tokens\n dates.append(date)\n topics = get_topics_for_tweets(dl, wv, mix, tids)\n intervals = find_continuous_intervals(dates)\n lengths = [i[1]-i[0]+1 for i in intervals]\n return np.array(topics), lengths\n\ndef get_all_topic_seqs(uids, dl, wv, mix):\n all_topics = []\n all_lengths = []\n for uid in uids:\n topics, lengths = make_topic_seq_for_user(uid, dl, wv, mix)\n all_topics.append(topics)\n all_lengths.append(lengths)\n X = np.concatenate(all_topics, axis=0)\n L = np.concatenate(all_lengths, axis=0)\n return X, L\n\ndef make_train_test(dl, cutoff=1000, train_prop=.7):\n all_users = list(dl.get_user_ids())\n valid_users = []\n for uid in all_users:\n if len(dl.get_tweets_by_user(uid)) >= cutoff:\n valid_users.append(uid)\n np.random.shuffle(valid_users)\n train_cutoff = int(len(valid_users) * train_prop)\n return valid_users[:train_cutoff], valid_users[train_cutoff:]\n\nif __name__ == '__main__':\n # wv = KeyedVectors.load_word2vec_format('./models/word_embs_d300.bin', binary=True)\n dl = DataLoader()\n # mix = pickle.load(open('./models/tweet_gmm_k10.pkl', 'rb'))\n # users = range(100,105)\n # X, lengths = get_all_topic_sequences(users, dl, wv, mix)\n # print('Num sequences:', len(lengths))\n # print(X.shape)\n # print(X[0])\n train_uids, test_uids = make_train_test(dl)\n print(len(train_uids), len(test_uids))\n pickle.dump(train_uids, open('./saved/train_uids.pkl', 'wb'))\n pickle.dump(test_uids, open('./saved/test_uids.pkl', 'wb'))\n\n\n\n","sub_path":"markov_preprocessing.py","file_name":"markov_preprocessing.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"129568506","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@desc: 首页\n@time: 2018/6/23\n\"\"\"\n\nfrom operator import itemgetter\nfrom os import path\nfrom controller.base import BaseHandler\n\n\nclass InvalidPageHandler(BaseHandler):\n def get(self):\n if '/api/' in self.request.path:\n self.set_status(404, reason='Not found')\n return self.finish()\n if path.exists(path.join(self.get_template_path(), self.request.path.replace('/', ''))):\n return self.render(self.request.path.replace('/', ''))\n self.set_status(404, reason='Not found')\n self.render('_404.html')\n\n\nclass ApiTable(BaseHandler):\n URL = '/api'\n\n def get(self):\n \"\"\" 显示网站所有API和路由的响应类 \"\"\"\n handlers = []\n for cls in self.application.handlers:\n handler = cls(self.application, self.request)\n for method in handler._get_methods().split(','):\n method = method.strip()\n if method != 'OPTIONS':\n func = cls.__dict__[method.lower()]\n if isinstance(cls.URL, list):\n for i, url in enumerate(cls.URL):\n handlers.append((url, method, func.__doc__))\n elif isinstance(cls.URL, tuple):\n handlers.append((cls.URL[0], method, func.__doc__))\n else:\n handlers.append((cls.URL, method, func.__doc__))\n handlers.sort(key=itemgetter(0))\n self.render('_api.html', version=self.application.version, handlers=handlers)\n","sub_path":"controller/views/invalid.py","file_name":"invalid.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271156470","text":"import numpy as np\n\nfrom 
cadCAD.configuration.utils import config_sim\nfrom simulations.validation.conviction_helpers import *\n#import networkx as nx\nfrom scipy.stats import expon, gamma\n\n\n#functions for partial state update block 1\n\n#Driving processes: arrival of participants, proposals and funds\n##-----------------------------------------\ndef gen_new_participant(network, new_participant_holdings):\n \n i = len([node for node in network.nodes])\n \n network.add_node(i)\n network.nodes[i]['type']=\"participant\"\n \n s_rv = np.random.rand() \n network.nodes[i]['sentiment'] = s_rv\n network.nodes[i]['holdings']=new_participant_holdings\n \n for j in get_nodes_by_type(network, 'proposal'):\n network.add_edge(i, j)\n \n rv = np.random.rand()\n a_rv = 1-4*(1-rv)*rv #polarized distribution\n network.edges[(i, j)]['affinity'] = a_rv\n network.edges[(i,j)]['tokens'] = a_rv*network.nodes[i]['holdings']\n network.edges[(i, j)]['conviction'] = 0\n \n return network\n \n\nscale_factor = 1000\n\ndef gen_new_proposal(network, funds, supply, total_funds, trigger_func):\n j = len([node for node in network.nodes])\n network.add_node(j)\n network.nodes[j]['type']=\"proposal\"\n \n network.nodes[j]['conviction']=0\n network.nodes[j]['status']='candidate'\n network.nodes[j]['age']=0\n \n rescale = scale_factor*funds/total_funds\n r_rv = gamma.rvs(3,loc=0.001, scale=rescale)\n network.node[j]['funds_requested'] = r_rv\n \n network.nodes[j]['trigger']= trigger_func(r_rv, funds, supply)\n \n participants = get_nodes_by_type(network, 'participant')\n proposing_participant = np.random.choice(participants)\n \n for i in participants:\n network.add_edge(i, j)\n if i==proposing_participant:\n network.edges[(i, j)]['affinity']=1\n else:\n rv = np.random.rand()\n a_rv = 1-4*(1-rv)*rv #polarized distribution\n network.edges[(i, j)]['affinity'] = a_rv\n \n network.edges[(i, j)]['conviction'] = 0\n network.edges[(i,j)]['tokens'] = 0\n return network\n \n \n\ndef driving_process(params, step, sL, s):\n \n #placeholder plumbing for random processes\n arrival_rate = 10/s['sentiment']\n rv1 = np.random.rand()\n new_participant = bool(rv1<1/arrival_rate)\n if new_participant:\n h_rv = expon.rvs(loc=0.0, scale=1000)\n new_participant_holdings = h_rv\n else:\n new_participant_holdings = 0\n \n network = s['network']\n affinities = [network.edges[e]['affinity'] for e in network.edges ]\n median_affinity = np.median(affinities)\n \n proposals = get_nodes_by_type(network, 'proposal')\n fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate' ]\n \n funds = s['funds']\n total_funds_requested = np.sum(fund_requests)\n \n proposal_rate = 10/median_affinity * total_funds_requested/funds\n rv2 = np.random.rand()\n new_proposal = bool(rv2<1/proposal_rate)\n \n sentiment = s['sentiment']\n funds = s['funds']\n scale_factor = 1+4000*sentiment**2\n \n #this shouldn't happen but expon is throwing domain errors\n if scale_factor > 1: \n funds_arrival = expon.rvs(loc = 0, scale = scale_factor )\n else:\n funds_arrival = 0\n \n return({'new_participant':new_participant,\n 'new_participant_holdings':new_participant_holdings,\n 'new_proposal':new_proposal, \n 'funds_arrival':funds_arrival})\n\n \n#Mechanisms for updating the state based on driving processes\n##---\ndef update_network(params, step, sL, s, _input):\n \n print(params)\n print(type(params))\n \n network = s['network']\n funds = s['funds']\n supply = s['supply']\n trigger_func = params['trigger_func']\n\n new_participant = 
_input['new_participant'] #T/F\n new_proposal = _input['new_proposal'] #T/F\n\n if new_participant:\n new_participant_holdings = _input['new_participant_holdings']\n network = gen_new_participant(network, new_participant_holdings)\n \n if new_proposal:\n network= gen_new_proposal(network,funds,supply )\n \n #update age of the existing proposals\n proposals = get_nodes_by_type(network, 'proposal')\n \n for j in proposals:\n network.nodes[j]['age'] = network.nodes[j]['age']+1\n if network.nodes[j]['status'] == 'candidate':\n requested = network.nodes[j]['funds_requested']\n network.nodes[j]['trigger'] = trigger_func(requested, funds, supply)\n else:\n network.nodes[j]['trigger'] = np.nan\n \n key = 'network'\n value = network\n \n return (key, value)\n\ndef increment_funds(params, step, sL, s, _input):\n \n funds = s['funds']\n funds_arrival = _input['funds_arrival']\n\n #increment funds\n funds = funds + funds_arrival\n \n key = 'funds'\n value = funds\n \n return (key, value)\n\ndef increment_supply(params, step, sL, s, _input):\n \n supply = s['supply']\n supply_arrival = _input['new_participant_holdings']\n\n #increment funds\n supply = supply + supply_arrival\n \n key = 'supply'\n value = supply\n \n return (key, value)\n\n#functions for partial state update block 2\n\n#Driving processes: completion of previously funded proposals\n##-----------------------------------------\n\ndef check_progress(params, step, sL, s):\n \n network = s['network']\n proposals = get_nodes_by_type(network, 'proposal')\n \n completed = []\n for j in proposals:\n if network.nodes[j]['status'] == 'active':\n grant_size = network.nodes[j]['funds_requested']\n base_completion_rate=params['base_completion_rate']\n likelihood = 1.0/(base_completion_rate+np.log(grant_size))\n if np.random.rand() < likelihood:\n completed.append(j)\n \n return({'completed':completed})\n\n\n#Mechanisms for updating the state based on check progress\n##---\ndef complete_proposal(params, step, sL, s, _input):\n \n network = s['network']\n participants = get_nodes_by_type(network, 'participant')\n \n completed = _input['completed']\n for j in completed:\n network.nodes[j]['status']='completed'\n for i in participants:\n force = network.edges[(i,j)]['affinity']\n sentiment = network.node[i]['sentiment']\n network.node[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)\n \n key = 'network'\n value = network\n \n return (key, value)\n\ndef update_sentiment_on_completion(params, step, sL, s, _input):\n \n network = s['network']\n proposals = get_nodes_by_type(network, 'proposal')\n completed = _input['completed']\n \n grants_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='active'])\n \n grants_completed = np.sum([network.nodes[j]['funds_requested'] for j in completed])\n \n sentiment = s['sentiment']\n \n force = grants_completed/grants_outstanding\n mu = params['sentiment_decay']\n if (force >=0) and (force <=1):\n sentiment = get_sentimental(sentiment, force, mu)\n else:\n sentiment = get_sentimental(sentiment, 0, mu)\n \n \n key = 'sentiment'\n value = sentiment\n \n return (key, value)\n\ndef get_sentimental(sentiment, force, decay=0):\n mu = decay\n sentiment = sentiment*(1-mu) + force\n \n if sentiment > 1:\n sentiment = 1\n \n return sentiment\n\n#functions for partial state update block 3\n\n#Decision processes: trigger function policy\n##-----------------------------------------\n\ndef trigger_function(params, step, sL, s):\n \n network = s['network']\n funds = 
s['funds']\n supply = s['supply']\n proposals = get_nodes_by_type(network, 'proposal')\n tmin = params['tmin']\n \n accepted = []\n triggers = {}\n for j in proposals:\n if network.nodes[j]['status'] == 'candidate':\n requested = network.nodes[j]['funds_requested']\n age = network.nodes[j]['age']\n threshold = trigger_threshold(requested, funds, supply)\n if age > tmin:\n conviction = network.nodes[j]['conviction']\n if conviction >threshold:\n accepted.append(j)\n else:\n threshold = np.nan\n \n triggers[j] = threshold\n \n \n \n return({'accepted':accepted, 'triggers':triggers})\n\ndef decrement_funds(params, step, sL, s, _input):\n \n funds = s['funds']\n network = s['network']\n accepted = _input['accepted']\n\n #decrement funds\n for j in accepted:\n funds = funds - network.nodes[j]['funds_requested']\n \n key = 'funds'\n value = funds\n \n return (key, value)\n\ndef update_proposals(params, step, sL, s, _input):\n \n network = s['network']\n accepted = _input['accepted']\n triggers = _input['triggers']\n participants = get_nodes_by_type(network, 'participant')\n proposals = get_nodes_by_type(network, 'proposals')\n sensitivity = params['sensitivity']\n \n for j in proposals:\n network.nodes[j]['trigger'] = triggers[j]\n \n #bookkeeping conviction and participant sentiment\n for j in accepted:\n network.nodes[j]['status']='active'\n network.nodes[j]['conviction']=np.nan\n #change status to active\n for i in participants:\n \n #operating on edge = (i,j)\n #reset tokens assigned to other candidates\n network.edges[(i,j)]['tokens']=0\n network.edges[(i,j)]['conviction'] = np.nan\n \n #update participants sentiments (positive or negative) \n affinities = [network.edges[(i,p)]['affinity'] for p in proposals if not(p in accepted)]\n if len(affinities)>1:\n max_affinity = np.max(affinities)\n force = network.edges[(i,j)]['affinity']-sensitivity*max_affinity\n else:\n force = 0\n \n #based on what their affinities to the accepted proposals\n network.nodes[i]['sentiment'] = get_sentimental(network.nodes[i]['sentiment'], force, False)\n \n \n key = 'network'\n value = network\n \n return (key, value)\n\ndef update_sentiment_on_release(params, step, sL, s, _input):\n \n network = s['network']\n proposals = get_nodes_by_type(network, 'proposal')\n accepted = _input['accepted']\n \n proposals_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate'])\n \n proposals_accepted = np.sum([network.nodes[j]['funds_requested'] for j in accepted])\n \n sentiment = s['sentiment']\n force = proposals_accepted/proposals_outstanding\n if (force >=0) and (force <=1):\n sentiment = get_sentimental(sentiment, force, False)\n else:\n sentiment = get_sentimental(sentiment, 0, False)\n \n key = 'sentiment'\n value = sentiment\n \n return (key, value)\n\ndef participants_decisions(params, step, sL, s):\n network = s['network']\n participants = get_nodes_by_type(network, 'participant')\n proposals = get_nodes_by_type(network, 'proposal')\n candidates = [j for j in proposals if network.nodes[j]['status']=='candidate']\n sensitivity = params['sensitivity']\n \n gain = .01\n delta_holdings={}\n proposals_supported ={}\n for i in participants:\n force = network.nodes[i]['sentiment']-sensitivity\n delta_holdings[i] = network.nodes[i]['holdings']*gain*force\n \n support = []\n for j in candidates:\n affinity = network.edges[(i, j)]['affinity']\n cutoff = sensitivity*np.max([network.edges[(i,p)]['affinity'] for p in candidates])\n if cutoff <.5:\n cutoff = .5\n 
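# --- Illustrative sketch (added; not part of the original model code) ---
# The conviction bookkeeping in update_tokens below is an exponential
# accumulator, conviction_t = tokens_t + alpha * conviction_{t-1}; with a
# constant stake it converges to tokens / (1 - alpha):
alpha, tokens, conviction = 0.9, 10.0, 0.0
for _ in range(100):
    conviction = tokens + alpha * conviction
print(round(conviction, 2))  # -> 100.0 == tokens / (1 - alpha)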
\n if affinity > cutoff:\n support.append(j)\n \n proposals_supported[i] = support\n \n return({'delta_holdings':delta_holdings, 'proposals_supported':proposals_supported})\n\ndef update_tokens(params, step, sL, s, _input):\n \n network = s['network']\n delta_holdings = _input['delta_holdings']\n proposals = get_nodes_by_type(network, 'proposal')\n proposals_supported = _input['proposals_supported']\n participants = get_nodes_by_type(network, 'participant')\n alpha = params['alpha']\n \n for i in participants:\n network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]\n supported = proposals_supported[i]\n total_affinity = np.sum([ network.edges[(i, j)]['affinity'] for j in supported])\n for j in proposals:\n if j in supported:\n normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity\n network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']\n else:\n network.edges[(i, j)]['tokens'] = 0\n \n prior_conviction = network.edges[(i, j)]['conviction']\n current_tokens = network.edges[(i, j)]['tokens']\n network.edges[(i, j)]['conviction'] =current_tokens+alpha*prior_conviction\n \n for j in proposals:\n network.nodes[j]['conviction'] = np.sum([ network.edges[(i, j)]['conviction'] for i in participants])\n \n key = 'network'\n value = network\n \n return (key, value)\n\ndef update_supply(params, step, sL, s, _input):\n \n supply = s['supply']\n delta_holdings = _input['delta_holdings']\n delta_supply = np.sum([v for v in delta_holdings.values()])\n \n supply = supply + delta_supply\n \n key = 'supply'\n value = supply\n \n return (key, value)\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# The Partial State Update Blocks\npartial_state_update_blocks = [\n {\n 'policies': {\n #new proposals or new participants\n 'random': driving_process\n },\n 'variables': {\n 'network': update_network,\n 'funds':increment_funds,\n 'supply':increment_supply\n }\n },\n {\n 'policies': {\n 'completion': check_progress #see if any of the funded proposals completes\n },\n 'variables': { # The following state variables will be updated simultaneously\n 'sentiment': update_sentiment_on_completion, #note completing decays sentiment, completing bumps it\n 'network': complete_proposal #book-keeping\n }\n },\n {\n 'policies': {\n 'release': trigger_function #check each proposal to see if it passes\n },\n 'variables': { # The following state variables will be updated simultaneously\n 'funds': decrement_funds, #funds expended\n 'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment\n 'network': update_proposals #reset convictions, and participants sentiments\n #update based on affinities\n }\n },\n {\n 'policies': {\n 'participants_act': participants_decisions, #high sentiment, high affinity =>buy\n #low sentiment, low affinities => burn\n #assign tokens to top affinities\n },\n 'variables': {\n 'supply': update_supply,\n 'network': update_tokens #update everyones holdings\n #and their conviction for each proposal\n }\n }\n]\n\nn= 25 #initial participants\nm= 3 #initial proposals\n\ninitial_sentiment = .5\n\nnetwork, initial_funds, initial_supply, total_requested = initialize_network(n,m,total_funds_given_total_supply,trigger_threshold)\n\ninitial_conditions = {'network':network,\n 'supply': initial_supply,\n 'funds':initial_funds,\n 'sentiment': initial_sentiment}\n\n#power of 1 token forever\n# conviction_capactity = [2]\n# alpha = [1-1/cc for cc in conviction_capactity]\n# 
print(alpha)\n\nparams={\n 'sensitivity': [.75],\n 'tmin': [7], #unit days; minimum periods passed before a proposal can pass\n 'sentiment_decay': [.001], #termed mu in the state update function\n 'alpha': [0.5, 0.9],\n 'base_completion_rate': [10],\n 'trigger_func': [trigger_threshold]\n}\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Settings of general simulation parameters, unrelated to the system itself\n# `T` is a range with the number of discrete units of time the simulation will run for;\n# `N` is the number of times the simulation will be run (Monte Carlo runs)\ntime_periods_per_run = 250\nmonte_carlo_runs = 1\n\nsimulation_parameters = config_sim({\n 'T': range(time_periods_per_run),\n 'N': monte_carlo_runs,\n 'M': params\n})\n\n\nfrom cadCAD.configuration import append_configs\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# The configurations above are then packaged into a `Configuration` object\nappend_configs(\n initial_state=initial_conditions, #dict containing variable names and initial values\n partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions\n sim_configs=simulation_parameters #dict containing simulation parameters\n)\n\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\nfrom cadCAD import configs\n\nexec_mode = ExecutionMode()\nmulti_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)\nrun = Executor(exec_context=multi_proc_ctx, configs=configs)\nraw_result, tensor = run.execute()\n\n# exec_mode = ExecutionMode()\n# exec_context = ExecutionContext(context=exec_mode.multi_proc)\n# # run = Executor(exec_context=exec_context, configs=configs)\n# executor = Executor(exec_context, configs) # Pass the configuration object inside an array\n# raw_result, tensor = executor.execute() # The `main()` method returns a tuple; its first elements contains the raw results","sub_path":"simulations/validation/conviction_system_logic.py","file_name":"conviction_system_logic.py","file_ext":"py","file_size_in_byte":18126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323659796","text":"# Basic necessities\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\n# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n# Flask api\nfrom flask import Flask, jsonify\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n # f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/precipitation
\"\n # f\"/api/v1.0/stations
\"\n f\"/api/v1.0/stations
\"\n # f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/<start>
\"\n f\"/api/v1.0/<start>/<end>
\"\n f\"
\"\n f\"
\"\n f\"NOTE: For date routes, use format: %Y-%m-%d
\"\n f\"
\"\n f\"Extra Credit... ( ͡° ͜ʖ ͡°) \"\n )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n \"\"\"\n Convert the query results to a Dictionary using date as the key and prcp as the value.\n Return the JSON representation of your dictionary.\n \"\"\"\n\n # Unsure if the '1 year' stipulation is required here; 1 for yes, 0 for no\n single_year = 1\n\n if single_year:\n end_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n start_date = f\"{int(end_date[:4])-1}{end_date[4:]}\"\n rain_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= start_date).all() \n else:\n rain_data = session.query(Measurement.date, Measurement.prcp).all()\n\n rain_dict = []\n\n for date, prcp in rain_data:\n rain_dict.append({date: prcp})\n\n return jsonify(rain_dict)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n \"\"\"\n Return a JSON list of stations from the dataset.\n \"\"\"\n\n stations = session.query(Station.station, Station.name).all()\n\n return jsonify(stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n \"\"\"\n query for the dates and temperature observations from a year from the last data point.\n Return a JSON list of Temperature Observations (tobs) for the previous year.\n \"\"\"\n\n end_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n start_date = f\"{int(end_date[:4])-1}{end_date[4:]}\"\n\n temp_obs = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= start_date).all()\n\n return jsonify(temp_obs)\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n \"\"\"\n Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start date.\n Calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.\n \"\"\"\n\n def calc_temp_start(start):\n \"\"\"TMIN, TAVG, and TMAX for a given date.\n \n Args:\n start (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n\n return jsonify(calc_temp_start(start))\n\n\n@app.route(\"/api/v1.0//\")\ndef start_end(start, end):\n \"\"\"\n Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start-end range.\n Calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.\n \"\"\"\n\n def calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n\n return jsonify(calc_temps(start, end))\n\n\n#########################\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"08-Flask/assignment/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"539732026","text":"import os\nimport sys\nimport random\nimport numpy as np\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom 
torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nfrom torchvision import datasets, transforms\n\n\nclass Flatten(nn.Module):\n    \"\"\"\n    For convenient output of all layers.\n    rank is issued by:\n    convert from keras(2.1.3), for the datasets\n    keras: (num_datasets, height, width, channel)\n    PyTorch: (num_datasets, channel, height, width)\n    need permute and contiguous()\n    But in Keras==0.3.1, no need to\n    \"\"\"\n    def forward(self, X, rank=None):\n        if rank and isinstance(rank, (list, tuple)) and\\\n            len(X.shape) == len(rank):\n            X = X.permute(*rank).contiguous()\n        return X.view(X.size(0), -1)\n\n\ndef brier_score(input, target):\n    r\"\"\"\n\n    See :class:`~torch.nn.NLLLoss` for details.\n\n    Args:\n        input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`\n            in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1`\n            in the case of K-dimensional loss.\n        target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`,\n            or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K >= 1` for\n            K-dimensional loss.\n\n    Example::\n        >>> # input is of size N x C = 3 x 5\n        >>> input = autograd.Variable(torch.randn(3, 5))\n        >>> # each element in target has to have 0 <= value < C\n        >>> target = autograd.Variable(torch.LongTensor([1, 0, 4]))\n        >>> output = brier_score(input, target)\n        >>> output.backward()\n    \"\"\"\n    dim = input.dim()\n    if dim == 2:\n        input = F.softmax(input, 1)\n        input[range(input.shape[0]), target] -= 1\n        # print(input)\n        # print(\"square\", input**2)\n        # print(\"sum:\", (input**2).sum(1))\n        # print(\"final:\", (input**2).sum(1) / (input.shape[1]))\n        return (input**2).sum(1) / (input.shape[1])\n    else:\n        raise ValueError('Expected 2 dimensions (got {})'.format(dim))\n\n\ndef save_best_model(model, seed, path=\".\"):\n    torch.save(model.state_dict(), os.path.join(path, \"best_params_%d.pkl\" % seed))\n\n\ndef output_of_all_layers(model, X, use_cuda=False):\n    if isinstance(X, np.ndarray):\n        X = torch.FloatTensor(X)\n    elif isinstance(X, Variable):\n        X = X.data\n    elif isinstance(X, torch.Tensor):\n        pass\n    else:\n        print(\"I didn't understand other types now.\")\n        sys.exit(1)\n\n    if use_cuda:\n        X = X.cuda()\n\n    # start from the wrapped input and feed it through each submodule in turn\n    x = Variable(X)\n    for name, module in model._modules.items():\n        x = module(x)\n        print(x.shape)\n        print(name)\n\n        if (len(x.shape) == 4):\n            t = x[0, 0]\n            print(t.shape)\n            if t.shape[0] > 4:\n                mid_r = int(t.shape[0]/2)-2\n            else:\n                mid_r = 0\n            if t.shape[1] > 4:\n                mid_c = int(t.shape[1]/2)-2\n            else:\n                mid_c = 0\n            print(mid_r, mid_c)\n            print(t.data.numpy()[mid_r:mid_r+5, mid_c:mid_c+5])\n        else:\n            print(x[0].shape)\n            print(x[0].data.numpy()[:5])\n\n\nif __name__ == \"__main__\":\n    from CNN import BayCNN\n    model = BayCNN()\n    usecuda = False\n    args = {'log': 0}\n    if usecuda:\n        args['use_cuda'] = True\n        model.cuda()\n    np.random.seed(1)\n    # weights_init and x_temp are assumed to be defined elsewhere in this project\n    model.apply(weights_init)\n    print(x_temp.shape)\n    x = x_temp\n","sub_path":"pytorchTools.py","file_name":"pytorchTools.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"208259579","text":"from flask import Flask, render_template, g, session, url_for, redirect\nfrom flask.ext.sqlalchemy import SQLAlchemy\napp = Flask(__name__)\napp.config.from_object('config')\n\ndb = SQLAlchemy(app)\n\n@app.errorhandler(404)\ndef not_found(error):\n    return render_template('404.html'), 404\n\nfrom acm_phoenix.users.views import mod as usersModule\napp.register_blueprint(usersModule)\n\nfrom 
acm_phoenix.users.models import User\n@app.before_request\ndef before_request():\n    \"\"\"\n    Pull the user's profile from the database before every request is handled.\n    \"\"\"\n    g.user = None\n    if 'user_id' in session:\n        g.user = User.query.get(session['user_id'])\n\n\n@app.route('/')\ndef show_home():\n    \"\"\"\n    Display home page to visitors\n    \"\"\"\n    g.user = None\n    if 'user_id' in session:\n        g.user = User.query.get(session['user_id'])\n    return render_template('home.html')\n\n@app.route('/logout')\ndef logout():\n    \"\"\"\n    Removes user information from session\n    \"\"\"\n    session.pop('user_id', None)\n    return redirect(url_for('show_home'))\n","sub_path":"acm_phoenix/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"578219521","text":"# Problem 2729 | Binary addition | Silver 4\n\nt = int(input())\n\nfor i in range(t):\n    # read the two numbers as strings\n    a, b = input().split()\n\n    # convert from binary (base 2) to decimal and add\n    result = int(a, 2) + int(b, 2)\n\n    # convert the decimal sum back to binary\n    print(bin(result)[2:])\n","sub_path":"Algorithm/BYEOLYI/2729.py","file_name":"2729.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"598858370","text":"from discord.ext import commands\nfrom pathlib import Path\nfrom cogs.util.error import ExampleError\nimport discord\nimport os\nimport aiosqlite\n\n\nasync def get_prefix(bot, message):\n    fetch = await bot.db.execute(f'SELECT Prefix FROM Guild WHERE ID={message.guild.id}')\n    result = await fetch.fetchone()\n    if not result[0]:\n        prefix = '!'\n    else:\n        prefix = result[0]\n    return commands.when_mentioned_or(*prefix)(bot, message)\n\n\nbot = commands.AutoShardedBot(command_prefix=get_prefix)\n\n\n@bot.event\nasync def on_ready():\n    await bot.wait_until_ready()\n    print(f'\\n\\nLogged in as: {bot.user.name} - {bot.user.id}')\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n    if isinstance(error, commands.errors.CommandNotFound):\n        example = list(str(command) for command in bot.walk_commands() if ctx.invoked_with in str(command))\n        if example:\n            await ctx.send(f'Did you mean {\" \".join(set(example))}?')\n        else:\n            await ctx.send('ur dumb thats not even a command')\n\n    if isinstance(error, ExampleError):\n        print(f'{ctx.author.name} tried to use {ctx.command.name} like a little shit')\n        return\n\n    if isinstance(error, commands.CommandOnCooldown):\n        embed = discord.Embed(title=f'Command: {ctx.command.name}', colour=discord.Colour(0xFF0000),\n                              description=f\"{ctx.author.name}, you are on cooldown for this command for {error.retry_after:.2f}s\")\n        await ctx.send(embed=embed)\n        return\n\n    else:\n        e = discord.Embed(colour=discord.Colour(0xFF0000), description=f\"{error}\")\n        await ctx.send(embed=e)\n\n\n@bot.check #no replying to bots\nasync def __before_invoke(ctx):\n    if not ctx.message.author.bot:\n        return True\n\n\ndef load_some_cogs():\n    bot.startup_extensions = []\n    path = Path('./cogs')\n    for dirpath, dirnames, filenames in os.walk(path):\n        if dirpath.strip('./') == str(path):\n            for cog in filenames:\n                if cog.endswith('.py') and not cog.startswith('_'):\n                    extension = 'cogs.'+cog[:-3]\n                    bot.startup_extensions.append(extension)\n\n    if __name__ == \"__main__\":\n        for extension in bot.startup_extensions:\n            try:\n                bot.load_extension(extension)\n                print(f'Loaded {extension}')\n            except Exception as e:\n                exc = f'{type(e).__name__}: {e}'\n                print(f'Failed to load extension {extension}\\n{exc}')\n\n\nasync def 
create_dbconnect():\n bot.db = await aiosqlite.connect(\"autonickfromrole.db\")\n\n\nload_some_cogs()\nbot.loop.create_task(create_dbconnect())\nbot.run('token')","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"567167507","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2018, Digitalprizm and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom bnd.api.attendance_list import load_data\nimport json\nimport requests\n\nclass AttendanceProcess(Document):\n\tdef onload(self):\n\t\talist=load_data()\n\t\t#frappe.msgprint(\"hii\")\n\t\tself.get(\"__onload\").attendance_list = alist\n\n\n@frappe.whitelist()\ndef calling_attendance_api(process_date,enroll_number):\n\tr = requests.get('http://192.168.16.194/subtest/api/Aprocess?_date={0}&_enroll={1}'.format(process_date,enroll_number))\n\tapi=r.text\n\treturn api\n\n\n\n\n@frappe.whitelist()\ndef calling_attendance_date_api(process_date):\n\tr = requests.get('http://192.168.16.194/subtest/api/Aprocess?_date={0}'.format(process_date))\n\tapi=r.text\n\treturn api\n\n\t\n","sub_path":"bnd/bnd/doctype/attendance_process/attendance_process.py","file_name":"attendance_process.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"650625410","text":"from __future__ import print_function\nfrom sklearn.metrics import f1_score\nfrom sklearn.base import BaseEstimator\nfrom sklearn.grid_search import GridSearchCV\nfrom training import get_data_sklearn_format\nimport pycrfsuite\nimport usaddress\n\n\ndef f1_with_flattening(estimator, X, y):\n \"\"\"\n Calculate F1 score by flattening the predictions of the\n estimator across all addresses. For example, given the following\n addresses as input\n ['1 My str', '2 Your blvd'],\n the predictions of the model will be flattened like so:\n ['AddressNumber', 'StreetName', 'StreetNamePostType', 'AddressNumber', 'StreetName', 'StreetNamePostType']\n and compared to a similarly flattened gold standard labels. This calculates the overall\n quality of the model across all addresses as opposed to how well it does\n at any particular address.\n :param X: list of addresses to tag\n :param y: list of gold standard tuples\n \"\"\"\n predicted = estimator.predict(X)\n flat_pred, flat_gold = [], []\n for a, b in zip(predicted, y):\n if len(a) == len(b):\n flat_pred.extend(a)\n flat_gold.extend(b)\n return f1_score(flat_gold, flat_pred)\n\n\nclass AddressEstimator(BaseEstimator):\n \"\"\"\n A sklearn-compatible wrapper for a usaddress trainer\n \"\"\"\n model_path = 'usaddress/usaddr.crfsuite'\n\n def __init__(self, c1=1, c2=1, feature_minfreq=0):\n \"\"\"\n :param c1: L1 regularisation coefficient\n :param c2: L2 regularisation coefficient\n :param feature_minfreq: minimum feature frequency\n :return:\n \"\"\"\n self.c1 = c1\n self.c2 = c2\n self.feature_minfreq = feature_minfreq\n\n def fit(self, X, y, **params):\n # sklearn requires parameters to be declared as fields of the estimator,\n # an we can't have a full stop there. 
Replace with an underscore\n params = {k.replace('_', '.'): v for k, v in self.__dict__.items()}\n trainer = pycrfsuite.Trainer(verbose=False, params=params)\n for address, labels in zip(X, y):\n tokens = usaddress.tokenize(address)\n trainer.append(usaddress.addr2features(tokens), labels)\n trainer.train(self.model_path)\n reload(usaddress)\n\n def predict(self, X):\n reload(usaddress) # tagger object is defined at the module level, update now\n predictions = []\n for address in X:\n predictions.append([foo[1] for foo in usaddress.parse(address)])\n return predictions\n\n\nif __name__ == '__main__':\n # refer to http://www.chokkan.org/software/crfsuite/manual.html\n # for description of parameters\n cv = GridSearchCV(AddressEstimator(), {'c1': [10 ** x for x in range(-2, 2)],\n 'c2': [10 ** x for x in range(-2, 4)],\n 'feature_minfreq': [0, 3, 5]},\n scoring=f1_with_flattening, verbose=5)\n X, y = get_data_sklearn_format()\n cv.fit(X, y)\n print(cv.best_params_)\n for foo in cv.grid_scores_:\n print(foo)\n","sub_path":"training/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"268653027","text":"from collections import OrderedDict\n\nfl=OrderedDict()\n\nfl={\"Kevin\":[\"python\",\"ruby\"],\"Harry\":[\"C\",\"Wolfram Language\"],\"Eva\":[\"C\",\"matlab\",\"python\"]}\n\nfor name,langlist in fl.items():\n print(name.title()+\"'s fav languages are:\")\n for lang in langlist:\n print(\"\\t\"+lang.title())\n","sub_path":"flanguage2.py","file_name":"flanguage2.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"2825452","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport functools\nimport itertools\nimport operator\n\ndef ifgsm(model, X, y, niters=10, epsilon=0.01, learning_rate=0.005):\n out = model(X)\n error_original = (out.max(1)[1] != y).float().sum() / X.size(0)\n\n X_pert = X.clone()\n X_pert.requires_grad = True\n \n for i in range(niters):\n output_perturbed = model(X_pert)\n loss = nn.CrossEntropyLoss()(output_perturbed, y)\n loss.backward()\n pert = learning_rate * X_pert.grad.detach().sign()\n \n # add perturbation\n X_pert = X_pert.detach() + pert\n X_pert.requires_grad = True\n \n # make sure we don't modify the original image beyond epsilon\n X_pert = normalize_and_scale(X_pert.detach() - X.clone(), epsilon) + X.clone()\n X_pert.requires_grad = True\n\n # clamp image\n X_pert = X_pert.detach().clamp(X.min(), X.max())\n X_pert.requires_grad = True\n\n return X_pert\n\n\ndef momentum_ifgsm(model, X, y, niters=10, epsilon=0.01, visualize=False, learning_rate=0.005, decay=0.9):\n X_pert = X.clone()\n X_pert.requires_grad = True\n \n momentum = 0\n for _ in range(niters):\n output_perturbed = model(X_pert)\n loss = nn.CrossEntropyLoss()(output_perturbed, y)\n loss.backward()\n \n momentum = decay * momentum + X_pert.grad / torch.sum(torch.abs(X_pert.grad))\n pert = learning_rate * momentum.sign()\n\n # add perturbation\n X_pert = X_pert.detach() + pert\n X_pert.requires_grad = True\n \n # make sure we don't modify the original image beyond epsilon\n X_pert = normalize_and_scale(X_pert.detach() - X.clone(), epsilon) + X.clone()\n X_pert.requires_grad = 
True\n\n # adjust to be within [-1, 1]\n X_pert = X_pert.detach().clamp(X.min(), X.max())\n X_pert.requires_grad = True\n \n return X_pert\n\n\nclass Proj_Loss(torch.nn.Module):\n\n def __init__(self):\n super(Proj_Loss,self).__init__()\n\n def forward(self, old_attack_mid , new_mid, original_mid, coeff):\n x = (old_attack_mid - original_mid).view(1,-1)\n y = (new_mid - original_mid).view(1,-1)\n #print(x.norm(), y.norm())\n x_norm = x / x.norm()\n y_norm = y / y.norm()\n #angle_loss = torch.mm(x_norm, y_norm.transpose(0,1))\n #magnitude_gain = y.norm() / x.norm()\n proj_loss = torch.mm(y, x_norm.transpose(0,1)) / x.norm()\n# print(proj_loss)\n #print(x_norm.size(), x_norm)\n return proj_loss\n\n\nclass Mid_layer_target_Loss(torch.nn.Module):\n\n def __init__(self):\n super(Mid_layer_target_Loss,self).__init__()\n\n def forward(self, old_attack_mid , new_mid, original_mid, coeff):\n x = (old_attack_mid - original_mid).view(1,-1)\n y = (new_mid - original_mid).view(1,-1)\n #print(y.norm())\n x_norm = x / x.norm()\n if (y == 0).all():\n y_norm = y\n else:\n y_norm = y / y.norm()\n angle_loss = torch.mm(x_norm, y_norm.transpose(0,1))\n magnitude_gain = y.norm() / x.norm()\n# print(str(angle_loss.float()) + \" \" + str(magnitude_gain.float()) )\n return angle_loss + magnitude_gain * coeff\n \n\n\"\"\"Return: perturbed x\"\"\"\nmid_output = None\n\ndef ILA(with_projection, model, X, X_attack, y, feature_layer, niters=10, epsilon=0.01, coeff=1.0, learning_rate=1):\n \n X = X.detach()\n X_pert = torch.zeros(X.size()).cuda()\n X_pert.copy_(X).detach()\n X_pert.requires_grad = True\n \n \n def get_mid_output(m, i, o):\n global mid_output\n mid_output = o\n \n h = feature_layer.register_forward_hook(get_mid_output)\n \n out = model(X)\n mid_original = torch.zeros(mid_output.size()).cuda() \n mid_original.copy_(mid_output)\n \n out = model(X_attack)\n mid_attack_original = torch.zeros(mid_output.size()).cuda() \n mid_attack_original.copy_(mid_output)\n \n \n for i in range(niters): \n output_perturbed = model(X_pert)\n # generate adversarial example by max middle layer pertubation in the direction of increasing loss\n if with_projection:\n loss = Proj_Loss()(mid_attack_original.detach(), mid_output, mid_original.detach(),coeff)\n else:\n loss = Mid_layer_target_Loss()(mid_attack_original.detach(), mid_output, mid_original.detach(),coeff)\n \n loss.backward()\n pert = learning_rate * X_pert.grad.detach().sign()\n\n # minimize loss\n X_pert = X_pert.detach() + pert\n X_pert.requires_grad = True\n\n \n # make sure we don't modify the original image beyond epsilon\n X_pert = normalize_and_scale(X_pert.detach() - X.clone(), epsilon) + X.clone()\n X_pert.requires_grad = True\n\n # clamp image\n X_pert = X_pert.detach().clamp(X.min(), X.max())\n X_pert.requires_grad = True\n \n \n h.remove()\n return X_pert\n\n\nbatch_size=32\nmean_arr, stddev_arr = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n\n# normalization (L-inf norm projection) code for output delta\ndef normalize_and_scale(delta_im, epsilon):\n\n delta_im = delta_im / delta_im.abs().max() # now -1..1 \n delta_im = delta_im.clone() + 1 # now 0..2\n delta_im = delta_im.clone() * 0.5 # now 0..1\n\n # normalize image color channels\n for c in range(3):\n delta_im[:,c,:,:] = (delta_im[:,c,:,:].clone() - mean_arr[c]) / stddev_arr[c]\n \n # threshold each channel of each image in deltaIm according to inf norm\n # do on a per image basis as the inf norm of each image could be different\n# for i in range(batch_size):\n for i in range(delta_im.size(0)):\n # 
do per channel l_inf normalization\n for ci in range(3):\n l_inf_channel = delta_im[i,ci,:,:].clone().detach().abs().max()\n mag_in_scaled_c = epsilon/stddev_arr[ci]\n delta_im[i,ci,:,:] = delta_im[i,ci,:,:].clone() * np.minimum(1.0, mag_in_scaled_c / l_inf_channel.cpu().numpy())\n\n return delta_im\n","sub_path":"imagenet_experiments/imagenet_attacks.py","file_name":"imagenet_attacks.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"467336580","text":"import os\r\nimport time\r\nfrom configparser import ConfigParser\r\nfrom collections import OrderedDict\r\nfrom utils import Logger\r\nfrom data.dataset import Dataset\r\nfrom evaluator import FoldOutEvaluator\r\nimport tensorflow as tf\r\n\r\n\r\ndef _read_config(model):\r\n config = ConfigParser()\r\n config.read(\"tfrec.ini\")\r\n tfrec_config = OrderedDict(config._sections[\"tfrec\"].items())\r\n # model_name = tfrec_config[\"model\"]\r\n model_name = model.__class__.__name__\r\n\r\n model_config_path = os.path.join(\"./conf\", model_name + \".ini\")\r\n config.read(model_config_path)\r\n model_config = OrderedDict(config._sections[\"hyperparameters\"].items())\r\n\r\n train_file = tfrec_config[\"train_file\"]\r\n tfrec_config[\"data_name\"] = train_file.split(\"/\")[-1].split(\"\\\\\")[-1].split(\".\")[0]\r\n\r\n return tfrec_config, model_config\r\n\r\n\r\ndef _create_logger(tfrec_config, model_config):\r\n # create logger\r\n data_name = tfrec_config[\"data_name\"]\r\n model_name = tfrec_config[\"model\"]\r\n log_dir = os.path.join(\"log\", data_name, model_name)\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n\r\n logger_name = '_'.join([\"{}={}\".format(arg, value) for arg, value in model_config.items()\r\n if len(value) < 20])\r\n special_char = {'/', '\\\\', '\\\"', ':', '*', '?', '<', '>', '|', '\\t'}\r\n logger_name = [c if c not in special_char else '_' for c in logger_name]\r\n logger_name = ''.join(logger_name)\r\n timestamp = time.time()\r\n\r\n logger_name = logger_name[:200]\r\n # data name, model name, param, timestamp\r\n logger_name = \"%s_%s_%s_%d.log\" % (data_name, model_name, logger_name, timestamp)\r\n logger_name = os.path.join(log_dir, logger_name)\r\n logger = Logger(logger_name)\r\n\r\n # write configuration into log file\r\n info = '\\n'.join([\"{}={}\".format(arg, value) for arg, value in tfrec_config.items()])\r\n logger.info(\"\\nTFRec information:\\n%s \" % info)\r\n\r\n logger.info(\"\\n\")\r\n logger.info(\"Recommender:%s\" % model_name)\r\n logger.info(\"Dataset name:\\t%s\" % data_name)\r\n argument = '\\n'.join([\"{}={}\".format(arg, value) for arg, value in model_config.items()])\r\n logger.info(\"\\nHyperparameters:\\n%s \" % argument)\r\n\r\n return logger\r\n\r\n\r\ndef _eval_parameter(tfrec_config, model_config):\r\n # get parameters\r\n config = OrderedDict(tfrec_config, **model_config)\r\n for key, value in config.items():\r\n try:\r\n config[key] = eval(value)\r\n except:\r\n config[key] = value\r\n return config\r\n\r\n\r\nclass AbstractRecommender(object):\r\n def __init__(self):\r\n tfrec_config, model_config = _read_config(self)\r\n self.logger = _create_logger(tfrec_config, model_config)\r\n self.config = _eval_parameter(tfrec_config, model_config)\r\n self.dataset = Dataset(self.config)\r\n self.logger.info(\"\\nuser number=%d\\nitem number=%d\" % (self.dataset.num_users, self.dataset.num_items))\r\n self.evaluator = FoldOutEvaluator(self.dataset.train_matrix, 
self.dataset.test_matrix)\r\n\r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n self.sess = tf.Session(config=config)\r\n\r\n def train_model(self):\r\n raise NotImplementedError\r\n \r\n def evaluate_model(self):\r\n raise NotImplementedError\r\n\r\n def predict_for_eval(self, users):\r\n raise NotImplementedError\r\n","sub_path":"model/base/AbstractRecommender.py","file_name":"AbstractRecommender.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"619647545","text":"import os\n\nfrom django.test import TransactionTestCase, Client\nfrom django.urls import reverse\n\nfrom ports.models import Port\nfrom MacPorts.config import TEST_PORTINDEX_JSON\n\n\nclass TestDependencies(TransactionTestCase):\n reset_sequences = True\n\n def setUp(self):\n self.client = Client()\n Port.load(TEST_PORTINDEX_JSON)\n\n def test_search(self):\n response1 = self.client.get(reverse('ports_search'), data={\n 'search_by': 'name',\n 'name': 'port',\n 'search_text': 'port'\n })\n\n response2 = self.client.get(reverse('ports_search'), data={\n 'search_by': 'description',\n 'description': 'categoryA',\n 'search_text': 'categoryA'\n })\n\n response3 = self.client.get(reverse('ports_search'), data={\n 'search_by': 'name',\n 'name': 'port-A5',\n 'search_text': 'port-A5'\n })\n\n self.assertEquals(response1.context['ports'].count(), 8)\n self.assertEquals(response2.context['ports'].count(), 6)\n self.assertEquals(response3.context['ports'].count(), 1)\n\n def test_search_in_category(self):\n response = self.client.get(reverse('search_ports_in_category'), data={\n 'name': 'port-A3',\n 'categories__name': 'categoryA',\n })\n\n self.assertEquals(response.context['ports'].count(), 1)\n\n def test_search_in_maintainer(self):\n response = self.client.get(reverse('search_ports_in_maintainer'), data={\n 'name': 'port-A',\n 'maintainers__name': 'user',\n })\n\n self.assertEquals(response.context['ports'].count(), 4)\n\n","sub_path":"app/ports/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"31140787","text":"import numpy as np\r\nimport cv2\r\nfrom PIL import Image\r\n\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')\r\ncap = cv2.VideoCapture(0)\r\nfgbg = cv2.createBackgroundSubtractorMOG2()\r\n\r\n# while(True):\r\n# # Capture frame-by-frame\r\n# ret, frame = cap.read()\r\n# fgmask = fgbg.apply(frame)\r\n\r\n#\r\n# cv2.imshow('frame', fgmask)\r\n# cv2.imshow('frame_original', frame)\r\n# if cv2.waitKey(20) & 0xFF == ord('q'):\r\n# break\r\n\r\npanel = np.zeros([100, 700], np.uint8)\r\ncv2.namedWindow(\"panel\")\r\n\r\ndef nothing(x):\r\n pass\r\n\r\ncv2.createTrackbar(\"L - h\", \"panel\", 0, 255, nothing)\r\ncv2.createTrackbar(\"U - h\", \"panel\", 255, 255, nothing)\r\n\r\ncv2.createTrackbar(\"L - s\", \"panel\", 0, 255, nothing)\r\ncv2.createTrackbar(\"U - s\", \"panel\", 255, 255, nothing)\r\n\r\ncv2.createTrackbar(\"L - v\", \"panel\", 0, 255, nothing)\r\ncv2.createTrackbar(\"U - v\", \"panel\", 255, 255, nothing)\r\n\r\ncv2.createTrackbar(\"S ROWS\", \"panel\", 0, 480, nothing)\r\ncv2.createTrackbar(\"E ROWS\", \"panel\", 480, 480, nothing)\r\n\r\ncv2.createTrackbar(\"S COLS\", \"panel\", 0, 640, nothing)\r\ncv2.createTrackbar(\"E COLS\", \"panel\", 640, 640, nothing)\r\n\r\nwhile(True):\r\n # Capture frame-by-frame\r\n ret, 
frame = cap.read()\r\n\r\n s_r = cv2.getTrackbarPos(\"S ROWS\", \"panel\")\r\n e_r = cv2.getTrackbarPos(\"E ROWS\", \"panel\")\r\n s_c = cv2.getTrackbarPos(\"S COLS\", \"panel\")\r\n e_c = cv2.getTrackbarPos(\"E COLS\", \"panel\")\r\n\r\n roi = frame[s_r: e_r, s_c: e_c]\r\n hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\r\n\r\n l_h = cv2.getTrackbarPos(\"L - h\", \"panel\")\r\n u_h = cv2.getTrackbarPos(\"U - h\", \"panel\")\r\n l_s = cv2.getTrackbarPos(\"L - s\", \"panel\")\r\n u_s = cv2.getTrackbarPos(\"U - s\", \"panel\")\r\n l_v = cv2.getTrackbarPos(\"L - v\", \"panel\")\r\n u_v = cv2.getTrackbarPos(\"U - v\", \"panel\")\r\n\r\n lower_green = np.array([l_h, l_s, l_v])\r\n upper_green = np.array([u_h, u_s, u_v])\r\n\r\n mask = cv2.inRange(hsv, lower_green, upper_green)\r\n mask_inv = cv2.bitwise_not(mask)\r\n\r\n bg = cv2.bitwise_and(roi, roi, mask=mask)\r\n fg = cv2.bitwise_and(roi, roi, mask=mask_inv)\r\n\r\n cv2.imshow('bg', bg)\r\n cv2.imshow(\"fg\", fg)\r\n cv2.imshow(\"panel\", panel)\r\n if cv2.waitKey(20) & 0xFF == ord('q'):\r\n break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"Shooting_withoutBack.py","file_name":"Shooting_withoutBack.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145213355","text":"# This script should be used a PDAL filter.python \n# The idea is to check each and every point in a .las file\n# and, in case it is inside a cuboid with the specified coordinates,\n# it should be marked in any way (at the moment PointSourceId is set\n# to some arbitrary value\n# The logics is built on the vector geometry, description can be found here:\n# https://math.stackexchange.com/questions/1472049/check-if-a-point-is-inside-a-rectangular-shaped-area-3d\n# and here:\n# https://stackoverflow.com/questions/59828533/python-code-to-check-if-a-point-is-inside-or-outside-a-cuboid\n#\n# Should be used a function for PDAL filter.python:\n# pdal pipeline $PROJECT_DIR/json/pipeline_filter.json\n\n# Created by Serge Shmygelskyy aka Shmyg\n\nimport numpy\n\ndef apply_filter(ins,outs):\n\n vertex_1 = [-10,-10,-10]\n vertex_2 = [-10, -10, 10]\n vertex_3 = [-10, 10, -10]\n vertex_4 = [10, -10, -10]\n\n vector_1 = numpy.subtract(vertex_1, vertex_2)\n vector_2 = numpy.subtract(vertex_1, vertex_3)\n vector_3 = numpy.subtract(vertex_1, vertex_4)\n \n xCoordinate = ins['X'].astype('float64')\n yCoordinate = ins['Y'].astype('float64')\n zCoordinate = ins['Z'].astype('float64')\n \n point = numpy.array([[xCoordinate],[yCoordinate],[zCoordinate]])\n\n point_source_id = ins['PointSourceId'] + 66\n\n# if min(numpy.dot(vector_1, vertex_1), numpy.dot(vector_1, vertex_2)) <= \\\n# numpy.dot(vector_1, point) <= max(numpy.dot(vector_1, vertex_1), numpy.dot(vector_1, vertex_2)) \\\n# and min(numpy.dot(vector_2, vertex_1), numpy.dot(vector_2, vertex_3)) <= \\\n# numpy.dot(vector_2, point) <= max(numpy.dot(vector_2, vertex_1), numpy.dot(vector_1, vertex_3)) \\\n# and min(numpy.dot(vector_3, vertex_1), numpy.dot(vector_3, vertex_4)) <= \\\n# numpy.dot(vector_3, point) <= max(numpy.dot(vector_3, vertex_1), numpy.dot(vector_3, vertex_4)) :\n#\n outs['PointSourceId'] = point_source_id\n\t\n return True\n","sub_path":"python/ApplyFilter.py","file_name":"ApplyFilter.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216569516","text":"\"\"\"\n\tfor문을 
Let's learn the for statement.\nStructure of a for statement:\nfor variable in list (or tuple, or string)\n\"\"\"\n\n#1. A typical for statement\ntest_list = ['one', 'two', 'three']\nfor i in test_list:\n\tprint(i)\n\n#2. Various uses of the for statement\na = [(1,2), (3,4), (5,6)]\nfor (first,last) in a:\n\tprint(first + last)\n\tprint(\"--------------------------\")\n\n#The range function, often used together with for statements\na = range(10)\nprint(a)\n\na = range(1,11)\nprint(a)\n\n#A look at an example using the range function\nsum = 0\nfor i in range(1,11):\n\tsum = sum + i\n\tprint(sum)\n\n#Putting a for statement inside a list (a list comprehension)\na = [1,2,3,4]\nresult = []\nfor num in a:\n\tresult.append(num*3)\nprint(result)\n\nprint(\"-------Let's make this simpler-------\")\n\nresult2 = [num * 3 for num in a]\nprint(result2)\n\n#Multiply only the even numbers by 3 and collect them\nresult3 = [num * 3 for num in a if num % 2 == 0]\nprint(result3)\n\n#Try it with two or more for statements (more complex)\nresult4 = [x*y for x in range(2,10)\n\t\t\t\tfor y in range(1,10)]\nprint(result4)\n\n","sub_path":"3장/03-3_for.py","file_name":"03-3_for.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"325497886","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)\n# [GCC 8.4.0]\n# Embedded file name: /Users/clareau/dat/Research/BuenrostroResearch/lareau_dev/bap/bap/cli_reanno.py\n# Compiled at: 2018-10-29 20:09:49\n# Size of source mod 2**32: 1310 bytes\nimport click, os, os.path, sys, shutil, yaml, random, string, itertools, time, pysam, csv, re\nfrom itertools import groupby\nfrom .bapHelp import *\nfrom pkg_resources import get_distribution\n\n@click.command()\n@click.version_option()\n@click.option('--input', '-i', help='Input bam file.')\n@click.option('--output', '-o', help='Output bam file.')\n@click.option('--sep', '-s', default='_', help='Separator for reannotation. 
Assume: {barcode}_{readname} (default delim = \"_\")')\n@click.option('--tag', '-t', default='XB', help='Sam tag for barcode; by default, assume XB.\\n\\n')\ndef main(input, output, sep, tag):\n \"\"\"\n bap-reanno: Reannotate samples that were de-barcoded and aligned \n\n Caleb Lareau, clareau broadinstitute org \n \n \"\"\"\n __version__ = get_distribution('bap').version\n script_dir = os.path.dirname(os.path.realpath(__file__))\n click.echo(gettime() + 'Starting re-barcoding from bap pipeline v%s' % __version__)\n bam = pysam.AlignmentFile(input, 'rb')\n out = pysam.AlignmentFile(output, 'wb', template=bam)\n for read in bam:\n name = read.query_name\n ss = name.split(sep)\n read.query_name = ss[1]\n read.tags = read.tags + [(tag, ss[0])]\n out.write(read)","sub_path":"pycfiles/bap-atac-0.6.6.tar/cli_reanno.cpython-36.py","file_name":"cli_reanno.cpython-36.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"378687871","text":"\"\"\"Notebook versioner controller\"\"\"\n\nimport json\nimport logging\nimport uuid\nimport webapp2\n\nfrom httplib2 import Http\nfrom googleapiclient.discovery import build\nfrom oauth2client.contrib.appengine import AppAssertionCredentials\nfrom google.appengine.api import taskqueue\n\nimport nv_vars # pylint: disable=relative-import\n\nSTARTUP_SCRIPT_FILE = 'notebook_versioner.sh'\n\n\ndef get_credentials():\n \"\"\"Authorizes a request to Google Cloud Platform.\"\"\"\n credentials = AppAssertionCredentials(\n 'https://www.googleapis.com/auth/cloud-platform')\n http_auth = credentials.authorize(Http())\n return build('compute', 'v1', http=http_auth)\n\n\nclass CreateVM(webapp2.RequestHandler):\n \"\"\"Handles the creation of instances.\"\"\"\n\n def get(self):\n \"\"\"Handles the creation of instances.\"\"\"\n unique_id = str(uuid.uuid4())[:6]\n with open(STARTUP_SCRIPT_FILE, 'r') as ss_file:\n startup_script = ss_file.read()\n instance_config = nv_vars.INSTANCE_CONFIG % (\n unique_id, json.dumps(startup_script))\n\n compute = get_credentials()\n compute.instances().insert(\n project=nv_vars.PROJECT,\n zone=nv_vars.ZONE,\n body=json.loads(instance_config)).execute()\n\n taskqueue.add(url=\"/delete\",\n params={'name': \"nv-worker-%s\" % unique_id},\n countdown=nv_vars.WAIT_FOR_VM_DELETE)\n\n self.response.write('Done.')\n\n\nclass DeleteVM(webapp2.RequestHandler):\n \"\"\"Handles the deletion of instances.\"\"\"\n\n def post(self):\n \"\"\"Handles the deletion of instances.\"\"\"\n name = self.request.get('name')\n logging.info(\"Deleting instance id: %s\", name)\n\n compute = get_credentials()\n compute.instances().delete(\n project=nv_vars.PROJECT,\n zone=nv_vars.ZONE,\n instance=name).execute()\n\n self.response.write('Done.')\n\n\nAPP = webapp2.WSGIApplication([\n ('/create', CreateVM),\n ('/delete', DeleteVM),\n], debug=True)\n","sub_path":"controller/nv_control.py","file_name":"nv_control.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67456151","text":"import time, subprocess, socket, re\r\n\r\nafttFile=open(\"PackageNames.txt\", \"r\")\r\n\r\nfileName=\"\"\r\ncurrentPackageName=\"\"\r\n\r\nwindow_dump=None\r\ncurrent_window=None\r\n\r\nhsTe1 = '\\nDONE.\\n'\r\nhsTe2 = 'DONE.\\n'\r\nhsTe3 = '\\nDONE.\\nDONE\\n'\r\nhsTe4 = 'DONE.\\nDONE\\n'\r\nhsTe1 = hsTe1.encode(encoding='utf_8')\r\nhsTe2 = hsTe2.encode(encoding='utf_8')\r\nhsTe3 = 
hsTe3.encode(encoding='utf_8')\r\nhsTe4 = hsTe4.encode(encoding='utf_8')\r\n\r\nport = 5939\r\n\r\n\r\n########################## FEATURES TO EXTRACT ########################################\r\nclass feature:\r\n\t#administrative features\r\n\tfId = \"\"\r\n\tftype = \"\"\r\n\tlebel = 0 # class 0: neither -- class 1: password -- class 2: username\r\n\txLocOnScreen = -1\r\n\tyLocOnScreen = -1\r\n\tlayoutWidth = -1\r\n\tisWidthZero = -1\r\n\thasInlineImage = 0\r\n\thasFocus = -1\r\n\r\n\t#features used for classification\r\n\trank = -1\r\n\tisTypeTextEdit = -1\r\n\tisTypeTextView = -1\r\n\tisTypeAutoComplete = -1\r\n\tisIdUsername = -1\r\n\tisIdPassword = -1\r\n\tisIdName = -1\r\n\tisIdNumber = -1\r\n\tisIdPhone = -1\r\n\tisIdEmail = -1\r\n\tisIdAccount = -1\r\n\thasText = -1\r\n\tisClickable = -1\r\n\tisFocusable = -1\r\n\tisVisible = -1\r\n\thasOverlappingRendering = -1\r\n\tmPrivateFlags = -1\r\n\tmViewFlags = -1\r\n\tmPrivateFlags_DRAWN = -1\r\n\t\r\nfeaturesToKeep = [\r\n\"mID\",\r\n\"mText\",\r\n\"isClickable\",\r\n\"isFocusable\",\r\n\"hasOverlappingRendering\",\r\n\"getVisibility\",\r\n\"mPrivateFlags\",\r\n\"mViewFlags\",\r\n\"mPrivateFlags_DRAWN\",\r\n\"getLocationOnScreen_x\",\r\n\"getLocationOnScreen_y\",\r\n\"hasFocus()\",\r\n\"layout:getWidth\"]\r\n\r\nuserNameWords = [\r\n\"username\",\"UserName\",\"USERNAME\",\r\n\"User\",\"user\",\"USER\"\r\n]\r\n\r\nemailWords = [\"email\", \"Email\", \"EMAIL\",\"E-MAIL\", \"e-mail\",\"E_MAIL\",\"e_mail\"]\r\nphoneWords = [\"Phone\",\"phone\",\"tel\", \"Tel\", \"TEL\", \"TELEPHONE\", \"telephone\", \"PHONE\"]\r\naccountWords = [\"account\",\"Account\",\"ACCOUNT\"]\r\nnameWords = [\"name\",\"Name\",\"NAME\"]\r\nnumberWords = [\"number\", \"Number\",\"NUMBER\",\"#\"]\r\n\r\npasswordWords = [\r\n\"pass\",\"Pass\",\"pin\", \"Pin\",\"PIN\",\"PASS\",\"PASSWORD\",\"PASSCODE\",\"PINCODE\",\r\n\"password\",\"Password\",\"passcode\", \"Passcode\", \"passCode\", \"pass_code\"\r\n]\r\n#######################################################################################\r\n\r\n\r\n###################################FUNCTIONS###########################################\r\n#Function for executing adb shell command\r\ndef adb(command, input=None):\r\n\tif not isinstance(command,list): command=command.split()\r\n\treturn subprocess.Popen(command, stdout=subprocess.PIPE,\r\n\t\t\t\t\t\tstderr=subprocess.STDOUT).communicate(input=input)[0]\r\n\r\n\r\n#Function for getting current window hash id\r\ndef getWindowHash():\r\n\r\n\twindow_dump=subprocess.Popen('adb shell dumpsys window windows', shell=True, stdout=subprocess.PIPE)\r\n\twindow_dump=window_dump.stdout.read()\r\n\r\n\tregexp=re.compile('Window #\\d+[:\\s\\r\\n]+Window\\{(?P<hash>[a-f\\d]{8}) (?P<title>.*) paused=.*\\}:?[\\r\\n]+(?P<attributes>(?: .*[\\r\\n]+)+)', re.MULTILINE)\r\n\r\n\t# fetch current window\r\n\tm=re.search('mCurrentFocus=Window\\{(?P<hash>\\S+) (?P<title>\\S*) \\S+', str(window_dump))\r\n\t\r\n\tif m:\r\n\t\tcurrent_window=m.groupdict()\r\n\t\treturn [current_window][0]['hash']\r\n\r\n\r\n#Function for retrieving the typeID of the currently selected widget\r\ndef getSelectedWidgetsID(dumResults):\r\n\t#find selected widget's ID\r\n\tout = \"\"\r\n\titemTypeLine = re.findall('mServedView=\\S*',str(dumResults))\r\n\tif len(itemTypeLine) != 0:\r\n\t\tbracSplit = re.split('{',itemTypeLine[0])\r\n\t\tfirstPartSplit = re.split('=',bracSplit[0])\r\n\t\tout = firstPartSplit[1] + '@' + bracSplit[1]\r\n\treturn 
out\r\n\r\n\r\n\r\n#######################################################################################\r\n#********************** START OF THE APP**********************************************#\r\n#######################################################################################\r\n\r\n############Read Packag Names###########\r\nfor line in afttFile:\r\n\r\n\tcurrentPackageName=line.strip(\"\\n\")\r\n\tcommand_start=\"adb shell monkey -p \" + currentPackageName + \" 1\"\r\n\tprint (\"\\nCurrent Package Name: \"+currentPackageName + \"\\n\")\r\n\r\n\tresult=subprocess.Popen(command_start, shell=True, stdout=subprocess.PIPE)\r\n\tresult=result.stdout.read()\r\n\r\n\twhile True:\r\n\t\r\n\t\tactivityNb = input(\"Please enter activity depth: \")\r\n\t\tprint(\"you entered: \" + str(activityNb) + \"\\n\")\r\n\t\t\r\n\t\tlabel = input(\"Please enter lable: 0 for nothing 1 for password 2 for username other for exit: \")\r\n\t\t\r\n\t\tif (label==0 or label==1 or label==2):\r\n\r\n\t\t\t###################communication start#############################################\r\n\t\t\tf=open(currentPackageName+\".txt\", \"w\")\r\n\r\n\t\t\tprint(\"communication start:\")\r\n\t\t\ttry: \r\n\t\t\t\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM) \r\n\t\t\t\tsubprocess.Popen('adb shell service call window 2', shell=True, stdout=subprocess.PIPE)\r\n\t\t\t\ttime.sleep(.5)\r\n\t\t\t\tsubprocess.Popen('adb shell service call window 1 i32 4939', shell=True, stdout=subprocess.PIPE)\r\n\t\t\t\ttime.sleep(.5)\r\n\r\n\t\t\t\tadb('adb forward tcp:%d tcp:4939' % port)\r\n\t\t\t\ts.connect(('127.0.0.1', port))\r\n\t\t\t\tprint (\"Port Used for Communication: %s\" %port)\r\n\r\n\r\n\t\t\t\thashId=getWindowHash()\r\n\t\t\t\thsTe = 'DUMP %s\\n'%hashId; \r\n\t\t\t\ts.sendall(hsTe.encode(encoding='utf_8'))\r\n\t\t\t\tprint (\"all dump Commands sent\")\r\n\t\r\n\t\t\t\ts.settimeout(360)\r\n\t\t\t\tdata=''\r\n\t\t\t\tt0 = time.time() \r\n\t\r\n\t\t\t\tprint (\"Listening for replies\")\r\n\t\t\t\twhile True:\r\n\t\t\t\t\tdatum=s.recv(32*1024)\r\n\t\t\t\t\t#print (\"Packet: \"+str(datum))\r\n\t\t\t\t\tf.write(str(datum))\r\n\r\n\r\n\t\t\t\t\tif (datum.endswith(hsTe1) or datum.endswith(hsTe2) or datum.endswith(hsTe3) or datum.endswith(hsTe4)):\r\n\t\t\t\t\t\tprint (\"End of Packet ransfer\\n\\n\")\r\n\t\t\t\t\t\tbreak \r\n\t\t\t\t\tif datum == '':\r\n\t\t\t\t\t\tprint (\"Socket closed\")\r\n\t\t\t\t\t\traise socket.error('Socket closed')\r\n\t\t\t\t\tif time.time() - t0 > 360:\r\n\t\t\t\t\t\tprint (\"Timeout\")\r\n\t\t\t\t\t\traise socket.error('Timeout') \r\n\r\n\t\t\t\t\r\n\t\t\t\t\tdata+=str(datum)\r\n\r\n\t\t\texcept socket.error:\r\n\t\t\t\tprint (\"connection failed checking for another port\")\r\n\t\t\t\tsubprocess.Popen(command+'service call window 2', shell=True, stdout=subprocess.PIPE)\r\n\t\t\t\ttime.sleep(.5)\r\n\t\t\t\tsubprocess.Popen(command+'service call window 1 i32 4939', shell=True, stdout=subprocess.PIPE)\r\n\t\t\t\ttime.sleep(.5)\r\n\t\t\t\ts.close()\r\n\r\n\t\t\tf.close()\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t#######################################################################################\r\n\t\t\t#********************** PARSING SECTION **********************************************#\r\n\t\t\t#######################################################################################\r\n\t\t\telementsFoundSoFar=[]\r\n\t\t\tif currentPackageName == \"\":\r\n\t\t\t\tprint(\"EROR: AFTT file was not created\\n\")\r\n\t\t\t\texit()\r\n\r\n\t\t\t########################### segmenting the file 
#######################################\r\n\t\t\tsrcFile=open(currentPackageName+\".txt\", \"r\")\r\n\t\t\toutFile=open(\"segmented_\" + currentPackageName + \".txt\",\"w\")\r\n\r\n\t\t\t#you want android.widget.EditText at the begining of the line\r\n\t\t\toutFile.write(srcFile.read().replace(r' \\n ',\"\\n\\n\\n\\n\"))\r\n\r\n\t\t\tsrcFile.close()\r\n\t\t\toutFile.close()\r\n\t\t\t#######################################################################################\r\n\r\n\r\n\t\t\t########### Loading The Whole File Into One String ####################################\r\n\t\t\tf=open(\"segmented_\" + currentPackageName + \".txt\", \"r\")\r\n\r\n\t\t\tfeaturesNb = 0\r\n\t\t\twholeFile = \"\"\r\n\t\t\r\n\t\t\tfor line in f:\r\n\t\t\t wholeFile += line;\r\n\r\n\t\t\tf.close()\r\n\r\n\t\t\t#######################################################################################\r\n\r\n\r\n\t\t\t############################### Parsing the Segmented File ############################\r\n\t\t\toutFile_csv=open(\"output_\"+str(label) + \"_\"+ str(currentPackageName) +\".csv\",\"w\")\r\n\r\n\t\t\twholeFileArr = re.split('\\n\\s{4,}',wholeFile)\r\n\r\n\t\t\tfor ind in range(len(wholeFileArr)):\r\n\t\t\t\tif ind != 0:\r\n\r\n\t\t\t\t\tcurFit = feature()\r\n\t\t\t\t\tsingleWidgetArr = re.split(' ', wholeFileArr[ind])\r\n\r\n\t\t\t\t\tfor indk in range(len(singleWidgetArr)):\r\n\r\n\t\t\t\t\t\tif indk == 0 :\r\n\t\t\t\t\t\t\tw_type_temp = singleWidgetArr[indk]\r\n\t\t\t\t\t\t\tcurFit.ftype = w_type_temp\r\n\r\n\t\t\t\t\t\t\tif (\"edit\" in w_type_temp or \"Edit\" in w_type_temp) and (\"text\" in w_type_temp or \"Text\" in w_type_temp):\r\n\t\t\t\t\t\t\t\tcurFit.isTypeTextEdit = 1\r\n\t\t\t\t\t\t\t\tfeaturesNb += 1\r\n\t\t\t\t\t\t\t\tcurFit.rank = featuresNb\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tcurFit.isTypeTextEdit = 0\r\n\r\n\t\t\t\t\t\t\tif (\"view\" in w_type_temp or \"View\" in w_type_temp) and (\"text\" in w_type_temp or \"Text\" in w_type_temp):\r\n\t\t\t\t\t\t\t\tcurFit.isTypeTextView = 1\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tcurFit.isTypeTextView = 0\r\n\r\n\t\t\t\t\t\t\tif (\"auto\" in w_type_temp or \"Auto\" in w_type_temp) and (\"complete\" in w_type_temp or \"Complete\" in w_type_temp):\r\n\t\t\t\t\t\t\t\tcurFit.isTypeAutoComplete = 1\r\n\t\t\t\t\t\t\t\tfeaturesNb += 1\r\n\t\t\t\t\t\t\t\tcurFit.rank = featuresNb\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tcurFit.isTypeAutoComplete = 0\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif any(xxx in singleWidgetArr[indk] for xxx in featuresToKeep):\r\n\r\n\t\t\t\t\t\t\t\t#seperate out propertyName and value\r\n\t\t\t\t\t\t\t\tsingleProp = re.split(',', singleWidgetArr[indk])\r\n\t\t\t\t\t\t\t\tvalue = singleProp[1]\r\n\t\t\t\t\t\t\t\tsingleProp = re.split('=', singleWidgetArr[indk])\r\n\t\t\t\t\t\t\t\tprop = singleProp[0]\r\n\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"mID\":\r\n\t\t\t\t\t\t\t\t\tcurFit.fId = value\r\n\r\n\t\t\t\t\t\t\t\t\tif any(xx in value for xx in userNameWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdUsername = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdUsername = 0\r\n\r\n\t\t\t\t\t\t\t\t\tif any(x in value for x in passwordWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdPassword = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdPassword = 0\r\n\r\n\t\t\t\t\t\t\t\t\tif any(x in value for x in emailWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdEmail = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdEmail = 0\r\n\r\n\t\t\t\t\t\t\t\t\tif any(x in value for x in 
nameWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdName = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdName = 0\r\n\r\n\t\t\t\t\t\t\t\t\tif any(x in value for x in accountWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdAccount = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdAccount = 0\r\n\r\n\t\t\t\t\t\t\t\t\tif any(x in value for x in numberWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdNumber = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdNumber = 0\r\n\r\n\t\t\t\t\t\t\t\t\tif any(x in value for x in phoneWords):\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdPhone = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isIdPhone = 0\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"layout:getLocationOnScreen_x()\" :\r\n\t\t\t\t\t\t\t\t\tcurFit.xLocOnScreen = value\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"layout:getLocationOnScreen_y()\" :\r\n\t\t\t\t\t\t\t\t\tcurFit.yLocOnScreen = value\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"mViewFlags\" :\r\n\t\t\t\t\t\t\t\t\tcurFit.mViewFlags = value\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"mPrivateFlags\" :\r\n\t\t\t\t\t\t\t\t\tcurFit.mPrivateFlags = value\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"mPrivateFlags_DRAWN\" :\r\n\t\t\t\t\t\t\t\t\tcurFit.mPrivateFlags_DRAWN = value\r\n\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"layout:getWidth()\" :\r\n\t\t\t\t\t\t\t\t\tcurFit.layoutWidth = value\r\n\t\t\t\t\t\t\t\t\tif value is '0':\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isWidthZero = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isWidthZero = 0\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"getVisibility()\" :\r\n\t\t\t\t\t\t\t\t\tif value == \"VISIBLE\" or value == \"VIS'b'IBLE\":\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isVisible = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isVisible = 0\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"focus:isFocusable()\" :\r\n\t\t\t\t\t\t\t\t\tif value == \"true\":\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isFocusable = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isFocusable = 0\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"isClickable()\" :\r\n\t\t\t\t\t\t\t\t\tif value == \"true\":\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isClickable = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.isClickable = 0\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"drawing:hasOverlappingRendering()\" :\r\n\t\t\t\t\t\t\t\t\tif value == \"true\":\r\n\t\t\t\t\t\t\t\t\t\tcurFit.hasOverlappingRendering = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.hasOverlappingRendering = 0\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"text:mText\" :\r\n\t\t\t\t\t\t\t\t\tsingleProp = re.split(',', singleWidgetArr[indk])\r\n\t\t\t\t\t\t\t\t\tsingleProp = re.split('=', singleProp[0])\r\n\t\t\t\t\t\t\t\t\tif singleProp[1] == '0':\r\n\t\t\t\t\t\t\t\t\t\tcurFit.hasText = 0\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.hasText = 1\r\n\r\n\t\t\t\t\t\t\t\tif prop == \"focus:hasFocus()\" :\r\n\t\t\t\t\t\t\t\t\tif value == \"true\":\r\n\t\t\t\t\t\t\t\t\t\tcurFit.hasFocus = 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tcurFit.hasFocus = 0\r\n\r\n\r\n\t\t\t\t\telementsFoundSoFar.append(curFit)\r\n\t\t\t#######################################################################################\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t#######################################################################################\r\n\t\t\t#********************** Labeling SECTION *********************************************#\r\n\t\t\t#######################################################################################\r\n#\t\t\tfor elem in elementsFoundSoFar:\r\n#\t\t\t\tif elem.hasFocus and 
elem.rank != -1:\r\n#\t\t\t\t\telem.lebel = label\r\n\r\n\t\t\t#get screen dump\r\n\t\t\tdumResults=subprocess.Popen(\"adb shell dumpsys input_method\", shell=True, stdout=subprocess.PIPE)\r\n\t\t\tdumResults=dumResults.stdout.read()\r\n\t\t\tdumResults = str(dumResults)\r\n\t\t\ttime.sleep(3)\r\n\r\n\t\t\tif \"@\" in dumResults:\r\n\t\t\t\tprint('labling method 1')\r\n\t\t\t\tfullItemType = getSelectedWidgetsID(dumResults)\r\n\t\t\t\tprint(str(fullItemType))\r\n\r\n\t\t\t\t#find the element corresponding to the ID and mark the results\r\n\t\t\t\tif fullItemType != \"\":\r\n\t\t\t\t\tfor elem in elementsFoundSoFar:\r\n\t\t\t\t\t\tif elem.ftype == fullItemType:\r\n\t\t\t\t\t\t\telem.lebel = label\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"could not find ID of selected wid\")\r\n\r\n\r\n\t\t\telse:\r\n\t\t\t\tprint('labling method 2')\r\n\t\t\t\tfor elem in elementsFoundSoFar:\r\n\t\t\t\t\tif elem.hasFocus and elem.rank != -1:\r\n\t\t\t\t\t\telem.lebel = label\r\n\r\n \r\n\r\n\r\n\t \r\n\r\n\t\t\t#######################################################################################\r\n\t\t\t#********************** Writing the report********************************************#\r\n\t\t\t#######################################################################################\r\n\t\t\tprint(\"Savings the results for \" + currentPackageName + \"\\n\")\r\n\r\n\t\t\toutFile_csv.write(\"Label,Type,ID,xLoc,yLoc,width,hasFocus,ActivityDepth,totalNbOfWidgets,Rank,isWidthZero,isTypeEditText,isTypeTextView,isTypeAutoComplete,isIdUsername,isIdPassword,isIdEmail,isIdAccount,isIdPhone,isIdNumber,isIdName,hasText,isVisible,isFocusable,isClickable,hasOverlappingRendering,mPrivateFlags,mViewFlags,mPrivateFlags_DRAWN\" + '\\n')\r\n\t\t\tfor el in elementsFoundSoFar:\r\n\r\n\t\t\t\toutFile_csv.write(str(el.lebel) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.ftype) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.fId) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.xLocOnScreen) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.yLocOnScreen) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.layoutWidth) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.hasFocus) + ',')\r\n\r\n\t\t\t\toutFile_csv.write(str(activityNb) + ',')\r\n\t\t\t\toutFile_csv.write(str(featuresNb) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.rank) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isWidthZero) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isTypeTextEdit) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isTypeTextView) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isTypeAutoComplete) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdUsername) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdPassword) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdEmail) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdAccount) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdPhone) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdNumber) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isIdName) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.hasText) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isVisible) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isFocusable) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.isClickable) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.hasOverlappingRendering) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.mPrivateFlags) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.mViewFlags) + ',')\r\n\t\t\t\toutFile_csv.write(str(el.mPrivateFlags_DRAWN) + 
',')\r\n\r\n\t\t\t\toutFile_csv.write('\\n')\r\n\r\n\t\t\toutFile_csv.close()\r\n\t\telse:\r\n\t\t\tbreak","sub_path":"widgetLabelers/labelDroid.py","file_name":"labelDroid.py","file_ext":"py","file_size_in_byte":15166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535316598","text":"import pytest\n\nfrom sklearn.utils.estimator_checks import check_estimator\n\nimport scipy.sparse\nimport numpy as np\n\nfrom vectorizers import TokenCooccurrenceVectorizer\nfrom vectorizers import NgramVectorizer\nfrom vectorizers import SkipgramVectorizer\nfrom vectorizers import DistributionVectorizer\nfrom vectorizers import HistogramVectorizer\nfrom vectorizers import KDEVectorizer\nfrom vectorizers import LabelledTreeCooccurrenceVectorizer\n\nfrom vectorizers import SequentialDifferenceTransformer\nfrom vectorizers import Wasserstein1DHistogramTransformer\n\nfrom vectorizers.distances import kantorovich1d\n\nfrom vectorizers._vectorizers import (\n ngrams_of,\n find_bin_boundaries,\n build_tree_skip_grams,\n remove_node,\n sequence_tree_skip_grams,\n)\nfrom vectorizers._window_kernels import (\n harmonic_kernel,\n triangle_kernel,\n flat_kernel,\n information_window,\n)\n\ntoken_data = (\n (1, 3, 1, 4, 2),\n (2, 1, 2, 3, 4, 1, 2, 1, 3, 2, 4),\n (4, 1, 1, 3, 2, 4, 2),\n (1, 2, 2, 1, 2, 1, 3, 4, 3, 2, 4),\n (3, 4, 2, 1, 3, 1, 4, 4, 1, 3, 2),\n (2, 1, 3, 1, 4, 4, 1, 4, 1, 3, 2, 4),\n)\n\ntext_token_data = (\n (\"foo\", \"pok\", \"foo\", \"wer\", \"bar\"),\n (),\n (\"bar\", \"foo\", \"bar\", \"pok\", \"wer\", \"foo\", \"bar\", \"foo\", \"pok\", \"bar\", \"wer\"),\n (\"wer\", \"foo\", \"foo\", \"pok\", \"bar\", \"wer\", \"bar\"),\n (\"foo\", \"bar\", \"bar\", \"foo\", \"bar\", \"foo\", \"pok\", \"wer\", \"pok\", \"bar\", \"wer\"),\n (\"pok\", \"wer\", \"bar\", \"foo\", \"pok\", \"foo\", \"wer\", \"wer\", \"foo\", \"pok\", \"bar\"),\n (\n \"bar\",\n \"foo\",\n \"pok\",\n \"foo\",\n \"wer\",\n \"wer\",\n \"foo\",\n \"wer\",\n \"foo\",\n \"pok\",\n \"bar\",\n \"wer\",\n ),\n)\n\ntext_token_data_permutation = ((\"wer\", \"pok\"), (\"bar\", \"pok\"), (\"foo\", \"pok\", \"wer\"))\ntext_token_data_subset = ((\"foo\", \"pok\"), (\"pok\", \"foo\", \"foo\"))\ntext_token_data_new_token = ((\"foo\", \"pok\"), (\"pok\", \"foo\", \"foo\", \"zaz\"))\n\nmixed_token_data = (\n (1, \"pok\", 1, 3.1415, \"bar\"),\n (\"bar\", 1, \"bar\", \"pok\", 3.1415, 1, \"bar\", 1, \"pok\", \"bar\", 3.1415),\n (3.1415, 1, 1, \"pok\", \"bar\", 3.1415, \"bar\"),\n (1, \"bar\", \"bar\", 1, \"bar\", 1, \"pok\", 3.1415, \"pok\", \"bar\", 3.1415),\n (\"pok\", 3.1415, \"bar\", 1, \"pok\", 1, 3.1415, 3.1415, 1, \"pok\", \"bar\"),\n (\"bar\", 1, \"pok\", 1, 3.1415, 3.1415, 1, 3.1415, 1, \"pok\", \"bar\", 3.1415),\n)\n\npoint_data = [\n np.random.multivariate_normal(\n mean=[0.0, 0.0], cov=[[0.5, 0.0], [0.0, 0.5]], size=50\n ),\n np.random.multivariate_normal(\n mean=[0.5, 0.0], cov=[[0.5, 0.0], [0.0, 0.5]], size=60\n ),\n np.random.multivariate_normal(\n mean=[-0.5, 0.0], cov=[[0.5, 0.0], [0.0, 0.5]], size=80\n ),\n np.random.multivariate_normal(\n mean=[0.0, 0.5], cov=[[0.5, 0.0], [0.0, 0.5]], size=40\n ),\n np.random.multivariate_normal(\n mean=[0.0, -0.5], cov=[[0.5, 0.0], [0.0, 0.5]], size=20\n ),\n]\n\nvalue_sequence_data = [\n np.random.poisson(3.0, size=100),\n np.random.poisson(12.0, size=30),\n np.random.poisson(4.0, size=40),\n np.random.poisson(5.0, size=90),\n np.random.poisson(4.5, size=120),\n np.random.poisson(9.0, size=60),\n np.random.poisson(2.0, size=80),\n]\n\npath_graph = 
scipy.sparse.csr_matrix(\n [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n)\npath_graph_labels = np.array([\"a\", \"b\", \"a\", \"c\"])\npath_graph_two_out = scipy.sparse.csr_matrix(\n [[0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n)\nunique_labels = np.array([\"a\", \"b\", \"c\", \"d\"])\nshifted_labels = np.array([\"b\", \"c\", \"d\", \"e\"])\ntree_sequence = [(path_graph, unique_labels), (path_graph, shifted_labels)]\nlabel_dictionary = {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3, \"e\": 4}\nsub_dictionary = {\"a\": 0, \"b\": 1, \"c\": 2}\n\nseq_tree_sequence = [\n (scipy.sparse.csr_matrix([[0, 1], [0, 0]]), np.array([\"wer\", \"pok\"])),\n (scipy.sparse.csr_matrix([[0, 1], [0, 0]]), np.array([\"bar\", \"pok\"])),\n (\n scipy.sparse.csr_matrix([[0, 1, 0], [0, 0, 1], [0, 0, 0]]),\n np.array([\"foo\", \"pok\", \"wer\"]),\n ),\n]\n\n\ndef test_LabeledTreeCooccurrenceVectorizer():\n model = LabelledTreeCooccurrenceVectorizer(\n window_radius=2, window_orientation=\"after\"\n )\n result = model.fit_transform(tree_sequence)\n expected_result = scipy.sparse.csr_matrix(\n np.array(\n [\n [0, 1, 1, 0, 0],\n [0, 0, 2, 2, 0],\n [0, 0, 0, 2, 1],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0],\n ]\n )\n )\n assert np.allclose(result.toarray(), expected_result.toarray())\n\n result = model.transform(tree_sequence)\n expected_result = scipy.sparse.csr_matrix(\n np.array(\n [\n [0, 1, 1, 0, 0],\n [0, 0, 2, 2, 0],\n [0, 0, 0, 2, 1],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0],\n ]\n )\n )\n assert np.allclose(result.toarray(), expected_result.toarray())\n\n\ndef test_LabeledTreeCooccurrenceVectorizer_reduced_vocab():\n model = LabelledTreeCooccurrenceVectorizer(\n window_radius=2, window_orientation=\"after\", token_dictionary=sub_dictionary,\n )\n result = model.fit_transform(tree_sequence)\n assert result.shape == (3, 3)\n\n\n@pytest.mark.parametrize(\"min_token_occurrences\", [None, 2])\n@pytest.mark.parametrize(\"max_token_occurrences\", [None, 2])\n@pytest.mark.parametrize(\"min_document_occurrences\", [None, 1])\n@pytest.mark.parametrize(\"max_document_frequency\", [None, 0.7])\n@pytest.mark.parametrize(\"window_orientation\", [\"before\", \"after\", \"symmetric\", \"directional\"])\n@pytest.mark.parametrize(\"window_radius\", [1, 2])\n@pytest.mark.parametrize(\"kernel_function\", [\"harmonic\", \"flat\"])\n@pytest.mark.parametrize(\"mask_string\", [None, \"[MASK]\"])\ndef test_equality_of_CooccurrenceVectorizers(\n min_token_occurrences,\n max_token_occurrences,\n min_document_occurrences,\n max_document_frequency,\n window_radius,\n window_orientation,\n kernel_function,\n mask_string,\n):\n tree_model = LabelledTreeCooccurrenceVectorizer(\n window_radius=window_radius,\n window_orientation=window_orientation,\n kernel_function=kernel_function,\n min_occurrences=min_token_occurrences,\n max_occurrences=max_token_occurrences,\n max_tree_frequency=max_document_frequency,\n min_tree_occurrences=min_document_occurrences,\n mask_string = mask_string,\n )\n seq_model = TokenCooccurrenceVectorizer(\n window_radius=window_radius,\n window_orientation=window_orientation,\n kernel_function=kernel_function,\n min_occurrences=min_token_occurrences,\n max_occurrences=max_token_occurrences,\n max_document_frequency=max_document_frequency,\n min_document_occurrences=min_document_occurrences,\n mask_string=mask_string,\n )\n assert np.allclose(\n tree_model.fit_transform(seq_tree_sequence).toarray(),\n seq_model.fit_transform(text_token_data_permutation).toarray(),\n )\n assert np.allclose(\n 
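The stacked pytest.mark.parametrize decorators on test_equality_of_CooccurrenceVectorizers above expand into the full cross product of all eight parameter lists, one collected test per combination. A tiny self-contained illustration of that expansion:

import pytest

@pytest.mark.parametrize("radius", [1, 2])
@pytest.mark.parametrize("kernel", ["harmonic", "flat"])
def test_grid(radius, kernel):
    # collected 2 * 2 = 4 times, once per (kernel, radius) pair
    assert radius in (1, 2) and kernel in ("harmonic", "flat")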
tree_model.fit_transform(seq_tree_sequence).toarray(),\n tree_model.transform(seq_tree_sequence).toarray(),\n )\n assert np.allclose(\n seq_model.fit_transform(text_token_data_permutation).toarray(),\n seq_model.transform(text_token_data_permutation).toarray(),\n )\n assert np.allclose(\n tree_model.transform(seq_tree_sequence).toarray(),\n seq_model.transform(text_token_data_permutation).toarray(),\n )\n\n\ndef test_build_tree_skip_grams_contract():\n (result_matrix, result_labels) = build_tree_skip_grams(\n token_sequence=path_graph_labels,\n adjacency_matrix=path_graph,\n kernel_function=flat_kernel,\n window_size=2,\n )\n expected_result = scipy.sparse.csr_matrix(\n [[1.0, 1.0, 1.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0]]\n )\n assert np.allclose(result_matrix.toarray(), expected_result.toarray())\n\n\ndef test_build_tree_skip_grams_no_contract():\n (result_matrix, result_labels) = build_tree_skip_grams(\n token_sequence=unique_labels,\n adjacency_matrix=path_graph,\n kernel_function=flat_kernel,\n window_size=2,\n )\n assert np.allclose(result_matrix.toarray(), path_graph_two_out.toarray())\n assert np.array_equal(unique_labels, result_labels)\n\n\n@pytest.mark.parametrize(\n \"window_orientation\", [\"before\", \"after\", \"symmetric\", \"directional\"]\n)\ndef test_sequence_tree_skip_grams(window_orientation):\n result = sequence_tree_skip_grams(\n tree_sequence,\n kernel_function=flat_kernel,\n window_size=2,\n label_dictionary=label_dictionary,\n window_orientation=window_orientation,\n )\n expected_result = scipy.sparse.csr_matrix(\n np.array(\n [\n [0, 1, 1, 0, 0],\n [0, 0, 2, 2, 0],\n [0, 0, 0, 2, 1],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0],\n ]\n )\n )\n if window_orientation == \"before\":\n assert np.allclose(result.toarray(), expected_result.T.toarray())\n elif window_orientation == \"after\":\n assert np.allclose(result.toarray(), expected_result.toarray())\n elif window_orientation == \"symmetric\":\n assert np.allclose(\n result.toarray(), (expected_result + expected_result.T).toarray()\n )\n elif window_orientation == \"directional\":\n assert np.allclose(\n result.toarray(),\n scipy.sparse.hstack([expected_result.T, expected_result]).toarray(),\n )\n\n\ndef test_harmonic_kernel():\n kernel = harmonic_kernel([0, 0, 0, 0], 4.0)\n assert kernel[0] == 1.0\n assert kernel[-1] == 1.0 / 4.0\n assert kernel[1] == 1.0 / 2.0\n\n\ndef test_triangle_kernel():\n kernel = triangle_kernel([0, 0, 0, 0], 4.0)\n assert kernel[0] == 4.0\n assert kernel[-1] == 1.0\n assert kernel[1] == 3.0\n\n\ndef test_flat_kernel():\n kernel = flat_kernel([0] * np.random.randint(2, 10), 0.0)\n assert np.all(kernel == 1.0)\n\n\ndef test_ngrams_of():\n for ngram_size in (1, 2, 4):\n tokens = np.random.randint(10, size=np.random.poisson(5 + ngram_size))\n ngrams = ngrams_of(tokens, ngram_size)\n if len(tokens) >= ngram_size:\n assert len(ngrams) == len(tokens) - (ngram_size - 1)\n else:\n assert len(ngrams) == 0\n assert np.all(\n [ngrams[i][0] == tokens[i] for i in range(len(tokens) - (ngram_size - 1))]\n )\n assert np.all(\n [\n ngrams[i][-1] == tokens[i + (ngram_size - 1)]\n for i in range(len(tokens) - (ngram_size - 1))\n ]\n )\n\n\ndef test_find_bin_boundaries_min():\n data = np.random.poisson(5, size=1000)\n data = np.append(data, [0, 0, 0])\n bins = find_bin_boundaries(data, 10)\n # Poisson so smallest bin should be at 0\n assert bins[0] == 0.0\n\n\ndef test_find_boundaries_all_dupes():\n data = np.ones(100)\n with pytest.warns(UserWarning):\n bins = find_bin_boundaries(data, 10)\n assert len(bins) == 
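The kernel tests above pin down the weight shapes: harmonic weights fall off as 1/k and triangle weights fall off linearly from the radius. Sketch implementations consistent with those assertions (the real ones live in vectorizers._window_kernels, whose exact signatures are assumed here):

import numpy as np

def harmonic_kernel(window, _radius):
    # weights 1, 1/2, 1/3, ... matching kernel[1] == 1/2 and kernel[-1] == 1/4 above
    return np.array([1.0 / (i + 1) for i in range(len(window))])

def triangle_kernel(window, radius):
    # weights radius, radius - 1, ..., matching kernel[0] == 4.0 above
    return np.array([radius - i for i in range(len(window))])

assert harmonic_kernel([0] * 4, 4.0)[1] == 0.5
assert triangle_kernel([0] * 4, 4.0)[-1] == 1.0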
1\n\n\ndef test_token_cooccurrence_vectorizer_basic():\n vectorizer = TokenCooccurrenceVectorizer(window_orientation='symmetric')\n result = vectorizer.fit_transform(token_data)\n transform = vectorizer.transform(token_data)\n assert (result != transform).nnz == 0\n assert scipy.sparse.issparse(result)\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"after\"\n )\n result = vectorizer.fit_transform(token_data)\n transform = vectorizer.transform(token_data)\n assert (result != transform).nnz == 0\n assert result[0, 2] == 8\n assert result[1, 0] == 6\n\n\ndef test_token_cooccurrence_vectorizer_orientation():\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"directional\"\n )\n result = vectorizer.fit_transform(text_token_data)\n assert result.shape == (4, 8)\n # Check the pok preceded by wer value is 1\n row = vectorizer.token_label_dictionary_[\"pok\"]\n col = vectorizer.column_label_dictionary_[\"pre_wer\"]\n assert result[row, col] == 1\n result_before = TokenCooccurrenceVectorizer(\n window_orientation=\"before\"\n ).fit_transform(text_token_data)\n result_after = TokenCooccurrenceVectorizer(\n window_orientation=\"after\"\n ).fit_transform(text_token_data)\n assert np.all(result_after.toarray() == (result_before.transpose()).toarray())\n result_symmetric = TokenCooccurrenceVectorizer(\n window_orientation=\"symmetric\"\n ).fit_transform(text_token_data)\n assert np.all(\n result_symmetric.toarray() == (result_before + result_after).toarray()\n )\n\n\ndef test_token_cooccurrence_vectorizer_column_order():\n vectorizer = TokenCooccurrenceVectorizer().fit(text_token_data)\n vectorizer_permuted = TokenCooccurrenceVectorizer().fit(text_token_data_permutation)\n assert (\n vectorizer.token_label_dictionary_\n == vectorizer_permuted.token_label_dictionary_\n )\n\n\ndef test_token_cooccurrence_vectorizer_transform():\n vectorizer = TokenCooccurrenceVectorizer(window_orientation='symmetric')\n result = vectorizer.fit_transform(text_token_data_subset)\n transform = vectorizer.transform(text_token_data)\n assert result.shape == transform.shape\n assert transform[0, 0] == 34\n\n\ndef test_token_cooccurence_vectorizer_transform_new_vocab():\n vectorizer = TokenCooccurrenceVectorizer()\n result = vectorizer.fit_transform(text_token_data_subset)\n transform = vectorizer.transform(text_token_data_new_token)\n assert (result != transform).nnz == 0\n\n\ndef test_token_cooccurrence_vectorizer_text():\n vectorizer = TokenCooccurrenceVectorizer()\n result = vectorizer.fit_transform(text_token_data)\n assert scipy.sparse.issparse(result)\n transform = vectorizer.transform(text_token_data)\n assert (result != transform).nnz == 0\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"after\"\n )\n result = vectorizer.fit_transform(text_token_data)\n transform = vectorizer.transform(text_token_data)\n assert (result != transform).nnz == 0\n assert result[1, 2] == 8\n assert result[0, 1] == 6\n\n\ndef test_token_cooccurrence_vectorizer_fixed_tokens():\n vectorizer = TokenCooccurrenceVectorizer(token_dictionary={1: 0, 2: 1, 3: 2})\n result = vectorizer.fit_transform(token_data)\n assert scipy.sparse.issparse(result)\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"after\"\n )\n result = vectorizer.fit_transform(token_data)\n assert result[0, 2] == 8\n assert result[1, 0] == 6\n\n\ndef test_token_cooccurrence_vectorizerexcessive_prune():\n vectorizer = 
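test_token_cooccurrence_vectorizer_orientation above encodes the relationships between the window orientations. A pure numpy restatement of those identities, using a made-up 2x2 'before' count matrix:

import numpy as np

before = np.array([[0, 2], [1, 0]])       # hypothetical 'before' co-occurrence counts
after = before.T                          # 'after' is the transpose, as asserted above
symmetric = before + after                # 'symmetric' sums both directions
directional = np.hstack([before, after])  # doubled column count, as in the (4, 8) shape
assert np.array_equal(symmetric, symmetric.T)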
TokenCooccurrenceVectorizer(min_frequency=1.0)\n with pytest.raises(ValueError):\n result = vectorizer.fit_transform(token_data)\n\n\ndef test_token_cooccurrence_vectorizer_min_occur():\n vectorizer = TokenCooccurrenceVectorizer(min_occurrences=3)\n result = vectorizer.fit_transform(token_data)\n assert scipy.sparse.issparse(result)\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"after\"\n )\n result = vectorizer.fit_transform(token_data)\n assert result[0, 2] == 8\n assert result[1, 0] == 6\n\n\ndef test_token_cooccurrence_vectorizer_max_freq():\n vectorizer = TokenCooccurrenceVectorizer(max_frequency=0.2)\n result = vectorizer.fit_transform(token_data)\n assert scipy.sparse.issparse(result)\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"after\"\n )\n result = vectorizer.fit_transform(token_data)\n assert result[0, 2] == 8\n assert result[1, 0] == 6\n\n\ndef test_token_cooccurrence_vectorizer_info_window():\n vectorizer = TokenCooccurrenceVectorizer(window_function=\"information\")\n result = vectorizer.fit_transform(token_data)\n assert scipy.sparse.issparse(result)\n vectorizer = TokenCooccurrenceVectorizer(\n window_radius=1, window_orientation=\"after\"\n )\n result = vectorizer.fit_transform(token_data)\n assert result[0, 2] == 8\n assert result[1, 0] == 6\n\n\ndef test_token_cooccurrence_vectorizer_mixed():\n vectorizer = TokenCooccurrenceVectorizer()\n with pytest.raises(ValueError):\n vectorizer.fit_transform(mixed_token_data)\n\n\ndef test_ngram_vectorizer_basic():\n vectorizer = NgramVectorizer()\n result = vectorizer.fit_transform(token_data)\n assert scipy.sparse.issparse(result)\n transform_result = vectorizer.transform(token_data)\n assert np.all(transform_result.data == result.data)\n assert np.all(transform_result.tocoo().col == result.tocoo().col)\n\n\ndef test_ngram_vectorizer_text():\n vectorizer = NgramVectorizer()\n result = vectorizer.fit_transform(text_token_data)\n assert scipy.sparse.issparse(result)\n # Ensure that the empty document has an all zero row\n assert len((result[1, :]).data) == 0\n\n\ndef test_ngram_vectorizer_mixed():\n vectorizer = SkipgramVectorizer()\n with pytest.raises(ValueError):\n vectorizer.fit_transform(mixed_token_data)\n\n\ndef test_ngram_vectorizer_min_doc():\n vectorizer = NgramVectorizer(min_document_occurrences=2)\n count_matrix = vectorizer.fit_transform(text_token_data_permutation)\n assert count_matrix.shape == (3, 2)\n assert np.all(count_matrix.toarray() == np.array([[1, 1], [1, 0], [1, 1]]))\n\n\ndef test_ngram_vectorizer_min_doc_freq():\n vectorizer = NgramVectorizer(min_document_frequency=0.6)\n count_matrix = vectorizer.fit_transform(text_token_data_permutation)\n assert count_matrix.shape == (3, 2)\n assert np.all(count_matrix.toarray() == np.array([[1, 1], [1, 0], [1, 1]]))\n\n\ndef test_ngram_vectorizer_max_doc():\n vectorizer = NgramVectorizer(max_document_occurrences=1)\n count_matrix = vectorizer.fit_transform(text_token_data_permutation)\n assert count_matrix.shape == (3, 2)\n assert np.all(count_matrix.toarray() == np.array([[0, 0], [1, 0], [0, 1]]))\n\n\ndef test_ngram_vectorizer_max_doc_freq():\n vectorizer = NgramVectorizer(max_document_frequency=0.4)\n count_matrix = vectorizer.fit_transform(text_token_data_permutation)\n assert count_matrix.shape == (3, 2)\n assert np.all(count_matrix.toarray() == np.array([[0, 0], [1, 0], [0, 1]]))\n\n\ndef test_skipgram_vectorizer_basic():\n vectorizer = SkipgramVectorizer()\n result = 
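The min/max document-occurrence tests above prune the vocabulary by how many documents a token appears in. A sketch of that bookkeeping on the same three-document corpus used by the tests:

from collections import Counter

docs = [("wer", "pok"), ("bar", "pok"), ("foo", "pok", "wer")]
doc_freq = Counter(tok for doc in docs for tok in set(doc))  # set() counts per document
kept = sorted(tok for tok, c in doc_freq.items() if c >= 2)
print(kept)  # ['pok', 'wer'] - two surviving columns, matching the (3, 2) shapes above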
vectorizer.fit_transform(token_data)\n assert scipy.sparse.issparse(result)\n transform_result = vectorizer.transform(token_data)\n assert np.all(transform_result.data == result.data)\n assert np.all(transform_result.tocoo().col == result.tocoo().col)\n\n\ndef test_skipram_vectorizer_max_doc():\n vectorizer = SkipgramVectorizer(max_document_occurrences=2)\n count_matrix = vectorizer.fit_transform(text_token_data_permutation)\n assert count_matrix.shape == (3, 1)\n assert np.all(count_matrix.toarray() == np.array([[0], [0], [1]]))\n\n\ndef test_skipram_vectorizer_min_doc():\n vectorizer = SkipgramVectorizer(min_document_occurrences=2)\n count_matrix = vectorizer.fit_transform(text_token_data_permutation)\n assert count_matrix.shape == (3, 2)\n assert np.all(count_matrix.toarray() == np.array([[0, 1], [0, 0], [1, 0]]))\n\n\ndef test_skipgram_vectorizer_text():\n vectorizer = SkipgramVectorizer()\n result = vectorizer.fit_transform(text_token_data)\n assert scipy.sparse.issparse(result)\n # Ensure that the empty document has an all zero row\n assert len((result[1, :]).data) == 0\n\n\ndef test_skipgram_vectorizer_mixed():\n vectorizer = SkipgramVectorizer()\n with pytest.raises(ValueError):\n vectorizer.fit_transform(mixed_token_data)\n\n\ndef test_distribution_vectorizer_basic():\n vectorizer = DistributionVectorizer(n_components=3)\n result = vectorizer.fit_transform(point_data)\n assert result.shape == (len(point_data), 3)\n transform_result = vectorizer.transform(point_data)\n assert np.all(result == transform_result)\n\n\ndef test_distribution_vectorizer_bad_params():\n vectorizer = DistributionVectorizer(n_components=-1)\n with pytest.raises(ValueError):\n vectorizer.fit(point_data)\n vectorizer = DistributionVectorizer(n_components=\"foo\")\n with pytest.raises(ValueError):\n vectorizer.fit(point_data)\n vectorizer = DistributionVectorizer()\n with pytest.raises(ValueError):\n vectorizer.fit(point_data[0])\n vectorizer = DistributionVectorizer()\n with pytest.raises(ValueError):\n vectorizer.fit(\n [np.random.uniform(size=(10, np.random.poisson(10))) for i in range(5)]\n )\n vectorizer = DistributionVectorizer()\n with pytest.raises(ValueError):\n vectorizer.fit([[[1, 2, 3], [1, 2], [1, 2, 3, 4]], [[1, 2], [1,], [1, 2, 3]]])\n\n\ndef test_histogram_vectorizer_basic():\n vectorizer = HistogramVectorizer(n_components=20)\n result = vectorizer.fit_transform(value_sequence_data)\n assert result.shape == (len(value_sequence_data), 20)\n transform_result = vectorizer.transform(value_sequence_data)\n assert np.all(result == transform_result)\n\n\ndef test_histogram_vectorizer_outlier_bins():\n vectorizer = HistogramVectorizer(n_components=20, append_outlier_bins=True)\n result = vectorizer.fit_transform(value_sequence_data)\n assert result.shape == (len(value_sequence_data), 20 + 2)\n transform_result = vectorizer.transform([[-1.0, -1.0, -1.0, 150.0]])\n assert transform_result[0][0] == 3.0\n assert transform_result[0][-1] == 1.0\n\n\ndef test_kde_vectorizer_basic():\n vectorizer = KDEVectorizer(n_components=20)\n result = vectorizer.fit_transform(value_sequence_data)\n assert result.shape == (len(value_sequence_data), 20)\n transform_result = vectorizer.transform(value_sequence_data)\n assert np.all(result == transform_result)\n\n\ndef test_seq_diff_transformer():\n transformer = SequentialDifferenceTransformer()\n result = transformer.fit_transform(value_sequence_data)\n for i in range(len(value_sequence_data)):\n assert np.allclose(\n result[i], value_sequence_data[i][1:] - 
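test_histogram_vectorizer_outlier_bins above expects the three below-range values to land in a prepended bin and the one above-range value in an appended bin. A numpy sketch of that bookkeeping, with an assumed fitted range of [0, 30]:

import numpy as np

edges = np.linspace(0.0, 30.0, 21)            # 20 interior bins; the range is assumed
values = np.array([-1.0, -1.0, -1.0, 150.0])
interior, _ = np.histogram(values, bins=edges)  # out-of-range values are not counted here
vec = np.concatenate([[np.sum(values < edges[0])], interior,
                      [np.sum(values > edges[-1])]])
print(vec[0], vec[-1])  # 3 1, matching the transform assertions above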
value_sequence_data[i][:-1]\n )\n\n\ndef test_wass1d_transfomer():\n vectorizer = HistogramVectorizer()\n histogram_data = vectorizer.fit_transform(value_sequence_data)\n transformer = Wasserstein1DHistogramTransformer()\n result = transformer.fit_transform(histogram_data)\n for i in range(result.shape[0]):\n for j in range(i + 1, result.shape[0]):\n assert np.isclose(\n kantorovich1d(histogram_data[i], histogram_data[j]),\n np.sum(np.abs(result[i] - result[j])),\n )\n\n\ndef test_node_removal():\n graph = scipy.sparse.random(10, 10, 0.1, format=\"csr\")\n graph.data = np.ones_like(graph.data)\n # # Remove self-loops\n # for i in range(10):\n # if graph[i, i] != 0:\n # graph[i, i] = 0\n # graph.eliminate_zeros()\n node_to_remove = np.argmax(np.array(graph.sum(axis=0)).T[0])\n graph_less_node = remove_node(graph, node_to_remove, inplace=False)\n # assert (graph != graph_less_node).sum() > 0\n with pytest.raises(ValueError):\n graph_less_node = remove_node(graph, node_to_remove, inplace=True)\n inplace_graph = graph.tolil()\n remove_node(inplace_graph, node_to_remove, inplace=True)\n assert (inplace_graph != graph_less_node).sum() == 0\n\n assert np.all([node_to_remove not in row for row in inplace_graph.rows])\n assert len(inplace_graph.rows[node_to_remove]) == 0\n\n orig_graph = graph.tolil()\n for i, row in enumerate(orig_graph.rows):\n if node_to_remove in row and i != node_to_remove:\n assert np.all(\n np.unique(np.hstack([row, orig_graph.rows[node_to_remove]]))\n == np.unique(np.hstack([inplace_graph.rows[i], [node_to_remove]]))\n )\n","sub_path":"vectorizers/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":23712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"266231996","text":"from PIL import Image\nimport numpy as np\nimport os.path\nfrom pycocotools.coco import COCO\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom trainers.utils.logger import Logger\nimport time\nfrom statistics import mean\n\n\nclass Batcher:\n def __init__(self, loader, batch_size):\n self.loader = iter(loader)\n self.batch_size = batch_size\n\n def __iter__(self):\n return self\n\n def __next__(self):\n images, labels = [], []\n while len(images) < self.batch_size:\n try:\n image, label = next(self.loader)\n comp_tensor = torch.ones((1), dtype=torch.long)\n if torch.eq(label[1], comp_tensor):\n images.extend(image)\n labels.append(label[0])\n except StopIteration:\n break\n if len(images) < self.batch_size:\n raise StopIteration\n else:\n return torch.stack(images), torch.stack(labels)\n\n\ndef UnbiasedPULoss(X, A, rho=0.7):\n \"\"\" Note: This must be constrained.\n \n X: outputs\n A: labels\n rho: noise rate \"\"\"\n X_ = (X - 1).pow(2)\n numer = X_ - (rho * (X.pow(2)))\n frac = (numer / (1 - rho))\n positive_case = frac * A\n zeroth_case = (1 - A) * (X.pow(2))\n loss = positive_case + zeroth_case\n return loss.sum()\n\n\nclass Trainer:\n def __init__(self, datagen, model, config):\n self.datagen = datagen\n self.train_loader = datagen.train_loader\n self.test_loader = datagen.test_loader\n self.device = torch.device('cuda' if torch.cuda.\n is_available() else 'cpu')\n self.model = model.to(self.device)\n self.config = config\n self.parameters = self.config[\"parameters\"]\n self.experiment_name = self.config[\"name\"]\n self.experiment_root = \"./experiments/\" + self.experiment_name + \"/\"\n self.log_path = self.experiment_root + \"/logs/\"\n self.logger = 
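UnbiasedPULoss above is a noise-corrected positive-unlabeled squared loss: observed positives pay ((X - 1)^2 - rho * X^2) / (1 - rho) and unlabeled points pay X^2. A quick sanity check that the positive branch collapses to the ordinary squared error when the noise rate rho is zero:

import torch

X = torch.tensor([0.2, 0.9])   # model outputs
A = torch.tensor([1.0, 1.0])   # all observed-positive labels
rho = 0.0
numer = (X - 1).pow(2) - rho * X.pow(2)
loss = (numer / (1 - rho) * A + (1 - A) * X.pow(2)).sum()
assert torch.isclose(loss, ((X - 1) ** 2).sum())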
Logger(self.log_path)\n\n def update_lr(self, optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n def print_image(self):\n mask = squeezed.numpy()\n mask_ = np.multiply(mask, 200)\n Image.fromarray(mask_).show()\n seg.show()\n\n def write_checkpoint(self, epoch):\n checkpoint_filename = str(epoch) + \".ckpt\"\n checkpoint_path = (\n self.experiment_root + \"checkpoints/\" + checkpoint_filename)\n torch.save(self.model.state_dict(), checkpoint_filename)\n\n def run(self, checkpoint_path):\n num_epochs = int(self.parameters[\"epochs\"])\n batch_size = int(self.parameters[\"batch_size\"])\n learning_rate = int(self.parameters[\"learning_rate\"])\n\n if checkpoint_path:\n epochs_done = int(checkpoint_path.split('.')[1].split(\"/\")[3])\n num_epochs -= epochs_done\n self.model.load_state_dict(torch.load(checkpoint_path))\n\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)\n\n total_step = self.datagen.total_steps\n curr_lr = learning_rate\n for epoch in range(num_epochs):\n batcher = Batcher(self.train_loader, batch_size)\n step = 0\n start = time.time()\n data_times = []\n compute_times = []\n for images, labels in batcher:\n end = time.time()\n data_times.append(end - start)\n step += 1\n\n images = images.to(self.device)\n labels = labels.to(self.device)\n\n start = time.time()\n outputs = self.model(images)\n loss = criterion(outputs, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n end = time.time()\n compute_times.append(end-start)\n\n if (step + 1) % 100 == 0:\n print(\"Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}\".format(\n epoch + 1, num_epochs, step + 1, total_step,\n loss.item()))\n\n info = {'loss': loss.item()}\n self.logger.log(self.model, info, step)\n print(\"Mean data time = \" + str(mean(data_times)))\n print(\"Mean compute time = \" + str(mean(compute_times)))\n\n if (epoch + 1) % 20 == 0:\n curr_lr /= 3\n self.update_lr(optimizer, curr_lr)\n\n self.write_checkpoint(epoch)\n\n self.model.eval()\n\n def test(self, model):\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in self.test_loader:\n images = images.to(self.device)\n labels = labels.to(self.device)\n outputs = self.model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the model on the test images: {} %'.format(\n 100 * correct / total))","sub_path":"trainers/cocostuff_profiler.py","file_name":"cocostuff_profiler.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"88491507","text":"#makeIDWordMap.py\n#helper script to make the dictionary that maps ID numbers to words in reference\n\n#imports\n\nimport cPickle as cpkl\n#helpers\n\n#main process\n\nif __name__ == \"__main__\":\n #get training file\n trainFilename = \"../data/raw/lyrics_train.txt\"\n trainFile = open(trainFilename,\"rb\")\n #find line in trainfile for our reference\n neededLine = \"\"\n for line in trainFile:\n if (line[0] == \"%\"): #found our line\n neededLine = line\n break\n #strip down neededLine\n neededLine = neededLine[1:(len(neededLine) - 1)]\n #then split into list of string\n listOfStrings = neededLine.split(\",\")\n idToWord = {} #will build on this\n for i in xrange(len(listOfStrings)):\n #index starts at 1\n idToWord[i+1] = listOfStrings[i]\n #then export via pickling\n 
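Two notes on Trainer.run above: learning_rate is cast with int(), which truncates typical fractional rates such as 0.001 to zero (float() is presumably intended), and write_checkpoint saves to checkpoint_filename rather than the checkpoint_path it just built. The manual schedule that divides the rate by 3 every 20 epochs also admits a closed form, sketched here:

def lr_after_epoch(base_lr, epoch, period=20, factor=3.0):
    # rate in effect once the end-of-epoch update above has run (epoch is 0-indexed)
    return base_lr / (factor ** ((epoch + 1) // period))

assert lr_after_epoch(9.0, 18) == 9.0
assert lr_after_epoch(9.0, 19) == 3.0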
cpkl.dump(idToWord,open(\"../data/preprocessed/idToWordMap.pkl\",\"wb\"))\n","sub_path":"code/makeIDWordMap.py","file_name":"makeIDWordMap.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540766084","text":"#section_03.py\n\nimport os\n\n# 파일명 변경하기 함수\ndef changeFilename(filelist, size, change):\n number = 0\n\n print('-'*50) \n for name in filelist:\n if name[-4: ]=='.jpg':\n number += 1\n strnumber = str(number).zfill(size)\n newname = '자연' + strnumber + '.jpg' \n if change:\n os.rename(path+name, path+newname)\n print(name, '->', newname)\n print('-'*50)\n\n# 프로그램 시작\nwhile True:\n print('프로그램을 종료하고 싶으면 q를 입력하세요.')\n\n # 폴더 경로 입력받기\n path = input('경로를 입력해주세요(형식: c:/temp/) ') \n\n if path == 'q':\n break\n elif path == '':\n path = 'c:/temp/'\n elif path[-1] != '/':\n path += '/'\n\n # 폴더에서 파일정보 가져오기 \n try:\n files = os.listdir(path)\n for file in files:\n print(file)\n except:\n print('경로를 다시 확인하세요.')\n continue\n\n # 파일정보 확인하기\n go = input('해당 폴더의 파일이 맞나요? (y/n) ').lower()\n if go == 'n':\n continue\n elif go == 'q':\n break\n\n size = len(input('숫자부분의 자릿수에 맞춰 0을 넣어주세요(ex, 000): '))\n\n # 파일 변경 전 확인하기\n changeFilename(files, size, False)\n\n go = input('이대로 변경할까요? (y/n) ').lower()\n if go == 'n':\n continue\n elif go == 'q':\n break\n\n # 파일 변경하기\n changeFilename(files, size, True)\n print('파일명 변경 완료')\n\n # 재실행 / 종료\n ans = input('또 다른 파일명을 수정할건가요?(y/n): ').lower().startswith('y')\n if ans:\n continue\n else:\n break\n \n# 프로그램 종료\n","sub_path":"9장 프로젝트 소스/section_03.py","file_name":"section_03.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"630370653","text":"from pathlib import Path\r\nfrom fontTools import ttLib\r\nimport os,errno, glob\r\nimport numpy as np\r\nfrom random import sample\r\n\r\nimport concurrent.futures\r\nimport time\r\nfrom multiprocessing import Value\r\n\r\nFONT_SPECIFIER_NAME_ID = 4\r\nFONT_SPECIFIER_FAMILY_ID = 1\r\n\r\n# file_path = Path('E:\\Google Drive\\Colab Notebooks')\r\nfile_path = Path('E:\\FontRecognition')\r\nfont_path = file_path.joinpath('Fonts_500')\r\ndataset_path = file_path.joinpath('Dataset_Final\\\\Dataset_test_50')\r\n\r\ntotal_images_per_font = 50\r\n\r\ndef shortName( font ):\r\n \"\"\"Get the short name from the font's names table\"\"\"\r\n name = \"\"\r\n family = \"\"\r\n for record in font['name'].names:\r\n if b'\\x00' in record.string:\r\n name_str = record.string.decode('utf-16-be')\r\n else: \r\n name_str = record.string.decode('latin-1')\r\n if record.nameID == FONT_SPECIFIER_NAME_ID and not name:\r\n name = name_str\r\n elif record.nameID == FONT_SPECIFIER_FAMILY_ID and not family: \r\n family = name_str\r\n if name and family: break\r\n return name, family\r\n\r\ndef generate_images(fonts, process_id):\r\n print(f'Begin generating image from process {process_id}')\r\n for font in fonts[:]:\r\n tt = ttLib.TTFont(font)\r\n font_name = shortName(tt)[0]\r\n space_counter = 0\r\n for char in reversed(font_name):\r\n if char == ' ':\r\n space_counter+=1\r\n else:\r\n break\r\n font_name = font_name[:len(font_name) - space_counter]\r\n synthetic_font_path = str(dataset_path.joinpath(font_name))\r\n font_type_used = str(font)\r\n try:\r\n os.makedirs(synthetic_font_path)\r\n print(f\"creating {font_name} dataset\")\r\n \"\"\"\r\n TRDG Configuration\r\n --outputdir\r\n count (c) = 1000 image\r\n blur (bl) = 0 - 3 (nilai tengah dari 
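section_03.py above is a small interactive renamer (its prompts are Korean: enter a folder path, preview the renames, confirm, then apply); the new names are built with str.zfill. The naming scheme in isolation, with the prefix translated ('자연' means 'nature'):

def padded_name(index, width, prefix="nature", ext=".jpg"):
    # zero-pads the counter to 'width' digits, as changeFilename does above
    return prefix + str(index).zfill(width) + ext

print(padded_name(7, 3))  # nature007.jpg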
random)\r\n format (f) = 105px\r\n character_spacing (cs) = 3px (tidak bisa dirandom)\r\n background (b) = pictures (random)\r\n skew (k) = 6 affine transform random\r\n\r\n trdg cannot random cs.\r\n \"\"\"\r\n command_str = f\"trdg --output_dir \\\"{synthetic_font_path}\\\" -c {total_images_per_font} -b 3 -bl 3 -rbl -k 6 -rk -tc #000000,#4A4A4A -f 105 -cs 3 -ft \\\"{font_type_used}\\\"\"\r\n os.system('cmd /c '+command_str)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n total_image = len(glob.glob1(synthetic_font_path, '*.jpg'))\r\n if total_image < total_images_per_font:\r\n print(f\"{font_name} lack of {(total_images_per_font - total_image)}, Generating lacked image\")\r\n command_str = f\"trdg --output_dir \\\"{synthetic_font_path}\\\" -c {total_images_per_font} -b 3 -bl 3 -rbl -k 6 -rk -tc #000000,#4A4A4A -f 105 -cs 3 -ft \\\"{font_type_used}\\\"\"\r\n os.system('cmd /c '+command_str)\r\n \r\n # print(font_name)\r\n return f\"Image generator from process {[process_id]} finished\"\r\n\r\ndef check_all_directory(fonts, process_id):\r\n is_cleared = True\r\n all_folder_count = 0\r\n all_image_count = 0\r\n print(f'Begin checking all directory from process {process_id}')\r\n for font in fonts:\r\n tt = ttLib.TTFont(font)\r\n font_name = shortName(tt)[0]\r\n space_counter = 0\r\n for char in reversed(font_name):\r\n if char == ' ':\r\n space_counter+=1\r\n else:\r\n break\r\n font_name = font_name[:len(font_name) - space_counter]\r\n synthetic_font_path = str(dataset_path.joinpath(font_name))\r\n if os.path.isdir(synthetic_font_path):\r\n total_image = len(glob.glob1(synthetic_font_path, '*.jpg'))\r\n # print(f\"{font_name} is {total_image}\")\r\n all_folder_count+=1\r\n all_image_count+=total_image\r\n if total_image < total_images_per_font:\r\n print(f\"{font_name} lack of {(total_images_per_font - total_image)} on {font}\")\r\n print(f\"Generating {font_name} : {(total_images_per_font - total_image)} with {font}\")\r\n command_str = f\"trdg --output_dir \\\"{synthetic_font_path}\\\" -c {total_images_per_font - total_image} -b 3 -bl 3 -rbl -k 6 -rk -tc #000000,#4A4A4A -f 105 -cs 3 -ft \\\"{font_type_used}\\\"\"\r\n os.system('cmd /c '+command_str)\r\n is_cleared = False\r\n elif total_image > total_images_per_font:\r\n print(f\"{font_name} is over {(total_image - total_images_per_font)}\")\r\n print(f\"Removing {(total_image - total_images_per_font)} in {str(synthetic_font_path)}\")\r\n thefiles = os.listdir(synthetic_font_path)\r\n for file in sample(thefiles,(total_image - total_images_per_font)):\r\n # print(synthetic_font_path +\"'\\\\\"+ file)\r\n os.remove(synthetic_font_path +\"\\\\\"+ file)\r\n is_cleared = False\r\n else:\r\n pass\r\n # print(font_name)\r\n print(f\"Image generator from process {[process_id]} report is_cleared: {is_cleared}\")\r\n return all_folder_count, all_image_count\r\n\r\ndef main():\r\n types = ('*.ttf', '*.otf')\r\n all_fonts = []\r\n for files in types:\r\n all_fonts.extend(font_path.glob(files))\r\n all_fonts.sort()\r\n\r\n # splitting data\r\n all_fonts = np.array(all_fonts)\r\n print(f'{len(all_fonts)} detected')\r\n all_fonts = np.array_split(all_fonts, 8)\r\n print(f'{len(all_fonts)} splits are created')\r\n\r\n # creating multiple process and generate images\r\n processes = []\r\n process_id_counter = 1\r\n with concurrent.futures.ProcessPoolExecutor() as executor:\r\n for split in all_fonts:\r\n process = executor.submit(generate_images, split, process_id_counter)\r\n processes.append(process)\r\n process_id_counter +=1\r\n\r\n for 
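One bug worth flagging in check_all_directory above: its regeneration branch interpolates font_type_used, a name defined only inside generate_images, so reaching that branch raises NameError (the loop's font variable is presumably what was intended). Separately, the shell string can be built as an argument list, which sidesteps quoting problems with font paths containing spaces; a sketch with the same flags:

import subprocess

def run_trdg(out_dir, count, font_path):
    # flags copied from the command string above
    cmd = ["trdg", "--output_dir", out_dir, "-c", str(count),
           "-b", "3", "-bl", "3", "-rbl", "-k", "6", "-rk",
           "-tc", "#000000,#4A4A4A", "-f", "105", "-cs", "3",
           "-ft", font_path]
    subprocess.run(cmd, check=True)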
p in concurrent.futures.as_completed(processes):\r\n print(p.result())\r\n \r\n # Checking all directories\r\n counter_font_folder = 0\r\n counter_all_images = 0\r\n print(\"Checking all directory\")\r\n processes = []\r\n process_id_counter = 1\r\n with concurrent.futures.ProcessPoolExecutor() as executor:\r\n for split in all_fonts:\r\n process = executor.submit(check_all_directory, split, process_id_counter)\r\n processes.append(process)\r\n process_id_counter +=1\r\n\r\n for p in concurrent.futures.as_completed(processes):\r\n # print(p.result())\r\n temp = p.result()\r\n dir_count = float(temp[0])\r\n img_count = float(temp[1])\r\n counter_font_folder+=dir_count\r\n counter_all_images+=img_count\r\n print(f\"Total Directories : {counter_font_folder}\")\r\n print(f\"Total Images : {counter_all_images}\")\r\n\r\n # generate_images(fonts, 0)\r\n\r\nif __name__ == \"__main__\":\r\n t1 = time.perf_counter()\r\n main()\r\n t2 = time.perf_counter()\r\n print(\"Synthetic data has been created\")\r\n print(f\"Finished in {round(t2 - t1, 2)} seconds\")","sub_path":"create_synthetic_data.py","file_name":"create_synthetic_data.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345089165","text":"\n# physical parameters\n\nphysics={\n 'h':0.004, # D=2h\n 'nu':1.0e-6,\n 'uTau':0.045\n }\n\n#print physics\n\nsampling={\n 'raw_sample_size':128,\n 'dataShape':(200,4) \n }\n\n# data entry parameters\n\ndataEntry={\n\t'timeStep':20,\n 'chunkStep':40,\n 'NbOfFiles':161,\n 'path':\"/store/8simu_tmp/gradP0_1p0125\",\n }\n\nparameters={\n 'physics':physics,\n 'sampling':sampling,\n 'dataEntry':dataEntry,\n }\n","sub_path":"userLineOn2Diagonals/para_statistic_2Diagonals.py","file_name":"para_statistic_2Diagonals.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641003734","text":"from app import db\nfrom uuid import uuid4\n\n\nclass Project(db.Model):\n id = db.Column(db.String(64), primary_key=True, index=True, unique=True, nullable=False)\n name = db.Column(db.String(128), nullable=False)\n description = db.Column(db.Text, nullable=False)\n professor_id = db.Column(db.String(64), db.ForeignKey('professor.id'), nullable=False)\n\n def __init__(self, name, description, professor_id):\n self.id = str(uuid4())\n self.name = name\n self.description = description\n self.professor_id = professor_id\n\n def create(self):\n db.session.add(self)\n db.session.commit()\n","sub_path":"server/app/models/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"429689633","text":"import socket\nimport os\nimport threading\n\nfrom PyQt5.Qt import *\n\nclass TcpServer(QWidget):\n client_connect_signal = pyqtSignal()\n recv_signal = pyqtSignal(str, )\n\n def __init__(self, parent=None, *args, **kwargs):\n super().__init__(parent, *args, **kwargs)\n self.tcp_server_scoket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"init\")\n\n\n def start_tcp_server(self, ip, port):\n try:\n self.tcp_server_scoket.bind((ip, port))\n except Exception as e:\n print(\"ip or port error!\")\n else:\n self.tcp_server_scoket.listen(128)\n self.tcp_server_scoket.setblocking(False)\n server_th = threading.Thread(target=self.tcp_connect_concurrency)\n server_th.start()\n print(\"listening!\")\n\n def 
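main() above fans the font list out with numpy array_split and one ProcessPoolExecutor task per chunk, then folds results back as the futures complete. The pattern reduced to a runnable minimum:

import concurrent.futures
import numpy as np

def chunk_size(chunk, pid):
    # stand-in for generate_images / check_all_directory above
    return pid, len(chunk)

if __name__ == "__main__":
    splits = np.array_split(np.arange(100), 8)
    with concurrent.futures.ProcessPoolExecutor() as ex:
        futures = [ex.submit(chunk_size, s, i) for i, s in enumerate(splits)]
        total = sum(f.result()[1] for f in concurrent.futures.as_completed(futures))
    print(total)  # 100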
service_client(self, new_socket, client_addr, recv_data):\n print(str(client_addr[0]) + \":\" + str(client_addr[1]) + \":\" +recv_data)\n data = str(client_addr[0]) + \":\" + str(client_addr[1]) + \":\" +recv_data\n self.recv_signal.emit(data)\n\n def tcp_connect_concurrency(self):\n self.client_sockte_dict = dict()\n while True:\n try:\n new_socket, client_addr = self.tcp_server_scoket.accept()\n except Exception as e:\n pass\n else:\n new_socket.setblocking(False)\n self.client_sockte_dict[str(client_addr)] = new_socket\n self.client_connect_signal.emit()\n\n for ip in list(self.client_sockte_dict.keys()):\n try:\n recv_data = self.client_sockte_dict[ip].recv(1024).decode(\"utf-8\")\n except Exception as e:\n pass\n else:\n if recv_data:\n self.service_client(self.client_sockte_dict[ip], client_addr, recv_data)\n else:\n self.client_sockte_dict[ip].close()\n del self.client_sockte_dict[ip]\n self.client_connect_signal.emit()\n #self.client_sockte_list.remove(self.client_sockte_dict[ip])\n\n def tcp_server_send(self, ip, data):\n self.client_sockte_dict[ip].send(data.encode(\"utf-8\"))\n\n def tcp_close_server(self):\n for client_socket in self.client_sockte_list:\n client_socket.close()\n self.tcp_server_scoket.close()\n\nif __name__ == '__main__':\n tcpserver = TcpServer()\n tcpserver.start_tcp_server(\"192.168.2.128\", 7890)","sub_path":"TcpServer.py","file_name":"TcpServer.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"420526916","text":"__author__ = 'taosituo'\n\"\"\"\nrun_evaluate : load test data which saved by run_model.py\n\"\"\"\n\nimport logging\nimport base_loader\n\n\nclass EvalOrderData(object):\n \"\"\"data unit for evaluate program get test data\n \"\"\"\n def __init__(self, company_id, account_id, key_id, sid):\n self.company_id = company_id\n self.account_id = account_id\n self.key_id = key_id\n self.sid = sid\n\n\nclass TestLoader(base_loader.BaseLoader):\n \"\"\"\n Load test data\n key_id : in order-model(cf cr) campaign_id\n in collection-model(cl) company_id\n self.__test_dict[key_id] = {idx:[obj1, obj2, obj3, ... 
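TcpServer above relies on setblocking(False): accept() and recv() then raise instead of waiting, so a single thread can poll every client socket in turn. Catching BlockingIOError, rather than a bare Exception as above, keeps real socket errors visible. The core of the pattern:

import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(("127.0.0.1", 0))   # ephemeral port for the sketch
srv.listen(128)
srv.setblocking(False)
try:
    srv.accept()             # no client pending yet
except BlockingIOError:
    pass                     # the real loop just moves on and polls again
srv.close()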
]}\n \"\"\"\n def __init__(self, file_name, file_schema, splitter):\n super(TestLoader, self).__init__(file_name, file_schema, splitter)\n self.__test_dict = dict()\n\n def load(self):\n \"\"\"load test data\n \"\"\"\n logger = logging.getLogger('root')\n logger.info('Class TestLoader load() start running')\n\n with open(self._file_name, 'r') as fr:\n for line in fr:\n line = line.strip()\n if line == '':\n continue\n segs = line.split('\\t')\n if len(segs) != self._file_schema_len:\n logger.error('Class LoadTest load(), load(), size of segs is error')\n raise base_loader.SchemaError(segs, self._file_schema_list)\n sid = segs[self._file_schema_list.index('sid')].strip()\n sid_list = sid.split('|')\n key_id = sid_list[0]\n idx = sid_list[1]\n company_id = segs[self._file_schema_list.index('company_id')].strip()\n account_id_list = segs[self._file_schema_list.index('account_id_list')].strip().split('|')\n if key_id not in self.__test_dict:\n self.__test_dict[key_id] = {}\n self.__test_dict[key_id][idx] = []\n for account_id in account_id_list:\n r = EvalOrderData(company_id, account_id, key_id, sid)\n self.__test_dict[key_id][idx].append(r)\n logger.info('Class LoadTest, load() run success')\n return self.__test_dict\n","sub_path":"io/test_loader.py","file_name":"test_loader.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"61396304","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport mdlog\nlog = mdlog.getLogger(__name__)\n\nfrom EventLoop import getLoop, pushEvent\nfrom EventList import PedalsEvent, ExitEvent, RestartEvent\n\nlog.info(\"Importing pedals!\")\n\nfrom piehid32 import *\n\nlog.info(\"Imported pedals!\")\n\nfrom ctypes import *\nfrom eventfd import eventfd\nimport struct\nimport sys, os\nimport atexit\n\nwakeupFd = eventfd(0, 0)\n\noldPedals = [0] * 3\n\n_pedalCb = None\n\ndef setPedalCallback(cb):\n \"\"\"\n Set a callback that gets executed on the pedal reading thread,\n unlike the PedalEvent which is always received on the main thread.\n \"\"\"\n global _pedalCb\n _pedalCb = cb\n\ndef getPedalCallback():\n return _pedalCb\n\ndef dataCb(data, deviceId, error):\n global oldPedals\n pedals = [0] * 3\n pedals[0] = (data[3] & 2) != 0\n pedals[1] = (data[3] & 4) != 0\n pedals[2] = (data[3] & 8) != 0\n for i in range(3):\n if pedals[i] != oldPedals[i]:\n changed = i\n oldPedals = pedals\n log.info(\"Pedals: [%s]\" % pedals)\n pushEvent(PedalsEvent(pedals, changed))\n if _pedalCb:\n _pedalCb(pedals, changed)\n global wakeupFd\n # We use eventfd to wakeup the main thread so it will\n # see the pedal event\n written = os.write(wakeupFd, c_longlong(1))\n if written != 8:\n log.error(\"Error writing to eventfd.\")\n return 0\n\ndef errorCb(deviceId, status):\n log.error(\"Error in pedals: [%s] [%s]\" % (deviceId, status))\n return 0\n\ndef readOut():\n num = os.read(wakeupFd, 8)\n log.debug(\"Pedal EventFD Read: %d\" % struct.unpack('@Q', num)[0])\n\ngetLoop().subscribeFile(wakeupFd, getLoop().FILE_INPUT, readOut)\n\n_dataCb = PHIDDataEvent(dataCb)\n_errorCb = PHIDErrorEvent(errorCb)\n\ninfoArrayType = TEnumHIDInfo * MAX_XKEY_DEVICES\nTEnumHIDInfoPtr = POINTER(TEnumHIDInfo)\n\ninfo = infoArrayType()\ncount = c_long(0)\n\nresult = EnumeratePIE(PI_VID, info, pointer(count))\n\ndev = None\nfor i in range(count.value):\n dev = pointer(info[i])\n log.info(\"Found XKeys Device:\")\n log.info(\"\\tPID: %04x\" % dev.contents.PID)\n log.info(\"\\tUsage Page: %04x\" % 
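TestLoader.load above splits each tab-separated line by schema position, then splits the sid field on '|' into a key id and an index, grouping one record per account. The parsing convention on a made-up line (field order follows the schema names used above):

line = "cmp1\tacc1|acc2\tcamp9|3"                 # company, accounts, sid (hypothetical)
company_id, accounts, sid = line.split("\t")
key_id, idx = sid.split("|")[:2]
rows = {key_id: {idx: [(company_id, a) for a in accounts.split("|")]}}
print(rows)  # {'camp9': {'3': [('cmp1', 'acc1'), ('cmp1', 'acc2')]}}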
dev.contents.UP)\n log.info(\"\\tUsage: %04x\" % dev.contents.Usage)\n log.info(\"\\tVersion: %04x\" % dev.contents.Version)\n\n # magic constants from piehidtestgui!\n if dev.contents.UP == 0x000c and dev.contents.Usage == 0x0001: \n log.info(\"\\tSetting up interface.\")\n result = SetupInterfaceEx(dev.contents.Handle)\n if result != 0:\n log.info(\"Unable to open device. Error: %d\" % result)\n\n # Why break after one iteration? No idea, but if I don't\n # do this then my keyboard stops working until I unplug it\n # and plug it back in!\n break\n\nif dev.contents.Handle < 0:\n log.error(\"Unable to open device.\")\n sys.exit(1)\n\ndataArrayType = c_char * 33\ndata = dataArrayType()\nidx = c_int(0)\n\nresult = SetDataCallback(dev.contents.Handle, _dataCb)\nif result != 0:\n log.error(\"Unable to set data callback, Error: %d\" % result)\n result = SetErrorCallback(dev.contents.Handle, _errorCb)\n if result != 0:\n log.error(\"Unable to set error callback, Error: %d\" % result)\n\ndef exitHandler():\n global dev\n if dev:\n SetDataCallback(dev.contents.Handle, cast(None, PHIDDataEvent))\n SetErrorCallback(dev.contents.Handle, cast(None, PHIDErrorEvent))\n CleanupInterface(dev.contents.Handle)\n ClearBuffer(dev.contents.Handle)\n dev = None\n\n#atexit.register(exitHandler)\ngetLoop().subscribeEvent(ExitEvent, exitHandler)\ngetLoop().subscribeEvent(RestartEvent, exitHandler)\n\n# def pedalsTest(ev):\n# log.info(\"got pedals event\")\n\n# getLoop().subscribeEvent(PedalsEvent, pedalsTest)\n","sub_path":"Pedals.py","file_name":"Pedals.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"626082245","text":"# coding=utf-8\n# Copyright 2016-2017 F5 Networks Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom .testlib.bigip_client import BigIpClient\nfrom .testlib.fake_rpc import FakeRPCPlugin\nfrom f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver import \\\n iControlDriver\n\nfrom copy import deepcopy\nimport json\nimport os\nimport pytest\n\n\n@pytest.fixture\ndef bigip(request):\n\n bigip = BigIpClient(pytest.symbols.bigip_mgmt_ip_public,\n pytest.symbols.bigip_username,\n pytest.symbols.bigip_password)\n\n def fin():\n bigip.delete_folders()\n request.addfinalizer(fin)\n\n return bigip\n\n\n@pytest.fixture\ndef fake_plugin_rpc(services):\n\n rpcObj = FakeRPCPlugin(services)\n\n return rpcObj\n\n\n@pytest.fixture\ndef icontrol_driver(icd_config, fake_plugin_rpc):\n class ConfFake(object):\n def __init__(self, params):\n self.__dict__ = params\n for k, v in self.__dict__.items():\n if isinstance(v, unicode):\n self.__dict__[k] = v.encode('utf-8')\n\n def __repr__(self):\n return repr(self.__dict__)\n\n icd = iControlDriver(ConfFake(icd_config),\n registerOpts=False)\n\n icd.plugin_rpc = fake_plugin_rpc\n\n return icd\n\n\n@pytest.fixture()\ndef icd_config():\n oslo_config_filename = (\n os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '../config/basic_agent_config.json')\n 
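dataCb above unpacks the three pedal states from bits 1 through 3 of byte 3 of the HID report. The same decode as a short helper:

def decode_pedals(report_byte3):
    # bit 1 -> pedal 0, bit 2 -> pedal 1, bit 3 -> pedal 2, as in dataCb above
    return [(report_byte3 >> bit) & 1 for bit in (1, 2, 3)]

print(decode_pedals(0b0110))  # [1, 1, 0]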
)\n OSLO_CONFIGS = json.load(open(oslo_config_filename))\n\n config = deepcopy(OSLO_CONFIGS)\n config['icontrol_hostname'] = pytest.symbols.bigip_mgmt_ip_public\n config['icontrol_username'] = pytest.symbols.bigip_username\n config['icontrol_password'] = pytest.symbols.bigip_password\n try:\n config['f5_vtep_selfip_name'] = pytest.symbols.f5_vtep_selfip_name\n except AttributeError:\n config['f5_vtep_selfip_name'] = \"selfip.external\"\n return config\n","sub_path":"test/functional/neutronless/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"42662370","text":"from tkinter import *\n\nroot=Tk()\nroot.geometry(\"200x200\") #PARA CAMBIAR EL TAMAÑO DE LA VENTANAAAAA\n\nvarOpcion=IntVar()\n\ndef imprimir():\n\t#print(varOpcion.get())\n\tif varOpcion.get() == 1:\n\t\tetiqueta.config(text=\"Has elegido masculino.\")\n\telif varOpcion.get() == 2:\n\t\tetiqueta.config(text=\"Has elegido femenino.\")\n\telse:\n\t\tetiqueta.config(text=\"Has elegido otros.\")\n\nLabel(root, text=\"Género:\").pack()\nRadiobutton(root, text=\"Masculino\", variable=varOpcion, value=1, command=imprimir, anchor=\"w\").pack()\nRadiobutton(root, text=\"Femenino\", variable=varOpcion, value=2, command=imprimir, anchor=\"w\").pack()\nRadiobutton(root, text=\"Otros\", variable=varOpcion, value=3, command=imprimir, anchor=\"w\").pack()\n\netiqueta=Label(root)\netiqueta.pack() #EL PACK ES IMPORTANTEEEE NO OLVIDAR\n\nroot.mainloop()","sub_path":"tkinter/radioButton.py","file_name":"radioButton.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"515720078","text":"from tornado import gen\nfrom os.path import isfile\n\nfrom cloud_require.tools.utensil import tor_up_file\nfrom cloud_require.handlers.base_handler import BaseHandler\n\n\nclass RedirectHandler(BaseHandler):\n __redirect_url = None\n\n def __init__(self, application, request, **kwargs):\n super(RedirectHandler, self).__init__(application, request, **kwargs)\n\n def initialize(self, **kwargs):\n super(RedirectHandler, self).initialize()\n self.__redirect_url = kwargs.get(\"url\", \"/\")\n\n @gen.coroutine\n def get(self):\n self.redirect(self.__redirect_url)\n\n @gen.coroutine\n def post(self):\n self.get()\n\n\nclass NotFoundHandler(BaseHandler):\n\n @gen.coroutine\n def get(self):\n self.send_error(404)\n\n @gen.coroutine\n def post(self):\n self.get()\n\n\nclass DownLoadHandler(BaseHandler):\n __down_path = None\n __down_file = None\n __is_dynamic = None\n\n def __init__(self, application, request, **kwargs):\n super(DownLoadHandler, self).__init__(application, request, **kwargs)\n\n def initialize(self, **kwargs):\n super(DownLoadHandler, self).initialize()\n self.__is_dynamic = kwargs.get(\"is_dynamic\", False)\n if not self.__is_dynamic:\n self.__down_file = kwargs.get(\"down_file\", \"claim.txt\")\n self.__down_path = kwargs.get(\"down_path\", \"./up_files/\")\n\n @gen.coroutine\n def get(self):\n file_name = self.get_argument(\"down_file\", \"\") if self.__is_dynamic else self.__down_file\n file_path = self.__down_path + file_name\n if isfile(file_path):\n self.set_header('Content-Type', 'application/octet-stream')\n self.set_header('Content-Disposition', 'attachment; filename=' + file_name)\n with open(file_path, 'rb') as down:\n while True:\n data = down.read()\n if not data:\n break\n self.write(data)\n self.finish()\n else:\n 
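DownLoadHandler.get above streams the file with "while True: data = down.read()", but read() with no size argument slurps the entire file on the first pass, so the loop body runs exactly once. Reading in fixed chunks keeps memory bounded for large downloads; a sketch:

CHUNK = 64 * 1024

def iter_file(path):
    # bounded-memory replacement for the unsized read() loop above
    with open(path, "rb") as f:
        while True:
            data = f.read(CHUNK)
            if not data:
                break
            yield data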
self.send_error(404)\n\n @gen.coroutine\n def post(self):\n self.get()\n\n\nclass UpLoadHandler(BaseHandler):\n __up_path = None\n __up_size = None\n __up_type_list = None\n __is_only = None\n\n def __init__(self, application, request, **kwargs):\n super(UpLoadHandler, self).__init__(application, request, **kwargs)\n\n def initialize(self, **kwargs):\n super(UpLoadHandler, self).initialize()\n self.__up_path = kwargs.get(\"up_path\", \"./up_files/\")\n self.__is_only = kwargs.get(\"is_only\", False)\n self.__up_size = kwargs.get(\"up_size\", 8000.0)\n self.__up_type_list = kwargs.get(\"up_type_list\", None)\n\n @gen.coroutine\n def get(self):\n self.write('''\n <html>\n <head><title>Upload File\n \n
</title></head>\n            <body>\n                <form method='post' enctype='multipart/form-data'>\n                    <input type='file' name='file'/>\n                    <input type='submit' value='upload'/>\n                </form>\n            </body>\n            </html>
\n \n \n ''')\n\n @gen.coroutine\n def post(self):\n if int(self.request.headers.get('Content-Length')) / 1000.0 > self.__up_size:\n self.write_json(\"file size too big\", status=1)\n return\n info = tor_up_file(self.request.files.get(\"file\", None), path=self.__up_path, is_only=self.__is_only,\n type_list=self.__up_type_list)\n if info[0]:\n self.write_json(\"Uploaded successfully\", message={\"name\": info[1]})\n else:\n self.write_json(\"Upload failed\", status=1)\n","sub_path":"cloud_require/handlers/deal_handler.py","file_name":"deal_handler.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"59665199","text":"import requests\nimport json\nfrom termcolor import colored\nc=0\ndef getranking(l,x,n):\n c=0\n page=1\n print(colored(f\"-------------------------------------{x}--------------------------------------------\\n\\n\",attrs=[\"bold\"]))\n print('{:<4}{:<24}{:15}{:10}'.format(\"S.No.\",\"Name\",\"Country Rank\",\"Rating\"))\n while(1):\n if(l=='i'):\n lm=requests.get(f'https://www.codechef.com/api/ratings/all?sortBy=global_rank&order=asc&page={page}&filterBy=Institution%3D{x}')\n else:\n lm=requests.get(f'https://www.codechef.com/api/ratings/all?sortBy=global_rank&order=asc&page={page}&filterBy=Country%3D{x}')\n str=lm.text\n str=json.loads(str)\n str=str['list']\n if(len(str)==0):\n print(\"No name like this exist in codechef database\")\n break\n for i in range(len(str)):\n c=c+1\n print(colored('{:<4}{:<24}{:^15}{:^10}'.format(c,str[i]['name'],str[i]['country_rank'],str[i]['rating']),\"cyan\",attrs=['dark']))\n if(c==n):\n break\n if(c==n):\n break\n page=page+1\n# getranking(\"Birla Institute of Technology Mesra\",10)\n\n","sub_path":"ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"501273786","text":"import os\nimport argparse\n\nparser = argparse.ArgumentParser(description='Classification tests for UCR repository datasets')\nparser.add_argument('--path',action='store_true', default=\"../UCRArchive_2018/\", help=\"Enter the path to master folder containing all the tests!\")\nargs = parser.parse_args()\n\nfor files in os.listdir(args.path):\n\tprint(\"Executing benchmark : {}\".format(files))\n\tos.system(' python ucr.py --dataset {} --path ../UCRArchive_2018/ --save_path models/ --hyper default_hyperparameters.json --cuda --gpu 0 '.format(files))\n","sub_path":"run_all.py","file_name":"run_all.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"147497830","text":"import ael\ndef CommCCY(t,*rest):\n \n ins = ael.Instrument.read('insaddr=%d'% t.insaddr.insaddr)\n \n a = t.counterparty_ptynbr.ptyid\n b = t.prfnbr.prfid\n c = 'Bond'\n output = c + b + a\n return output\n","sub_path":"Python modules/Test_concat.py","file_name":"Test_concat.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307748460","text":"class Solution:\n def getMoneyAmount(self, n: int) -> int:\n if n == 0 or n == 1:\n return 0\n dp = [[0 for i in range(n + 1)] for j in range(n + 1)]\n for len in range(2, n + 1):\n for start in range(1, n - len + 2):\n import sys\n res = sys.maxsize\n for k in range(start, len + start - 1):\n ldp = dp[start][k - 1] if dp[start][k - 1] >= dp[k + 1][start + 
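getranking in ranking.py above pages through the CodeChef ratings API until n rows have printed, tracking the count across two nested loops with duplicated break checks. The paginate-until-n core, with a stubbed page fetcher standing in for the requests call:

def take_n(fetch_page, n):
    rows, page = [], 1
    while len(rows) < n:
        batch = fetch_page(page)
        if not batch:          # empty page: the listing is exhausted
            break
        rows.extend(batch)
        page += 1
    return rows[:n]

fake_api = lambda p: list(range(p * 3, p * 3 + 3)) if p <= 4 else []
print(len(take_n(fake_api, 7)))  # 7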
len - 1] else dp[k + 1][\n start + len - 1]\n res = res if res <= ldp + k else ldp + k\n dp[start][len + start - 1] = res\n # for dd in dp:\n # print(dd)\n return dp[1][n]\n\n\nif __name__ == \"__main__\":\n n = 5\n sol = Solution()\n print(sol.getMoneyAmount(n))\n","sub_path":"leetcode/medium/guess_number_higher_or_lower.py","file_name":"guess_number_higher_or_lower.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"130066159","text":"\"\"\"\npower_rabi.py: A Rabi experiment sweeping the amplitude of the MW pulse.\n\"\"\"\nfrom qm.QuantumMachinesManager import QuantumMachinesManager\nfrom qm.qua import *\nfrom qm import SimulationConfig\nimport matplotlib.pyplot as plt\nfrom configuration import *\nfrom qualang_tools.loops import from_array\n\n###################\n# The QUA program #\n###################\n\na_min = 0.1 # proportional factor to the pulse amplitude\na_max = 1 # proportional factor to the pulse amplitude\nda = 0.02\na_vec = np.arange(a_min, a_max + da / 2, da) # +da/2 to include a_max\nn_avg = 1e6 # number of iterations\n\nwith program() as power_rabi:\n counts = declare(int) # variable for number of counts\n times = declare(int, size=100)\n a = declare(fixed) # variable to sweep over the amplitude\n n = declare(int) # variable to for_loop\n counts_st = declare_stream() # stream for counts\n n_st = declare_stream() # stream to save iterations\n\n with for_(n, 0, n < n_avg, n + 1):\n with for_(*from_array(a, a_vec)):\n # initialization\n play(\"laser_ON\", \"F_transition\")\n align()\n play(\"laser_ON\", \"A_transition\")\n play(\"switch_ON\", \"excited_state_mw\")\n align()\n\n # pulse sequence\n play(\"pi\" * amp(a), \"Yb\") # pulse of varied amplitude\n\n align()\n\n # readout laser\n play(\"laser_ON\", \"A_transition\", duration=int(meas_len // 4))\n # does it needs buffer to prevent damage?\n\n align()\n\n # decay readout\n measure(\"readout\", \"SNSPD\", None, time_tagging.analog(times, meas_len, counts))\n save(counts, counts_st) # save counts\n wait(100)\n\n save(n, n_st) # save number of iteration inside for_loop\n\n with stream_processing():\n counts_st.buffer(len(a_vec)).average().save(\"counts\")\n n_st.save(\"iteration\")\n\n#####################################\n# Open Communication with the QOP #\n#####################################\nqmm = QuantumMachinesManager(qop_ip)\n\nsimulate = False\nif simulate:\n simulation_config = SimulationConfig(duration=28000)\n job = qmm.simulate(config, power_rabi, simulation_config)\n job.get_simulated_samples().con1.plot()\nelse:\n qm = qmm.open_qm(config)\n # execute QUA program\n job = qm.execute(power_rabi)\n # Get results from QUA program\n results = fetching_tool(job, data_list=[\"counts\", \"iteration\"], mode=\"live\")\n # Live plotting\n fig = plt.figure()\n interrupt_on_close(fig, job) # Interrupts the job when closing the figure\n\n while results.is_processing():\n # Fetch results\n counts, iteration = results.fetch_all()\n # Progress bar\n progress_counter(iteration, n_avg, start_time=results.get_start_time())\n # Plot data\n plt.cla()\n plt.plot(a_vec * pi_amp_NV, counts / 1000 / (meas_len * 1e-9))\n plt.xlabel(\"Amplitude [volts]\")\n plt.ylabel(\"Intensity [kcps]\")\n plt.title(\"Power Rabi\")\n plt.pause(0.1)\n","sub_path":"Quantum-Control-Applications/Optically addressable spin qubits/Cryogenic nanophotonic 
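The bottom-up table in getMoneyAmount above fills dp[start][end] by interval length: the worst-case cost of an interval is the best pivot k plus the dearer of the two halves. It skips the last pivot via range(start, len + start - 1), which is safe because that pivot is dominated by its left neighbour; an equivalent memoized top-down form:

from functools import lru_cache

def get_money_amount(n):
    @lru_cache(maxsize=None)
    def cost(lo, hi):
        if lo >= hi:
            return 0
        # range(lo, hi) excludes hi for the same dominance reason as above
        return min(k + max(cost(lo, k - 1), cost(k + 1, hi))
                   for k in range(lo, hi))
    return cost(1, n)

print(get_money_amount(5))  # 6, the value the driver above prints for n = 5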
cavity/power_rabi.py","file_name":"power_rabi.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"348292183","text":"# Average trials of files\n# Arg: file1, file2, file3... outputfile\n# Output: \n# model\n# clusterNum tiral1 trial2 trial3 ... average confidenceinterval paired_t_test:down/up\n\n#!/usr/bin/env python\nimport sys\nimport math\n\nfileNum = len(sys.argv) - 1\nfile_to = open(sys.argv[fileNum], \"a\")\nclusterNum = 100\ndt = {}\nmodel = \"\"\nindex = 0\nmaximum = 0\nfor i in range(fileNum - 1):\n\tfile = open(sys.argv[i + 1], \"r\")\n\tlines = file.readlines()\n\tfor line in lines:\n\t\ttokens = line.strip().split()\n\t\tif len(tokens) == 1:\n\t\t\tmodel = tokens[0]\n\t\t\tif model not in dt:\n\t\t\t\tdt[model] = []\n\t\t\t\tfor j in range(clusterNum):\n\t\t\t\t\tdt[model].append([])\n\t\t\tindex = 0\n\t\telse:\n\t\t\tdt[model][index].append(float(tokens[len(tokens) - 1]))\n\t\t\tmaximum = 1.0\n\t\t\tindex = index + 1\n\tfile.close()\n\nfor m in dt.keys():\n\tfor i in range(clusterNum):\n\t\tfor j in range(len(dt[m][i])):\n\t\t\tdt[m][i][j] = dt[m][i][j] / dt[m][99][j]\n\nfor m in dt.keys():\n\tfile_to.write(m + \"\\n\")\n\tfor i in range(clusterNum):\n\t\tfile_to.write(str(i + 1) + \"\\t\")\n\t\tsum = 0.0\n\t\tfor value in dt[m][i]:\n\t\t\tfile_to.write(str(value) + \"\\t\")\n\t\t\tsum = sum + value\n\t\tmean = sum / len(dt[m][i])\n\t\tfile_to.write(str(mean) + \"\\t\")\n\t\tdeviation = 0.0\n\t\tfor value in dt[m][i]:\n\t\t\tdeviation = deviation + (value - mean) * (value - mean)\n\t\tdeviation = math.sqrt(deviation)\n\t\tinterval = 1.96 * deviation / math.sqrt(len(dt[m][i]))\n\t\tfile_to.write(str(interval) + \"\\t\")\n\t\tif mean - interval > maximum:\n\t\t\tfile_to.write(\"up\\n\")\n\t\telse:\n\t\t\tif mean + interval < maximum:\n\t\t\t\tfile_to.write(\"down\\n\")\n\t\t\telse:\n\t\t\t\tfile_to.write(\"none\\n\")\n\nfile_to.close()\n\n","sub_path":"scripts/average_cost.py","file_name":"average_cost.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101003502","text":"#Text to Speech Conversion\r\nfrom ibm_watson import TextToSpeechV1\r\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\r\nimport playsound\r\n\r\nauthenticator = IAMAuthenticator('90ZND2exsa6Qa96XDSMbt4zilsWbZzxP0CZMCUN3MVBD')\r\ntext_to_speech = TextToSpeechV1(\r\n authenticator=authenticator\r\n)\r\n\r\ntext_to_speech.set_service_url('https://api.eu-gb.text-to-speech.watson.cloud.ibm.com/instances/29fa5bbf-695b-440b-a597-ff8b094a4abe') \r\n# here tts.wav is our file name and wb is write byte by byte in particular file as audio file as type it is python file handling procedure\r\nwith open('mall.mp3', 'wb') as audio_file: #chaning wav to mp3\r\n audio_file.write(\r\n text_to_speech.synthesize(\r\n 'Please wear mask to enter into Mall',\r\n voice='en-US_AllisonV3Voice',\r\n accept='audio/mp3' #changing wav to mp3 \r\n ).get_result().content)\r\nprint('Playing..............')\r\nplaysound.playsound('mall.mp3')\r\nprint('stopped!')\r\n#it is basically saving in folder but we have to play manually so to avoid this we are going to install playsound module\r\n \r\n","sub_path":"tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80617442","text":"# 
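`average_cost.py` above hand-rolls a 95% interval, but note it computes 1.96·sqrt(Σ(x−x̄)²)/√n, which simplifies to 1.96 times the population standard deviation rather than 1.96·s/√n (the standard error), so its reported interval is roughly √n times too wide. For comparison, a sketch of the conventional normal-approximation interval:

```python
import math

def mean_ci(values, z=1.96):
    """Return (mean, half_width) for an approximate 95% CI."""
    n = len(values)
    mean = sum(values) / n
    # sample variance with Bessel's correction (n - 1)
    var = sum((v - mean) ** 2 for v in values) / (n - 1)
    half_width = z * math.sqrt(var) / math.sqrt(n)   # z * s / sqrt(n)
    return mean, half_width

m, hw = mean_ci([1.0, 1.1, 0.9, 1.05, 0.95])
print(f"{m:.3f} +/- {hw:.3f}")
```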
------------------------------------------------------------------------------\n# challenge 08\n#\n# detect AES ECB mode encryption\n# ------------------------------------------------------------------------------\n# solution\nBS = 16 # block size\n\n\ndef is_aes_ecb(bs):\n \"\"\"Check if any 16-byte block repeats in byte string\"\"\"\n return any([bs[i:i + BS] in bs[i + BS:] for i in range(0, len(bs), BS)])\n\n\n# testing\nif __name__ == \"__main__\":\n with open('data/8.txt') as f:\n for s in f:\n if is_aes_ecb(bytes.fromhex(s.strip())):\n print('found ECB encrypted line:\\n', s)\n break\n","sub_path":"c08.py","file_name":"c08.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163283378","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 5 14:22:14 2019\n\n@author: stone\n\"\"\"\nimport cv2\nimport numpy as np\nfrom numba import autojit\n\nnumThd = 0.6\n\ndef imgOut(img, OUTFILE_PATH):\n cv2.imwrite(OUTFILE_PATH, img)\n return 'DONE!'\n\ndef match(img):\n res = []\n minnum = 1\n minindex = -1\n \n for i in range(10):\n filename = 'templ/templ_'+str(i)+'.png'\n templ = cv2.imread(filename)\n templ = cv2.cvtColor(templ, cv2.COLOR_BGR2GRAY)\n _, templ = cv2.threshold(templ, 210, 255, cv2.THRESH_BINARY)\n binary, templ_cnts, hierarchy = cv2.findContours(templ, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n x, y, w, h = cv2.boundingRect(templ_cnts[1])\n templ = templ[y:y+h, x:x+w]\n #imgOut(templ, 'Templ_'+str(i)+'.jpg')\n \n numFig = cv2.resize(img, (np.shape(templ)[1], np.shape(templ)[0]))\n resVal = np.mean(cv2.matchTemplate(numFig, templ, cv2.TM_SQDIFF_NORMED))\n res.append(resVal)\n \n #print('Num ', i, ': ', resVal)\n \n for i in range(10):\n if res[i] > numThd:\n res[i] = 1.0\n if res[i] < minnum:\n minnum = res[i]\n minindex = i\n return (minindex, minnum)\n \n \n","sub_path":"back/东工商河_01/matchTemp.py","file_name":"matchTemp.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"331408297","text":"import requests\nimport datetime\n\nurl = 'http://localhost:57004/api/BatchCloudData?metaObjectCode=WangDongApp.DataTest'\n\ndatas = []\nfor i in range(11000,20000):\n data = {\n 'positiveNumber': i,\n 'Text': 'Text测试'+str(i),\n 'DateTime': str(datetime.datetime.now()),\n 'TrueOrFalse': True,\n 'Integer': i,\n 'Long': i,\n 'Double': i\n }\n datas.append(data)\n print(i)\n\nresponse = requests.post(url, json=datas)\nprint(response.text)\n","sub_path":"60-Tools/batch_api_post.py","file_name":"batch_api_post.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"13749546","text":"import numpy as np\nimport math\n\n#Perceptron\nclass ModPerceptron:\n \n def __init__(self, training_dat, test_dat, margin = False):\n self.training_dat = training_dat\n self.test_dat = test_dat \n self.margin = margin\n self.numb_data = self.training_dat.shape[0]\n self.numb_test_data = self.test_dat.shape[0] \n self.numb_features = self.training_dat.shape[1]-1\n self.weight = np.zeros(self.numb_features+1)#initialize the weigth vector to be zero for all n+1 features\n self._setup_()\n \n def _setup_(self):\n intercept_vector = np.ones(self.numb_data) # initialize the intercept vector\n intercept_vector_t = np.ones(self.numb_test_data) #initialize the intercept vector for test\n augmented_test_sample = 
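`c08.py` above flags ECB by testing whether any 16-byte block reappears later in the ciphertext. An equivalent set-based check also reports how many blocks repeat, which helps rank candidate lines rather than just accept the first hit:

```python
BS = 16  # AES block size in bytes

def duplicate_block_count(data: bytes) -> int:
    """Number of repeated 16-byte blocks; > 0 strongly suggests ECB."""
    blocks = [data[i:i + BS] for i in range(0, len(data), BS)]
    return len(blocks) - len(set(blocks))

# Same verdict as is_aes_ecb(), plus a repeat count:
sample = bytes(16) * 3 + b"x" * 16   # three identical blocks, one unique
assert duplicate_block_count(sample) == 2
```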
np.column_stack((intercept_vector_t,self.test_dat))\n augmented_sample = np.column_stack((intercept_vector, self.training_dat))# incoroperate the intercept vector into the feature vectors \n self.x_test = np.transpose(np.transpose(augmented_test_sample)[0:self.numb_features+1])\n self.y_test = np.transpose(augmented_test_sample)[self.numb_features+1]\n self.x_sample = np.transpose(np.transpose(augmented_sample)[0:self.numb_features+1]) #extracting the mx(n-1) data matrix\n self.y_label = np.transpose(augmented_sample)[self.numb_features+1] #extracting the mx1 label vector\n \n \n def update(self, rate_tup, i, gamma=1.0):\n eta1, eta2 = rate_tup\n raw_y = np.dot(self.weight, self.x_sample[i])\n if self.y_label[i] == -1:\n if self.y_label[i]*raw_y <= 0:\n self.weight +=eta1*self.y_label[i]*self.x_sample[i]\n return 1\n else:\n return 0\n elif self.y_label[i] == 1:\n if self.y_label[i]*raw_y <= 0:\n self.weight +=eta2*self.y_label[i]*self.x_sample[i]\n return 1\n else:\n return 0\n \n \n def prediction(self):\n mistakes = 0\n for i in range(self.numb_test_data):\n raw_y_test = np.dot(self.weight,self.x_test[i])\n if self.y_test[i]*raw_y_test<=0:\n mistakes+=1\n number_correct = self.numb_test_data-mistakes\n return (number_correct/float(self.numb_test_data))*100","sub_path":"mod_perceptron.py","file_name":"mod_perceptron.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"383328720","text":"\"\"\" Игра для батутной арены\"\"\"\nimport pysnooper\nimport time\nimport os\nimport random\nfrom threading import Thread\nimport cv2\nfrom pygame import mixer\nfrom flask import Flask, render_template, send_from_directory\nfrom db import SQL\nfrom buttons import butttons, start_button, buttons_specs, BUTTON\nfrom config import led_delay, game_time, result_time, start_sound, end_sound, press_sound, press_delay\n\nAPP = Flask(__name__)\nAPP.config.from_pyfile('config.ini')\nGAME_PHASE = 0 # 0 - ожидание, 1 - старт, 2 игра, 3 - результаты\nACTIVE_BUTTON = -1\nGAME_POINTS = 0\nGAMER_ID = -1\nGAME10 = False\n#camera = cv2.VideoCapture(0) # Включаем первую камеру\n\n@APP.route('/photo/')\ndef photo(filename):\n \"\"\"Метод выдаёт фотографии из рейтинга\n \"\"\"\n return send_from_directory(os.path.join(APP.root_path, 'static/photo'), filename,\n mimetype='image')\n\n@APP.route('/music/')\ndef music(filename):\n \"\"\" Метод передаёт файлы звуков\n \"\"\"\n return send_from_directory(os.path.join(APP.root_path, 'static/music'), filename,\n mimetype='audio')\n\n@APP.route('/game')\ndef game():\n \"\"\"Фаза игры\n \"\"\"\n global GAME_PHASE, GAME_POINTS\n GAME_PHASE = 2\n for button in butttons:\n button.led.off()\n start_button.led.off()\n return render_template('game.html', foto='/photo/{}.png'.format(GAMER_ID))\n\n@APP.route('/game_over')\ndef game_over():\n \"\"\"Фаза завершения игры\n \"\"\"\n global GAME_PHASE, GAME_POINTS, GAMER_ID\n GAME_PHASE = 3\n SQL('update', 'update_points', (GAME_POINTS, GAMER_ID,))\n for button in butttons:\n button.led.on()\n start_button.led.on()\n return render_template('game_over.html', points=GAME_POINTS,\n foto='/photo/{}.png'.format(GAMER_ID))\n\n@APP.route('/')\ndef index():\n \"\"\"Фаза ожидания начала игры\n \"\"\"\n global GAME_PHASE, GAME_POINTS, GAMER_ID\n GAME_PHASE = 0\n GAME_POINTS = 0\n GAMER_ID = -1\n for button in butttons:\n button.led.off()\n start_button.led.on()\n winners_list = []\n winners = SQL('select_all', 'select_winners')\n j = 0\n for winner in 
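`mod_perceptron.py` above implements the mistake-driven rule w ← w + η·y·x, with separate learning rates for the two classes. A self-contained sketch of one training epoch with a single rate, using the same bias-as-leading-1 augmentation that `_setup_` performs with `column_stack`:

```python
import numpy as np

def perceptron_pass(X, y, w, eta=1.0):
    """One epoch of the perceptron rule; returns (w, mistake count)."""
    mistakes = 0
    for xi, yi in zip(X, y):
        if yi * np.dot(w, xi) <= 0:   # wrong side of (or on) the boundary
            w = w + eta * yi * xi
            mistakes += 1
    return w, mistakes

# Bias folded in as a leading 1, as _setup_() does with column_stack.
X = np.array([[1.0, 2.0, 1.0], [1.0, -1.0, -2.0], [1.0, 3.0, 0.5]])
y = np.array([1.0, -1.0, 1.0])
w = np.zeros(3)
for _ in range(10):                   # separable toy data converges fast
    w, m = perceptron_pass(X, y, w)
    if m == 0:
        break
print(w)
```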
winners:\n j += 1\n winners_list.append(\n {\n \"num\" : j,\n \"name\": winner[0],\n \"rslt\": winner[1],\n \"foto\": \"/static/photo/{}\".format(winner[2])\n }\n )\n return render_template('history.html', rating=winners_list)\n\ndef do_photo(name, path):\n \"\"\"Метод делает фотографию\n \"\"\"\n global camera\n try:\n camera = cv2.VideoCapture(0) # Включаем первую камеру\n camera.read() # \"Прогреваем\" камеру, чтобы снимок не был тёмным\n time.sleep(0.5)\n ret, frame = camera.read() # Делаем снимок\n #frame = frame[300, 150] обрезать фото\n #cv2.ROTATE_90_CLOCKWISE\n #cv2.ROTATE_90_COUNTERCLOCKWISE\n frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)\n photo_file = '{}/{}'.format(os.path.join(path, 'static/photo'), name)\n cv2.imwrite(photo_file, frame) # Записываем в файл\n camera.release() # Отключаем камеру\n except Exception as err:\n return 'ERROR = {}'.format(err)\n return ''\n\ndef play_music(mp3_file: str):\n \"\"\"Метод проигрывает звуки\n \"\"\"\n mixer.init()\n mixer.music.load(mp3_file)\n mixer.music.play()\n\n@APP.route('/start')\ndef start_game():\n \"\"\"Фаза начала игры\n \"\"\"\n global GAME_PHASE, GAMER_ID#, camera\n #camera.release() # Отключаем камеру\n if GAME_PHASE == 1:\n GAME_PHASE = 2\n GAME10 = False\n for button in butttons:\n button.led.on()\n start_button.led.on()\n GAMER_ID = SQL('insert', 'insert_history')\n photo_name = '{}.png'.format(GAMER_ID)\n do_photo(photo_name, APP.root_path)\n thread_music = Thread(target=play_music, args = (start_sound,))\n thread_music.start()\n\n SQL('update', 'update_history',( photo_name, GAMER_ID,))\n return render_template('start.html', foto = '/photo/{}.png'.format(GAMER_ID))\n\n@APP.route('/get_game_phase', methods=['POST'])\ndef get_game_phase():\n \"\"\"Метод выдаёт фазу игры\n \"\"\"\n global GAME_PHASE\n return str(GAME_PHASE)\n\n@APP.route('/get_game_points', methods=['POST'])\ndef get_game_points():\n \"\"\"Метод возвращает количество игровых очков\n \"\"\"\n global GAME_POINTS\n return str(GAME_POINTS)\n\ndef start_button_work():\n \"\"\"Метод работы стартовой кнопки\n \"\"\"\n global GAME_PHASE\n while True:\n time.sleep(0.2)\n if GAME_PHASE == 0:\n if start_button.sensor.is_active:\n play_music('static/music/button.mp3')\n GAME_PHASE = 1\n continue\n\ndef buttons_work(): \n \"\"\"Метод работы игровых кнопок\n \"\"\"\n global GAME_POINTS, GAME_PHASE, GAME10\n start_button_num = len(butttons)\n last_two = [0,1]\n while True:\n time.sleep(0.04)\n if GAME_PHASE == 0:\n for i in butttons:\n if i.sensor.is_active:\n play_music(press_sound)\n butttons[i.number].led.on()\n time.sleep(led_delay)\n butttons[i.number].led.off()\n continue\n else:\n if GAME_PHASE == 2:\n sel_but = -1\n while True:\n sel_but = random.randint(0,len(butttons))\n if not sel_but in last_two:\n break\n last_two.append(sel_but)\n last_two.pop(0)\n if sel_but == start_button_num:\n start_button.led.on()\n else:\n butttons[sel_but].led.on()\n time_cnt = 0\n sign_cnt = 0\n while True:\n if sel_but == start_button_num:\n if start_button.sensor.is_active:\n sign_cnt += 1\n if sign_cnt > 3:\n if not GAME10:\n music_button = Thread(target = play_music, args = ('static/music/button.mp3',))\n music_button.start()\n GAME_POINTS += start_button.points_per_click\n start_button.led.off()\n break\n time.sleep(0.05)\n continue\n else:\n if butttons[sel_but].sensor.is_active:\n sign_cnt += 1\n if sign_cnt > 3:\n if not GAME10:\n music_button = Thread(target = play_music, args = ('static/music/button.mp3',))\n music_button.start()\n GAME_POINTS += 
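In `app.py`, `play_music` re-initializes the pygame mixer on every call and is itself launched on a `Thread`; `mixer.music.play()` already returns immediately, so the thread presumably just keeps the init/load cost off the control loop. A sketch of that pattern with one-time initialization (the mp3 path is the one the game itself uses):

```python
from threading import Thread
from pygame import mixer

mixer.init()  # initialize the audio device once, not per call

def play_async(mp3_path: str) -> Thread:
    """Load and start playback on a worker thread."""
    def _play():
        mixer.music.load(mp3_path)
        mixer.music.play()
    t = Thread(target=_play, daemon=True)
    t.start()
    return t

# play_async("static/music/button.mp3")
```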
butttons[sel_but].points_per_click\n butttons[sel_but].led.off()\n break\n time.sleep(0.05)\n continue\n sign_cnt = 0\n time_cnt += 1\n time.sleep(0.1)\n if time_cnt >29:\n if sel_but == start_button_num:\n start_button.led.off()\n else:\n butttons[sel_but].led.off()\n break\n\n@APP.route('/end_music', methods=['POST'])\ndef end_music():\n \"\"\"Метод проигрывает музыку об окончании игры\n \"\"\"\n global GAME10\n GAME10 = True\n thread_music2 = Thread(target=play_music, args = (end_sound,))\n thread_music2.start()\n return ''\n\nif __name__ == '__main__':\n for number in range(len(buttons_specs)):\n butttons.append(BUTTON(number, *buttons_specs[number]))\n st_work = Thread(target=start_button_work)\n st_work.start()\n ob_work = Thread(target=buttons_work)\n ob_work.start()\n APP.run(host='127.0.0.1', port=8080)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640944504","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nfrom google.protobuf import field_mask_pb2 # type: ignore\nfrom google.protobuf import timestamp_pb2 # type: ignore\nimport proto # type: ignore\n\n__protobuf__ = proto.module(\n package=\"google.cloud.networkconnectivity.v1\",\n manifest={\n \"LocationFeature\",\n \"State\",\n \"Hub\",\n \"RoutingVPC\",\n \"Spoke\",\n \"ListHubsRequest\",\n \"ListHubsResponse\",\n \"GetHubRequest\",\n \"CreateHubRequest\",\n \"UpdateHubRequest\",\n \"DeleteHubRequest\",\n \"ListSpokesRequest\",\n \"ListSpokesResponse\",\n \"GetSpokeRequest\",\n \"CreateSpokeRequest\",\n \"UpdateSpokeRequest\",\n \"DeleteSpokeRequest\",\n \"LinkedVpnTunnels\",\n \"LinkedInterconnectAttachments\",\n \"LinkedRouterApplianceInstances\",\n \"RouterApplianceInstance\",\n \"LocationMetadata\",\n },\n)\n\n\nclass LocationFeature(proto.Enum):\n r\"\"\"Supported features for a location\n\n Values:\n LOCATION_FEATURE_UNSPECIFIED (0):\n No publicly supported feature in this\n location\n SITE_TO_CLOUD_SPOKES (1):\n Site-to-cloud spokes are supported in this\n location\n SITE_TO_SITE_SPOKES (2):\n Site-to-site spokes are supported in this\n location\n \"\"\"\n LOCATION_FEATURE_UNSPECIFIED = 0\n SITE_TO_CLOUD_SPOKES = 1\n SITE_TO_SITE_SPOKES = 2\n\n\nclass State(proto.Enum):\n r\"\"\"The State enum represents the lifecycle stage of a Network\n Connectivity Center resource.\n\n Values:\n STATE_UNSPECIFIED (0):\n No state information available\n CREATING (1):\n The resource's create operation is in\n progress\n ACTIVE (2):\n The resource is active\n DELETING (3):\n The resource's Delete operation is in\n progress\n UPDATING (6):\n The resource's Update operation is in\n progress\n \"\"\"\n STATE_UNSPECIFIED = 0\n CREATING = 1\n ACTIVE = 2\n DELETING = 3\n UPDATING = 6\n\n\nclass Hub(proto.Message):\n r\"\"\"A Network Connectivity 
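The `buttons_work` loop in `app.py` debounces by requiring more than three consecutive active reads (`sign_cnt`) before counting a press. The same idea as a reusable helper — `is_active` here stands in for any polled sensor, e.g. a gpiozero button's `is_active` attribute wrapped in a lambda:

```python
import time

def wait_for_press(is_active, samples=4, interval=0.05, timeout=3.0):
    """Debounce: require `samples` consecutive active reads.

    `is_active` is any zero-argument callable; returns True on a
    confirmed press, False if the timeout expires first.
    """
    streak = 0
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if is_active():
            streak += 1
            if streak >= samples:
                return True
        else:
            streak = 0                 # a bounce resets the count
        time.sleep(interval)
    return False
```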
Center hub is a collection of spokes.\n A single hub can contain spokes from multiple regions. However,\n if any of a hub's spokes use the data transfer feature, the\n resources associated with those spokes must all reside in the\n same VPC network. Spokes that do not use data transfer can be\n associated with any VPC network in your project.\n\n Attributes:\n name (str):\n Immutable. The name of the hub. Hub names must be unique.\n They use the following form:\n ``projects/{project_number}/locations/global/hubs/{hub_id}``\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The time the hub was created.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The time the hub was last\n updated.\n labels (MutableMapping[str, str]):\n Optional labels in key:value format. For more information\n about labels, see `Requirements for\n labels `__.\n description (str):\n An optional description of the hub.\n unique_id (str):\n Output only. The Google-generated UUID for the hub. This\n value is unique across all hub resources. If a hub is\n deleted and another with the same name is created, the new\n hub is assigned a different unique_id.\n state (google.cloud.networkconnectivity_v1.types.State):\n Output only. The current lifecycle state of\n this hub.\n routing_vpcs (MutableSequence[google.cloud.networkconnectivity_v1.types.RoutingVPC]):\n The VPC networks associated with this hub's\n spokes.\n This field is read-only. Network Connectivity\n Center automatically populates it based on the\n set of spokes attached to the hub.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n labels: MutableMapping[str, str] = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=4,\n )\n description: str = proto.Field(\n proto.STRING,\n number=5,\n )\n unique_id: str = proto.Field(\n proto.STRING,\n number=8,\n )\n state: \"State\" = proto.Field(\n proto.ENUM,\n number=9,\n enum=\"State\",\n )\n routing_vpcs: MutableSequence[\"RoutingVPC\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=10,\n message=\"RoutingVPC\",\n )\n\n\nclass RoutingVPC(proto.Message):\n r\"\"\"RoutingVPC contains information about the VPC networks\n associated with the spokes of a Network Connectivity Center hub.\n\n Attributes:\n uri (str):\n The URI of the VPC network.\n required_for_new_site_to_site_data_transfer_spokes (bool):\n Output only. If true, indicates that this VPC network is\n currently associated with spokes that use the data transfer\n feature (spokes where the site_to_site_data_transfer field\n is set to true). If you create new spokes that use data\n transfer, they must be associated with this VPC network. At\n most, one VPC network will have this field set to true.\n \"\"\"\n\n uri: str = proto.Field(\n proto.STRING,\n number=1,\n )\n required_for_new_site_to_site_data_transfer_spokes: bool = proto.Field(\n proto.BOOL,\n number=2,\n )\n\n\nclass Spoke(proto.Message):\n r\"\"\"A Network Connectivity Center spoke represents a connection between\n your Google Cloud network resources and a non-Google-Cloud network.\n\n When you create a spoke, you associate it with a hub. 
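`hub.py` is generated proto-plus code: every message is a `proto.Message` subclass whose fields are declared with `proto.Field(..., number=...)`, exactly as in the `Hub` class above. A hypothetical two-field message in the same style shows the pattern in isolation (this assumes the `proto-plus` package is installed; `Widget` and its fields are made up for illustration):

```python
import proto  # proto-plus

class Widget(proto.Message):
    """Hypothetical message following the same proto-plus pattern as Hub."""
    name = proto.Field(proto.STRING, number=1)
    size = proto.Field(proto.INT32, number=2)
    labels = proto.MapField(proto.STRING, proto.STRING, number=3)

w = Widget(name="w1", size=3, labels={"env": "test"})
print(Widget.to_json(w))
```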
You must also\n identify a value for exactly one of the following fields:\n\n - linked_vpn_tunnels\n - linked_interconnect_attachments\n - linked_router_appliance_instances\n\n Attributes:\n name (str):\n Immutable. The name of the spoke. Spoke names must be\n unique. They use the following form:\n ``projects/{project_number}/locations/{region}/spokes/{spoke_id}``\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The time the spoke was created.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The time the spoke was last\n updated.\n labels (MutableMapping[str, str]):\n Optional labels in key:value format. For more information\n about labels, see `Requirements for\n labels `__.\n description (str):\n An optional description of the spoke.\n hub (str):\n Immutable. The name of the hub that this\n spoke is attached to.\n linked_vpn_tunnels (google.cloud.networkconnectivity_v1.types.LinkedVpnTunnels):\n VPN tunnels that are associated with the\n spoke.\n linked_interconnect_attachments (google.cloud.networkconnectivity_v1.types.LinkedInterconnectAttachments):\n VLAN attachments that are associated with the\n spoke.\n linked_router_appliance_instances (google.cloud.networkconnectivity_v1.types.LinkedRouterApplianceInstances):\n Router appliance instances that are\n associated with the spoke.\n unique_id (str):\n Output only. The Google-generated UUID for the spoke. This\n value is unique across all spoke resources. If a spoke is\n deleted and another with the same name is created, the new\n spoke is assigned a different unique_id.\n state (google.cloud.networkconnectivity_v1.types.State):\n Output only. The current lifecycle state of\n this spoke.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n labels: MutableMapping[str, str] = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=4,\n )\n description: str = proto.Field(\n proto.STRING,\n number=5,\n )\n hub: str = proto.Field(\n proto.STRING,\n number=6,\n )\n linked_vpn_tunnels: \"LinkedVpnTunnels\" = proto.Field(\n proto.MESSAGE,\n number=17,\n message=\"LinkedVpnTunnels\",\n )\n linked_interconnect_attachments: \"LinkedInterconnectAttachments\" = proto.Field(\n proto.MESSAGE,\n number=18,\n message=\"LinkedInterconnectAttachments\",\n )\n linked_router_appliance_instances: \"LinkedRouterApplianceInstances\" = proto.Field(\n proto.MESSAGE,\n number=19,\n message=\"LinkedRouterApplianceInstances\",\n )\n unique_id: str = proto.Field(\n proto.STRING,\n number=11,\n )\n state: \"State\" = proto.Field(\n proto.ENUM,\n number=15,\n enum=\"State\",\n )\n\n\nclass ListHubsRequest(proto.Message):\n r\"\"\"Request for\n [HubService.ListHubs][google.cloud.networkconnectivity.v1.HubService.ListHubs]\n method.\n\n Attributes:\n parent (str):\n Required. 
The parent resource's name.\n page_size (int):\n The maximum number of results per page that\n should be returned.\n page_token (str):\n The page token.\n filter (str):\n An expression that filters the results listed\n in the response.\n order_by (str):\n Sort the results by a certain order.\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size: int = proto.Field(\n proto.INT32,\n number=2,\n )\n page_token: str = proto.Field(\n proto.STRING,\n number=3,\n )\n filter: str = proto.Field(\n proto.STRING,\n number=4,\n )\n order_by: str = proto.Field(\n proto.STRING,\n number=5,\n )\n\n\nclass ListHubsResponse(proto.Message):\n r\"\"\"Response for\n [HubService.ListHubs][google.cloud.networkconnectivity.v1.HubService.ListHubs]\n method.\n\n Attributes:\n hubs (MutableSequence[google.cloud.networkconnectivity_v1.types.Hub]):\n The requested hubs.\n next_page_token (str):\n The next pagination token in the List response. It should be\n used as page_token for the following request. An empty value\n means no more result.\n unreachable (MutableSequence[str]):\n Locations that could not be reached.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n hubs: MutableSequence[\"Hub\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"Hub\",\n )\n next_page_token: str = proto.Field(\n proto.STRING,\n number=2,\n )\n unreachable: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=3,\n )\n\n\nclass GetHubRequest(proto.Message):\n r\"\"\"Request for\n [HubService.GetHub][google.cloud.networkconnectivity.v1.HubService.GetHub]\n method.\n\n Attributes:\n name (str):\n Required. The name of the hub resource to\n get.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass CreateHubRequest(proto.Message):\n r\"\"\"Request for\n [HubService.CreateHub][google.cloud.networkconnectivity.v1.HubService.CreateHub]\n method.\n\n Attributes:\n parent (str):\n Required. The parent resource.\n hub_id (str):\n Required. A unique identifier for the hub.\n hub (google.cloud.networkconnectivity_v1.types.Hub):\n Required. The initial values for a new hub.\n request_id (str):\n Optional. A unique request ID (optional). If\n you specify this ID, you can use it in cases\n when you need to retry your request. When you\n need to retry, this ID lets the server know that\n it can ignore the request if it has already been\n completed. The server guarantees that for at\n least 60 minutes after the first request.\n\n For example, consider a situation where you make\n an initial request and the request times out. If\n you make the request again with the same request\n ID, the server can check to see whether the\n original operation was received. If it was, the\n server ignores the second request. This behavior\n prevents clients from mistakenly creating\n duplicate commitments.\n The request ID must be a valid UUID, with the\n exception that zero UUID is not supported\n (00000000-0000-0000-0000-000000000000).\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n hub_id: str = proto.Field(\n proto.STRING,\n number=2,\n )\n hub: \"Hub\" = proto.Field(\n proto.MESSAGE,\n number=3,\n message=\"Hub\",\n )\n request_id: str = proto.Field(\n proto.STRING,\n number=4,\n )\n\n\nclass UpdateHubRequest(proto.Message):\n r\"\"\"Request for\n [HubService.UpdateHub][google.cloud.networkconnectivity.v1.HubService.UpdateHub]\n method.\n\n Attributes:\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Optional. 
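The Create/Update/Delete requests in this file all document a `request_id` that must be a valid, non-zero UUID and that deduplicates retries for roughly 60 minutes. Generating and reusing one on the client side is a two-liner:

```python
import uuid

request_id = str(uuid.uuid4())   # random UUID; the zero UUID is rejected
assert request_id != "00000000-0000-0000-0000-000000000000"
# Reuse the *same* request_id when retrying the same logical request,
# so the server can recognize and drop the duplicate.
```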
In the case of an update to an existing hub, field\n mask is used to specify the fields to be overwritten. The\n fields specified in the update_mask are relative to the\n resource, not the full request. A field is overwritten if it\n is in the mask. If the user does not provide a mask, then\n all fields are overwritten.\n hub (google.cloud.networkconnectivity_v1.types.Hub):\n Required. The state that the hub should be in\n after the update.\n request_id (str):\n Optional. A unique request ID (optional). If\n you specify this ID, you can use it in cases\n when you need to retry your request. When you\n need to retry, this ID lets the server know that\n it can ignore the request if it has already been\n completed. The server guarantees that for at\n least 60 minutes after the first request.\n\n For example, consider a situation where you make\n an initial request and the request times out. If\n you make the request again with the same request\n ID, the server can check to see whether the\n original operation was received. If it was, the\n server ignores the second request. This behavior\n prevents clients from mistakenly creating\n duplicate commitments.\n The request ID must be a valid UUID, with the\n exception that zero UUID is not supported\n (00000000-0000-0000-0000-000000000000).\n \"\"\"\n\n update_mask: field_mask_pb2.FieldMask = proto.Field(\n proto.MESSAGE,\n number=1,\n message=field_mask_pb2.FieldMask,\n )\n hub: \"Hub\" = proto.Field(\n proto.MESSAGE,\n number=2,\n message=\"Hub\",\n )\n request_id: str = proto.Field(\n proto.STRING,\n number=3,\n )\n\n\nclass DeleteHubRequest(proto.Message):\n r\"\"\"The request for\n [HubService.DeleteHub][google.cloud.networkconnectivity.v1.HubService.DeleteHub].\n\n Attributes:\n name (str):\n Required. The name of the hub to delete.\n request_id (str):\n Optional. A unique request ID (optional). If\n you specify this ID, you can use it in cases\n when you need to retry your request. When you\n need to retry, this ID lets the server know that\n it can ignore the request if it has already been\n completed. The server guarantees that for at\n least 60 minutes after the first request.\n\n For example, consider a situation where you make\n an initial request and the request times out. If\n you make the request again with the same request\n ID, the server can check to see whether the\n original operation was received. If it was, the\n server ignores the second request. This behavior\n prevents clients from mistakenly creating\n duplicate commitments.\n The request ID must be a valid UUID, with the\n exception that zero UUID is not supported\n (00000000-0000-0000-0000-000000000000).\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n request_id: str = proto.Field(\n proto.STRING,\n number=2,\n )\n\n\nclass ListSpokesRequest(proto.Message):\n r\"\"\"The request for\n [HubService.ListSpokes][google.cloud.networkconnectivity.v1.HubService.ListSpokes].\n\n Attributes:\n parent (str):\n Required. 
The parent resource.\n page_size (int):\n The maximum number of results per page that\n should be returned.\n page_token (str):\n The page token.\n filter (str):\n An expression that filters the results listed\n in the response.\n order_by (str):\n Sort the results by a certain order.\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size: int = proto.Field(\n proto.INT32,\n number=2,\n )\n page_token: str = proto.Field(\n proto.STRING,\n number=3,\n )\n filter: str = proto.Field(\n proto.STRING,\n number=4,\n )\n order_by: str = proto.Field(\n proto.STRING,\n number=5,\n )\n\n\nclass ListSpokesResponse(proto.Message):\n r\"\"\"The response for\n [HubService.ListSpokes][google.cloud.networkconnectivity.v1.HubService.ListSpokes].\n\n Attributes:\n spokes (MutableSequence[google.cloud.networkconnectivity_v1.types.Spoke]):\n The requested spokes.\n next_page_token (str):\n The next pagination token in the List response. It should be\n used as page_token for the following request. An empty value\n means no more result.\n unreachable (MutableSequence[str]):\n Locations that could not be reached.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n spokes: MutableSequence[\"Spoke\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"Spoke\",\n )\n next_page_token: str = proto.Field(\n proto.STRING,\n number=2,\n )\n unreachable: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=3,\n )\n\n\nclass GetSpokeRequest(proto.Message):\n r\"\"\"The request for\n [HubService.GetSpoke][google.cloud.networkconnectivity.v1.HubService.GetSpoke].\n\n Attributes:\n name (str):\n Required. The name of the spoke resource.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass CreateSpokeRequest(proto.Message):\n r\"\"\"The request for\n [HubService.CreateSpoke][google.cloud.networkconnectivity.v1.HubService.CreateSpoke].\n\n Attributes:\n parent (str):\n Required. The parent resource.\n spoke_id (str):\n Required. Unique id for the spoke to create.\n spoke (google.cloud.networkconnectivity_v1.types.Spoke):\n Required. The initial values for a new spoke.\n request_id (str):\n Optional. A unique request ID (optional). If\n you specify this ID, you can use it in cases\n when you need to retry your request. When you\n need to retry, this ID lets the server know that\n it can ignore the request if it has already been\n completed. The server guarantees that for at\n least 60 minutes after the first request.\n\n For example, consider a situation where you make\n an initial request and the request times out. If\n you make the request again with the same request\n ID, the server can check to see whether the\n original operation was received. If it was, the\n server ignores the second request. 
This behavior\n prevents clients from mistakenly creating\n duplicate commitments.\n The request ID must be a valid UUID, with the\n exception that zero UUID is not supported\n (00000000-0000-0000-0000-000000000000).\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n spoke_id: str = proto.Field(\n proto.STRING,\n number=2,\n )\n spoke: \"Spoke\" = proto.Field(\n proto.MESSAGE,\n number=3,\n message=\"Spoke\",\n )\n request_id: str = proto.Field(\n proto.STRING,\n number=4,\n )\n\n\nclass UpdateSpokeRequest(proto.Message):\n r\"\"\"Request for\n [HubService.UpdateSpoke][google.cloud.networkconnectivity.v1.HubService.UpdateSpoke]\n method.\n\n Attributes:\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Optional. In the case of an update to an existing spoke,\n field mask is used to specify the fields to be overwritten.\n The fields specified in the update_mask are relative to the\n resource, not the full request. A field is overwritten if it\n is in the mask. If the user does not provide a mask, then\n all fields are overwritten.\n spoke (google.cloud.networkconnectivity_v1.types.Spoke):\n Required. The state that the spoke should be\n in after the update.\n request_id (str):\n Optional. A unique request ID (optional). If\n you specify this ID, you can use it in cases\n when you need to retry your request. When you\n need to retry, this ID lets the server know that\n it can ignore the request if it has already been\n completed. The server guarantees that for at\n least 60 minutes after the first request.\n\n For example, consider a situation where you make\n an initial request and the request times out. If\n you make the request again with the same request\n ID, the server can check to see whether the\n original operation was received. If it was, the\n server ignores the second request. This behavior\n prevents clients from mistakenly creating\n duplicate commitments.\n The request ID must be a valid UUID, with the\n exception that zero UUID is not supported\n (00000000-0000-0000-0000-000000000000).\n \"\"\"\n\n update_mask: field_mask_pb2.FieldMask = proto.Field(\n proto.MESSAGE,\n number=1,\n message=field_mask_pb2.FieldMask,\n )\n spoke: \"Spoke\" = proto.Field(\n proto.MESSAGE,\n number=2,\n message=\"Spoke\",\n )\n request_id: str = proto.Field(\n proto.STRING,\n number=3,\n )\n\n\nclass DeleteSpokeRequest(proto.Message):\n r\"\"\"The request for\n [HubService.DeleteSpoke][google.cloud.networkconnectivity.v1.HubService.DeleteSpoke].\n\n Attributes:\n name (str):\n Required. The name of the spoke to delete.\n request_id (str):\n Optional. A unique request ID (optional). If\n you specify this ID, you can use it in cases\n when you need to retry your request. When you\n need to retry, this ID lets the server know that\n it can ignore the request if it has already been\n completed. The server guarantees that for at\n least 60 minutes after the first request.\n\n For example, consider a situation where you make\n an initial request and the request times out. If\n you make the request again with the same request\n ID, the server can check to see whether the\n original operation was received. If it was, the\n server ignores the second request. 
This behavior\n prevents clients from mistakenly creating\n duplicate commitments.\n The request ID must be a valid UUID, with the\n exception that zero UUID is not supported\n (00000000-0000-0000-0000-000000000000).\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n request_id: str = proto.Field(\n proto.STRING,\n number=2,\n )\n\n\nclass LinkedVpnTunnels(proto.Message):\n r\"\"\"A collection of Cloud VPN tunnel resources. These resources\n should be redundant HA VPN tunnels that all advertise the same\n prefixes to Google Cloud. Alternatively, in a passive/active\n configuration, all tunnels should be capable of advertising the\n same prefixes.\n\n Attributes:\n uris (MutableSequence[str]):\n The URIs of linked VPN tunnel resources.\n site_to_site_data_transfer (bool):\n A value that controls whether site-to-site data transfer is\n enabled for these resources. Data transfer is available only\n in `supported\n locations `__.\n vpc_network (str):\n Output only. The VPC network where these VPN\n tunnels are located.\n \"\"\"\n\n uris: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=1,\n )\n site_to_site_data_transfer: bool = proto.Field(\n proto.BOOL,\n number=2,\n )\n vpc_network: str = proto.Field(\n proto.STRING,\n number=3,\n )\n\n\nclass LinkedInterconnectAttachments(proto.Message):\n r\"\"\"A collection of VLAN attachment resources. These resources\n should be redundant attachments that all advertise the same\n prefixes to Google Cloud. Alternatively, in active/passive\n configurations, all attachments should be capable of advertising\n the same prefixes.\n\n Attributes:\n uris (MutableSequence[str]):\n The URIs of linked interconnect attachment\n resources\n site_to_site_data_transfer (bool):\n A value that controls whether site-to-site data transfer is\n enabled for these resources. Data transfer is available only\n in `supported\n locations `__.\n vpc_network (str):\n Output only. The VPC network where these VLAN\n attachments are located.\n \"\"\"\n\n uris: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=1,\n )\n site_to_site_data_transfer: bool = proto.Field(\n proto.BOOL,\n number=2,\n )\n vpc_network: str = proto.Field(\n proto.STRING,\n number=3,\n )\n\n\nclass LinkedRouterApplianceInstances(proto.Message):\n r\"\"\"A collection of router appliance instances. If you configure\n multiple router appliance instances to receive data from the\n same set of sites outside of Google Cloud, we recommend that you\n associate those instances with the same spoke.\n\n Attributes:\n instances (MutableSequence[google.cloud.networkconnectivity_v1.types.RouterApplianceInstance]):\n The list of router appliance instances.\n site_to_site_data_transfer (bool):\n A value that controls whether site-to-site data transfer is\n enabled for these resources. Data transfer is available only\n in `supported\n locations `__.\n vpc_network (str):\n Output only. The VPC network where these\n router appliance instances are located.\n \"\"\"\n\n instances: MutableSequence[\"RouterApplianceInstance\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"RouterApplianceInstance\",\n )\n site_to_site_data_transfer: bool = proto.Field(\n proto.BOOL,\n number=2,\n )\n vpc_network: str = proto.Field(\n proto.STRING,\n number=3,\n )\n\n\nclass RouterApplianceInstance(proto.Message):\n r\"\"\"A router appliance instance is a Compute Engine virtual\n machine (VM) instance that acts as a BGP speaker. 
A router\n appliance instance is specified by the URI of the VM and the\n internal IP address of one of the VM's network interfaces.\n\n Attributes:\n virtual_machine (str):\n The URI of the VM.\n ip_address (str):\n The IP address on the VM to use for peering.\n \"\"\"\n\n virtual_machine: str = proto.Field(\n proto.STRING,\n number=1,\n )\n ip_address: str = proto.Field(\n proto.STRING,\n number=3,\n )\n\n\nclass LocationMetadata(proto.Message):\n r\"\"\"Metadata about locations\n\n Attributes:\n location_features (MutableSequence[google.cloud.networkconnectivity_v1.types.LocationFeature]):\n List of supported features\n \"\"\"\n\n location_features: MutableSequence[\"LocationFeature\"] = proto.RepeatedField(\n proto.ENUM,\n number=1,\n enum=\"LocationFeature\",\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"packages/google-cloud-network-connectivity/google/cloud/networkconnectivity_v1/types/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":30582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380274467","text":"import pandas as pd\nimport datetime\nfrom numpy import nan\nimport numpy as np\nimport matplotlib. pyplot as plt\nimport matplotlib.dates as mdates\n\n\nDATA_URL = 'http://covidtracking.com/api/states/daily.csv'\n\n\ndef load():\n df = pd.read_csv(DATA_URL)\n df.loc[:, 'date'] = df.loc[:, 'date'].apply(str).apply(datetime.datetime.strptime, args=('%Y%m%d',))\n df.loc[:, 'dateChecked'] = df.loc[:, 'dateChecked'].apply(pd.Timestamp).dt.date\n df.loc[:, 'total'] = df.loc[:, 'total'].astype(float)\n return df\n\n\ndef plot_total(usa_total, region_label):\n print('===Latest snapshot for ' + region_label + ' as of ' + usa_total.last_valid_index().strftime('%Y-%m-%d') +\n '===')\n snapshot = usa_total.iloc[-1, :]\n print('Total positive = ' + str(snapshot['positive']))\n print('Total tested = ' + str(snapshot['posNeg']))\n print('Death rate = ' + str(round(snapshot['death'] / snapshot['positive'], 3)))\n print('Hospitalization rate = ' + str(round(snapshot['hospitalizedCumulative'] / snapshot['positive'], 3)))\n print('ICU rate = ' + str(round(snapshot['inIcuCumulative'] / snapshot['positive'], 3)))\n print('Ventilator rate = ' + str(round(snapshot['onVentilatorCumulative'] / snapshot['positive'], 3)))\n\n fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(8, 24))\n date_format = mdates.DateFormatter('%m-%d')\n\n # plot total case count\n ax[0].plot((usa_total['positive']))\n ax[0].set_yscale('log')\n ax[0].set_title(region_label + ' total positives')\n\n # plot daily new case count\n ax[1].plot(usa_total['positive'].diff(1))\n ax[1].set_yscale('log')\n ax[1].set_title(region_label + ' new cases')\n\n # plot daily new tests\n ax[2].plot(usa_total['total'].diff(1))\n ax[2].set_yscale('log')\n ax[2].set_title(region_label + ' new tests')\n\n # plot daily positive rate\n ax[3].plot(usa_total['positive'].diff(1) / usa_total['total'].diff(1))\n ax[3].set_title(region_label + ' new positive rate')\n\n for i in range(3):\n ax[i].xaxis.set_major_formatter(date_format)\n\n fig.show()\n return\n\n\ndef main():\n df = load()\n usa_total = df.groupby('date').sum().replace(0.0, nan)\n ny_total = df[df.state == 'NY'].groupby('date').sum().replace(0.0, nan)\n ca_total = df[df.state == 'CA'].groupby('date').sum().replace(0.0, nan)\n plot_total(usa_total, 'USA')\n plot_total(ny_total, 'NY State')\n plot_total(ca_total, 'CA State')\n\n\nif __name__ == \"__main__\":\n 
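`coronavirus.py` above builds its daily series by summing per-state rows by date and then differencing. The same groupby/diff pipeline on a tiny frame — note that recent pandas wants `numeric_only=True` here because the `state` column is text, which the script's plain `.sum()` predates:

```python
import pandas as pd

df = pd.DataFrame({
    "date": pd.to_datetime(["2020-03-01", "2020-03-02", "2020-03-03"] * 2),
    "state": ["NY", "NY", "NY", "CA", "CA", "CA"],
    "positive": [10, 25, 60, 5, 9, 20],
})
usa_total = df.groupby("date").sum(numeric_only=True)
new_cases = usa_total["positive"].diff(1)   # day-over-day increase
print(new_cases)                            # NaN, 19, 46
```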
main()\n","sub_path":"coronavirus.py","file_name":"coronavirus.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"406814918","text":"# -*- coding: utf-8 -*-\n\"\"\"Util\n\n File: \n /Lancet/core/model/util\n\n Description: \n Utils\n\"\"\"\n\n\ndef int_to_tuple(element: int, length: int):\n \"\"\"整数转元组\n\n Description:\n 自动将Int转为Tuple,适用于用一个整数表示元组中所有元素均为该整数值\n 的情况。\n\n Args:\n element: Int. 传入的整数\n length: Int. 元组长度\n\n Returns:\n Tuple\n \n Raises:\n None\n \"\"\"\n return (element,) * length\n\n\ndef normalize_tuple(obj, length: int):\n \"\"\"标准化为元组\n\n Description:\n 将tuple/list/int标准化为元组,并且符合指定长度\n\n Args:\n obj: Tuple/List/Int. 传入的对象\n length: Int. 元组长度\n\n Returns:\n Tuple\n \n Raises:\n TypeError\n LenError\n \"\"\"\n assert isinstance(obj, (tuple, list, int)), f'[TypeError] obj ' \\\n f'must be tuple/list/int, but got {type(obj).__name__}. '\n new_tuple = ()\n if isinstance(obj, (tuple, list)):\n if len(obj) == length:\n new_tuple = tuple(obj)\n elif len(obj) == 1:\n new_tuple = int_to_tuple(obj[0], length)\n else:\n raise Exception(f'[LenError] obj({type(obj).__name__}).len '\n f'must be 1 or {length}, but got {len(obj)}. ')\n else:\n new_tuple = int_to_tuple(obj, length)\n return new_tuple\n\n","sub_path":"core/model/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345104240","text":"import os, json\n\nfrom django.core.management.base import BaseCommand\nfrom stores.models import Country\n\n\nclass Command(BaseCommand):\n help = \"Adds a list of countries from a JSON file to database.\"\n\n def handle(self, *args, **options):\n\n countries_json_path = \"/home/nicky/git/ada_stores/backend/\"\n with open(os.path.join(countries_json_path, \"countries.json\")) as f:\n data = json.load(f)\n\n country_count = 0\n\n for country in data:\n name = country[\"Name\"]\n obj, created = Country.objects.get_or_create(name=name)\n\n if created:\n country_count += 1\n\n if obj is None:\n self.stdout.write(self.style.ERROR(f\"{name} was not created.\"))\n\n self.stdout.write(self.style.SUCCESS(f\"{country_count} countries created.\"))\n","sub_path":"stores/management/commands/import_countries.py","file_name":"import_countries.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"11480908","text":"import json \nimport PyPDF2\n \n# creating a pdf file object\npdfFileObj = open('EmployeeHandbook_January_2018.pdf', 'rb')\n#pdfFileObj = open(origFileName, 'rb')\n \n# creating a pdf Reader object\npdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n \n# creating a pdf writer object for new pdf\npdfWriter = PyPDF2.PdfFileWriter()\nemp_hb=[] \n# rotating each page\nfor page in range(pdfReader.numPages):\n \n# creating rotated page object\n pageObj = pdfReader.getPage(page) \n emp_hb.append(pageObj.extractText())\n #print(emp_hb)\n# closing the pdf file object\npdfFileObj.close()\nemp_handbook=\"\".join(emp_hb)\n#print (emp_handbook)\nprint (\"Enter your question\")\nquestion = input()\nd={}\nd['distractor1']=\"\"\nd['question']=question\nd['distractor3']=\"\"\nd['passage']=emp_handbook\nd['correct_answer']=\"\"\nd['distractor2']=\"\"\nprint (d)\nwith open('data_emp_hb.jsonl', 'w') as outfile:\n json.dump(d, 
outfile)\n","sub_path":"write_input.py","file_name":"write_input.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"97477792","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom datetime import datetime\nimport pytz\nimport os\nimport mock\nfrom django.test import TestCase, override_settings\n\n\nfrom corehq.form_processor.tests.utils import FormProcessorTestUtils\n\nfrom corehq.form_processor.interfaces.dbaccessors import CaseAccessors\nfrom custom.enikshay.integrations.ninetyninedots.views import (\n validate_adherence_values,\n validate_beneficiary_id,\n)\nfrom custom.enikshay.case_utils import get_open_episode_case_from_person\nfrom custom.enikshay.integrations.ninetyninedots.api_spec import load_api_spec\nfrom custom.enikshay.integrations.ninetyninedots.utils import (\n create_adherence_cases,\n PatientDetailsUpdater,\n update_adherence_confidence_level,\n update_default_confidence_level,\n)\nfrom custom.enikshay.integrations.ninetyninedots.exceptions import NinetyNineDotsException\nfrom custom.enikshay.tests.utils import ENikshayCaseStructureMixin\n\n\ndef property_setter(param, val, sector):\n return {'property_set_with_setter': val}\n\n\n@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True, SERVER_ENVIRONMENT='enikshay')\nclass Receiver99DotsTests(ENikshayCaseStructureMixin, TestCase):\n def setUp(self):\n super(Receiver99DotsTests, self).setUp()\n self.fake_api_spec_patch = mock.patch('custom.enikshay.integrations.ninetyninedots.utils.load_api_spec')\n fake_api_spec = self.fake_api_spec_patch.start()\n fake_api_spec.return_value = load_api_spec(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_api_spec.yaml')\n )\n self.spec = fake_api_spec()\n self.create_case_structure()\n\n def tearDown(self):\n self.fake_api_spec_patch.stop()\n\n def _get_fake_request(self):\n fake_request = {prop: '123' for prop in self.spec.required_params}\n fake_request['beneficiary_id'] = self.person_id\n return fake_request\n\n def test_required_properties(self):\n # request with required properties gets passed through fine\n PatientDetailsUpdater(self.domain, self._get_fake_request())\n\n # Without the required property, raises an error\n with self.assertRaises(NinetyNineDotsException) as e:\n PatientDetailsUpdater(None, {'boop': 'barp'})\n self.assertTrue(\", \".join(self.spec.required_params) in str(e.exception))\n\n def test_patient_not_found(self):\n fake_request = self._get_fake_request()\n fake_request['beneficiary_id'] = '123'\n with self.assertRaises(NinetyNineDotsException) as e:\n PatientDetailsUpdater(None, fake_request)\n self.assertTrue(str(e.exception), \"No patient exists with this beneficiary ID\")\n\n def test_invalid_choice(self):\n fake_request = self._get_fake_request()\n\n # A request with a valid choice passes through fine\n fake_request['has_choices'] = 'foo'\n PatientDetailsUpdater(None, fake_request)\n\n # A request with an invalid choice raises an error\n fake_request = self._get_fake_request()\n fake_request['has_choices'] = 'biff'\n with self.assertRaises(NinetyNineDotsException) as e:\n PatientDetailsUpdater(None, fake_request)\n self.assertEqual(str(e.exception), \"biff is not a valid value for has_choices.\")\n\n def test_wrong_direction(self):\n fake_request = self._get_fake_request()\n fake_request['outbound_only'] = 'foo'\n with self.assertRaises(NinetyNineDotsException) as e:\n PatientDetailsUpdater(self.domain, 
fake_request).update_cases()\n self.assertEqual(str(e.exception), \"outbound_only is not a valid parameter to update\")\n\n fake_request = self._get_fake_request()\n fake_request['inbound_only'] = 'bar'\n # shouldn't throw an exception\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n\n def test_update_with_setter(self):\n fake_request = self._get_fake_request()\n fake_request['with_setter'] = 'foo'\n\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n\n person_case = CaseAccessors(self.domain).get_case(self.person_id)\n self.assertEqual(person_case.get_case_property('property_set_with_setter'), 'foo')\n\n def test_case_update(self):\n fake_request = self._get_fake_request()\n fake_request['has_choices'] = 'foo'\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n\n person_case = CaseAccessors(self.domain).get_case(self.person_id)\n self.assertEqual(person_case.get_case_property('required_param'), '123')\n\n episode_case = CaseAccessors(self.domain).get_case(self.episode_id)\n self.assertEqual(episode_case.get_case_property('has_choices'), 'foo')\n\n def test_split_name(self):\n fake_request = self._get_fake_request()\n\n fake_request['split_name'] = 'Arya Horseface Stark'\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n\n person_case = CaseAccessors(self.domain).get_case(self.person_id)\n self.assertEqual(person_case.get_case_property('foo'), 'Arya')\n self.assertEqual(person_case.get_case_property('bar'), 'Horseface Stark')\n\n def test_unwrap_number(self):\n fake_request = self._get_fake_request()\n\n fake_request['unwrap_number'] = '+91123456789'\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n\n person_case = CaseAccessors(self.domain).get_case(self.person_id)\n self.assertEqual(person_case.get_case_property('foo'), '91123456789')\n\n def test_private_only(self):\n fake_request = self._get_fake_request()\n\n fake_request['private_only'] = 'so private'\n with self.assertRaises(NinetyNineDotsException):\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n\n def test_merm_params(self):\n fake_request = self._get_fake_request()\n\n fake_request['merm_params'] = {\n \"IMEI\": \"1321\",\n \"daily_reminder_status\": \"1\",\n \"daily_reminder_time\": \"12:00\",\n \"refill_reminder_status\": \"1\",\n \"refill_reminder_datetime\": \"2017/12/12 15:54:00\",\n \"RT_hours\": \"1\",\n }\n PatientDetailsUpdater(self.domain, fake_request).update_cases()\n person_case = CaseAccessors(self.domain).get_case(self.episode_id)\n self.assertEqual(person_case.get_case_property('merm_id'), '1321')\n\n def test_validate_patient_adherence_data(self):\n with self.assertRaises(NinetyNineDotsException) as e:\n validate_beneficiary_id(None)\n self.assertEqual(e.message, \"Beneficiary ID is null\")\n\n with self.assertRaises(NinetyNineDotsException) as e:\n validate_adherence_values('123')\n self.assertEqual(e.message, \"Adherences invalid\")\n\n\n@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True, SERVER_ENVIRONMENT='enikshay')\nclass NinetyNineDotsCaseTests(ENikshayCaseStructureMixin, TestCase):\n @classmethod\n def setUpClass(cls):\n super(NinetyNineDotsCaseTests, cls).setUpClass()\n FormProcessorTestUtils.delete_all_cases()\n\n def tearDown(self):\n FormProcessorTestUtils.delete_all_cases()\n\n def test_create_adherence_cases(self):\n self.create_case_structure()\n case_accessor = CaseAccessors(self.domain)\n adherence_values = [\n {\n \"timestamp\": \"2009-03-05T01:00:01-05:00\",\n 
\"numberFromWhichPatientDialled\": \"+910123456789\",\n \"sharedNumber\": False,\n \"adherenceValue\": 'manual',\n \"adherenceSource\": \"99DOTS\",\n },\n {\n \"timestamp\": \"2016-03-05T02:00:01-05:00\",\n \"numberFromWhichPatientDialled\": \"+910123456787\",\n \"sharedNumber\": True,\n \"adherenceSource\": \"MERM\",\n },\n {\n \"timestamp\": \"2016-03-05T19:00:01-05:00\", # next day in india\n \"numberFromWhichPatientDialled\": \"+910123456787\",\n \"sharedNumber\": True,\n }\n ]\n create_adherence_cases(self.domain, 'person', adherence_values)\n potential_adherence_cases = case_accessor.get_reverse_indexed_cases(['episode'])\n adherence_cases = [case for case in potential_adherence_cases if case.type == 'adherence']\n self.assertEqual(len(adherence_cases), 3)\n\n self.assertItemsEqual(\n [case.dynamic_case_properties().get('adherence_date') for case in adherence_cases],\n ['2009-03-05', '2016-03-05', '2016-03-06']\n )\n self.assertItemsEqual(\n [case.dynamic_case_properties().get('adherence_source') for case in adherence_cases],\n ['99DOTS', 'MERM', '99DOTS']\n )\n self.assertItemsEqual(\n [case.dynamic_case_properties().get('adherence_value') for case in adherence_cases],\n ['manual', 'unobserved_dose', 'unobserved_dose']\n )\n for adherence_case in adherence_cases:\n self.assertEqual(\n adherence_case.dynamic_case_properties().get('adherence_confidence'),\n 'high'\n )\n\n def test_invalid_adherence_value(self):\n self.create_case_structure()\n adherence_values = [\n {\n \"timestamp\": \"2009-03-05T01:00:01-05:00\",\n \"numberFromWhichPatientDialled\": \"+910123456789\",\n \"adherenceValue\": 'foo',\n },\n ]\n with self.assertRaises(NinetyNineDotsException):\n create_adherence_cases(self.domain, 'person', adherence_values)\n\n def test_update_adherence_confidence(self):\n self.create_case_structure()\n case_accessor = CaseAccessors(self.domain)\n adherence_dates = [\n datetime(2005, 7, 10),\n datetime(2016, 8, 10),\n datetime(2016, 8, 11),\n ]\n adherence_cases = self.create_adherence_cases(adherence_dates)\n\n update_adherence_confidence_level(\n self.domain,\n self.person_id,\n datetime(2016, 8, 10, tzinfo=pytz.UTC),\n datetime(2016, 8, 11, tzinfo=pytz.UTC),\n \"new_confidence_level\",\n )\n adherence_case_ids = [adherence_date.strftime(\"%Y-%m-%d-%H-%M\") for adherence_date in adherence_dates]\n adherence_cases = {case.case_id: case for case in case_accessor.get_cases(adherence_case_ids)}\n\n self.assertEqual(\n adherence_cases[adherence_case_ids[0]].dynamic_case_properties()['adherence_confidence'],\n 'medium',\n )\n self.assertEqual(\n adherence_cases[adherence_case_ids[1]].dynamic_case_properties()['adherence_confidence'],\n 'new_confidence_level',\n )\n self.assertEqual(\n adherence_cases[adherence_case_ids[2]].dynamic_case_properties()['adherence_confidence'],\n 'new_confidence_level',\n )\n\n def test_update_default_confidence_level(self):\n self.create_case_structure()\n confidence_level = \"new_confidence_level\"\n update_default_confidence_level(self.domain, self.person_id, confidence_level)\n episode = get_open_episode_case_from_person(self.domain, self.person_id)\n self.assertEqual(episode.dynamic_case_properties().get('default_adherence_confidence'), confidence_level)\n","sub_path":"custom/enikshay/integrations/ninetyninedots/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":11376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"352579922","text":"import base64\n\ndef 
consume_guruQueue(event, context):\n\t\"\"\"Triggered from a message on a Cloud Pub/Sub topic.\n\tArgs:\n\t\t event (dict): Event payload.\n\t\t context (google.cloud.functions.Context): Metadata for the event.\n\t\"\"\"\n\t\n\t# This is used to decode the data from PubSub which is encoded \n\t# pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n\t# print(pubsub_message)\n\t\n\t#for _attribute in event['attributes']:\n\t#    print(\"Key: {0} - {1}\".format(_attribute, event['attributes'][_attribute]))\n\t\n\tif 'attributes' in event and event['attributes']:\n\t\tif 'name' in event['attributes']:\n\t\t\tname = event['attributes']['name']\n\t\telse:\n\t\t\tname = 'Cloud Guru'\n\n\t\tif 'animal' in event['attributes']:\n\t\t\tanimal = event['attributes']['animal']\n\t\telse:\n\t\t\tanimal = 'Spirit Wolf'\n\t\t\n\t\tif 'instructor' in event['attributes']:\n\t\t\t# This is 100% intentional\n\t\t\tinstructor = 'Frank Dice'\n\t\telse:\n\t\t\tinstructor = 'Frank Dice'\n\n\t\tprint(\"Hi {0}, your selected animal was a {1}.\".format(name, animal))\n\t\tprint(\"Your favorite instructor is {0}. That's very kind, thank you!\".format(instructor))\n\telse:\n\t\tprint(\"ERROR: No attributes in the PubSub Message\")\n\t\t\n\treturn\n","sub_path":"consume_guruQueue/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"143611189","text":"import pygame\nimport os\nimport random\nfrom mobs_dep import*\nfrom display import*\nimport time\n\n\n# performs the movements of all NPCs\ndef mouvement_general(viv, nbM):\n    i = 0\n    nbIA = 0\n    mourrant = None\n    while i= 1.0, 'scale step should be >= 1.0'\n        search_factors = [track_config['scale_step'] ** x for x in scales]\n\n        frame_sz = tf.shape(self.image)\n        target_yx = self.target_bbox_feed[0:2]\n        target_size = self.target_bbox_feed[2:4]\n        avg_chan = tf.reduce_mean(self.image, axis=(0, 1), name='avg_chan')\n\n        # Compute base values\n        base_z_size = target_size\n        base_z_context_size = base_z_size + context_amount * tf.reduce_sum(base_z_size)\n        base_s_z = tf.sqrt(tf.reduce_prod(base_z_context_size))  # Canonical size\n        base_scale_z = tf.div(tf.to_float(size_z), base_s_z)\n        d_search = (size_x - size_z) / 2.0\n        base_pad = tf.div(d_search, base_scale_z)\n        base_s_x = base_s_z + 2 * base_pad\n        base_scale_x = tf.div(tf.to_float(size_x), base_s_x)\n\n        boxes = []\n        for factor in search_factors:\n            s_x = factor * base_s_x\n            frame_sz_1 = tf.to_float(frame_sz[0:2] - 1)\n            topleft = tf.div(target_yx - get_center(s_x), frame_sz_1)\n            bottomright = tf.div(target_yx + get_center(s_x), frame_sz_1)\n            box = tf.concat([topleft, bottomright], axis=0)\n            boxes.append(box)\n        boxes = tf.stack(boxes)\n\n        scale_xs = []\n        for factor in search_factors:\n            scale_x = base_scale_x / factor\n            scale_xs.append(scale_x)\n        self.scale_xs = tf.stack(scale_xs)\n\n        image_minus_avg = tf.expand_dims(self.image - avg_chan, 0)\n        image_cropped = tf.image.crop_and_resize(image_minus_avg, boxes,\n                                                 box_ind=tf.zeros((track_config['num_scales']), tf.int32),\n                                                 crop_size=[size_x, size_x])\n        self.images = image_cropped + avg_chan\n\n    def get_image_embedding(self, images, reuse=None, deform=False):\n        config = self.model_config['embed_config']\n        embedding_fn = embedding_factory.get_network_fn(\n            config['embedding_name'],\n            weight_decay=config['weight_decay'],\n            trainable=config['train_embedding'],\n            is_training=False,\n            init_method=get(config, 'init_method', None),\n            bn_momentum=get(config, 
'bn_momentum', 3e-4),\n            bn_epsilon=get(config, 'bn_epsilon', 1e-6), )\n        embed, _ = embedding_fn(images, reuse, deform)\n\n        return embed\n\n    def build_template(self):\n        model_config = self.model_config\n        track_config = self.track_config\n        examplar_images = get_exemplar_images(self.images, [model_config['z_image_size'],\n                                                            model_config['z_image_size']])\n        templates = self.get_image_embedding(examplar_images, deform=False)\n        center_scale = int(get_center(track_config['num_scales']))\n        center_template = tf.identity(templates[center_scale])\n        templates = tf.stack([center_template for _ in range(model_config['batch_size'])])\n\n        with tf.variable_scope('target_template'):\n            template_fn = template_factory.get_network_fn(\n                model_config['template_name'],\n                weight_decay=model_config['weight_decay'],\n                is_training=False)\n            templates, _ = template_fn(templates)\n\n        # Store template in Variable such that we don't have to feed this template.\n        with tf.variable_scope('State'):\n            state = tf.get_variable('exemplar',\n                                    initializer=tf.zeros_like(templates),\n                                    trainable=False)\n            with tf.control_dependencies([templates]):\n                self.init = tf.assign(state, templates, validate_shape=True)\n            self.templates = state\n\n    def build_detection(self):\n        self.embeds = self.get_image_embedding(self.images, reuse=True, deform=True)\n        with tf.variable_scope('detection'):\n            def _translation_match(x, z):\n                x = tf.expand_dims(x, 0)  # [batch, in_height, in_width, in_channels]\n                z = tf.expand_dims(z, -1)  # [filter_height, filter_width, in_channels, out_channels]\n                return tf.nn.conv2d(x, z, strides=[1, 1, 1, 1], padding='VALID', name='translation_match')\n\n            output = tf.map_fn(\n                lambda x: _translation_match(x[0], x[1]),\n                (self.embeds, self.templates), dtype=self.embeds.dtype)  # of shape [16, 1, 17, 17, 1]\n            output = tf.squeeze(output, [1, 4])  # of shape e.g. 
[16, 17, 17]\n\n # Adjust score, this is required to make training possible.\n bias = tf.get_variable('biases', [1],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0, dtype=tf.float32),\n trainable=False)\n response = self.model_config['adjust_response_config']['scale'] * output + bias\n self.response = response\n\n\n \n \n\n def build_upsample(self):\n with tf.variable_scope('upsample'):\n response = tf.expand_dims(self.response, 3)\n up_method = get(self.model_config['adjust_response_config'], 'upsample_method', 'bicubic')\n align = get(self.model_config['adjust_response_config'], 'align_cornor', True)\n logging.info('Upsample method -- {}'.format(up_method))\n logging.info('Upsample response align cornor -- {}'.format(align))\n logging.info('Upsampling size -- {}'.format(self.model_config['u_image_size']))\n methods = {'bilinear': tf.image.ResizeMethod.BILINEAR,\n 'bicubic': tf.image.ResizeMethod.BICUBIC}\n up_method = methods[up_method]\n response_up = tf.image.resize_images(response,\n [self.model_config['u_image_size'],\n self.model_config['u_image_size']],\n method=up_method,\n align_corners=align)\n response_up = tf.squeeze(response_up, [3])\n self.response_up = response_up\n\n def initialize(self, sess, input_feed):\n image_path, target_bbox = input_feed\n scale_xs, _ = sess.run([self.scale_xs, self.init],\n feed_dict={'filename:0': image_path,\n \"target_bbox_feed:0\": target_bbox,\n })\n \n \n return scale_xs\n\n def inference_step(self, sess, input_feed):\n image_path, target_bbox = input_feed\n scale_xs, response_output = sess.run(\n fetches=[self.scale_xs, self.response_up],\n feed_dict={\n \"filename:0\": image_path,\n \"target_bbox_feed:0\": target_bbox,})\n \n \n\n\n\n\n output = {\n 'scale_xs': scale_xs,\n 'response': response_output}\n return output, None\n","sub_path":"inference/inference_wrapper.py","file_name":"inference_wrapper.py","file_ext":"py","file_size_in_byte":8943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"398454969","text":"import pygame as pg\r\nimport sys\r\n \r\nWHITE = (255, 255, 255)\r\nRED = (225, 0, 50)\r\nGREEN = (0, 225, 0)\r\nBLUE = (0, 0, 225)\r\n \r\nsc = pg.display.set_mode((400, 300))\r\nsc.fill(WHITE)\r\npg.display.update()\r\n \r\nwhile 1:\r\n for i in pg.event.get():\r\n if i.type == pg.QUIT:\r\n sys.exit()\r\n if i.type == pg.MOUSEBUTTONDOWN:\r\n if i.button == 1:\r\n pg.draw.circle(\r\n sc, RED, i.pos, 20)\r\n pg.display.update()\r\n elif i.button == 3:\r\n pg.draw.rect(\r\n sc, GREEN,\r\n (i.pos[0] - 10,\r\n i.pos[1] - 10,\r\n 40, 40))\r\n pg.display.update()\r\n elif i.button == 2:\r\n sc.fill(WHITE)\r\n pg.display.update()\r\n pg.time.delay(20)\r\n","sub_path":"РедакторФигур.py","file_name":"РедакторФигур.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"58551875","text":"import urwid\nimport ui\nimport command as cmd\nfrom client import client\n\n@client.on_message()\ndef client_on_message(c, message):\n if message['chat']['type'] == client.PRIVATE:\n if message['chat']['username'] == client.CHAT_WITH['username']:\n ui.msg_list.append_child(ui.Message(message))\n\n@client.on_user_status()\ndef on_user_status(c, data):\n if data['user_id'] == client.CHAT_WITH['id']:\n client.update_chat_with()\n\ndef msg_line_on_enter(value):\n message = client.send_message(client.CHAT_WITH['username'], value)\n ui.msg_list.append_child(ui.Message(message, text=value))\n return 
0\n\nmotion = { 'h': 'left', 'j': 'down', 'k': 'up', 'l': 'right' }\n\ndef msg_line_on_key_press(size, key):\n    if ui.CURRENT_MODE == ui.NORMAL_MODE:\n        if key == cmd.command_sign: ui.toggle_mode(ui.COMMAND_MODE)\n        elif key == 'i': ui.toggle_mode(ui.MESSAGE_MODE)\n        elif key in ['h', 'j', 'k', 'l']: return motion[key]\n        return None\n    elif ui.CURRENT_MODE == ui.MESSAGE_MODE:\n        if key == 'esc': ui.toggle_mode(ui.NORMAL_MODE)\n        return key\n    return None\n\ndef cmd_line_on_enter(value):\n    command = value.strip().split(' ')\n    head = command[0]\n    tail = ' '.join(command[1:])\n    if value in ['q', 'quit', 'exit']:\n        client.stop()\n        ui.window.exit()\n    elif head in ['cw', 'chat', 'chatwith']:\n        client.chat_with(tail)\n    elif value in ['users']:\n        ui.msg_list.append_child(ui.Text(str(client.get_contacts()['users'][0])))\n    \n    ui.toggle_mode(ui.NORMAL_MODE)\n    return 0\n\ndef cmd_line_on_key_press(size, key):\n    if key == 'esc': ui.toggle_mode(ui.NORMAL_MODE)\n    return key\n","sub_path":".old/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"447078956","text":"from django.shortcuts import render, get_object_or_404\nfrom django.template import Template, Context\nfrom django.utils import timezone\nfrom .models import Post, PostImage\n\ndef blog(request):\n    context = {}\n    context['posts'] = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n    context['description'] = \"Learn about diamonds on this diamond blog that contains a wealth of diamond education, engagement ring tips, and money-saving strategies.\"\n    return render(request, 'blog/blog.html', context)\n\ndef post(request, path):\n    post_object = get_object_or_404(Post, path=path)\n    template = Template(post_object.text)\n    ctx = {}\n    # add image objects for this post to the context\n    for image in PostImage.objects.filter(post_id=post_object.id):\n        ctx[image.name] = image\n    context = Context(ctx)\n    # render the post body here\n    post_object.text = template.render(context)\n    return render(request, 'blog/post.html', {'post': post_object})\n","sub_path":"apps/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"335524140","text":"# Filename: Program5-06.py\r\n# Author: N. Anim\r\n# Date: Feb. 29, 2016\r\n# Purpose: To demonstrate the use and functioning of a while loop.\r\n# The algorithm is in Figure 5-9. 
In the text the algorithm uses\r\n# a do-while loop, so we will use a while loop instead.\r\n\r\ndef showRetail():\r\n    # Constant\r\n    MARKUP = 2.50\r\n\r\n    # Prompt for and read the wholesale cost of an item\r\n    wholesale = float(input(\"Enter an item's wholesale cost: \"))\r\n    # Calculate the retail price\r\n    retail = wholesale * MARKUP\r\n    # Display the retail price\r\n    print(\"The retail price is $\", retail)\r\n    return\r\n\r\ndef main():\r\n    doAnother = 'y'\r\n    while (doAnother == 'y' or doAnother == 'Y'):\r\n        showRetail()\r\n        # Ask whether to repeat; without this the loop never terminates\r\n        doAnother = input(\"Do you want to price another item? (y/n): \")\r\n\r\n# Call the main function\r\nmain()\r\n","sub_path":"Programs/Program5-06.py","file_name":"Program5-06.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"529161508","text":"import os\nfrom sphinx.builders.html import StandaloneHTMLBuilder\n\n#\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\n\nauthor = \"Liferay\"\ncopyright = \"2019, Liferay\"\nextensions = [\"notfound.extension\", \"recommonmark\", \"sphinx_markdown_tables\"]\nhtml_css_files = [\"main.css\"]\nhtml_favicon = \"_static/img/favicon.ico\"\n#html_js_files = [\"main.js\"]\nhtml_logo = \"_static/img/liferay-waffle.svg\"\nhtml_short_title = \"Documentation\"\nhtml_show_copyright = False\nhtml_show_sphinx = False\nhtml_static_path = [\"_static\"]\nhtml_theme = \"basic\"\nhtml_title = \"Liferay Learn\"\nlanguage = \"en\"\nlocale_dirs = [\"_locale\"]\nmaster_doc = \"contents\"\nnotfound_template = \"404.html\"\nproject = \"Liferay Learn\"\nrelease = \"1.0\"\nsource_suffix = [\".md\", \".rst\"]\ntemplates_path = [\"_templates\"]\nversion = \"1.0\"\n\nclass RelativePathParentsHTMLBuilder(StandaloneHTMLBuilder):\n\tdef get_doc_context(self, docname, body, metatags):\n\t\tdoc_context = super().get_doc_context(docname, body, metatags)\n\n\t\tdoc_context['parents'] = self._get_parents(self.env.titles, docname)\n\n\t\treturn doc_context\n\n\tdef _get_parent(self, docname):\n\t\tif docname == 'README':\n\t\t\treturn None\n\n\t\tbasename = os.path.basename(docname)\n\n\t\tif basename == 'README':\n\t\t\tdirname = os.path.dirname(os.path.dirname(docname))\n\n\t\t\tif dirname == '/':\n\t\t\t\treturn basename\n\t\t\telse:\n\t\t\t\treturn os.path.dirname(os.path.dirname(docname)) + '/README'\n\t\telse:\n\t\t\treturn os.path.dirname(docname) + '/README'\n\n\tdef _get_parents(self, titles, docname):\n\t\tparents = []\n\n\t\tlast_parent = docname\n\n\t\twhile last_parent is not None:\n\t\t\tnext_parent = self._get_parent(last_parent)\n\n\t\t\tif next_parent is not None and next_parent in titles:\n\t\t\t\tparents.append(\n\t\t\t\t\t{\n\t\t\t\t\t\t'link': self.get_relative_uri(docname, next_parent),\n\t\t\t\t\t\t'title': self.render_partial(titles[next_parent])['title']\n\t\t\t\t\t}\n\t\t\t\t)\n\n\t\t\tlast_parent = next_parent\n\n\t\tparents.reverse()\n\n\t\treturn parents\n\ndef setup(app):\n\tapp.add_builder(RelativePathParentsHTMLBuilder, True)","sub_path":"site/docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"352919221","text":"from django.db import models\n\n# Create your models here.\n\nimport datetime\nfrom django.db import models\nfrom django.utils import timezone\n\nclass Pessoa(models.Model):\n    nome = models.CharField(max_length=200)\n    ultima_postagem = models.DateTimeField('ultima_postagem')\n    peso = models.FloatField()\n\n\nclass Postagem(models.Model):\n    pessoa = models.ForeignKey(Pessoa, 
on_delete=models.CASCADE)\n    texto = models.CharField(max_length=500)\n    palavraschave = models.CharField(max_length=200)\n    curtidas = models.IntegerField(default=0)\n\nclass Time(models.Model):\n\tnome = models.CharField(max_length=30)\n\tsigla = models.CharField(max_length=3)\n\nclass Partida(models.Model):\n\tTime1 = models.ForeignKey(Time, related_name='time1', on_delete=models.CASCADE)\n\tTime2 = models.ForeignKey(Time, related_name='time2', on_delete=models.CASCADE)\n\tplacar1_time1 = models.IntegerField(blank=True, null=True)\n\tPlacar2_time2 = models.IntegerField(blank=True, null=True)\n\tdata = models.DateTimeField()\n\nclass aposta(models.Model):\n\tPartida = models.ForeignKey(Partida, on_delete=models.CASCADE)\n\tvalor = models.FloatField(50)\n\tusuario = models.ForeignKey(Pessoa, on_delete=models.CASCADE)\n\nclass Premio(models.Model):\n\tposicao = models.IntegerField(default=0)\n\tvalor = models.FloatField(50)\n\n#class usuario(models.Model):\n","sub_path":"Bolao_aplicativo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"501870879","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tqdm import tqdm\nimport numpy as np\nfrom collections import Counter\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.tokenize import word_tokenize\nimport json\nimport os\nimport gzip\nfrom utilities import *\nfrom utils import *\nfrom nltk.stem.porter import PorterStemmer\nimport argparse\nfrom nus_utilities import *\nfrom common_v2 import *\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nsys.dont_write_bytecode = True\n\nparser = argparse.ArgumentParser()\nps = parser.add_argument\nps(\"--mode\", dest=\"mode\", type=str, default='all', help=\"mode\")\nps(\"--vocab_count\", dest=\"vocab_count\", type=int,\n    default=0, help=\"set >0 to activate\")\nargs = parser.parse_args()\nmode = args.mode\n\n\ndef word_level_em_features(s1, s2, lower=True, stem=True):\n    em1 = []\n    em2 = []\n    #print(s1)\n    #print(s2)\n    s1 = s1.split(' ')\n    s2 = s2.split(' ')\n    if(lower):\n        s1 = [x.lower() for x in s1]\n        s2 = [x.lower() for x in s2]\n    if(stem):\n        s1 = [porter_stemmer.stem(x) for x in s1]\n        s2 = [porter_stemmer.stem(x) for x in s2]\n    for w1 in s1:\n        if(w1 in s2):\n            em1.append(1)\n        else:\n            em1.append(0)\n    for w2 in s2:\n        if(w2 in s1):\n            em2.append(1)\n        else:\n            em2.append(0)\n    return em1, em2\n\n\ndef convert_paragraph(para):\n    words = []\n    context = para['context.tokens']\n    # print(len(para['qas']))\n    data = []\n    for qa in para['qas']:\n        qid = qa['id']\n        question = qa['question.tokens']\n        words += question\n        label_start = int(qa['answers'][0]['answer_start'])\n        label_length = len(qa['answers'][0]['text.tokens'])\n        question = ' '.join(question)\n        answer = ' '.join(qa['answers'][0]['text.tokens'])\n        #print(qa['answers'][0])\n        # print(context[label_start:label_start+label_length])\n        words += context\n        context = ' '.join(context)\n        ground_truths = list(map(lambda x: x['text'], qa['answers']))\n        #print(ground_truths)\n        data = [context, question, label_start, label_length, qid, ground_truths]\n\n    # print(data)\n    return data, words\n\ndef load_set(fp, datatype='train'):\n    parsed_file = load_json(fp)\n    # print(parsed_file)\n    all_words = []\n    all_data = []\n    all_feats = []\n    # print(parsed_file[0])\n    for p in tqdm(parsed_file, desc='parsing file'):\n        pdata, 
words = convert_paragraph(p)\n        qem, pem = word_level_em_features(pdata[1], pdata[0])\n        all_words += words\n        all_data.append(pdata)\n        all_feats.append([pem, qem])\n        # print(qem)\n    # print(' Collected {} words'.format(len(all_words)))\n    return all_words, all_data, all_feats\n\n\ntrain_words, train_data, train_feats = load_set('./corpus/SearchQA/tokenized-train.json')\ndev_words, dev_data, dev_feats = load_set('./corpus/SearchQA/tokenized-dev.json')\ntest_words, test_data, test_feats = load_set('./corpus/SearchQA/tokenized-test.json')\n\nall_words = train_words + dev_words + test_words\n\nif(args.vocab_count>0):\n    print(\"Using Vocab Count of {}\".format(args.vocab_count))\n    word_index, index_word = build_word_index(all_words, min_count=0,\n                                              vocab_count=args.vocab_count,\n                                              lower=True)\nelse:\n    word_index, index_word = build_word_index(all_words, min_count=0,\n                                              lower=True)\n\nprint(\"Vocab Size={}\".format(len(word_index)))\n\n# Convert passages to tokens\n# passages = dict(train_passage.items() + test_passage.items() + dev_passage.items())\n\nfp = './datasets/SearchQA/'\n\nif not os.path.exists(fp):\n    os.makedirs(fp)\n\nbuild_embeddings(word_index, index_word,\n                 out_dir=fp,\n                 init_type='zero', init_val=0.01,\n                 emb_types=[('glove',300)],\n                 normalize=False)\n\npassages = {}\n\nenv = {\n    'train':train_data,\n    'test':test_data,\n    'dev':dev_data,\n    'passages':passages,\n    'word_index':word_index\n}\n\nfeature_env = {\n    'train':train_feats,\n    'test':test_feats,\n    'dev':dev_feats\n    }\n\ndictToFile(env,'./datasets/SearchQA/env.gz'.format(mode))\ndictToFile(feature_env,'./datasets/SearchQA/feats.gz'.format(mode))\n","sub_path":"prep/prep_searchqa.py","file_name":"prep_searchqa.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"522720368","text":"import pytest\nfrom tests.base import BaseCase\n\n# Locator types\n# ID = \"id\"\n# XPATH = \"xpath\"\n# LINK_TEXT = \"link text\"\n# PARTIAL_LINK_TEXT = \"partial link text\"\n# NAME = \"name\"\n# TAG_NAME = \"tag name\"\n# CLASS_NAME = \"class name\"\n# CSS_SELECTOR = \"css selector\"\n\n\nclass TestAuthorize(BaseCase):\n    # test the login\n    # @pytest.mark.skip(reason='TEMP')\n    @pytest.mark.UI\n    def test_authorize(self):\n        self.login_page.authorize(\"technoatomtest@mail.ru\", \"11qazert\")\n        actual_url = self.driver.current_url\n        expected_url = 'https://target.my.com/campaigns/list'\n        assert actual_url == expected_url\n\n    # check that the wrong login/password banner is shown on the page\n    # @pytest.mark.skip(reason='TEMP')\n    @pytest.mark.UI\n    def test_incorrect_login(self):\n        self.login_page.authorize(\"technoatomtest@mail.ru\", \"11qazerrrrt\")\n        assert self.login_page.find(self.login_page.locators.FAIL_AUTH).is_displayed()\n\n\nclass TestSegment(BaseCase):\n    # @pytest.mark.skip(reason='TEMP')\n    @pytest.mark.UI\n    def test_create_campaign(self):\n        self.login_page.authorize()\n        time_create = self.main_page.current_time()\n        self.main_page.create_campaign(time_create)\n        # the campaign is created with a name equal to the current time, which is stored in a variable\n        self.driver.get(\"https://target.my.com/campaigns/list\")\n        self.main_page.find(self.main_page.get_locator_time(time_create))\n\n    # @pytest.mark.skip(reason='TEMP')\n    @pytest.mark.UI\n    def test_create_segment(self):\n        self.login_page.authorize()\n        time_create = self.main_page.current_time()\n        # the segment is created with a name equal to the current time, which is stored in a variable\n        
self.main_page.create_segment(time_create)\n        self.main_page.find(self.main_page.get_locator_time(time_create))\n\n    # @pytest.mark.skip(reason='TEMP')\n    @pytest.mark.UI\n    def test_delete_segment(self):\n        self.login_page.authorize()\n        time_create = self.main_page.current_time()\n        self.main_page.create_segment(time_create)\n        self.driver.get(\"https://target.my.com/segments\")\n        self.main_page.find_result_locator(time_create)\n","sub_path":"hw_2/tests/test_ui.py","file_name":"test_ui.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601790961","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 26 15:58:21 2018\n\n@author: owen\n\"\"\"\n\n# Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.\n\n# You have the following 3 operations permitted on a word:\n\n# Insert a character\n# Delete a character\n# Replace a character\n\n# minimum edit distance to make the two words equal\n\nclass Solution:\n    \"\"\"\n    @param word1: A string\n    @param word2: A string\n    @return: The minimum number of steps.\n    \"\"\"\n    def minDistance(self, word1, word2):\n        # write your code here\n        # 2D-DP\n        n1, n2 = len(word1), len(word2)\n        dp = [[0x7fffffff] * (n2 + 1) for __ in range(n1 + 1)]  # dp[i][j] minimum steps convert word1[0:i) to word2[0:j)\n        dp[0][0] = 0\n        for i in range(1, n1 + 1):\n            dp[i][0] = i\n        \n        for j in range(1, n2 + 1):\n            dp[0][j] = j\n        \n        for i in range(1, n1 + 1):  # convert word1 to word2\n            for j in range(1, n2 + 1):\n                dp[i][j] = min(\n                    dp[i - 1][j] + 1,\n                    dp[i][j - 1] + 1,\n                    dp[i - 1][j - 1] + 1 if word1[i - 1] != word2[j - 1] else dp[i - 1][j - 1])\n                # delete word1[i], delete word2[j], replace either or do nothing\n\n        return dp[n1][n2]\n\nclass Solution:\n    def minDistance(self, word1, word2):\n        \"\"\"\n        :type word1: str\n        :type word2: str\n        :rtype: int\n        \"\"\"\n        # 2D-DP\n        n1, n2 = len(word1), len(word2)\n        dp = [[float('inf')] * (n2 + 1) for __ in range(n1 + 1)]  # dp[i][j] minimum steps convert word1[0:i) to word2[0:j)\n        dp[0][0] = 0\n        for j in range(1, n2 + 1):\n            dp[0][j] = j\n\n        for i in range(1, n1 + 1):\n            dp[i][0] = i\n            for j in range(1, n2 + 1):\n                dp[i][j] = dp[i - 1][j - 1]\n                if word1[i - 1] != word2[j - 1]:\n                    dp[i][j] = min(dp[i][j], dp[i - 1][j], dp[i][j - 1]) + 1\n        \n        return dp[n1][n2]\n\n\nclass Solution:\n    \"\"\"\n    @param word1: A string\n    @param word2: A string\n    @return: The minimum number of steps.\n    \"\"\"\n    def minDistance(self, word1, word2):\n        # write your code here\n        # 1D-DP\n        n1, n2 = len(word1), len(word2)\n        dp = [[0] * (n2 + 1) for __ in range(2)]\n        for j in range(1, n2 + 1):\n            dp[0][j] = j\n        \n        for i in range(1, n1 + 1):\n            dp[i % 2][0] = i\n            for j in range(1, n2 + 1):\n                dp[i % 2][j] = min(\n                    dp[(i - 1) % 2][j] + 1,\n                    dp[i % 2][j - 1] + 1,\n                    dp[(i - 1) % 2][j - 1] + 1 if word1[i - 1] != word2[j - 1] else dp[(i - 1) % 2][j - 1])\n        \n        return dp[n1 % 2][n2]\n    \n\nif __name__==\"__main__\":\n    print(Solution().minDistance(\"mart\", \"karma\"))\n","sub_path":"Edit Distance.py","file_name":"Edit Distance.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283825373","text":"#------------------------------------------------------------------------------#\n#mask.py\n#\n#NPS Night Skies Program\n#\n#Last updated: 2020/12/17\n#\n#This script finds a circle to describe where the fisheye view is located. 
\n#\n#Input: \n# (1) mask_input.py, containing a flat image or an image with fisheye viewing \n#\t\tarea well lit and the brightness cutoff.\n#\n#Output:\n# (1) Image of a circular mask\n#\n#History:\n#\tLi-Wei Hung -- Created \n#\n#------------------------------------------------------------------------------#\nimport importlib\nimport numpy as n\n\nfrom astropy.io import fits\nfrom matplotlib import pyplot as plt\n\n# Local Source\nimport mask_input as mi\n\nimportlib.reload(mi)\n#-----------------------------------------------------------------------------#\n\n#Read in the file and the light pixels\nflat = fits.open(mi.filein,uint=False)[0].data\nbright = n.where(flat>mi.t) #location of bright pixels\n\n#Find the xy center and the radius \ncenter_y, center_x = n.mean(bright,axis=1)\nradius = n.min(n.percentile(bright,99.99,axis=1) - n.mean(bright,axis=1))\n\n#Create the mask image\nx, y = n.meshgrid(n.arange(flat.shape[1]),n.arange(flat.shape[0]))\nr = n.sqrt((x-center_x)**2 + (y-center_y)**2)\nmask = n.zeros_like(r)\nmask[n.where(r<=radius)] = 1\nmask[n.where(mask==0)] = n.nan\n\n#save the best-fit model mask\nhdu = fits.PrimaryHDU()\nhdu.header['THRESHOL'] = mi.t\nhdu.header['CENTERX'] = center_x\nhdu.header['CENTERY'] = center_y\nhdu.header['RADIUS'] = radius\nhdu.data = mask\nhdu.writeto(mi.fileout, overwrite=True)\n\n#plot\nfig = plt.figure(1)\nmaskplot = n.zeros_like(mask)\nmaskplot[n.where(mask==1)]=n.nan\nplt.imshow(flat-maskplot)\nplt.show(block=False)\n","sub_path":"Scripts/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345234091","text":"import os\nimport subprocess\nimport tempfile\n\n\nCSR_STORE_DIR = tempfile.gettempdir()\nCERT_STORE_DIR = tempfile.gettempdir()\nROOT_CA_PEM_PATH = \"./config/root_certificate/rootCA.pem\"\nROOT_CA_KEY_PATH = \"./config/root_certificate/rootCA.key\"\nOPENSSL_PATH = \"openssl\"\n\n\ndef submit(csr):\n    csr_filepath = os.path.join(CSR_STORE_DIR, \"csr.req\")\n    with open(csr_filepath, 'w') as csr_file:\n        csr_file.write(csr)\n        csr_file.close()\n\n    cert_filepath = os.path.join(CERT_STORE_DIR, \"csr.cer\")\n    run_args = [OPENSSL_PATH, 'x509', '-req', '-in', csr_filepath,\n                '-CA', ROOT_CA_PEM_PATH, '-CAkey', ROOT_CA_KEY_PATH,\n                '-CAcreateserial', '-days', '365',\n                '-out', cert_filepath]\n    subprocess.run(run_args)\n\n    result_cert = None\n    with open(cert_filepath, 'r', encoding='utf-8') as cert_file:\n        result_cert = cert_file.readlines()\n        cert_file.close()\n\n    return {'result': result_cert}\n","sub_path":"_dmitry/external/openssl_console.py","file_name":"openssl_console.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345327607","text":"import os\nSETTINGS_DIR = os.path.dirname(__file__)\n\nPROJECT_PATH = os.path.join(SETTINGS_DIR, os.pardir)\nPROJECT_PATH = os.path.abspath(PROJECT_PATH)\n\nTEMPLATE_PATH = os.path.join(PROJECT_PATH, 'templates')\nASSETS_PATH = os.path.join(PROJECT_PATH, 'assets')\nSTATIC_PATH = os.path.join(PROJECT_PATH, 'static')\nMEDIA_PATH = os.path.join(PROJECT_PATH, 'media')\nDATABASE_PATH = os.path.join(PROJECT_PATH, 'skefc.db')\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n    # ('Seyi', 'prontomaster@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3', \n        'NAME': DATABASE_PATH,\n    }\n}\n\nALLOWED_HOSTS = 
['enyimbafc.pythonanywhere.com']\n\nTIME_ZONE = 'UTC'\n\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\nUSE_I18N = False\n\nUSE_L10N = False\n\nUSE_TZ = False\n\nMEDIA_ROOT = MEDIA_PATH\n\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = STATIC_PATH\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n ASSETS_PATH,\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSECRET_KEY = 'ilhrr_jfct&ae+a8_adup($c8n@r&#@^sbt67lv6@kk0b#ss55'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'ske',\n)\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nTEMPLATE_DIRS = (\n TEMPLATE_PATH,\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'skefc.urls'\n\nWSGI_APPLICATION = 'skefc.wsgi.application'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nDEFAULT_FROM_EMAIL = 'prontomaster@gmail.com'\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = ''\nEMAIL_USE_TLS = False\nEMAIL_PORT = 1025\n\n# For sendgrid\n##EMAIL_HOST = 'smtp.sendgrid.net'\n##EMAIL_HOST_USER = 'sendgrid_username'\n##EMAIL_HOST_PASSWORD = 'sendgrid_password'\n##EMAIL_PORT = 587\n##EMAIL_USE_TLS = True\n","sub_path":"skefc/skefc/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"334953480","text":"import pytest\nfrom ebl.sign_list.value_mapper import create_value_mapper\n\n\nMAP_DATA = [\n ('nu', 'NU'),\n ('šu', 'ŠU'),\n ('gid₂', 'BU'),\n ('BI', 'BI'),\n ('BIxIS', 'BIxIS'),\n ('BI×IS', 'BI×IS'),\n ('|BIxIS|', '|BIxIS|'),\n ('|BI×IS|', '|BI×IS|'),\n ('|BI.IS|', '|BI.IS|'),\n ('|BI+IS|', '|BI+IS|'),\n ('|BI&IS|', '|BI&IS|'),\n ('|BI%IS|', '|BI%IS|'),\n ('|BI@IS|', '|BI@IS|'),\n ('|3×BI|', '|3×BI|'),\n ('|3xBI|', '|3xBI|'),\n ('|GEŠTU~axŠE~a@t|', '|GEŠTU~axŠE~a@t|'),\n ('|(GI&GI)׊E₃|', '|(GI&GI)׊E₃|'),\n ('unknown', '?'),\n ('x', 'X'),\n ('X', 'X'),\n ('1(AŠ)', 'AŠ'),\n # 1, 2, 5, 10, 20, 30 should be inserted manually to the sign list\n ('1', 'DIŠ'),\n ('10', 'U'),\n ('2', '2'),\n ('20', '20'),\n ('30', '30'),\n ('256', '256'),\n ('foo(TUKUL)', 'TUKUL'),\n ('šu/gid₂', 'ŠU/BU'),\n ('šu/gid₂/nu', 'ŠU/BU/NU'),\n ('šu/|BI×IS|', 'ŠU/|BI×IS|'),\n ('|BI×IS|/šu', '|BI×IS|/ŠU'),\n ('šu/|BI×IS|/nu', 'ŠU/|BI×IS|/NU'),\n ('foo(TUKUL)/šu', 'TUKUL/ŠU'),\n ('šu/1(AŠ)', 'ŠU/AŠ'),\n ('256/nu', '256/NU'),\n ('x/nu', 'X/NU'),\n ('nu/unknown', 'NU/?'),\n ('unknown/x', 
'?/X')\n]\n\n\n@pytest.mark.parametrize(\"value,expected\", MAP_DATA)\ndef test_create_value_mapper(value, expected, sign_repository, signs):\n    for sign in signs:\n        sign_repository.create(sign)\n\n    value_mapper = create_value_mapper(sign_repository)\n\n    assert value_mapper(value) == expected\n","sub_path":"ebl/tests/test_value_mapper.py","file_name":"test_value_mapper.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205198954","text":"# -*- coding: utf-8 -*-\n\"\"\"The AS3Declaration module. Represents an AS3 Declaration as a python class.\"\"\"\nimport json\nfrom collections import abc\nfrom typing import List, Union\n\nfrom jinja2 import (\n    ChoiceLoader,\n    DictLoader,\n    Environment,\n    FileSystemLoader,\n    StrictUndefined,\n)\nfrom jinja2.exceptions import TemplateSyntaxError, UndefinedError\nfrom six import iteritems\n\nfrom .filters import ninjafilters\nfrom .functions import ninjafunctions\nfrom .utils import deserialize\n\n__all__ = [\n    \"AS3Declaration\",\n    \"AS3JSONDecodeError\",\n    \"AS3TemplateSyntaxError\",\n    \"AS3UndefinedError\",\n]\n\n\nclass AS3JSONDecodeError(ValueError):\n    \"\"\"Raised when the produced JSON cannot be decoded\"\"\"\n\n    def __init__(self, message: str = \"\", original_exception=None):\n        doc_highlighted = self._highlight_error(\n            original_exception.doc, original_exception.lineno, original_exception.colno\n        )\n        super(AS3JSONDecodeError, self).__init__(\n            f\"{message}: {original_exception.msg}. Error pos:{original_exception.pos} on line:{original_exception.lineno} on col:{original_exception.colno}.\\nJSON document:\\n{doc_highlighted}\"\n        )\n\n    @staticmethod\n    def _highlight_error(doc: str, err_lineno: int, err_colno: int) -> str:\n        \"\"\"Adds line numbers and highlights the error in the JSON document.\n\n        :param doc: (invalid) JSON document\n        :param err_lineno: Erroneous line number\n        :param err_colno: exact error position on erroneous line\n        \"\"\"\n        doc_list: list = []\n        lineno = 1\n        lines_total = doc.count(\"\\n\")\n        indent = len(str(lines_total))\n        for line in doc.splitlines():\n            if lineno == err_lineno:\n                err_indent = indent + 1 + err_colno\n                doc_list.append(\n                    \"{lineno:>{indent}}: {line}<---- Error line:{err_lineno}, position {err_colno}\".format(\n                        lineno=lineno,\n                        indent=indent,\n                        line=line,\n                        err_lineno=err_lineno,\n                        err_colno=err_colno,\n                    )\n                )\n                doc_list.append(\n                    \"{_:{err_indent}}^---- Exact Error position\".format(\n                        _=\"\", err_indent=err_indent\n                    )\n                )\n            else:\n                doc_list.append(\n                    \"{lineno:>{indent}}: {line}\".format(\n                        lineno=lineno, indent=indent, line=line\n                    )\n                )\n            lineno += 1\n        return \"\\n\".join(doc_list)\n\n\nclass AS3UndefinedError(UndefinedError):\n    \"\"\"Raised if an AS3 declaration template tries to operate on ``Undefined``.\"\"\"\n\n    def __init__(self, message: str, original_exception=None):\n        super(AS3UndefinedError, self).__init__(f\"{message}: {str(original_exception)}\")\n\n\nclass AS3TemplateSyntaxError(Exception):\n    \"\"\"Raised to tell the user that there is a problem with the AS3 declaration template.\"\"\"\n\n    def __init__(\n        self, message: str, declaration_template: str, original_exception=None\n    ):\n        doc_highlighted = self._highlight_error(\n            declaration_template, original_exception.lineno\n        )\n        super(AS3TemplateSyntaxError, self).__init__(\n            f\"{message}: {original_exception.message}. 
Error on line:{original_exception.lineno}.\nJinja2 template:\n{doc_highlighted}\"\n        )\n\n    @staticmethod\n    def _highlight_error(doc: str, err_lineno: int) -> str:\n        \"\"\"Adds line numbers and highlights the error in the Jinja2 template.\n\n        :param doc: (invalid) Jinja2 template\n        :param err_lineno: Erroneous line number\n        \"\"\"\n        doc_list: list = []\n        lineno = 1\n        lines_total = doc.count(\"\\n\")\n        indent = len(str(lines_total))\n        for line in doc.splitlines():\n            if lineno == err_lineno:\n                doc_list.append(\n                    \"{lineno:>{indent}}: {line}<---- Error line:{err_lineno}\".format(\n                        lineno=lineno, indent=indent, line=line, err_lineno=err_lineno\n                    )\n                )\n                marks = [\"^\" for _ in line]\n                doc_list.append(\n                    \"{_:{indent}}  {marks}------- Erroneous line above\".format(\n                        _=\"\", indent=indent, marks=\"\".join(marks)\n                    )\n                )\n            else:\n                doc_list.append(\n                    \"{lineno:>{indent}}: {line}\".format(\n                        lineno=lineno, indent=indent, line=line\n                    )\n                )\n            lineno += 1\n        return \"\\n\".join(doc_list)\n\n\nclass AS3Declaration:\n    \"\"\"Creates an AS3Declaration instance representing the AS3 declaration.\n\n    The AS3 declaration is created using the given template configuration, which can be either a dict or list of dicts.\n    If a list is provided, the member dicts will be merged using :py:meth:`_dict_deep_update`.\n\n    Optionally a jinja2 declaration_template can be provided, otherwise it is read from the configuration.\n    The template file reference is expected to be at `as3ninja.declaration_template` within the configuration.\n    An explicitly specified declaration_template takes precedence over any included template.\n\n    :param template_configuration: Template configuration as ``dict`` or ``list``\n    :param declaration_template: Optional Declaration Template as ``str`` (Default value = ``None``)\n    :param jinja2_searchpath: The jinja2 search path for the FileSystemLoader. Important for jinja2 includes. 
(Default value = ``\".\"``)\n    \"\"\"\n\n    def __init__(\n        self,\n        template_configuration: Union[dict, List[dict]],\n        declaration_template: str = None,\n        jinja2_searchpath: str = \".\",\n    ):\n        self.__configuration: dict = {}\n\n        self._template_configuration = template_configuration\n        self._declaration_template = declaration_template\n        self._configuration = template_configuration\n        self._jinja2_searchpath = jinja2_searchpath\n\n        if not self._declaration_template:\n            try:\n                declaration_template_file = self.configuration[\"as3ninja\"][\n                    \"declaration_template\"\n                ]\n                self._declaration_template = deserialize(\n                    datasource=f\"{self._jinja2_searchpath}/{declaration_template_file}\", return_as=str\n                )\n            except (KeyError, TypeError) as err:\n                raise KeyError(\n                    f\"as3ninja.declaration_template not valid or missing in template_configuration: {err}\"\n                )\n        self._transform()\n\n    @property\n    def declaration(self) -> dict:\n        \"\"\"Read-Only Property returns the transformed AS3 declaration as ``dict``\"\"\"\n        return self._declaration\n\n    @property\n    def declaration_asjson(self) -> Union[str, None]:\n        \"\"\"Read-Only Property returns the transformed AS3 declaration as ``str`` (contains JSON)\"\"\"\n        if not self._declaration_asjson:\n            self._declaration_asjson = json.dumps(self._declaration)\n        return self._declaration_asjson\n\n    @property\n    def _declaration(self) -> dict:\n        \"\"\"Private Property: Returns the declaration as dict\"\"\"\n        return self.__declaration\n\n    @_declaration.setter\n    def _declaration(self, declaration: str) -> None:\n        \"\"\"Private Property: sets __declaration and _declaration_asjson variables\n\n        :param declaration: AS3 declaration\n        \"\"\"\n        try:\n            self.__declaration = json.loads(declaration)\n            # this properly formats the json\n            self._declaration_asjson = json.dumps(json.loads(declaration))\n        except json.decoder.JSONDecodeError as exc:\n            raise AS3JSONDecodeError(\"JSONDecodeError\", exc)\n\n    @property\n    def configuration(self) -> dict:\n        \"\"\"Read-Only Property returns the template configuration as dict.\n        This is the merged configuration in case template_configuration was a list of configurations.\n        \"\"\"\n        return self.__configuration\n\n    @property\n    def _configuration(self) -> dict:\n        \"\"\"\n        Private Property: Returns the template configuration as dict.\n        This is the merged configuration in case template_configuration was a list of configurations.\n        \"\"\"\n        return self.__configuration\n\n    @_configuration.setter\n    def _configuration(self, template_configuration: Union[dict, list]) -> None:\n        \"\"\"\n        Private Property: Merges a list of template_configuration elements in case a list is specified.\n\n        :param template_configuration: Union[dict, list]:\n\n        \"\"\"\n        if isinstance(template_configuration, list):\n            for entry in template_configuration:\n                self.__configuration = self._dict_deep_update(\n                    self.__configuration, entry\n                )\n        elif isinstance(template_configuration, dict):\n            self.__configuration = template_configuration\n        else:\n            raise TypeError(\n                f\"template_configuration has wrong type:{type(template_configuration)}\"\n            )\n\n    def _dict_deep_update(self, dict_to_update: dict, update: dict) -> dict:\n        \"\"\"\n        Private Method: similar to dict.update() but with full depth.\n\n        :param dict_to_update: dict:\n        :param update: dict:\n\n        Example:\n\n        .. 
code:: python\n\n            dict.update:\n            { 'a': {'b':1, 'c':2} }.update({'a': {'d':3} })\n            -> { 'a': {'d':3} }\n\n            _dict_deep_update:\n            { 'a': {'b':1, 'c':2} } with _dict_deep_update({'a': {'d':3} })\n            -> { 'a': {'b':1, 'c':2, 'd':3} }\n\n        \"\"\"\n        for k, v in iteritems(update):\n            dv = dict_to_update.get(k, {})\n            if not isinstance(dv, abc.Mapping):\n                dict_to_update[k] = v\n            elif isinstance(v, abc.Mapping):\n                dict_to_update[k] = self._dict_deep_update(dv, v)\n            else:\n                dict_to_update[k] = v\n        return dict_to_update\n\n    @property\n    def declaration_template(self) -> Union[str, None]:\n        \"\"\"Read-Only Property returns the declaration template as str or None (if non-existent).\"\"\"\n        return self._declaration_template\n\n    @property\n    def template_configuration(self) -> Union[dict, list, None]:\n        \"\"\"\n        Read-Only Property returns the template configuration(s) as specified during class initialization.\n        It returns either a dict or list of dicts.\n        \"\"\"\n        return self._template_configuration\n\n    def _transform(self) -> None:\n        \"\"\"\n        Private Method: Transforms the declaration_template using the template_configuration to an AS3 declaration.\n        \"\"\"\n        env = Environment(\n            loader=ChoiceLoader(\n                [\n                    DictLoader({\"template\": self.declaration_template}),\n                    FileSystemLoader(searchpath=self._jinja2_searchpath),\n                ]\n            ),\n            trim_blocks=False,\n            lstrip_blocks=False,\n            keep_trailing_newline=True,\n            undefined=StrictUndefined,\n\n        )\n        env.globals[\"jinja2_searchpath\"] = self._jinja2_searchpath + \"/\"\n        env.globals[\"ninja\"] = self.configuration\n        env.globals.update(ninjafunctions)\n        env.filters.update(ninjafilters)\n\n        try:\n            self._declaration = env.get_template(\"template\").render()\n        except (TemplateSyntaxError, UndefinedError) as exc:\n            if isinstance(exc, TemplateSyntaxError):\n                raise AS3TemplateSyntaxError(\n                    \"AS3 declaration template caused jinja2 syntax error\",\n                    self.declaration_template,\n                    exc,\n                )\n            elif isinstance(exc, UndefinedError):\n                raise AS3UndefinedError(\n                    \"AS3 declaration template tried to operate on an Undefined variable, attribute or type\",\n                    exc,\n                )\n","sub_path":"as3ninja/declaration.py","file_name":"declaration.py","file_ext":"py","file_size_in_byte":12154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"654160522","text":"\"\"\"\nCopyright 2019 Cartesi Pte. Ltd.\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use\nthis file except in compliance with the License. You may obtain a copy of the\nLicense at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\nfrom concurrent import futures\nfrom threading import Lock\nimport signal\nimport time\nimport math\nimport grpc\nimport sys\nimport traceback\nimport argparse\nimport pickle\nfrom grpc_reflection.v1alpha import reflection\n\nimport machine_discovery_pb2_grpc\nimport machine_discovery_pb2\nimport machine_manager_pb2_grpc\nimport machine_manager_pb2\nimport cartesi_machine_pb2\nimport utils\nfrom session_registry import SessionIdException, AddressException, RollbackException\n\n# docker graceful shutdown, raise a KeyboardInterrupt in case of SIGTERM\ndef handle_sigterm(*args):\n raise KeyboardInterrupt()\n\nsignal.signal(signal.SIGTERM, handle_sigterm)\n\nLOGGER = utils.get_new_logger(__name__)\nLOGGER = utils.configure_log(LOGGER)\n\nLISTENING_ADDRESS = 'localhost'\nLISTENING_PORT = 50051\nSLEEP_TIME = 5\n\nclass NotReadyException(Exception):\n pass\n\nclass SessionJob:\n\n def __init__(self, session_id):\n self.id = session_id\n self.job_hash = None\n self.job_future = None\n\nclass _MachineManager(machine_manager_pb2_grpc.MachineManagerServicer):\n\n def __init__(self, session_registry_manager):\n self.executor = futures.ThreadPoolExecutor(max_workers=10)\n self.session_registry_manager = session_registry_manager\n self.global_lock = Lock()\n self.job_cache = {}\n self.job = {}\n\n def __set_job_cache__(self, request, future):\n LOGGER.debug(\"Setting job cache\")\n result = future.result()\n\n request_hash = pickle.dumps(request)\n\n #Cache the job only if no exception raised\n self.job_cache[request_hash] = future\n\n return result\n\n def __set_job_future__(self, session_id, future):\n self.job[session_id].job_future = future\n\n def __set_job_hash__(self, session_id, request):\n self.job[session_id].job_hash = request\n\n def __reset_job__(self, session_id):\n self.job[session_id].job_future = None\n self.job[session_id].job_hash = None\n\n def __get_job__(self, session_id, request, err_msg, fn, *args):\n LOGGER.debug(\"Acquiring manager global lock\")\n with self.global_lock:\n LOGGER.debug(\"Lock acquired\")\n request_hash = pickle.dumps(request)\n\n if request_hash in self.job_cache.keys():\n LOGGER.debug(\"Job found in cache\")\n return self.job_cache[request_hash]\n\n if session_id in self.job.keys():\n if self.job[session_id].job_future is not None:\n if self.job[session_id].job_future.done():\n LOGGER.debug(\"Job is done\")\n if request_hash == self.job[session_id].job_hash:\n LOGGER.debug(\"Request hash matches, return job\")\n job = self.job[session_id].job_future\n self.__reset_job__(session_id)\n return job\n else:\n LOGGER.debug(\"Request hash not match, dump result and start fresh\")\n else:\n LOGGER.debug(\"Job is not done\")\n raise NotReadyException(err_msg)\n else:\n LOGGER.debug(\"First SessionJob creation\")\n self.job[session_id] = SessionJob(session_id)\n\n self.__set_job_hash__(session_id, request_hash)\n self.__set_job_future__(session_id, self.executor.submit(fn, *args))\n raise NotReadyException(err_msg)\n\n\n def ServerShuttingDown(self, context):\n if self.session_registry_manager.shutting_down:\n context.set_details(\"Server is shutting down, not accepting new requests\")\n context.set_code(grpc.StatusCode.UNAVAILABLE)\n return True\n else:\n return False\n\n def NewSession(self, request, context):\n try:\n if self.ServerShuttingDown(context):\n return\n\n session_id = request.session_id\n machine_req = request.machine\n LOGGER.info(\"New session 
requested with session_id: {}\".format(session_id))\n\n            return self.session_registry_manager.new_session(session_id, machine_req)\n\n        #No session with provided id or address issue\n        except (SessionIdException, AddressException) as e:\n            LOGGER.error(e)\n            context.set_details(\"{}\".format(e))\n            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n        #Generic error catch\n        except Exception as e:\n            LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n            context.set_details('An exception with message \"{}\" was raised!'.format(e))\n            context.set_code(grpc.StatusCode.UNKNOWN)\n\n    def SessionRun(self, request, context):\n        try:\n            if self.ServerShuttingDown(context):\n                return\n\n            session_id = request.session_id\n            final_cycles = request.final_cycles\n            LOGGER.info(\"New session run requested for session_id {} with final cycles {}\".format(session_id, final_cycles))\n\n            #Validate cycle values\n            utils.validate_cycles(final_cycles)\n\n            err_msg = \"Result is not yet ready for SessionRun: \" + session_id\n            job = self.__get_job__(session_id, request, err_msg, self.session_registry_manager.run_session, session_id, final_cycles)\n            return self.__set_job_cache__(request, job)\n\n        #If the session result is not ready yet, return progress\n        except NotReadyException as e:\n            LOGGER.debug(\"Not ready yet, getting progress\")\n            session_context = self.session_registry_manager.registry[session_id]\n\n            #Calculating cycles related progress\n            last_cycle = request.final_cycles[-1]\n            if session_context.halt_cycle != None:\n                if last_cycle > session_context.halt_cycle:\n                    last_cycle = session_context.halt_cycle\n\n            cycle_progress = 0\n            #Calculating percentage progress with 2 decimal places, if machine already in a cycle\n            #that allows it to run to the desired cycle\n            if (session_context.cycle <= last_cycle):\n                cycle_progress = int(int(session_context.cycle/last_cycle * 10000) / 100)\n\n            #Build a status object to return\n            session_run_progress = machine_manager_pb2.SessionRunProgress(\n                progress=cycle_progress,\n                application_progress=session_context.app_progress,\n                updated_at=int(session_context.updated_at),\n                cycle=session_context.cycle\n            )\n            return machine_manager_pb2.SessionRunResponse(progress=session_run_progress)\n\n        #No session with provided id, address issue, bad final cycles provided or problem during rollback\n        except (SessionIdException, AddressException, utils.CycleException, RollbackException) as e:\n            LOGGER.error(e)\n            context.set_details(\"{}\".format(e))\n            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n        #Generic error catch\n        except Exception as e:\n            LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n            context.set_details('An exception with message \"{}\" was raised!'.format(e))\n            context.set_code(grpc.StatusCode.UNKNOWN)\n\n    def SessionStep(self, request, context):\n        try:\n            if self.ServerShuttingDown(context):\n                return\n\n            session_id = request.session_id\n            initial_cycle = request.initial_cycle\n            step_params = None\n\n            #Setting step_params if provided\n            if (request.WhichOneof(\"step_params_oneof\") is not None):\n                if (request.WhichOneof(\"step_params_oneof\") == \"step_params\"):\n                    step_params = request.step_params\n                    LOGGER.info(\"Step parameters received on request\")\n\n            #Setting default step parameters if none were provided\n            if (step_params == None):\n                log_type = cartesi_machine_pb2.AccessLogType(proofs=True, annotations=False)\n                step_params = cartesi_machine_pb2.StepRequest(log_type=log_type)\n                LOGGER.info(\"Step parameters set 
to default\")\n\n\n LOGGER.info(\"New session step requested for session_id {} with initial cycle {}\\nLog proofs: {}\\nLog annotations: {}\".format(session_id, initial_cycle, step_params.log_type.proofs, step_params.log_type.annotations))\n\n #Validate cycle value\n utils.validate_cycles([initial_cycle])\n\n return self.session_registry_manager.step_session(session_id, initial_cycle, step_params)\n\n #No session with provided id, address issue, bad initial cycle provided or problem during rollback\n except (SessionIdException, AddressException, utils.CycleException, RollbackException) as e:\n LOGGER.error(e)\n context.set_details(\"{}\".format(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n #Generic error catch\n except Exception as e:\n LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n context.set_details('An exception with message \"{}\" was raised!'.format(e))\n context.set_code(grpc.StatusCode.UNKNOWN)\n\n def SessionStore(self, request, context):\n try:\n if self.ServerShuttingDown(context):\n return\n\n session_id = request.session_id\n store_req = request.store\n\n LOGGER.info(\"New session store requested for session_id {} on directory {}\".format(session_id, store_req.directory))\n\n return self.session_registry_manager.session_store(session_id, store_req)\n\n #No session with provided id or address issue\n except (SessionIdException, AddressException) as e:\n LOGGER.error(e)\n context.set_details(\"{}\".format(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n #Generic error catch\n except Exception as e:\n LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n context.set_details('An exception with message \"{}\" was raised!'.format(e))\n context.set_code(grpc.StatusCode.UNKNOWN)\n\n def SessionReadMemory(self, request, context):\n try:\n if self.ServerShuttingDown(context):\n return\n\n session_id = request.session_id\n read_mem_req = request.position\n cycle = request.cycle\n LOGGER.info(\"New session memory read requested for session_id {} on cycle {} for address {} with length {}\".format(session_id, cycle, read_mem_req.address, read_mem_req.length))\n\n return self.session_registry_manager.session_read_mem(session_id, cycle, read_mem_req)\n\n #No session with provided id or address issue\n except (SessionIdException, AddressException) as e:\n LOGGER.error(e)\n context.set_details(\"{}\".format(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n #Generic error catch\n except Exception as e:\n LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n context.set_details('An exception with message \"{}\" was raised!'.format(e))\n context.set_code(grpc.StatusCode.UNKNOWN)\n\n def SessionWriteMemory(self, request, context):\n try:\n if self.ServerShuttingDown(context):\n return\n\n session_id = request.session_id\n write_mem_req = request.position\n cycle = request.cycle\n LOGGER.info(\"New session memory write requested for session_id {} on cycle {} for address {} with data {}\".format(session_id, cycle, write_mem_req.address, write_mem_req.data))\n\n return self.session_registry_manager.session_write_mem(session_id, cycle, write_mem_req)\n\n #No session with provided id or address issue\n except (SessionIdException, AddressException) as e:\n LOGGER.error(e)\n context.set_details(\"{}\".format(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n #Generic error catch\n except Exception as e:\n LOGGER.error(\"An exception 
occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n context.set_details('An exception with message \"{}\" was raised!'.format(e))\n context.set_code(grpc.StatusCode.UNKNOWN)\n\n def SessionGetProof(self, request, context):\n try:\n if self.ServerShuttingDown(context):\n return\n\n session_id = request.session_id\n proof_req = request.target\n cycle = request.cycle\n\n LOGGER.info(\"New session proof requested for session_id {} on cycle {} for address {} with log2_size {}\".format(session_id, cycle, proof_req.address, proof_req.log2_size))\n\n return self.session_registry_manager.session_get_proof(session_id, cycle, proof_req)\n\n #No session with provided id or address issue\n except (SessionIdException, AddressException) as e:\n LOGGER.error(e)\n context.set_details(\"{}\".format(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n #Generic error catch\n except Exception as e:\n LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n context.set_details('An exception with message \"{}\" was raised!'.format(e))\n context.set_code(grpc.StatusCode.UNKNOWN)\n\n\n\nclass _MachineDiscovery(machine_discovery_pb2_grpc.MachineDiscoveryServicer):\n\n def __init__(self, session_registry_manager):\n self.session_registry_manager = session_registry_manager\n\n def CommunicateAddress (self, request, context):\n try:\n address = request.address\n session_id = request.session_id\n\n LOGGER.info(\"Received a CommunicateAddress request for session_id {} and address {}\".format(session_id, address))\n\n self.session_registry_manager.register_address_for_session(session_id, address)\n\n #Returning\n return cartesi_machine_pb2.Void()\n\n #No session with provided id\n except SessionIdException as e:\n LOGGER.error(e)\n context.set_details(\"{}\".format(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n #Generic error catch\n except Exception as e:\n LOGGER.error(\"An exception occurred: {}\\nTraceback: {}\".format(e, traceback.format_exc()))\n context.set_details('An exception with message \"{}\" was raised!'.format(e))\n context.set_code(grpc.StatusCode.UNKNOWN)\n\ndef serve(args):\n listening_add = args.address\n listening_port = args.port\n\n #Importing the defective session registry if defective flag is set\n if args.defective:\n from defective_session_registry import SessionRegistryManager\n else:\n from session_registry import SessionRegistryManager\n\n manager_address = '{}:{}'.format(listening_add, listening_port)\n session_registry_manager = SessionRegistryManager(manager_address)\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n machine_manager_pb2_grpc.add_MachineManagerServicer_to_server(_MachineManager(session_registry_manager),\n server)\n machine_discovery_pb2_grpc.add_MachineDiscoveryServicer_to_server(_MachineDiscovery(session_registry_manager),\n server)\n\n SERVICE_NAMES = (\n machine_manager_pb2.DESCRIPTOR.services_by_name['MachineManager'].full_name,\n machine_discovery_pb2.DESCRIPTOR.services_by_name['MachineDiscovery'].full_name,\n reflection.SERVICE_NAME,\n )\n reflection.enable_server_reflection(SERVICE_NAMES, server)\n server.add_insecure_port(manager_address)\n server.start()\n LOGGER.info(\"Server started, listening on address {} and port {}\".format(listening_add, listening_port))\n try:\n while True:\n time.sleep(SLEEP_TIME)\n except KeyboardInterrupt:\n LOGGER.info(\"\\nIssued to shut down\")\n\n LOGGER.debug(\"Acquiring session registry global lock\")\n #Acquiring lock to write on session 
registry\n with session_registry_manager.global_lock:\n LOGGER.debug(\"Session registry global lock acquired\")\n session_registry_manager.shutting_down = True\n\n #Shutdown all active sessions servers\n for session_id in session_registry_manager.registry.keys():\n LOGGER.debug(\"Acquiring lock for session {}\".format(session_id))\n with session_registry_manager.registry[session_id].lock:\n LOGGER.debug(\"Lock for session {} acquired\".format(session_id))\n if (session_registry_manager.registry[session_id].address):\n utils.shutdown_cartesi_machine_server(session_id, session_registry_manager.registry[session_id].address)\n\n shutdown_event = server.stop(0)\n\n LOGGER.info(\"Waiting for server to stop\")\n shutdown_event.wait()\n LOGGER.info(\"Server stopped\")\n\nif __name__ == '__main__':\n\n #Adding argument parser\n description = \"Instantiates a machine manager server, responsible for managing and interacting with multiple cartesi machine instances\"\n\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\n '--address', '-a',\n dest='address',\n default=LISTENING_ADDRESS,\n help='Address to listen (default: {})'.format(LISTENING_ADDRESS)\n )\n parser.add_argument(\n '--port', '-p',\n dest='port',\n default=LISTENING_PORT,\n help='Port to listen (default: {})'.format(LISTENING_PORT)\n )\n parser.add_argument(\n '--defective', '-d',\n dest='defective',\n action='store_true',\n help='Makes server behave improperly, injecting errors silently in the issued commands\\n\\n' + '-'*23 + 'WARNING!' + '-'*23 + 'FOR TESTING PURPOSES ONLY!!!\\n' + 54*'-'\n )\n\n #Getting arguments\n args = parser.parse_args()\n\n serve(args)\n","sub_path":"manager_server.py","file_name":"manager_server.py","file_ext":"py","file_size_in_byte":18880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"442939035","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport uuid\nimport copy\nimport httplib\nimport unittest\nimport responses\nimport requests\nimport re\nfrom datetime import datetime, timedelta\nfrom os.path import abspath, join\nimport jwt\n\nfrom drift.utils import get_tier_name\nfrom drift.tenant import construct_db_name\n\nimport logging\nlog = logging.getLogger(__name__)\n\nservice_username = \"user+pass:$SERVICE$\"\nservice_password = \"SERVICE\"\nlocal_password = \"LOCAL\"\n\nbig_number = 9999999999\n\n\ndef uuid_string():\n return str(uuid.uuid4()).split(\"-\")[0]\n\n\ndb_name = None\n\n\ndef flushwrite(text):\n sys.stdout.write(text + '\\n')\n sys.stdout.flush()\n\n\ndef _get_test_target():\n target = os.environ.get(\"drift_test_target\")\n return target\n\n\ndef _get_test_db():\n db = os.environ.get(\"drift_test_database\")\n return db\n\n\ndef setup_tenant():\n \"\"\"\n Called from individual test modules.\n create a tenant only if the test module was not called from\n the kitrun's systest command\n (in which case drift_test_database has been set in environ)\n Also configure some basic parameters in the app\n \"\"\"\n from appmodule import app\n global db_name\n tenant_name = _get_test_db()\n service_name = app.config[\"name\"]\n from drift.utils import get_tier_name\n tier_name = get_tier_name()\n\n db_name = construct_db_name(tenant_name, service_name, tier_name)\n test_target = _get_test_target()\n if test_target:\n flushwrite(\n \"Skipping tenant setup due to \"\n \"manually specified test target: %s\" % test_target\n )\n return\n\n db_host = app.config[\"systest_db\"][\"server\"]\n 
app.config[\"db_connection_info\"][\"server\"] = db_host\n app.config[\"default_tenant\"] = tenant_name\n app.config[\"service_user\"] = {\n \"username\": service_username,\n \"password\": service_password\n }\n conn_string = \"postgresql://zzp_user:zzp_user@{}/{}\" \\\n .format(db_host, db_name)\n test_tenant = {\n \"name\": tenant_name,\n \"db_connection_string\": conn_string,\n }\n app.config[\"tenants\"].insert(0, test_tenant)\n # flushwrite(\"Adding test tenant '%s'\" % test_tenant)\n # TODO: _get_env assumes \"*\" is the last tenant and screws things up\n # if you append something else at the end. Fix this plz.\n\n # Add public and private key for jwt.\n\n app.config['private_key'] = private_test_key\n app.config['jwt_trusted_issuers'] = [\n {\n \"iss\": app.config['name'],\n \"pub_rsa\": public_test_key,\n }\n ]\n\nprivate_test_key = '''\n-----BEGIN RSA PRIVATE KEY-----\nMIIBygIBAAJhAOOEkKLzpVY5zNbn2zZlz/JlRe383fdnsuy2mOThXpJc9Tq+GuI+\nPJJXsNa5wuPBy32r46/N8voe/zUG4qYrrRCRyjmV0yu4kZeNPSdO4uM4K98P1obr\nUaYrik9cpwnu8QIDAQABAmA+BSAMW5CBfcYZ+yAlpwFVmUfDxT+YtpruriPlmI3Y\nJiDvP21CqSaH2gGptv+qaGQVq8E1xcxv9jT1qK3b7wm7+xoxTYyU0XqZC3K+lGeW\n5L+77H59RwQznG21FvjtRgECMQDzihOiirv8LI2S7yg11/DjC4c4lIzupjnhX2ZH\nweaLJcjGogS/labJO3b2Q8RUimECMQDvKKKl1KiAPNvuylcrDw6+yXOBDw+qcwiP\nrKysATJ2iCsOgnLC//Rk3+SN3R2+TpECMGjAglOOsu7zxu1levk16cHu6nm2w6u+\nyfSbkSXaTCyb0vFFLR+u4e96aV/hpCfs4QIwd/I0aOFYRUDAuWmoAEOEDLHyiSbp\nn34kLBLZY0cSbRpsJdHNBvniM/mKoo/ki/7RAjEAtpt6ixFoEP3w/2VLh5cut61x\nE74vGa3+G/KdGO94ZnI9uxySb/czhnhvOGkpd9/p\n-----END RSA PRIVATE KEY-----\n'''\n\npublic_test_key = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAYQDjhJCi86VWOc\" \\\n \"zW59s2Zc/yZUXt/N33Z7Lstpjk4V6SXPU6vhriPjySV7DWucLjwct9q+Ovz\" \\\n \"fL6Hv81BuKmK60Qkco5ldMruJGXjT0nTuLjOCvfD9aG61GmK4pPXKcJ7vE=\" \\\n \" unittest@dg-api.com\"\n\n\n\ndef remove_tenant():\n \"\"\"\n Called from individual test modules.\n remove the tenant only if the test module\n was not called from the kitrun's systest command\n \"\"\"\n test_target = _get_test_target()\n if test_target:\n flushwrite(\n \"Skipping tenant removal due to \"\n \"manually specified test target: %s\" % test_target\n )\n return\n # TODO: Not implemented!\n\n\ndef user_payload(user_id=1, player_id=1, role=\"player\", user_name=\"user_name\", client_id=1):\n \"\"\"Returns a dictionary containing typical user data for a JWT\"\"\"\n return {\n \"user_id\": user_id,\n \"player_id\": player_id,\n \"roles\": [role],\n \"user_name\": user_name,\n \"client_id\": client_id,\n }\n\n\ndef set_config_file(test_filename):\n config_file = abspath(join(test_filename, \"..\", \"..\", \"..\", \"config\", \"config.json\"))\n os.environ.setdefault(\"drift_CONFIG\", config_file)\n\n\ndef create_standard_claims_for_test():\n \"\"\"\n Duplicate of the code from jwtsetup but does not use the\n application context to get tenant, deployable and tier\n (which should probably be refactored instead of duplicated)\n \"\"\"\n from appmodule import app\n\n expire = 86400\n tier_name = get_tier_name()\n iat = datetime.utcnow()\n exp = iat + timedelta(seconds=expire)\n nbf = iat + timedelta(seconds=0)\n jti = str(uuid.uuid4()).replace(\"-\", \"\")\n iss = app.config[\"name\"]\n standard_claims = {\n # JWT standard fields\n 'iat': iat,\n 'exp': exp,\n 'nbf': nbf,\n 'jti': jti,\n 'iss': iss,\n\n # Drift fields\n 'tier': tier_name,\n 'tenant': _get_test_db(),\n 'deployable': iss,\n }\n return standard_claims\n\n\nclass DriftBaseTestCase(unittest.TestCase):\n\n headers = {}\n token = None\n current_user = {}\n endpoints = {}\n 
player_id = None\n user_id = None\n\n @staticmethod\n def mock(func):\n @responses.activate\n def wrapped(self, *args, **kwargs):\n self._setup_mocking()\n return func(self, *args, **kwargs)\n\n def passthrough(self, *args, **kwargs):\n return func(self, *args, **kwargs)\n\n if _get_test_target():\n return passthrough\n else:\n return wrapped\n\n def _do_request(self, method, endpoint, data=None,\n params=None, *args, **kw):\n\n \"\"\"\n Note that here we must use a inner function, otherwise mock\n will be evaluated at the module import time, by which time\n the 'drift_test_target' environ variable has not been setup yet\n \"\"\"\n @DriftBaseTestCase.mock\n def inner(self, method, endpoint, data, params, *args, **kw):\n check = kw.pop(\"check\", True)\n expected_status_code = kw.pop(\"expected_status_code\", httplib.OK)\n headers = copy.copy(self.headers)\n if \"Accept\" not in headers:\n headers[\"Accept\"] = \"application/json\"\n\n if data:\n headers[\"Content-Type\"] = \"application/json\"\n if not isinstance(data, list) and not isinstance(data, dict):\n raise Exception(\"Data must be a list or a dict: %s\" % data)\n if db_name:\n headers[\"tenant\"] = db_name\n\n if not endpoint.startswith(self.host):\n endpoint = self.host + endpoint\n\n r = getattr(requests, method)(\n endpoint,\n json=data,\n headers=headers,\n params=params\n\n )\n if check:\n self.assertEqual(\n r.status_code, expected_status_code,\n u\"Status code should be {} but is {}: {}\".format(\n expected_status_code,\n r.status_code, r.text.replace(\"\\\\n\", \"\\n\")\n )\n )\n return r\n return inner(self, method, endpoint, data, params, *args, **kw)\n\n def _setup_mocking(self):\n def _mock_callback(request):\n method = request.method.lower()\n url = request.path_url\n handler = getattr(self.app, method)\n r = handler(\n url,\n data=request.body,\n headers=dict(request.headers)\n )\n return (r.status_code, r.headers, r.data)\n\n pattern = re.compile(\"{}/(.*)\".format(self.host))\n methods = [\n responses.GET,\n responses.POST,\n responses.PUT,\n responses.DELETE,\n responses.PATCH,\n ]\n for method in methods:\n responses.add_callback(\n method, pattern,\n callback=_mock_callback\n )\n\n def get(self, *args, **kw):\n return self._do_request(\"get\", *args, **kw)\n\n def put(self, *args, **kw):\n return self._do_request(\"put\", *args, **kw)\n\n def post(self, *args, **kw):\n return self._do_request(\"post\", *args, **kw)\n\n def delete(self, *args, **kw):\n return self._do_request(\"delete\", *args, **kw)\n\n def patch(self, *args, **kw):\n return self._do_request(\"patch\", *args, **kw)\n\n def setUp(self):\n pass\n\n def auth(self, payload=None, username=\"systest\"):\n \"\"\"\n If payload is supplied we JWT encode it using the current\n app's secret and add it to the headers.\n If payload is not supplied we do an auth call against the\n current app's /auth endpoint\n \"\"\"\n if not payload:\n payload = {\n \"username\": username,\n \"password\": local_password,\n }\n resp = self.post(\"/auth\", data=payload)\n token = resp.json()[\"token\"]\n jti = resp.json()[\"jti\"]\n else:\n payload.update(create_standard_claims_for_test())\n from appmodule import app\n token = jwt.encode(payload, app.config['private_key'], algorithm='RS256')\n jti = payload[\"jti\"]\n self.token = token\n self.jti = jti\n self.current_user = jwt.decode(self.token, verify=False)\n self.player_id = self.current_user[\"player_id\"]\n self.user_id = self.current_user[\"user_id\"]\n self.headers = {\"Authorization\": \"JWT \" + token, }\n\n r = 
self.get(\"/\")\n self.endpoints = r.json()[\"endpoints\"]\n\n def auth_service(self):\n \"\"\"\n Authenticate as a service user\n \"\"\"\n payload = {\n \"username\": service_username,\n \"password\": service_password,\n \"provider\": \"user+pass\"\n }\n resp = self.post(\"/auth\", data=payload)\n token = resp.json()[\"token\"]\n jti = resp.json()[\"jti\"]\n self.token = token\n self.jti = jti\n self.current_user = jwt.decode(self.token, verify=False)\n self.player_id = self.current_user[\"player_id\"]\n self.user_id = self.current_user[\"user_id\"]\n self.headers = {\"Authorization\": \"JWT \" + token, }\n\n r = self.get(\"/\")\n self.endpoints = r.json()[\"endpoints\"]\n\n @classmethod\n def setUpClass(cls):\n target = _get_test_target()\n cls.host = target or \"http://localhost\"\n if not target:\n from appmodule import app\n cls.app = app.test_client()\n\n @classmethod\n def tearDownClass(cls):\n pass\n","sub_path":"drift/systesthelper.py","file_name":"systesthelper.py","file_ext":"py","file_size_in_byte":10905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354539780","text":"from typing import List, Tuple, Optional\n\nimport numpy as np\nimport pandas as pd\nimport torch.utils.data\n\n__all__ = ['DaskDataLoader', 'DaskDataLoaderIter']\n\n\ndef _assert_sorted(arr):\n it = iter(arr)\n v_prev = next(it)\n for v in it:\n assert v_prev < v\n v_prev = v\n\n\nclass DaskDataLoaderIter:\n def __init__(self, dl):\n self.dl = dl\n self.next_batch = 0\n\n def __next__(self):\n if self.next_batch >= len(self.dl.batches):\n raise StopIteration()\n rv = self.dl.read_batch(self.next_batch)\n self.next_batch += 1\n return rv\n\n\nclass DaskDataLoader:\n\n def __init__(self, ds, columns):\n # Get partition sizes\n self.ds = ds\n self.batches: Optional[List[Tuple[int, int, int]]] = None\n\n self.column_ids = []\n self.all_columns = []\n for i in columns:\n if isinstance(i, list):\n col_batch = []\n for j in i:\n col_batch.append(len(self.all_columns))\n self.all_columns.append(j)\n self.column_ids.append(col_batch)\n else:\n self.column_ids.append(len(self.all_columns))\n self.all_columns.append(i)\n self.cur_part_i = None\n self.cur_part = None\n self.get_partition(0)\n\n def infer_batch_split(self, min_batch_size, max_batch_size):\n\n # Patch format\n self.batches = list()\n\n for part_i, part in enumerate(self.ds.partitions):\n\n index = pd.Series(part.index.compute())\n _assert_sorted(index)\n\n # Dedup index - we want all entries with same index in same batch\n batch_index = np.array(index.drop_duplicates(keep='last').index)\n\n ind_max = batch_index[-1]\n\n batch_end = 0\n while batch_end < ind_max:\n batch_beg = batch_end\n batch_end = batch_index[batch_index < batch_beg + max_batch_size][-1]\n batch_size = batch_end - batch_beg\n\n if batch_size >= min_batch_size:\n self.batches.append((part_i, batch_beg, batch_end))\n\n def set_batch_split(self, batches):\n self.batches = batches\n\n def get_partition(self, part_i):\n if self.cur_part_i is not None and self.cur_part_i == part_i:\n return self.cur_part\n else:\n self.cur_part_i = part_i\n self.cur_part = self.ds.get_partition(self.cur_part_i)[self.all_columns].compute().values\n return self.cur_part\n\n def read_batch(self, batch_i):\n part_i, batch_beg, batch_end = self.batches[batch_i]\n part = self.get_partition(part_i)[batch_beg:batch_end]\n rv = tuple((torch.tensor(part[:, i]) for i in self.column_ids))\n return rv\n\n def __len__(self):\n return len(self.batches)\n\n def 
__iter__(self):\n return DaskDataLoaderIter(self)\n","sub_path":"dask_pipes/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"19311602","text":"#Csci 1913\n#Violet Chang\n#5197617\nimport types\n\ndef left(e):\n return e[0]\ndef op(e):\n return e[1]\ndef right(e):\n return e[2]\n\ndef isInside(v,e):\n if v == e:\n return True\n elif type(e) == types.TupleType:\n return isInside(v,left(e)) or isInside(v,right(e))\n else:\n return False\n\ndef solve(v,q):\n if isInside(v,left(q)):\n return solving(v,q)\n elif isInside(v,right(q)):\n p = (right(q),op(q),left(q))\n return solving(v,p)\n else:\n return None\n\ndef solving(v,q):\n if v == left(q):\n return q\n elif op(left(q)) == '+':\n return solvingAdd(v,q)\n elif op(left(q)) == '-':\n return solvingSubtract(v,q)\n elif op(left(q)) == '*':\n return solvingMultiply(v,q)\n elif op(left(q)) == '/':\n return solvingDivide(v,q)\n else:\n return None\n\ndef solvingAdd(v,q):\n if isInside(v,left(left(q))):\n a = (right(q),'-',right(left(q)))\n b = (left(left(q)),op(q),a)\n return solving(v,b)\n elif isInside(v,right(left(q))):\n a = (right(q),'-',left(left(q)))\n b = (right(left(q)),op(q),a)\n return solving(v,b)\n\ndef solvingSubtract(v,q):\n if isInside(v,left(left(q))):\n a = (right(q),'+',right(left(q)))\n b = (left(left(q)),op(q),a)\n return solving(v,b)\n elif isInside(v,right(left(q))):\n a = (left(left(q)),'-',right(q))\n b = (right(left(q)),op(q),a)\n return solving(v,b)\n\ndef solvingMultiply(v,q):\n if isInside(v,left(left(q))):\n a = (right(q),'/',right(left(q)))\n b = (left(left(q)),op(q),a)\n return solving(v,b)\n elif isInside(v,right(left(q))):\n a = (right(q),'/',left(left(q)))\n b = (right(left(q)),op(q),a)\n return solving(v,b)\n\ndef solvingDivide(v,q):\n if isInside(v,left(left(q))):\n a = (right(q),'*',right(left(q)))\n b = (left(left(q)),op(q),a)\n return solving(v,b)\n elif isInside(v,right(left(q))):\n a = (left(left(q)),'/',right(q))\n b = (right(left(q)),op(q),a)\n return solving(v,b)\n\n#the tests \nprint(isInside('x', 'x')) \nprint(isInside('x', 'y')) \nprint(isInside('x', ('x', '+', 'y'))) \nprint(isInside('x', ('a', '+', 'b'))) \nprint(isInside('x', (('m', '*', 'x'), '+', 'b'))) \n\nprint(solve('x', (('a', '+', 'x'), '=', 'c'))) \nprint(solve('x', (('x', '+', 'b'), '=', 'c'))) \nprint(solve('x', (('a', '-', 'x'), '=', 'c')))\nprint(solve('x', (('x', '-', 'b'), '=', 'c'))) \nprint(solve('x', (('a', '*', 'x'), '=', 'c'))) \nprint(solve('x', (('x', '*', 'b'), '=', 'c'))) \nprint(solve('x', (('a', '/', 'x'), '=', 'c'))) \nprint(solve('x', (('x', '/', 'b'), '=', 'c'))) \nprint(solve('x', ('y', '=', (('m', '*', 'x'), '+', 'b')))) \n\n#the results\n#True\n#False\n#True\n#False\n#True\n#('x', '=', ('c', '-', 'a'))\n#('x', '=', ('c', '-', 'b'))\n#('x', '=', ('a', '-', 'c'))\n#('x', '=', ('c', '+', 'b'))\n#('x', '=', ('c', '/', 'a'))\n#('x', '=', ('c', '/', 'b'))\n#('x', '=', ('a', '/', 'c'))\n#('x', '=', ('c', '*', 'b'))\n#('x', '=', (('y', '-', 'b'), '/', 'm'))\n","sub_path":"csci1913/Lab1/chan1300_lab1.py","file_name":"chan1300_lab1.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"529128039","text":"def insert_sort(li):\n \"\"\"插入排序\"\"\"\n for i in range(1,len(li)):\n tmp = li[i]\n j = i-1\n while j >= 0 and tmp < li[j]:\n li[j+1] = li[j]\n j -= 1\n li[j+1] = tmp\n\nif __name__ == 
'__main__':\n list = [1,7,2,5,7,6,3,9,4]\n insert_sort(list)\n print(list)\n","sub_path":"02_insert sort.py","file_name":"02_insert sort.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"83910926","text":"from mptt.models import MPTTModel, TreeForeignKey\nfrom django.contrib.comments.models import Comment\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom lib.customTime import humanizeTimeDiff, timeDiffRaw\nfrom Dare.models import Dare\nimport datetime\nimport math\n\nclass ThreadedCommentManager(models.Manager):\n # Because comments can be tagged to any model, we need this special filter.\n def getListFor(self, typeFilter, pkFilter=None):\n if pkFilter == None:\n return super(ThreadedCommentManager, self).get_query_set().filter(content_type=typeFilter)\n return super(ThreadedCommentManager, self).get_query_set().filter\\\n (content_type=typeFilter).filter(object_pk=int(pkFilter))\n\n# Comments will have relationships with more than one model. They can be under\n# dares or proofs, or even other comments (threaded).\n# This model uses Custom tree traversal models from the mptt library.\n# https://github.com/django-mptt/django-mptt/\nclass ThreadedComment(MPTTModel, Comment):\n parent = TreeForeignKey('self', null = True, blank = True, related_name = \"children\")\n upvotes = models.ManyToManyField(User, blank = True, null = True,\n related_name = \"upvoted_comments\", editable = False)\n downvotes = models.ManyToManyField(User, blank = True, null = True,\n related_name = \"downvoted_comments\", editable = False)\n totalVotes = models.IntegerField(default = 1, blank = True, null = True, editable = False)\n hotness = models.FloatField(\"Hotness\", default = 0, editable = False)\n\n objects = ThreadedCommentManager()\n\n def __unicode__(self):\n return \"%s, by %s\" % (self.comment[0:20], self.user)\n\n def howLongAgo(self):\n return humanizeTimeDiff(self.submit_date)\n\n def setVoteCount(self):\n self.totalVotes = 1 + self.upvotes.count() - self.downvotes.count()\n self.save()\n\n def setHotness(self):\n order = math.log10(max(abs(self.totalVotes), 1))\n sign = 1 if self.totalVotes > 0 else -1 if self.totalVotes < 0 else 0\n time = self.getRawAge() - 1134028003\n self.hotness = round(order + sign * time / 45000, 7)\n self.save()\n\n def getRawAge(self):\n return timeDiffRaw(self.submit_date) # Seconds of existance\n\n class MPTTMeta:\n # comments on one level will be ordered by date of creation\n order_insertion_by = ['hotness'] # Controls ordering in the first level. 
'submit_date'\n\n # We need to use the Comments meta because we are using the Comment's tag for the list.\n class Meta:\n ordering = ['-tree_id', 'lft']\n","sub_path":"Comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"371138561","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom beike.items import BeikeItem\nimport sys\nfrom bs4 import BeautifulSoup\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\nclass ErshoufangSpider(scrapy.Spider):\n name = 'ershoufang'\n allowed_domains = ['bj.ke.com']\n base_url = 'https://bj.ke.com'\n\n def start_requests(self):\n start_urls = ['https://bj.ke.com/ershoufang/']\n for url in start_urls:\n yield scrapy.Request(url=url, callback=self.parse_qu)\n\n def parse_qu(self, response):\n soup = BeautifulSoup(response.text, \"lxml\")\n quyuhref = soup.find('div', attrs={\"data-role\": \"ershoufang\"}).find('div').find_all('a')\n for href in quyuhref:\n uri = href.get('href')\n daqu = href.text\n yield scrapy.Request(url=self.base_url + uri, callback=self.parse_bankuai, meta={'daqu': daqu, 'uri': uri})\n\n def parse_bankuai(self, response):\n daqu = response.meta['daqu']\n soup = BeautifulSoup(response.text, \"lxml\")\n bankuaihref = soup.find('div', attrs={\"data-role\": \"ershoufang\"}).find_all('div')[1].find_all('a')\n for href in bankuaihref:\n uri = href.get('href')\n qu = href.text\n yield scrapy.Request(url=self.base_url + uri, callback=self.parse,\n meta={\"url\": self.base_url + uri, \"qu\": qu, \"daqu\": daqu})\n\n def parse(self, response):\n lj = response.meta[\"url\"]\n daqu = response.meta[\"daqu\"]\n qu = response.meta[\"qu\"]\n soup = BeautifulSoup(response.text, \"lxml\")\n lis = soup.find('div', attrs={\"data-component\": \"list\"}).find_all('li', attrs={\"class\": \"clear\"})\n for li in lis:\n item = BeikeItem()\n item['href'] = li.find('div', attrs={\"class\": \"info clear\"}).find('div', attrs={\"class\": \"title\"}).find(\n 'a').get('href')\n item['totalPrice'] = li.find('div', attrs={\"class\": \"totalPrice\"}).find('span').text\n item['unitPrice'] = li.find('div', attrs={\"class\": \"unitPrice\"}).get(\"data-price\")\n item['title'] = li.find('div', attrs={\"class\": \"info clear\"}).find('div', attrs={\"class\": \"title\"}).find(\n 'a').get('title')\n item['address'] = li.find('div', attrs={\"class\": \"positionInfo\"}).find('a').text\n item['address_uri'] = li.find('div', attrs={\"class\": \"positionInfo\"}).find('a').get('href')\n item['houseInfo'] = li.find('div', attrs={\"class\": \"houseInfo\"}).text.replace('\\n', '').replace(' ',\n '').split(\n \"|\")\n item['followInfo'] = li.find('div', attrs={\"class\": \"followInfo\"}).text.replace('\\n', '').replace(' ', '')\n tags = []\n tag_cont = li.find('div', attrs={\"class\": \"tag\"}).find_all('span')\n for tag in tag_cont:\n tags.append(tag.text)\n item['tags'] = tags\n item['lj'] = lj\n item['daqu'] = daqu\n item['qu'] = qu\n yield item\n pages = eval(soup.find('div', attrs={\"class\": \"page-box house-lst-page-box\"}).get('page-data'))\n if pages['totalPage'] != pages['curPage']:\n yield scrapy.Request(url=lj + \"/pg\" + str(int(pages['curPage'] + 1)), callback=self.parse,\n meta={\"url\": lj, \"qu\": qu, \"daqu\": daqu})\n","sub_path":"beike/beike/spiders/ershoufang.py","file_name":"ershoufang.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
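The `ThreadedComment` record above (seq_id 83910926) computes its `hotness` field with a Reddit-style "hot" ranking: the net vote count contributes logarithmically while time contributes linearly, so every 45000 seconds (12.5 hours) is worth one order of magnitude of votes, measured from the constant 1134028003 (2005-12-08 UTC, the reference epoch in Reddit's published formula). Below is a minimal standalone sketch of the conventional formulation — an illustration, not that project's code; the names (`REDDIT_EPOCH`, `hot`) are mine. Note the record itself deviates in two ways: it feeds the comment's *age* in seconds (far smaller than the epoch constant, making the time term hugely negative) into the formula, and it multiplies the *time* term rather than the log term by the sign of the votes, which appears to invert the ranking so downvoted comments come out hotter than upvoted ones.

import math
from datetime import datetime, timezone

REDDIT_EPOCH = 1134028003  # 2005-12-08 UTC, reference point of the conventional formula (assumed name)

def hot(ups: int, downs: int, submitted: datetime) -> float:
    """Conventional hot score: log-scaled net votes plus a linear recency bonus."""
    score = ups - downs
    order = math.log10(max(abs(score), 1))
    sign = 1 if score > 0 else -1 if score < 0 else 0
    # Absolute submission time relative to the epoch, so newer posts score higher.
    seconds = submitted.replace(tzinfo=timezone.utc).timestamp() - REDDIT_EPOCH
    return round(sign * order + seconds / 45000, 7)

# Example: 100 net upvotes (two orders of magnitude) buys the same boost as
# 2 * 45000 s = 25 hours of recency.
print(hot(101, 1, datetime(2020, 1, 1)))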
+{"seq_id":"160711358","text":"# title: paint-house-iii\n# detail: https://leetcode.com/submissions/detail/381236499/\n# datetime: Sat Aug 15 22:28:24 2020\n# runtime: 492 ms\n# memory: 19.8 MB\n\nfrom functools import lru_cache\nimport math\n\nclass Solution:\n def minCost(self, houses: List[int], cost: List[List[int]], m: int, n: int, target: int) -> int:\n MAX_COST = 10 ** 7\n \n @lru_cache(None)\n def paint(i, color, k):\n # print(i, k, color)\n if k == 0 and i == m:\n return 0\n if k < 0 or i == m:\n return MAX_COST\n if m - i < k:\n return MAX_COST\n if houses[i] != 0:\n return paint(i + 1, houses[i], k - (1 if houses[i] != color else 0))\n return min((cost[i][c - 1] + paint(i + 1, c, k - (1 if c != color else 0)) for c in range(1, n + 1)))\n # print(i, k, color, total_cost)\n # return total_cost\n \n # neighbors = 0\n # prev = 0\n # for h in houses:\n # if h == 0: \n # continue\n # if h != prev:\n # neighbors += 1\n # prev = h\n # if neighbors > target:\n # return -1\n res = paint(0, -1, target)\n return res if res < MAX_COST else -1","sub_path":"leetcode/paint-house-iii/381236499.py","file_name":"381236499.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"566742185","text":"# Assignment - Introduction to Functions\n# Q6-Program that asks the user for a number and deides whether that number is prime or\n# not.\n# Michael Xu\n\n######### FUNCTIONS #########\ndef prime(number_):\n factors = 0\n for i in range(2, number_):\n if number_ % i == 0:\n factors += 1\n if factors == 0:\n return True\n else:\n return False\n\n\n######## MAIN PROGRAM ########\nnumber = int(input(\"Please enter a positive integer: \"))\n\nif prime(number) == True:\n print(number, \"is prime.\")\nelse:\n print(number, \"is composite.\")\n","sub_path":"Introduction to Functions/Q6-Prime_.py","file_name":"Q6-Prime_.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"461690347","text":"from django.core.exceptions import ValidationError\n\nfrom engine.testcases import EngineTestCase\nfrom engine_modules.corporation_run.models import DataStealOrder, ExtractionOrder\n\n\nclass SignalsTest(EngineTestCase):\n\tdef setUp(self):\n\n\t\tsuper(SignalsTest, self).setUp()\n\n\tdef test_datasteal_target_stealer_different(self):\n\t\t\"\"\"\n\t\tTarget and stealer must be different for Datasteal.\n\t\t\"\"\"\n\n\t\tself.dso = DataStealOrder(\n\t\t\tplayer=self.p,\n\t\t\ttarget_corporation=self.c,\n\t\t\tstealer_corporation=self.c\n\t\t)\n\n\t\tself.assertRaises(ValidationError, self.dso.clean)\n\n\tdef test_extraction_target_stealer_different(self):\n\t\t\"\"\"\n\t\tTarget and stealer must be different for Extraction.\n\t\t\"\"\"\n\n\t\tself.dso = ExtractionOrder(\n\t\t\tplayer=self.p,\n\t\t\ttarget_corporation=self.c,\n\t\t\tkidnapper_corporation=self.c\n\t\t)\n\n\t\tself.assertRaises(ValidationError, self.dso.clean)\n","sub_path":"engine_modules/corporation_run/tests/test_signals.py","file_name":"test_signals.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359715030","text":"from isobar.note import *\nfrom isobar.pattern.core import *\nfrom isobar.timeline import TICKS_PER_BEAT\n\nimport mido\n\nimport logging\nlog = logging.getLogger(__name__)\n\nclass MidiNote:\n def __init__(self, pitch, velocity, location, duration = None):\n # 
pitch = MIDI 0..127\n self.pitch = pitch\n # velocity = MIDI 0..127\n self.velocity = velocity\n # location in time, beats\n self.location = location\n # duration in time, beats\n self.duration = duration\n\nclass MidiFileIn:\n \"\"\" Read events from a MIDI file.\n Requires mido. \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n\n def read(self, quantize = 0.25):\n midi_reader = mido.MidiFile(self.filename)\n note_tracks = list(filter(lambda track: any(message.type == 'note_on' for message in track), midi_reader.tracks))\n if not note_tracks:\n raise ValueError(\"Could not find any tracks with note data\")\n\n #------------------------------------------------------------------------\n # TODO: Support for multiple tracks\n #------------------------------------------------------------------------\n track = note_tracks[0]\n\n notes = []\n offset = 0\n for event in track:\n if event.type == 'note_on' and event.velocity > 0:\n #------------------------------------------------------------------------\n # Found a note_on event.\n #------------------------------------------------------------------------\n note = MidiNote(event.note, event.velocity, offset)\n notes.append(note)\n offset += event.time / 480.0\n elif event.type == 'note_off' or (event.type == 'note_on' and event.velocity == 0):\n #------------------------------------------------------------------------\n # Found a note_off event.\n #------------------------------------------------------------------------\n for note in reversed(notes):\n if note.pitch == event.note:\n note.duration = offset - note.location\n break\n offset += event.time / 480.0\n\n for note in notes:\n if quantize:\n # note.location = round(note.location / quantize) * quantize\n # note.duration = round(note.duration / quantize) * quantize\n pass\n print(\"%d (%d, %f)\" % (note.pitch, note.velocity, note.duration))\n\n #------------------------------------------------------------------------\n # Construct a sequence which honours chords and relative lengths.\n # First, group all notes by their starting time.\n #------------------------------------------------------------------------\n notes_by_time = {}\n for note in notes:\n log.debug(\"(%.2f) %d/%d, %s\" % (note.location, note.pitch, note.velocity, note.duration))\n location = note.location\n if location in notes_by_time:\n notes_by_time[location].append(note)\n else:\n notes_by_time[location] = [ note ]\n\n note_dict = {\n \"note\" : [],\n \"amp\" : [],\n \"gate\" : [],\n \"dur\" : []\n }\n for n in notes_by_time:\n print(\"%s - %s\" % (n, notes_by_time[n]))\n\n #------------------------------------------------------------------------\n # Next, iterate through groups of notes chronologically, figuring out\n # appropriate parameters for duration (eg, inter-note distance) and\n # gate (eg, proportion of distance note extends across).\n #------------------------------------------------------------------------\n times = sorted(notes_by_time.keys())\n for i in range(len(times)):\n time = times[i]\n notes = notes_by_time[time]\n\n #------------------------------------------------------------------------\n # Our duration is always determined by the time of the next note event.\n # If a next note does not exist, this is the last note of the sequence;\n # use the maximal length of note currently playing (assuming a chord)\n #------------------------------------------------------------------------\n if i < len(times) - 1:\n next_time = times[i + 1]\n else:\n next_time = time + max([ note.duration for note 
in notes ])\n\n dur = next_time - time\n note_dict[\"dur\"].append(dur)\n\n if len(notes) > 1:\n note_dict[\"note\"].append(tuple(note.pitch for note in notes))\n note_dict[\"amp\"].append(tuple(note.velocity for note in notes))\n note_dict[\"gate\"].append(tuple(note.duration / dur for note in notes))\n else:\n note = notes[0]\n note_dict[\"note\"].append(note.pitch)\n note_dict[\"amp\"].append(note.velocity)\n note_dict[\"gate\"].append(note.duration / dur)\n\n return note_dict\n\nclass MidiFileOut:\n \"\"\" Write events to a MIDI file.\n\n EC: Rewritten using mido\n \"\"\"\n\n def __init__(self, file):\n self.file = file\n self.score = mido.MidiFile(ticks_per_beat=TICKS_PER_BEAT)\n self.tracks = {} # channel->track\n self.time = 0\n self.ticks = 0\n self.tick_delta = 0\n self.num_events = 0\n\n def tick(self, tick_length):\n self.ticks += 1\n if self.num_events > 0:\n # Don't start counting deltas until we receive the first event\n self.tick_delta += 1\n\n def note_on(self, note = 60, velocity = 64, channel = 0, duration = 1):\n #------------------------------------------------------------------------\n # avoid rounding errors\n #------------------------------------------------------------------------\n #time = round(self.time, 5)\n track = self.tracks.get(channel, None)\n if track is None:\n track = self.tracks[channel] = mido.MidiTrack()\n self.score.tracks.append(track)\n track.append(mido.Message('note_on', note=note, velocity=velocity, time=self.tick_delta))\n self.tick_delta = 0\n self.num_events += 1\n\n def note_off(self, note = 60, channel = 0):\n #time = round(self.time, 5)\n track = self.tracks.get(channel, None)\n if track is None:\n track = self.tracks[channel] = mido.MidiTrack()\n self.score.tracks.append(track)\n track.append(mido.Message('note_off', note=note, velocity=127, time=self.tick_delta))\n self.tick_delta = 0\n self.num_events += 1\n\n def all_notes_off(self, channel = 0):\n log.info(\"[midi] All notes off (channel = %d)\" % (channel))\n for n in range(128):\n self.note_off(n, channel)\n\n def write(self):\n self.score.save(file=self.file)\n\nclass PatternWriterMIDI:\n \"\"\" Writes a pattern to a MIDI file.\n Requires the MIDIUtil package:\n https://code.google.com/p/midiutil/ \"\"\"\n\n def __init__(self, filename = \"score.mid\", numtracks = 1):\n from midiutil.MidiFile import MIDIFile\n\n self.score = MIDIFile(numtracks)\n self.track = 0\n self.channel = 0\n self.volume = 64\n\n def add_track(self, pattern, track_number = 0, track_name = \"track\", dur = 1.0):\n time = 0\n\n # naive approach: assume every duration is 1\n # TODO: accept dicts or PDicts\n try:\n for note in pattern:\n vdur = Pattern.value(dur)\n if note is not None and vdur is not None:\n self.score.addNote(track_number, self.channel, note, time, vdur, self.volume)\n time += vdur\n else:\n time += vdur\n except StopIteration:\n #------------------------------------------------------------------------\n # a StopIteration exception means that an input pattern has been\n # exhausted. 
catch it and treat the track as completed.\n #------------------------------------------------------------------------\n pass\n\n def add_timeline(self, timeline):\n #------------------------------------------------------------------------\n # TODO: translate entire timeline into MIDI\n # difficulties: need to handle degree/transpose params\n # need to handle channels properly, and reset numtracks\n #------------------------------------------------------------------------\n pass\n\n def write(self, filename = \"score.mid\"):\n fd = open(filename, 'wb')\n self.score.writeFile(fd)\n fd.close()\n\n","sub_path":"isobar/io/midifile.py","file_name":"midifile.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"521389009","text":"\"\"\"ebooks_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom ebooks_app import views\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index, name='home'),\n path('add/', views.add_book, name='add'),\n path('contact/', views.contact, name='contact'),\n path('detail/', views.detail, name='detail'),\n path('register/', views.register, name='register'),\n path('login/', views.user_login, name='login'),\n path('logout/', views.user_logout, name='logout'),\n path('book/create/', views.BookCreate.as_view(), name='add-book'),\n path('book//update/', views.BookUpdate.as_view(), name='update-book'),\n path('book//delete/', views.BookDelete.as_view(), name='delete-book'),\n path('private/', views.private_view, name='private'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"ebooks_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"207883040","text":"from django.contrib import admin\nfrom inquiry.models import Person\n\n\nclass PersonAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information',\n {'fields': ['name', 'surname',\n 'date_of_birth', 'bio']}),\n ('Contacts',\n {'fields': ['email', 'jabber',\n 'skype', 'other_contacts']}),\n ]\n list_display = ('name', 'surname', 'email')\n\n\nadmin.site.register(Person, PersonAdmin)\n","sub_path":"inquiry/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"111692983","text":"import math \r\n\r\ndef encryptMessage(msg,key): \r\n cipher = \"\" \r\n \r\n # track key indices \r\n index = 0\r\n \r\n msg_len = float(len(msg)) \r\n msg_list = list(msg) \r\n key_list = sorted(list(key)) \r\n \r\n #matrix is of dimension row x col\r\n \r\n col = len(key) \r\n row = int(math.ceil(msg_len / col)) \r\n \r\n # add the padding 
character '_' for empty cells\r\n padding = int((row * col) - msg_len) \r\n msg_list.extend('_' * padding) \r\n \r\n #print(msg_list)\r\n\r\n #create the matrix \r\n matrix = [msg_list[i: i + col] \r\n for i in range(0, len(msg_list), col)] \r\n \r\n # print(\"Matrix Construction...\\n\")\r\n # print(matrix)\r\n\r\n # read matrix column-wise using key \r\n for _ in range(col): \r\n curr_idx = key.index(key_list[index]) \r\n cipher += ''.join([row[curr_idx] \r\n for row in matrix]) \r\n #print(cipher)\r\n index += 1\r\n \r\n return cipher \r\n \r\n# Decryption \r\ndef decryptMessage(cipher,key, flag = 0): \r\n msg = \"\" \r\n \r\n # track key indices \r\n index = 0\r\n \r\n # track msg indices \r\n msg_indx = 0\r\n msg_len = float(len(cipher)) \r\n msg_list = list(cipher) \r\n \r\n # calculate column of the matrix \r\n col = len(key) \r\n row = int(math.ceil(msg_len / col)) \r\n \r\n # convert key into list and sort \r\n # alphabetically so we can access \r\n # each character by its alphabetical position. \r\n key_list = sorted(list(key)) \r\n \r\n # create an empty matrix to \r\n # store deciphered message \r\n deciphered = [] \r\n\r\n for _ in range(row): \r\n deciphered += [[None] * col] \r\n \r\n # Arrange the matrix column wise according \r\n # to permutation order by adding into new matrix \r\n for _ in range(col): \r\n curr_idx = key.index(key_list[index]) \r\n \r\n for j in range(row): \r\n deciphered[j][curr_idx] = msg_list[msg_indx] \r\n msg_indx += 1\r\n index += 1\r\n #print(deciphered)\r\n # convert decrypted msg matrix into a string \r\n try: \r\n msg = ''.join(sum(deciphered, [])) \r\n except TypeError: \r\n raise TypeError(\"This program cannot\", \r\n \"handle repeating words.\") \r\n if flag == 1:\r\n return msg\r\n \r\n else:\r\n null_count = msg.count('_') \r\n \r\n if null_count > 0: \r\n return msg[: -null_count] \r\n \r\n return msg \r\n\r\ndef encryptRailFence(text, key): \r\n \r\n # create the matrix to cipher \r\n # plain text key = rows , \r\n # length(text) = columns \r\n # filling the rail matrix \r\n # to distinguish filled \r\n # spaces from blank ones \r\n rail = [['\\n' for i in range(len(text))] \r\n for j in range(key)] \r\n \r\n # to find the direction \r\n dir_down = False\r\n row, col = 0, 0\r\n \r\n for i in range(len(text)): \r\n \r\n # check the direction of flow \r\n # reverse the direction if we've just \r\n # filled the top or bottom rail \r\n if (row == 0) or (row == key - 1): \r\n dir_down = not dir_down \r\n \r\n # fill the corresponding alphabet \r\n rail[row][col] = text[i] \r\n col += 1\r\n \r\n # find the next row using \r\n # direction flag \r\n if dir_down: \r\n row += 1\r\n else: \r\n row -= 1\r\n # now we can construct the cipher \r\n # using the rail matrix \r\n result = [] \r\n for i in range(key): \r\n for j in range(len(text)): \r\n if rail[i][j] != '\\n': \r\n result.append(rail[i][j]) \r\n return(\"\" . 
join(result)) \r\n \r\n# This function receives cipher-text \r\n# and key and returns the original \r\n# text after decryption \r\ndef decryptRailFence(cipher, key): \r\n \r\n # create the matrix to cipher \r\n # plain text key = rows , \r\n # length(text) = columns \r\n # filling the rail matrix to \r\n # distinguish filled spaces \r\n # from blank ones \r\n rail = [['\\n' for i in range(len(cipher))] \r\n for j in range(key)] \r\n \r\n # to find the direction \r\n dir_down = None\r\n row, col = 0, 0\r\n \r\n # mark the places with '*' \r\n for i in range(len(cipher)): \r\n if row == 0: \r\n dir_down = True\r\n if row == key - 1: \r\n dir_down = False\r\n \r\n # place the marker \r\n rail[row][col] = '*'\r\n col += 1\r\n \r\n # find the next row \r\n # using direction flag \r\n if dir_down: \r\n row += 1\r\n else: \r\n row -= 1\r\n \r\n # now we can construct the \r\n # fill the rail matrix \r\n index = 0\r\n for i in range(key): \r\n for j in range(len(cipher)): \r\n if ((rail[i][j] == '*') and\r\n (index < len(cipher))): \r\n rail[i][j] = cipher[index] \r\n index += 1\r\n \r\n # now read the matrix in \r\n # zig-zag manner to construct \r\n # the resultant text \r\n result = [] \r\n row, col = 0, 0\r\n for i in range(len(cipher)): \r\n \r\n # check the direction of flow \r\n if row == 0: \r\n dir_down = True\r\n if row == key-1: \r\n dir_down = False\r\n \r\n # place the marker \r\n if (rail[row][col] != '*'): \r\n result.append(rail[row][col]) \r\n col += 1\r\n \r\n # find the next row using \r\n # direction flag \r\n if dir_down: \r\n row += 1\r\n else: \r\n row -= 1\r\n return(\"\".join(result)) \r\n \r\n# Driver Code \r\n\r\nif __name__ == '__main__':\r\n ans=True\r\n while ans:\r\n print(\"\\t\\t\\t\\t\\t\\t==========================================================\")\r\n print(\"\\t\\t\\t\\t\\t\\t\\t\\tTRANSPOSITION CIPHERS\")\r\n print(\"\\t\\t\\t\\t\\t\\t==========================================================\")\r\n print (\"\"\"\r\n\r\n 1.Columnar Transposition\r\n 2.Double Columnar Transposition\r\n 3.Rail Fence Cipher\r\n 4.Exit/Quit\r\n \"\"\")\r\n ans=input(\"Please choose one of the above encryption methods: \\n >> \")\r\n\r\n if ans!=\"4\":\r\n\r\n choice=input(\"Choose 1. Encryption 2. 
Decryption \n >> \")\r\n            msg=input(\"Please enter the message you wish to encrypt/decrypt below:\n >> \")\r\n            if ans==\"1\": \r\n                print(\"\nYou have picked Columnar Transposition!\")\r\n                n=input(\"Please enter the key: \n >> \")\r\n                if choice==\"1\":\r\n                    cipher = encryptMessage(msg,n)\r\n                else: \r\n                    cipher = decryptMessage(msg,n) \r\n            elif ans==\"2\":\r\n                print(\"\nDouble Columnar Transposition\")\r\n                #output = \"1\"\r\n                #flag = 1\r\n                #while (output == \"1\" or output == \"2\") and flag == 1:\r\n                '''print(\" Would you like to use the same key for both transpositions?\")\r\n                    output = input(\" 1 for YES or 2 for NO :\")\r\n                    if output == \"1\":\r\n                        key1 = key2 = input(\" Enter the common key:\")\r\n                        flag = 0\r\n                    elif output == \"2\":\r\n                        key1 = input(\" Enter the first key:\")\r\n                        key2 = input(\" Enter the second key:\")\r\n                        flag = 0\r\n                    else:\r\n                        print(\"Please enter a valid choice\")'''\r\n                key = input(\"Please enter the key: \n >> \")\r\n                if choice==\"1\":\r\n                    cipher1 = encryptMessage(msg,key)\r\n                    cipher = encryptMessage(cipher1,key)\r\n                else: \r\n                    cipher1 = decryptMessage(msg,key,1) \r\n                    cipher = decryptMessage(cipher1,key)\r\n            elif ans==\"3\":\r\n                print(\"\n Rail Fence Cipher\")\r\n                n=int(input(\"Please enter the key: \n >> \"))\r\n                if choice==\"1\":\r\n                    cipher = encryptRailFence(msg, n)\r\n                else: \r\n                    cipher = decryptRailFence(msg, n)\r\n\r\n            if choice == \"1\":\r\n                print(\"Encrypted Message: {}\". \r\n                      format(cipher)) \r\n            else:\r\n                print(\"Decrypted Message: {}\". \r\n                      format(cipher)) \r\n            \r\n        elif ans==\"4\":\r\n            print(\"\nGoodbye!\")\r\n            exit() \r\n        else:\r\n            print(\"\nNot Valid Choice Try again\")\r\n            exit()\r\n        \r\n        \r\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"152243095","text":"# -*- coding: utf-8 -*-\nimport openpyxl\nfrom common.ReadConfig import ReadConfig\n\n\nclass DoExcel:\n\n    @staticmethod\n    def get_top_row(ExcelName, SheetName):\n        top_row = {}\n        wb = openpyxl.load_workbook(ExcelName)\n        sheet = wb[SheetName]\n        for i in range(1, (sheet.max_column + 1)):\n            top_row[sheet.cell(1, i).value] = i\n        return top_row, sheet\n\n    def read_excel(self, ExcelReadName, SheetReadName):\n        \"\"\"Read the excel contents and return them as a list of dicts, i.e. [{},{},{}]\"\"\"\n        excel_list = []\n        top_row_read, sheet = DoExcel.get_top_row(ExcelReadName, SheetReadName)\n        for i in range(2, (sheet.max_row + 1)):\n            each_dict = {}\n            if (str(ReadConfig(\"module\",\"module\").readConfig()) == \"全部\") or (ReadConfig(\"module\",\"id\").readConfig() == \"全部\"):\n                for item in top_row_read.keys():\n                    each_dict[item] = sheet.cell(i, top_row_read[item]).value\n                excel_list.append(each_dict)\n            else:\n                if sheet.cell(i,top_row_read[\"module\"]).value in str(ReadConfig(\"module\",\"module\").readConfig()).split(\",\")\\\n                        or (str(sheet.cell(i,top_row_read[\"id\"])) in str(ReadConfig(\"module\",\"id\").readConfig()).split(\",\")):\n                    for item in top_row_read.keys():\n                        each_dict[item] = sheet.cell(i, top_row_read[item]).value\n                    excel_list.append(each_dict)\n                else:\n                    continue\n        return excel_list\n\n    def write_excel(self, ExcelWriteName, SheetWriteName, Excel_list=[]):\n        top_row_write, aa = DoExcel.get_top_row(ExcelWriteName, SheetWriteName)\n        wb = openpyxl.load_workbook(ExcelWriteName)\n        sheet = wb[SheetWriteName]\n        # if sheet.cell(j, 1).value is None: used to find the smallest row number whose first-column cell is empty\n        for item in Excel_list:\n            for i in item.keys():\n                # a case's id is one less than its row number; write the data into the matching cell by id\n                sheet.cell(row=(int(item[\"id\"])+1), 
column=top_row_write[i], value=str(item[i]))\n wb.save(ExcelWriteName)\n wb.close()\n\n def updata_mobile(self, ExcelReadName, SheetReadName):\n pass\n\nif __name__ == \"__main__\":\n pass\n # list1 = [{\"id\": 1, \"TestResult\": 44, \"real_code\": 55, \"res_json\": 66}]\n # DoExcel().write_excel(\"/Users/liqingju/Documents/python/My_project/test_data/testCase.xlsx\", \"testCaseRead\", list1)\n # print(DoExcel().read_excel(\"/Users/liqingju/Documents/python/My_project/test_data/testCase.xlsx\", \"testCaseRead\"))\n\n # print(DoExcel().read_excel(\"../test_data/testCase.xlsx\",\"testCaseRead\"))\n # print(str(ReadConfig(\"module\",\"module\").readConfig() == \"全部\") or ReadConfig(\"module\",\"id\").readConfig() == \"全部\")","sub_path":"common/DoExcel.py","file_name":"DoExcel.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69316018","text":"\n# Libraries\nimport numpy as np\nimport pandas as pd\nimport sys\nimport time\nimport gzip\nimport shutil\nimport os\nimport imp\nimport matplotlib.pyplot as plt\nimport math\n\n# Project Library\nfrom src.lib import FCC_lib_data_v1 as ld\nfrom src.lib import FCC_lib_models_NN_keras_v1 as lm\nfrom src.lib import FCC_lib_train_NN_v1 as lt\nfrom src.lib import FCC_lib_2Dimg_v1 as ltt\n\nMODEL_ID = \"NN_BxB01\"\nSTAGE = \"S2_VSE\"\n\nclass Model(object):\n \n def __init__(self):\n \n self.reset_variables()\n self.reset_parameters_DATA()\n self.reset_parameters_S1_MODEL()\n self.reset_parameters_MODEL()\n self.reset_parameters_TRANSFORMATIONS()\n self.reset_parameters_TRAIN()\n self.reset_parameters_PREDICT()\n \n def reset_parameters_DATA(self):\n # Parameters: DATA\n self.init_patch_size =(448, 224)\n self.final_patch_size=(224, 56)\n self.size=(192, 32)\n self.channels = 1\n self.DT_mean = np.array([0.5*255] * self.channels)\n self.DT_std = np.array([0.5*255] * self.channels)\n self.DT_zero = (np.array([0] * self.channels) - self.DT_mean) / self.DT_std\n \n def reset_parameters_S1_MODEL(self):\n self.S1_STAGE, self.S1_MODEL_ID = 'S1_ROI', 'NN_AxC01'\n S1_src_file = 'src/{}_models/{}_{}_model.py'.format(self.S1_STAGE, self.S1_STAGE, self.S1_MODEL_ID)\n self.S1_Model = imp.load_source('', S1_src_file).Model('test')\n filename = os.path.join(self.S1_Model.path_predictions, '{}_{}_pred.csv.gz'\\\n .format(self.S1_STAGE, self.S1_MODEL_ID))\n try:\n self.S1_Model_DF = pd.read_csv(filename)\n except:\n self.S1_Model_DF = None\n \n def reset_parameters_MODEL(self):\n # Parameters: MODEL \n from keras.optimizers import Adam\n \n self.NNmodel_FUNC = lm.get_CNN_C_r0\n self.isz2D = self.size # Size of 2D patches\n self.model_size = (self.channels, self.isz2D, 1) #channels, ISZ, classes\n self.model_desc = \"Cnn\"\n self.model_args = {\n 'batch_norm': True,\n 'conv_Blks': [[2, 16, True, 0.5, False], # nb_convs, nb_filters, pool, drop, separable\n [2, 32, True, 0.5, False], \n [2, 64, True, 0.5, False],\n [2, 128, True, 0.5, False],\n [2, 256, True, 0.5, False],\n ], \n 'conv_Blk_args': {'pad': 0,\n 'kernel': 3,\n 'conv_strides': 1,\n 'conv_padding': \"same\",\n 'pool_size': 2, \n 'pool_strides': 2,\n 'pool_padding': \"same\",\n },\n 'dense_Blks': [[512, 0.5],\n [512, 0.5]\n ], \n 'final_activation': 'linear',\n 'optimizer': Adam(lr=0.01),\n 'loss': 'mae', \n 'metrics': ['mae','mse'] \n }\n \n def reset_parameters_TRANSFORMATIONS(self):\n # Parameters: TRANSFORMATIONS\n self.data_transforms = {\n 'train': ltt.Compose([\n ltt.RandomRotate(5),\n 
ltt.CenterCrop((224,32)),\n ltt.RandomCrop((192,32)),\n ltt.RandomVerticalFlip(p=0.5),\n ltt.RandomHorizontalFlip(p=0.5),\n ltt.ToArray(np.float16),\n ]),\n 'valid': ltt.Compose([\n ltt.CenterCrop((192,32)),\n ltt.ToArray(np.float16),\n ]),\n 'test': ltt.Compose([\n ltt.CenterCrop((192,32)),\n ltt.ToArray(np.float16),\n ]),\n }\n self.TT_scale = ltt.Scale(size=self.final_patch_size, method='PIL')\n \n def reset_parameters_TRAIN(self):\n # Parameters: TRAINING\n self.fold_column = 'Fs3'\n self.seed = 0\n self.gen_comm_params = {'seed': None} \n self.train_gen_params = self.gen_comm_params.copy() \n self.valid_gen_params = self.gen_comm_params.copy() \n self.train_gen_params = self.gen_comm_params.copy() \n self.valid_gen_params.update({'shuffle': False, }) \n \n def reset_parameters_PREDICT(self): \n self.predict_gen_params = self.gen_comm_params.copy() \n self.predict_gen_params.update({'shuffle':False, 'predicting':True})\n self.predict_batch_size = 128\n \n def reset_variables(self):\n # Initializations\n self.dsetID = None\n self.Data = ld.FishDATA()\n self.img_raw = None\n self.img = None\n self.info = None\n \n self.output_dir = str(self.Data.path_settings['path_outputs_{}'.format(STAGE)])\n self.NNmodel = None\n self.stage = STAGE\n self.model_id = MODEL_ID\n self.weights_format = '{}_{}_{}_weights'.format(self.stage, self.model_id, '{fold_id}')\n self.path_predictions = os.path.join(self.output_dir, self.model_id)\n self.weights_file = None\n self.prev_foldID = None\n \n \n def read_image(self, itype, image_id, \n frame = 'example', # int, 'all', 'example'(0 or max_size)\n #'all_labeled' --> only if training\n read_labels=False, split_wrap_imgs = False, seed=None, \n use_cache=None, verbose=False):\n '''Custom read_image function for this model.\n '''\n\n start_time_L1 = time.time()\n \n # Initiate data class & variables\n labels=[] if read_labels else None\n info={}\n \n # Read image.\n vidD = self.Data.load_vidDATA(itype, image_id)\n \n # Read bbox from S1_Model\n use_cache = self.Data.exec_settings['cache'] == \"True\" if use_cache is None else use_cache\n \n try:\n bbox_df = self.S1_Model_DF\n bbox_df = bbox_df[bbox_df.image_id == image_id]\n bbox_df = bbox_df[bbox_df.ich == 0]\n bbox_irow = bbox_df.iloc[0]\n except:\n bbox_df, _, _, _, _ = self.S1_Model.get_labels(itype, image_id, use_cache=use_cache, verbose=verbose)\n bbox_df = bbox_df[bbox_df.image_id == image_id]\n bbox_df = bbox_df[bbox_df.ich == 0]\n bbox_irow = bbox_df.iloc[0]\n \n xc, yc, ang = int(bbox_irow.xc), int(bbox_irow.yc), int(bbox_irow.ang)\n \n try:\n max_frame = int(bbox_irow.max_frame)\n except:\n max_frame = None\n \n # Read annotations\n df = self.Data.annotations\n mini_df = df[df.video_id == image_id]\n nb_frames = len(mini_df)\n \n \n # Create frames list\n if frame == 'all':\n frames = range(len(vidD.vi))\n elif frame == 'example':\n frames = [0,] if max_frame is None else [max_frame,]\n elif frame == 'all_labeled' and nb_frames > 0:\n frames = mini_df.frame.values.tolist()\n else:\n frames = [int(frame),]\n \n # Extract patches\n patches = []\n for i_frame in frames:\n patch = self.Data.extract_patch(vidD.vi.get_data(i_frame),(xc, yc), ang, \n size = self.init_patch_size, convert_BnW=True)\n \n #preprocess\n patch = patch.astype(np.float16)\n patch = lt.standarize_image(patch, self.DT_mean, self.DT_std, on_site=True)\n patch = self.TT_scale(patch)\n \n patches.append(patch)\n \n if read_labels:\n label = mini_df[mini_df.frame == i_frame]['length']\n if len(label) == 0:\n labels.append(np.nan)\n 
else:\n labels.append(label.values[0].astype(np.uint16))\n\n \n # Include usefull information\n info = {'meta': vidD.vi._meta}\n \n # wrap results\n if len(patches)>1:\n if split_wrap_imgs:\n wrap_img = [patches, labels, info]\n else:\n wrap_img = [[patches[s1], labels[s1], info] for s1 in range(len(patches))]\n else:\n wrap_img = [patches[0], labels[0], info]\n \n if verbose:\n print(\"Read image {} in {:.2f} s\".format(image_id, (time.time() - start_time_L1)/1))\n \n return wrap_img\n\n def batch_generator(self, datafeed, batch_size=1, params={}):\n \n # Parameters\n seed = params.get('seed', None)\n shuffle = params.get('shuffle', True)\n predicting = params.get('predicting', False)\n \n sample_index = np.arange(len(datafeed))\n number_of_batches = np.ceil(len(sample_index)/batch_size)\n \n if seed is not None:\n np.random.seed(seed)\n if shuffle:\n np.random.shuffle(sample_index)\n \n counter = 0\n while True:\n batch_index = sample_index[batch_size*counter:batch_size*(counter+1)]\n x_trn, y_trn = datafeed[batch_index]\n \n # Yield\n counter += 1\n if predicting:\n yield x_trn\n else:\n yield x_trn, y_trn\n \n if (counter == number_of_batches):\n if shuffle:\n np.random.shuffle(sample_index)\n counter = 0 \n \n def get_NNmodel(self, model_size=None, model_args=None, NNmodel_FUNC=None):\n \n model_size = self.model_size if model_size is None else model_size\n model_args = self.model_args if model_args is None else model_args\n NNmodel_FUNC = self.NNmodel_FUNC if NNmodel_FUNC is None else NNmodel_FUNC\n \n NNmodel = NNmodel_FUNC(channels = model_size[0], isz = model_size[1], classes = model_size[2], \n args_dict = model_args)\n self.NNmodel = NNmodel\n \n return NNmodel\n \n def load_weights(self, weights_filename, weights_path=None, verbose=False):\n weights_path = self.output_dir if weights_path is None else weights_path\n self.weights_file = '{}{}.hdf5'.format(weights_path, weights_filename)\n if self.NNmodel is None:\n self.get_NNmodel()\n self.NNmodel.load_weights(self.weights_file)\n if verbose:\n print(' Read weights: {}'.format(self.weights_file))\n\n def predict(self, image, pred_type='test'):\n '''\n image: img (np.array) or image_id\n '''\n img = image if (type(image) == np.ndarray) else self.read_image(image)[0]\n \n if self.NNmodel is None:\n self.get_NNmodel()\n if self.weights_file is None:\n sys.exit(\"Weights not loaded\")\n \n #apply transformations\n timg = self.data_transforms[pred_type](img)\n \n # make batch size = 1\n timg = timg[np.newaxis, ...]\n \n # Predict\n pred = self.NNmodel.predict(timg)\n \n # Change predictions dtype\n pred = pred.astype(np.float16)\n\n return pred\n \n def predict_BATCH(self, images, pred_type='test', batch_size = None):\n '''\n images: list(img (np.array)) or image_id\n '''\n \n if self.NNmodel is None:\n self.get_NNmodel()\n if self.weights_file is None:\n sys.exit(\"Weights not loaded\")\n \n #apply transformations\n timgs = self.data_transforms[pred_type](images)\n \n # convert to array\n timgs = np.array(timgs)\n \n # predict in batches\n preds = []\n batch_size = self.predict_batch_size if batch_size is None else batch_size\n for start in range(0, timgs.shape[0], batch_size):\n end = min(start+batch_size, timgs.shape[0])\n pred = self.NNmodel.predict_on_batch(timgs[start:end])\n preds.append(pred)\n preds = np.vstack(preds) \n \n # Change predictions dtype\n preds = preds.astype(np.float16)\n\n return preds \n \n def get_predictions(self, itype, image_id,\n return_imgs = False, avoid_read_weights=False, return_score = False, \n 
use_cache=None, force_save=False, verbose=True):\n \n start_time_L1 = time.time()\n use_cache = self.Data.exec_settings['cache'] == \"True\" if use_cache is None else use_cache\n pred = None\n score = None\n score_txt = 'R2_score'\n \n if use_cache & (not force_save):\n try:\n file_to_load = os.path.join(self.path_predictions, itype, '{}_{}_pred.npy.gz'.format(itype, image_id))\n with gzip.open(file_to_load, 'rb') as f:\n pred = np.load(f)\n if not return_imgs:\n if verbose:\n print(\"Read prediction {}_{} in {:.2f} s\".format(itype, image_id, \n (time.time() - start_time_L1)/1))\n return pred\n except:\n if verbose:\n print(\"File not in cache\")\n \n imgs, labels, info = self.read_image(itype, image_id, frame = 'all', split_wrap_imgs = True,\n read_labels=(itype=='train'), verbose=verbose)\n \n if pred is None:\n \n #get weights\n if (self.weights_file is None) or not avoid_read_weights:\n self.dsetID = ld.read_dsetID() if self.dsetID is None else self.dsetID\n fold_id = self.dsetID.loc[(self.dsetID.video_id == image_id) & (self.dsetID.itype == itype), \n self.fold_column]\n fold_id = fold_id.values[0]\n if self.prev_foldID != fold_id:\n weight_file = self.weights_format.format(fold_id=fold_id)\n self.load_weights(weight_file, verbose=verbose)\n self.prev_foldID = fold_id \n \n # predict\n pred = self.predict_BATCH(imgs)\n \n # Save cache\n if use_cache|force_save:\n if not os.path.exists(os.path.join(self.path_predictions, itype)):\n os.makedirs(os.path.join(self.path_predictions, itype))\n file_to_save = os.path.join(self.path_predictions, itype, '{}_{}_pred.npy'.format(itype, image_id)) \n np.save(file_to_save, pred)\n with open(file_to_save, 'rb') as f_in, gzip.open(file_to_save + '.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(file_to_save)\n \n \n # evaluate\n if labels is not None:\n from sklearn.metrics import r2_score\n np_labels = np.vstack(labels)[:,0]\n np_preds = pred[:,0]\n y_true = (np_labels[np.logical_not(np.isnan(np_labels))]).astype(np.float32)\n y_pred = (np_preds[np.logical_not(np.isnan(np_labels))]).astype(np.float32)\n score = r2_score(y_true, y_pred)\n \n if verbose: \n if score is not None:\n print(\"Read prediction {}_{} ({}: {:.5f}) in {:.2f} s\".format(itype, image_id, score_txt, score, \n (time.time() - start_time_L1)/1)) \n else:\n print(\"Read prediction {}_{} in {:.2f} s\".format(itype, image_id, (time.time() - start_time_L1)/1)) \n \n if return_imgs:\n if return_score:\n return pred, imgs, labels, score\n else:\n return pred, imgs, labels\n \n if return_score:\n return pred, score\n else:\n return pred\n \n def get_predictions_BATCH(self, itype_list, image_id_list, imgs_list, batch_size = None, verbose=False):\n '''\n Predict from a list of imgs (outputs from self.read_image)\n '''\n \n for itype, image_id, imgs in zip(itype_list, image_id_list, imgs_list):\n \n #get weights\n if (self.weights_file is None):\n self.dsetID = ld.read_dsetID() if self.dsetID is None else self.dsetID\n fold_id = self.dsetID.loc[(self.dsetID.video_id == image_id) & (self.dsetID.itype == itype), \n self.fold_column]\n fold_id = fold_id.values[0]\n if self.prev_foldID != fold_id:\n weight_file = self.weights_format.format(fold_id=fold_id)\n self.load_weights(weight_file, verbose=False)\n self.prev_foldID = fold_id \n \n # predict\n pred = self.predict_BATCH(imgs, batch_size = batch_size)\n \n # Save cache\n if not os.path.exists(os.path.join(self.path_predictions, itype)):\n os.makedirs(os.path.join(self.path_predictions, itype))\n file_to_save = 
os.path.join(self.path_predictions, itype, '{}_{}_pred.npy'.format(itype, image_id)) \n np.save(file_to_save, pred)\n with open(file_to_save, 'rb') as f_in, gzip.open(file_to_save + '.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(file_to_save)\n\n def show_imgs(self, imgs, labels=None, preds=None, grid = None, size=(12,6), title=\"\"):\n nb_frames = len(imgs)\n \n imgs_show = [np.transpose((s[0]*125.7+127.5).astype(np.uint8), (1,0)) for s in imgs]\n \n # plot images\n if grid is None:\n nbx = int(math.sqrt(nb_frames))\n nby = int(np.ceil(nb_frames/float(nbx)))\n else:\n nbx, nby = grid\n fig,axes = plt.subplots(nbx,nby,figsize=size)\n fig.suptitle(title)\n ax = axes.ravel()\n \n for i, img in enumerate(imgs_show):\n ax[i].imshow(img, cmap='gray')\n try:\n i_label = labels[i]\n except:\n i_label = np.nan\n try:\n i_pred = preds[i][0]\n except:\n i_pred = np.nan\n ititle = 'True: {:.0f} - Predicted: {:.0f}'.format(i_label, i_pred)\n ax[i].set_title(ititle) \n if i == len(ax)-1:\n break\n plt.show()\n \n def show_preds(self, preds, labels=None, size=(12,6), title=\"\"):\n \n preds_data = preds[:,0]\n if labels is not None:\n labels_data = np.nan_to_num(np.vstack(labels)[:,0])\n data = np.transpose(np.vstack([preds_data, labels_data]), (1,0))\n else:\n data = np.transpose(np.vstack([preds_data, ]), (1,0))\n \n fig,ax = plt.subplots(1,1,figsize=size)\n ax.plot(data)\n ax.set_title(title) \n plt.show()","sub_path":"3rd-place/src/S2_VSE_models/S2_VSE_NN_BxB01_model.py","file_name":"S2_VSE_NN_BxB01_model.py","file_ext":"py","file_size_in_byte":19197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"404320184","text":"from .base import FunctionalTest\r\n\r\nclass LayoutAndStylingTest(FunctionalTest):\r\n\r\n def test_layout_and_styling(self):\r\n # Анна заходить на головну сторінку\r\n self.browser.get(self.server_url)\r\n self.browser.set_window_size(1024, 768)\r\n\r\n # перевіряє чи форма розташована приблизно по середині екрану\r\n form = self.browser.find_element_by_id('addStudentForm')\r\n self.assertAlmostEqual(\r\n form.location['x'] + form.size['width'] / 2,\r\n 512,\r\n delta=15\r\n )\r\n\r\n # створює новий клас і перевіряє чи список учнів по середині екрану\r\n self.fill_and_send_students_full_name('Pupkin', 'Vasya', 'Ivanovych')\r\n\r\n table = self.browser.find_element_by_id('id_student_table')\r\n self.assertAlmostEqual(\r\n table.location['x'] + table.size['width'] / 2,\r\n 512,\r\n delta=15\r\n )\r\n","sub_path":"functional_tests/test_simple_layout_and_styling.py","file_name":"test_simple_layout_and_styling.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"191240087","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\nimport cv2\n\nimport os\nimport torch\nimport pprint\nimport json\nimport importlib\nimport numpy as np\nimport matplotlib\n\nimport _init_paths\n\nfrom detectors.detector_factory import detector_factory\nfrom opts_deploy import opts\n\n\nimage_ext = ['jpg', 'jpeg', 'png', 'webp']\nvideo_ext = ['mp4', 'mov', 'avi', 'mkv']\n\nopt = opts().init()\n\nclass CenterNetEngine(object):\n def __init__(self, task = \"ctdet_drone\"):\n opt.load_model = \"../exp/ctdet_drone/dla_34/model_5_1.0785181907108123.pth\"\n opt.task = task\n opt.dataset = 'visdrone'\n opt.arch = \"dla_34\"\n\n 
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str\n opt.debug = max(opt.debug, 1)\n Detector = detector_factory[opt.task]\n self.detector = Detector(opt)\n\n self.num_classes = opt.num_classes\n\n self.class_name = ['__background__', 'person']\n \n self._valid_ids = np.arange(self.num_classes, dtype=np.int32)\n\n def show_image(self, img, score_min = 0.5, save = False):\n\n start_time = time.time()\n ret = self.detector.run(img)\n end_time = time.time()\n infer_time = end_time - start_time\n print(\"Inference Time:\" + str(infer_time) + \"s\")\n\n #results[image_name] = ret['results']\n det_list = self.convert_eval_format(ret['results'])\n\n return det_list\n\n def show_video(self, video_file, nnet, drawer, score_min, save = False): # , debug): <--- UNTESTED (Another way of adding bboxes)\n\n cap = cv2.VideoCapture(video_file)\n fps = cap.get(cv2.CAP_PROP_FPS)\n print(\"FPS:\" + str(fps))\n\n #sample = 0.5 # every sec take one frame # Use only if you do not want the infer every frame\n #sample_num = sample * fps\n\n if not cap.isOpened():\n print(\"Error in opening video stream or file\")\n\n frame_count = 0\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n frame_count += 1\n start_time = time.time()\n detections = kp_detection(frame, nnet, score_min) # , debug) <--- UNTESTED (Another way of adding bboxes)\n end_time = time.time()\n infer_time = end_time - start_time\n print(\"Inference Time:\" + str(infer_time) + \"s\")\n # print(\"~~~~~Detections~~~~~\")\n # print(detections)\n\n #if sample_num%frame_count != 0: # Use only if you do not want the infer every frame\n # continue\n\n # do what you want\n # TODO get center and corner (nnet)\n # TODO user drawer on frame\n \n frame_det = drawer.draw_dets_video(frame, detections, infer_time)\n cv2.imshow(\"Frame\", frame_det)\n\n if save:\n cv2.imwrite('./Video_Frames/To_Convert/' + str(frame_count) + \".jpg\", frame_det)\n\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n break\n\n else:\n break\n\n cap.release()\n\n cv2.destroyAllWindows()\n\n def to_int(self, x):\n return int(float(\"{}\".format(x)))\n\n def convert_eval_format(self, all_bboxes):\n det_list = list()\n\n print(all_bboxes)\n for image_id in all_bboxes:\n for cls_ind_i in range(len(all_bboxes[image_id])):\n category_id = self.class_name[image_id-1]\n print(all_bboxes[image_id][cls_ind_i])\n #for bbox in all_bboxes[image_id][cls_ind_i]:\n all_bboxes[image_id][cls_ind_i][2] -= all_bboxes[image_id][cls_ind_i][0]\n all_bboxes[image_id][cls_ind_i][3] -= all_bboxes[image_id][cls_ind_i][1]\n score = all_bboxes[image_id][cls_ind_i][4]\n bbox_out = list(map(self.to_int, all_bboxes[image_id][cls_ind_i][0:4]))\n\n detection = {\n \"bbox\": bbox_out,\n \"category_id\": category_id,\n \"score\": float(score)\n }\n\n if detection[\"score\"] >= 0.3:\n det_list.append(detection)\n\n return det_list\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Video Demo\")\n parser.add_argument(\"--model\", dest=\"json_file\", help=\"which .json file in ./confg\", type=str) # CenterNet-52 or CenterNet-104\n parser.add_argument(\"--testiter\", dest=\"testiter\",\n help=\"test at iteration i\",\n default=None, type=int) # Used to identify pretrained model\n parser.add_argument(\"--file\", dest=\"file_dir\", help=\"video file path\", type=str) # Path to video for detection\n parser.add_argument(\"--score\", dest=\"score_min\", help=\"Remove bboxes of those scores < score\", \n type=float) # Minimise bboxes\n parser.add_argument(\"--save\", action=\"store_true\")\n 
#parser.add_argument(\"--debug\", action=\"store_true\") \n args = parser.parse_args()\n\n print(\"Video File:\" + str(args.file_dir))\n\n json_file = os.path.join(system_configs.config_dir, args.json_file + \".json\")\n\n print(\"json_file: {}\".format(json_file))\n\n with open(json_file, \"r\") as f:\n configs = json.load(f) # Read .json file to retrieve 'system' and 'db' parameters\n\n configs[\"system\"][\"snapshot_name\"] = args.json_file # Insert model's name into configuration file\n system_configs.update_config(configs[\"system\"]) # Update config.py based on retrieved 'system' parameters\n db_configs.update_config(configs[\"db\"]) # Update db/base.py based on retrieved 'db' parameters\n\n print(\"system config...\")\n pprint.pprint(system_configs.full) # Show 'system' parameters in terminal\n\n print(\"db config...\")\n pprint.pprint(db_configs.full) # Show 'db' parameters in terminal\n\n print(\"loading parameters at iteration: {}\".format(args.testiter)) # Show args.testiter in terminal\n\n print(\"building neural network...\")\n nnet = NetworkFactory() # Initialise CenterNet's neural network\n print(\"loading parameters...\")\n nnet.load_params(args.testiter) # To locate CenterNet's pretrained model\n\n drawer = Drawer() # Initialise Drawer to add bboxes in frames later\n\n #nnet.cpu() # Uncomment if using cpu\n nnet.cuda() # Comment if using cpu\n nnet.eval_mode()\n\n if args.file_dir[args.file_dir.rfind('.') + 1:].lower() in video_ext:\n show_video(args.file_dir, nnet, drawer, args.score_min, args.save)\n else:\n show_image(args.file_dir, nnet, drawer, args.score_min, args.save)\n","sub_path":"src/CenterNetEngine.py","file_name":"CenterNetEngine.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641516860","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '2.0.0'\n\nsetup(name='zopyx.smartprintng.core',\n version=version,\n description=\"SmartPrintNG core engine\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='SmartPrintNG Zope Python',\n author='Andreas Jung',\n author_email='info@zopyx.com',\n url='',\n license='ZPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['zopyx', 'zopyx.smartprintng'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'zope.component',\n 'zope.pagetemplate',\n# 'zope.app.testing',\n 'zopyx.convert2',\n # -*- Extra requirements: -*-\n ],\n extras_require=dict(test=[ 'zope.testing', 'zope.app.testing']),\n entry_points={'console_scripts': ['spng-demo=zopyx.smartprintng.core.demo.demo_app:main',\n 'spng-demo2=zopyx.smartprintng.core.demo2.demo_app:main',\n ]},\n )\n","sub_path":"pypi_install_script/zopyx.smartprintng.core-2.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"180217786","text":"'''\nInterpolation functions shared by mesh generators.\nCreated on Nov 15, 2017\n\n@author: Richard Christie\n'''\n\nimport math\nimport scaffoldmaker.utils.vector as vector\n\ngaussXi3 = ( (-math.sqrt(0.6)+1.0)/2.0, 0.5, (+math.sqrt(0.6)+1.0)/2.0 )\ngaussWt3 = ( 
5.0/18.0, 4.0/9.0, 5.0/18.0 )\n\ndef interpolateCubicHermite(v1, d1, v2, d2, xi):\n \"\"\"\n Return cubic Hermite interpolated value of tuples v1, d1 (end 1) to v2, d2 (end 2) for xi in [0,1]\n :return: tuple containing result\n \"\"\"\n xi2 = xi*xi\n xi3 = xi2*xi\n f1 = 1.0 - 3.0*xi2 + 2.0*xi3\n f2 = xi - 2.0*xi2 + xi3\n f3 = 3.0*xi2 - 2.0*xi3\n f4 = -xi2 + xi3\n return tuple([ (f1*v1[i] + f2*d1[i] + f3*v2[i] + f4*d2[i]) for i in range(len(v1)) ])\n\ndef interpolateCubicHermiteDerivative(v1, d1, v2, d2, xi):\n \"\"\"\n Return cubic Hermite interpolated derivatives of tuples v1, d1 (end 1) to v2, d2 (end 2) for xi in [0,1]\n :return: tuple containing result\n \"\"\"\n xi2 = xi*xi\n f1 = -6.0*xi + 6.0*xi2\n f2 = 1.0 - 4.0*xi + 3.0*xi2\n f3 = 6.0*xi - 6.0*xi2\n f4 = -2.0*xi + 3.0*xi2\n return tuple([ (f1*v1[i] + f2*d1[i] + f3*v2[i] + f4*d2[i]) for i in range(len(v1)) ])\n\ndef interpolateCubicHermiteSecondDerivative(v1, d1, v2, d2, xi):\n \"\"\"\n Return cubic Hermite interpolated second derivatives of tuples v1, d1 (end 1) to v2, d2 (end 2) for xi in [0,1]\n :return: tuple containing result\n \"\"\"\n f1 = -6.0 + 12.0*xi\n f2 = -4.0 + 6.0*xi\n f3 = 6.0 - 12.0*xi\n f4 = -2.0 + 6.0*xi\n return tuple([ (f1*v1[i] + f2*d1[i] + f3*v2[i] + f4*d2[i]) for i in range(len(v1)) ])\n\ndef computeCubicHermiteArcLength(v1, d1, v2, d2, rescaleDerivatives):\n \"\"\"\n Compute arc length between v1 and v2, scaling unit d1 and d2.\n Iterative; not optimised.\n :param d1: Initial derivative at v1.\n :param d2: Initial derivative at v2.\n :param rescaleDerivatives: If True, rescale initial d1 and d2 to |v2 - v|\n :return: Arc length.\n \"\"\"\n if rescaleDerivatives:\n lastArcLength = math.sqrt(sum((v2[i] - v1[i])*(v2[i] - v1[i]) for i in range(len(v1))))\n else:\n lastArcLength = getCubicHermiteArcLength(v1, d1, v2, d2)\n d1 = vector.normalise(d1)\n d2 = vector.normalise(d2)\n tol = 1.0E-6\n for iters in range(100):\n #print('iter',iters,'=',lastArcLength)\n d1s = [lastArcLength*d for d in d1]\n d2s = [lastArcLength*d for d in d2]\n arcLength = getCubicHermiteArcLength(v1, d1s, v2, d2s)\n if iters > 9:\n arcLength = 0.8*arcLength + 0.2*lastArcLength\n if math.fabs(arcLength - lastArcLength) < tol*arcLength:\n #print('computeCubicHermiteArcLength converged at iter',iters,'=',arcLength,', closeness', math.fabs(arcLength - lastArcLength))\n return arcLength\n lastArcLength = arcLength\n print('computeCubicHermiteArcLength Max iters reached:',iters,'=',arcLength,', closeness', math.fabs(arcLength - lastArcLength))\n return arcLength\n\ndef getCubicHermiteArcLength(v1, d1, v2, d2):\n '''\n :return: Arc length of cubic curve using 3 point Gaussian quadrature.\n '''\n arcLength = 0.0\n for i in range(3):\n dm = interpolateCubicHermiteDerivative(v1, d1, v2, d2, gaussXi3[i])\n arcLength += gaussWt3[i]*math.sqrt(sum(d*d for d in dm))\n return arcLength\n\ndef getCubicHermiteCurvature(v1, d1, v2, d2, radialVector, xi):\n \"\"\"\n :param radialVector: Radial direction, assumed unit normal to curve tangent at point.\n :return: Scalar curvature (1/R) of the 1-D cubic Hermite curve.\n \"\"\"\n tangent = interpolateCubicHermiteDerivative(v1, d1, v2, d2, xi)\n dTangent = interpolateCubicHermiteSecondDerivative(v1, d1, v2, d2, xi)\n #tangentVector = vector.normalise(tangent)\n #tangentCurvature = vector.dotproduct(dTangent, tangentVector)\n radialCurvature = vector.dotproduct(dTangent, radialVector)\n magTangent = vector.magnitude(tangent)\n curvature = radialCurvature/(magTangent*magTangent)\n return 
curvature\n","sub_path":"scaffoldmaker/utils/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"131881198","text":"from src.utils.tk import TKUtils\n\nfrom src.view.activity.actions import Actions\nfrom src.view.activity.list import ActivitiesList\nfrom src.view.activity.register import RegisterWindow\n\n\nclass Activity(TKUtils.Container()):\n\n def __init__(self, master, controller, commands):\n super().__init__(master=master)\n self.pack(side='bottom')\n\n self.commands = commands\n self.__controller = controller\n\n self.actions = None\n self.activities_list = None\n self.register_window = None\n\n self._create_activities_list()\n self._create_actions()\n\n def _create_actions(self):\n commands = {}\n\n commands['raffle'] = self.commands['raffle']\n commands['register'] = self.__controller.register_button\n\n self.actions = Actions(master=self, commands=commands)\n\n def _create_activities_list(self):\n commands = {}\n\n commands['raffle'] = self.commands['raffle']\n commands['remove'] = self.__controller.remove_activity_button\n\n if not self.activities_list:\n self.activities_list =\\\n ActivitiesList(master=self, commands=commands)\n\n def destroy_activities_list(self):\n for activity in self.activities_list.label_list:\n activity.destroy()\n\n self.activities_list.label_list = []\n\n def create_register_window(self):\n commands = {}\n commands['submit_form'] = self.__controller.submit_form_button\n commands['cancel_form'] = self.__controller.cancel_form_button\n\n self.register_window = RegisterWindow(commands=commands)\n\n def destroy_register_window(self):\n self.register_window.destroy()\n self.register_window = None\n","sub_path":"src/view/activity/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557064670","text":"import scrapy\nfrom tutorial.items import TutorialItem\n\nclass QuotesSpider(scrapy.Spider):\n name = \"quotes\"\n\n def start_requests(self):\n print('-----------------我开始urls-----------------')\n urls = [\n 'http://zz.58.com/eqi/chuzu/pn1/',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n print('----------我开始parse----------')\n print(response.xpath('//title/text()').extract_first())\n \n for quote in response.xpath('//ul/li'):\n item = TutorialItem()\n item['title'] = quote.css(\"div.des h2 a::text\").extract_first().strip()\n item['price'] = quote.css(\"div.money b::text\").extract_first()\n item['room'] = quote.css(\"div.des p.room::text\").extract_first()\n \n yield item\n #next_page = response.css('li.next a::attr(href)').extract_first()\n #if next_page is not None:\n # yield response.follow(next_page, callback=self.parse) \n \n print('----------我结束parse----------')\n \n #page = response.url.split(\"/\")[-2]\n #filename = 'quotes-%s.html' % page\n #with open(filename, 'wb') as f:\n # f.write(response.body)\n #self.log('Saved file %s' % filename)\n \n ","sub_path":"Scrapy/tutorial/tutorial/spiders/quotes_spider.py","file_name":"quotes_spider.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"651145689","text":"\"\"\"\nHelper foc mac_addr tests\n\"\"\"\n\nimport shlex\n\nimport rhevmtests.networking.config as network_config\nimport 
art.rhevm_api.tests_lib.high_level.host_network as hl_host_network\nimport art.rhevm_api.tests_lib.high_level.networks as hl_networks\nfrom art.rhevm_api.tests_lib.low_level import (\n events as ll_events,\n hosts as ll_hosts\n)\n\n\ndef run_macaddr_test(network):\n \"\"\"\n Check if MACADDR is set in IFCFG\n\n Args:\n network (str): Network name\n\n Returns:\n bool: True If MACADDR in bond1 ifcfg file, False otherwise\n \"\"\"\n bond = \"bond1\"\n cat_cmd = \"cat {ifcfg_path}ifcfg-{bond}\".format(\n ifcfg_path=hl_networks.IFCFG_FILE_PATH, bond=bond\n )\n last_event = ll_events.get_max_event_id()\n if not ll_hosts.refresh_host_capabilities(\n host=network_config.HOST_0_NAME, start_event_id=last_event\n ):\n return False\n\n sn_dict = {\n \"add\": {\n \"1\": {\n \"network\": network,\n \"nic\": bond,\n }\n }\n }\n if not hl_host_network.setup_networks(\n host_name=network_config.HOST_0_NAME, **sn_dict\n ):\n return False\n\n ifcfg_out = network_config.VDS_0_HOST.run_command(\n command=shlex.split(cat_cmd)\n )[1]\n\n return \"MACADDR\" in ifcfg_out\n","sub_path":"art/tests/rhevmtests/networking/mac_addr/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142002553","text":"# -*- coding: utf-8 -*\nimport datetime\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User, Group\nfrom django.template.defaultfilters import slugify\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\n\nclass Artist(models.Model):\n none = models.IntegerField(default=-1000)\n name = models.CharField(blank=True, null=True, max_length=122100)\n text = models.CharField(blank=True, null=True, max_length=122100)\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True,\n related_name=\"artist\")\n\n def save(self, *args, **kwargs):\n super(Artist, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.name\n\nclass Prod(models.Model):\n none = models.IntegerField(default=-1000)\n name = models.CharField(blank=True, null=True, max_length=122100)\n idv = models.CharField(blank=True, null=True, max_length=122100)\n cover = models.CharField(blank=True, null=True, max_length=122100)\n\n\n def save(self, *args, **kwargs):\n names=str(self.name)\n super(Prod, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.name\n\"\"\"\nProd.objects.create(name=\"Product A\", idv=\"+1\", cover=\"High\")\nProd.objects.create(name=\"Product B\", idv=\"-1\", cover=\"High\")\nProd.objects.create(name=\"Product C\", idv=\"+1\", cover=\"Low\")\nProd.objects.create(name=\"Product D\", idv=\"-1\", cover=\"Low\")\n\nprint(\"Prod\")\n\"\"\"\n\nclass Make(models.Model):\n none = models.IntegerField(default=-1000)\n name = models.CharField(blank=True, null=True, max_length=122100)\n namec = models.CharField(blank=True, null=True, max_length=122100)\n model = models.CharField(blank=True, null=True, max_length=122100)\n modelc = models.CharField(blank=True, null=True, max_length=122100)\n cc = models.CharField(blank=True, null=True, max_length=122100)\n name1 = models.CharField(blank=True, null=True, max_length=122100)\n exprice = models.DecimalField(max_digits=30, default=0,decimal_places=3, blank=True, null=True)\n start_date = models.CharField(blank=True, null=True, max_length=122100)\n\n def save(self, *args, **kwargs):\n name=str(self.name).upper()\n model=str(self.model).upper()\n 
cc=str(self.cc).upper()\n name1=str(self.name1).upper()\n super(Make, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.model\n\nclass RTO(models.Model):\n none = models.IntegerField(default=-1000)\n loct = models.CharField(blank=True, null=True, max_length=122100)\n loct_code = models.CharField(blank=True, null=True, max_length=122100)\n state_code = models.CharField(blank=True, null=True, max_length=122100)\n city_code = models.CharField(blank=True, null=True, max_length=122100)\n\n def save(self, *args, **kwargs):\n loct=str(self.loct).upper().strip()\n loct_code=str(self.loct_code).upper().strip()\n super(RTO, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.loct","sub_path":"example_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50303817","text":"import numpy as np\nimport tensorflow as tf\n\n# load data\ndef read_data():\n train_X = np.load(\"./imdb/train_X.npy\")\n train_Y = np.load(\"./imdb/train_Y.npy\")\n train_Y = np.array(train_Y, dtype=np.float32).reshape((-1, 1))\n test_X = np.load(\"./imdb/test_X.npy\")\n test_Y = np.load(\"./imdb/test_Y.npy\")\n test_Y = np.array(test_Y, dtype=np.float32).reshape((-1, 1))\n return train_X, train_Y, test_X, test_Y\n\n\n# add padding to input sequences & count sequence lengths\ndef preprocsesing(input_sequence):\n seq_lens = np.zeros(len(input_sequence), dtype=np.int64)\n max_len = 100 # max sequence lengths is set to 100\n input_sequence_padded = 2 * np.ones((len(input_sequence), 100), dtype=np.int64) # out of vocab:2\n for i, seq in enumerate(input_sequence):\n input_sequence_padded[i,:len(seq)] = seq\n seq_lens[i] = len(seq)\n return input_sequence_padded, seq_lens\n\n\n# build forward path of RNN classifier\ndef forward_path(train_X, train_X_lengths, test_X, test_X_lengths, hyperparams):\n #hyperparameters\n embedding_dims = hyperparams['embedding_dims']\n max_seq_lens = hyperparams['max_seq_len']\n n_of_unique_words = hyperparams['n_of_unique_words']\n\n #init word vectors\n init_width = 1/embedding_dims\n word_embedding_matrix = tf.Variable(\n tf.random_uniform([n_of_unique_words, embedding_dims], -init_width, init_width, dtype=tf.float32),\n name=\"embeddings\",\n dtype=tf.float32)\n\n #inputs (placeholders)\n batch_size = tf.placeholder(tf.int32, shape=(), name=\"batch_size\")\n input_sentence_idx = tf.placeholder(tf.int64, shape=(None, max_seq_lens), name=\"input_sentence_placeholders\")\n input_sentence_len = tf.placeholder(tf.int64, shape=(None), name=\"input_sentence_len\")\n input_labels = tf.placeholder(tf.float32, shape=(None, 1), name=\"input_labels\")\n placeholders = [input_sentence_idx, input_sentence_len, input_labels, batch_size]\n\n #convert input idx to embeddings\n input_sentence_emb = tf.nn.embedding_lookup(word_embedding_matrix, input_sentence_idx)\n\n #CNN\n input_sentence_emb = tf.expand_dims(input_sentence_emb, -1)\n\n filter_size = 5\n num_filters = 16\n conv_filter_shape = [filter_size, embedding_dims, 1, num_filters]\n pool_filter_shape = [1, max_seq_lens - filter_size + 1, 1, 1]\n\n W = tf.Variable(tf.truncated_normal(conv_filter_shape, stddev=0.1), name=\"conv_filter\")\n\n ################### CNN (1 conv layer, 1 pooling layer) ##############################3\n # (1) convolution\n conv1 = tf.nn.conv2d(input_sentence_emb, W, [1, 1, 1, 1], padding='VALID', name='conv_fliter')\n # (2) relu activation\n conv1 = tf.nn.relu(conv1, name='relu')\n # 
(3) max pooling\n pooled = tf.nn.max_pool(conv1, ksize=pool_filter_shape, strides=[1, 2, 2, 1], padding='VALID', name='pool')\n ################### CNN (1 conv layer, 1 pooling layer) ##############################3\n\n pooled = tf.reshape(pooled, shape=(-1, num_filters))\n\n #dense layer\n dense_W = tf.Variable(tf.random_normal([num_filters, 1], stddev=.01), name='W')\n dense_b = tf.Variable(tf.random_normal([1], stddev=.01), name='b')\n logits = tf.matmul(pooled, dense_W) + dense_b\n return logits, placeholders\n\n\n# train ops for training models\ndef backward_path(logits, labels):\n cost = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n name='loss'\n )\n cost = tf.reduce_mean(cost, name='cost')\n train = tf.train.AdamOptimizer(learning_rate=.001).minimize(cost, name=\"train\")\n return cost, train\n\n\n# performance measures for training/test set\ndef performance(logits, labels):\n prob = tf.nn.sigmoid(logits)\n prediction_binary = tf.cast(prob > .5, tf.float32)\n labels = tf.cast(labels, tf.float32)\n acc = tf.reduce_mean(tf.cast(tf.equal(prediction_binary, labels), dtype=tf.float32), name=\"acc\")\n return acc\n\n\n# wrapper\ndef main():\n train_X, train_Y, test_X, test_Y = read_data()\n train_X_padded, train_X_lengths = preprocsesing(train_X)\n test_X_padded, test_X_lengths = preprocsesing(test_X)\n\n hyperparams = {\n 'embedding_dims': 100,\n 'max_seq_len': 100,\n 'n_of_unique_words': np.max(train_X_padded)+1\n }\n\n # build graph\n logits, placeholders = forward_path(train_X_padded, train_X_lengths, test_X_padded, test_X_lengths, hyperparams)\n input_sentence_idx, input_sentence_len, labels, batch_size_holder = placeholders\n cost, train = backward_path(logits, labels)\n accuracy = performance(logits, labels)\n\n # run models\n number_of_epochs = 3\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # model training\n batch_size = 50\n for i in range(number_of_epochs):\n current_cost = 0\n current_accuracy = 0\n for j in range(int(train_X.shape[0]/batch_size)):\n batch_X = train_X_padded[j*batch_size : (j+1)*batch_size]\n batch_Y = train_Y[j*batch_size : (j+1)*batch_size]\n batch_len = train_X_lengths[j*batch_size : (j+1)*batch_size]\n\n c, _, a = sess.run(\n [cost, train, accuracy],\n feed_dict={\n input_sentence_idx:batch_X,\n input_sentence_len:batch_len,\n labels:batch_Y,\n batch_size_holder:batch_size\n }\n )\n\n current_cost += c / int(train_X.shape[0]/batch_size)\n current_accuracy += a / int(train_X.shape[0]/batch_size)\n\n print(\"epoch:\", i+1, \"cost:\", current_cost, \"training set accuracy:\", current_accuracy)\n\n # model testing\n batch_size = test_X.shape[0]\n test_acc = sess.run(\n accuracy,\n feed_dict={\n input_sentence_idx:test_X_padded,\n input_sentence_len:test_X_lengths,\n labels:test_Y,\n batch_size_holder:batch_size\n }\n )\n print(\"Result: test set accuracy:\", test_acc)\n\n return tf.trainable_variables(), test_acc\n\ndef grade():\n vars, test_acc = main()\n\n for v in vars:\n if 'gru_cell' in v.name: print(v.name)\n\n\n return\n\nif __name__ == '__main__':\n #main()\n grade()\n","sub_path":"5. 
숙제/[6주차] Intro to Deep Learning/[숙제4] Convolutional NN (Revisited)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"2713830","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.neighbors import KNeighborsRegressor\n\nfrom .helpers import save_index_order, restore_index_order\n\n\nclass MakeIsnullColumns(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n self.columns = X.columns[X.isnull().any()]\n return self\n\n def transform(self, X, y=None):\n for c in self.columns:\n X[c + '_isnull'] = X[c].isnull().astype(bool)\n X['total_isnull'] = X[self.columns + '_isnull'].sum(axis=1).astype(int)\n return X\n\n\nclass PandasSelector(BaseEstimator, TransformerMixin):\n def __init__(self, columns=None, dtype=None, inverse=False,\n return_vector=True):\n self.dtype = dtype\n self.columns = columns\n self.inverse = inverse\n self.return_vector = return_vector\n\n if isinstance(self.columns, str):\n self.columns = [self.columns]\n\n def check_condition(self, x, col):\n cond = (self.dtype is not None and x[col].dtype == self.dtype) or \\\n (self.columns is not None and col in self.columns)\n return self.inverse ^ cond\n\n def fit(self, x, y=None):\n return self\n\n def _check_if_all_columns_present(self, x):\n if not self.inverse and self.columns is not None:\n missing_columns = set(self.columns) - set(x.columns)\n if len(missing_columns) > 0:\n missing_columns_ = ','.join(col for col in missing_columns)\n raise KeyError('Keys are missing in the record: %s' %\n missing_columns_)\n\n def transform(self, x):\n # check if x is a pandas DataFrame\n if not isinstance(x, pd.DataFrame):\n raise KeyError('Input is not a pandas DataFrame')\n\n selected_cols = []\n for col in x.columns:\n if self.check_condition(x, col):\n selected_cols.append(col)\n\n # if the column was selected and inversed = False make sure the column\n # is in the DataFrame\n self._check_if_all_columns_present(x)\n\n ## if only 1 column is returned return a vector instead of a dataframe\n #if len(selected_cols) == 1 and self.return_vector:\n # return list(x[selected_cols[0]])\n #else:\n return x[selected_cols]\n\n\n# class PandasSelector(BaseEstimator, TransformerMixin):\n# def __init__(self, columns, complement=False):\n# self.columns = columns\n# self.complement = complement\n#\n# def fit(self, X, y=None):\n# return self\n#\n# def transform(self, X, y=None):\n# if self.complement:\n# columns = list(set(X.columns) - set(self.columns))\n# print(columns)\n# return X[columns]\n# else:\n# print(self.columns)\n# return X[self.columns]\n\n\nclass ReshapeOneColumn(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return X.values.reshape(-1, 1)\n\n\nclass ParseText(BaseEstimator, TransformerMixin):\n def __init__(self, ngram_range=(1, 1), max_features=100):\n self.ngram_range = ngram_range\n self.max_features = max_features\n\n def fit(self, X, y=None):\n X = np.sum([X[c].fillna('') + ' ' for c in X.columns], axis=0)\n self.vectorizer_ = TfidfVectorizer(ngram_range=self.ngram_range,\n max_features=self.max_features,\n stop_words='english',\n token_pattern='\\w+')\n self.vectorizer_.fit(X)\n self.columns_ = self.vectorizer_.get_feature_names()\n return self\n\n def 
transform(self, X, y=None):\n X = np.sum([X[c].fillna('') + ' ' for c in X.columns], axis=0)\n X = self.vectorizer_.transform(X).astype(bool)\n return X\n\n\nclass MakeLen(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n res = X.apply(lambda x: x.fillna('').str.len())\n return res\n\nclass MakeCodes(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n self.columns_ = X.columns\n self.codes_ = {c: np.unique(X[c].dropna()).tolist()\n for c in self.columns_}\n return self\n\n def transform(self, X, y=None):\n X = X.copy()\n for c in self.columns_:\n codes = np.zeros(len(X[c]))\n idx = X[c].isin(self.codes_[c])\n codes[idx] = np.searchsorted(self.codes_[c], X.loc[idx, c]) + 1\n X[c] = codes.astype(int)\n return X\n\n\nclass ToFloat(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = X.astype(float)\n return X\n\n\nclass GroupbyImputer(BaseEstimator, TransformerMixin):\n def __init__(self, imp_col, group_cols, agg, min_size_group=12):\n self.imp_col = imp_col\n self.group_cols = group_cols\n self.agg = agg\n self.min_size_group = min_size_group\n\n def fit(self, X, y=None):\n self.vals_ = (X\n .groupby(self.group_cols)[self.imp_col]\n .agg(self.agg)\n .dropna()\n .to_dict('index'))\n return self\n\n def transform(self, X, y=None):\n idx = X[self.group_cols].dropna().index\n\n def fill_func(x, c):\n if x.name in self.vals_.keys() and \\\n len(x.dropna()) >= self.min_size_group:\n return x.fillna(self.vals_[x.name][c])\n return x\n\n for c in self.imp_col:\n X.loc[idx, c] = (X.loc[idx]\n .groupby(self.group_cols)[c]\n .apply(lambda x: fill_func(x, c)))\n return X\n\n\nclass Imputer(BaseEstimator, TransformerMixin):\n def __init__(self, agg_dict):\n self.agg_dict = agg_dict\n self.columns = list(agg_dict.keys())\n\n def fit(self, X, y=None):\n self.aggs_ = X[self.columns].agg(self.agg_dict).to_dict()\n return self\n\n def transform(self, X, y=None):\n X[self.columns] = X[self.columns].fillna(self.aggs_)\n return X\n\n\n# class ProcessCategoricalFeatures(BaseEstimator, TransformerMixin):\n# def __init__(self, to_codes, to_dummy=None, fill_agg_func=None):\n# self.to_codes = to_codes\n# if fill_agg_func is None:\n# self.fill_agg_func = {}\n# else:\n# self.fill_agg_func = fill_agg_func\n#\n# if to_dummy is None:\n# self.to_dummy = []\n# else:\n# self.to_dummy = to_dummy\n#\n# def fit(self, X, y=None):\n# self.codes_ = {c: np.unique(X[c].dropna()) for c in self.to_codes}\n# self.aggs_ = X.apply(self.fill_agg_func).to_dict()\n# self.total_cols_ = np.concatenate([self.to_codes, self.to_dummy])\n# return self\n#\n# def transform(self, X, y=None):\n# for c in self.aggs_.keys():\n# X[c] = X[c].fillna(self.aggs_[c])\n#\n# for c in self.total_cols_:\n# diff = list(set(X[c].dropna().unique()) - set(self.codes_[c]))\n# if diff:\n# print(\"Warning: Feature {0} has unseen values\\n{1}\".format(c, diff))\n#\n# idx = X[c].isin(self.codes_[c])\n# X.loc[idx, c] = np.searchsorted(self.codes_[c], X.loc[idx, c])\n# X.loc[~idx, c] = -1\n# X[c] = X[c].astype(int) + 1\n#\n# if c in self.to_dummy:\n# columns = [c + '_' + val for val in self.codes_[c]]\n# n_dummy_vars = len(self.codes_[c])+1\n#\n# dummy_cols = np.eye(n_dummy_vars).take(X[c], axis=0)[:, 1:]\n# dummy_cols = pd.DataFrame(dummy_cols.astype(bool),\n# columns=columns,\n# index=X[c].index)\n#\n# X.drop(c, axis=1, inplace=True)\n# X = pd.concat([X, dummy_cols], axis=1)\n# return X\n\n\n# class 
ProcessContinuousFeatures(BaseEstimator, TransformerMixin):\n# def __init__(self, columns):\n# self.columns = columns\n#\n# def fit(self, X, y=None):\n# self.mean_ = X[self.columns].mean(axis=0).to_dict()\n# return self\n#\n# def transform(self, X, y=None):\n# X[self.columns] = X[self.columns].apply(lambda x: x.fillna(self.mean_[x.name]))\n# return X\n\n\nclass MakeRatio():\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = X.copy()\n res = X['metroscubiertos']/X['metrostotales']\n return res.values.reshape(-1, 1)\n\n\nclass AddUSDMXN(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = X.copy()\n # The merge method will reset the index. The next lines of code add features to keep track of the\n # original index and the original ordering of the rows\n index_name = X.index.name\n # saves the original index as feature 'old_index'\n X.index.name = 'old_index'\n X = X.reset_index()\n # saves the original ordering of the rows as feature 'original_order'\n X.index.name = 'original_order'\n X = X.reset_index()\n\n usdmxn = pd.read_csv('data/external/usdmxn.csv',\n usecols=['usd Open', 'fechaYear', 'fechaMonth',\n 'fechaDay'])\n\n X = pd.merge(X, usdmxn,\n left_on=['fechaYear', 'fechaMonth', 'fechaDay'],\n right_on=['fechaYear', 'fechaMonth', 'fechaDay'])\n\n # Setting back the original ordering of the rows\n X = X.set_index('original_order')\n X = X.sort_index()\n # Setting the original index back\n X = X.set_index('old_index')\n X.index.name = index_name\n res = X['usd Open']\n\n return res.values.reshape(-1, 1)\n\n\nclass AddSHF(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = X.copy()\n # Setting the nulls to 'Nacional', to assign to them the values of the shf rate across all provinces. This value\n # is given in the SHF rate files.\n idx = X['provincia'].isnull()\n X.loc[idx, 'provincia'] = 'Nacional'\n\n index_name = X.index.name\n X = save_index_order(X)\n\n # Reading the rates\n shf = pd.read_csv('data/external/shf2012_2016.csv', index_col='Provincia')\n\n # Taking the mean of the prices across all years\n shf_mean_prices = []\n for i in range(1, 5):\n shf_mean_prices.append(\n pd.read_csv('data/external/shf{0}.csv'.format(i), index_col='Provincia',\n usecols=['Precio promedio', 'Provincia']))\n shf_mean_prices = pd.concat(shf_mean_prices, axis=1).mean(axis=1)\n\n # Normalizing so that the rate of 2016 is 1 and multiplying by the mean of the prices of 2016 to\n # obtain an estimate of the prices taking into account the inflation of prices given by the SHF rate.\n log_precio_promedio = np.log(\n shf.divide(shf['2016'], axis=0).multiply(shf_mean_prices, axis=0))\n\n # So far log_precio_promedio has 6 columns: provincia and one column for each year from 2012 to 2016.\n # The next lines of code change log_precio_promedio in such a way that it has three columns 'fechaYear', 'provincia',\n # and 'log_precio_promedio'. 
This is made to make the merging step easier later on.\n log_precio_promedio = log_precio_promedio.unstack().reset_index()\n log_precio_promedio.columns = ['fechaYear', 'provincia',\n 'log_precio_promedio']\n log_precio_promedio['fechaYear'] = log_precio_promedio[\n 'fechaYear'].astype(int)\n\n # Merging log_precio_promedio.\n X = pd.merge(left=X, right=log_precio_promedio,\n left_on=['fechaYear', 'provincia'],\n right_on=['fechaYear', 'provincia'], sort=False)\n\n # The same as before but with the shf rate now\n shf = shf.unstack().reset_index()\n shf.columns = ['fechaYear', 'provincia', 'shf']\n shf['fechaYear'] = shf['fechaYear'].astype(int)\n\n X = pd.merge(left=X, right=shf, left_on=['fechaYear', 'provincia'],\n right_on=['fechaYear', 'provincia'])\n\n # Now with log_shf, in case it makes sense.\n log_shf = shf.copy()\n log_shf['shf'] = np.log(log_shf['shf'])\n log_shf.columns = ['fechaYear', 'provincia', 'log_shf']\n\n X = pd.merge(left=X, right=log_shf,\n left_on=['fechaYear', 'provincia'],\n right_on=['fechaYear', 'provincia'])\n\n # Setting the null values of provincia back. This is important for the encoding step.\n idx = (X['provincia'] == 'Nacional')\n X.loc[idx, 'provincia'] = np.nan\n\n X = restore_index_order(X, index_name)\n return X[['log_precio_promedio', 'shf', 'log_shf']]\n\n\nclass AddInagiHogar(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = X.copy()\n # Setting the nulls to 'Nacional', to assign to them the values of the shf rate across all provinces. This value\n # is given in the SHF rate files.\n idx = X['provincia'].isnull()\n X.loc[idx, 'provincia'] = 'Nacional'\n\n index_name = X.index.name\n X = save_index_order(X)\n\n inagi = pd.read_csv('data/external/inagi_poblacion.tsv',\n sep='\\t',\n index_col='desc_entidad',\n usecols=['desc_entidad', 'indicador'] + [\n '{0}/t{1}'.format(j,i) for i in range(1, 5) for j in\n range(2012, 2017)])\n\n dictionary_prov = {\n 'Baja California': 'Baja California Norte',\n 'Coahuila de Zaragoza': 'Coahuila',\n 'Ciudad de México': 'Distrito Federal',\n 'México': 'Edo. de México',\n 'Michoacán de Ocampo': 'Michoacán',\n 'San Luis Potosí': 'San luis Potosí',\n 'Veracruz de Ignacio de la Llave': 'Veracruz'\n }\n\n indic = []\n groups = []\n for key, group in inagi.groupby('indicador'):\n indic.append(key)\n group = group.drop(['indicador'], axis=1)\n group.columns = pd.MultiIndex.from_product(\n [range(2012, 2017), range(1, 5)])\n group = group.unstack().reset_index()\n group.columns = ['fechaYear', 'fechaTrimester', 'provincia',\n key]\n group['provincia'] = group['provincia'].apply(\n lambda x: dictionary_prov.get(x, x))\n groups.append(group)\n\n cols_to_merge = ['provincia', 'fechaYear', 'fechaTrimester']\n\n X['fechaTrimester'] = X['fechaMonth'].apply(\n lambda x: (x - 1) // 3 + 1)\n\n for g in groups:\n X = pd.merge(X, g, left_on=cols_to_merge,\n right_on=cols_to_merge)\n\n # Setting the null values of provincia back. This is important for the encoding step.\n idx = (X['provincia'] == 'Nacional')\n X.loc[idx, 'provincia'] = np.nan\n\n X = restore_index_order(X, index_name)\n new_cols = [c for c in X.columns if c not in ['fechaYear', 'fechaMonth', 'provincia']]\n return X[new_cols]\n\n\nclass AddInagiVivienda(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = X.copy()\n # Setting the nulls to 'Nacional', to assign to them the values of the shf rate across all provinces. 
This value\n # is given in the SHF rate files.\n idx = X['provincia'].isnull()\n X.loc[idx, 'provincia'] = 'Nacional'\n\n index_name = X.index.name\n X = save_index_order(X)\n\n vivienda = pd.read_csv('data/external/inagi_vivienda.tsv', sep='\\t', header=None,\n usecols=[1, 5, 14])\n\n vivienda.columns = ['provincia', 'indicador', 'valor']\n\n dictionary_prov = {\n 'Baja California': 'Baja California Norte',\n 'Coahuila de Zaragoza': 'Coahuila',\n 'Ciudad de México': 'Distrito Federal',\n 'México': 'Edo. de México',\n 'Michoacán de Ocampo': 'Michoacán',\n 'San Luis Potosí': 'San luis Potosí',\n 'Veracruz de Ignacio de la Llave': 'Veracruz'\n }\n\n indic = []\n groups = []\n for key, group in vivienda.groupby('indicador'):\n indic.append(key)\n group = group.drop('indicador', axis=1)\n group['provincia'] = group['provincia'].apply(\n lambda x: dictionary_prov.get(x, x))\n group.columns = ['provincia', key]\n groups.append(group)\n\n for g in groups:\n X = pd.merge(X, g, left_on='provincia', right_on='provincia')\n\n # Setting the null values of provincia back. This is important for the encoding step.\n idx = (X['provincia'] == 'Nacional')\n X.loc[idx, 'provincia'] = np.nan\n\n X = restore_index_order(X, index_name)\n new_cols = [c for c in X.columns if c not in ['fechaYear', 'fechaMonth', 'provincia']]\n return X[new_cols]\n\n","sub_path":"src/features/transformers.py","file_name":"transformers.py","file_ext":"py","file_size_in_byte":17680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"400265136","text":"from Bridges.Bridges import *\nfrom Bridges.TreeElement import *\n\n\n\nclass tutorial:\n\n # create bridges object\n bridges = Bridges(8, \"YOUR_USER_ID\", \"YOUR_API_KEY\")\n\n # create tree nodes\n t0 = TreeElement(e = \"Hello\")\n t1 = TreeElement(e= \"this\")\n t2 = TreeElement(e=\"is\")\n t3 = TreeElement(e=\"a\")\n t4 = TreeElement(e=\"generic\")\n t5 = TreeElement(e=\"tree\")\n t6 = TreeElement(e=\"representation\")\n\n # put in labels for each node; simply use integers\n t0.set_label(\"10\")\n t1.set_label(\"20\")\n t2.set_label(\"30\")\n t3.set_label(\"40\")\n t4.set_label(\"50\")\n t5.set_label(\"60\")\n t6.set_label(\"70\")\n\n # add links to children\n t0.add_child(t1)\n t0.add_child(t2)\n t0.add_child(t5)\n t2.add_child(t4)\n t2.add_child(t5)\n t3.add_child(t6)\n\n # set some visual attributes\n t0.get_visualizer().set_color(\"red\")\n t0.get_visualizer().set_opacity(0.3)\n\n # set visualizer type\n bridges.set_data_structure(t0)\n\n # visualize the tree\n bridges.visualize()\n","sub_path":"python/website_tutorials/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"532950561","text":"\n# coding: utf-8\n\n# # OpenMKM Input and Output\n# This notebook describes pmutt's functionality to read and write OpenMKM CTI files. We will use the NH3 formation mechanism as a case study.\n# \n# ## Topics Covered\n# - Read species *ab-initio* data, reactions, lateral interactions and phases from a spreadsheet\n# - Write the CTI file that can be read by OpenMKM\n\n# ## Input Spreadsheet\n# All the data will be imported from the [`./inputs/NH3_Input_data.xlsx`](https://github.com/VlachosGroup/pmutt/blob/master/docs/source/examples_jupyter/openmkm_io/inputs/NH3_Input_Data.xlsx) file. There are five sheets:\n# 1. 
`refs` contains *ab-initio* and experimental data for a handful of gas species to calculate references\n# 2. `species` contains *ab-initio* data for each specie\n# 3. `beps` contains Bronsted-Evans-Polanyi relationships for reactions\n# 4. `reactions` contains elementary steps\n# 5. `lateral_interactions` contains lateral interactions between species\n# 6. `phases` contains phases for the species\n\n# First, we change the working directory to the location of the Jupyter notebook.\n\n# In[1]:\n\n\nimport os\nfrom pathlib import Path\n\n# Find the location of Jupyter notebook\n# Note that normally Python scripts have a __file__ variable but Jupyter notebook doesn't.\n# Using pathlib can overcome this limiation\ntry:\n notebook_path = os.path.dirname(__file__)\nexcept NameError:\n notebook_path = Path().resolve()\n \nos.chdir(notebook_path)\ninput_path = './inputs/NH3_Input_Data.xlsx'\n\n\n# Below is a helper function to print tables easily.\n\n# In[2]:\n\n\nimport pandas as pd\nfrom IPython.display import display\n\ndef disp_data(io, sheet_name):\n data = pd.read_excel(io=io, sheet_name=sheet_name, skiprows=[1])\n data = data.fillna(' ')\n display(data) \n\n\n# **References**\n\n# In[3]:\n\n\ndisp_data(io=input_path, sheet_name='refs')\n\n\n# **Species**\n\n# In[4]:\n\n\ndisp_data(io=input_path, sheet_name='species')\n\n\n# **BEPs**\n\n# In[5]:\n\n\ndisp_data(io=input_path, sheet_name='beps')\n\n\n# **Reactions**\n\n# In[6]:\n\n\ndisp_data(io=input_path, sheet_name='reactions')\n\n\n# **Lateral Interactions**\n\n# In[7]:\n\n\ndisp_data(io=input_path, sheet_name='lateral_interactions')\n\n\n# **Phases**\n\n# In[8]:\n\n\ndisp_data(io=input_path, sheet_name='phases')\n\n\n# ## Designate Units\n# First, we will designate the units to write the CTI file.\n\n# In[9]:\n\n\nfrom pmutt.omkm.units import Units\n\nunits = Units(length='cm', quantity='mol', act_energy='kcal/mol', mass='g', energy='kcal/mol')\n\n\n# ## Reading data\n# Before we can initialize our species, we need the references.\n# \n# ### Reading References (optional)\n# We will open the [input spreadsheet](https://github.com/VlachosGroup/pmutt/blob/master/docs/source/examples_jupyter/openmkm_io/inputs/NH3_Input_Data.xlsx) and read the `refs` sheet.\n\n# In[10]:\n\n\nfrom pmutt.io.excel import read_excel\nfrom pmutt.empirical.references import Reference, References\n\ntry:\n refs_data = read_excel(io=input_path, sheet_name='refs')\nexcept:\n refs = None\nelse:\n refs = [Reference(**ref_data) for ref_data in refs_data]\n refs = References(references=refs)\n\n\n# ### Reading Species\n\n# In[11]:\n\n\nfrom pmutt.empirical.nasa import Nasa\n\n# Lower and higher temperatures\nT_low = 298. # K\nT_high = 800. 
# K\n\nspecies_data = read_excel(io=input_path, sheet_name='species')\nspecies = []\nspecies_phases = {}\nfor ind_species_data in species_data:\n # Initialize NASA from statistical mechanical data\n ind_species = Nasa.from_model(T_low=T_low, T_high=T_high, references=refs,\n **ind_species_data)\n species.append(ind_species)\n\n # Group the species by phase for later use\n try:\n species_phases[ind_species.phase].append(ind_species)\n except KeyError:\n species_phases[ind_species.phase] = [ind_species]\n\n\n# ### Adding species from other empirical sources (optional)\n\n# In[12]:\n\n\nimport numpy as np\nfrom pmutt.empirical.shomate import Shomate\n\nAr = Shomate(name='Ar', elements={'Ar': 1}, phase='gas', T_low=298., T_high=6000.,\n a=np.array([20.78600, 2.825911e-7, -1.464191e-7, 1.092131e-8, -3.661371e-8, -6.19735, 179.999, 0.]))\n\nspecies.append(Ar)\nspecies_phases['gas'].append(Ar)\n\n\n# ### Reading BEP (optional)\n\n# In[13]:\n\n\nfrom pmutt.omkm.reaction import BEP\n\ntry:\n beps_data = read_excel(io=input_path, sheet_name='beps')\nexcept:\n beps = None\n species_with_beps = species.copy()\nelse:\n beps = []\n for bep_data in beps_data:\n beps.append(BEP(**bep_data))\n\n # Combine species and BEPs to make reactions\n species_with_beps = species + beps\n\n\n# ### Read reactions\n\n# In[14]:\n\n\nfrom pmutt import pmutt_list_to_dict\nfrom pmutt.omkm.reaction import SurfaceReaction\n\n# Convert species to dictionary for easier reaction assignment\nspecies_with_beps_dict = pmutt_list_to_dict(species_with_beps)\ntry:\n reactions_data = read_excel(io=input_path, sheet_name='reactions')\nexcept:\n reactions = None\nelse:\n reactions = []\n # Store information about phases for later retrieval\n reaction_phases = {}\n for reaction_data in reactions_data:\n reaction = SurfaceReaction.from_string(species=species_with_beps_dict,\n **reaction_data)\n reactions.append(reaction)\n # Assign phase information\n reaction_species = reaction.get_species(include_TS=True)\n for ind_species in reaction_species.values():\n try:\n phase = ind_species.phase\n except AttributeError:\n pass\n # Assign if key already exists\n if phase in reaction_phases:\n if reaction not in reaction_phases[phase]:\n reaction_phases[phase].append(reaction)\n else:\n reaction_phases[phase] = [reaction]\n\n\n# ### Read lateral interactions (optional)\n\n# In[15]:\n\n\nfrom pmutt.mixture.cov import PiecewiseCovEffect\n\ntry:\n interactions_data = read_excel(io=input_path, sheet_name='lateral_interactions')\nexcept:\n interactions = None\nelse:\n interactions = []\n interaction_phases = {}\n for interaction_data in interactions_data:\n interaction = PiecewiseCovEffect(**interaction_data)\n interactions.append(interaction)\n\n # Assign phase information\n phase = species_with_beps_dict[interaction.name_i].phase\n # Assign if key already exists\n if phase in interaction_phases:\n if interaction not in interaction_phases[phase]:\n interaction_phases[phase].append(interaction)\n else:\n interaction_phases[phase] = [interaction]\n\n\n# ### Reading Phases\n\n# In[16]:\n\n\nfrom pmutt.omkm.phase import IdealGas, InteractingInterface, StoichSolid\n\ntry:\n phases_data = read_excel(io=input_path, sheet_name='phases')\nexcept:\n phases = None\nelse:\n phases = []\n # Group data related to previously collected data\n additional_fields = {'species': species_phases,\n 'reactions': reaction_phases,\n 'interactions': interaction_phases}\n for phase_data in phases_data:\n # Pre-processing relevant data\n phase_name = phase_data['name']\n 
phase_type = phase_data.pop('phase_type')\n\n # Add additional fields to phase data if present\n for field, phase_dict in additional_fields.items():\n try:\n phase_data[field] = phase_dict[phase_name]\n except (NameError, KeyError):\n pass\n\n # Create the appropriate object\n if phase_type == 'IdealGas':\n # Special rule where reactions are only in the gas phase if\n # all species belong to the gas phase\n del_indices = []\n for i, reaction in enumerate(phase_data['reactions']):\n # Reaction will be deleted if any of the species are a different phase\n valid_rxn = True\n for ind_species in reaction.get_species(include_TS=False).values():\n try:\n ind_species_phase = ind_species.phase\n except AttributeError:\n valid_rxn = False\n else:\n if ind_species_phase != phase_name:\n valid_rxn = False\n # Record reaction index if not valid\n if not valid_rxn:\n del_indices.append(i)\n break\n # Delete reactions that do not qualify\n if len(del_indices) == len(phase_data['reactions']):\n phase_data.pop('reactions')\n else:\n for del_i in sorted(del_indices, reverse=True):\n del phase_data['reactions'][del_i]\n phase = IdealGas(**phase_data)\n elif phase_type == 'StoichSolid':\n phase = StoichSolid(**phase_data)\n elif phase_type == 'InteractingInterface':\n phase = InteractingInterface(**phase_data)\n phases.append(phase)\n\n\n# ## Write CTI File\n\n# In[17]:\n\n\nfrom pmutt.io.omkm import write_cti\n\ncti_path = './outputs/thermo.cti'\nuse_motz_wise = True\n\nwrite_cti(reactions=reactions, species=species, phases=phases, units=units,\n lateral_interactions=interactions, filename=cti_path,\n use_motz_wise=use_motz_wise)\n\n\n# If you would prefer to return the file as a string instead of writing it, omit the ``filename``.\n\n# In[18]:\n\n\nprint(write_cti(reactions=reactions, species=species, phases=phases, units=units,\n lateral_interactions=interactions, use_motz_wise=use_motz_wise))\n\n\n# ## Write YAML File\n# \n# The YAML file specifying the reactor configuration can also be written using the ``write_yaml`` function. Note that if:\n# - ``units`` is not specified, float values are assumed to be in SI units\n# - ``units`` is specified, float values are consistent with ``unit``'s attributes\n# - you would like a quantity to have particular units, pass the value as a string with the units (e.g. 10 cm3/s).\n\n# In[19]:\n\n\nfrom pmutt.io.omkm import write_yaml\n\nyaml_path = './outputs/cstr.yaml'\n\nwrite_yaml(filename=yaml_path, reactor_type='cstr', mode='isothermal',\n V=1., T=900., P=1., cat_abyv=1500, end_time=50, flow_rate=1.,\n transient=True, stepping='logarithmic', init_step=1e-15, atol=1e-15,\n rtol=1e-10, output_format='csv', phases=phases, units=units)\n\n\n# If you would prefer to return the file as a string instead of writing it, omit the ``filename``.\n\n# In[20]:\n\n\nprint(write_yaml(reactor_type='cstr', mode='isothermal', V=1., T=900., P=1., cat_abyv=1500,\n end_time=50, flow_rate=1., transient=True, stepping='logarithmic',\n init_step=1e-15, atol=1e-15, rtol=1e-10, output_format='csv', phases=phases,\n units=units))\n\n","sub_path":"docs/source/examples_jupyter/omkm_io/OpenMKM_IO.py","file_name":"OpenMKM_IO.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386869218","text":"\"\"\"\nMinimum Initial Points to Reach Destination\n\nGiven a grid with each cell consisting of positive, negative or no points i.e, zero points. 
We can move across a cell only if we have positive points ( > 0 ). Whenever we pass through a cell, points in that cell are added to our overall points. We need to find minimum initial points to reach cell (m-1, n-1) from (0, 0).\n\nConstraints :\n\nFrom a cell (i, j) we can move to (i+1, j) or (i, j+1).\nWe cannot move from (i, j) if our overall points at (i, j) is <= 0.\nWe have to reach at (n-1, m-1) with minimum positive points i.e., > 0.\n\nExample\n\nInput: points[m][n] = { {-2, -3, 3},\n {-5, -10, 1},\n {10, 30, -5}\n };\nOutput: 7\nExplanation:\n7 is the minimum value to reach destination with\npositive throughout the path. Below is the path.\n\n(0,0) -> (0,1) -> (0,2) -> (1, 2) -> (2, 2)\n\nWe start from (0, 0) with 7, we reach (0, 1)\nwith 5, (0, 2) with 2, (1, 2) with 5, (2, 2)\nwith 6 and finally we have 1 point (we needed\ngreater than 0 points at the end).\nreference: https://www.geeksforgeeks.org/minimum-positive-points-to-reach-destination/\n\"\"\"\n\nimport math as mt\n\ndef min_initial_points(points, m, n):\n\t'''\n\tdp[i][j] represents the minimum initial\n\tpoints player should have so that when\n\tstarts with cell(i, j) successfully\n\treaches the destination cell(m-1, n-1)\n\t'''\n\tdp = [[0 for x in range(n + 1)]\n\t\t\t\tfor y in range(m + 1)]\n\n\tif points[m - 1][n - 1] > 0:\n\t\tdp[m - 1][n - 1] = 1\n\telse:\n\t\tdp[m - 1][n - 1] = abs(points[m - 1][n - 1]) + 1\n\t''' \n\tFill last row and last column as base \n\tto fill entire table \n\t'''\n\tfor i in range(m - 2, -1, -1):\n\t\tdp[i][n - 1] = max(dp[i + 1][n - 1] - points[i][n - 1], 1)\n\tfor i in range(n - 2, -1, -1):\n\t\tdp[m - 1][i] = max(dp[m - 1][i + 1] - points[m - 1][i], 1)\n\t''' \n\tfill the table in bottom-up fashion \n\t'''\n\tfor i in range(m - 2, -1, -1):\n\t\tfor j in range(n - 2, -1, -1):\n\t\t\tmin_points_on_exit = min(dp[i + 1][j], dp[i][j + 1])\n\t\t\tdp[i][j] = max(min_points_on_exit - points[i][j], 1)\n\n\treturn dp[0][0]\n\n\nif __name__ == \"__main__\":\n\tpoints = [[-2, -3, 3],\n\t\t\t\t\t\t[-5, -10, 1],\n\t\t\t\t\t\t[10, 30, -5]]\n\n\tprint(\"Minimum Initial Points Required:\",min_initial_points(points, 3, 3))","sub_path":"practice/hard/_42_minimum_points.py","file_name":"_42_minimum_points.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"20362706","text":"import os\n\ndef main():\n os.chdir('Data')\n numbers = open('problem_013.txt', 'r',)\n sum = 0\n\n for number in numbers.readlines():\n sum += int(number)\n\n numbers.close()\n return(str(sum)[0:10])\n \n\nprint(main())\n","sub_path":"JTI93/problem_013.py","file_name":"problem_013.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"237454429","text":"import requests\n\nnumber = \"+19086421986\"\n\ndata = '{\"to\": \"' + number + '\",\"body\": \"You have a new message on SafeCollab!\"}'\nheaders = {\n 'Content-Type': 'application/json',\n }\nurl = \"https://5726cb496e1ff90008000003:0b191af91f1c3a5f9a73f87178915955@api.easysmsapp.com/accounts/5726cb496e1ff90008000003\" + '/messages'\nr = requests.post(url, headers=headers, data=data)\nprint(r.status_code)\nprint(r.content)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"556003633","text":"#!/usr/local/bin/python3\r\n# -*- coding: utf-8 -*-\r\nimport logging\r\nfrom functools import wraps\r\nfrom selenium import webdriver\r\nimport subprocess\r\n\r\nfrom setup import PROXY, TOKEN\r\n\r\nfrom Covid import *\r\nfrom Files import *\r\nfrom UrlRequests import *\r\nfrom SpeechToText import *\r\n\r\nfrom telegram import Bot, Update, bot\r\nfrom telegram.ext import CallbackContext, CommandHandler, Filters, MessageHandler, Updater\r\nimport re\r\nfrom pymongo import MongoClient\r\n\r\n# Enable logging\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\r\n level=logging.INFO)\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n# Define a few command handlers. 
These usually take the two arguments update and\r\n# context. Error handlers also receive the raised TelegramError object in error.\r\n\r\nLOG_ACTIONS = []\r\nCITY = 'Nizhny Novgorod'\r\n\r\n# bot = Bot(\r\n# token=TOKEN,\r\n# base_url=PROXY, # delete it if connection via VPN\r\n# )\r\n\r\nclient = MongoClient('localhost', 27017)\r\ndb = client[\"ThePyProject\"]\r\ncollection = db[\"HistoryCollection\"]\r\ncovid_collection_t = db[\"CovidCollectionToday\"]\r\ncovid_collection_y = db[\"CovidCollectionYesterday\"]\r\ncovid_collection_wa = db[\"CovidCollectionWeekAgo\"]\r\n\r\n\r\ndef voice_message(update: Update, context: CallbackContext):\r\n file_info = context.bot.get_file(update.message.voice.file_id)\r\n file_info.download('VOICE.ogg')\r\n command = [\r\n 'C:Project/bin/ffmpeg.exe', # путь до ffmpeg.exe\r\n '-i', 'VOICE.ogg',\r\n '-ar', '48000',\r\n 'VOICE.wav'\r\n ]\r\n proc = subprocess.Popen(command)\r\n proc.wait()\r\n transcript = SpeechToText(\"./VOICE.wav\")\r\n update.message.reply_text(transcript.sample_recognize())\r\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'VOICE.wav')\r\n os.remove(path)\r\n\r\n\r\ndef log_action(function):\r\n def inner(*args, **kwargs):\r\n update = args[0]\r\n funcname = function.__name__\r\n if update and hasattr(update, 'message') and hasattr(update, 'effective_user'):\r\n Files.new_log(Files, update, funcname)\r\n return function(*args, **kwargs)\r\n return inner\r\n\r\n\r\ndef decorator_error(func):\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n try:\r\n return func(*args, **kwargs)\r\n except Exception as er:\r\n print(\"Error! \" + str(er) + ', ' + \"function: \" + func.__name__)\r\n return wrapper\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef start(update: Update, context: CallbackContext):\r\n \"\"\"Send a message when the command /start is issued.\"\"\"\r\n update.message.reply_text(f'Привет, {update.effective_user.first_name}!\\nОтправь команду /help, чтобы получить '\r\n f'список команд!')\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef rbc_news(update: Update, context: CallbackContext):\r\n options = webdriver.FirefoxOptions()\r\n options.add_argument('headless')\r\n #options.add_argument(f'window-size={512},{512}')\r\n options.add_argument('hide-scrollbars')\r\n\r\n driver = webdriver.Firefox(firefox_options=options)\r\n driver.get(\"https://nn.rbc.ru/\")\r\n driver.get_screenshot_as_file('source_pack/img_news.png')\r\n driver.quit()\r\n\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/img_news.png', 'rb'))\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef corona_stats_dynamics(update: Update, context: CallbackContext):\r\n cd = CovidStatsDaily()\r\n cd.image_create()\r\n cd.top_five()\r\n\r\n update.message.reply_text('Below you\\'ll be provided with daily changes in covid-19 statistics')\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/Covid_stats.png', 'rb'))\r\n update.message.reply_text('Top-5 injured countries by Confirmed')\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/Top_5_pie.png', 'rb'))\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/Top_5_bar.png', 'rb'))\r\n update.message.reply_text('Top-5 injured countries by Death')\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/Top_5_bar_death.png', 'rb'))\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef clear_db(update: Update, context: CallbackContext):\r\n \"\"\"clear the whole database including history and all covids\"\"\"\r\n 
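# A minimal standalone sketch (hypothetical names, safe to run anywhere) of
# why decorator_error uses functools.wraps: without it, the wrapper would
# hide the handler's __name__ and __doc__ from the outer log_action decorator.
from functools import wraps

def traced(func):
    @wraps(func)  # copies __name__, __doc__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@traced
def ping():
    """demo docstring"""

assert ping.__name__ == 'ping' and ping.__doc__ == 'demo docstring'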
client.drop_database('ThePyProject')\r\n update.message.reply_text('Ready!')\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef admin_settings(update: Update, context: CallbackContext):\r\n \"\"\"Send a list of AdminOnly commands\"\"\"\r\n if update.effective_user.first_name == 'Meseyoshi':\r\n update.message.reply_text('Список функций для администрирования:\\n/clean\\n/clean_db')\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef film(update: Update, context: CallbackContext):\r\n data = UrlRequests.get_film(UrlRequests, update)\r\n update.message.reply_text('Название: ' + data[0] +'\\n'+'Рейтинг на IMDb: ' + data[1] + '\\n'+'Длительность: '\r\n + data[2]+'\\n'+'В главной роли:' + data[3])\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef covid(update: Update, context: CallbackContext):\r\n '''Send user top-5 covid infected provinces'''\r\n data = CovidStats.upload(CovidStats)\r\n text = CovidStats.top_five(CovidStats, data)\r\n update.message.reply_text('Here you can find some statistics about the top-5 covid infected regions')\r\n update.message.reply_text(text)\r\n update.message.reply_text('And about the top-5 covid least infected regions')\r\n text = CovidStats.top_five(CovidStats, data)\r\n update.message.reply_text(text)\r\n CovidStats.image_create(CovidStats, data)\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/Covid_statistics.png', 'rb'))\r\n Bot.send_photo(bot, update.message.chat.id, open('source_pack/Covid_weekly_changes.png', 'rb'))\r\n\r\n\r\ndef get_date(date_in):\r\n try:\r\n y = re.findall(r\"\\d{4}$\", date_in)\r\n m = re.findall(r\"\\d{2}\", str(re.findall(r\"\\.\\d{2}\\.\", date_in)))\r\n d = str(int(re.findall(r\"^\\d{2}\", date_in)[0]) - 1)\r\n return y, m, d\r\n except:\r\n try:\r\n y = re.findall(r\"\\d{4}$\", date_in)\r\n d = str(int(re.findall(r\"\\d{2}\", str(re.findall(r\"-\\d{2}-\", date_in)))[0]) - 1)\r\n m = re.findall(r\"^\\d{2}\", date_in)\r\n return y, m, d\r\n except:\r\n return 0\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef covid_chosen_date(update: Update, context: CallbackContext):\r\n update.message.reply_text('You\\'ll be provided with some covid info for the chosen date')\r\n a = update.message['text'].replace('/corona_stats', '').strip()\r\n d = get_date(a)\r\n if d == 0:\r\n update.message.reply_text(\"Sorry, the format is incorrect, try calling the function again!\")\r\n else:\r\n try:\r\n data = CovidStats.upload_chosen_date(CovidStats, *d[0], *d[1], d[2])\r\n text = CovidStats.top_five(CovidStats, data)\r\n update.message.reply_text('Here you can find some statistics about the top-5 covid infected regions')\r\n update.message.reply_text(text)\r\n except:\r\n update.message.reply_text('Sorry, something went wrong with it...')\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef smile(update: Update, context: CallbackContext):\r\n '''Send user a smile'''\r\n smiles = [\"\\U0001F601\", \"Грузовичок\" + \"\\U0001F69A\" + \"Везет улыбочки!\", \"\\U0001F601\", \"\\U0001F606\",\r\n \"\\U0001F60B\", \"\\U0001F60F\", \"\\U0001F609\", \"\\U0001F606\",\r\n \"\\U0001F633\" + \"Задача для сеньора\" + \"\\U0001F633\"]\r\n s = random.randint(0, len(smiles) - 1)\r\n update.message.reply_text(smiles[s])\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef help(update: Update, context: CallbackContext):\r\n \"\"\"Send a list of all available functions when 
the command /list is issued.\"\"\"\r\n update.message.reply_text('Доступные команды:\\n/history\\n/fact\\n/weather\\n/smile\\n/film\\n/covid\\n'\r\n '/news\\n/dynamics\\n')\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef echo(update: Update, context: CallbackContext):\r\n \"\"\"Echo the user message.\"\"\"\r\n update.message.reply_text(update.message.text)\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef error(update: Update, context: CallbackContext):\r\n \"\"\"Log Errors caused by Updates.\"\"\"\r\n logger.warning(f'Update {update} caused error {context.error}')\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef history(update: Update, context: CallbackContext):\r\n update.message.reply_text(Files.history(Files))\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef admin_check_period(update: Update, context: CallbackContext):\r\n Files.delete_logs(Files)\r\n mes = 'Готово!'\r\n update.message.reply_text(mes)\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef weather(update: Update, context: CallbackContext):\r\n update.message.reply_text(UrlRequests.get_weather())\r\n\r\n\r\n@log_action\r\n@decorator_error\r\ndef fact(update: Update, context: CallbackContext):\r\n update.message.reply_text(UrlRequests.get_cat_fact())\r\n\r\n\r\ndef main():\r\n # Connect via proxy\r\n REQUEST_KWARGS = {\r\n 'proxy_url': PROXY,\r\n # Optional, if you need authentication:\r\n # 'urllib3_proxy_kwargs': {\r\n # 'username': 'name',\r\n # 'password': 'passwd',\r\n # }\r\n }\r\n updater = Updater(TOKEN, request_kwargs=REQUEST_KWARGS, use_context=True)\r\n\r\n # updater = Updater(bot=bot, use_context=True)\r\n\r\n # on different commands - answer in Telegram\r\n updater.dispatcher.add_handler(CommandHandler('start', start))\r\n updater.dispatcher.add_handler(CommandHandler('help', help))\r\n updater.dispatcher.add_handler(CommandHandler('history', history))\r\n updater.dispatcher.add_handler(CommandHandler('settings', admin_settings))\r\n updater.dispatcher.add_handler(CommandHandler('clean', admin_check_period))\r\n updater.dispatcher.add_handler(CommandHandler('clean_db', clear_db))\r\n updater.dispatcher.add_handler(CommandHandler('fact', fact))\r\n updater.dispatcher.add_handler(CommandHandler('news', rbc_news))\r\n updater.dispatcher.add_handler(CommandHandler('covid', covid))\r\n updater.dispatcher.add_handler(CommandHandler('smile', smile))\r\n updater.dispatcher.add_handler(CommandHandler('weather', weather))\r\n updater.dispatcher.add_handler(CommandHandler('film', film))\r\n updater.dispatcher.add_handler(CommandHandler('dynamics', corona_stats_dynamics))\r\n updater.dispatcher.add_handler(CommandHandler('corona_stats', covid_chosen_date))\r\n\r\n\r\n # on noncommand i.e message - echo the message on Telegram\r\n updater.dispatcher.add_handler(MessageHandler(Filters.text, echo))\r\n updater.dispatcher.add_handler(MessageHandler(Filters.voice, voice_message))\r\n\r\n # log all errors\r\n updater.dispatcher.add_error_handler(error)\r\n\r\n # Start the Bot\r\n updater.start_polling()\r\n\r\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\r\n # SIGTERM or SIGABRT. 
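# Sketch (same pattern as the add_handler calls above, hypothetical mapping):
# the one-call-per-command block can be driven from a dict instead.
from telegram.ext import CommandHandler

def register_commands(updater, commands):
    """Attach one CommandHandler per (command name, callback) pair."""
    for cmd_name, callback in commands.items():
        updater.dispatcher.add_handler(CommandHandler(cmd_name, callback))

# usage: register_commands(updater, {'start': start, 'help': help, 'fact': fact})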
This should be used most of the time, since\r\n # start_polling() is non-blocking and will stop the bot gracefully.\r\n updater.idle()\r\n\r\n\r\nif __name__ == '__main__':\r\n logger.info('Start Bot')\r\n main()\r\n","sub_path":"Shushushu.py","file_name":"Shushushu.py","file_ext":"py","file_size_in_byte":11359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"546680667","text":"import gym\r\nimport numpy as np\r\nimport time\r\nimport pickle, os\r\n\r\nEPISODES = 50\r\n\r\n#env = gym.make('FrozenLake-v0', is_slippery=False)\r\nenv = gym.make('FrozenLake-v0')\r\n\r\nwith open(\"frozenLake_qTable.pkl\", 'rb') as f:\r\n\tQ = pickle.load(f)\r\n\r\ndef choose_action(state):\r\n\taction = np.argmax(Q[state, :])\r\n\treturn action\r\nwins = 0\r\n# start\r\nfor episode in range(EPISODES):\r\n\r\n\tstate = env.reset()\r\n\tprint(\"*** Episode: \", episode)\r\n\tt = 0\r\n\twhile t < 20:\r\n\t\tenv.render()\r\n\r\n\t\taction = choose_action(state)\r\n\r\n\t\tstate2, reward, done, info = env.step(action)\r\n\r\n\t\tstate = state2\r\n\r\n\t\tif done:\r\n\t\t\tif reward == 1:\r\n\t\t\t\twins += 1\r\n\t\t\t\tprint('\\n*******WIN********\\n')\r\n\t\t\tbreak\r\n\r\n\t\t#time.sleep(1)\r\n\t\tos.system('cls')\r\n#print('\\n\\n', Q)\r\nprint('% Win :', wins/EPISODES*100, '%')\r\n","sub_path":"foofrozenlakePlay.py","file_name":"foofrozenlakePlay.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"204564514","text":"import numpy as np\nimport cv2\nimport os \n\nclass SimpleDatasetLoader:\n def __init__(self, preprocessors=None):\n # store the image preprocessor\n self.preprocessors = preprocessors\n # if the preprocessors are None, initialize them as an\n # empty list\n if self.preprocessors is None:\n self.preprocessors = []\n\n def load(self,imagePaths,verbose=-1):\n data = []\n labels=[]\n\n #loop over the input images\n for (i,imagePath) in enumerate(imagePaths):\n image=cv2.imread(imagePath)\n label=imagePath.split(os.path.sep)[-2]\n if self.preprocessors is not None:\n for p in self.preprocessors:\n image = p.preprocess(image)\n\n data.append(image)\n labels.append(label)\n # show an update every ‘verbose‘ images\n if(verbose > 0 and i > 0 and (i + 1) % verbose == 0):\n print(\"[INFO] processed {}/{}\".format(i + 1,len(imagePaths)))\n return (np.array(data),np.array(labels))","sub_path":"chapter07-first_image_classifier/pyimagesearch/datasets/SimpleDatasetLoader.py","file_name":"SimpleDatasetLoader.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"237454429","text":"import requests\n\nnumber = \"+19086421986\"\n\ndata = '{\"to\": \"' + number + '\",\"body\": \"You have a new message on SafeCollab!\"}'\nheaders = {\n 'Content-Type': 'application/json',\n }\nurl = \"https://5726cb496e1ff90008000003:0b191af91f1c3a5f9a73f87178915955@api.easysmsapp.com/accounts/5726cb496e1ff90008000003\" + '/messages'\nr = requests.post(url, headers=headers, data=data)\nprint(r.status_code)\nprint(r.content)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"196856295","text":"import random,pickle\nimport numpy as np\nbasemodelpath=\"../models/\"\ndef combineCapitalSingle(words):\n\tcombinedWords=[]\n\tcomword=\"\"\n\tfor word in 
words:\n\t\tif len(word)>1 or (len(word)==1 and not 65<=ord(word)<=90 and not 48<=ord(word)<=57 ) :\n\t\t\tif len(comword)>0:\n\t\t\t\tcombinedWords.append(comword)\n\t\t\t\tcomword=\"\"\n\t\t\tcombinedWords.append(word)\n\t\telse:\n\t\t\tcomword+=word\n\tif len(comword) > 0:\n\t\tcombinedWords.append(comword)\n\treturn combinedWords\n\ndef dividehashTag(hasgtag):\n\thasgtag=hasgtag.replace(\"_\",\" \")\n\tallwords=[]\n\tcurrentword=\"\"\n\tfor ch in hasgtag:\n\t\tif (65<=ord(ch)<=90 or 48<=ord(ch)<=57 or ch==\" \") and len(currentword)>0:\n\t\t\tallwords.append(currentword)\n\t\t\tcurrentword=\"\"\n\t\tif ch!=\" \":\n\t\t\tcurrentword+=ch\n\tallwords.append(currentword)\n\tallwords=combineCapitalSingle(allwords)\n\thasgtag=(\" \".join(allwords))\n\treturn hasgtag\n\n\ndef replcaeHashtag(sentence):\n\tif \"#\" in sentence:\n\t\tsentence1,sentence2=sentence.split(\"#\",1)\n\t\tsentences=sentence2.strip().split(\" \",1)\n\t\tif len(sentences)==1:\n\t\t\thastag=dividehashTag(sentences[0])\n\t\t\treturn sentence1.strip()+\" \"+hastag,[0]*len(sentence1.split())+[1]*len(hastag.split())\n\t\telse:\n\t\t\thasgtag,sentence2=sentence2.split(\" \",1)\n\t\t\thashtag,(sentence2,mask)=dividehashTag(hasgtag),replcaeHashtag(sentence2)\n\t\t\treturn sentence1.strip() +\" \"+ hashtag + \" \" + sentence2,[0]*len(sentence1.split())+[1]*len(hashtag.split())+mask\n\n\treturn sentence.strip(),[0]*len(sentence.split())\n\ndef clearnSentence(sentence):\n\tfor sep in [\"\\\\n\",\"।\",\"॥\"]:\n\t\tsentence = sentence.replace(sep, \" \")\n\twhile \"  \" in sentence:\n\t\tsentence=sentence.replace(\"  \",\" \")\n\tsentence=sentence.replace(\"!\",\"\").replace(\"\\\"\",\"\").replace(\"##\",\"#\")\n\tsentence=\"\".join([s for s in sentence if ord(s)<5000])\n\tsentence,map=replcaeHashtag(sentence)\n\twhile \"  \" in sentence:\n\t\tsentence=sentence.replace(\"  \",\" \")\n\treturn sentence.strip(),map\n\ndef prepaireDict(all_sentences,minword=3):\n\twords=[d1 for data in all_sentences for d1 in data[0].split() if len(d1)>0]\n\tunique_word,un_count=np.unique(words, return_counts=True)\n\tunique_word=[w for w,c in zip(unique_word, un_count) if c>=minword]\n\tdict={w:i for w,i in zip(unique_word,range(1,1+len(unique_word)))}\n\twith open(basemodelpath+\"Dict.pk\",\"wb\") as f:\n\t\tpickle.dump(dict,f)\n\tprint(\"Total unique Word\",len(dict))\n\treturn dict\n\ndef convertdataToNumber(data,dict,label,padding=50):\n\tindata=[]\n\tfor sent,masks in data:\n\t\twords=sent.split()\n\t\tassert len(words)==len(masks), \"Issue in preprocessing\"\n\t\tsentindex=[]\n\t\tmaskindex=[]\n\t\tfor word,wordmask in zip(words,masks):\n\t\t\tif word in dict:\n\t\t\t\tsentindex.append(dict[word])\n\t\t\t\tmaskindex.append(wordmask)\n\t\tif len(sentindex)>padding:\n\t\t\tsentindex=sentindex[:padding]\n\t\t\tmaskindex=maskindex[:padding]\n\t\telif len(sentindex)
 PLATEAU_INPUT = r'(?P<x>[0-9]+) (?P<y>[0-9]+)'\n PLATEAU_INPUT_DESCRIPTION = 'Plateau: '\n\n ROVER_LANDING_INPUT = \\\n r'(?P<name>Rover[0-9]+) Landing:(?P<x>[0-9]+) (?P<y>[0-9]+) (?P<heading>[NSEW])'\n ROVER_LANDING_INPUT_DESCRIPTION = 'Rover Landing: '\n\n ROVER_INSTRUCTIONS_INPUT = \\\n r'(?P<name>Rover[0-9]+) Instructions:(?P<instructions>[MRL]+)'\n ROVER_INSTRUCTIONS_INPUT_DESCRIPTION = \\\n 'Rover Instructions:'\n\n def process_input(self):\n print(\" ===================================\")\n print(\"Welcome to Mars\\n\")\n print(\"Provide the Plateau params (format: {})\".format(\n Parser.PLATEAU_INPUT_DESCRIPTION))\n plateau = Plateau(**self.parse_plateau_params(line=input()))\n i = 0\n while True:\n name = 
\"Rover{}\".format(i + 1)\n print(\n \"Please provide landing for {} in \"\n \"the format: {}\".format(\n name, Parser.ROVER_LANDING_INPUT_DESCRIPTION))\n rover = Rover(**self.parse_rover_params(line=input()))\n print(\n \"Please provide instructions for {} in \"\n \"the format: {}\".format(\n name, Parser.ROVER_INSTRUCTIONS_INPUT_DESCRIPTION))\n instructions = self.parse_rover_instructions(line=input())\n self.process_rover_instructions(\n rover=rover, instructions=instructions)\n print(self.get_rover_position(name=name, rover=rover))\n i += 1\n\n def parse_input_with_regex(\n self, text=None, regex=None, regex_description=None):\n \"\"\" Parses the input with the given regex. Raises a ValueError\n based on the description if the format is not met. Returns\n a dictionary\n \"\"\"\n match = re.match(regex, (text or \"\").strip())\n if not match:\n raise ValueError(\n \"Input '{}' did not match the given format: {}\".format(\n text, regex_description))\n return match.groupdict()\n\n def parse_plateau_params(self, line=None):\n \"\"\" Gets parameters to intiialize the Plateau\n \"\"\"\n params = self.parse_input_with_regex(\n text=line,\n regex=Parser.PLATEAU_INPUT,\n regex_description=Parser.PLATEAU_INPUT_DESCRIPTION)\n x = int(params.get(\"x\"))\n y = int(params.get(\"y\"))\n return dict(size=(x, y))\n\n def parse_rover_params(self, line=None):\n \"\"\" Gets parameters to initialize the Rover\n \"\"\"\n params = self.parse_input_with_regex(\n text=line,\n regex=Parser.ROVER_LANDING_INPUT,\n regex_description=Parser.ROVER_LANDING_INPUT_DESCRIPTION)\n x = int(params.get(\"x\"))\n y = int(params.get(\"y\"))\n heading = params.get(\"heading\")\n return dict(x=x, y=y, heading=heading)\n\n def parse_rover_instructions(self, line=None):\n \"\"\" Pulls out the instructions from the command line\n \"\"\"\n params = self.parse_input_with_regex(\n text=line,\n regex=Parser.ROVER_INSTRUCTIONS_INPUT,\n regex_description=Parser.ROVER_INSTRUCTIONS_INPUT_DESCRIPTION)\n instructions = list(params.get(\"instructions\"))\n return instructions\n\n def process_rover_instructions(self, rover, instructions):\n for instruction in instructions:\n self.process_rover_instruction(rover, instruction)\n\n def process_rover_instruction(self, rover, instruction):\n if instruction == ACTION_MOVE:\n rover.move()\n elif instruction == ACTION_LEFT:\n rover.turn_left()\n elif instruction == ACTION_RIGHT:\n rover.turn_right()\n\n def get_rover_position(self, name=None, rover=None):\n \"\"\" Returns the rover position in the expected format\n \"\"\"\n return \"{}:{} {} {}\".format(\n name, rover.x, rover.y, rover.heading)\n","sub_path":"rover/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"262244626","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Container',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Hostinfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('version', models.CharField(max_length=100)),\n ('ip', models.CharField(max_length=20)),\n ('memory', models.CharField(max_length=20)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Images',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=110)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('container', models.ManyToManyField(to='web_site.Container')),\n ('images', models.ManyToManyField(to='web_site.Images')),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"docker_web/web_site/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"131045495","text":"from django.urls import include, path\n\n# Recibe un viewSet \nfrom rest_framework.routers import DefaultRouter\n\n#DjangoRF nos da routers que recibe un viewset y genera los paths que necesitas\n\n#views\nfrom .views import rides as ride_views \n\nrouter = DefaultRouter()\n\nrouter.register(\n r'circles/(?P[-a-zA-Z0-0_]*)/rides',\n ride_views.RideViewSet,\n basename='ride'\n)\n\n\nurlpatterns = [\n path('',include(router.urls))\n]","sub_path":"rides/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165389255","text":"import numpy as np\n\nfrom PuzzleLib.Backend import gpuarray\n\nfrom PuzzleLib.Modules.Module import ModuleError\nfrom PuzzleLib.Modules.BatchNormND import BatchNormND\n\n\nclass BatchNorm3D(BatchNormND):\n\tdef __init__(self, maps, epsilon=1e-5, initFactor=1.0, minFactor=0.1, sscale=0.01, affine=True, name=None,\n\t\t\t\t empty=False, inplace=False):\n\t\tsuper().__init__(3, maps, epsilon, initFactor, minFactor, sscale, affine, name, empty, inplace)\n\t\tself.registerBlueprint(locals())\n\n\n\tdef checkDataShape(self, shape):\n\t\tif len(shape) != 5:\n\t\t\traise ModuleError(\"Data must be 5d tensor\")\n\n\t\t_, maps, _, _, _ = shape\n\t\tif maps != self.maps:\n\t\t\traise ModuleError(\"Data has %d maps (expected: %d)\" % (maps, self.maps))\n\n\n\tdef checkGradShape(self, shape):\n\t\tif len(shape) != 5:\n\t\t\traise ModuleError(\"Grad must be 5d tensor\")\n\n\t\t_, maps, _, _, _ = shape\n\t\tif maps != self.maps:\n\t\t\traise ModuleError(\"Grad has %d maps (expected: %d)\" % (maps, self.maps))\n\n\ndef unittest():\n\tbatchsize, maps, d, h, w = 8, 5, 3, 4, 2\n\tdata = gpuarray.to_gpu(np.random.randn(batchsize, maps, d, h, w).astype(np.float32))\n\n\tbn = BatchNorm3D(maps)\n\tbn(data)\n\n\thostData, hostScale, hostBias = data.get(), bn.scale.get(), bn.bias.get()\n\thostNormData, hostOutData = np.empty(hostData.shape, dtype=np.float32), np.empty(hostData.shape, 
dtype=np.float32)\n\thostMean, hostInvVar = np.zeros(hostScale.shape, dtype=np.float32), np.zeros(hostScale.shape, dtype=np.float32)\n\tfor c in range(maps):\n\t\tfor b in range(batchsize):\n\t\t\thostMean[0, c, 0, 0, 0] += np.sum(hostData[b, c])\n\t\thostMean[0, c, 0, 0, 0] /= (batchsize * w * h * d)\n\n\t\tfor b in range(batchsize):\n\t\t\thostInvVar[0, c, 0, 0, 0] += np.sum((hostData[b, c] - hostMean[0, c, 0, 0, 0])**2)\n\t\thostInvVar[0, c, 0, 0, 0] /= (batchsize * w * h * d)\n\n\t\thostInvVar[0, c, 0, 0, 0] = 1.0 / np.sqrt(hostInvVar[0, c, 0, 0, 0] + bn.epsilon)\n\t\thostNormData[:, c, :, :, :] = (hostData[:, c, :, :, :] - hostMean[0, c, 0, 0, 0]) * hostInvVar[0, c, 0, 0, 0]\n\t\thostOutData[:, c, :, :, :] = hostNormData[:, c, :, :, :] * hostScale[0, c, 0, 0, 0] + hostBias[0, c, 0, 0, 0]\n\n\tassert np.allclose(hostMean, bn.mean.get())\n\tassert np.allclose(hostInvVar, bn.saveinvvar.get())\n\tassert np.allclose(hostOutData, bn.data.get())\n\n\tgrad = gpuarray.to_gpu(np.random.randn(batchsize, maps, d, h, w).astype(np.float32))\n\tbn.backward(grad)\n\n\thostGrad, hostInGrad = grad.get(), np.empty_like(hostData)\n\thostScaleGrad, hostBiasGrad = np.empty_like(hostScale), np.empty_like(hostBias)\n\thostMeanGrad, hostVarGrad = np.empty_like(hostMean), np.empty_like(hostInvVar)\n\tfor c in range(maps):\n\t\thostBiasGrad[0, c, 0, 0, 0] = np.sum(hostGrad[:, c, :, :, :])\n\t\thostScaleGrad[0, c, 0, 0, 0] = np.sum(hostGrad[:, c, :, :, :] * hostNormData[:, c, :, :, :])\n\n\t\thostMeanGrad[0, c, 0, 0, 0] = np.sum(hostGrad[:,c,:,:,:]) * hostScale[0,c,0,0,0] * -hostInvVar[0,c,0,0,0]\n\t\thostVarGrad[0, c, 0, 0, 0] = np.sum(hostGrad[:,c,:,:,:] * (hostData[:,c,:,:,:] - hostMean[0,c,0,0,0])) * \\\n\t\t\t\t\t\t\t\t\t hostScale[0, c, 0, 0, 0] * -0.5 * hostInvVar[0, c, 0, 0, 0]**3\n\n\t\thostInGrad[:, c, :, :, :] = hostGrad[:,c,:,:,:] * hostScale[0,c,0,0,0] * hostInvVar[0,c,0,0,0] + \\\n\t\t\t\t\t\t\t\t\thostVarGrad[0, c, 0, 0, 0] * 2.0 / (batchsize * w * h * d) * \\\n\t\t\t\t\t\t\t\t\t(hostData[:, c, :, :, :] - hostMean[0, c, 0, 0, 0]) + \\\n\t\t\t\t\t\t\t\t\thostMeanGrad[0, c, 0, 0, 0] / (batchsize * w * h * d)\n\n\tassert np.allclose(hostInGrad, bn.grad.get())\n\tassert np.allclose(hostScaleGrad, bn.vars[\"scale\"].grad.get())\n\tassert np.allclose(hostBiasGrad, bn.vars[\"bias\"].grad.get())\n\n\nif __name__ == \"__main__\":\n\tunittest()\n","sub_path":"Modules/BatchNorm3D.py","file_name":"BatchNorm3D.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"519281696","text":"from direct.actor.Actor import Actor\nfrom pandac.PandaModules import *\nfrom direct.task import Task\nimport math\nfrom math import pi, sin, cos\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task\nfrom direct.interval.IntervalGlobal import Sequence\nfrom pandac.PandaModules import Point3\nfrom pandac.PandaModules import *\n\nimport direct.directbase.DirectStart\nfrom panda3d.core import CollisionTraverser,CollisionNode\nfrom panda3d.core import CollisionHandlerQueue,CollisionRay\nfrom panda3d.core import Filename,AmbientLight,DirectionalLight\nfrom panda3d.core import PandaNode,NodePath,Camera,TextNode\nfrom panda3d.core import Vec3,Vec4,BitMask32\nfrom direct.gui.OnscreenText import OnscreenText\nfrom direct.actor.Actor import Actor\nfrom direct.showbase.DirectObject import DirectObject\n\nterrain = 
loader.loadModel('phase_5/models/cogdominium/cogdominiumElevator.bam')\nterrain.reparentTo(render)\nterrain.setScale(2.25)\nterrain.setZ(1.00)\n\nCogt = Actor('phase_3.5/models/char/suitA-mod.bam', {'Anim' : 'phase_4/models/char/suitA-neutral.bam'})\nCogt.reparentTo(render)\nCogt.loop('Anim')\nTorsoTex = loader.loadTexture('phase_3.5/maps/s_blazer.jpg')\nCogt.find('**/torso').setTexture(TorsoTex, 1)\nArmTex = loader.loadTexture('phase_3.5/maps/s_sleeve.jpg')\nCogt.find('**/arms').setTexture(ArmTex, 1)\nLegTex = loader.loadTexture('phase_3.5/maps/s_leg.jpg')\nCogt.find('**/legs').setTexture(LegTex, 1)\nHead = Actor('phase_3/models/char/tt_a_chr_dgm_skirt_head_1000.bam',\n {'anim':'phase_3/models/char/tt_a_chr_dgm_skirt_head_neutral.bam'})\n \nHead.setColor(1, 1, 1)\nHead.loop('anim')\nHead.find('**/ears').setColor(0)\nHead.find('**/muzzle').setColor(0.87, 0.65, 0.47)\nTop_Head = Head.find('**/head')\nBot_Head = Head.find('**/head-front')\nHead.reparentTo(Cogt.find('**/joint_head'))\nTop_Head.setColor(0.30, 0.73, 0.84, 1.0)\nBot_Head.setColor(0.30, 0.73, 0.84, 1.0)\nHead.setPos(0, -0.33, 0.22,)\nCogt.setScale(1.27)\nCogt.setX(-5.67)\nCogt.setY(9.91)\nCogt.setZ(1.00)\nCogt.setH(264.17)\n\nWhiskers22 = Actor({'Torso':'phase_3/models/char/tt_a_chr_dgl_shorts_torso_1000.bam', \\\n 'Legs':'phase_3/models/char/tt_a_chr_dgl_shorts_legs_1000.bam'}, \\\n {'Torso':{'Idle': 'phase_3/models/char/tt_a_chr_dgl_shorts_torso_1000.bam', \\\n 'TorsoAnim':'phase_3/models/char/tt_a_chr_dgl_shorts_torso_neutral.bam'}, \\\n 'Legs':{'Idle':'phase_3/models/char/tt_a_chr_dgl_shorts_legs_1000.bam',\n 'LegsAnim':'phase_3/models/char/tt_a_chr_dgl_shorts_legs_neutral.bam'}})\n \nWhiskers22.attach('Torso', 'Legs', 'joint_hips')\n \n#Animations\nWhiskers22.loop('TorsoAnim')\nWhiskers22.loop('LegsAnim')\n \n#Pos, Hpr, Scale, ReparentTo\nWhiskers22.setPos(0, 0, 0)\nWhiskers22.setHpr(0, 0, 0)\nWhiskers22.setScale(0.90)\nWhiskers22.reparentTo(render)\n \n#Head\nHead = loader.loadModel('phase_3/models/char/cat-heads-1000.bam')\nHead.find('**/muzzle-short-surprise').hide()\nHead.find('**/muzzle-short-sad').hide()\nHead.find('**/muzzle-short-smile').hide()\nHead.find('**/muzzle-short-angry').hide()\nHead.find('**/muzzle-short-laugh').hide()\nHead.find('**/head-short').hide()\nHead.find('**/head-front-short').hide()\nHead.find('**/muzzle-long-surprise').hide()\nHead.find('**/muzzle-long-sad').hide()\nHead.find('**/muzzle-long-smile').hide()\nHead.find('**/muzzle-long-angry').hide()\nHead.find('**/muzzle-long-laugh').hide()\nHead.find('**/eyes-short').hide()\nHead.find('**/joint_pupilL_long').hide()\nHead.find('**/joint_pupilR_long').hide()\nHead.find('**/ears-short').hide()\n \nNeck = Whiskers22.find('**/def_head')\nHead.reparentTo(Neck)\n \n#******************************Clothes******************************\n \n \n#Gloves\nGloves = Whiskers22.find('**/hands')\nGloves.setColor(0.99, 0.99, 0.99)\n \n#Sleeves\nSleeves = loader.loadTexture('phase_4/maps/4thJulySleeve2.jpg')\nWhiskers22.find('**/sleeves').setTexture(Sleeves, 1)\n \n#Shirts\nShirt = loader.loadTexture('phase_4/maps/4thJulyShirt2.jpg')\nWhiskers22.find('**/torso-top').setTexture(Shirt, 1)\n \n#Shorts\nShorts = loader.loadTexture('phase_4/maps/4thJulyShorts1.jpg')\nWhiskers22.find('**/torso-bot').setTexture(Shorts, 1)\n \n#Shoes/Boots\nWhiskers22.find('**/shoes').hide()\nWhiskers22.find('**/shoes').hide()\nWhiskers22.find('**/boots_long').hide()\nWhiskers22.find('**/boots_short').hide()\n\n \n#Hats\n#Hat = 
loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_hat_topHat.bam')\n#Hat.reparentTo(Head.find('**/head-short'))\n#Hat.setPos(0, -0.10, 0.75)\n#Hat.setHpr(180.00, 350.00, 0)\n#Hat.setScale(0.45)\n \n#Glasses\n#Glasses = loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_msk_squareRims.bam')\n#Glasses.reparentTo(Head.find('**/head-front-short'))\n#Glasses.setZ(0.30)\n#Glasses.setHpr(180, 350.00, 0)\n#Glasses.setScale(0.40)\n \n#Colors\nHead.find('**/head-long').setColor(0.72, 0.2, 0.21, 1.0)\nHead.find('**/head-front-long').setColor(0.72, 0.2, 0.21, 1.0)\nHead.find('**/ears-long').setColor(0.72, 0.2, 0.21, 1.0)\nWhiskers22.find('**/neck').setColor(0.72, 0.2, 0.21, 1.0)\nWhiskers22.find('**/arms').setColor(0.72, 0.2, 0.21, 1.0)\nWhiskers22.find('**/legs').setColor(0.72, 0.2, 0.21, 1.0)\nWhiskers22.find('**/feet').setColor(0.72, 0.2, 0.21, 1.0)\nWhiskers22.setX(78.27)\nWhiskers22.setY(0.17)\nWhiskers22.setZ(7.01)\nWhiskers22.setH(95)\nWhiskers22.setScale(1.07)\n\nCogt2 = Actor('phase_3.5/models/char/suitA-mod.bam', {'Anim' : 'phase_4/models/char/suitA-neutral.bam'})\nCogt2.reparentTo(render)\nCogt2.loop('Anim')\nTorsoTex = loader.loadTexture('phase_3.5/maps/s_blazer.jpg')\nCogt2.find('**/torso').setTexture(TorsoTex, 1)\nArmTex = loader.loadTexture('phase_3.5/maps/s_sleeve.jpg')\nCogt2.find('**/arms').setTexture(ArmTex, 1)\nLegTex = loader.loadTexture('phase_3.5/maps/s_leg.jpg')\nCogt2.find('**/legs').setTexture(LegTex, 1)\nHead = Actor('phase_3/models/char/tt_a_chr_dgm_skirt_head_1000.bam',\n {'anim':'phase_3/models/char/tt_a_chr_dgm_skirt_head_neutral.bam'})\n \nHead.setColor(1, 1, 1)\nHead.loop('anim')\nHead.find('**/ears').setColor(0)\nHead.find('**/muzzle').setColor(0.87, 0.65, 0.47)\nTop_Head = Head.find('**/head')\nBot_Head = Head.find('**/head-front')\nHead.reparentTo(Cogt2.find('**/joint_head'))\nTop_Head.setColor(0.992188, 0.480469, 0.167969, 1.0)\nBot_Head.setColor(0.992188, 0.480469, 0.167969, 1.0)\nHead.setPos(0, -0.33, 0.22,)\nCogt2.setScale(1.27)\nCogt2.setX(5.67)\nCogt2.setY(9.91)\nCogt2.setZ(1.00)\nCogt2.setH(-264.17)\n\nbase.oobe()\nrun()\n","sub_path":"projects/old py files/movie(11).py","file_name":"movie(11).py","file_ext":"py","file_size_in_byte":6341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"635802382","text":"# Copyright (c) 2010-2012 OpenStack, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom ConfigParser import ConfigParser, NoSectionError, NoOptionError, \\\n RawConfigParser\n\nfrom webob.exc import HTTPBadRequest, HTTPLengthRequired, \\\n HTTPRequestEntityTooLarge\n\nconstraints_conf = ConfigParser()\nconstraints_conf.read('/etc/swift/swift.conf')\n\n\ndef constraints_conf_int(name, default):\n try:\n return int(constraints_conf.get('swift-constraints', name))\n except (NoSectionError, NoOptionError):\n return default\n\n\n#: Max file size allowed for objects\nMAX_FILE_SIZE = 5 * 1024 * 1024 * 1024 + 2\nMAX_FILE_SIZE = 
constraints_conf_int('max_file_size',\n 5368709122) # 5 * 1024 * 1024 * 1024 + 2\n#: Max length of the name of a key for metadata\nMAX_META_NAME_LENGTH = 128\nMAX_META_NAME_LENGTH = constraints_conf_int('max_meta_name_length', 128)\n#: Max length of the value of a key for metadata\nMAX_META_VALUE_LENGTH = 256\nMAX_META_VALUE_LENGTH = constraints_conf_int('max_meta_value_length', 256)\n#: Max number of metadata items\nMAX_META_COUNT = 90\nMAX_META_COUNT = constraints_conf_int('max_meta_count', 90)\n#: Max overall size of metadata\nMAX_META_OVERALL_SIZE = 4096\nMAX_META_OVERALL_SIZE = constraints_conf_int('max_meta_overall_size', 4096)\n#: Max object name length\nMAX_OBJECT_NAME_LENGTH = 1024\nMAX_OBJECT_NAME_LENGTH = constraints_conf_int('max_object_name_length', 1024)\n#: Max object list length of a get request for a container\nCONTAINER_LISTING_LIMIT = 10000\nCONTAINER_LISTING_LIMIT = constraints_conf_int('container_listing_limit',\n 10000)\n#: Max container list length of a get request for an account\nACCOUNT_LISTING_LIMIT = 10000\nMAX_ACCOUNT_NAME_LENGTH = 256\nMAX_CONTAINER_NAME_LENGTH = 256\nACCOUNT_LISTING_LIMIT = constraints_conf_int('account_listing_limit', 10000)\n#: Max account name length\nMAX_ACCOUNT_NAME_LENGTH = constraints_conf_int('max_account_name_length', 256)\n#: Max container name length\nMAX_CONTAINER_NAME_LENGTH = constraints_conf_int('max_container_name_length',\n 256)\n\n\n#: Query string format= values to their corresponding content-type values\nFORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',\n 'xml': 'application/xml'}\n\n\ndef check_object_creation(req, object_name):\n \"\"\"\n Check to ensure that everything is alright about an object to be created.\n\n :param req: HTTP request object\n :param object_name: name of object to be created\n :raises HTTPRequestEntityTooLarge: the object is too large\n :raises HTTPLengthRequered: missing content-length header and not\n a chunked request\n :raises HTTPBadRequest: missing or bad content-type header, or\n bad metadata\n \"\"\"\n if req.content_length and req.content_length > MAX_FILE_SIZE:\n return HTTPRequestEntityTooLarge(body='Your request is too large.',\n request=req, content_type='text/plain')\n if req.content_length is None and \\\n req.headers.get('transfer-encoding') != 'chunked':\n return HTTPLengthRequired(request=req)\n \n if len(object_name) > MAX_OBJECT_NAME_LENGTH:\n return HTTPBadRequest(body='Object name length of %d longer than %d' %\n (len(object_name), MAX_OBJECT_NAME_LENGTH), request=req,\n content_type='text/plain')\n \n return None\n\n\ndef check_mount(root, drive):\n \"\"\"\n Verify that the path to the device is a mount point and mounted. 
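# A self-contained sketch of the constraints_conf_int fallback pattern used
# above, with a hypothetical option value (Python 2, matching this module):
from ConfigParser import ConfigParser, NoSectionError, NoOptionError

def conf_int(parser, section, name, default):
    """Return the option as an int, or default if the section/option is absent."""
    try:
        return int(parser.get(section, name))
    except (NoSectionError, NoOptionError):
        return default

empty_conf = ConfigParser()  # nothing loaded, so the default wins
assert conf_int(empty_conf, 'swift-constraints', 'max_file_size', 42) == 42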
This\n allows us to fast fail on drives that have been unmounted because of\n issues, and also prevents us for accidently filling up the root partition.\n\n :param root: base path where the devices are mounted\n :param drive: drive name to be checked\n :returns: True if it is a valid mounted device, False otherwise\n \"\"\"\n if not drive.isalnum():\n return False\n path = os.path.join(root, drive)\n return os.path.exists(path) and os.path.ismount(path)\n\n\ndef check_float(string):\n \"\"\"\n Helper function for checking if a string can be converted to a float.\n\n :param string: string to be verified as a float\n :returns: True if the string can be converted to a float, False otherwise\n \"\"\"\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n\ndef check_utf8(string):\n \"\"\"\n Validate if a string is valid UTF-8 str or unicode\n\n :param string: string to be validated\n :returns: True if the string is valid utf-8 str or unicode, False otherwise\n \"\"\"\n if not string:\n return False\n try:\n if isinstance(string, unicode):\n string.encode('utf-8')\n else:\n string.decode('UTF-8')\n return True\n # If string is unicode, decode() will raise UnicodeEncodeError\n # So, we should catch both UnicodeDecodeError & UnicodeEncodeError\n except UnicodeError:\n return False\n","sub_path":"common/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"372984247","text":"import os\r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DJANGO_PROJECT.settings')\r\n\r\nimport django\r\ndjango.setup()\r\n\r\nimport random\r\nfrom users_app.models import User\r\nfrom faker import Faker\r\n\r\nfakegen = Faker()\r\n\r\n\r\ndef populate(N=5):\r\n for entry in range(N):\r\n\r\n fake_first_name = fakegen.first_name()\r\n fake_second_name = fakegen.last_name()\r\n fake_email = fakegen.email()\r\n\r\n user = User.objects.get_or_create(first_name=fake_first_name, second_name=fake_second_name, email=fake_email)[0]\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"populating script\")\r\n populate(20)\r\n print(\"populating completed!\")","sub_path":"populate_script.py","file_name":"populate_script.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"411740922","text":"import re\nimport os\nimport csv\n\n\nwith open(os.path.join(os.getcwd(), 'phonebook_raw.csv'), 'r', encoding=\"utf-8\") as f:\n rows = csv.reader(f, delimiter =',')\n contact_lists = list(rows)\n\nphonebook = []\nphonebook_lst = []\n\n\ndef delete_commas():\n for i in contact_lists:\n list_i = list(filter(lambda x: x != '', i))\n phonebook_lst.append(list_i)\n\n\ndef separation_last_first_surname(phonebook_lst):\n lst1, lst2 = [], []\n for i in phonebook_lst:\n for j in range(2):\n lst1 = lst1 + (i[j].split(' '))\n lst1 = lst1 + (i[2:])\n lst2.append(lst1)\n lst1 =[]\n return lst2\n\n\ndef addition_without_repetitions(lst1, lst2):\n for i in lst2:\n if i not in lst1:\n lst1.append(i)\n return lst1\n\n\ndef joining_repeats(separation_last_first_surname):\n lst, k = [], 1\n for i in range(len(separation_last_first_surname)):\n for j in range(k, len(separation_last_first_surname)):\n if separation_last_first_surname[i][0:2] == \\\n separation_last_first_surname[j][0:2]:\n lst.append(addition_without_repetitions(separation_last_first_surname[i],\n separation_last_first_surname[j]))\n k += 1\n return 
lst\n\n\ndef assembly_phonebook():\n global phonebook_lst\n lst1 = separation_last_first_surname(phonebook_lst)\n lst2 = joining_repeats(separation_last_first_surname(phonebook_lst))\n for i in joining_repeats(separation_last_first_surname(phonebook_lst)):\n for j in separation_last_first_surname(phonebook_lst):\n if i[0:2] == j[0:2]:\n lst1.remove(j)\n phonebook_lst = lst1+lst2\n return phonebook_lst\n\n\ndef regex_phone(number):\n pattern1 = r'\\доб.'\n ext_number = re.findall(pattern1, number)\n if len(ext_number) != 0:\n pattern2 = r'(\\+7|8).*?(\\d{3}).*?(\\d{3}).*?(\\d{2}).*?(\\d{2}).*?(\\доб.(?= )).*?(\\d+)'\n phone = re.match(pattern2, number)\n return '8' + '(' + phone.group(2) + ')'+phone.group(3)+'-'+phone.group(4)+'-'+phone.group(5)+\\\n ' '+phone.group(6)+phone.group(7)\n else:\n pattern2 = r'(\\+7|8).*?(\\d{3}).*?(\\d{3}).*?(\\d{2}).*?(\\d{2})'\n phone = re.match(pattern2, number)\n return '8' + '(' + phone.group(2) + ')'+phone.group(3)+'-'+phone.group(4)+'-'+phone.group(5)\n\n\ndef phone_number_or_not(str):\n try:\n return regex_phone(str)\n except Exception:\n return str\n\n\ndef phone_or_email(str):\n phone = r'(\\+7|8).*?(\\d{3}).*?(\\d{3}).*?(\\d{2}).*?(\\d{2})'\n email = r'\\@\\w*\\.'\n result_phone = re.findall(phone, str)\n result_email = re.findall(email, str)\n if len(result_phone) > 0:\n return 'its phone'\n elif len(result_email) > 0:\n return 'its email'\n\n\ndef correcting_phone_number():\n for i in range(len(phonebook_lst)):\n for j in range(len(phonebook_lst[i])):\n phonebook_lst[i][j] = phone_number_or_not(phonebook_lst[i][j])\n\n\ndef fix_incorrect_locations():\n for i in range(len(assembly_phonebook())):\n A = assembly_phonebook()[i][-2]\n B = assembly_phonebook()[i][-1]\n if phone_or_email(A) == 'its phone' and phone_or_email(B) != 'its email':\n assembly_phonebook()[i][-2] = B\n assembly_phonebook()[i][-1] = A\n elif phone_or_email(A) == 'its email' and phone_or_email(B) == 'its phone':\n assembly_phonebook()[i][-2] = B\n assembly_phonebook()[i][-1] = A\n\n\nif __name__ == '__main__':\n delete_commas()\n correcting_phone_number()\n fix_incorrect_locations()\n\nwith open(os.path.join(os.getcwd(), 'phonebook_new.csv'),mode='w', newline='', encoding=\"utf-8\") as f:\n datawriter = csv.writer(f, delimiter=',')\n datawriter.writerows(phonebook_lst)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498357017","text":"#Natural Language Processing\n#Importing libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#Import the dataset\ndataset=pd.read_csv('Restaurant_Reviews.tsv',delimiter='\\t',quoting=3)#quoting for double quote ignorance\n\n#cleaning the text\nimport re # to clean the texts\nimport nltk#nltk is the library which will download stopwords package\nnltk.download('stopwords')#stop contains irrelevent words e.g(is,this,the,a,on,of etc)\nfrom nltk.corpus import stopwords#so we remove those stopwords if any in the reviews becoz stopwors will not help to give any hint either the review good or bad\nfrom nltk.stem.porter import PorterStemmer#convert words to root words e.g loved,loving,loves to love\ncorpus=[]\nfor i in range(0,1000):\n review=re.sub('[^a-zA-Z]',' ',dataset['Review'][i])#a-zA-z is not to remove any letters \n review=review.lower()# lower the alphsbets in the string\n review=review.split()# conver string of words to list of words\n ps=PorterStemmer()\n 
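# Inline aside (uses only names already in scope above; safe to run here):
    # what the stopword filter + Porter stemmer below do to one toy review.
    _demo_words = 'Loved the crust loving it'.lower().split()
    _demo_stop = set(stopwords.words('english'))
    _demo_stems = [ps.stem(w) for w in _demo_words if w not in _demo_stop]
    assert _demo_stems == ['love', 'crust', 'love']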
review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]#stem only those words which are not stopwords\n review=' '.join(review)#make again from list of words to string of words e.g ['wow','love'] to wow love\n corpus.append(review)\n\n#Creating the Bag of Words model\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv=CountVectorizer(max_features=1500)\nX=cv.fit_transform(corpus).toarray()\nY=dataset.iloc[:,1].values\n#X=pd.DataFrame(X[:,:],columns=cv.get_feature_names())\n\n#Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0)\n#Fitting Naive Bayes Classifier to the training set\n\nfrom sklearn.naive_bayes import GaussianNB\nclassifier=GaussianNB()\nclassifier.fit(X_train,Y_train)\n#Predicting the test set results\n\ny_pred=classifier.predict(X_test)\n#Making the confusion Matrix\n\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(Y_test,y_pred)\nprint(cm)\nfrom sklearn.metrics import precision_score, recall_score,f1_score,accuracy_score\nAccuracy=accuracy_score(Y_test,y_pred)*100\nprint(Accuracy)\nPrecision=precision_score(Y_test,y_pred)\nprint(Precision)\nRecall=recall_score(Y_test,y_pred)\nprint(Recall)\nF1_Score=f1_score(Y_test,y_pred)\nprint(F1_Score)\n#Accuracy = (TP + TN) / (TP + TN + FP + FN)\n#Precision = TP / (TP + FP)\n#Recall = TP / (TP + FN)\n#F1 Score = 2 * Precision * Recall / (Precision + Recall)","sub_path":"NLP.py","file_name":"NLP.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"238993177","text":"import requests\nimport json\ntime = [22,23,24]\ngameId = 4659518008\nurl = \"https://kr.api.riotgames.com/lol/match/v4/timelines/by-match/\"+str(gameId)+\"?api_key=RGAPI-4dcd2099-2605-4440-9864-f53a305141e7\"\ntime_data = requests.get(url).json()[\"frames\"]\n# print(time_data)\n\n# 필수 이벤트\nevents_creator_id = [\"WARD_PLACED\"]\nevents_killer_id = [\"WARD_KILL\", \"BUILDING_KILL\", \"CHAMPION_KILL\"]\nevents_moster_kill = [\"ELITE_MONSTER_KILL\"]\n\npart_set = [dict() for _ in range(11)]\n# print(part_set)\ntime_set = dict()\n\n\n\nfor t in time:\n events = time_data[t][\"events\"]\n for event in events:\n # 이벤트가 필수 이벤트(creatorId)라면\n if event[\"type\"] in events_creator_id:\n real_event = event[\"type\"]\n # 해당 인덱스의 dict에 횟수를 누적한다\n part_set[event[\"creatorId\"]].setdefault(real_event, 0)\n part_set[event[\"creatorId\"]][real_event] += 1\n\n # 이벤트가 필수 이벤트(killerId)라면\n elif event[\"type\"] in events_killer_id:\n real_event = event[\"type\"]\n # 해당 인덱스의 dict에 횟수를 누적한다\n part_set[event[\"killerId\"]].setdefault(real_event, 0)\n part_set[event[\"killerId\"]][real_event] += 1\n \n # 이벤트가 필수 이벤트(monsterkill)라면\n elif event[\"type\"] in events_moster_kill:\n real_event = event[\"type\"]\n # 해당 인덱스의 dict에 횟수를 누적한다\n part_set[event[\"killerId\"]].setdefault(real_event, [])\n part_set[event[\"killerId\"]][real_event] += [event[\"monsterType\"]]\n time_set.setdefault(t,[])\n time_set[t] = part_set\nprint(time_set)\n","sub_path":"데마시아5/DEMACIA5/djangobackend/timeline.py","file_name":"timeline.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"40027728","text":"\"\"\"\ndesispec.zfind.redmonster\n=========================\n\nClasses for use with the redmonster package.\n\"\"\"\nfrom 
__future__ import division, absolute_import\n\nimport os\n\nimport numpy as np\n\nfrom desispec.zfind import ZfindBase\nfrom desispec.interpolation import resample_flux\nfrom desispec.log import get_logger\n\nclass RedMonsterZfind(ZfindBase):\n \"\"\"Class documentation goes here.\n \"\"\"\n def __init__(self, wave, flux, ivar, R=None, dloglam=1e-4):\n \"\"\"Uses Redmonster to classify and find redshifts.\n\n See :class:`desispec.zfind.zfind.ZfindBase` class for inputs/outputs.\n\n TODO: document redmonster specific output variables\n \"\"\"\n try:\n from redmonster.physics.zfinder import Zfinder\n from redmonster.physics.zfitter import Zfitter\n from redmonster.physics.zpicker import Zpicker\n except ImportError:\n get_logger().error(\"You are attempting to use RedMonster, but it is not available for import!\")\n raise\n #- RedMonster templates don't quite go far enough into the blue,\n #- so chop off some data\n ii, = np.where(wave>3965)\n wave = wave[ii]\n flux = flux[:, ii]\n ivar = ivar[:, ii]\n\n #- Resample inputs to a loglam grid\n start = round(np.log10(wave[0]), 4)+dloglam\n stop = round(np.log10(wave[-1]), 4)\n\n nwave = int((stop-start)/dloglam)\n loglam = start + np.arange(nwave)*dloglam\n\n nspec = flux.shape[0]\n self.flux = np.empty((nspec, nwave))\n self.ivar = np.empty((nspec, nwave))\n\n for i in range(nspec):\n self.flux[i], self.ivar[i] = resample_flux(10**loglam, wave, flux[i], ivar[i])\n\n self.dloglam = dloglam\n self.loglam = loglam\n self.wave = 10**loglam\n self.nwave = nwave\n self.nspec = nspec\n\n #- list of (templatename, zmin, zmax) to fix\n self.template_dir = os.getenv('REDMONSTER')+'/templates/'\n self.templates = [\n ('ndArch-spEigenStar-55734.fits', -0.005, 0.005),\n ('ndArch-ssp_em_galaxy-v000.fits', 0.6, 1.6),\n # ('ndArch-ssp_em_galaxy_quickdesi-v000.fits', 0.6, 1.6),\n ('ndArch-QSO-V003.fits', 0.0, 3.5),\n ]\n\n #- Find and refine best redshift per template\n self.zfinders = list()\n self.zfitters = list()\n for template, zmin, zmax in self.templates:\n zfind = Zfinder(self.template_dir+template, npoly=2, zmin=zmin, zmax=zmax)\n zfind.zchi2(self.flux, self.loglam, self.ivar, npixstep=2)\n zfit = Zfitter(zfind.zchi2arr, zfind.zbase)\n zfit.z_refine()\n\n self.zfinders.append(zfind)\n self.zfitters.append(zfit)\n\n #- Create wrapper object needed for zpicker\n specobj = _RedMonsterSpecObj(self.wave, self.flux, self.ivar)\n flags = list()\n for i in range(len(self.zfitters)):\n flags.append(self.zfinders[i].zwarning.astype(int) | \\\n self.zfitters[i].zwarning.astype(int))\n\n #- Zpicker\n self.zpicker = Zpicker(specobj,\n self.zfinders[0], self.zfitters[0], flags[0],\n self.zfinders[1], self.zfitters[1], flags[1],\n self.zfinders[2], self.zfitters[2], flags[2])\n\n #- Fill in outputs\n self.type = np.asarray(self.zpicker.type, dtype='S20')\n self.subtype = np.asarray(self.zpicker.subtype, dtype='S20')\n self.z = np.array([self.zpicker.z[i,0] for i in range(nspec)])\n self.zerr = np.array([self.zpicker.z_err[i,0] for i in range(nspec)])\n self.zwarn = np.array([self.zpicker.zwarning[i].astype(int) for i in range(nspec)])\n self.model = self.zpicker.models\n\n\n#- This is a container class needed by Redmonster zpicker\nclass _RedMonsterSpecObj(object):\n def __init__(self, wave, flux, ivar, dof=None):\n \"\"\"\n Create an object with .wave, .flux, .ivar, and .dof attributes;\n these are needed by RedMonster as input\n \"\"\"\n nspec, nwave = flux.shape\n self.wave = wave\n self.flux = flux\n self.ivar = ivar\n if dof is None:\n self.dof = np.ones(nspec) 
* nwave\n else:\n self.dof = dof\n\n #- Leftover BOSS-isms\n self.plate = self.mjd = self.fiberid = self.npix = 0\n self.hdr = None\n self.plugmap = None\n\n","sub_path":"py/desispec/zfind/redmonster.py","file_name":"redmonster.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"84558128","text":"#!/usr/bin/python3\n# functions.py by Bill Weinman [http://bw.org/]\n# This is an exercise file from Python 3 Essential Training on lynda.com\n# Copyright 2010 The BearHeart Group, LLC\n\ndef main():\n for i in inclusive_range(25,30,4):\n print(i,end=' ')\n\n\ndef inclusive_range(*args):\n numargs = len(args)\n if numargs<1: raise TypeError('require atleast one argument')\n elif numargs == 1:\n stop = args[0]\n start = 0\n step = 1\n elif numargs == 2:\n (start,stop) = args\n step = 1\n elif numargs == 3:\n (start,stop,step) = args\n else: raise TypeError('inclusive_range expected at most 3 arguments, got{}'.format(numargs))\n\n i = start\n while (i<=stop):\n yield i\n i += step\n\ndef testfunc(**kwargs):\n print(kwargs['one'],kwargs['two'],kwargs['three'])\n\nif __name__ == \"__main__\": main()\n","sub_path":"Lynda_com/Ex_Files_Python_3_EssT/Exercise Files/11 Functions/functions-working.py","file_name":"functions-working.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591635493","text":"\"\"\"\nUtilities for iterating constructing data sets and iterating over\nDNA sequence data.\nPybedtools code from:\nhttps://github.com/uci-cbcl/FactorNet/blob/master/utils.py\nPseudo code structure:\n1. Construct a random training set (start with a random negative,\n account for data augmentations later?)\n2. Load the data & convert it to onehot. (Look for parallelization tools.)\n3. Build a generator\nWhat data does this script take as input or require?\n1. The genome sizes file\n2. The genome-wide fasta file\n3. A blacklist regions file.\n4. 
A ChIP-seq peak file.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pyfasta\nfrom pybedtools import BedTool\nimport pyBigWig\nimport argparse\nfrom subprocess import call\nimport yaml\nimport subprocess\nimport os\n\n# local imports\nimport utils\n\n# pybedtools.set_tempdir('/storage/home/dvs5680/scratch/')\nnp.random.seed(9)\n\n\nclass AccessGenome:\n def __init__(self, genome_fasta_file):\n self.genome_fasta_file = genome_fasta_file\n\n def get_genome_fasta(self):\n f = pyfasta.Fasta(self.genome_fasta_file)\n return f\n\n @staticmethod\n def get_onehot_array(seqs, window_length):\n \"\"\"\n Parameters:\n seqs: The sequence array that needs to be converted into one-hot encoded\n features.\n batch_size: mini-batch size\n L: window length\n Returns:\n A one-hot encoded array of shape batch_size * window_len * 4\n \"\"\"\n onehot_map = {'A': [1, 0, 0, 0], 'T': [0, 1, 0, 0], 'G': [0, 0, 1, 0],\n 'C': [0, 0, 0, 1], 'N': [0, 0, 0, 0]}\n # note: converting all lower-case nucleotides into upper-case here.\n onehot_seqs = [onehot_map[x.upper()] for seq in seqs for x in seq]\n onehot_data = np.reshape(onehot_seqs, newshape=(len(seqs), window_length, 4))\n return onehot_data\n\n def rev_comp(self, inp_str):\n rc_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G', 'c': 'g',\n 'g': 'c', 't': 'a', 'a': 't', 'n': 'n', 'N': 'N'}\n outp_str = list()\n for nucl in inp_str:\n outp_str.append(rc_dict[nucl])\n return ''.join(outp_str)\n\n def get_data_at_coordinates(self, coordinates_df, genome_fasta,\n window_len, chromatin_track_list, nbins):\n \"\"\"\n This method can be used either by:\n 1. class ConstructSets: uses this method to return features and labels\n for a training or validation batch.\n 2. class ConstructTestData: uses this method to return features and\n labels for the test chromosome co-ordinates and labels.\n Parameters:\n coordinates_df(dataFrame): This method takes as input a Pandas DataFrame with dimensions N * 4\n Where N is the number of samples.\n The columns are: chr, start, stop, label\n genome_fasta (pyFasta npy record): Pyfasta pointer to the fasta file.\n window_len (int): length of windows used for training\n nbins (int): number of bins for chromatin tracks\n Returns:\n This method returns a one hot encoded numpy array (X) and a np\n vector y.\n Both X and y are numpy arrays.\n X shape: (batch size, L, 4)\n y shape: (batch size,)\n \"\"\"\n y = coordinates_df['label']\n X_seq = []\n seq_len = []\n\n bw_list = [pyBigWig.open(bw_file) for bw_file in chromatin_track_list]\n chromatin_out_lists = [[] for x in bw_list]\n\n batch_size = len(y)\n idx = 0\n for chrom, start, stop, lab in coordinates_df.values:\n fa_seq = genome_fasta[chrom][int(start):int(stop)]\n try:\n for idx, bw_file in enumerate(bw_list):\n chromatin_out_lists[idx].append(bw_file.stats(chrom, start, stop, nBins=nbins))\n except RuntimeError:\n print(\n \"Error while analyzing the BigWig file.\\n\"\n \"-> Please ensure that the genome sizes file and genome fasta file is compatible \"\n \"with the genome to which the BigWig\"\n \"data is aligned. 
\\n\"\n \"-> It is possible that chromosome names are different in these file types\")\n exit(1)\n # Adding reverse complements into the training process:\n if idx <= int(batch_size/2):\n X_seq.append(fa_seq)\n else:\n X_seq.append(self.rev_comp(fa_seq))\n idx += 1\n seq_len.append(len(fa_seq))\n return X_seq, chromatin_out_lists, y\n\n\nclass ConstructTrainingData(AccessGenome):\n \"\"\"\n Notes:\n chip_coords is the filtered chip_seq file, it either contains only\n train chromosomes or validation chromosomes based on the input.\n \"\"\"\n\n def __init__(self, genome_sizes_file, genome_fasta_file, blacklist_file,\n chip_coords, window_length, exclusion_df,\n curr_genome_bed, acc_regions_file, chromatin_track_list, nbins):\n super().__init__(genome_fasta_file)\n self.genome_sizes_file = genome_sizes_file\n self.blacklist_file = blacklist_file\n self.chip_coords = chip_coords\n self.L = window_length\n self.exclusion_df = exclusion_df # This is df, convert to a bdt object.\n self.curr_genome_bed = curr_genome_bed\n # self.curr_genome_bed is is a df, convert to a bdt obj.\n self.acc_regions_file = acc_regions_file\n self.chromatin_track_list = chromatin_track_list\n self.nbins = nbins\n\n def apply_random_shift(self, coords):\n \"\"\"\n This function takes as input a set of bed co-ordinates\n It finds the mid-point for each record or Interval in the bed file,\n shifts the mid-point, and generates a window of\n length self.L.\n Calculating the shift:\n For each interval, find the mid-point.\n In this case, multiGPS is outputting 1 bp windows,\n so just taking the \"start\" as the mid-point.\n For example:\n Asc1.bed record:\n chr18 71940632 71940633\n mid-point: 71940632\n If training window length is L, then we must ensure that the\n peak center is still within the training window.\n Therefore: -L/2 < shift < L/2\n To add in a buffer: -L/2 + 25 <= shift <= L/2 + 25\n # Note: The 50 here is a tunable hyper-parameter.\n Parameters:\n coords(pandas dataFrame): This is an input bedfile\n Returns:\n shifted_coords(pandas dataFrame): The output bedfile with shifted coords\n \"\"\"\n # defining the random shift\n low = int(-self.L/2 + 25)\n high = int(self.L/2 - 25)\n coords['random_shift'] = np.random.randint(low=low, high=high,\n size=len(coords))\n\n coords['s_start'] = coords['start'] + coords['random_shift'] - int(self.L/2)\n coords['s_end'] = coords['start'] + coords['random_shift'] + int(self.L/2)\n # making a new dataFrame containing the new shifted coords.\n shifted_coords = coords.loc[:, ('chr', 's_start', 's_end')]\n shifted_coords.columns = ['chr', 'start', 'end']\n\n return shifted_coords\n\n def define_coordinates(self):\n \"\"\"\n Use the chip-seq peak file and the blacklist files to define a bound\n set and an unbound set of sites. The ratio of bound to unbound is 1:N,\n but can be controlled using the parameter \"ratio\".\n The unbound/negative set is chosen randomly from the genome.(ha)\n \"\"\"\n # POS. SAMPLES\n # Take a sample from the chip_coords file,\n # Then apply a random shift that returns 500 bp windows.\n # Create a BedTool object for further use.\n bound_sample_size = int(len(self.chip_coords))\n bound_sample = self.chip_coords.sample(n=(bound_sample_size * 5), replace=True)\n bound_sample_w_shift = self.apply_random_shift(bound_sample)\n bound_sample_bdt_obj = BedTool.from_dataframe(bound_sample_w_shift)\n bound_sample_w_shift['label'] = 1\n\n\n # NEG. 
SAMPLES\n # note: the self.curr_genome_bed.fn contains only training chromosomes.\n # Creates a DF.\n curr_genome_bdt = BedTool.from_dataframe(self.curr_genome_bed)\n exclusion_bdt_obj = BedTool.from_dataframe(self.exclusion_df)\n # unbound_random_bdt_obj = BedTool.shuffle(bound_sample_bdt_obj,\n # g=self.genome_sizes_file,\n # incl=curr_genome_bdt.fn,\n # excl=exclusion_bdt_obj.fn)\n unbound_random_bdt_obj = BedTool().random(l=self.L, n=(bound_sample_size * 4),\n g=self.genome_sizes_file)\n # Choosing only unbound regions that lie in the training set\n unbound_random_bdt_obj = unbound_random_bdt_obj.intersect(curr_genome_bdt)\n # Choosing only unbound regions that do not intersect ChIP-seq peaks or blacklist regions\n unbound_random_bdt_obj = unbound_random_bdt_obj.intersect(exclusion_bdt_obj, v=True)\n unbound_random_df = unbound_random_bdt_obj.to_dataframe()[['chrom', 'start', 'end']] # BedTool random produced 6 columns\n unbound_random_df.columns = ['chr', 'start', 'end']\n unbound_random_df['label'] = 0\n\n # NEG. SAMPLES: FLANKS\n def make_flanks(lower_lim, upper_lim):\n # getting a list of chip-seq flanking windows:\n # (can be a separate fn in utils)\n flanks_left = self.chip_coords.copy()\n flanks_right = self.chip_coords.copy()\n flanks_left['start'] = self.chip_coords['start'] - upper_lim\n flanks_left['end'] = self.chip_coords['start'] - lower_lim\n flanks_right['start'] = self.chip_coords['start'] + lower_lim\n flanks_right['end'] = self.chip_coords['start'] + upper_lim\n return flanks_left, flanks_right\n\n fl_r, fl_l = make_flanks(lower_lim=250, upper_lim=750)\n fl_r_2, fl_l_2 = make_flanks(lower_lim=200, upper_lim=700)\n fl_r_3, fl_l_3 = make_flanks(lower_lim=1500, upper_lim=2000)\n fl_r_4, fl_l_4 = make_flanks(lower_lim=1000, upper_lim=1500)\n flanks_df = pd.concat([fl_r, fl_l, fl_r_2, fl_l_2, fl_l_3, fl_r_3, fl_r_4, fl_l_4])\n flanks_df = flanks_df[flanks_df['start'] > 0]\n\n flanks_bdt = BedTool.from_dataframe(flanks_df)\n unbound_flanks_bdt_obj = flanks_bdt.intersect(curr_genome_bdt)\n unbound_flanks_df = unbound_flanks_bdt_obj.to_dataframe()\n unbound_flanks_df.columns = ['chr', 'start', 'end']\n unbound_flanks_df['label'] = 0\n unbound_flanks_df = unbound_flanks_df.sample(frac=1)\n\n # NEG. 
SAMPLES: ACCESSIBLE\n regions_acc_bdt_obj = BedTool(self.acc_regions_file)\n regions_acc_bdt_obj = regions_acc_bdt_obj.intersect(curr_genome_bdt)\n # negative samples/pre-accessible\n # unbound_acc_bdt_obj = BedTool.shuffle(bound_sample_bdt_obj,\n # g=self.genome_sizes_file,\n # incl=regions_acc_bdt_obj.fn,\n # excl=exclusion_bdt_obj)\n unbound_acc_bdt_obj = BedTool().random(l=self.L, n=(bound_sample_size * 4),\n g=self.acc_regions_file)\n # unbound_acc_bdt_obj = unbound_acc_bdt_obj.intersect(regions_acc_bdt_obj)\n unbound_acc_bdt_obj = unbound_acc_bdt_obj.intersect(exclusion_bdt_obj, v=True)\n unbound_acc_df = unbound_acc_bdt_obj.to_dataframe()[['chrom', 'start', 'end']]\n unbound_acc_df.columns = ['chr', 'start', 'end']\n unbound_acc_df['label'] = 0\n\n # Sizes of each set in this training construction are already accounted for.\n training_coords = pd.concat([bound_sample_w_shift, unbound_random_df,\n unbound_flanks_df, unbound_acc_df])\n\n training_coords = training_coords[(training_coords['end'] - training_coords['start'] == 500)]\n # randomly shuffle the dataFrame\n training_coords = training_coords.sample(frac=1)\n return training_coords\n\n def get_data(self):\n # get mini-batch co-ordinates:\n coords_for_data = self.define_coordinates()\n # get the fasta file:\n genome_fasta = super(ConstructTrainingData, self).get_genome_fasta()\n\n X_seq, X_chromatin_list, y = super().get_data_at_coordinates(coordinates_df=coords_for_data,\n genome_fasta=genome_fasta,\n window_len=self.L, chromatin_track_list=self.chromatin_track_list,\n nbins=self.nbins)\n return X_seq, X_chromatin_list, y, coords_for_data\n\n\ndef construct_training_data(genome_sizes_file, peaks_file, genome_fasta_file,\n blacklist_file, to_keep, to_filter,\n window_length, acc_regions_file, out_prefix, chromatin_track_list, nbins):\n \"\"\"\n This generator can either generate training data or validation data based on\n the to_keep and to_filter arguments.\n The train generate uses the to_filter argument, whereas to_keep=None\n For example:\n train_generator: to_filter=['chr10', 'chr17, 'chrUn', 'chrM', 'random']\n i.e. In this construction; chr10 and chr17 can be used for testing/validation.\n The val generator uses the to_keep argument, whereas to_filter=None.\n For example:\n val_generator: to_keep=['chr17']\n i.e. 
In this construction; chr17 data is used for validation.\n Additional Parameters:\n genome_sizes_file: sizes\n peaks_file: multiGPS formatted *events* file\n blacklist_file: BED format blacklist file\n genome_fasta_file: fasta file for the whole genome\n batch_size (int): batch size used for training and validation batches\n window_len (int): the length of windows used for training and testing.\n \"\"\"\n # Load the genome_sizes_file (Filtering out the validation and test chromosomes):\n curr_genome_bed = utils.get_genome_sizes(genome_sizes_file, to_keep=to_keep,\n to_filter=to_filter)\n genome_bed_df = curr_genome_bed.to_dataframe()\n\n # Loading the chip-seq bed file (Filtering out the validation and test chromosomes):\n chip_seq_coordinates = utils.load_chipseq_data(peaks_file,\n genome_sizes_file=genome_sizes_file,\n to_keep=to_keep,\n to_filter=to_filter)\n\n # Loading the exclusion bed file (Blacklist + ChIP-seq peaks, use for constructing negative sets):\n exclusion_windows_bdt = utils.exclusion_regions(blacklist_file,\n chip_seq_coordinates)\n exclusion_windows_df = exclusion_windows_bdt.to_dataframe()\n\n # constructing the training set\n construct_sets = ConstructTrainingData(genome_sizes_file=genome_sizes_file,\n genome_fasta_file=genome_fasta_file,\n blacklist_file=blacklist_file,\n chip_coords=chip_seq_coordinates,\n exclusion_df=exclusion_windows_df,\n window_length=window_length,\n curr_genome_bed=genome_bed_df,\n acc_regions_file=acc_regions_file,\n chromatin_track_list=chromatin_track_list,\n nbins=nbins)\n\n X_seq, X_chromatin_list, y, training_coords = construct_sets.get_data()\n # saving the data\n np.savetxt(out_prefix + '.seq', X_seq, fmt='%s')\n for idx, chromatin_track in enumerate(chromatin_track_list):\n chromatin_out_files = [x.split('/')[-1].split('.')[0] for x in chromatin_track_list]\n np.savetxt(out_prefix + '.' 
+ chromatin_out_files[idx] + '.chromatin', X_chromatin_list[idx], delimiter='\\t', fmt='%1.3f')\n np.savetxt(out_prefix + '.labels', y, fmt='%s')\n return training_coords\n\n\nclass ConstructTestData(AccessGenome):\n\n def __init__(self, genome_fasta_file, genome_sizes_file, peaks_file,\n blacklist_file, window_len, stride, to_keep, chromatin_track_list,\n nbins):\n super().__init__(genome_fasta_file)\n self.genome_sizes_file = genome_sizes_file\n self.peaks_file = peaks_file\n self.blacklist_file = blacklist_file\n self.window_len = window_len\n self.stride = stride\n self.to_keep = to_keep\n self.chromatin_track_list = chromatin_track_list\n self.nbins = nbins\n\n def define_coordinates(self):\n \"\"\"\n This function loads and returns coords & labels for the test set.\n Logic for assigning test set labels:\n The multiGPS peak files are used as inputs; and expanded to record\n 25 bp windows around the peak center.\n if 100% of peak center lies in window:\n label bound.\n elif < 100% of peak center lies in the window:\n label ambiguous.\n else:\n label unbound.\n Returns:\n test_coords (pd dataFrame): A dataFrame with chr, start, end and\n labels\n \"\"\"\n genome_sizes = pd.read_csv(self.genome_sizes_file, sep=\"\\t\",\n names=['chr', 'len'])\n # subset the test chromosome:\n genome_test = genome_sizes[genome_sizes['chr'] == self.to_keep[0]]\n # the assumption here is that to_keep is a single chromosome list.\n end_idx = genome_test.iloc[0, 1]\n chromosome = genome_test.iloc[0, 0]\n test_set = []\n start_idx = 0\n while start_idx + self.window_len < end_idx:\n curr_interval = [chromosome, start_idx, start_idx + self.window_len]\n start_idx += self.stride\n test_set.append(curr_interval)\n\n test_df = pd.DataFrame(test_set, columns=['chr', 'start', 'stop'])\n test_bdt_obj = BedTool.from_dataframe(test_df)\n\n chip_peaks = utils.load_chipseq_data(chip_peaks_file=self.peaks_file,\n to_keep=self.to_keep,\n genome_sizes_file=self.genome_sizes_file)\n # note: multiGPS reports 1 bp separated start and end,\n # centered on the ChIP-seq peak.\n chip_peaks['start'] = chip_peaks['start'] - int(self.window_len/2)\n # (i.e. 250 if window_len=500 )\n chip_peaks['end'] = chip_peaks['end'] + int(self.window_len/2 - 1)\n # (i.e. 249 if window_len=500); multiGPS reports 1bp intervals\n\n chip_peaks = chip_peaks[['chr', 'start', 'end']]\n chip_peaks_bdt_obj = BedTool.from_dataframe(chip_peaks)\n\n blacklist_exclusion_windows = BedTool(self.blacklist_file)\n # intersecting\n unbound_data = test_bdt_obj.intersect(chip_peaks_bdt_obj, v=True)\n if self.blacklist_file is None:\n bound_data = chip_peaks_bdt_obj\n else:\n unbound_data = unbound_data.intersect(blacklist_exclusion_windows,\n v=True)\n # i.e. if there is any overlap with chip_peaks, that window is not\n # reported\n # removing blacklist windows\n bound_data = chip_peaks_bdt_obj.intersect(blacklist_exclusion_windows,\n v=True)\n # i.e. 
the entire 500 bp window is the positive window.\n # making data-frames\n bound_data_df = bound_data.to_dataframe()\n bound_data_df['label'] = 1\n unbound_data_df = unbound_data.to_dataframe()\n unbound_data_df['label'] = 0\n # exiting\n test_coords = pd.concat([bound_data_df, unbound_data_df])\n return test_coords\n\n def get_data(self):\n # get mini-batch co-ordinates:\n test_coords = self.define_coordinates()\n # get the fasta file:\n genome_fasta = super().get_genome_fasta()\n X_seq, X_chromatin_list, y = super().get_data_at_coordinates(coordinates_df=test_coords, genome_fasta=genome_fasta,\n window_len=self.window_len, chromatin_track_list=self.chromatin_track_list,\n nbins=self.nbins)\n return X_seq, X_chromatin_list, y, test_coords\n\n\ndef construct_test_data(genome_sizes_file, peaks_file, genome_fasta_file,\n blacklist_file, to_keep, window_len, stride, out_prefix, chromatin_track_list,\n nbins):\n\n ts = ConstructTestData(genome_fasta_file=genome_fasta_file, genome_sizes_file=genome_sizes_file,\n peaks_file=peaks_file, blacklist_file=blacklist_file,\n window_len=window_len, stride=stride, to_keep=to_keep,\n chromatin_track_list=chromatin_track_list, nbins=nbins)\n X_seq, X_chromatin_list, y_test, test_coords = ts.get_data()\n\n # Saving the data\n np.savetxt(out_prefix + '.seq', X_seq, fmt='%s')\n for idx, chromatin_track in enumerate(chromatin_track_list):\n chromatin_out_files = [x.split('/')[-1].split('.')[0] for x in chromatin_track_list]\n np.savetxt(out_prefix + '.' + chromatin_out_files[idx] + '.chromatin', X_chromatin_list[idx], delimiter='\\t', fmt='%1.3f')\n np.savetxt(out_prefix + '.labels', y_test, fmt='%d')\n test_coords.to_csv(out_prefix + '.bed', sep='\\t')\n return test_coords\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Construct Training Data For Bichrom')\n parser.add_argument('-info', help='Genome sizes file',\n required=True)\n parser.add_argument('-fa', help='The fasta file for the genome of interest',\n required=True)\n parser.add_argument('-len', help='Size of training, test and validation windows',\n type=int, required=True)\n parser.add_argument('-acc_domains', help='Bed file with accessible domains',\n required=True)\n parser.add_argument('-chromtracks', nargs='+', help='A list of BigWig files for all input chromatin '\n 'experiments', required=True)\n parser.add_argument('-peaks', help='A ChIP-seq or ChIP-exo peak file in multiGPS file format',\n required=True)\n parser.add_argument('-o', '--outdir', help='Output directory for storing train, test data',\n required=True)\n parser.add_argument('-nbins', type=int, help='Number of bins for chromatin tracks',\n required=True)\n\n parser.add_argument('-blacklist', default=None, help='Optional, blacklist file for the genome of interest')\n\n args = parser.parse_args()\n\n if args.outdir[0] == '/':\n # The user has specified a full directory path for the output directory:\n out_dir_path = args.outdir\n elif args.outdir[0] == '~':\n # The user has specified a full path starting with the home directory:\n out_dir_path = args.outdir\n elif args.outdir[0] == '.':\n # The user has specified a relative path.\n print(\"Please specify an absolute path for the output directory.\")\n print(\"Exiting..\")\n exit(1)\n else:\n # The user has specified an output directory within the current wd.\n dir_path = subprocess.run(['pwd'], stdout=subprocess.PIPE)\n # Specifying the full path in the yaml configuration file.\n out_dir_path = (str(dir_path.stdout, 'utf-8')).rstrip() + '/' + args.outdir\n\n 
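    # Note (added; not in the original record): the `call(['mkdir', ...])` used
    # just below fails when the directory already exists and cannot create
    # intermediate directories. A sketch of a sturdier stdlib-only equivalent,
    # assuming Python >= 3.2 (`os` is already imported in this file):
    #
    #     os.makedirs(out_dir_path, exist_ok=True)
    #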
print('Creating output directory')\n call(['mkdir', args.outdir])\n print(out_dir_path)\n\n print('Recording output paths')\n\n print([x.split('/')[-1].split('.')[0] for x in args.chromtracks])\n\n # Produce a default yaml file recording the output\n yml_training_schema = {'train': {'seq': out_dir_path + '/data_train.seq',\n 'labels': out_dir_path + '/data_train.labels',\n 'chromatin_tracks': [out_dir_path + '/data_train.' + x.split('/')[-1].split('.')[0] + '.chromatin'\n for x in args.chromtracks]},\n 'val': {'seq': out_dir_path + '/data_val.seq',\n 'labels': out_dir_path + '/data_val.labels',\n 'chromatin_tracks': [out_dir_path + '/data_val.' + x.split('/')[-1].split('.')[0] + '.chromatin'\n for x in args.chromtracks]},\n 'test': {'seq': out_dir_path + '/data_test.seq',\n 'labels': out_dir_path + '/data_test.labels',\n 'chromatin_tracks': [out_dir_path + '/data_test.' + x.split('/')[-1].split('.')[0] + '.chromatin'\n for x in args.chromtracks]}}\n\n # Note: The x.split('/')[-1].split('.')[0] accounts for input chromatin bigwig files with\n # associated directory paths\n\n with open(args.outdir + '/bichrom.yaml', \"w\") as fp:\n yaml.dump(yml_training_schema, fp)\n\n print('Constructing train data ...')\n coords = construct_training_data(genome_sizes_file=args.info, peaks_file=args.peaks,\n genome_fasta_file=args.fa,\n blacklist_file=args.blacklist, window_length=args.len,\n acc_regions_file=args.acc_domains,\n to_filter=['chr17', 'chr11', 'chrM', 'chrUn'],\n to_keep=None,\n out_prefix=args.outdir + '/data_train',\n chromatin_track_list=args.chromtracks,\n nbins=args.nbins)\n\n print('Constructing validation data ...')\n construct_test_data(genome_sizes_file=args.info, peaks_file=args.peaks,\n genome_fasta_file=args.fa,\n blacklist_file=args.blacklist, window_len=args.len,\n stride=args.len,\n to_keep=['chr11'],\n out_prefix=args.outdir + '/data_val',\n chromatin_track_list=args.chromtracks, nbins=args.nbins)\n\n print('Constructing test data ...')\n construct_test_data(genome_sizes_file=args.info, peaks_file=args.peaks,\n genome_fasta_file=args.fa,\n blacklist_file=args.blacklist, window_len=args.len,\n stride=args.len,\n to_keep=['chr17'],\n out_prefix=args.outdir + '/data_test',\n chromatin_track_list=args.chromtracks, nbins=args.nbins)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"construct_data/construct_data.py","file_name":"construct_data.py","file_ext":"py","file_size_in_byte":27381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393105449","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# pylint: disable=undefined-variable\n# flake8: noqa: F821\n\n\"\"\"Called from views.view\"\"\"\n\nimport sys\nimport webbrowser\nfrom tkinter import Menu, PhotoImage\n\nfrom utils import versions\nfrom utils.utils import set_language\nfrom views.about import About\n\n\nclass MenuBar:\n \"\"\"Display menubar.\"\"\"\n\n def __init__(self, parent, controller):\n \"\"\"Setting bar menu.\"\"\"\n self.parent = parent\n self.controller = controller\n\n self.about = About\n self.load_images()\n self.setui_menu()\n\n def load_images(self):\n \"\"\"Load flags image.\"\"\"\n self.img_fr = PhotoImage(file=r\"./assets/fr.png\")\n self.img_en = PhotoImage(file=r\"./assets/en.png\")\n\n def setui_menu(self):\n \"\"\"Make widget menu.\"\"\"\n menu = Menu(self.parent)\n self.parent.config(menu=menu)\n\n file_menu = Menu(menu, tearoff=False)\n\n if sys.platform == \"darwin\":\n file_menu.add_command(\n label=_(\"Open\"), 
accelerator=\"Command-O\",\n underline=0, command=self.controller.open_filenames)\n self.parent.bind(\"\",\n lambda event: self.controller.open_filenames())\n\n file_menu.add_command(\n label=_(\"Exit\"), accelerator=\"Command-W\",\n command=self.parent.destroy)\n else:\n file_menu.add_command(\n label=_(\"Open\"), accelerator=\"Ctrl-O\",\n underline=0, command=self.controller.open_filenames)\n self.parent.bind(\"\",\n lambda event: self.controller.open_filenames())\n\n file_menu.add_command(\n label=_(\"Exit\"), accelerator=\"Alt-F4\", command=sys.exit)\n\n menu.add_cascade(label=_(\"File\"), menu=file_menu)\n\n lang_menu = Menu(menu, tearoff=False)\n lang_menu.add_command(\n label=_(\"French\"),\n image=self.img_fr,\n compound=\"left\",\n command=lambda: set_language(\"fr\"))\n lang_menu.add_command(\n label=_(\"English\"),\n image=self.img_en,\n compound=\"left\",\n command=lambda: set_language(\"en\"))\n menu.add_cascade(label=\"Language\", menu=lang_menu)\n\n edit_menu = Menu(menu, tearoff=False)\n edit_menu.add_command(\n label=_(\"Website\"),\n command=lambda: webbrowser.open(versions.__website__)\n )\n edit_menu.add_command(label=_(\"About\"),\n command=lambda: self.about(self.parent))\n menu.add_cascade(label=_(\"Help\"), menu=edit_menu)\n","sub_path":"views/menubar.py","file_name":"menubar.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"177399404","text":"from time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n# Немного настроим наш браузер\ndriver = webdriver.Chrome()\ndriver.maximize_window()\ndriver.implicitly_wait(3)\n\n# Откроем страницу блога\ndriver.get(\"https://blog.noveogroup.ru/\")\n\n# Попытаемся найти... кого?\nsleep(1)\ndriver.find_element_by_css_selector(\"[type=search]\").send_keys(\"настоящих ниндзя\") # P.s. здесь могла бы быть ваша реклама\nsleep(1)\ndriver.find_element_by_css_selector(\"[type=search]\").send_keys(Keys.ENTER)\n\n# И посмотрим, как же стать настоящим ниндзя?\nsleep(1)\ndriver.find_element_by_tag_name(\"article\").click()\n\n# А мы точно открыли ту статью?\nsleep(1)\ntitle = driver.find_element_by_css_selector(\"article>header>h2\").text\nassert title == \"Тестовые площадки для тренировок настоящих ниндзя\", \"Упс, а мы хотели стать ниндзя :(\"\nprint(\"Yeah! 
That's it!\")\n\n# А теперь можно ознакомиться со статьей\n# Чтобы ознакомиться со статьёй получше - удалите строчки ниже\nsleep(5)\ndriver.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216930126","text":"# coding:utf-8\n\nimport numpy as np\nimport pandas as pd\nimport math\n\nfrom base_func import getDist\nfrom base_func import format_convert\nfrom base_func import point_poly\nfrom avg_speed import steadyAvgSpeed\n\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\n\n##################################################\n# 栅格化函数\n# 求得每个珊格与其他所有珊格的距离,找到与当前珊格小于25公里的珊格\nfc = format_convert()\ndef gridDst(coorTuple, grade, dst = 25.):\n coorList = list(coorTuple)\n lon = coorList[0] + (grade / 2.)\n lat = coorList[1] + (grade / 2.)\n # 当前坐标所在的areaID\n areaID = fc.areaID(longitude=lon, latitude=lat, grade=grade)\n print(\"lon = %f\" % lon)\n # 形成地球上的每个珊格的中心的坐标\n lonList = [(i * grade) for i in range(int(-180/grade), int(180/grade))]\n latList = [(i * grade) for i in range(int(-90/grade), int(90/grade))]\n # 求得该areaID到其他所有珊格的距离,找出小于25公里的进行记录\n gridList = [] # 初始化输出的珊格编号列表\n for gridLat in latList:\n # 获得珊格的中心经度\n gridLat = gridLat + (grade / 2.)\n for gridLon in lonList:\n # 获得珊格的中心维度\n gridLon = gridLon + (grade / 2.)\n # 获取当前areaID与当前珊格的距离\n tmpGridDst = getDist(lon1=lon, lat1=lat, lon2=gridLon, lat2=gridLat)\n # 判断当前areaID与当前珊格的距离是否小于25公里\n if(tmpGridDst < dst): # 若小于25公里,记录此珊格的areaID\n # 记录此珊格的areaID\n gridList.append(fc.areaID(longitude=gridLon, latitude=gridLat))\n else: # 若大于25公里,不做处理\n pass\n gridStr = \"\"\n for grid in gridList:\n gridStr = gridStr + str(grid) + \"*\"\n outStr = str(areaID) + \",\" + gridStr\n return outStr\n\n#################################################################\n# 优化求得附近的areaID程序\n# 获取当前区域内的最大最小经纬\ndef getAreaCenter(lon, lat, grade):\n lon = lon / grade\n lat = lat / grade\n # 判断经度是正数还是负数\n if (lon < 0): # 若是负数获取当前区域内���经度极值\n areaMinLon = math.ceil(lon)\n areaMaxLon = math.floor(lon)\n else: # 若是整数获取当前区域内的经度极值\n areaMinLon = math.floor(lon)\n areaMaxLon = math.ceil(lon)\n # 判断纬度,方法同经度\n if (lat < 0):\n areaMinLat = math.ceil(lat)\n areaMaxLat = math.floor(lat)\n else:\n areaMinLat = math.floor(lat)\n areaMaxLat = math.ceil(lat)\n # 获取区域的中心经纬度\n areaCenterLon = ((areaMinLon + areaMaxLon) / 2.) * grade\n areaCenterLat = ((areaMinLat + areaMaxLat) / 2.) 
* grade\n return areaCenterLon, areaCenterLat\n\n# 获取给定半径与单位距离的整数关系\ndef getMoveNum(unionDst, dst):\n n = int(dst / unionDst) + 1\n return n\n\n# 根据经纬度方向上的移动格数获取附近的经纬度\ndef getCoorList(areaCenterLon, areaCenterLat, lonMoveNum, latMoveNum, grade):\n import itertools\n # 初始化形成的经纬度列表\n lonList = []\n latList = []\n coorList = []\n # 获取经度列表\n for nLon in range((lonMoveNum + 1)):\n # 获取附近的栅格中心经度\n tmpLonBigger = areaCenterLon + nLon * grade\n tmpLonSmaller = areaCenterLon - nLon * grade\n # 若数值变化后跨越了东西半球,进行处理\n if(tmpLonBigger > 180.): # 若由东向西\n tmpLonBigger = tmpLonBigger - 360.\n if(tmpLonSmaller < -180.): # 若由西向东\n tmpLonSmaller = tmpLonSmaller + 360.\n lonList.append(tmpLonBigger)\n lonList.append(tmpLonSmaller)\n\n # 获取纬度列表\n for nLat in range((latMoveNum + 1)):\n # 获取附近栅格的纬度里\n tmpLatBigger = areaCenterLat + nLat * grade\n tmpLatSmaller = areaCenterLat - nLat * grade\n if(tmpLatBigger < 90.):\n latList.append(tmpLatBigger)\n if(tmpLatSmaller > -90.):\n latList.append(tmpLatSmaller)\n # 求出经度列表与纬度列表形成的笛卡尔积\n for coor in itertools.product(lonList, latList):\n # 把笛卡尔积内的每个元素放入列表记录\n coorList.append(coor)\n # 输出存放所有坐标的列表\n return coorList\n\n\n# 获取临近的栅格对应的中心经纬度\ndef getCloseArea(areaCenterLon, areaCenterLat, dst, grade):\n # 初始化当前区域的中心经纬度\n tmpAreaCenterLon = areaCenterLon\n tmpAreaCenterLat = areaCenterLat\n\n # 求出当前栅格在纬度方向上的单位距离\n if ((tmpAreaCenterLon + grade) > 180.):\n # 将经度转换为负值\n tmpAreaCenterLon = -180. + (grade / 2.0)\n else:\n tmpAreaCenterLon += grade\n lonDst = getDist(lon1=areaCenterLon, lat1=areaCenterLat,\n lon2=tmpAreaCenterLon, lat2=areaCenterLat)\n # 求出当前栅格在经度方向上的单位距离\n tmpAreaCenterLat += grade\n latDst = getDist(lon1=areaCenterLon, lat1=areaCenterLat,\n lon2=areaCenterLon, lat2=tmpAreaCenterLat)\n\n # 获取经纬度需要移动的格数\n lonMoveNum = getMoveNum(unionDst=lonDst, dst=dst)\n latMoveNum = getMoveNum(unionDst=latDst, dst=dst)\n # 获取附近的所有栅格的中心坐标点\n closeCoorList = getCoorList(areaCenterLon=areaCenterLon, areaCenterLat=areaCenterLat,\n lonMoveNum=lonMoveNum, latMoveNum=latMoveNum, grade=grade)\n # 获取附近所有栅格的ID\n closeAreaIDList = []\n for coor in closeCoorList:\n coorList = list(coor)\n tmpLon = coorList[0]\n tmpLat = coorList[1]\n tmpAreaID = fc.areaID(tmpLon, tmpLat, grade=grade)\n closeAreaIDList.append(tmpAreaID)\n return closeAreaIDList\n\n\n# 求得当前区域临近的areaID\ndef gridDstOpt(coorTuple, grade, dst=25.):\n coorList = list(coorTuple)\n lon = float(coorList[0])\n lat = float(coorList[1])\n if len(coorList[2]) == 2:\n portName = str(coorList[2][0])\n portID = str(coorList[2][1]).replace(\",\", \";\")\n\n # 获取当前区域内的经纬度极值\n areaCenterLon, areaCenterLat = getAreaCenter(lon, lat, grade)\n # 当前坐标所在的areaID\n areaID = fc.areaID(longitude=lon, latitude=lat, grade=grade)\n # 找到以25公里为半径,在半径范围内的栅格的ID\n closeAreaIDList = getCloseArea(areaCenterLon=areaCenterLon, areaCenterLat=areaCenterLat,\n dst=dst, grade=grade)\n gridStr = \"\"\n for grid in closeAreaIDList:\n gridStr = gridStr + str(grid) + \"*\"\n outStr = portName + \",\" + portID + \",\" + str(lon) + \",\" + str(lat) + \",\" + str(areaID) + \",\" + gridStr\n return outStr\n elif len(coorList[2]) == 3:\n portName = str(coorList[2][0])\n portID = str(coorList[2][1]).replace(\",\", \";\")\n berth_id = str(coorList[2][2]).replace(\",\", \";\")\n\n # 获取当前区域内的经纬度极值\n areaCenterLon, areaCenterLat = getAreaCenter(lon, lat, grade)\n # 当前坐标所在的areaID\n areaID = fc.areaID(longitude=lon, latitude=lat, grade=grade)\n # 找到以25公里为半径,在半径范围内的栅格的ID\n closeAreaIDList = getCloseArea(areaCenterLon=areaCenterLon, areaCenterLat=areaCenterLat,\n dst=dst, 
grade=grade)\n gridStr = \"\"\n for grid in set(closeAreaIDList):\n gridStr = gridStr + str(grid) + \"*\"\n\n outStr = portName + \",\" + portID + \",\" + berth_id + \",\" + str(lon) + \",\" + str(lat) + \",\" + \\\n str(areaID) + \",\" + gridStr\n return outStr\n elif len(coorList[2]) == 4:\n portName = str(coorList[2][0])\n portID = str(coorList[2][1]).replace(\",\", \";\")\n berth_id = str(coorList[2][2]).replace(\",\", \";\")\n terminal_id = str(coorList[2][3]).replace(\",\", \";\")\n\n # 获取当前区域内的经纬度极值\n areaCenterLon, areaCenterLat = getAreaCenter(lon, lat, grade)\n # 当前坐标所在的areaID\n areaID = fc.areaID(longitude=lon, latitude=lat, grade=grade)\n # 找到以25公里为半径,在半径范围内的栅格的ID\n closeAreaIDList = getCloseArea(areaCenterLon=areaCenterLon, areaCenterLat=areaCenterLat,\n dst=dst, grade=grade)\n gridStr = \"\"\n for grid in set(closeAreaIDList):\n gridStr = gridStr + str(grid) + \"*\"\n\n outStr = portName + \",\" + portID + \",\" + berth_id + \",\" + terminal_id + \",\" + str(lon) + \",\" + str(lat) + \",\" +\\\n str(areaID) + \",\" + gridStr\n return outStr\n\n##################################################\n# 20170516停泊事件模型类\nclass moor(object):\n def __init__(self):\n # 实例化格式转换类\n self.fc = format_convert()\n # 实例化平均航速类\n self.steadyAvgSpeed = steadyAvgSpeed()\n # 初始化距离精度 1 -- km; 1000 -- m; 1000000 -- mm;\n self.preision = 1000000.\n # 初始化停泊事件最大位移,单位:毫米。数值200米 * sqrt(2) * 1000\n self.D_DST = 282842.2725\n # 初始化停泊事件最大低速点,单位:毫米/秒\n self.D_SPEED = 100.\n # 初始化判断点港口与停泊事件之间的位置关系距离阈值,单位:千米\n self.moorDst = 25.\n # 初始化判断合并停泊事件条件\n self.mergeDst = 100000. # 距离阈值,单位:毫米\n self.mergeTime = 30 * 60 # 时间阈值,单位:秒\n\n # 对三阶段AIS数据求出指定索引范围内的航程\n # 参数输入:shipAISList -- AIS数据;startIndex -- 停泊开始的索引;\n # endIndex -- 停泊结束的索引\n def __getSailDst(self, shipAISList, startIndex, endIndex):\n sailDst = 0\n # 获取指定索引内的AIS数据与条数\n tmpShipAISList = shipAISList[startIndex:(endIndex + 1)]\n # 初始化需求航程的经纬度列表\n lonList = []\n latList = []\n # 循环获得经纬度信息\n for line in tmpShipAISList:\n lonList.append(line[6])\n latList.append(line[7])\n # 求出航程\n for index in range((endIndex - startIndex)):\n tmpDst = getDist(lon1=lonList[index], lat1=latList[index],\n lon2=lonList[index + 1], lat2=latList[index + 1])\n sailDst = sailDst + tmpDst\n # 返回航程,单位:千米\n return sailDst\n\n # 对三阶段AIS数据形成停泊事件的数据格式\n # 参数输入:shipAISList -- AIS数据;staticDF -- 静态数据;startIndex -- 停泊开始的索引;\n # endIndex -- 停泊结束的索引;lastEndIndex -- 上次停泊结束的索引;\n def __convertMoorResult(self, shipAISList, staticDF, startIndex, endIndex, lastEndIndex):\n # 获取停泊事件的输出数据\n shipAISList = [[float(x) for x in y] for y in shipAISList]\n shipAISList = np.array(shipAISList)\n begin_time = shipAISList[startIndex][1] # time of starting nav point\n end_time = shipAISList[endIndex][1] # time of ending nav point\n # 判断是否存在上一停泊事件\n if(lastEndIndex != 0): # 若存在上次停泊事件\n # 上一停泊事件结束时间\n last_time = shipAISList[lastEndIndex][1]\n # 获取两个停泊事件之间的间隔时间\n apart_time = begin_time - last_time\n # 获取上一停泊结束时间至当前停泊开始时间的静态数据\n staticDF = staticDF[(staticDF[\"shipid\"] == shipAISList[endIndex][0]) &\n (staticDF[\"time\"] >= last_time) &\n (staticDF[\"time\"] <= begin_time)]\n # 获取该航段内出现过的吃水深度个数\n draughtList = list(set(staticDF.iloc[:, 10]))\n draughtLen = len(draughtList)\n # 若有且仅有一次吃水深度更新\n if draughtLen >= 2:\n draught = draughtList[-1]\n else:\n draught = None\n else: # 若不存在上次停泊事件\n apart_time = None\n draught = None\n mmsi = shipAISList[endIndex][0] # 船舶MMSI\n begin_lon = shipAISList[startIndex][6] * self.preision # longitude of starting nav point\n begin_lat = shipAISList[startIndex][7] * self.preision # 
latitude of starting nav point\n begin_hdg = shipAISList[startIndex][12] # true_head of starting nav point\n begin_sog = shipAISList[startIndex][9] # sog of starting nav point\n begin_cog = shipAISList[startIndex][11] # cog of starting nav point\n end_lon = shipAISList[endIndex][6] * self.preision # longitude of ending nav point\n end_lat = shipAISList[endIndex][7] * self.preision # latitude of ending nav point\n end_hdg = shipAISList[endIndex][12] # true_head of ending nav point\n end_sog = shipAISList[endIndex][9] # sog of ending nav point\n end_cog = shipAISList[endIndex][11] # cog of ending nav point\n point_num = endIndex - startIndex + 1 # ais data nums between nav\n avg_lon = np.mean(shipAISList[startIndex:(endIndex + 1), 6].astype(float)) * self.preision\n avg_lat = np.mean(shipAISList[startIndex:(endIndex + 1), 7].astype(float)) * self.preision\n zone_id = shipAISList[endIndex][8] # zone_id of ending nav point\n navistate = shipAISList[startIndex + 1][5] # status of start+1 nav point\n\n sailArray = np.array(shipAISList[lastEndIndex:(startIndex + 1)])\n avgSpeed = self.steadyAvgSpeed.shipSteadySpeedThr(sailArray) # 获取平均速度\n # 判断该停泊事件包含几条AIS数据\n if(point_num == 2): # 若该停泊事件只由2条AIS数据组成\n # 获取输出数据\n avg_lon = (begin_lon + end_lon) / 2.0\n avg_lat = (begin_lat + end_lat) / 2.0\n avg_hdgMcog = abs(((begin_hdg - begin_cog) + (end_hdg - end_cog)) / 2.0)\n avg_sog = (begin_sog + end_sog) / 2.0\n var_hdg = np.var([begin_hdg, end_hdg])\n var_cog = np.var([begin_cog, end_cog])\n var_sog = np.var([begin_sog, end_sog])\n var_rot = np.var([shipAISList[startIndex][9], shipAISList[endIndex][9]])\n max_sog = max([begin_sog, end_sog])\n maxSog_cog = [begin_cog, end_cog][np.argmax([begin_sog, end_sog])]\n max_rot = max([shipAISList[startIndex][9], shipAISList[endIndex][9]])\n else:\n # 获取输出数据\n tmp_avg_lon, tmp_avg_lat, tmp_avg_hdgMcog, tmp_avg_sog = [], [], [], []\n tmp_var_hdg, tmp_var_cog, tmp_var_sog, tmp_var_rot = [], [], [], []\n for index in range(startIndex, endIndex + 1):\n tmp_avg_lon.append(shipAISList[index][6] * self.preision)\n tmp_avg_lat.append(shipAISList[index][7] * self.preision)\n tmp_avg_hdgMcog.append(abs(shipAISList[index][12] - shipAISList[index][11]))\n tmp_avg_sog.append(shipAISList[index][9])\n tmp_var_hdg.append(shipAISList[index][12])\n tmp_var_cog.append(shipAISList[index][11])\n tmp_var_sog.append(shipAISList[index][9])\n tmp_var_rot.append(shipAISList[index][15])\n # 求出平均值、方差\n avg_lon = np.mean(tmp_avg_lon)\n avg_lat = np.mean(tmp_avg_lat)\n avg_hdgMcog = np.mean(tmp_avg_hdgMcog)\n avg_sog = np.mean(tmp_avg_sog)\n var_hdg = np.var(tmp_var_hdg)\n var_cog = np.var(tmp_var_cog)\n var_sog = np.var(tmp_var_sog)\n var_rot = np.var(tmp_var_rot)\n max_sog = max(tmp_var_sog)\n maxSog_cog = tmp_var_cog[np.argmax(tmp_var_sog)]\n max_rot = max(tmp_var_rot)\n\n return [mmsi, begin_time, end_time, apart_time,\n begin_lon, begin_lat, begin_hdg, begin_sog, begin_cog,\n end_lon, end_lat, end_hdg, end_sog, end_cog,\n point_num, avg_lon, avg_lat, var_hdg, var_cog, avg_hdgMcog,\n avg_sog, var_sog, max_sog, maxSog_cog,\n max_rot, var_rot, draught, avgSpeed, zone_id, navistate]\n\n # 将停泊事件list转换为一个大字符串输出\n # 输入参数:nav_event -- 停泊事件list\n def __getNavStr(self, input_list):\n if len(input_list) != 0:\n output_str_list = list()\n input_str_list = [[str(x) for x in ele] for ele in input_list]\n for ele in input_str_list:\n # ele[6] = str(int(float(ele[6]) * 1000000))\n # ele[7] = str(int(float(ele[7]) * 1000000))\n output_str_list.append(','.join(ele) + '\\n')\n output_str = 
''.join(list(set(output_str_list)))\n return output_str[:-1]\n else:\n pass\n\n # 判断当前停泊事件与暂存停泊事件是否需要合并\n # 参数输入:shipAISList -- AIS数据;startIndex -- 停泊开始的索引;\n # endIndex -- 停泊结束的索引;lastEndIndex -- 上次停泊结束的索引;lastStartIndex -- 上次停泊开始的索引;\n def __mergeMoor(self, shipAISList, startIndex, lastEndIndex):\n # 获取上一停泊事件结束时的经纬度\n preTime = int(shipAISList[lastEndIndex][1])\n preLon = float(shipAISList[lastEndIndex][6])\n preLat = float(shipAISList[lastEndIndex][7])\n # 获取当前停泊事件开始时的经纬度\n nowTime = int(shipAISList[startIndex][1])\n nowLon = float(shipAISList[startIndex][6])\n nowLat = float(shipAISList[startIndex][7])\n\n # 获取停泊事件之间的间隔距离与间隔时间\n apartDst = getDist(lon1=preLon, lat1=preLat, lon2=nowLon, lat2=nowLat) * self.preision\n apartTime = nowTime - preTime\n # 判断是否满足合并条件\n if((apartDst <= self.mergeDst) | (apartTime <= self.mergeTime)): # 需要合并\n # 返回值True\n mergeBool = True\n else: # 不需要合并\n # 返回值False\n mergeBool = False\n return mergeBool\n\n # 将cogroupData分割出动态数据与静态数据\n # 输入参数: cogroupData -- AIS动态数据与静态数据的合并数据,键值对\n def __splitCogroup(self, cogroupData):\n # 获取主键,MMSI\n keyName = cogroupData[0]\n # 获取建值,动态数据与静态数据的合并数据\n valueList = list(cogroupData[1])\n # 初始化动态数据、静态数据的存储列表\n aisList = []\n staticList = []\n # 获取动态、静态数据\n for value in valueList:\n if value:\n valueList = list(value)\n for x in valueList:\n if len(list(x)) == 16:\n aisList.append(list(x))\n elif len(list(x)) == 13:\n staticList.append(list(x))\n return keyName, aisList, staticList\n\n # 根据地球两点间距离求得平均速度\n def getAvgSpeed(self, dst, detaTime):\n if (detaTime == 0):\n avgSpeed = dst / (detaTime + 1)\n else:\n avgSpeed = dst / detaTime\n return avgSpeed\n\n # 获取停泊事件程序段\n # 输入参数:shipAIS -- sparkRDD分组后的每个元祖\n def moorShipGroup(self, shipAIS, staticDF):\n # 将分组后的AIS数据转换为list\n groupList = list(shipAIS)\n MMSI = groupList[0] # 船舶MMSI\n # print MMSI\n # shipAISList = list(groupList[1]) # 船舶AIS数据\n # shipAISList = np.array(groupList[1]) # 船舶AIS数据\n # 将AIS数据中的str转为整型或浮点型\n shipAISList = []\n for lineAIS in list(groupList[1]):\n lineAIS[0] = int(lineAIS[0])\n lineAIS[1] = int(lineAIS[1])\n lineAIS[6] = float(lineAIS[6]) / 1000000.\n lineAIS[7] = float(lineAIS[7]) / 1000000.\n lineAIS[9] = float(lineAIS[9])\n lineAIS[11] = float(lineAIS[11])\n lineAIS[12] = float(lineAIS[12])\n lineAIS[15] = float(lineAIS[15].split(\"&\")[0])\n shipAISList.append([int(lineAIS[0]), int(lineAIS[1]), 2, 3, 4, 5, float(lineAIS[6]),\n float(lineAIS[7]), 8, 9, 10, 11, 12, 13, 14, 15])\n shipAISList.sort(key=lambda v: v[1])\n shipAISList = np.array(shipAISList)\n # for x in shipAISList:\n # print x\n # print \"-----------------------------------\"\n # 初始化该船舶形成的最终停泊事件列表,暂存停泊事件索引\n tmpNavBool = False # 判断是否存在暂存停泊事件\n tmpNavStartIndex = 0\n tmpNavEndIndex = 0\n nav_event = []\n # 获取船舶AIS数据的条数\n aisLen = len(shipAISList)\n # 判断AIS数据是否仅存在一条\n if (aisLen <= 1): # 若AIS数据只有1条,无法形成停泊事件\n pass\n else: # 若AIS数据大于1条,找出停泊事件\n # 初始化停泊时间窗口的左窗口\n startIndex = 0\n # 初始化上一条停泊事件的时间与索引\n pre_startIndex = 0\n pre_endIndex = 0\n # 判断停泊时间窗口开启,startIndex为窗口左端\n # startIndex从AIS数据的第一条开始循环,循环制倒数第二条\n while (startIndex < (aisLen - 1)):\n # 初始化窗口右端\n endIndex = startIndex\n # 初始化最大最小经纬度\n maxLon = shipAISList[startIndex][6]\n maxLat = shipAISList[startIndex][7]\n minLon = shipAISList[startIndex][6]\n minLat = shipAISList[startIndex][7]\n # 判断窗口右端是否需要移动\n while (endIndex < (aisLen - 1)):\n # 获取endIndex 与 endIndex + 1的平均速度\n tmpDst = getDist(lon1=shipAISList[endIndex][6], lat1=shipAISList[endIndex][7],\n lon2=shipAISList[endIndex + 1][6], lat2=shipAISList[endIndex + 1][7])\n tmpDetaTime = 
shipAISList[endIndex + 1][1] - shipAISList[endIndex][1]\n avgSpeed = self.getAvgSpeed(tmpDst, tmpDetaTime)\n # 判断平均速度条件是否满足停泊事件的最大低速条件\n if (avgSpeed < self.D_SPEED): # 若满足停泊事件的低速阈值条件\n # 找出次停泊范围内的经纬度极值\n if maxLon < shipAISList[endIndex + 1][6]:\n maxLon = shipAISList[endIndex + 1][6]\n if maxLat < shipAISList[endIndex + 1][7]:\n maxLat = shipAISList[endIndex + 1][7]\n if minLon > shipAISList[endIndex + 1][6]:\n minLon = shipAISList[endIndex + 1][6]\n if minLat > shipAISList[endIndex + 1][7]:\n minLat = shipAISList[endIndex + 1][7]\n # 获取此范围内生成的最大距离\n maxDst = self.preision * getDist(maxLon, maxLat, minLon, minLat)\n # 判断是否满足停泊事件的距离阈值条件\n if (maxDst < self.D_DST): # 满足距离阈值条件\n # 满足距离、速度条件,输出数据\n # 窗口右端向右移动\n endIndex = endIndex + 1\n # 特殊处理部分:最后一条仍为停泊事件\n if (endIndex == (aisLen - 1)): # 若停泊条件且endIndex为最后一条\n # 判断是否存在暂存停泊事件\n if (tmpNavBool): # 若存在暂存停泊事件\n # 判断暂存停泊事件与该停泊事件是否需要合并\n mergeBool = self.__mergeMoor(shipAISList=shipAISList,\n startIndex=startIndex,\n lastEndIndex=tmpNavEndIndex)\n if (mergeBool): # 若需要进行合并\n # 输出停泊事件,暂存停泊开始至当前停泊结束\n outList = self.__convertMoorResult(shipAISList=shipAISList,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=endIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(outList)\n else: # 若不需要进行合并\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISList,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n pre_endIndex = tmpNavEndIndex\n # 输出当前停泊事件\n outList = self.__convertMoorResult(shipAISList=shipAISList,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n nav_event.append(outList)\n # 清空暂存停泊事件\n tmpNavBool = False\n else: # 若不存在暂存停泊事件\n pass\n startIndex = endIndex\n break\n else: # 不满足距离阈值条件\n if (endIndex > startIndex): # 若已有停泊事件生成\n # 判断是否存在暂存停泊事件\n if (tmpNavBool): # 若存在暂存停泊事件\n # 判断是否需要进行合并\n mergeBool = self.__mergeMoor(shipAISList=shipAISList,\n startIndex=startIndex,\n lastEndIndex=tmpNavEndIndex)\n if (mergeBool): # 若需要进行合并\n tmpNavEndIndex = endIndex\n else: # 若不需要进行合并\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISList,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n pre_endIndex = tmpNavEndIndex\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n else: # 若不存在暂存停泊事件\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n tmpNavBool = True\n startIndex = endIndex\n break\n else: # 若没有生成停泊事件\n startIndex = endIndex + 1\n break\n else: # 若不满足停泊事件低速条件\n if (endIndex > startIndex): # 若已有停泊事件生成\n # 判断是否存在暂存停泊事件\n if (tmpNavBool): # 若存在暂存停泊事件\n # 判断是否需要进行合并\n mergeBool = self.__mergeMoor(shipAISList=shipAISList,\n startIndex=startIndex,\n lastEndIndex=tmpNavEndIndex)\n if (mergeBool): # 若需要进行合并\n tmpNavEndIndex = endIndex\n else: # 若不需要进行合并\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISList,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n pre_endIndex = tmpNavEndIndex\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n else: # 若不存在暂存停泊事件\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n tmpNavBool = True\n startIndex = endIndex\n break\n else: # 若没有产生过停泊事件,即又窗口没有产生过,左窗口向右移动一行\n startIndex = endIndex + 1\n break\n # 特殊处理:当右端窗口达到倒数第二条,判断是否存在暂存停泊事件需要输出\n if (endIndex == (aisLen - 2)):\n # 
判断是否存在暂存停泊事件\n if (tmpNavBool): # 若存在暂存停泊事件\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISList,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n tmpNavBool = False\n else: # 若不存在暂存停泊事件\n pass\n startIndex = endIndex + 1\n moorStr = self.__getNavStr(nav_event)\n return moorStr\n\n # 获取停泊事件程序段\n # 输入参数:cogroupData -- AIS动态数据与静态数据的合并数据,键值对\n def moorShipCogroup(self, cogroupData):\n # 获取AIS动态数据、静态数据\n mmsi, shipAISList, staticDF = self.__splitCogroup(cogroupData)\n print(mmsi)\n # 将分组后的AIS数据转换为list\n # groupList = list(shipAIS)\n # shipAISList = shipAISList # 船舶AIS数据\n staticDF = pd.DataFrame(staticDF, columns=[\"shipid\", \"time\", \"shiptype\", \"length\", \"width\", \"left\",\n \"trail\", \"imo\", \"name\", \"callsign\", \"draught\", \"destination\", \"eta\"])\n staticDF[\"shipid\"] = staticDF[\"shipid\"].astype(int)\n staticDF[\"time\"] = staticDF[\"time\"].astype(int)\n # shipAISList = np.array(groupList[1]) # 船舶AIS数据\n # 将AIS数据中的str转为整型或浮点型\n for lineAIS in shipAISList:\n lineAIS[0] = int(lineAIS[0])\n lineAIS[1] = int(lineAIS[1])\n lineAIS[6] = float(lineAIS[6]) / 1000000.\n lineAIS[7] = float(lineAIS[7]) / 1000000.\n lineAIS[9] = float(lineAIS[9])\n lineAIS[11] = int(lineAIS[11])\n lineAIS[12] = int(lineAIS[12])\n lineAIS[15] = int(lineAIS[15].split(\"&\")[0])\n # shipAISList = shipAISList.sort(key=lambda v: v[1])\n shipAISArray = np.array(shipAISList)\n # 初始化该船舶形成的最终停泊事件列表,暂存停泊事件索引\n tmpNavBool = False # 判断是否存在暂存停泊事件\n tmpNavStartIndex = 0\n tmpNavEndIndex = 0\n nav_event = []\n # 获取船舶AIS数据的条数\n aisLen = len(shipAISList)\n # 判断AIS数据是否仅存在一条\n if(aisLen <= 1): # 若AIS数据只有1条,无法形成停泊事件\n pass\n else: # 若AIS数据大于1条,找出停泊事件\n # 初始化停泊时间窗口的左窗口\n startIndex = 0\n # 初始化上一条停泊事件的时间与索引\n pre_startIndex = 0\n pre_endIndex = 0\n # 判断停泊时间窗口开启,startIndex为窗口左端\n # startIndex从AIS数据的第一条开始循环,循环制倒数第二条\n while (startIndex < (aisLen - 1)):\n # 初始化窗口右端\n endIndex = startIndex\n # 初始化最大最小经纬度\n maxLon = shipAISList[startIndex][6]\n maxLat = shipAISList[startIndex][7]\n minLon = shipAISList[startIndex][6]\n minLat = shipAISList[startIndex][7]\n # 判断窗口右端是否需要移动\n while(endIndex < (aisLen - 1)):\n # 获取endIndex 与 endIndex + 1的平均速度\n tmpDst = getDist(lon1=shipAISList[endIndex][6], lat1=shipAISList[endIndex][7],\n lon2=shipAISList[endIndex + 1][6], lat2=shipAISList[endIndex + 1][7])\n tmpDetaTime = shipAISList[endIndex + 1][1] - shipAISList[endIndex][1]\n avgSpeed = self.getAvgSpeed(tmpDst, tmpDetaTime)\n # 判断平均速度条件是否满足停泊事件的最大低速条件\n if(avgSpeed < self.D_SPEED): # 若满足停泊事件的低速阈值条件\n # 找出次停泊范围内的经纬度极值\n if maxLon < shipAISList[endIndex + 1][6]:\n maxLon = shipAISList[endIndex + 1][6]\n if maxLat < shipAISList[endIndex + 1][7]:\n maxLat = shipAISList[endIndex + 1][7]\n if minLon > shipAISList[endIndex + 1][6]:\n minLon = shipAISList[endIndex + 1][6]\n if minLat > shipAISList[endIndex + 1][7]:\n minLat = shipAISList[endIndex + 1][7]\n # 获取此范围内生成的最大距离\n maxDst = self.preision * getDist(maxLon, maxLat, minLon, minLat)\n # 判断是否满足停泊事件的距离阈值条件\n if(maxDst < self.D_DST): # 满足距离阈值条件\n # 满足距离、速度条件,输出数据\n # 窗口右端向右移动\n endIndex = endIndex + 1\n # 特殊处理部分:最后一条仍为停泊事件\n if(endIndex == (aisLen - 1)): # 若停泊条件且endIndex为最后一条\n # 判断是否存在暂存停泊事件\n if tmpNavBool: # 若存在暂存停泊事件\n # 判断暂存停泊事件与该停泊事件是否需要合并\n mergeBool = self.__mergeMoor(shipAISList=shipAISArray,\n startIndex=startIndex,\n lastEndIndex=tmpNavEndIndex)\n if mergeBool: # 若需要进行合并\n # 输出停泊事件,暂存停泊开始至当前停泊结束\n outList = self.__convertMoorResult(shipAISList=shipAISArray,\n 
staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=endIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(outList)\n else: # 若不需要进行合并\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISArray,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n pre_endIndex = tmpNavEndIndex\n # 输出当前停泊事件\n outList = self.__convertMoorResult(shipAISList=shipAISArray,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n nav_event.append(outList)\n # 清空暂存停泊事件\n tmpNavBool = False\n else: # 若不存在暂存停泊事件\n pass\n startIndex = endIndex\n break\n else: # 不满足距离阈值条件\n if endIndex > startIndex: # 若已有停泊事件生成\n # 判断是否存在暂存停泊事件\n if tmpNavBool: # 若存在暂存停泊事件\n # 判断是否需要进行合并\n mergeBool = self.__mergeMoor(shipAISList=shipAISArray,\n startIndex=startIndex,\n lastEndIndex=tmpNavEndIndex)\n if mergeBool: # 若需要进行合并\n tmpNavEndIndex = endIndex\n else: # 若不需要进行合并\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISArray,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n pre_endIndex = tmpNavEndIndex\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n else: # 若不存在暂存停泊事件\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n tmpNavBool = True\n startIndex = endIndex\n break\n else: # 若没有生成停泊事件\n startIndex = endIndex + 1\n break\n else: # 若不满足停泊事件低速条件\n if endIndex > startIndex: # 若已有停泊事件生成\n # 判断是否存在暂存停泊事件\n if tmpNavBool: # 若存在暂存停泊事件\n # 判断是否需要进行合并\n mergeBool = self.__mergeMoor(shipAISList=shipAISArray,\n startIndex=startIndex,\n lastEndIndex=tmpNavEndIndex)\n if mergeBool: # 若需要进行合并\n tmpNavEndIndex = endIndex\n else: # 若不需要进行合并\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISArray,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n pre_endIndex = tmpNavEndIndex\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n else: # 若不存在暂存停泊事件\n tmpNavStartIndex = startIndex\n tmpNavEndIndex = endIndex\n tmpNavBool = True\n startIndex = endIndex\n break\n else: # 若没有产生过停泊事件,即又窗口没有产生过,左窗口向右移动一行\n startIndex = endIndex + 1\n break\n # 特殊处理:当右端窗口达到倒数第二条,判断是否存在暂存停泊事件需要输出\n if endIndex == (aisLen - 2):\n # 判断是否存在暂存停泊事件\n if tmpNavBool: # 若存在暂存停泊事件\n # 输出暂存停泊事件\n tmpOutList = self.__convertMoorResult(shipAISList=shipAISArray,\n staticDF=staticDF,\n startIndex=tmpNavStartIndex,\n endIndex=tmpNavEndIndex,\n lastEndIndex=pre_endIndex)\n nav_event.append(tmpOutList)\n tmpNavBool = False\n else: # 若不存在暂存停泊事件\n pass\n startIndex = endIndex + 1\n moorStr = self.__getNavStr(nav_event)\n return moorStr\n\n #########################################################################################\n # 判断停泊事件与多边形港口的位置关系\n # 输入参数:polyPortGDF -- 多边形港口数据分组后数据;moorLon -- 停泊事件所在经度\n # moorLat -- 停泊事件所在纬度\n def __moorPoly(self, polyPortDF, moorLon, moorLat):\n name_list = list(set(polyPortDF[\"portName\"]))\n name_list.sort()\n for port_name in name_list:\n # 获取\"多边形港口\"的坐标集合\n portNameStr = port_name\n aPolyPortDF = polyPortDF[polyPortDF[\"portName\"] == port_name]\n aPolyPortCorNum = len(aPolyPortDF)\n aPolyPortCoorList = []\n for aPolyPortDFIndex in range(aPolyPortCorNum):\n tmpPolyPortCorList = [aPolyPortDF.iloc[aPolyPortDFIndex, 0],\n aPolyPortDF.iloc[aPolyPortDFIndex, 1]]\n aPolyPortCoorList.append(tmpPolyPortCorList)\n # 
求出\"多边形港口\"的中心坐标点,用平均值来求得\n lonList = [lon[0] for lon in aPolyPortCoorList]\n latList = [lat[1] for lat in aPolyPortCoorList]\n portAvgLon = sum(lonList) / len(lonList)\n portAvgLat = sum(latList) / len(latList)\n aPolyPortCoorArray = np.array(aPolyPortCoorList)\n # 判断停泊事件是否存在于次多边形内\n\n moor_port_dst = getDist(lon1=portAvgLon, lat1=portAvgLat, lon2=moorLon, lat2=moorLat)\n moorPortList = [None] * 6\n navPolyBool = False\n if moor_port_dst < 30.:\n if point_poly(moorLon, moorLat, aPolyPortCoorArray):\n # 在原来的停泊事件字段内添加港口名称、港口经度、港口纬度数据\n moorPortList = [portNameStr, -1, -1, -1, portAvgLon, portAvgLat]\n navPolyBool = True\n return moorPortList, navPolyBool\n return moorPortList, navPolyBool\n\n # 获取离停泊事件发生地点最近的地点ID\n def __get_closest_port(self, close_port_df, moorLon, moorLat, min_radius):\n min_dst = 999999999.\n closest_port_name = \"no port\"\n closest_breth_id = -1\n closest_terminal_id = -1\n closest_port_id = -1\n closest_lon = -999\n closest_lat = -999\n\n # 找到距离最近的地点信息\n close_port_array = np.array(close_port_df)\n for aPort in close_port_array:\n aPortName = aPort[0]\n aPortLon = float(aPort[4])\n aPortLat = float(aPort[5])\n\n # 获取两点间的距离\n tmp_dst = getDist(lon1=moorLon, lat1=moorLat, lon2=aPortLon, lat2=aPortLat)\n if tmp_dst < min_dst:\n min_dst = tmp_dst\n closest_port_name = aPortName\n closest_breth_id = aPort[3]\n closest_terminal_id = aPort[2]\n closest_port_id = aPort[1]\n closest_lon = aPortLon\n closest_lat = aPortLat\n\n # 判断最近的地点信息是否满足距离阈值条件\n if min_dst < min_radius: # 若满足条件\n moorPortList = [closest_port_name, closest_port_id, closest_terminal_id, closest_breth_id, closest_lon,\n closest_lat]\n moorPointBool = True\n else:\n moorPortList = [None] * 6\n moorPointBool = False\n return moorPortList, moorPointBool\n\n\n # 判断停泊事件与点港口之间的位置关系\n # 输入参数:pointPortArray -- 点港口数据;\n # moorLon -- 停泊事件所在经度;moorLat -- 停泊事件所在纬度\n def __moorPoint(self, pointPortDF, moorLon, moorLat):\n # 根据停泊事件的经纬度坐标,获取该坐标附近区域的areaID,记作moorCloseAreaID\n moorCloseAreaID = getCloseArea(areaCenterLon=moorLon, areaCenterLat=moorLat,\n dst=25., grade=0.1)\n closePortDF = pointPortDF[pointPortDF[\"areaID\"].isin(moorCloseAreaID)]\n\n # 找出BrethID不为-1的\n close_breth_df = closePortDF[closePortDF[\"BrethID\"] != -1]\n if len(close_breth_df) != 0:\n close_breth_res_df, close_breth_res_bool = self.__get_closest_port(close_port_df=close_breth_df,\n moorLon=moorLon,\n moorLat=moorLat,\n min_radius=self.moorDst)\n else:\n close_breth_res_bool = False\n\n # 找出TerminalID不为-1的\n close_terminal_res_bool = False\n if not close_breth_res_bool:\n close_terminal_df = closePortDF[(closePortDF[\"TerminalID\"] != -1) & (closePortDF[\"BrethID\"] == -1)]\n if len(close_terminal_df) != 0:\n close_terminal_res_df, close_terminal_res_bool = self.__get_closest_port(close_port_df=close_terminal_df,\n moorLon=moorLon,\n moorLat=moorLat,\n min_radius=self.moorDst)\n else:\n close_terminal_res_bool = False\n\n # 找出PortID不为-1的\n if not (close_breth_res_bool | close_terminal_res_bool):\n close_port_df = closePortDF[(closePortDF[\"TerminalID\"] == -1) & (closePortDF[\"BrethID\"] == -1)]\n close_port_res_df, close_port_res_bool = self.__get_closest_port(close_port_df=close_port_df,\n moorLon=moorLon,\n moorLat=moorLat,\n min_radius=self.moorDst)\n return close_port_res_df\n else:\n if close_breth_res_bool:\n return close_breth_res_df\n if close_terminal_res_bool:\n return close_terminal_res_df\n\n\n # 判断停泊事件与港口数据之间的关系\n # 输入参数:moorRDD -- 停泊事件数据;polyPort -- 多边形港口数据;pointPort -- 点港口数据\n def moorPort(self, moorRDD, polyPortDF, pointPortDF):\n 
# 初始化停泊事件数据列表\n moorList = []\n # 将moorRDD按行进行分割\n moorRDDList = moorRDD.split(\"\\n\")\n\n for moorRDDLine in moorRDDList:\n if(moorRDDLine):\n # 分割每行停泊事件数据\n moorLineList = moorRDDLine.split(\",\")\n # print moorLineList[0]\n # 获取停泊事件中的平均经纬度数据,areaID\n moorAvgLon = float(moorLineList[15]) / 1000000.\n moorAvgLat = float(moorLineList[16]) / 1000000.\n moorAreaID = int(float(moorLineList[28]))\n # 对每个\"多边形港口\"中的人工标定码头进行判断\n # 判断停泊事件与多边形港口之间的位置关系\n # moorPortList存放所在的多边形港口信息,navPolyBool判断是否在多边形港口内出现过\n moorPortList, moorPolyBool = self.__moorPoly(polyPortDF=polyPortDF,\n moorLon=moorAvgLon,\n moorLat=moorAvgLat)\n # 判断该停泊事件是否在多边形港口内出现过\n if(moorPolyBool): # 若在多边形内出现,不判断点港口数据\n pass\n else: # 若没有在多边形港口内出现过,判断点港口数据\n # 循环每条点港口数据\n moorPortList = self.__moorPoint(pointPortDF=pointPortDF,\n moorLon=moorAvgLon,\n moorLat=moorAvgLat)\n # 判断该停泊事件是否存在于多边形港口或点港口内\n moorLineList.extend(moorPortList)\n moorList.append(moorLineList)\n moorStr = self.__getNavStr(moorList)\n return moorStr\n\n\nif __name__ == \"__main__\":\n MASTER_HOME = \"local[2]\"\n conf = SparkConf()\n conf.setMaster(MASTER_HOME)\n conf.setAppName(\"ais_moor\")\n conf.set(\"spark.driver.maxResultSize\", \"4g\")\n conf.set(\"spark.driver.memory\", \"4g\")\n conf.set(\"spark.executor.memory\", \"4g\")\n\n sc = SparkContext(conf=conf)\n fc = format_convert()\n mr = moor()\n\n polyPortDF = pd.read_csv(\"./data/Asia_anchores.csv\")\n polyPortDF.columns = [\"longitude\", \"latitude\", \"portName\"]\n\n GlobalPortDF = pd.read_csv(\"./data/GlobalPort.csv\")\n\n staticDF = pd.DataFrame(columns=[\"shipid\", \"time\", \"shiptype\", \"length\", \"width\", \"left\",\n \"trail\", \"imo\", \"name\", \"callsign\", \"draught\", \"destination\", \"eta\"])\n\n shipsAISRDD = sc.textFile(\"/Users/qiujiayu/Downloads/bm_ais_test.tsv\") \\\n .map(lambda line: fc.bm_to_thr(line)) \\\n .filter(lambda line: line != None) \\\n .map(lambda line: line.split(\",\")) \\\n .groupBy(lambda v: v[0])\n\n navsRDD = shipsAISRDD.map(lambda group: mr.moorShipGroup(shipAIS=group, staticDF=staticDF)) \\\n .filter(lambda group: group != None)\n\n moorPortRDD = navsRDD.map(lambda group: mr.moorPort(moorRDD=group, pointPortDF=GlobalPortDF,\n polyPortDF=polyPortDF)) \\\n .repartition(1) \\\n .saveAsTextFile(\"./moortest\")\n sc.stop()\n","sub_path":"aismoor/aismoor.py","file_name":"aismoor.py","file_ext":"py","file_size_in_byte":55883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"74969565","text":"# -*- coding: utf-8 -*-\n# Description:\n# Created: liujiaye 2019/12/09\nimport math\nimport numpy as np\n\n\ndef sigmoid(x, use_status=True):\n if use_status:\n return 1.0 / (1 + np.exp(-float(x)))\n else:\n return float(x)\n\n\n# 归一化工具(my)\nclass ZScore:\n def __init__(self, sample):\n '''\n :param entity:样本数据 type:list\n '''\n if len(sample) == 0:\n print(\"sample is null\")\n self.u = 0\n self.o = 1\n else:\n sample_sum = sum(data for data in sample)\n self.u = float(sample_sum / len(sample)) # 样本数据均值\n self.o = math.sqrt(sum(data - self.u for data in sample) / len(sample)) # 样本数据的标准差\n\n def z_score(self, data):\n return (float(data) - self.u) / self.o\n\n\n# if __name__ == '__main__':\n# sample = [1, 213, 4, 12, 32, -213, 0, -3, -1, -42]\n# result = sigmoid(sample[6])\n# print(type(result))\n# print(result)\n","sub_path":"app/tool/z_score.py","file_name":"z_score.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
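# Note on the ZScore class in the record above: its standard deviation is
# computed as sqrt(sum(data - u) / n) -- the deviations are never squared, so
# the sum is ~0 and may be slightly negative, which raises a math domain
# error. A minimal corrected sketch (the name ZScorePopulation and the
# population-variance choice are assumptions, not the original author's code):

import math

class ZScorePopulation:
    def __init__(self, sample):
        if not sample:
            # degenerate fall-back, mirroring the original: identity transform
            self.u, self.o = 0.0, 1.0
        else:
            self.u = sum(sample) / len(sample)  # sample mean
            # square each deviation before averaging, then take the root
            self.o = math.sqrt(sum((x - self.u) ** 2 for x in sample) / len(sample))

    def z_score(self, data):
        return (float(data) - self.u) / self.o

# Usage sketch: ZScorePopulation([1, 2, 3, 4]).z_score(2.5) -> 0.0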
+{"seq_id":"564521020","text":"def scramble(s1, s2):\n print(s1, s2)\n\n for i in set(s2):\n if s1.count(i) < s2.count(i):\n print(False)\n return False\n print(True)\n return True\n\n\nscramble('rkqodlw', 'world')\nscramble('cedewaraaossoqqyt', 'codewars')\nscramble('katas', 'steak')\nscramble('scriptjava', 'javascript')\nscramble('scriptingjava', 'javascript')\n","sub_path":"python/scramblies/scramblies.py","file_name":"scramblies.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"543748166","text":"import grpc\nimport MFTApi_pb2\nimport MFTApi_pb2_grpc\n\nchannel = grpc.insecure_channel('localhost:7004')\nstub = MFTApi_pb2_grpc.MFTApiServiceStub(channel)\n\nrequest = MFTApi_pb2.FetchResourceMetadataRequest(resourceId= \"remote-ssh-dir-resource\",\n resourceType = \"SCP\",\n resourceToken = \"local-ssh-cred\",\n resourceBackend = \"FILE\",\n resourceCredentialBackend= \"FILE\",\n targetAgentId = \"agent0\",\n childPath= \"\",\n mftAuthorizationToken = \"user token\")\n\nresponse = stub.getDirectoryResourceMetadata(request)\nprint(response)","sub_path":"examples/src/main/python/directory_browse.py","file_name":"directory_browse.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574977369","text":"import re\nimport os\nimport click\nimport operator\nimport tabulate\nimport itertools\nimport simplejson as json\n\nfrom sh import cp\nfrom time import strftime\n\nfrom overseer import Tip\nfrom overseer import Overseer\nfrom config import Config\nfrom decoration import Decoration\n\n@click.group()\n@click.pass_context\ndef cli(ctx, **kwargs):\n config = Config()\n obj = ctx.obj = {'conf': config, 'tip_overseer': Overseer(config)}\n\n@cli.command('save')\n@click.option('-t', '--title', help='Title for tip', metavar='')\n@click.option('-b', '--brief-desc', help='Brief description for tip', metavar='')\n@click.option('--content', help='Use if it is a short tip', metavar='')\n@click.option('--file', type=click.Path(exists=True), help='Path to file', metavar='')\n@Decoration.build_categories\n@click.pass_obj\ndef save(obj, title, **kwargs):\n \"\"\" Create new tips \"\"\"\n\n now = strftime(\"%Y-%m-%d %H:%M:%S\")\n tip = Tip(title, creation_date=now, modification_date=now, **kwargs)\n tip_id = obj['tip_overseer'].save_tip(tip)\n\n if kwargs['file'] is not None:\n copy_file(kwargs['file'], obj['conf'].getSection('files')['path'])\n\n click.echo('Tip was saved with id #%s' % tip_id)\n\n@cli.command('delete')\n@click.option('-t', '--tip-id', type=int, help='Deletes tip')\n@click.pass_obj\ndef delete(obj, tip_id):\n \"\"\" Delete tips \"\"\"\n\n if tip_id is None:\n tip_id = click.prompt('Please enter an id associated with a tip')\n \n try:\n obj['tip_overseer'].delete_tip(tip_id)\n except KeyError as e:\n click.echo('There\\'s no tip associated with id: %s' % str(tip_id))\n\n@cli.command('list')\n@click.option('--format', type=click.Choice(tabulate.tabulate_formats),\n help='Visual text formatting for the output table')\n@Decoration.build_sort\n@Decoration.build_columns\n@Decoration.build_filters\n@click.pass_obj\ndef list(obj, **kwargs):\n \"\"\" Query tips \"\"\"\n\n overseer = obj['tip_overseer']\n options = decide_options(\n obj['conf'].getSection('list'),\n kwargs\n )\n headers = overseer.headers()\n data = overseer.get_tip_list()\n data = apply_filters(obj, headers, data, options['filter'])\n data = 
sort_table(obj, headers, data, options)\n headers, data = column_decider(obj, headers, data, options)\n click.echo(table(headers, data, options['format']))\n\ndef table(header, data, design):\n return tabulate.tabulate(data, headers=header, tablefmt=design)\n\n@cli.command('modify')\n@click.option('-t', '--tip-id', type=int, help='Modify tip')\ndef modify():\n \"\"\" Modify tips \"\"\"\n # IMPLEMENT ME\n pass\n\ndef apply_filters(obj, headers, data, filter):\n \"\"\" Applies regex to column \"\"\"\n\n for col, regex in filter:\n data = do_filter(headers, data, col, regex)\n return data\n\ndef do_filter(headers, data, col, regex):\n \"\"\" Filters data \"\"\"\n\n try:\n index = headers.index(col)\n except ValueError:\n raise click.ClickException(\n 'Unknown column \"{}\"'.format(col))\n try:\n regex = re.compile(regex)\n except ValueError:\n raise click.ClickException(\n 'Could not compile regular expression \"{}\"'.format(regex))\n return (e for e in data if regex.search(str(e[index])))\n\ndef column_decider(obj, headers, data, options):\n \"\"\" Decides which columns are to be printed \"\"\"\n\n column, columns, dump_all = operator.itemgetter('column', 'columns', 'dump_all')(options)\n columns = headers if dump_all else itertools.chain((columns or headers), column)\n\n try:\n indexes = [headers.index(col) for col in columns]\n except ValueError:\n raise click.ClickException('Unknown column set: {}'.format(columns))\n\n headers = [headers[i] for i in indexes]\n data = [operator.itemgetter(*indexes)(tip) for tip in data]\n\n return headers, data\n\n\ndef sort_table(obj, headers, data, options):\n \"\"\" Sort entries \"\"\"\n\n sort_by, group_by, reverse = operator.itemgetter('sort_by', 'group_by', 'reverse')(options)\n\n try:\n indexes = [headers.index(h) for h in sort_by]\n except ValueError:\n raise click.ClickException('Unknown column set: {}'.format(sort_by))\n\n if group_by:\n try:\n index = headers.index(group_by)\n except ValueError:\n raise click.ClickException('Unknown column: {}'.format(group_by))\n\n try:\n indexes.remove(index)\n except ValueError:\n pass\n\n indexes.insert(0, index)\n\n getter = operator.itemgetter(*indexes)\n return sorted(data, key=getter, reverse=reverse)\n\ndef copy_file(src, dest):\n dest = process_destination(dest)\n try:\n cp(src, dest)\n except Exception as e:\n raise e\n\ndef process_destination(dest):\n if dest.startswith('~'):\n dest = os.path.expanduser(dest)\n if not os.path.exists(dest):\n raise Exception('Destination file %s does not exist' % dest)\n return dest\n\ndef decide_options(config, user_options={}):\n \"\"\" User input steps on config \"\"\"\n\n options = {}\n options.update(config)\n options.update({k: v for k, v in user_options.items() if k not in options or (v is not None and v != ())})\n return options\n","sub_path":"tips/tip.py","file_name":"tip.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653974213","text":"#!/usr/bin/env python3\n\nimport angr #the main framework\n\nimport os\nfrom subprocess import Popen, PIPE\n\nproj = angr.Project(\"afl_strcmp\", auto_load_libs=False) # auto_load_libs False for improved performance\n\nstate = proj.factory.entry_state() # states\n\nsimgr = proj.factory.simulation_manager(state) # simulation manager\n\nfind_addr = 0x004007f9 # mov edi, str.You_got_the_crash\n\nsimgr.explore(find=find_addr)\n\nif simgr.found:\n found = simgr.found[0].posix.dumps(0) # A state that reached the find condition 
from explore\n    print(found.decode())\n    p = Popen('./afl_strcmp', stdin=PIPE) # start the exe\n    p.stdin.write(found) # cause the crash by giving the flag\n","sub_path":"l3/do_strcmp.py","file_name":"do_strcmp.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"552550284","text":"from django.urls import path\nfrom . import views\nfrom risapp.views import article_view,article_create_view,comments_create,comments_view,index\n\n\nurlpatterns = [\n    path('home', index, name='index'),\n    path('article', views.article_view, name='article'),\n    path('create', article_create_view, name='create'), \n    path('comment', comments_create, name='comment'),\n    path('comm', comments_view, name='comm'),\n]\n\n","sub_path":"risapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"137297665","text":"#!/home/kimkk/miniconda3/envs/lomin/bin/python\n\nimport os\nimport argparse\nfrom pprint import pprint\nfrom datetime import datetime\nfrom glob import glob\nfrom tqdm import tqdm\nfrom PIL import Image\nimport torch\nimport numpy as np\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data.transforms import build_transforms\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.utils.label_catalog import LabelCatalog\nfrom maskrcnn_benchmark.utils.converter import Converter \n\n\nclass DetectionDemo(object):\n    def __init__(self, cfg, weight, is_recognition=False):\n        self.is_recognition = is_recognition\n        self.cfg = cfg.clone()\n        self.device = torch.device(cfg.MODEL.DEVICE)\n        self.model = build_detection_model(self.cfg)\n        self.model.to(self.device)\n        self.model.eval()\n\n        checkpointer = DetectronCheckpointer(cfg, self.model, save_dir='/dev/null')\n        _ = checkpointer.load(weight)\n\n        self.transforms = build_transforms(self.cfg, self.is_recognition)\n        self.cpu_device = torch.device(\"cpu\")\n        self.score_thresh = self.cfg.TEST.SCORE_THRESHOLD\n\n    def run_on_pil_image(self, image_origin):\n        prediction = self.compute_prediction(image_origin)\n        prediction = self.filter_by_score(prediction)\n        prediction = prediction.resize(image_origin.size)\n        result = self.parse_result(prediction)\n        return result\n\n    def compute_prediction(self, image):\n        image = self.transforms(image)\n        image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY).to(self.device)\n        with torch.no_grad():\n            pred = self.model(image_list)\n        pred = pred[0].to(self.cpu_device)\n        return pred\n    \n    def filter_by_score(self, prediction):\n        filter_thres = prediction.get_field('scores') > self.score_thresh\n        return prediction[filter_thres]\n\n    def parse_result(self, pred):\n        bbox = pred.bbox.numpy().tolist()\n        scores = pred.get_field('scores').numpy().tolist()\n        labels = pred.get_field('labels').numpy().tolist()\n        return dict(\n            bboxes=bbox,\n            labels=labels,\n            scores=scores,\n        )\n    \n\nclass RecognitionDemo(DetectionDemo):\n    def __init__(self, cfg, weight):\n        self.batch_max_length = cfg.MODEL.TEXT_RECOGNIZER.BATCH_MAX_LENGTH\n        self.load_converter(cfg)\n        super(RecognitionDemo, self).__init__(cfg ,weight, True)\n\n    def run_on_pil_image(self, image_origin):\n        
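# NOTE: compute_prediction() applies self.transforms internally, so the raw\n        # PIL image is passed straight through; the result carries the encoded text\n        # in its 'pred' field, which is decoded below via the character converter.\n        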
prediction = self.compute_prediction(image_origin)\n        encoded_text = prediction.get_field('pred')\n        decoded_text = self.decode_text(encoded_text)\n        return decoded_text\n\n    def load_converter(self, cfg):\n        characters = LabelCatalog.get(cfg.MODEL.TEXT_RECOGNIZER.CHARACTER)\n        self.converter = Converter(characters)\n    \n    def decode_text(self, encoded_text):\n        text = self.converter.decode(encoded_text)\n        text = text[:text.find('[s]')]\n        return text\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--type', required=True, choices=['detection', 'recognition'])\n    args = parser.parse_args()\n    ocr_type = args.type\n\n    cfg_file = os.path.join('model', ocr_type, 'config.yaml')\n    weight_file = os.path.join('model', ocr_type, 'model.pth')\n    cfg = cfg.clone()\n    cfg.merge_from_file(cfg_file)\n\n    demo = {\n        'detection': DetectionDemo,\n        'recognition': RecognitionDemo\n    }[ocr_type](cfg, weight_file)\n\n    input_dir = os.path.join('sample_images', ocr_type)\n    imglist = sorted([_ for _ in os.listdir(input_dir)\n                      if os.path.splitext(_)[1].lower() in ['.jpg', '.png', '.jpeg', '.tif']])\n\n    for imgname in tqdm(imglist):\n        print(imgname)\n        image = Image.open(os.path.join(input_dir, imgname)).convert('RGB')\n        prediction = demo.run_on_pil_image(image)\n        pprint(prediction)\n","sub_path":"demo_analysis.py","file_name":"demo_analysis.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"11617758","text":"\"\"\"\r\nRatiorg got statues of different sizes as a present from CodeMaster\r\nfor his birthday, each statue having a non-negative integer size.\r\nSince he likes to make things perfect, he wants to arrange them from\r\nsmallest to largest so that each statue will be bigger than the previous\r\none exactly by 1. He may need some additional statues to be able to accomplish\r\nthat. 
Help him figure out the minimum number of additional statues needed.\r\n\r\nExample:\r\nFor statues = [6, 2, 3, 8], the output should be\r\nsolution(statues) = 3.\r\n\r\nRatiorg needs statues of sizes 4, 5 and 7.\r\n\r\n[execution time limit] 4 seconds (py3)\r\n[input] array.integer statues\r\n\r\n[output] integer\r\nThe minimal number of statues that need to be added to existing statues such\r\nthat it contains every integer size from an interval [L, R] (for some L, R)\r\nand no other sizes.\r\n\r\nTests passed: 10/10.\r\n\"\"\"\r\ndef solution(statues):\r\n statues.sort()\r\n count = 1\r\n \r\n for x in range(statues[0], statues[len(statues)-1]):\r\n count += 1\r\n \r\n count -= len(statues)\r\n return count\r\n","sub_path":"Python - Coding Challenges/makearrayconsecutive2.py","file_name":"makearrayconsecutive2.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"507920664","text":"from sqlite3.dbapi2 import Time\nfrom flask.helpers import url_for\nimport requests\nfrom movements import app \nfrom flask import render_template, request, redirect, url_for\nimport sqlite3 \nfrom movements.forms import MovementForm, Status_Form\nfrom datetime import date, timezone, datetime\nfrom config import*\nfrom movements.db_ejec import*\n\nurl_coin = \"https://pro-api.coinmarketcap.com/v1/tools/price-conversion?amount={}&symbol={}&convert={}&CMC_PRO_API_KEY={}\"\n\ndef peticion(url):\n respuesta = requests.get(url) \n if respuesta.status_code == 200:\n datos = respuesta.json() \n return datos\n\n@app.route('/')\ndef listaIngresos():\n \n form = MovementForm()\n mensajes = []\n \n try:\n ingresos = consulta('SELECT date, time, from_currency, form_quantity, to_currency, to_quantity, precio FROM movimientos;')\n except Exception as e:\n print(\"**ERROR**🔧: Acceso a base de datos:{} - {}\".format(type(e).__name__, e))\n mensajes.append(\"Error en acceso a base de datos. Consulte con el administrador.\")\n\n return render_template('movimientos.html', form=form, movimientos=[], mensajes=mensajes)\n \n return render_template(\"movimientos.html\", datos=ingresos, form = form)\n \n\n@app.route('/purchase', methods=['GET', 'POST'])\ndef nuevaCompra():\n\n form = MovementForm() \n mensajes = []\n try:\n moneda_saldo = monedas_activas() \n form.from_currency.choices=moneda_saldo \n saldo_total = moneda_saldo_total()\n \n except Exception as e:\n print(\"**ERROR**🔧: Acceso a base de datos:{} - {}\".format(type(e).__name__, e))\n mensajes.append(\"Error en acceso a base de datos. Consulte con el administrador.\")\n\n return render_template(\"purchase.html\", form = form, vacio = True,mensajes=mensajes)\n\n if request.method == 'POST' and form.validate(): \n if form.calculadora.data == True:\n try:\n amount = form.from_cantidad.data \n symbol = form.from_currency.data\n convert = form.to_currency.data\n respuesta = peticion(url_coin.format(amount, symbol, convert, API_KEY))\n cantidad_coin = respuesta['data']['quote'][convert]['price']\n\n pu = float(amount) / float(cantidad_coin)\n\n api_coin = [amount, symbol, convert, cantidad_coin,pu]\n return render_template(\"purchase.html\", form = form, api_coin = api_coin, vacio = False)\n except Exception as e:\n print(\"**ERROR**🔧: Acceso a API - insert: {} - {}\". format(type(e).__name__, e))\n mensajes.append(\"Error en acceso a API. 
Consulte con el administrador.\")\n return render_template(\"purchase.html\", form = form, mensajes = mensajes, vacio = True)\n else:\n try: \n consulta('INSERT INTO movimientos (date, time, from_currency, form_quantity,to_currency, to_quantity, precio) VALUES (?, ?, ? , ? , ? , ?, ?);', \n (\n today_2,\n time,\n form.from_currency.data,\n float(form.from_cantidad.data),\n form.to_currency.data,\n float(form.to_cantidad.data), \n float(form.precio_unitario.data)\n )) \n return redirect(url_for('listaIngresos'))\n except Exception as e:\n print(\"**ERROR**🔧: Acceso a base de datos - insert: {} - {}\". format(type(e).__name__, e))\n mensajes.append(\"Error en acceso a base de datos. Consulte con el administrador.\")\n\n else:\n return render_template(\"purchase.html\", form = form, vacio = True, mensajes = mensajes)\n\n\n@app.route('/status', methods =['GET'])\ndef Estado_Inversion():\n form = Status_Form()\n mensajes = []\n try: \n ingresos = consulta('SELECT SUM(to_quantity) AS total, to_currency FROM movimientos WHERE from_currency = \"EUR\" GROUP BY to_currency')\n\n ingresos_2 = consulta('SELECT SUM(form_quantity) AS total, from_currency FROM movimientos WHERE from_currency=\"EUR\"')\n \n ingresos_2 = str(ingresos_2[0]['total'])+ \" €\"\n\n total = 0\n try: \n for ingreso in ingresos:\n respuesta = peticion(url_coin.format(ingreso['total'], ingreso['to_currency'],\"EUR\", API_KEY))\n total += float(respuesta['data']['quote']['EUR']['price'])\n total = str(total) + \" €\"\n return render_template (\"status.html\", form = form, valor_invertido= ingresos_2, valor_actual=total)\n except Exception as e:\n print(\"**ERROR**🔧: Acceso a API - insert: {} - {}\". format(type(e).__name__, e))\n mensajes.append(\"Error en acceso a API. Consulte con el administrador.\")\n return render_template (\"status.html\", form = form, mensajes = mensajes)\n except Exception as e:\n print(\"**ERROR**🔧: Acceso a base de datos - insert: {} - {}\". format(type(e).__name__, e))\n mensajes.append(\"Error en acceso a base de datos. 
Consulte con el administrador.\")\n return render_template (\"status.html\", form = form, mensajes = mensajes)\n","sub_path":"movements/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"131934514","text":"# -*-coding:utf-8-*-\nimport os\nimport time\nfrom multiprocessing import Process\n\n\ndef task(name, t):\n print(f'{name}({os.getpid()}) is running')\n time.sleep(t)\n print(f'{name} is done')\n\n\nif __name__ == '__main__':\n p1 = Process(target=task, args=('zh-1', 1)) # create subprocess\n p2 = Process(target=task, args=('zh-2', 2)) # create subprocess\n p3 = Process(target=task, args=('zh-3', 3)) # create subprocess\n p4 = Process(target=task, args=('zh-4', 4)) # create subprocess\n\n p1.start() # send a message to os and prepare subprocess resources\n p1.join() # main process is working until subprocess is done\n p2.start() # send a message to os and prepare subprocess resources\n p2.join() # main process is working until subprocess is done\n p3.start() # send a message to os and prepare subprocess resources\n p3.join() # main process is working until subprocess is done\n p4.start() # send a message to os and prepare subprocess resources\n p4.join() # main process is working until subprocess is done\n\n\n print(f'main process({os.getppid()})')\n","sub_path":"process/process_04.py","file_name":"process_04.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121008755","text":"from .... import directories as direc\nfrom ....utils.utils_old import getting_tag, get_box, Min_Max, add_topology\n# from pymoab import core, types, rng, topo_util\nfrom pymoab import types, rng\nimport numpy as np\nimport pdb\nfrom ....data_class.data_manager import DataManager\nimport scipy.sparse as sp\nfrom ....directories import data_loaded\nfrom ....errors.err import DualStructureError\nfrom ....adm.adm_method import get_levelantids_levelids\n\nfrom packs.multiscale.preprocess.dual_primal.paralell import paralell_dual_and_primal\n\nimport time\n\n\nclass MultilevelData(DataManager):\n\n def __init__(self, data_impress, M, data_name: str='MultilevelData.npz', load=False):\n carregar = load\n super().__init__(data_name=data_name, load=load)\n self.tags = dict()\n self.tags_to_infos = dict()\n self._carregar = carregar\n M.multilevel_data = self\n self.mesh = M\n\n self.levels = data_loaded['n_levels']\n self.l1 = 1\n self.data_impress = data_impress\n\n self.fine_primal_id = 'fine_primal_id_level_'\n self.coarse_volumes = 'coarse_volumes_level_'\n self.coarse_primal_id = 'coarse_primal_id_level_'\n self.fine_dual_id = 'fine_dual_id_level_'\n self.interns = 'interns_level_'\n self.faces = 'faces_level_'\n self.edges = 'edges_level_'\n self.vertex = 'vertex_level_'\n self.meshset_vertices = 'meshset_vertices_level_'\n self.reordered_id = 'reordered_id_'\n self.faces_boundary_meshset_level = 'FACES_BOUNDARY_MESHSETS_LEVEL_'\n self.name_mesh = 'flying/multilevel_data'\n self.coarse_neig_face = 'coarse_neig_face_level_'\n self.coarse_id_neig_face = 'coarse_id_neig_face_level_'\n self.restriction = 'restriction_level_'\n self.coarse_faces = 'coarse_faces_level_'\n self.coarse_internal_faces = 'coarse_internal_faces_level_'\n self.coarse_intersect_faces = 'coarse_intersect_faces_level_'\n self.fine_vertex_coarse_volumes = 'fine_vertex_coarse_volumes_level_'\n self.neig_intersect_faces = 
'neig_intersect_faces_level_'\n self.internal_boundary_fine_volumes = 'internal_boundary_fine_volumes_level_'\n self.dual_structure = 'dual_structure_level_'\n self.centroids_name = 'centroids_level_'\n self.volumes_without_grav = 'volumes_without_grav_level_'\n\n def run(self):\n M = self.mesh\n\n assert not self._loaded\n import time\n\n t0=time.time()\n self.create_tags()\n print(\"Time to create tags: {} seconds\".format(time.time()-t0))\n t0=time.time()\n self.generate_dual_and_primal_any_D(M)\n print(\"Time to create dual: {} seconds\".format(time.time()-t0))\n t0=time.time()\n self.get_elements(M)\n print(\"Time to get elements: {} seconds\".format(time.time()-t0))\n t0=time.time()\n self.get_boundary_coarse_faces(M)\n print(\"Time to get boundary: {} seconds\".format(time.time()-t0))\n t0=time.time()\n # self.get_dual_structure()\n # print(\"Time to dual structure: {} seconds\".format(time.time()-t0))\n # t0=time.time()\n self.get_dual_structure_with_graph()\n print(\"Time to dual structure with graph: {} seconds\".format(time.time()-t0))\n t0=time.time()\n self.set_volumes_without_gravity_source_term()\n print(\"Time to set volumes gravity: {} seconds\".format(time.time()-t0))\n t0=time.time()\n self.export_to_npz()\n print(\"Time to export npz: {} seconds\".format(time.time()-t0))\n t0=time.time()\n self.loaded()\n print(\"Time to loaded: {} seconds\".format(time.time()-t0))\n\n def create_tags(self):\n assert not self._loaded\n M = self.mesh\n\n mb = M.core.mb\n\n l = ['D1', 'D2', 'FINE_TO_PRIMAL_CLASSIC_1', 'FINE_TO_PRIMAL_CLASSIC_2', 'reordered_id_1', 'reordered_id_2',\n 'local_id_internos', 'local_fac_internos']\n for name in l:\n n = 1\n tipo = 'integer'\n entitie = 'volumes'\n t1 = types.MB_TYPE_INTEGER\n t2 = types.MB_TAG_SPARSE\n getting_tag(mb, name, n, t1, t2, True, entitie, tipo, self.tags, self.tags_to_infos)\n\n l = ['PRIMAL_ID_1']\n for name in l:\n n = 1\n tipo = 'integer'\n entitie = 'coarse_volumes_lv1'\n t1 = types.MB_TYPE_INTEGER\n t2 = types.MB_TAG_SPARSE\n getting_tag(mb, name, n, t1, t2, True, entitie, tipo, self.tags, self.tags_to_infos)\n\n l = ['PRIMAL_ID_2']\n for name in l:\n n = 1\n tipo = 'integer'\n entitie = 'coarse_volumes'\n t1 = types.MB_TYPE_INTEGER\n t2 = types.MB_TAG_SPARSE\n getting_tag(mb, name, n, t1, t2, True, entitie, tipo, self.tags, self.tags_to_infos)\n\n l = ['NEIG_FACE']\n for name in l:\n n = 1\n tipo = 'handle'\n entitie = 'coarse_volumes_lv1'\n t1 = types.MB_TYPE_HANDLE\n t2 = types.MB_TAG_SPARSE\n getting_tag(mb, name, n, t1, t2, True, entitie, tipo, self.tags, self.tags_to_infos)\n\n l = ['L2_MESHSET', 'MV_1', 'MV_2']\n for name in l:\n n = 1\n tipo = 'handle'\n entitie = 'root_set'\n t1 = types.MB_TYPE_HANDLE\n t2 = types.MB_TAG_MESH\n getting_tag(mb, name, n, t1, t2, True, entitie, tipo, self.tags, self.tags_to_infos)\n\n return 0\n\n def load_tags(self):\n assert not self._loaded\n M = self.mesh\n\n tags0 = ['D', 'FINE_TO_PRIMAL_CLASSIC_', 'PRIMAL_ID_', 'MV_', 'reordered_id_']\n tags1 = ['L2_MESHSET', 'local_id_internos', 'local_fac_internos', 'NEIG_FACE']\n name_tag_faces_boundary_meshsets = self.faces_boundary_meshset_level\n n_levels = 2\n\n mb = M.core.mb\n\n for name in tags0:\n for i in range(2):\n j = i + 1\n name2 = name + str(j)\n tag = mb.tag_get_handle(name2)\n self.tags[name2] = tag\n\n for name in tags1:\n self.tags[name] = mb.tag_get_handle(name)\n\n for i in range(n_levels):\n name_tag = name_tag_faces_boundary_meshsets + str(i+1)\n tag_boundary = mb.tag_get_handle(name_tag)\n self.tags[name_tag] = 
tag_boundary\n\n def loaded(self):\n assert not self._loaded\n self._loaded = True\n\n def generate_dual_and_primal_any_D(self, M):\n M1=M.core\n M1.all_centroids=M.data[\"centroid_volumes\"]\n M1.primal_id_tag1=self.tags[\"PRIMAL_ID_1\"]\n M1.primal_id_tag2=self.tags[\"PRIMAL_ID_2\"]\n M1.fine_to_primal1_classic_tag=self.tags[\"FINE_TO_PRIMAL_CLASSIC_1\"]\n M1.fine_to_primal2_classic_tag=self.tags[\"FINE_TO_PRIMAL_CLASSIC_2\"]\n M1.D1_tag=self.tags[\"D1\"]\n M1.D2_tag=self.tags[\"D2\"]\n\n coord_nodes = M.data['centroid_nodes']\n cent_volumes = M.data['centroid_volumes']\n\n t0=time.time()\n print(\"creating dual mesh\")\n paralell_dual_and_primal.DualPrimal(M1, coord_nodes, cent_volumes, external_vertex_on_boundary=True)\n print(time.time()-t0,\"tempo para criar a dual\")\n\n def get_elements(self, M):\n assert not self._loaded\n\n mb = M.core.mb\n mtu = M.core.mtu\n tags_fine = ['D', 'FINE_TO_PRIMAL_CLASSIC_']\n tags_coarse = ['PRIMAL_ID_']\n coarse_id_impress = 'GID_'\n tag_mv = ['MV_']\n tag_reordered_id = ['reordered_id_']\n all_volumes = M.core.all_volumes\n dict_volumes = dict(zip(all_volumes, M.volumes.all))\n mvs = [M.core.root_set]\n fine_centroids = self.data_impress['centroid_volumes']\n\n self._data[self.centroids_name + str(0)] = fine_centroids.copy()\n\n for i in range(2):\n n = i + 1\n level = n\n name_tag_c = tags_coarse[0] + str(n)\n dual_fine_name = tags_fine[0] + str(n)\n primal_fine_name = tags_fine[1] + str(n)\n tag_reord_id = tag_reordered_id[0] + str(n)\n mv = mvs[i]\n n_reord = 0\n\n interns = mb.get_entities_by_type_and_tag(mv, types.MBHEX, np.array([self.tags[dual_fine_name]]),\n np.array([0]))\n if n not in [1]:\n mb.tag_set_data(self.tags[tag_reord_id], interns, np.arange(n_reord, len(interns)))\n n_reord += len(interns)\n if n == 1:\n interns = np.array([dict_volumes[k] for k in interns])\n else:\n interns = mb.tag_get_data(self.tags[tags_fine[1] + str(n-1)], interns, flat=True)\n\n faces = mb.get_entities_by_type_and_tag(mv, types.MBHEX, np.array([self.tags[dual_fine_name]]),\n np.array([1]))\n if n not in [1]:\n mb.tag_set_data(self.tags[tag_reord_id], faces, np.arange(n_reord, n_reord + len(faces)))\n n_reord += len(faces)\n if n == 1:\n faces = np.array([dict_volumes[k] for k in faces])\n else:\n faces = mb.tag_get_data(self.tags[tags_fine[1] + str(n-1)], faces, flat=True)\n\n edges = mb.get_entities_by_type_and_tag(mv, types.MBHEX, np.array([self.tags[dual_fine_name]]),\n np.array([2]))\n if n not in [1]:\n mb.tag_set_data(self.tags[tag_reord_id], edges, np.arange(n_reord, n_reord + len(edges)))\n n_reord += len(edges)\n if n == 1:\n edges = np.array([dict_volumes[k] for k in edges])\n else:\n edges = mb.tag_get_data(self.tags[tags_fine[1] + str(n-1)], edges, flat=True)\n\n vertex = mb.get_entities_by_type_and_tag(mv, types.MBHEX, np.array([self.tags[dual_fine_name]]),\n np.array([3]))\n if n not in [1]:\n mb.tag_set_data(self.tags[tag_reord_id], vertex, np.arange(n_reord, n_reord + len(vertex)))\n n_reord += len(vertex)\n ids_fine_vertexes = np.array([dict_volumes[k] for k in vertex])\n if n == 1:\n # vertexes = np.array([dict_volumes[k] for k in vertex])\n vertexes = ids_fine_vertexes\n else:\n vertexes = mb.tag_get_data(self.tags[tags_fine[1] + str(n-1)], vertex, flat=True)\n\n self._data[self.fine_vertex_coarse_volumes+str(level)] = ids_fine_vertexes\n self._data[self.interns + str(level)] = interns\n self._data[self.faces + str(level)] = faces\n self._data[self.edges + str(level)] = edges\n self._data[self.vertex + str(level)] = vertexes\n wire_num 
= np.array([len(interns), len(faces), len(edges), len(vertexes)])\n nv = wire_num[3]\n\n coarse_volumes = []\n coarse_primal_ids = []\n coarse_neig_face = []\n coarse_id_neig_face = []\n lines_r = []\n cols_r = []\n\n contador_coarse_gids = 0\n centroids_coarse = np.zeros((len(vertex), 3), dtype=float)\n\n for i, vert in enumerate(vertex):\n neigs = []\n neigs_ids = []\n primal_id = mb.tag_get_data(self.tags[primal_fine_name], vert, flat=True)[0]\n centroid_vert = fine_centroids[dict_volumes[vert]]\n centroids_coarse[primal_id] = centroid_vert\n\n coarse_volume = \\\n mb.get_entities_by_type_and_tag(0, types.MBENTITYSET, np.array([self.tags[name_tag_c]]),\n np.array([primal_id]))[0]\n coarse_volumes.append(coarse_volume)\n coarse_primal_ids.append(primal_id)\n elems_in_meshset = mb.get_entities_by_handle(coarse_volume)\n n_elems = len(elems_in_meshset)\n gggids = np.array([dict_volumes[k] for k in elems_in_meshset])\n local_id = np.arange(n_elems)\n coarse_global_id = np.arange(contador_coarse_gids, contador_coarse_gids + n_elems)\n self.data_impress['COARSE_GID_'+str(level)][gggids] = coarse_global_id\n self.data_impress['COARSE_LOCAL_ID_'+str(level)][gggids] = local_id\n contador_coarse_gids += n_elems\n if n == 1:\n gids = gggids\n # gids = mb.tag_get_data(self.tags[tag_reord_id], elems_in_meshset, flat=True)\n else:\n gids = np.unique(mb.tag_get_data(self.tags[tags_fine[1] + str(n-1)], elems_in_meshset, flat=True))\n elems_fora = mtu.get_bridge_adjacencies(elems_in_meshset, 2, 3)\n elems_fora = rng.subtract(elems_fora, elems_in_meshset)\n ids_meshsets_vizinhos = np.unique(mb.tag_get_data(self.tags[primal_fine_name], elems_fora, flat=True))\n for j in ids_meshsets_vizinhos:\n m2 = mb.get_entities_by_type_and_tag(M.core.root_set, types.MBENTITYSET, np.array([self.tags[name_tag_c]]), np.array([j]))[0]\n neigs.append(m2)\n neigs_ids.append(j)\n\n neigs = np.array(neigs)\n neigs_ids = np.array(neigs_ids)\n coarse_neig_face.append(neigs)\n coarse_id_neig_face.append(neigs_ids)\n if level == 1:\n d2 = mb.tag_get_data(self.tags[tags_fine[0] + str(level+1)], vert, flat=True)[0]\n mb.tag_set_data(self.tags[tags_fine[0] + str(level+1)], elems_in_meshset, np.repeat(d2, len(elems_in_meshset)))\n\n coarse_neig_face = np.array(coarse_neig_face)\n coarse_id_neig_face = np.array(coarse_id_neig_face)\n # centroids_coarse = np.array(centroids_coarse)\n\n self._data[self.coarse_neig_face + str(level)] = coarse_neig_face\n self._data[self.coarse_id_neig_face + str(level)] = coarse_id_neig_face\n self._data[self.coarse_volumes + str(level)] = np.array(coarse_volumes)\n self._data[self.coarse_primal_id + str(level)] = np.array(coarse_primal_ids)\n self._data[self.centroids_name + str(level)] = centroids_coarse\n # dtype = [('elements', np.uint64), ('id', np.uint64)]\n # structured_array = np.zeros(len(coarse_volumes), dtype=dtype)\n # structured_array['elements'] = np.array(coarse_volumes)\n # structured_array['id'] = np.array(coarse_primal_ids)\n\n for volume, vizinhos in zip(coarse_volumes, coarse_neig_face):\n m = mb.create_meshset()\n mb.add_entities(m, vizinhos)\n mb.tag_set_data(self.tags['NEIG_FACE'], volume, m)\n\n nnn = tag_mv[0] + str(level)\n # if not self._carregar:\n mv1 = mb.create_meshset()\n mb.add_entities(mv1, vertex)\n mb.tag_set_data(self.tags[nnn], M.core.root_set, mv1)\n # else:\n # mv1 = mb.tag_get_data(self.tags[nnn], M.core.root_set, flat=True)[0]\n\n # self.mvs[level] = mv1\n mvs.append(mv1)\n self._data[self.meshset_vertices + str(level)] = mv1\n\n fine_primal_id = 
mb.tag_get_data(self.tags[primal_fine_name], all_volumes, flat=True)\n self._data[self.fine_primal_id + str(level)] = fine_primal_id\n\n fine_dual_id = mb.tag_get_data(self.tags[dual_fine_name], all_volumes, flat=True)\n self._data[self.fine_dual_id + str(level)] = fine_dual_id\n\n for m in self._data[self.coarse_volumes + str(1)]:\n elements = mb.get_entities_by_handle(m)\n ne = len(elements)\n ids = np.arange(ne)\n dual_info = mb.tag_get_data(self.tags[tags_fine[0] + str(1)], elements, flat=True)\n id_vert = ids[dual_info == 3]\n vertex = elements[id_vert]\n reord_id_2 = mb.tag_get_data(self.tags[tag_reordered_id[0] + str(2)], vertex, flat=True)[0]\n mb.tag_set_data(self.tags[tag_reordered_id[0] + str(2)], elements, np.repeat(reord_id_2, ne))\n\n self.data_impress['DUAL_1'] = mb.tag_get_data(self.tags['D1'], all_volumes, flat=True)\n self.data_impress['DUAL_2'] = mb.tag_get_data(self.tags['D2'], all_volumes, flat=True)\n self.data_impress[coarse_id_impress + str(2)] = mb.tag_get_data(self.tags['FINE_TO_PRIMAL_CLASSIC_2'], all_volumes, flat=True)\n self.data_impress[coarse_id_impress + str(1)] = mb.tag_get_data(self.tags['FINE_TO_PRIMAL_CLASSIC_1'], all_volumes, flat=True)\n self.data_impress[coarse_id_impress + str(0)] = M.volumes.all\n\n def get_boundary_coarse_faces(self, M):\n assert not self._loaded\n # meshsets_nv1 = self._coarse_volumes[1]\n # meshsets_nv2 = self._coarse_volumes[2]\n meshsets_nv1 = self[self.coarse_volumes + str(1)]\n meshsets_nv2 = self[self.coarse_volumes + str(2)]\n\n mb = M.core.mb\n mtu = M.core.mtu\n n_levels = 2\n all_volumes = M.core.all_volumes\n dict_volumes = dict(zip(all_volumes, M.volumes.all))\n\n name_tag_faces_boundary_meshsets = self.faces_boundary_meshset_level\n all_meshsets = [meshsets_nv1, meshsets_nv2]\n d_faces = dict(zip(M.core.all_faces, M.faces.all))\n b_faces_all = M.faces.boundary\n\n from ....utils import pymoab_utils as utpy\n\n for i in range(n_levels):\n level = i+1\n name = name_tag_faces_boundary_meshsets + str(i + 1)\n meshsets = all_meshsets[i]\n n = 1\n tipo = 'handle'\n entitie = 'root_set'\n t1 = types.MB_TYPE_HANDLE\n t2 = types.MB_TAG_MESH\n getting_tag(mb, name, n, t1, t2, True, entitie, tipo, self.tags, self.tags_to_infos)\n tag_boundary = self.tags[name]\n utpy.set_faces_in_boundary_by_meshsets(mb, mtu, meshsets, tag_boundary, M)\n faces_boundary = mb.tag_get_data(tag_boundary, M.core.root_set, flat=True)[0]\n faces_boundary = mb.get_entities_by_handle(faces_boundary)\n faces_boundary = np.array([d_faces[k] for k in faces_boundary])\n self._data[self.faces_boundary_meshset_level + str(i+1)] = faces_boundary\n if len(faces_boundary)>0:\n self._data[self.neig_intersect_faces+str(level)] = M.faces.bridge_adjacencies(faces_boundary, 2, 3)\n\n\n coarse_faces = []\n coarse_internal_faces = []\n coarse_intersect_faces = []\n coarse_internal_boundary_volumes = []\n\n for m in meshsets:\n # primal_id = mb.tag_get_data(tag_coarse_id, m, flat=True)[0]\n # assert primal_id == cids[cont]\n elements = mb.get_entities_by_handle(m)\n volumes = np.array([dict_volumes[k] for k in elements])\n faces = mtu.get_bridge_adjacencies(elements, 3, 2)\n faces = np.array([d_faces[k] for k in faces])\n coarse_faces.append(faces)\n internal_faces = np.setdiff1d(faces, faces_boundary)\n internal_faces = np.setdiff1d(internal_faces, b_faces_all)\n coarse_internal_faces.append(internal_faces)\n intersect_faces = np.intersect1d(faces, faces_boundary)\n coarse_intersect_faces.append(intersect_faces)\n boundary_faces = np.setdiff1d(faces, internal_faces)\n 
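# NOTE: at this point each coarse block m has three face sets -- `faces`\n            # (everything touching the block), `intersect_faces` (faces shared with a\n            # neighbouring coarse block, i.e. np.intersect1d(faces, faces_boundary))\n            # and `internal_faces` (faces minus the intersect faces and the global\n            # boundary); `boundary_faces` just above is the complement of the\n            # internal set within `faces`.\n            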
# internal_boundary_volumes = np.concatenate(M.faces.bridge_adjacencies(boundary_faces, 2, 3))\n internal_boundary_volumes = np.concatenate(M.faces.bridge_adjacencies(intersect_faces, 2, 3))\n internal_boundary_volumes = np.intersect1d(internal_boundary_volumes, volumes)\n coarse_internal_boundary_volumes.append(internal_boundary_volumes)\n\n # cont += 1\n\n coarse_faces = np.array(coarse_faces)\n coarse_internal_faces = np.array(coarse_internal_faces)\n coarse_intersect_faces = np.array(coarse_intersect_faces)\n coarse_internal_boundary_volumes = np.array(coarse_internal_boundary_volumes)\n self._data[self.internal_boundary_fine_volumes+str(level)] = coarse_internal_boundary_volumes\n self._data[self.coarse_faces+str(level)] = coarse_faces\n self._data[self.coarse_intersect_faces+str(level)] = coarse_intersect_faces\n self._data[self.coarse_internal_faces+str(level)] = coarse_internal_faces\n\n def get_elements_2(self, M):\n assert not self._loaded\n meshsets_nv1 = self._coarse_volumes[1]\n meshsets_nv2 = self._coarse_volumes[2]\n all_meshsets = [meshsets_nv1, meshsets_nv2]\n\n dict_all_faces = M.data.dict_elements[direc.entities_lv0[2]]\n dict_all_volumes = M.data.dict_elements[direc.entities_lv0[3]]\n dict_all_edges = M.data.dict_elements[direc.entities_lv0[1]]\n dict_all_nodes = M.data.dict_elements[direc.entities_lv0[0]]\n\n mb = M.core.mb\n mtu = M.core.mtu\n n_levels = 2\n\n name_tag_faces_boundary_meshsets = self.faces_boundary_meshset_level\n\n for i in range(n_levels):\n coarse_volumes_property = []\n level = i+1\n name = name_tag_faces_boundary_meshsets + str(level)\n meshsets = all_meshsets[i]\n tag_boundary = self.tags[name]\n boundary_faces_elements = mb.tag_get_data(tag_boundary, M.core.root_set, flat=True)[0]\n boundary_faces_elements = mb.get_entities_by_handle(boundary_faces_elements)\n boundary_faces_all = np.array([dict_all_faces[f] for f in boundary_faces_elements])\n\n for m in meshsets:\n loc = self._coarse_volumes[i+1] == m\n primal_id = self._coarse_primal_id[level]['id'][loc][0]\n\n volumes_element = mb.get_entities_by_handle(m)\n volumes = np.array([dict_all_volumes[k] for k in volumes_element])\n faces_element = mtu.get_bridge_adjacencies(volumes_element, 3, 2)\n faces = np.array([dict_all_faces[f] for f in faces_element])\n edges_element = mtu.get_bridge_adjacencies(volumes_element, 3, 1)\n edges = np.array([dict_all_edges[f] for f in edges_element])\n nodes_element = mtu.get_bridge_adjacencies(volumes_element, 3, 0)\n nodes = np.array([dict_all_nodes[f] for f in nodes_element])\n boundary_faces = np.intersect1d(boundary_faces_all, faces)\n\n dados = {\n 'volumes': volumes, 'volumes_element': volumes_element,\n 'faces': faces, 'faces_element': faces_element,\n 'edges': edges, 'edges_element': edges_element,\n 'nodes': nodes, 'nodes_element': nodes_element,\n 'primal_id': primal_id, 'boundary_faces': boundary_faces\n }\n\n coarse_volume = CoarseVolume(dados)\n coarse_volumes_property.append(coarse_volume)\n\n self._coarse_volumes_property[level] = coarse_volumes_property\n\n def get_dual_structure(self):\n M = self.mesh\n mb = M.core.mb\n gids = self.data_impress['GID_0']\n dt = [('volumes', np.dtype(int)), ('dual_id', np.dtype(int)), ('primal_id', np.dtype(int))]\n all_volumes = M.core.all_volumes\n\n for level in range(1, self.levels):\n structure = []\n gid_level = self.data_impress['GID_'+str(level-1)]\n coarse_id_level = self.data_impress['GID_'+str(level)]\n dual_ids = self.data_impress['DUAL_'+str(level)]\n set_interns = 
set(gids[dual_ids==dual_ids.min()])\n\n            while set_interns:\n                intern0 = [set_interns.pop()]\n                inter = M.volumes.bridge_adjacencies(intern0, 0, 3)\n                dif = set(inter) - set(intern0)\n\n                while dif & set_interns:\n                    intern0 = np.setdiff1d(inter, gids[dual_ids!=dual_ids.min()])\n                    try:\n                        inter = np.unique(np.concatenate(M.volumes.bridge_adjacencies(intern0, 0, 3)))\n                    except:\n                        inter = np.unique(M.volumes.bridge_adjacencies(intern0, 0, 3))\n                    dif = set(inter) - set(intern0)\n\n                if level == 1:\n                    primais1 = coarse_id_level[inter]\n                    all_primal_ids = np.unique(primais1)\n                    all_vertex = []\n                    for gidc in all_primal_ids:\n                        vertex_all = gid_level[dual_ids==3]\n                        vols_in_coarse_id = gid_level[coarse_id_level==gidc]\n                        vertex = np.intersect1d(vertex_all, vols_in_coarse_id)\n                        all_vertex.append(vertex)\n\n                    all_vertex = np.concatenate(all_vertex)\n                    vertex_in_inter = np.intersect1d(inter, all_vertex)\n                    all_vertex = np.setdiff1d(all_vertex, vertex_in_inter)\n                    if len(all_vertex) > 0:\n                        inter = np.concatenate([inter, all_vertex])\n\n                _gids1 = gid_level[inter]\n                _primais = coarse_id_level[inter]\n                _duais = dual_ids[inter]\n\n                if level > 1:\n                    yy1 = dict(zip(_gids1, _primais))\n                    yy2 = dict(zip(_gids1, _duais))\n                    test1 = np.array(list(yy1.keys()))\n                    test2 = np.array(list(yy2.keys()))\n                    if not np.allclose(test1, test2):\n                        # fail fast instead of dropping into the debugger\n                        raise DualStructureError('mismatch between primal- and dual-id key sets')\n                    gids2 = test1\n                    primais = np.array(list(yy1.values()))\n                    duais = np.array(list(yy2.values()))\n                    # gids2, duais = get_levelantids_levelids(_gids1, _duais)\n                    # gids2, primais = get_levelantids_levelids(_gids1, _primais)\n                else:\n                    gids2 = _gids1\n                    duais = _duais\n                    primais = _primais\n\n                # ####################################\n                # ## test\n                # if level == 1:\n                #     vertices = gids2[duais==3]\n                #     if len(vertices) < 8:\n                #         av = mb.create_meshset()\n                #         mb.add_entities(av, all_volumes[gids2])\n                #         mb.write_file('teste.vtk', [av])\n                #         import pdb; pdb.set_trace()\n                # else:\n                #     vertices = gids2[duais==3]\n                #     av = mb.create_meshset()\n                #     for vv in gids2:\n                #         mb.add_entities(av, all_volumes[gids[gid_level==vv]])\n                #     mb.write_file('teste.vtk', [av])\n                #     import pdb; pdb.set_trace()\n                # ####################################\n\n                sarray = np.zeros(len(gids2), dtype=dt)\n                sarray['volumes'] = gids2\n                sarray['dual_id'] = duais\n                sarray['primal_id'] = primais\n                structure.append(sarray)\n                set_interns = set_interns - set(inter)\n\n\n            self._data[self.dual_structure+str(level)] = np.array(structure)\n\n    def get_dual_structure_with_graph(self):\n        M = self.mesh\n        mb = M.core.mb\n\n        gids = self.data_impress['GID_0']\n        dt = [('volumes', np.dtype(int))]\n        all_volumes = M.core.all_volumes\n\n        for level in range(1, self.levels):\n            dual_flags = self.data_impress['DUAL_'+str(level)]\n            primal_ids = self.data_impress['GID_'+str(level)]\n            interns = gids[dual_flags==dual_flags.min()]\n            intern_definitor = -np.ones(len(M.volumes.all),dtype=int)\n            intern_definitor[interns]=interns\n\n            adjs=M.faces.bridge_adjacencies(M.faces.internal,2,3)\n            adjs=intern_definitor[adjs]\n            adjs=adjs[(adjs>-1).sum(axis=1)==2]\n\n            adjs0=adjs[:,0]\n            adjs1=adjs[:,1]\n\n            mapd=np.arange(len(M.volumes.all))\n            mapd[interns]=np.arange(len(interns))\n            lines=np.concatenate([mapd[adjs0],mapd[adjs1]])\n            cols=np.concatenate([mapd[adjs1],mapd[adjs0]])\n            data=np.ones(len(lines))\n\n            from scipy.sparse import csc_matrix, csgraph\n            graph=csc_matrix((data,(lines,cols)),shape=(len(interns),len(interns)))\n            n_l,labels=csgraph.connected_components(graph,connection='strong')\n            conjs_interns=[interns[labels==l] for l in range(n_l)] \n            structure = [np.unique(np.concatenate(M.volumes.bridge_adjacencies(intern0, 0, 3))) for intern0 in conjs_interns]\n            
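# NOTE: this graph-based variant groups the internal volumes into connected\n            # components of the face-adjacency graph and then, per component, takes the\n            # node neighbourhood (bridge_adjacencies over nodes) as one dual structure;\n            # it replaces the set-expansion loop of get_dual_structure() above.\n            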
self._data[self.dual_structure+str(level)]=structure\n\n    def set_volumes_without_gravity_source_term(self):\n\n        lim = self.data_impress['hs'].min()*(0.2)\n\n        # all_centroids = self.data_impress['centroid_volumes']\n\n        # for level in range(1, self.levels):\n        # only doing level 1 for now\n        for level in range(1, 2):\n            structures = self._data[self.dual_structure+str(level)]\n            all_vols_without_grav = []\n            all_centroids = self._data[self.centroids_name + str(level-1)]\n            dual_flags = self.data_impress[\"DUAL_\"+str(level)]\n            for structure in structures:\n                # volumes = structure['volumes']\n                # dual_id = structure['dual_id']\n                volumes = structure\n                dual_id = dual_flags[volumes]\n\n                local_centroids = all_centroids[volumes]\n                xmin, ymin, zmin = local_centroids.min(axis=0)\n                xmax, ymax, zmax = local_centroids.max(axis=0)\n                b1 = np.array([np.array([xmin-lim, ymin-lim, zmax-lim]), np.array([xmax+lim, ymax+lim, zmax+lim])])\n                b2 = np.array([np.array([xmin-lim, ymin-lim, zmin-lim]), np.array([xmax+lim, ymax+lim, zmin+lim])])\n\n                vols_without_grav = np.concatenate([volumes[get_box(local_centroids, b1)], volumes[get_box(local_centroids, b2)]])\n                vols_without_grav = np.setdiff1d(vols_without_grav, volumes[dual_id==3])\n                all_vols_without_grav.append(vols_without_grav)\n\n            try:\n                all_vols_without_grav = np.unique(np.concatenate(all_vols_without_grav))\n            except Exception as e:\n                all_vols_without_grav = np.unique(all_vols_without_grav)\n\n\n            self._data[self.volumes_without_grav + str(level-1)] = all_vols_without_grav\n\n    def save_mesh(self):\n        M = self.mesh\n\n        self.data_impress.update_variables_to_mesh()\n\n        # M.core.print(file=self.name_mesh, config_input='input_cards/print_settings.yml')\n        M.save_variables('multiscale_data')\n","sub_path":"packs/multiscale/preprocess/dual_primal/create_dual_and_primal_mesh.py","file_name":"create_dual_and_primal_mesh.py","file_ext":"py","file_size_in_byte":30845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"472514247","text":"# telegram api packages\nfrom telebot import apihelper\nimport telebot\n# system packages\nfrom subprocess import Popen, PIPE\nimport shutil\nimport time\nimport ast\nimport os\n# configuration files\nfrom utils import send_messages, make_keyboard\nfrom telegram_token import TOKEN\nimport system_config\nimport config\n\n\n# connect to the bot through a proxy\napihelper.proxy = system_config.PROXY_CONFIG\nbot = telebot.TeleBot(TOKEN)\n\n# global context variables for the handlers\nimage_content = None\nmodel = None\n\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n    \"\"\"\n    Handler for the /start and /help commands (greeting)\n    \"\"\"\n    chat_id = message.chat.id\n    send_messages(bot, chat_id, config.WELCOME_MSGS)\n\n\n@bot.message_handler(commands=['knock_knock'])\ndef send_second_welcome(message):\n    \"\"\"\n    Handler for the /knock_knock command (second greeting)\n    \"\"\"\n    chat_id = message.chat.id\n    send_messages(bot, chat_id, config.SECOND_WELCOME_MSGS)\n\n\n#\n@bot.message_handler(content_types=['photo'])\ndef photo(message):\n    \"\"\"\n    Handler that accepts messages with a photo\n    \"\"\"\n    process_photo(message)\n\n\ndef process_photo(message):\n    \"\"\"\n    Process the image from an incoming message.\n    This method matters a lot because it drives the whole dialogue logic.\n    When the first photo arrives, the style-selection menu is created.\n    :param message: message with a photo attachment\n    :return: None\n    \"\"\"\n    
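# NOTE: the module-level image_content keeps the last uploaded photo between\n    # updates, and the inline keyboard built by make_keyboard() is expected to put\n    # a stringified list '[style_model, chat_id]' into the callback data, which\n    # handle_query() parses back with ast.literal_eval.\n    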
global image_content, model\n\n    # create a folder for temporarily storing the user's photos\n    if not image_content and not os.path.isdir(\"photos\"):\n        os.mkdir(\"photos\")\n\n    file_id = message.photo[-1].file_id\n    file = bot.get_file(file_id)\n    downloaded_file = bot.download_file(file.file_path)\n\n    # save the content-image file\n    with open(file.file_path, 'wb') as new_file:\n        new_file.write(downloaded_file)\n\n    chat_id = message.chat.id\n    if not image_content:\n        send_messages(bot, chat_id, config.IMAGE_CONTENT_MSGS)\n    image_content = file\n\n    # build the style-selection menu\n    bot.send_message(chat_id=chat_id,\n                     text=config.STYLES_TEXT,\n                     reply_markup=make_keyboard(chat_id),\n                     parse_mode='HTML')\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef handle_query(call):\n    \"\"\"\n    Handle the selection of a style-menu item, invoke the style-transfer\n    method and send the processed photo back to the user\n    \"\"\"\n    global image_content\n\n    style_model = ast.literal_eval(call.data)[0]\n    chat_id = int(ast.literal_eval(call.data)[1])\n    send_messages(bot, chat_id, config.IMAGE_STYLE_MSGS)\n\n    # run the image-processing method\n    filename = transfer_style(image_content, style_model)\n\n    # send the file\n    try:\n        bot.send_photo(chat_id, open(filename, 'rb'))\n    except Exception as exc:\n        print(exc)\n\n    # clean up the temporary photo directory\n    shutil.rmtree('./photos')\n    image_content = None\n    send_messages(bot, chat_id, config.STYLED_IMAGE_MSGS)\n\n\ndef transfer_style(img_content, model_style):\n    \"\"\"\n    Invoke the external style-transfer script\n    :param img_content: content-image object\n    :param model_style: name of the style-transfer model\n    :return: relative path to the processed file\n    \"\"\"\n    # clean the directory holding the resulting photos\n    shutil.rmtree('./outputs')\n    os.mkdir(\"outputs\")\n\n    timestamp = str(time.time())\n    # call the external script\n    process = Popen([system_config.PYTHON_PATH + \" fast_neural_style/neural_style/neural_style.py \" +\n                     \"eval --content-image ./\" + img_content.file_path + \" \" +\n                     \"--model ./models/\" + model_style + \" \" +\n                     \"--output-image ./outputs/\" + timestamp + img_content.file_path.split(\"/\")[-1] + \" \" +\n                     \"--cuda \" + str(int(system_config.CUDA))], shell=True, stdout=PIPE)\n    process.communicate()\n    process.wait()\n\n    while not os.listdir('./outputs'):\n        pass\n    return './outputs/' + timestamp + img_content.file_path.split(\"/\")[-1]\n\n\n# resilient bot restart loop\nbegan = False\nif __name__ == \"__main__\":\n    while True:\n        if not began:\n            try:\n                bot.polling()\n            except Exception as e:\n                began = False\n                print(e)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"508448763","text":"################################################################################\n# Utility functions.\n################################################################################\nfrom importlib import import_module\n\n\ndef import_symbol(symbol_path):\n    \"\"\" Import the symbol defined by the specified symbol path.\n    Copied from envisage's import manager.\n    \"\"\"\n    
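# Two accepted forms (hypothetical example paths): 'pkg.mod:Cls.attr' -- the\n    # colon form evaluates the part after ':' inside the module namespace -- and\n    # the plain dotted form 'pkg.mod.Cls' handled by the else-branch below.\n    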
if ':' in symbol_path:\n        module_name, symbol_name = symbol_path.split(':')\n        module = import_module(module_name)\n        symbol = eval(symbol_name, module.__dict__)\n    else:\n        components = symbol_path.split('.')\n        module_name = '.'.join(components[:-1])\n        symbol_name = components[-1]\n        module = __import__(\n            module_name, globals(), locals(), [symbol_name]\n        )\n        symbol = getattr(module, symbol_name)\n    return symbol\n","sub_path":"LIVE/dj_demo/mysite/test_segment_base/metadata_0.py","file_name":"metadata_0.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"71807294","text":"import heapq\nimport Problem\nimport Graph\nimport Edge\n\ndef uniformCostSearch(problem):\n    node = problem.initialState\n    weight = problem.getCost(node) \n\n    explored = set()\n    frontier = list()\n    heapq.heappush(frontier, (weight, node))\n\n    while (len(frontier) > 0):\n        node = heapq.heappop(frontier)\n        nodeSetRepr = problem.getSetRepr(node[1])\n        if (nodeSetRepr in explored):\n            continue\n        \n        if (problem.isGoal(node[1])):\n            return node[1]\n\n        explored.add(nodeSetRepr)\n        for neighbor in problem.getNeighbors(node[1]):\n            if (problem.getSetRepr(neighbor) not in explored):\n                childNode = (problem.getCost(neighbor), neighbor)\n                heapq.heappush(frontier, childNode)\n\ndef constructEuclerianPath(graph):\n    oddNodes = list()\n    for vertex in graph.vertices:\n        if (graph.getVertexDegree(vertex) % 2 == 1):\n            oddNodes.append(vertex)\n    \n    distances = list()\n    for i in range(len(oddNodes)):\n        for j in range(i + 1, len(oddNodes)):\n            edge = Edge.Edge(oddNodes[i], oddNodes[j])\n            heapq.heappush(distances, edge)\n    \n    print(\"Num Odd: \", len(oddNodes))\n\n    seen = set()\n    while (len(seen) < len(oddNodes)):\n        edge = heapq.heappop(distances)\n        if (edge.start in seen or edge.end in seen):\n            continue\n        print(\"Adding edge: \", edge.start.id, edge.end.id)\n        seen.add(edge.start)\n        seen.add(edge.end)\n        graph.addEdge(edge)\n    return graph\n\n\ndef traverseEulerianPath(eulerianGraph, start):\n    return list()","sub_path":"usma_files/WRATH/Prototype/Traversal.py","file_name":"Traversal.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"325995241","text":"class Solution:\n    def letterCombination(self, digits):\n        lookup, result = [\"\", \"\", \"abc\", \"def\", \"ghi\", \"jkl\", \"mno\", \"pqrs\", \"tuv\", \"wxyz\"], []\n        self.RecFunction(lookup, digits, result, '', 0)\n        return result\n\n    def RecFunction(self, lookup, digits, result, cur, n):\n        if n == len(digits):\n            result.append(cur)\n        else:\n            for choice in lookup[int(digits[n])]:\n                self.RecFunction(lookup, digits, result, cur+choice, n+1)\n\n\nif __name__ == \"__main__\":\n    print(Solution().letterCombination(\"23\"))\n","sub_path":"Backtracking/letter-phone.py","file_name":"letter-phone.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"504706844","text":"#!/usr/bin/env python\n \nfrom distutils.core import setup\nfrom distutils.extension import Extension\n\nfrom os.path import join as pjoin\n\n# Where to find extensions\nSPEC = 'src'\n\nsetup(name=\"PySpectrophore\",\n    \tversion='1.0.0',\n\t    \tdescription='Spectrophore class to be used as Python library',\n    \t\tplatforms=['Linux', 'Unix'],\n    \t \tauthor='Fabio Mendes, Hans de Winter',\n    \t \tauthor_email='fabiomendes.farm@gmail.com, 
hans.dewinter@uantwerpen.be',\n    \turl='www.uantwerpen.be',\n\t\tpy_modules=[pjoin(SPEC, 'spectrophore')],\n\t\tpackage_dir={'PySpectrophore': 'src'},\n\t\tpackages=['PySpectrophore']\n     )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"251434350","text":"import keras\nimport tensorflow as tf\nimport sys\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nuploaded_image = str(sys.argv[1])\n# clean_data_filename = str(sys.argv[1])\nmodel_filename = 'models/anonymous_processed_model.h5'\n# clean_data_filename = clean_validation_data.h5\n# model = sunglasses_bd_net.h5\n\ndef data_loader(filepath):\n    data = h5py.File(filepath, 'r')\n    x_data = np.array(data['data'])\n    y_data = np.array(data['label'])\n    x_data = x_data.transpose((0,2,3,1))\n\n    return x_data, y_data\n\ndef data_preprocess(x_data):\n    return x_data/255\n\ndef main():\n    # x_test, y_test = data_loader(clean_data_filename)\n    # x_test = data_preprocess(x_test)\n    img = mpimg.imread(uploaded_image)\n    img = img[:, :, :3]\n    img1 = img.reshape([1] + list(img.shape))\n    img1 = tf.convert_to_tensor(img1)\n    # print('Shape of output image is:', img1.shape)\n    model = keras.models.load_model(model_filename)\n    print('Output label =', np.argmax(model.predict(img1)))\n    # model.predict(img)\n    # clean_label_p = np.argmax(bd_model.predict(x_test), axis=1)\n    # class_accu = np.mean(np.equal(clean_label_p, y_test))*100\n    # print('Classification accuracy:', class_accu)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"eval_image_anonymous.py","file_name":"eval_image_anonymous.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"217735049","text":"# importing the module\nimport imdb\n\n# creating instance of IMDb\nia = imdb.IMDb()\n\n# movie name\nname = \"Doctor Who\"\n\n# searching the movie\nsearch = ia.search_movie(name)\n\n# printing the result\nfor i in search:\n\tprint(i)\n","sub_path":"imdbtest.py","file_name":"imdbtest.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"607983376","text":"\"\"\"\ndayong.impls\n~~~~~~~~~~~~\n\nImplementation of interfaces and the logic for injecting them.\n\"\"\"\nimport asyncio\nfrom typing import Optional\n\nimport tanjun\nfrom sqlalchemy.ext.asyncio import create_async_engine\nfrom sqlmodel import SQLModel, select\nfrom sqlmodel.engine.result import ScalarResult\nfrom sqlmodel.ext.asyncio.session import AsyncSession\n\nfrom dayong.configs import DayongConfig, DayongConfigLoader\nfrom dayong.models import Message\n\n\nclass MessageDBImpl:\n    \"\"\"Implementation of a database connection for transacting and interacting with\n    message tables, those of which derive from message table models.\n\n    The data to be selected, retrieved, and modified is determined by the table model\n    object and its type. The type, in this case, is `dayong.models.Message`.\n    \"\"\"\n\n    
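# Typical lifecycle (illustrative sketch only -- `config` and the concrete\n    # Message fields come from dayong.configs / dayong.models):\n    #     db = await MessageDBImpl.connect(config)\n    #     await db.create_table()\n    #     await db.add_row(message)          # message: a Message instance\n    #     row = await db.get_row(message)    # ScalarResult for the same message_id\n    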
def __init__(self, database_uri: Optional[str] = None) -> None:\n        self.engine = create_async_engine(\n            database_uri if database_uri else DayongConfigLoader.load().database_uri\n        )\n\n    async def create_table(self) -> None:\n        \"\"\"Create physical message tables for all the message table models stored in\n        `SQLModel.metadata`.\n        \"\"\"\n        async with self.engine.begin() as conn:\n            await conn.run_sync(SQLModel.metadata.create_all)\n\n    async def add_row(self, table_model_object: Message) -> None:\n        \"\"\"Insert a row in the message table.\n\n        Args:\n            table_model_object (Message): An instance of `dayong.models.Message` or one\n                of its subclasses.\n        \"\"\"\n        async with AsyncSession(self.engine) as session:\n            loop = asyncio.get_running_loop()\n            await loop.run_in_executor(None, session.add, table_model_object)\n            await session.commit()\n\n    async def remove_row(self, table_model_object: Message) -> None:\n        \"\"\"Delete a row in the message table.\n\n        Args:\n            table_model_object (Message): An instance of `dayong.models.Message` or one\n                of its subclasses.\n        \"\"\"\n        table_model = type(table_model_object)\n        async with AsyncSession(self.engine) as session:\n            # Temp ignore incompatible type passed to `exec()`. See:\n            # https://github.com/tiangolo/sqlmodel/issues/54\n            # https://github.com/tiangolo/sqlmodel/pull/58\n            row: ScalarResult = await session.exec(\n                select(table_model).where(  # type: ignore\n                    table_model.message_id == table_model_object.message_id\n                )\n            )\n            # delete the mapped row instance, not the ScalarResult wrapper itself\n            await session.delete(row.one())\n            await session.commit()\n\n    async def get_row(self, table_model_object: Message) -> ScalarResult:\n        \"\"\"Fetch a row from the message table.\n\n        Args:\n            table_model_object (Message): An instance of `dayong.models.Message` or one\n                of its subclasses.\n\n        Returns:\n            ScalarResult: A `ScalarResult` object which contains a scalar value or\n                sequence of scalar values.\n        \"\"\"\n        table_model = type(table_model_object)\n        async with AsyncSession(self.engine) as session:\n            # Temp ignore incompatible type passed to `exec()`. See:\n            # https://github.com/tiangolo/sqlmodel/issues/54\n            # https://github.com/tiangolo/sqlmodel/pull/58\n            row: ScalarResult = await session.exec(\n                select(table_model).where(  # type: ignore\n                    table_model.message_id == table_model_object.message_id\n                )\n            )\n            return row\n\n    @classmethod\n    async def connect(\n        cls,\n        config: DayongConfig = tanjun.injected(type=DayongConfig),\n    ) -> \"MessageDBImpl\":\n        \"\"\"Construct an instance of `dayong.impls.MessageDBImpl`. This is used to\n        register `MessageDBImpl` as a type dependency.\n\n        Args:\n            config (DayongConfig, optional): The config class to use. Defaults to\n                `tanjun.injected(type=DayongConfig)`.\n\n        Returns:\n            MessageDBImpl: An instance of `dayong.impls.MessageDBImpl`.\n        \"\"\"\n        return cls(database_uri=config.database_uri)\n","sub_path":"dayong/impls.py","file_name":"impls.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"472370164","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n'''Takes a 9000-line Metek 3D sonic 10-Hz 15min rotated output file, and adds 
Assumes start time in filename is accurate.'''\n\nimport pandas as pd\nfrom pandas.tseries.offsets import DateOffset\nfrom pandas.io.parsers import read_csv\nfrom datetime import datetime\n\nfrom os.path import join\nfrom optparse import OptionParser\n\nparser=OptionParser()\nparser.add_option('--dir', dest=\"DATADIR\", help=\"base data directory (e.g. /data/shareddata/tower/ )\", default='/data/shareddata/tower/Proc/sonw/')\nparser.add_option('--prefix', dest=\"PREFIX\", help=\"Prefix of the four input files, e.g. 140818_030000.01.metek '.rotated_' is assumed.\")\n\n(options, args) = parser.parse_args()\n\n#DATADIR='/data/shareddata/tower/Proc'\n#INFILE='140818_030000.01.metek.rotated_1.csv'\nDATADIR=options.DATADIR\nprefix = options.PREFIX\n\nfor i in range(0,4):\n INFILE = prefix + '.rotated_' + str(i+1) +'.csv'\n sonic_3d_in = read_csv(join(DATADIR,INFILE), header=None, names=['U','V','W','T'])\n\n basedate = datetime.strptime(prefix[0:13],'%y%m%d_%H%M%S') + DateOffset(minutes=(15*i))\n \n rnf = pd.date_range(basedate, periods=len(sonic_3d_in.index), freq=DateOffset(seconds=0.1))\n \n #prepend timeseries\n sonic_3d_in.insert(0, 'Time', rnf)\n \n #reset index\n sonic_3d_in.set_index('Time', inplace=True)\n \n sonic_out = sonic_3d_in.resample('1S',how='mean')\n \n sonic_out.to_csv(join(DATADIR,INFILE.replace('.csv','-1Hz.csv')), na_rep='NaN', encoding='utf8', header=[u'U / ms¯¹',u'V / ms¯¹',u'W / ms¯¹',u'T / °C'])\n","sub_path":"3d_sonic_log_script/add-timestamps.py","file_name":"add-timestamps.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"197522986","text":"def FastModularExponentiationBKM(b, k, m):\n \"\"\"Computes b**2**k % m using only k modular multiplications.\n \"\"\"\n # Start with `c = b mod m`\n c = b % m\n\n # Repeat k times: `c = c**2 mod m`\n i = 0\n while i < k:\n c = (c ** 2) % m\n i += 1\n\n return c\n\n\ndef FastModularExponentiationBEM(b, e, m):\n \"\"\"Computes `b^e mod m` using around `2*log2(e)` modular multiplications.\n \"\"\"\n # 1) Rewrite `e` in binary form\n binary = [x for x in bin(e)][2:]\n # if e == 16, binary == ['1', '0', '0', '0', '0']\n\n # 2) Compute `b**2**k mod m` for all `2**k <= e`\n max_exponent = len(binary) - 1\n exponents = []\n for idx, exp in enumerate(range(max_exponent, -1, -1)):\n if binary[idx] == \"1\":\n exponents.append(exp)\n computes = [FastModularExponentiationBKM(b, k, m) for k in exponents]\n\n # 3) Multiply all results for `2**k` in binary representation of `e`\n result = 1\n for x in computes:\n result *= x\n\n return result % m\n","sub_path":"specialization-intro-discrete-math-computer-science/number-theory-and-cryptography/week3/quiz-fast-modular-exponentiation-code/fast_modular_exponentiation.py","file_name":"fast_modular_exponentiation.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456768752","text":"\"\"\"Quick Sort Algorithm in Python\n\n1. Pick an element, called a pivot, from the array.\n2. Partitioning: reorder the array so that \nall elements with values less than the pivot come before the pivot, \nwhile all elements with values greater than the pivot \ncome after it (equal values can go either way). \nAfter this partitioning, the pivot is in its final position. \n3. 
Recursively apply the above steps to the sub-array of elements with smaller values\nand separately to the sub-array of elements with greater values.\n\"\"\"\n\nimport random\n\n\ndef quick_sort_easy(arr):\n\t\"\"\" Return sorted list by using quick sort algorithm\"\"\"\n\n\tn = len(arr)\n\t\n\tif n <= 1:\n\t\treturn arr\n\t\n\telse:\n\t\tindex = random.choice(range(n))\n\t\tpivot = arr.pop(index)\n\t\tsmaller = []\n\t\tbigger = []\n\n\t\tfor i in range(n-1):\n\t\t\tif arr[i] < pivot:\n\t\t\t\tsmaller.append(arr[i])\n\t\t\telse:\n\t\t\t\tbigger.append(arr[i])\n\n\t\treturn quick_sort_easy(smaller) + [pivot] + quick_sort_easy(bigger)\n\n\n\narr = [3, 19, 22, 48, 11, 1, 2, 5, 9, 8, 13, 22, 4, 7, 6, 18]\nprint(quick_sort_easy(arr))\n\n\n\n\n\t\n\n","sub_path":"python/sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"604576944","text":"from otree.api import (\r\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\r\n Currency as c, currency_range\r\n)\r\nimport random\r\n\r\nauthor = 'Your name here'\r\n\r\ndoc = \"\"\"\r\nYour app description\r\n\"\"\"\r\n\r\n\r\nclass Constants(BaseConstants):\r\n name_in_url = 'majority_determine1'\r\n players_per_group = 4\r\n num_rounds = 1\r\n\r\n\r\nclass Subsession(BaseSubsession):\r\n pass\r\n\r\n\r\nclass Group(BaseGroup):\r\n\r\n coin = models.CharField()\r\n\r\n def set_payoffs(self):\r\n\r\n self.coin = random.choice(['head', 'tail'])\r\n\r\n for player in self.get_players():\r\n result1 = []\r\n result2 = []\r\n option_temp = [player.option1, player.option2, player.option3, player.option4]\r\n player.option = option_temp.index(max(option_temp)) + 1\r\n\r\n if player.option == 3 or player.option == 4:\r\n for i in self.get_players():\r\n if i.id_in_group != player.id_in_group:\r\n if i.decision == '♡':\r\n result1.extend([i.decision])\r\n else:\r\n result2.extend([i.decision])\r\n else:\r\n pass\r\n if len(result1) > len(result2):\r\n player.yourresult = bool(player.option == 4)\r\n else:\r\n player.yourresult = bool(player.option == 3)\r\n\r\n else:\r\n if self.coin == 'head':\r\n player.yourresult = bool(player.option == 2)\r\n else:\r\n player.yourresult = bool(player.option == 1)\r\n\r\n\r\n player.payoff = player.yourresult * 4\r\n\r\n\r\nclass Player(BasePlayer):\r\n decision = models.CharField(\r\n choices=['♡', '☺︎'],\r\n doc=\"\"\"Either ♡ or ☺︎\"\"\",\r\n widget=widgets.RadioSelect()\r\n )\r\n option1 = models.CurrencyField(\r\n choices=currency_range(0, 4, c(0.01))\r\n )\r\n option2 = models.CurrencyField(\r\n choices=currency_range(0, 4, c(0.01))\r\n )\r\n option3 = models.CurrencyField(\r\n choices=currency_range(0, 4, c(0.01))\r\n )\r\n option4 = models.CurrencyField(\r\n choices=currency_range(0, 4, c(0.01))\r\n )\r\n\r\n option = models.IntegerField()\r\n\r\n\r\n\r\n\r\n","sub_path":"majority_determine1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503412287","text":"# -*- coding: utf-8; -*-\nimport urllib.parse\nimport datetime\nimport os\nimport tempfile\nimport shutil\nimport zipfile\n\nimport common.logger as logger\nfrom common.request import send_request_api_partner, download_file_by_url\nfrom common.json_functions import validate_json_by_schema\nfrom common.config_module import load, get_value_from_config\nfrom common.system import 
function_name\nfrom common.database import CheDb\nfrom common.check_verify import is_equal\n\n# substitution parameters for tickets api calculation request\ncalc_subst = {\n \"key\": \"key\",\n \"action\": \"action\",\n \"method\": \"method\",\n \"date_start\": \"if[date_start]\",\n \"date_end\": \"if[date_end]\",\n \"company\": \"if[company]\",\n \"imputed_avia_cargo\": \"if[imputed.avia_cargo]\",\n \"imputed_delay_regular\": \"if[imputed.delay_regular]\",\n \"imputed_cancel_travel\": \"if[imputed.cancel_travel]\",\n \"imputed_avia_accident\": \"if[imputed.avia_accident]\",\n \"imputed_delay_dock\": \"if[imputed.delay_dock]\",\n \"params_price_imputed_avia_cargo\": \"if[params][price][imputed.avia_cargo]\",\n \"params_price_imputed_delay_regular\": \"if[params][price][imputed.delay_regular]\",\n \"params_price_imputed_cancel_travel\": \"if[params][price][imputed.cancel_travel]\",\n \"params_price_imputed_avia_accident\": \"if[params][price][imputed.avia_accident]\",\n \"params_price_imputed_delay_dock\": \"if[params][price][imputed.delay_dock]\",\n \"tourists\": \"if[tourists]\",\n \"tourist_birthday\": \"ord[tourists][0][birthDay]\",\n \"tourist_lastname\": \"ord[tourists][0][lastName]\",\n \"tourist_firstname\": \"ord[tourists][0][firstName]\",\n \"buyer_email\": \"ord[buyer][email]\",\n \"buyer_lastname\": \"ord[buyer][lastName]\",\n \"buyer_firstname\": \"ord[buyer][firstName]\",\n \"refid\": \"refid\"\n}\n# substitution parameters for tickets api confirmation request\nconf_subst = {\n \"key\": \"key\",\n \"calculation_code\": \"calculationCode\",\n \"confirm_action\": \"confirm_action\",\n \"action\": \"action\",\n \"method\": \"create_params[method]\",\n \"date_start\": \"create_params[if][date_start]\",\n \"date_end\": \"create_params[if][date_end]\",\n \"company\": \"create_params[if][company]\",\n \"imputed_avia_cargo\": \"create_params[if][imputed.avia_cargo]\",\n \"imputed_delay_regular\": \"create_params[if][imputed.delay_regular]\",\n \"imputed_cancel_travel\": \"create_params[if][imputed.cancel_travel]\",\n \"imputed_avia_accident\": \"create_params[if][imputed.avia_accident]\",\n \"imputed_delay_dock\": \"create_params[if][imputed.delay_dock]\",\n \"params_price_imputed_avia_cargo\": \"create_params[if][params][price][imputed.avia_cargo]\",\n \"params_price_imputed_delay_regular\": \"create_params[if][params][price][imputed.delay_regular]\",\n \"params_price_imputed_cancel_travel\": \"create_params[if][params][price][imputed.cancel_travel]\",\n \"params_price_imputed_avia_accident\": \"create_params[if][params][price][imputed.avia_accident]\",\n \"params_price_imputed_delay_dock\": \"create_params[if][params][price][imputed.delay_dock]\",\n \"tourists\": \"create_params[if][tourists]\",\n \"tourist_birthday\": \"create_params[ord][tourists][0][birthDay]\",\n \"tourist_lastname\": \"create_params[ord][tourists][0][lastName]\",\n \"tourist_firstname\": \"create_params[ord][tourists][0][firstName]\",\n \"buyer_email\": \"create_params[ord][buyer][email]\",\n \"buyer_lastname\": \"create_params[ord][buyer][lastName]\",\n \"buyer_firstname\": \"create_params[ord][buyer][firstName]\",\n \"refid\": \"create_params[refid]\"\n}\n\n\ndef construct_tickets_api_request(**kwargs):\n \"\"\"Construct the tickets api request with given parameters. 
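Supplying the 'calculation_code' keyword switches the substitution table from calc_subst to conf_subst. 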
Can be used for both calculation and confirmation request type.\n\n :param kwargs: named parameters corresponding to the keys from substitution dictionaries\n :return: string that represents the tickets api request with given parameters\n \"\"\"\n http = \"http:\" + get_value_from_config(\"[u'api'][u'url']\").replace(\"/v2\", \"/v1\") + \"/imputed.json\"\n if \"http\" in kwargs.keys():\n http = kwargs.pop(\"http\")\n\n # check if \"tourist_list\" list is present\n additional_tourists = []\n if \"tourist_list\" in kwargs.keys() and isinstance(kwargs[\"tourist_list\"], list):\n additional_tourists = kwargs.pop(\"tourist_list\")\n\n # Config values are taken from external config\n config = load(\"config/tickets.json\")\n options = {\n \"key\": urllib.parse.unquote(config[u'request_key']), # key may be already in quoted format, so fix it\n \"confirm_action\": None,\n \"calculation_code\": None,\n \"action\": \"new\",\n \"method\": \"imputed\",\n \"date_start\": \"default\",\n \"date_end\": \"default\",\n \"company\": \"independence\",\n \"tourists\": 1,\n \"imputed_avia_accident\": None,\n \"imputed_avia_cargo\": None,\n \"imputed_cancel_travel\": None,\n \"imputed_delay_dock\": None,\n \"imputed_delay_regular\": None,\n \"params_price_imputed_avia_accident\": None,\n \"params_price_imputed_avia_cargo\": None,\n \"params_price_imputed_cancel_travel\": None,\n \"params_price_imputed_delay_dock\": None,\n \"params_price_imputed_delay_regular\": None,\n \"tourist_birthday\": config[u'tourist_birthday'],\n \"tourist_lastname\": config[u'tourist_lastname'],\n \"tourist_firstname\": config[u'tourist_firstname'],\n \"buyer_email\": config[u'buyer_email'],\n \"buyer_lastname\": config[u'buyer_lastname'],\n \"buyer_firstname\": config[u'buyer_firstname'],\n \"refid\": config[u'refid']\n }\n\n if \"special\" in kwargs.keys():\n if kwargs[\"special\"] == \"url_without_parameters\":\n return http\n elif kwargs[\"special\"] == \"url_with_key_only\":\n return http + \"?key=\" + options[\"key\"]\n elif not kwargs[\"special\"]:\n pass\n else:\n raise ValueError(\"%s: incorrect function use, no valid parameter 'special' given\" % function_name())\n\n # replace default values in the parameters dict\n for item in kwargs:\n if item not in options.keys():\n logger.warning(\"%s: parameter '%s' cannot be evaluated as request parameter, skipped\" % (function_name(), item))\n continue\n options[item] = str(kwargs[item])\n\n # set start date and end date equal to empty value if requested\n if options[\"date_start\"] == False:\n options[\"date_start\"] = \"\"\n if options[\"date_end\"] == False:\n options[\"date_end\"] = \"\"\n\n # if start date is default then set start date to '+10 days' from today\n if options[\"date_start\"] == \"default\":\n options[\"date_start\"] = (datetime.date.today()+datetime.timedelta(days=10)).strftime(\"%d.%m.%Y\")\n # if end date is default then set end date to '+10 days' from date_start or\n # if start date is not in valid format then set end date to '+20 days' from today\n if options[\"date_end\"] == \"default\":\n try:\n options[\"date_end\"] = (datetime.datetime.strptime(options[\"date_start\"], \"%d.%m.%Y\") + datetime.timedelta(days=10)).strftime(\"%d.%m.%Y\")\n except Exception:\n options[\"date_end\"] = (datetime.date.today() + datetime.timedelta(days=20)).strftime(\"%d.%m.%Y\")\n\n # clear the dict from missing values\n options = {key: options[key] for key in options.keys() if options[key] not in [None, \"None\"]}\n\n # translate parameters names into request names\n if 
\"calculation_code\" not in options.keys():\n # create calculation request\n options = {calc_subst[key]:options[key] for key in options}\n else:\n # create confirmation request\n options = {conf_subst[key]: options[key] for key in options}\n options.update({\"create_params[action]\": options[\"action\"]})\n options[\"action\"] = options[\"confirm_action\"]\n options.pop(\"confirm_action\")\n\n # if additional tourists list given - check and add them to the perameters list\n if additional_tourists:\n for index, tourist in enumerate(additional_tourists):\n if not isinstance(tourist, dict):\n logger.warning(\"Parameter 'tourist_list' should be the list in format: \"\n \"[{\\\"lastname\\\":\\\"LName\\\", \\\"firstname\\\":\\\"FName\\\", \\\"birthday\\\":\\\"DD.MM.YYYY\\\"}, ...]\")\n raise ValueError(\"%s: incorrect value of 'tourist_list' parameter is used.\" % function_name())\n person = {\"create_params[ord][tourists][%s][lastName]\" % str(index + 1): tourist.get(\"lastname\"),\n \"create_params[ord][tourists][%s][firstName]\" % str(index + 1): tourist.get(\"firstname\"),\n \"create_params[ord][tourists][%s][birthDay]\" % str(index + 1): tourist.get(\"birthday\")}\n options.update(person)\n\n # return full URL\n request = http + \"?\" + urllib.parse.urlencode(options)\n return request\n\n\ndef execute_tickets_api_policy_creation(tc_name, **kwargs):\n \"\"\"Execute tickets api algorythm for policy creation. See for reference and\n algorythm description http://docs.cherehapa.ru/private/policy/ticketsCreate.\n\n :param tc_name: test case name\n :param kwargs: named parameters corresponding to the keys from substitution dictionaries\n :return: True if the policy was created and downloaded successfully or False otherwise\n \"\"\"\n request_data = {\"calculation request\": construct_tickets_api_request(**kwargs)}\n request_data[\"calculation response\"] = \\\n send_calculation_request_and_get_response(tc_name, request_data[\"calculation request\"])\n if not request_data[\"calculation response\"]:\n return False\n\n # if calculation was successful - construct and send confirmation request\n calculation_code = request_data[\"calculation response\"].get(\"calculationCode\")\n kwargs.update({\"calculation_code\": calculation_code, \"confirm_action\": \"confirm\"})\n request_data[\"confirmation request\"] = construct_tickets_api_request(**kwargs)\n request_data[\"first confirmation response\"] = \\\n send_first_confirmation_request_and_get_response(tc_name,\n request_data[\"confirmation request\"])\n if not request_data[\"first confirmation response\"]:\n print_tickets_api_dict_info(request_data)\n return False\n\n # if first confirmation response was successful then make second confirmation request\n request_data[\"second confirmation response\"] = \\\n send_second_confirmation_request_and_get_response(tc_name,\n request_data[\"confirmation request\"])\n if not request_data[\"second confirmation response\"]:\n print_tickets_api_dict_info(request_data)\n return False\n\n # confirmation was successful - check database\n with CheDb() as db:\n db.verify_avia_policy_data_in_database_by_task_id(\n task_id=request_data[\"second confirmation response\"].get(\"orderId\"),\n calc_id=calculation_code\n )\n\n # download and store policy archive\n if not download_and_verify_policy_archive(request_data[\"second confirmation response\"]):\n return False\n\n logger.success(\"Successfully processed orderId=%s with Tickets API\"\n % request_data[\"second confirmation response\"].get(\"orderId\"))\n return 
True\n\n\ndef send_calculation_request_and_get_response(req_name, calculation_request):\n request_name = \"'%s' tickets calculation request\" % req_name\n result, response = send_request_api_partner(\n [(request_name, calculation_request, 200)],\n schema_for_success=\"tickets_api_calculation_success_schema\",\n schema_for_error=\"api_error_without_description\")\n if not result:\n logger.fail(request_name)\n return False\n return response\n\n\ndef send_first_confirmation_request_and_get_response(req_name, confirmation_request):\n # first confirmation request should return json {'success': False},\n # the policy has already been created by this moment\n request_name = \"'%s' tickets first confirmation request\" % req_name\n result, response = send_request_api_partner(\n [(request_name, confirmation_request, 200)],\n schema_for_success=\"false_only_json\",\n schema_for_error=\"api_error_without_description\")\n if not result:\n logger.fail(request_name)\n return False\n return response\n\n\ndef send_second_confirmation_request_and_get_response(req_name, confirmation_request):\n # maximum number of attempts to try to get correct response after confirmation request\n MAX_ATTEMPT = 10\n request_name = \"'%s' tickets second confirmation request\" % req_name\n for attempt in range(MAX_ATTEMPT):\n # second confirmation request should return successful json with link to the document\n result, response = send_request_api_partner(\n [(\"%s, attempt %s\" % (request_name, attempt + 1), confirmation_request, 200)],\n schema_for_success=None,\n schema_for_error=None)\n # try to get correct json\n if validate_json_by_schema(json_data=response,\n schema_name=\"tickets_api_confirmation_success_schema\",\n abort_on_exception=False, message_on_exception=False):\n return response\n logger.fail(\"%s: Failed to get valid policy after %s attempts\"\n % (request_name, MAX_ATTEMPT))\n return False\n\n\ndef print_tickets_api_dict_info(print_dict):\n \"\"\"Function used to print requests and responses for tickets API.\n\n :param print_dict: dictionary of keys and values to be printed\n \"\"\"\n for key in sorted(print_dict.keys()):\n logger.info(\"Tickets API %s: %s\" % (key, print_dict[key]))\n return True\n\n\ndef download_and_verify_policy_archive(response_data):\n policy_dir = tempfile.NamedTemporaryFile().name\n os.makedirs(policy_dir)\n url = response_data[\"policy\"][\"download\"]\n destination = os.path.join(policy_dir, \"%s.zip\" % response_data[\"orderId\"])\n result = verify_policy_can_be_downloaded(url, destination) \\\n and verify_file_size_not_zero(destination) \\\n and verify_zip_format_is_correct(destination) \\\n and verify_archive_contents(task_id=response_data[\"orderId\"],\n destination=destination)\n if result:\n shutil.rmtree(policy_dir, ignore_errors=True)\n else:\n logger.debug(\"Files available for manual inspection: %s\" % destination)\n return result\n\n\ndef verify_policy_can_be_downloaded(url, destination):\n if not download_file_by_url(url, destination,\n file_description=\"Policy archive\"):\n logger.fail(\"Tickets API policy archive can be downloaded\")\n return False\n return True\n\n\ndef verify_file_size_not_zero(destination):\n if not os.path.getsize(destination):\n logger.fail(\"Tickets API policy archive has correct size\")\n return False\n return True\n\n\ndef verify_zip_format_is_correct(destination):\n if not zipfile.is_zipfile(destination):\n logger.fail(\"Tickets API policy archive has correct format\")\n return False\n return True\n\n\ndef verify_archive_contents(task_id, 
destination):\n with zipfile.ZipFile(destination) as policy_archive:\n actual_documents_list = [file.filename\n for file in policy_archive.filelist]\n with CheDb() as db:\n query = \"select policies.code from tasks \" \\\n \"left join policies on policies.taskId = tasks.id \" \\\n \"where tasks.taskId='%s' \" \\\n \"and tasks.`code`='CreateSingleAviaPolicy'\" % task_id\n codes_list = db.execute_query(query)\n expected_documents_list = [name[0] + \".pdf\" for name in codes_list]\n return is_equal(sorted(actual_documents_list),\n sorted(expected_documents_list),\n \"Documents list in archive and expected from database\")\n","sub_path":"che-test/scripts/autotests/common/tickets_api.py","file_name":"tickets_api.py","file_ext":"py","file_size_in_byte":15845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"514789947","text":"import numpy\nimport time\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.xception import Xception\nfrom keras.applications.vgg19 import VGG19\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Conv2D, Dense, Dropout, GlobalAveragePooling2D, MaxPooling2D\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import SGD\nfrom keras.preprocessing import image\nfrom keras.utils import np_utils\nfrom sklearn.datasets import load_files\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom tqdm import tqdm\n\nIMAGE_DIMENSION = 197\nIMAGE_SHAPE = (IMAGE_DIMENSION, IMAGE_DIMENSION)\n\n\nclass PreTrainedModel(object):\n def __init__(self, model_name, base_model):\n self.model_name = model_name\n self.base_model = base_model\n\n def prepare_model(self):\n # add a global spatial average pooling layer\n x = self.base_model.output\n x = GlobalAveragePooling2D()(x)\n\n # let's add a fully-connected layer\n x = Dense(1024, activation='relu')(x)\n\n # and a logistic layer\n predictions = Dense(2, activation='softmax')(x)\n\n # this is the model we will train\n return Model(inputs=self.base_model.input, outputs=predictions)\n\n def train_only_top_layers(self, model, train_tensors, train_targets, valid_tensors, valid_targets):\n for layer in self.base_model.layers:\n layer.trainable = False\n\n # compile the model (should be done *after* setting layers to non-trainable)\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['categorical_accuracy'])\n\n # train the model on the new data for a few epochs\n model.fit(train_tensors, train_targets, batch_size=16, epochs=1, verbose=2,\n validation_data=(valid_tensors, valid_targets))\n\n def train_remaining_top_layers(self, model, train_tensors, train_targets, valid_tensors, valid_targets):\n number_of_layers_to_freeze = int(len(model.layers) * 0.75)\n\n # we will freeze the first three fourth of layers and unfreeze the rest:\n for layer in model.layers[:number_of_layers_to_freeze]:\n layer.trainable = False\n\n for layer in model.layers[number_of_layers_to_freeze:]:\n layer.trainable = True\n\n # we need to recompile the model for these modifications to take effect\n # we use SGD with a low learning rate\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n\n checkpointer = ModelCheckpoint(\n filepath=self.model_name + '.weights.best.from_scratch.hdf5',\n verbose=1,\n save_best_only=True)\n\n # we train our model again (this time fine-tuning the 
top 2 inception blocks alongside the top Dense layers)\n model.fit(train_tensors, train_targets, batch_size=16, epochs=3, verbose=2,\n callbacks=[checkpointer], validation_data=(valid_tensors, valid_targets))\n\n def train_and_evaluate(self, train_tensors, train_targets, valid_tensors, valid_targets, test_tensors,\n test_targets):\n\n print(\"\\n\\nStarting for model: \" + self.model_name + \" ...\\n\")\n\n model = self.prepare_model()\n\n start_time = time.time()\n\n # first: train only the top layers (which were randomly initialized)\n self.train_only_top_layers(model, train_tensors, train_targets, valid_tensors, valid_targets)\n\n # at this point, the top layers are well trained and we can start fine-tuning convolutional layers. We will\n # freeze the bottom N layers and train the remaining top layers.\n self.train_remaining_top_layers(model, train_tensors, train_targets, valid_tensors, valid_targets)\n\n print(\"\\n--- %s seconds ---\" % (time.time() - start_time))\n\n # evaluate\n test(model, test_tensors, test_targets)\n\n\ndef load_dataset(path):\n data = load_files(path)\n files = numpy.array(data['filenames'])\n targets = np_utils.to_categorical(numpy.array(data['target']), len(data['target_names']))\n\n return files, targets\n\n\ndef print_dataset_statistics(train_files, valid_files, test_files):\n print('There are %s total images.' % len(numpy.hstack([train_files, valid_files, test_files])))\n print('There are %d training images.' % len(train_files))\n print('There are %d validation images.' % len(valid_files))\n print('There are %d test images.\\n\\n' % len(test_files))\n\n\ndef path_to_tensor(img_path):\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=IMAGE_SHAPE)\n\n # convert PIL.Image.Image type to 3D tensor\n three_d_tensor = image.img_to_array(img)\n\n # convert 3D tensor to 4D tensor and return 4D tensor\n return numpy.expand_dims(three_d_tensor, axis=0)\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\n\n return numpy.vstack(list_of_tensors)\n\n\ndef file_to_tensor(files):\n return paths_to_tensor(files).astype('float32') / 255\n\n\ndef prepare_pre_trained_models():\n inception_base_model = InceptionV3(include_top=False, weights='imagenet',\n input_shape=(IMAGE_DIMENSION, IMAGE_DIMENSION, 3))\n\n res_net_50_base_model = ResNet50(include_top=False, weights='imagenet',\n input_shape=(IMAGE_DIMENSION, IMAGE_DIMENSION, 3))\n\n xception_base_model = Xception(include_top=False, weights='imagenet',\n input_shape=(IMAGE_DIMENSION, IMAGE_DIMENSION, 3))\n\n vgg_19_base_model = VGG19(include_top=False, weights='imagenet', input_shape=(IMAGE_DIMENSION, IMAGE_DIMENSION, 3))\n\n inception_v3 = PreTrainedModel('InceptionV3', inception_base_model)\n resnet50 = PreTrainedModel('ResNet50', res_net_50_base_model)\n xception = PreTrainedModel('Xception', xception_base_model)\n vgg19 = PreTrainedModel('VGG19', vgg_19_base_model)\n\n return [inception_v3, resnet50, xception, vgg19]\n\n\ndef prepare_own_model():\n model = Sequential()\n\n model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',\n input_shape=(IMAGE_DIMENSION, IMAGE_DIMENSION, 3)))\n\n model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))\n 
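# Repeating block: double the filter count, then pool to halve the spatial size and apply dropout.\n 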
model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(Conv2D(filters=128, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(Conv2D(filters=256, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(Conv2D(filters=512, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(Conv2D(filters=1024, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(GlobalAveragePooling2D())\n model.add(Dense(2, activation='softmax'))\n\n return model\n\n\ndef test(model, test_tensors, test_targets):\n # get index of predicted category for each image in test set\n predictions = [numpy.argmax(model.predict(numpy.expand_dims(tensor, axis=0))) for tensor in test_tensors]\n\n # report scores\n y_test = numpy.argmax(test_targets, axis=1)\n\n print('\\nAccuracy score: ', format(accuracy_score(y_test, predictions)))\n print('Precision score: ', format(precision_score(y_test, predictions)))\n print('Recall score: ', format(recall_score(y_test, predictions)))\n print('F1 score: ', format(f1_score(y_test, predictions, average='micro')))\n\n\nprint(\"\\nLoading training, validation and test datasets ...\\n\")\n\n# load train, validation and test datasets\ntrain_files, train_targets = load_dataset('train')\nvalid_files, valid_targets = load_dataset('valid')\ntest_files, test_targets = load_dataset('test')\n\n# print statistics about the dataset\nprint_dataset_statistics(train_files, valid_files, test_files)\n\nprint(\"Converting files to tensors ...\\n\")\n\n# pre-process the data for Keras\ntrain_tensors = file_to_tensor(train_files)\nvalid_tensors = file_to_tensor(valid_files)\ntest_tensors = file_to_tensor(test_files)\n\nprint(\"\\nFinished loading dataset and converting files to tensors.\\n\\n\")\n\n# Train and evaluate own model\nprint(\"\\nStarting for own model ...\\n\")\n\nmodel = prepare_own_model()\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['categorical_accuracy'])\n\nmodel.fit(train_tensors, train_targets, batch_size=16, epochs=3, verbose=2,\n callbacks=[\n ModelCheckpoint(filepath='own_model.weights.best.from_scratch.hdf5', verbose=1, save_best_only=True)\n ],\n validation_data=(valid_tensors, valid_targets))\n\ntest(model, test_tensors, test_targets)\n\n# Train and evaluate pre-trained models(InceptionV3, ResNet50, Xception and VGG19)\nfor pre_trained_model in prepare_pre_trained_models():\n pre_trained_model.train_and_evaluate(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors,\n test_targets)\n","sub_path":"capstone_classifying_x_rays/code/train_multiple.py","file_name":"train_multiple.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"302456841","text":"# -*- coding: utf-8 -*-\n#\n# File: RotaTool.py\n#\n# Copyright (c) 2007 by []\n# Generator: ArchGenXML Version 1.6.0-beta-svn\n# http://plone.org/products/archgenxml\n#\n# GNU General Public License (GPL)\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will 
be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA.\n#\n\n__author__ = \"\"\"Jean Jordaan \"\"\"\n__docformat__ = 'plaintext'\n\nfrom AccessControl import ClassSecurityInfo\nfrom Products.Archetypes.atapi import *\nfrom zope import interface\nfrom Products.Relations.field import RelationField\nfrom Products.Bungeni.config import *\n\n# additional imports from tagged value 'import'\nfrom Products.OrderableReferenceField import OrderableReferenceField\n\n\nfrom Products.CMFCore.utils import UniqueObject\n\n \n##code-section module-header #fill in your manual code here\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.Archetypes.utils import DisplayList\n\nfrom Products.validation.config import validation\nfrom Products.validation.validators.RangeValidator import RangeValidator\nvalidation.register(RangeValidator('isPositiveNumber', 1, 600))\n##/code-section module-header\n\nschema = Schema((\n\n IntegerField(\n name='ReportingLeadTime',\n widget=IntegerField._properties['widget'](\n description=\"How many minutes before the take the reporters have to be in the Chamber/Committee Room\",\n label=\"Reporting Lead Time\",\n label_msgid='Bungeni_label_ReportingLeadTime',\n description_msgid='Bungeni_help_ReportingLeadTime',\n i18n_domain='Bungeni',\n ),\n required=1,\n validators=('isPositiveNumber',)\n ),\n\n IntegerField(\n name='TakeLength',\n widget=IntegerField._properties['widget'](\n description=\"The length of a take in minutes\",\n label='Takelength',\n label_msgid='Bungeni_label_TakeLength',\n description_msgid='Bungeni_help_TakeLength',\n i18n_domain='Bungeni',\n ),\n required=1,\n validators=('isPositiveNumber',)\n ),\n\n IntegerField(\n name='ExtraTakes',\n widget=IntegerField._properties['widget'](\n description=\"The allowance of extra takes to cater for sitting overrunning\",\n label='Extratakes',\n label_msgid='Bungeni_label_ExtraTakes',\n description_msgid='Bungeni_help_ExtraTakes',\n i18n_domain='Bungeni',\n ),\n required=1,\n validators=('isPositiveNumber',)\n ),\n\n OrderableReferenceField(\n name='AvailableReporters',\n vocabulary='getAvailableReportersVocab',\n widget=OrderableReferenceField._properties['widget'](\n label='Availablereporters',\n label_msgid='Bungeni_label_AvailableReporters',\n i18n_domain='Bungeni',\n ),\n allowed_types=['Staff',],\n relationship=\"rotatool_availablereporters\",\n relation_implementation=\"basic\"\n ),\n\n RelationField(\n name='RotaSubscribers',\n widget=ReferenceWidget(\n label='Rotasubscribers',\n label_msgid='Bungeni_label_RotaSubscribers',\n i18n_domain='Bungeni',\n ),\n relationship='rotatool_rotasubscribers',\n multiValued=1,\n vocabulary='getRotaSubscribersVocab',\n default_method='setRotaSubscribersDefault',\n allowed_types=\"['MemberOfParliament', 'Staff']\"\n ),\n\n),\n)\n\n##code-section after-local-schema #fill in your manual code here\n##/code-section after-local-schema\n\nRotaTool_schema = BaseSchema.copy() + \\\n schema.copy()\n\n##code-section after-schema #fill in your manual code here\n##/code-section after-schema\n\nclass RotaTool(UniqueObject, BaseContent):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n __implements__ = 
(getattr(UniqueObject,'__implements__',()),) + (getattr(BaseContent,'__implements__',()),)\n\n # This name appears in the 'add' box\n archetype_name = 'RotaTool'\n\n meta_type = 'RotaTool'\n portal_type = 'RotaTool'\n allowed_content_types = []\n filter_content_types = 0\n global_allow = 0\n #content_icon = 'RotaTool.gif'\n immediate_view = 'base_view'\n default_view = 'base_view'\n suppl_views = ()\n typeDescription = \"RotaTool\"\n typeDescMsgId = 'description_edit_rotatool'\n #toolicon = 'RotaTool.gif'\n\n _at_rename_after_creation = True\n\n schema = RotaTool_schema\n\n ##code-section class-header #fill in your manual code here\n ##/code-section class-header\n\n\n # tool-constructors have no id argument, the id is fixed\n def __init__(self, id=None):\n BaseContent.__init__(self,'portal_rotatool')\n self.setTitle('RotaTool')\n \n ##code-section constructor-footer #fill in your manual code here\n ##/code-section constructor-footer\n\n\n # tool should not appear in portal_catalog\n def at_post_edit_script(self):\n self.unindexObject()\n \n ##code-section post-edit-method-footer #fill in your manual code here\n ##/code-section post-edit-method-footer\n\n\n # Methods\n\n security.declarePublic('getAvailableReportersVocab')\n def getAvailableReportersVocab(self):\n \"\"\" Get the current parliament's team of reporters, and return\n the active memberships.\n \"\"\"\n members = self.getReporters()\n return DisplayList([(m.UID(), m.Title()) for m in members])\n\n security.declarePublic('getRotaSubscribersVocab')\n def getRotaSubscribersVocab(self):\n \"\"\"\n \"\"\"\n members = self.getReporters() + self.getMPs()\n return DisplayList([(m.UID(), m.Title()) for m in members])\n\n security.declarePublic('getReporters')\n def getReporters(self):\n \"\"\"\n \"\"\"\n # TODO: this looks for all Reporters in all teams. 
If someone is\n # a Reporter in more than one team, this can return duplicates.\n # Is that a bug?\n catalog = getToolByName(self, 'portal_catalog')\n reporter_proxies = catalog.search(\n {'allowedRolesAndUsers': 'Reporter', 'review_state': 'active',\n 'portal_type': 'Team Membership'}\n )\n reporters = [p.getObject() for p in reporter_proxies]\n members = [r.getMember() for r in reporters]\n return members\n\n security.declarePublic('getMPs')\n def getMPs(self):\n \"\"\"\n \"\"\"\n catalog = getToolByName(self, 'portal_catalog')\n mp_proxies = catalog.search(\n {'allowedRolesAndUsers': 'MemberOfParliament',\n 'review_state': 'active',\n 'portal_type': 'Team Membership'}\n )\n mps = [p.getObject() for p in mp_proxies]\n members = [r.getMember() for r in mps]\n return members\n\n\nregisterType(RotaTool, PROJECTNAME)\n# end of class RotaTool\n\n##code-section module-footer #fill in your manual code here\n##/code-section module-footer\n\n\n\n","sub_path":"archived/Bungeni/trunk/debaterecord/RotaTool.py","file_name":"RotaTool.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"84665884","text":"from Tkinter import *\n\nclass BresenhamCanvas(Canvas):\n\n def draw_point(self, x, y, color=\"red\"):\n self.create_line(x, y, x+1, y+1, fill=color)\n\n def draw_circle_points(self, x, y, center_x, center_y, color=\"red\"):\n '''Draw 8 points on a circle centered\n at center_x, center_y, by symmetry.'''\n self.draw_point(center_x + x, center_y + y, color)\n self.draw_point(center_x - x, center_y - y, color)\n self.draw_point(center_x + x, center_y - y, color)\n self.draw_point(center_x - x, center_y + y, color)\n\n # If x == y then these points will simply duplicate\n # points already drawn above. 
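(On the diagonal x == y the eight symmetric points collapse to four.) 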
No need to repeat them then.\n if x != y:\n self.draw_point(center_x + y, center_y + x, color)\n self.draw_point(center_x - y, center_y - x, color)\n self.draw_point(center_x + y, center_y - x, color)\n self.draw_point(center_x - y, center_y + x, color)\n\n def draw_circle(self, center_x, center_y, radius, line_thickness, color=\"red\"):\n\n # Start at the top of the circle\n x = 0\n yin = radius - int(line_thickness/2)\n yout = radius + int(line_thickness/2)\n\n din = 1 - radius - int(line_thickness/2) # midpoint decision variable\n deltaEin = 3 # initial delta for move E\n deltaSEin = -2*(radius - int(line_thickness/2)) + 5 # initial delta for move SE\n dout = 1 - radius + int(line_thickness/2) # midpoint decision variable\n deltaEout = 3 # initial delta for move E\n deltaSEout = -2*(radius + int(line_thickness/2)) + 5 # initial delta for move SE\n\n # First point\n for y in range(yin,yout):\n self.draw_circle_points(x, y, center_x, center_y, color)\n\n # Stop when we cross the line y == x, which is the edge\n # of the first octant of the circle\n while yout > x:\n if din < 0:\n # Moving E\n din = din + deltaEin\n deltaEin = deltaEin + 2\n deltaSEin = deltaSEin + 2\n else:\n # Moving SE\n din = din + deltaSEin\n deltaEin = deltaEin + 2\n deltaSEin = deltaSEin + 4\n yin = yin - 1\n if dout < 0:\n # Moving E\n dout = dout + deltaEout\n deltaEout = deltaEout + 2\n deltaSEout = deltaSEout + 2\n else:\n # Moving SE\n dout = dout + deltaSEout\n deltaEout = deltaEout + 2\n deltaSEout = deltaSEout + 4\n yout = yout - 1\n x = x + 1\n for y in range(yin,yout):\n self.draw_circle_points(x, y, center_x, center_y, color)\n\ndef run():\n import math\n CANVAS_SIZE = 600\n\n root = Tk()\n canvas = BresenhamCanvas(root, width=CANVAS_SIZE, height=CANVAS_SIZE)\n canvas.pack()\n\n margin = CANVAS_SIZE / 10\n\n origin_x = int(CANVAS_SIZE / 2)\n origin_y = int(CANVAS_SIZE / 2)\n center_dist = ((CANVAS_SIZE / 2) - 2*margin)\n radius = margin*2\n\n n_circles = 10\n angle_step = (2 * math.pi) / n_circles\n\n for i in range(n_circles):\n theta = angle_step * i\n center_x = int(center_dist * math.cos(theta)) + origin_x\n center_y = int(center_dist * math.sin(theta)) + origin_y\n canvas.draw_circle(center_x, center_y, radius, 10, color=\"blue\")\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"py help/04. 
geometry/Bresenham circle.py","file_name":"Bresenham circle.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653974447","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom collections import namedtuple\nimport time\nimport os\n\n\ndef write_bytes_to_file(bytes, file_path):\n with open(file_path, mode='w+b') as out_file:\n out_file.write(bytes)\n\n\nRepository = namedtuple('Repository',['id', 'name', 'link'])\n\n\ndef get_re3data_repository_list():\n re3data_repositories_api_url = 'https://www.re3data.org/api/v1/repositories'\n re3data_api_url = 'https://www.re3data.org'\n r = requests.get(re3data_repositories_api_url)\n if r.status_code == 200:\n soup = BeautifulSoup(r.content, \"html.parser\")\n repositories = soup.select('repository')\n if repositories:\n for repository in repositories:\n id = repository.select('id')[0].get_text()\n name = repository.select('name')[0].get_text()\n link = re3data_api_url + repository.select('link')[0]['href']\n yield Repository(id=id, name=name, link=link)\n\n\ndef crawl_re3data_repository(repository_urls_file_path, save_dir):\n with open(repository_urls_file_path) as input_file:\n count = 0\n for line in input_file.readlines():\n count += 1\n if count < 1723:\n continue\n line = line.strip()\n r = requests.get(line)\n if r.status_code == 200:\n write_bytes_to_file(r.content, save_dir+'/'+line[line.rindex('/')+1:])\n time.sleep(1)\n\n\ndef get_entrydate(repository_dir):\n for filename in os.listdir(repository_dir):\n with open(repository_dir+'/'+filename, encoding='utf-8') as input_file:\n data = input_file.read()\n entry_date = data[data.find('<r3d:entryDate>')+15:data.find('</r3d:entryDate>')]\n print(entry_date)\n\n\nif __name__ == '__main__':\n repository_urls_file_path = 'E:/data/re3data/repositories.txt'\n save_dir = 'E:/data/re3data/repositories'\n # crawl_re3data_repository(repository_urls_file_path, save_dir)\n get_entrydate(save_dir)\n","sub_path":"top/iuui/crawler/re3data_crawler.py","file_name":"re3data_crawler.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"73283050","text":"import requests\nimport json\nimport random\nimport re\n\nsubreddits = ['dankmemes']\npopularity = ['month', 'year', 'all']\n\nurl = (\"https://reddit.com/r/\" + subreddits[random.randint(0,(len(subreddits) - 1))] + \"/top/.json\")\nheaders = { 'User-Agent': 'Desktop:zhubot:v0.1.0 (by /u/zhubot)' }\nparams = {'limit': 50,\n 't': popularity[random.randint(0,(len(popularity)-1))],}\n\n\ndef get_memes():\n \n response = requests.get(url, headers=headers, params=params)\n try:\n data_dict = json.loads(response.text)\n except:\n print(response)\n total_memes = len(data_dict['data']['children'])\n meme = data_dict['data']['children'][random.randint(0,(total_memes - 1))]['data']['url']\n \n # Check that a url from i.reddituploads doesn't contain an escaped '&amp;' as it breaks the link\n if 'i.reddituploads' in meme and '&amp;' in meme:\n meme = re.sub('&amp;', '&', meme)\n return meme\n\n","sub_path":"commands/memes.py","file_name":"memes.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"28631489","text":"import unittest\n\nfrom fountain.command import ChangeNozzlePressureAndColorFountainCommand\nfrom fountain.program import FountainProgram\n\n\nclass TestFountainProgram(unittest.TestCase):\n def test_parse_json(self):\n json = \"\"\"{\n 
\"version\": 1,\n \"commands\": [\n {\n \"nozzle\": 1,\n \"pressure\": 42,\n \"color\": \"green\",\n \"time\": 5\n },\n {\n \"nozzle\": 2,\n \"pressure\": 3.14,\n \"color\": \"red\",\n \"time\": 2\n },\n {\n \"nozzle\": 5,\n \"pressure\": 0,\n \"color\": \"yellow\",\n \"time\": 10\n }\n ]\n }\"\"\"\n\n program = FountainProgram.parse_json(json)\n\n self.assertListEqual([ChangeNozzlePressureAndColorFountainCommand(1, 42, 'green', 5),\n ChangeNozzlePressureAndColorFountainCommand(2, 3.14, 'red', 2),\n ChangeNozzlePressureAndColorFountainCommand(5, 0, 'yellow', 10)],\n program.commands)\n","sub_path":"tests/fountain/test_program.py","file_name":"test_program.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"332983279","text":"\n### one code one day\n### 2020/03/14\n### leetcode 300 最长子序列\n### 动态规划基本问题\n\n### 2020/04/06\n### 二刷\n大王\n### 贪心 + 二分 O(nlogn)\ndef lengthOfLIS(self, nums: List[int]) -> int:\n d = []\n for n in nums:\n if not d or n > d[-1]:\n d.append(n)\n else:\n if(d[0] >= n):\n d[0] = n\n else:\n ### 二分\n l, r = 0, len(d)-1\n while(l < r-1):\n mid = (l + r) // 2\n if(d[mid] >= n):\n r = mid\n else:\n l = mid\n d[r] = n\n return len(d)\n\n### 动态规划\ndef lengthOfLIS(self, nums: List[int]) -> int:\n dp = [1] * len(nums)\n for i in range(len(nums)):\n for j in range(i):\n if(nums[j] < nums[i]):\n dp[i] = max(dp[i], dp[j]+1)\n return max(dp)\n","sub_path":"动态规划/lengthOfLIS.py","file_name":"lengthOfLIS.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"563476757","text":"from __future__ import annotations\n\nimport logging\nfrom progressivis.core.module import Module\n\nfrom typing import (\n Type,\n Tuple,\n Any,\n Dict,\n Optional,\n List,\n TYPE_CHECKING,\n Union,\n Iterable,\n)\n\nif TYPE_CHECKING:\n from progressivis.core.scheduler import Scheduler\n from .table import Pipeable\n\nlogger = logging.getLogger(__name__)\n\n\nclass ValidationError(RuntimeError):\n pass\n\n\ndef filter_underscore(lst: Iterable[str]) -> List[str]:\n return [elt for elt in lst if not elt.startswith(\"_\")]\n\n\nclass Expr:\n def __init__(\n self,\n module_class: Type[Module],\n args: Tuple[Any, ...],\n kwds: Dict[str, Any],\n output_slot: Optional[str] = None,\n module: Optional[Module] = None,\n ):\n self._module_class = module_class\n lazy = kwds.pop(\"lazy\", False)\n self._args = args\n self._kwds = kwds\n self._module = module\n self._output_slot = output_slot\n self._valid: Optional[bool] = (module is not None) or None\n self._expr_args: Tuple[Expr, ...] = ()\n self._non_expr_args: Tuple[Any, ...] 
= ()\n self._expr_kwds: Dict[str, Expr] = {}\n self._non_expr_kwds: Dict[str, Any] = {}\n self._repipe: Optional[str] = None\n if not lazy:\n self.validate()\n\n @property\n def module(self) -> Optional[Module]:\n return self._module\n\n @property\n def output_slot(self) -> Optional[str]:\n return self._output_slot\n\n def get_data(self, name: str) -> Any:\n if self.module is None:\n return None\n return self.module.get_data(name)\n\n def __getitem__(self, output_slot: str) -> Expr:\n assert self._module is not None\n self._module.get_output_slot(\n output_slot\n ) # raise an error if output_slot does not exist\n return Expr(\n self._module_class,\n self._non_expr_args,\n dict(lazy=True, **self._non_expr_kwds),\n output_slot=output_slot,\n module=self._module,\n )\n\n def tee(self, lambda1: Any, lambda2: Any) -> Any:\n lambda1(self)\n return lambda2(self)\n\n def _validate_args(self) -> None:\n modules: List[Expr] = []\n non_modules: List[Any] = []\n for a in self._args:\n if isinstance(a, Expr):\n a.validate()\n modules.append(a)\n else:\n non_modules.append(a)\n self._expr_args = tuple(modules)\n self._non_expr_args = tuple(non_modules)\n\n def _validate_kwds(self) -> None:\n modules: Dict[str, Expr] = {}\n non_modules: Dict[str, Any] = {}\n for (k, a) in self._kwds.items():\n if isinstance(a, Expr):\n a.validate()\n modules[k] = a\n else:\n non_modules[k] = a\n self._expr_kwds = modules\n self._non_expr_kwds = non_modules\n\n def _connect(\n self, module: Module, expr: Expr, input_slot: Optional[str] = None\n ) -> None:\n input_module = expr.module\n assert input_module\n output_slot = expr.output_slot\n if output_slot is None:\n output_slots = filter_underscore(input_module.output_slot_names())\n if len(output_slots) == 0:\n raise ValueError(\n \"Cannot extract output slot from module %s\", input_module\n )\n output_slot = output_slots[0] # take the first one\n if input_slot is None:\n input_slots = filter_underscore(module.input_slot_names())\n for inp in input_slots:\n if not module.has_input_slot(inp): # no input slot connected yet\n input_slot = inp\n break\n if input_slot is None:\n raise ValueError(\"Cannot extract input slot from module %s\", module)\n input_module.connect_output(output_slot, module, input_slot)\n\n def _instanciate_module(self) -> None:\n module = self._module_class(*self._non_expr_args, **self._non_expr_kwds)\n for expr in self._expr_args:\n self._connect(module, expr, None)\n\n for (input_slot, expr) in self._expr_kwds.items():\n self._connect(module, expr, input_slot)\n\n self._module = module\n\n def validate(self) -> Module:\n if self._valid is None:\n try:\n self._validate_args()\n self._validate_kwds()\n self._instanciate_module()\n self._valid = True\n except Exception:\n self._valid = False\n raise\n if self._valid is False:\n raise ValidationError(\"Module not valid\")\n assert self._module is not None\n return self._module\n\n def invalidate(self) -> None:\n self._valid = None\n\n def scheduler(self) -> Scheduler:\n assert self._module\n return self._module.scheduler()\n\n def repipe(self, mod_name: str, out: Optional[str] = None) -> Expr:\n mod_ = self.scheduler()[mod_name]\n if isinstance(mod_, (Module, Module)):\n from .table import PDataExpr\n\n return PDataExpr(\n type(mod_), (), dict(lazy=True), module=mod_, output_slot=out\n )\n return Expr(type(mod_), (), dict(lazy=True), module=mod_, output_slot=out)\n\n def fetch(\n self, mod_name: str, out: Optional[str] = None\n ) -> Expr: # a simple alias for repipe\n return self.repipe(mod_name, 
out)\n\n def __or__(self, other: Union[None, Expr, Pipeable]) -> Expr:\n if other is None:\n return self\n if isinstance(other, Expr):\n return other\n # assert isinstance(other, Pipeable)\n ret: Expr = other._expr_class(\n other._module_class, (self,) + other._args, other._kwds\n )\n if other._repipe is not None:\n return ret.repipe(other._repipe, out=other._repipe_out)\n return ret\n","sub_path":"progressivis/expr/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"321532278","text":"# vim: set fileencoding=utf8:\nimport re\nimport datetime\nimport json\nimport subprocess\n\nfrom django.db import transaction\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, Http404\nfrom django.shortcuts import get_object_or_404, render, redirect, render_to_response\nfrom django.core.urlresolvers import reverse\nfrom django.template import RequestContext\nfrom django import forms\nfrom django.views.decorators.http import condition\nfrom django.views.generic import ListView, FormView, UpdateView, TemplateView, View, CreateView\nfrom django.views.generic.edit import ProcessFormView\nfrom django.contrib.auth.models import User\n\nfrom .. import settings\nfrom ..settings import YEAR\nfrom ..tutor.models import RusClass, TutorProfile, Rus\nfrom ..tutor.auth import user_tutor_data, tutor_required_error, NotTutor, rusclass_required_error, tutorbest_required_error\n\nfrom .models import ImportSession, ImportLine, Note, ChangeLogEntry, Handout, HandoutRusResponse, HandoutClassResponse\nfrom .models import LightboxRusClassState, LightboxNote\nfrom .email import make_password_reset_message, send_messages\n\n# =============================================================================\n\nclass BurStartView(TemplateView):\n template_name = 'reg/bur_start.html'\n\n# =============================================================================\n\nclass ChooseSessionView(ListView):\n model = ImportSession\n\n def get_queryset(self):\n return ImportSession.objects.filter(year__exact=YEAR)\n\nclass NewSessionView(ProcessFormView):\n \"\"\"POST target used by ChooseSessionView to create a new ImportSession.\"\"\"\n\n def post(self, request):\n importsession = ImportSession(year=YEAR, author=request.user.tutorprofile)\n importsession.save()\n return HttpResponseRedirect(reverse('import_session_edit', kwargs={'pk': importsession.pk}))\n\nclass EditSessionForm(forms.ModelForm):\n class Meta:\n model = ImportSession\n fields = ('year', 'name', 'regex', 'author')\n\n year = forms.CharField(required=False)\n author = forms.CharField(required=False)\n imported = forms.CharField(required=False)\n\n regex = forms.CharField()\n name = forms.CharField()\n lines = forms.CharField(widget=forms.Textarea)\n\n def clean_regex(self):\n \"\"\"Check if regex is valid by compiling it.\n\n Later on, clean() then checks if the regex matches useful things.\"\"\"\n\n data = self.cleaned_data['regex']\n try:\n r = re.compile(data)\n except re.error as v:\n raise forms.ValidationError(u\"Fejl i regulært udtryk: \"+unicode(v))\n\n return data\n\n def clean_year(self):\n \"\"\"Read-only model field as far as this form is concerned.\"\"\"\n return self.instance.year\n\n def clean_author(self):\n \"\"\"Read-only model field as far as this form is concerned.\"\"\"\n return self.instance.author\n\n def clean_imported(self):\n \"\"\"Read-only model field as far as this form is concerned.\"\"\"\n 
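# Echo the stored value back; submitted form data can never overwrite the import timestamp.\n 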
return self.instance.imported\n\n def clean(self):\n \"\"\"Check if regex matches the lines and yields the right capture groups.\n\n It is expected to yield just the named groups specified by `expected`.\n\n It is not expected to yield any other named groups, or any numbered groups.\"\"\"\n\n cleaned_data = super(EditSessionForm, self).clean()\n\n regex = cleaned_data.get('regex')\n lines = cleaned_data.get('lines')\n\n # Named groups we expect in the input\n expected = frozenset(('rusclass', 'name', 'studentnumber'))\n expected_string = u', '.join('(?P<'+k+'>...)' for k in expected) + u'.'\n\n if regex and lines:\n r = re.compile(regex)\n matches = 0\n linecount = 0\n for line in lines.splitlines():\n linecount = linecount + 1\n m = r.match(line)\n if m:\n matches = matches + 1\n groups = m.groups()\n groupdict = m.groupdict()\n # `groups` contains both named and numbered match groups;\n # `groupdict` contains only named match groups.\n # We only want named match groups.\n if len(groups) != len(groupdict):\n raise forms.ValidationError(u\"Det regulære udtryk matcher UNAVNGIVNE grupper. \"\n +u\"Brug kun navngivne grupper \"+expected_string)\n groupkeys = frozenset(groupdict.keys())\n if not groupkeys.issubset(expected):\n raise forms.ValidationError(u\"Det regulære udtryk matcher UKENDTE gruppenavne. \"\n +u\"Brug kun navngivne grupper \"+expected_string)\n if not expected.issubset(groupkeys):\n raise forms.ValidationError(u\"Det regulære udtryk matcher IKKE \"\n +u\"alle de navngivne grupper \"+expected_string)\n\n for n, v in groupdict.items():\n if v == '':\n raise forms.ValidationError(u\"Det regulære udtryk matcher gruppen '\"\n +n+u\"' som den tomme streng.\")\n\n if matches == 0:\n raise forms.ValidationError(u\"Det regulære udtryk matcher ingen strenge i input.\")\n\n return cleaned_data\n\nclass EditSessionView(UpdateView):\n \"\"\"An import session hits the EditSessionView multiple times.\n\n First, the NewSessionView redirects the user to an empty ImportSession.\n The user inputs a regular expression and a bunch of lines and submits.\n Then, this view matches the regex against the lines and saves the result\n as ImportLines and displays them to the user.\n\n If the user then wants to create the appropriate Rus and RusClass objects,\n he clicks the submit button named \"create\".\"\"\"\n form_class = EditSessionForm\n template_name = 'reg/edit_session_form.html'\n\n def get_form(self, form_class):\n form = super(EditSessionView, self).get_form(form_class)\n if form.instance and form.instance.pk is not None:\n lines = u'\\n'.join(il.line\n for il in ImportLine.objects.filter(session=form.instance).order_by('position'))\n form.fields['lines'].initial = lines\n return form\n\n def get_object(self):\n return ImportSession.objects.get(pk=self.kwargs.get('pk'))\n\n def get_context_data(self, form, **kwargs):\n context_data = super(EditSessionView, self).get_context_data(form=form, **kwargs)\n if form.instance:\n context_data['lines'] = ImportLine.objects.filter(session=form.instance)\n if form.instance.imported:\n context_data['imported'] = form.instance.imported\n return context_data\n\n def form_valid(self, form):\n # Form input\n line_strings = form.cleaned_data['lines'].splitlines()\n regex = re.compile(form.cleaned_data['regex'])\n year = form.instance.year\n\n if form.instance.imported:\n context_data = super(EditSessionView, self).get_context_data(form=form)\n context_data['error'] = u'Denne rusliste er allerede importeret'\n return self.render_to_response(context_data)\n\n # 
Line objects\n lines = []\n position = 1\n studentnumbers = set()\n studentnumbers_duplicate = set()\n for line in line_strings:\n il = ImportLine(session=form.instance, line=line, position=position, matched=False)\n position = position + 1\n\n m = regex.match(line)\n if m:\n il.matched = True\n il.rusclass = m.group('rusclass')\n il.name = m.group('name')\n il.studentnumber = m.group('studentnumber')\n if il.studentnumber in studentnumbers:\n studentnumbers_duplicate.add(il.studentnumber)\n else:\n studentnumbers.add(il.studentnumber)\n\n lines.append(il)\n\n lines_saved = False\n\n if studentnumbers_duplicate:\n context_data = super(EditSessionView, self).get_context_data(form=form)\n context_data['error'] = u'Årskortnummer/-numre er ikke unikke: '+u', '.join(studentnumbers_duplicate)\n return self.render_to_response(context_data)\n\n\n # Save form and perform bulk delete/insert of lines\n with transaction.atomic():\n importsession = form.save()\n ImportLine.objects.filter(session=form.instance).delete()\n ImportLine.objects.bulk_create(lines)\n lines_saved = True\n\n context_data = self.get_context_data(form=form)\n\n if lines_saved and 'create' in self.request.POST:\n class RusError(Exception):\n pass\n\n try:\n with transaction.atomic():\n\n profiles = {}\n for tp in TutorProfile.objects.filter(studentnumber__in=(il.studentnumber for il in lines)):\n profiles[tp.studentnumber] = tp\n\n rusclasses = {}\n def get_rusclass(rusclass):\n if rusclass in rusclasses:\n return rusclasses[rusclass]\n try:\n o = RusClass.objects.get(year=year, official_name=il.rusclass)\n except RusClass.DoesNotExist:\n o = RusClass.objects.create_from_official(year=year, official_name=il.rusclass)\n o.save()\n rusclasses[rusclass] = o\n return o\n\n for il in lines:\n if not il.matched:\n continue\n\n rusclass = get_rusclass(il.rusclass)\n\n if il.studentnumber in profiles:\n tp = profiles[il.studentnumber]\n existing_rus = Rus.objects.filter(profile=tp, year=year)\n if existing_rus.exists():\n raise RusError(u\"Studienummer %s findes allerede i ruslisterne\" % il.studentnumber)\n else:\n first_name, last_name = il.name.split(' ', 1)\n u = User.objects.create(username=il.studentnumber, first_name=first_name, last_name=last_name)\n tp = TutorProfile.objects.create(name=il.name, studentnumber=il.studentnumber, user=u)\n tp.set_default_email()\n\n rus = Rus.objects.create(profile=tp, year=year, rusclass=rusclass, initial_rusclass=rusclass)\n\n importsession.imported = datetime.datetime.now()\n importsession.save()\n context_data['imported'] = importsession.imported\n\n except RusError as e:\n context_data['create_error'] = unicode(e)\n\n context_data['lines_saved'] = lines_saved\n\n return self.render_to_response(context_data)\n\n# =============================================================================\n\nclass RusListView(TemplateView):\n template_name = 'reg/rus_list.html'\n\n def get_page_data(self):\n return {\n 'rus_list': self.get_rus_list_data(),\n 'rusclass_list': self.get_rusclass_list_data(),\n 'note_list': self.get_note_list_data(),\n 'change_list_newest': self.get_change_list_newest(),\n }\n\n def get_rusclass_list_data(self):\n return [{\n 'handle': rusclass.handle,\n 'internal_name': rusclass.internal_name,\n } for rusclass in self.get_rusclass_list()]\n\n def get_rus_list_data(self):\n return [rus.json_of() for rus in self.get_rus_list()]\n\n def get_context_data(self, **kwargs):\n context_data = super(RusListView, self).get_context_data(**kwargs)\n context_data['page_data_json'] = 
json.dumps(self.get_page_data())\n return context_data\n\n def get_change_list_newest(self):\n try:\n return ChangeLogEntry.objects.order_by('-pk')[:1].get().pk\n except ChangeLogEntry.DoesNotExist:\n return 0\n\n def get_note_list_data(self):\n rus_list = self.get_rus_list()\n rusclass_list = self.get_rusclass_list()\n\n rus_dict = {}\n for o in rus_list:\n rus_dict[o.pk] = o\n rusclass_dict = {}\n for o in rusclass_list:\n rusclass_dict[o.pk] = o\n\n rus_pks = frozenset(o.pk for o in rus_list)\n rusclass_pks = frozenset(o.pk for o in rusclass_list)\n\n rus_notes_qs = Note.objects.filter(subject_kind__exact='rus', subject_pk__in=rus_pks,\n deleted__isnull=True)\n rusclass_notes_qs = Note.objects.filter(subject_kind__exact='rusclass', subject_pk__in=rusclass_pks,\n deleted__isnull=True)\n\n note_list = list(rus_notes_qs) + list(rusclass_notes_qs)\n note_list_data = []\n\n for note in note_list:\n note_list_data.append({'pk': note.pk, 'note': note.json_of()})\n\n return note_list_data\n\n def get_rus_list(self):\n rus_list = Rus.objects.filter(year=YEAR)\n return rus_list\n\n def get_rusclass_list(self):\n rusclass_list = RusClass.objects.filter(year=YEAR)\n return rusclass_list\n\n\nclass RusCreateForm(forms.Form):\n name = forms.CharField(label='Navn')\n studentnumber = forms.CharField(label=u'Årskortnummer')\n email = forms.CharField(required=False, label='Email')\n rusclass = forms.ModelChoiceField(queryset=RusClass.objects.filter(year=YEAR), label='Hold')\n arrived = forms.BooleanField(required=False, label='Ankommet')\n note = forms.CharField(required=False, label='Note')\n\n def clean_studentnumber(self):\n studentnumber = self.cleaned_data['studentnumber']\n if TutorProfile.objects.filter(studentnumber=studentnumber).exists():\n raise forms.ValidationError(u\"Årskortnummeret findes allerede på hjemmesiden.\")\n return studentnumber\n\nclass RusCreateView(FormView):\n template_name = 'reg/ruscreateform.html'\n form_class = RusCreateForm\n\n def get_context_data(self, **kwargs):\n context_data = super(RusCreateView, self).get_context_data(**kwargs)\n context_data['rusclass_list'] = self.get_rusclass_list()\n return context_data\n\n def get_rusclass_list(self):\n rusclass_list = RusClass.objects.filter(year=YEAR)\n for rusclass in rusclass_list:\n rusclass.notes = Note.objects.filter(subject_kind='rusclass', subject_pk=rusclass.pk)\n rusclass.arrived_rus_count = Rus.objects.filter(rusclass=rusclass, arrived=True).count()\n rusclass.rus_count = Rus.objects.filter(rusclass=rusclass).count()\n return rusclass_list\n\n def form_valid(self, form):\n d = user_tutor_data(self.request.user)\n data = form.cleaned_data\n try:\n first_name, last_name = data['name'].split(' ', 1)\n except ValueError:\n first_name, last_name = data['name'], ''\n with transaction.atomic():\n user = User.objects.create(\n username=data['studentnumber'],\n first_name=first_name,\n last_name=last_name,\n email=data['email'])\n tutorprofile = TutorProfile.objects.create(\n studentnumber=data['studentnumber'],\n user=user,\n name=data['name'],\n email=data['email'])\n rus = Rus.objects.create(\n profile=tutorprofile,\n year=YEAR,\n arrived=data['arrived'],\n rusclass=data['rusclass'])\n if data['note']:\n note = Note.objects.create(\n subject_kind='rus',\n subject_pk=rus.pk,\n body=data['note'],\n author=d.profile)\n return HttpResponseRedirect(reverse('reg_rus_list'))\n\nclass RPCError(Exception):\n pass\n\nclass RusListRPC(View):\n def get_data(self, request):\n try:\n pk = int(self.get_param('pk'))\n except 
ValueError:\n            raise RPCError(u'Invalid pk')\n        queryset = ChangeLogEntry.objects.filter(pk__gt=pk).order_by('pk')\n\n        sleep_each = 1\n        sleep_max = 5\n\n        sleep_remaining = sleep_max\n\n        while sleep_remaining > 0:\n            # undo caching\n            queryset = queryset.all()\n            if not queryset:\n                import time\n                time.sleep(sleep_each)\n                sleep_remaining -= sleep_each\n                continue\n\n            pk = max(entry.pk for entry in queryset)\n\n            payloads = []\n\n            for entry in queryset:\n                payloads.append(entry.json_of())\n\n            return {'pk': pk, 'payloads': payloads}\n\n        return {'pk': pk, 'payloads': []}\n\n    def get(self, request):\n        try:\n            data = self.get_data(request)\n        except RPCError as e:\n            data = {'error': unicode(e)}\n        return HttpResponse(json.dumps(data))\n\n    def get_param(self, param):\n        try:\n            return self.request.POST[param]\n        except KeyError:\n            try:\n                return self.request.GET[param]\n            except KeyError:\n                raise RPCError(u'Missing parameter %s' % param)\n\n    ACTIONS = (\n        'arrived',\n        'rusclass',\n        'add_rus_note',\n        'add_rusclass_note',\n        'delete_note',\n    )\n\n    def log(self, **kwargs):\n        kwargs['serialized_data'] = json.dumps(kwargs.pop('serialized_data'))\n        return ChangeLogEntry.objects.create(\n            author=self.author,\n            **kwargs).json_of()\n\n    def action_arrived(self, rus):\n        rus.arrived = not rus.arrived\n        rus.save()\n        return self.log(kind='rus_arrived',\n                related_pk=rus.pk,\n                serialized_data=rus.json_of())\n\n    def action_rusclass(self, rus, rusclass):\n        rus.rusclass = rusclass\n        rus.save()\n        return self.log(kind='rus_rusclass',\n                related_pk=rus.pk,\n                serialized_data=rus.json_of())\n\n    def action_add_rus_note(self, rus, body):\n        note = Note.objects.create(\n            subject_kind='rus',\n            subject_pk=rus.pk,\n            body=body,\n            author=self.author)\n        return self.log(kind='note_add',\n                related_pk=note.pk,\n                serialized_data=note.json_of())\n\n    def action_add_rusclass_note(self, rusclass, body):\n        note = Note.objects.create(\n            subject_kind='rusclass',\n            subject_pk=rusclass.pk,\n            body=body,\n            author=self.author)\n        return self.log(kind='note_add',\n                related_pk=note.pk,\n                serialized_data=note.json_of())\n\n    def action_delete_note(self, note):\n        note.deleted = datetime.datetime.now()\n        note.save()\n        return self.log(kind='note_delete',\n                related_pk=note.pk,\n                serialized_data=note.json_of())\n\n    def handle_post(self, request):\n        d = user_tutor_data(request.user)\n        self.author = d.profile\n\n        action = self.get_param('action')\n        if action not in self.ACTIONS:\n            # %s, not %u: action is a string, and '%u' % <str> raises TypeError\n            raise RPCError(\"Unknown action %s\" % action)\n\n        fn = getattr(self, 'action_'+action)\n\n        import inspect\n        args, varargs, keywords, defaults = inspect.getargspec(fn)\n        params = {}\n\n        if 'request' in args:\n            params['request'] = request\n\n        if 'rus' in args:\n            try:\n                params['rus'] = Rus.objects.get(year=YEAR, profile__studentnumber=self.get_param('rus'))\n            except Rus.DoesNotExist:\n                raise RPCError(u'No such rus')\n\n        if 'rusclass' in args:\n            try:\n                params['rusclass'] = RusClass.objects.get(year=YEAR, handle=self.get_param('rusclass'))\n            # each model raises its own DoesNotExist; Rus.DoesNotExist would never fire here\n            except RusClass.DoesNotExist:\n                raise RPCError(u'No such rusclass')\n\n        if 'note' in args:\n            try:\n                params['note'] = Note.objects.get(pk=self.get_param('note'))\n            except Note.DoesNotExist:\n                raise RPCError(u'No such note')\n\n        if 'body' in args:\n            params['body'] = self.get_param('body')\n\n        with transaction.atomic():\n            return fn(**params)\n\n    def post(self, request):\n        try:\n            data = self.handle_post(request)\n        except RPCError as e:\n            data = {'error': unicode(e)}\n        return HttpResponse(json.dumps(data))\n\n\nclass RusChangesView(TemplateView):\n    template_name = 
'reg/rus_changes.html'\n\n def get_context_data(self, **kwargs):\n context_data = super(RusChangesView, self).get_context_data(**kwargs)\n context_data['rus_list'] = self.get_rus_list()\n return context_data\n\n def get_rus_list(self):\n from django.db.models import F\n rus_list = (list(Rus.objects.filter(year=YEAR).exclude(rusclass=F('initial_rusclass')))\n + list(Rus.objects.filter(year=YEAR, initial_rusclass__isnull=True)))\n return rus_list\n\n\n# =============================================================================\n\nclass HandoutListView(TemplateView):\n template_name = 'reg/handout_list.html'\n\n def get_context_data(self, **kwargs):\n context_data = super(HandoutListView, self).get_context_data(**kwargs)\n\n handouts = Handout.objects.filter(year=YEAR)\n rusclasses = RusClass.objects.filter(year=YEAR)\n\n responses = HandoutClassResponse.objects.filter(handout__in=handouts, rusclass__in=rusclasses)\n response_matrix = {}\n for response in responses:\n response_matrix[(response.handout.pk, response.rusclass.pk)] = response\n\n for handout in handouts:\n handout.row = []\n for rusclass in rusclasses:\n x = (handout.pk, rusclass.pk)\n if x in response_matrix:\n handout.row.append(response_matrix[x])\n else:\n handout.row.append(HandoutClassResponse(handout=handout, rusclass=rusclass))\n\n context_data['handouts'] = handouts\n context_data['rusclasses'] = rusclasses\n\n return context_data\n\n\nclass HandoutForm(forms.Form):\n kind = forms.ChoiceField(choices=Handout.KINDS)\n name = forms.CharField()\n note = forms.CharField(required=False, widget=forms.Textarea)\n\n\nclass HandoutNewView(FormView):\n form_class = HandoutForm\n template_name = 'reg/handout_form.html'\n\n def get_context_data(self, **kwargs):\n context_data = super(HandoutNewView, self).get_context_data(**kwargs)\n\n context_data['presets'] = [{'name': name, 'kind': kind} for name, kind in Handout.PRESETS]\n\n return context_data\n\n def form_valid(self, form):\n data = form.cleaned_data\n handout = Handout(year=YEAR,\n kind=data['kind'], name=data['name'], note=data['note'])\n handout.save()\n return super(HandoutNewView, self).form_valid(form)\n\n def form_invalid(self, form):\n return super(HandoutNewView, self).form_invalid(form)\n\n def get_success_url(self):\n return reverse('handout_list')\n\n\nclass HandoutSummaryView(TemplateView):\n def get_template_names(self):\n kind = self.get_handout().kind\n if kind == u'subset':\n return ['reg/handout_summary.html']\n elif kind == u'note':\n return ['reg/handout_notes.html']\n else:\n raise AssertionError(\"Unknown handout kind\")\n\n def get_handout(self):\n if not hasattr(self, '_handout'):\n self._handout = get_object_or_404(Handout, pk__exact=self.kwargs['handout'])\n return self._handout\n\n def get_classes(self):\n handout = self.get_handout()\n year = handout.year\n rusclasses = RusClass.objects.filter(year__exact=year)\n responses = {}\n for response in HandoutClassResponse.objects.filter(handout=handout):\n responses[response.rusclass.pk] = response\n\n all_russes = list(Rus.objects\n .filter(rusclass__in=rusclasses)\n .select_related('rusclass', 'profile', 'profile__user')\n .order_by('profile__studentnumber')\n )\n\n for rusclass in rusclasses:\n rusclass.russes = [rus for rus in all_russes if rus.rusclass.pk == rusclass.pk]\n rusclass.rus_total_count = len(rusclass.russes)\n if rusclass.pk in responses:\n rusclass.response = responses[rusclass.pk]\n rusclass.has_response = True\n response_queryset = HandoutRusResponse.objects.filter(\n 
handout=handout,\n rus__in=rusclass.get_russes()).select_related('rus', 'rus__rusclass', 'rus__profile', 'rus__profile__user')\n rusclass.rus_checked_count = (response_queryset\n .filter(checkmark=True).count())\n rus_responses = {}\n for r in response_queryset:\n rus_responses[r.rus.pk] = r\n for rus in rusclass.russes:\n if rus.pk in rus_responses:\n rus.response = rus_responses[rus.pk]\n else:\n rusclass.has_response = False\n rusclass.rus_checked_count = 0\n\n return rusclasses\n\n def get_context_data(self, **kwargs):\n context_data = super(HandoutSummaryView, self).get_context_data(**kwargs)\n\n context_data['handout'] = self.get_handout()\n context_data['classes'] = self.get_classes()\n context_data['class_total_count'] = len(context_data['classes'])\n context_data['class_response_count'] = len(\n [c for c in context_data['classes'] if c.has_response])\n context_data['rus_checked_count'] = sum(r.rus_checked_count\n for r in context_data['classes'])\n context_data['rus_total_count'] = sum(r.rus_total_count\n for r in context_data['classes'])\n\n return context_data\n\n\nclass HandoutResponseForm(forms.Form):\n note = forms.CharField(required=False, widget=forms.Textarea)\n\n def __init__(self, *args, **kwargs):\n rus_list = kwargs.pop('rus_list')\n super(HandoutResponseForm, self).__init__(*args, **kwargs)\n\n for rus in rus_list:\n self.fields['rus_%s_checkmark' % rus.pk] = forms.BooleanField(required=False)\n self.fields['rus_%s_note' % rus.pk] = forms.CharField(required=False)\n\n\nclass HandoutResponseView(FormView):\n template_name = 'reg/handout_response.html'\n form_class = HandoutResponseForm\n\n def get_form_kwargs(self):\n kwargs = super(HandoutResponseView, self).get_form_kwargs()\n kwargs['rus_list'] = self.get_rus_list()\n return kwargs\n\n def get_initial(self):\n data = {'note': self.handout_response.note}\n for rus in self.rus_list:\n data['rus_%s_checkmark' % rus.pk] = rus.rus_response.checkmark\n data['rus_%s_note' % rus.pk] = rus.rus_response.note\n return data\n\n def dispatch(self, request, *args, **kwargs):\n try:\n handout = Handout.objects.get(\n year=YEAR, pk=kwargs['handout'])\n rusclass = RusClass.objects.get(\n year=YEAR, handle__exact=kwargs['rusclass'])\n except Handout.DoesNotExist:\n raise Http404()\n except RusClass.DoesNotExist:\n raise Http404()\n\n self.handout = handout\n self.rusclass = rusclass\n\n try:\n self.handout_response = HandoutClassResponse.objects.get(\n handout=handout, rusclass=rusclass)\n except HandoutClassResponse.DoesNotExist:\n self.handout_response = HandoutClassResponse(\n handout=handout, rusclass=rusclass)\n\n self.rus_list = self.get_rus_list()\n\n return super(HandoutResponseView, self).dispatch(request, *args, **kwargs)\n\n def get_rus_list(self):\n if self.handout.kind == 'note':\n return ()\n\n rus_list = (\n self.rusclass.get_russes()\n .order_by('profile__name')\n .select_related('profile', 'profile__user'))\n rus_responses = (\n HandoutRusResponse.objects.filter(handout=self.handout, rus__in=rus_list)\n .select_related('rus'))\n rus_response_map = {}\n\n for rus_response in rus_responses:\n rus_response_map[rus_response.rus.pk] = rus_response\n\n for rus in rus_list:\n if rus.pk in rus_response_map:\n rus.rus_response = rus_response_map[rus.pk]\n else:\n rus.rus_response = HandoutRusResponse(handout=self.handout, rus=rus)\n\n return rus_list\n\n def get_context_data(self, **kwargs):\n context_data = super(HandoutResponseView, self).get_context_data(**kwargs)\n\n context_data['handout'] = self.handout\n 
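# The per-rus bound fields created dynamically by HandoutResponseForm\n        # ('rus_<pk>_checkmark' / 'rus_<pk>_note') are attached to each rus below,\n        # so the template can iterate over rus_list and render one row per rus.\n        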
context_data['rusclass'] = self.rusclass\n context_data['handout_response'] = self.handout_response\n form = context_data['form']\n for rus in self.rus_list:\n rus.checkmark_field = form['rus_%s_checkmark' % rus.pk]\n rus.note_field = form['rus_%s_note' % rus.pk]\n context_data['rus_list'] = self.rus_list\n context_data['display_rus_list'] = self.handout.kind == u'subset'\n\n\n return context_data\n\n def form_valid(self, form):\n with transaction.atomic():\n data = form.cleaned_data\n self.handout_response.note = data['note']\n self.handout_response.save()\n for rus in self.rus_list:\n rus.rus_response.checkmark = data['rus_%s_checkmark' % rus.pk]\n rus.rus_response.note = data['rus_%s_note' % rus.pk]\n rus.rus_response.save()\n\n return HttpResponseRedirect(reverse('handout_list'))\n\n def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form, form_error=True))\n\n\nclass HandoutResponseDeleteView(TemplateView):\n template_name = 'reg/handout_response_delete.html'\n\n def dispatch(self, request, *args, **kwargs):\n try:\n handout = Handout.objects.get(\n year=YEAR, pk=kwargs['handout'])\n rusclass = RusClass.objects.get(\n year=YEAR, handle__exact=kwargs['rusclass'])\n except Handout.DoesNotExist:\n raise Http404()\n except RusClass.DoesNotExist:\n raise Http404()\n\n self.handout = handout\n self.rusclass = rusclass\n\n try:\n self.handout_response = HandoutClassResponse.objects.get(\n handout=handout, rusclass=rusclass)\n except HandoutClassResponse.DoesNotExist:\n raise Http404()\n\n return super(HandoutResponseDeleteView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context_data = super(HandoutResponseDeleteView, self).get_context_data(**kwargs)\n\n context_data['handout'] = self.handout\n context_data['rusclass'] = self.rusclass\n context_data['handout_response'] = self.handout_response\n\n return context_data\n\n def post(self, request, *args, **kwargs):\n HandoutRusResponse.objects.filter(handout=self.handout, rus__rusclass=self.rusclass).delete()\n self.handout_response.delete()\n return HttpResponseRedirect(reverse('handout_list'))\n\n# =============================================================================\n\nclass RusInfoListView(ListView):\n template_name = 'reg/rusinfo_list.html'\n context_object_name = 'rusclasses'\n\n def get_queryset(self):\n return RusClass.objects.filter(year__exact=YEAR).order_by('internal_name')\n\n def get(self, request):\n d = user_tutor_data(request.user)\n tutor = d.tutor\n if not tutor.is_tutorbur():\n if tutor.rusclass:\n return HttpResponseRedirect(reverse('rusinfo', kwargs={'handle': tutor.rusclass.handle}))\n else:\n return rusclass_required_error(request)\n return super(RusInfoListView, self).get(request)\n\n\nclass RusInfoForm(forms.Form):\n def __init__(self, *args, **kwargs):\n fields = kwargs.pop('fields')\n rus_list = kwargs.pop('rus_list')\n super(RusInfoForm, self).__init__(*args, **kwargs)\n self.rus_list = rus_list\n\n field_ctors = {'reset_password': forms.BooleanField}\n widget_ctors = {'reset_password': forms.CheckboxInput}\n sizes = {'street': 20, 'city': 15, 'email': 25, 'phone': 10}\n\n for rus in rus_list:\n for field in fields:\n field_ctor = field_ctors.get(field, forms.CharField)\n widget_ctor = widget_ctors.get(field, forms.TextInput)\n attrs = {}\n if field in sizes: attrs['size'] = sizes[field]\n widget = widget_ctor(attrs=attrs)\n self.fields['rus_%s_%s' % (rus.pk, field)] = field_ctor(required=False, widget=widget)\n\n def 
clean(self):\n        cleaned_data = super(RusInfoForm, self).clean()\n        for rus in self.rus_list:\n            password_field = 'rus_%s_reset_password' % rus.pk\n            email_field = 'rus_%s_email' % rus.pk\n            if (cleaned_data[password_field]\n                    and not cleaned_data[email_field]):\n                msg = u'Du skal indtaste en emailadresse for at nulstille kodeordet.'\n                self._errors[email_field] = self.error_class([msg])\n                del cleaned_data[email_field]\n                del cleaned_data[password_field]\n\n        return cleaned_data\n\n\n\nclass RusInfoView(FormView):\n    form_class = RusInfoForm\n    template_name = 'reg/rusinfo_form.html'\n\n    fields = ('street', 'city', 'email', 'phone', 'reset_password')\n\n    def get_form_kwargs(self):\n        kwargs = super(RusInfoView, self).get_form_kwargs()\n        kwargs['fields'] = self.fields\n        kwargs['rus_list'] = self.rus_list\n        return kwargs\n\n    def get_initial(self):\n        data = {}\n\n        for rus in self.rus_list:\n            data['rus_%s_street' % rus.pk] = rus.profile.street\n            data['rus_%s_city' % rus.pk] = rus.profile.city\n            data['rus_%s_email' % rus.pk] = rus.profile.email\n            data['rus_%s_phone' % rus.pk] = rus.profile.phone\n\n        return data\n\n    def dispatch(self, request, handle):\n        try:\n            d = user_tutor_data(request.user)\n        except NotTutor:\n            return tutor_required_error(request)\n        if not d.tutor:\n            return tutor_required_error(request)\n\n        self.rusclass = get_object_or_404(RusClass, handle__exact=handle, year__exact=YEAR)\n        if not d.tutor.can_manage_rusclass(self.rusclass):\n            return tutorbest_required_error(request)\n\n        self.rus_list = self.get_rus_list()\n\n        return super(RusInfoView, self).dispatch(request, handle=handle)\n\n    def get_rus_list(self):\n        return (self.rusclass.get_russes()\n                .order_by('profile__studentnumber')\n                .select_related('profile'))\n\n    def get_context_data(self, **kwargs):\n        context_data = super(RusInfoView, self).get_context_data(**kwargs)\n        form = context_data['form']\n        for rus in self.rus_list:\n            for field in self.fields:\n                setattr(rus, '%s_field' % field, form['rus_%s_%s' % (rus.pk, field)])\n        context_data['rus_list'] = self.rus_list\n        context_data['rusclass'] = self.rusclass\n        return context_data\n\n    def form_valid(self, form):\n        tutor = user_tutor_data(self.request.user)\n        changes = 0\n        messages = []\n        with transaction.atomic():\n            data = form.cleaned_data\n            for rus in self.rus_list:\n                in_street = data['rus_%s_street' % rus.pk]\n                in_city = data['rus_%s_city' % rus.pk]\n                in_email = data['rus_%s_email' % rus.pk]\n                in_phone = data['rus_%s_phone' % rus.pk]\n                in_password = data['rus_%s_reset_password' % rus.pk]\n                in_profile = (in_street, in_city, in_email, in_phone)\n                cur_profile = (rus.profile.street, rus.profile.city,\n                        rus.profile.email, rus.profile.phone)\n\n                if in_profile != cur_profile:\n                    rus.profile.street = in_street\n                    rus.profile.city = in_city\n                    rus.profile.email = in_email\n                    rus.profile.phone = in_phone\n                    rus.profile.save()\n                    changes += 1\n\n                if in_password:\n                    pwlength = 8\n                    try:\n                        p = subprocess.Popen(['/usr/bin/pwgen',\n                            '--capitalize', '--numerals', str(pwlength), '1'],\n                            stdin=subprocess.PIPE,\n                            stdout=subprocess.PIPE,\n                            stderr=subprocess.PIPE)\n                        pw, err = p.communicate()\n                        pw = pw.strip()\n                    except:\n                        # Fall back to a pseudo-random password when pwgen is unavailable.\n                        # string and random are not imported at module level, so import\n                        # them locally here (the file already uses local imports, e.g.\n                        # 'import inspect' in RusListRPC.handle_post).\n                        import random\n                        import string\n                        letters = string.ascii_letters + string.digits\n                        pw = 'r'+''.join(random.choice(letters) for i in xrange(pwlength))\n                    rus.profile.user.set_password(pw)\n\n                    msg = make_password_reset_message(\n                        rus.profile,\n                        tutor.profile,\n                        pw)\n                    messages.append(msg)\n\n                    rus.profile.user.save()\n                    changes += 1\n\n        send_messages(messages)\n        return self.render_to_response(self.get_context_data(form=form, form_saved=True, 
changes=changes))\n\n def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form, form_errors=True))\n\n# =============================================================================\n\ndef get_lightbox_state_by_study(year):\n states = LightboxRusClassState.objects.get_for_year(year)\n\n study_dict = {}\n for state in states:\n rusclass = state.rusclass\n study = rusclass.get_study()\n l = study_dict.setdefault(study, [])\n l.append(state)\n\n study_list = []\n for study in sorted(study_dict.keys()):\n l = study_dict[study]\n o = {'study': study}\n o['rusclasses'] = sorted(l, key=lambda state: state.rusclass.handle)\n study_list.append(o)\n\n return study_list\n\ndef get_lightbox_state(year):\n note = LightboxNote.objects.get_for_year(year)\n by_study = get_lightbox_state_by_study(year)\n return {'note': note, 'by_study': by_study}\n\nclass LightboxView(TemplateView):\n template_name = 'reg/burtavle.html'\n\n def get_context_data(self, **kwargs):\n context_data = super(LightboxView, self).get_context_data(**kwargs)\n\n d = get_lightbox_state(YEAR)\n context_data['note'] = d['note']\n context_data['state_by_study'] = d['by_study']\n\n return context_data\n\nburtavle = LightboxView.as_view()\n\nclass BurtavleFramesetView(TemplateView):\n template_name = 'reg/burtavle_frameset.html'\n\nburtavle_frameset = BurtavleFramesetView.as_view()\n\nclass LightboxAdminViewResponse(Exception):\n def __init__(self, o):\n self.response = o\n\n\nclass LightboxAdminForm(forms.Form):\n COLORS = (\n ('green', u'Grøn'),\n ('yellow', u'Gul'),\n ('red', u'Rød'),\n )\n\n rusclass = forms.CharField(required=False)\n color = forms.ChoiceField(choices=COLORS)\n note = forms.CharField(required=False, widget=forms.Textarea())\n\n\nclass LightboxAdminView(LightboxView):\n template_name = 'reg/burtavle_admin.html'\n\n def get_form(self):\n note = LightboxNote.objects.get_for_year(YEAR)\n form = LightboxAdminForm({'note': note.note, 'color': note.color})\n return form\n\n def get_post_response(self, request):\n d = user_tutor_data(request.user)\n\n form = LightboxAdminForm(request.POST)\n if not form.is_valid():\n return {'error': form.errors}\n\n data = form.cleaned_data\n if data['rusclass']:\n try:\n rusclass = RusClass.objects.get(year=YEAR, handle=data['rusclass'])\n except RusClass.DoesNotExist:\n return {'error': 'no such rusclass'}\n\n try:\n state = LightboxRusClassState.objects.get(rusclass=rusclass)\n except LightboxRusClassState.DoesNotExist:\n state = LightboxRusClassState(rusclass=rusclass)\n else:\n state = LightboxNote.objects.get_for_year(YEAR)\n\n state.color = data['color']\n state.note = data['note']\n state.author = d.profile\n state.save()\n return {'success': True}\n\n def post(self, request):\n try:\n data = self.get_post_response(request)\n except LightboxAdminViewResponse as e:\n data = e.response\n return HttpResponse(json.dumps(data))\n\n def get_context_data(self, **kwargs):\n context_data = super(LightboxAdminView, self).get_context_data(**kwargs)\n context_data['form'] = self.get_form()\n return context_data\n","sub_path":"mftutor/reg/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":41674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"55363603","text":"\"\"\"Ingest NCEI's ISH data into the IEM archives.\"\"\"\nimport sys\nimport subprocess\nimport os\n\nimport tqdm\nimport requests\nfrom pyiem.ncei import ds3505\nfrom pyiem.util import get_dbconn, utc, exponential_backoff, 
logger\n\nLOG = logger()\nADD_ONLY = True\nTMPDIR = \"/mesonet/tmp\"\n\n\ndef main(argv):\n \"\"\"Go\"\"\"\n pgconn = get_dbconn(\"asos\")\n airforce = int(argv[1])\n wban = int(argv[2])\n faa = argv[3]\n if len(faa) == 3:\n LOG.error(\"Provided faa ID should be 4 chars, abort\")\n return\n year = max([int(argv[4]), 1928]) # database starts in 1928\n year2 = int(argv[5])\n failedyears = []\n msgs = []\n dbid = faa if len(faa) == 4 and faa[0] != \"K\" else faa[1:]\n for year in tqdm.tqdm(range(year, year2)):\n sts = utc(year, 1, 1)\n ets = sts.replace(year=year + 1)\n cursor = pgconn.cursor()\n lfn = \"%06i-%05i-%s\" % (airforce, wban, year)\n if not os.path.isfile(\"%s/%s\" % (TMPDIR, lfn)):\n uri = \"https://www1.ncdc.noaa.gov/pub/data/noaa/%s/%s.gz\" % (\n year,\n lfn,\n )\n req = exponential_backoff(requests.get, uri, timeout=30)\n if req is None or req.status_code != 200:\n LOG.info(\"Failed to fetch %s\", uri)\n failedyears.append(year)\n continue\n with open(\"%s/%s.gz\" % (TMPDIR, lfn), \"wb\") as fh:\n fh.write(req.content)\n subprocess.call(\n \"gunzip %s/%s.gz\" % (TMPDIR, lfn),\n shell=True,\n stderr=subprocess.PIPE,\n )\n added = 0\n bad = 0\n removed = 0\n skipped = 0\n current = []\n if ADD_ONLY:\n # build out our current obs\n cursor.execute(\n \"SELECT valid at time zone 'UTC' from alldata where \"\n \"station = %s and valid >= %s and valid < %s \"\n \"ORDER by valid ASC\",\n (dbid, sts, ets),\n )\n for row in cursor:\n current.append(row[0].strftime(\"%Y%m%d%H%M\"))\n # ignore any bad bytes, sigh\n for line in open(\"%s/%s\" % (TMPDIR, lfn), errors=\"ignore\"):\n try:\n data = ds3505.parser(line.strip(), faa, add_metar=True)\n except Exception as exp:\n print(f\"failed to parse line: '{line.strip()}'\")\n print(exp)\n data = None\n if data is None:\n bad += 1\n continue\n if added == 0 and not ADD_ONLY:\n cursor.execute(\n \"DELETE from alldata where station = %s and \"\n \"valid >= %s and valid < %s\",\n (dbid, sts, ets),\n )\n if cursor.rowcount > 0:\n LOG.info(\"deleted %s rows for %s\", cursor.rowcount, dbid)\n removed = cursor.rowcount\n if ADD_ONLY and data[\"valid\"].strftime(\"%Y%m%d%H%M\") in current:\n skipped += 1\n continue\n res = ds3505.sql(cursor, faa, data)\n if res is None:\n skipped += 1\n else:\n added += 1\n msgs.append(\n (\" %s: %s added: %s removed: %s bad: %s\" \" skipped: %s\")\n % (year, faa, added, removed, bad, skipped)\n )\n cursor.close()\n pgconn.commit()\n os.unlink(\"%s/%s\" % (TMPDIR, lfn))\n LOG.info(\" failed years: %s\", failedyears)\n LOG.info(\"\\n\".join(msgs))\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"scripts/ingestors/ncdc/ingest_isd.py","file_name":"ingest_isd.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"427685094","text":"from functools import wraps\n\n\ndef singleton(cls):\n original_new = cls.__new__\n\n @wraps(cls.__new__)\n def wrapper(cls, *args, **kw):\n instance = getattr(cls, \"single_instance\", None)\n if instance:\n return instance\n instance = original_new(cls)\n instance.__init__(*args, **kw)\n cls.single_instance = instance\n return instance\n\n cls.__new__ = wrapper\n return cls\n\n\nclass singletonmethod:\n \"\"\"\n Decorates a method that needs a class instance to be run\n but have to be called as if it where a classmethod.\n \"\"\"\n\n def __init__(self, meth):\n self.meth = meth\n\n def __get__(self, instance, owner):\n if not instance:\n instance = getattr(owner, \"single_instance\", 
None)\n if not instance:\n raise RuntimeError(\"singletonmethod attempted on non-initialized class\")\n return lambda *args, **kwargs: self.meth(instance, *args, **kwargs)\n","sub_path":"keyloop/ext/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354279324","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\sboon\\AppData\\Local\\Temp\\pip-install-ptdbtr91\\quarchpy\\quarchpy\\qps\\qpsFuncs.py\n# Compiled at: 2020-03-25 05:10:07\n# Size of source mod 2**32: 5022 bytes\nimport os, sys, datetime, time, platform\nfrom quarchpy.qis import isQisRunning, startLocalQis\nfrom quarchpy.connection_specific.connection_QIS import QisInterface\nfrom quarchpy.connection_specific.connection_QPS import QpsInterface\nfrom quarchpy.user_interface import *\nimport subprocess, logging\n\ndef isQpsRunning(host='127.0.0.1', port=9822):\n answer = '0'\n try:\n myQps = QpsInterface(host, port)\n answer = myQps.sendCmdVerbose(cmd='$list')\n except:\n pass\n\n if answer is None or answer == '':\n logging.debug('QPS did not return expected output from $list')\n logging.debug('$list: ' + str(answer))\n return False\n if answer[0] == '1' or 'no device' in str(answer).lower() or 'no module' in str(answer).lower():\n return True\n logging.debug('QPS did not return expected output from $list')\n logging.debug('$list: ' + str(answer))\n return False\n\n\ndef startLocalQps(keepQisRunning=True, args=None):\n if keepQisRunning:\n if not isQisRunning():\n startLocalQis()\n else:\n QpsPath = os.path.dirname(os.path.abspath(__file__))\n QpsPath, junk = os.path.split(QpsPath)\n QpsPath = os.path.join(QpsPath, 'connection_specific', 'QPS', 'qps.jar')\n current_direc = os.getcwd()\n os.chdir(os.path.dirname(QpsPath))\n command = '-jar \"' + QpsPath + '\"'\n currentOs = platform.system()\n if currentOs in 'Windows':\n command = 'start /high /b javaw -Djava.awt.headless=true ' + command\n os.system(command)\n else:\n if currentOs in 'Linux':\n if sys.version_info[0] < 3:\n os.popen2('java ' + command + ' 2>&1')\n else:\n os.popen('java ' + command + ' 2>&1')\n else:\n command = 'start /high /b javaw -Djava.awt.headless=true ' + command\n os.system(command)\n while not isQpsRunning():\n time.sleep(0.1)\n\n os.chdir(current_direc)\n\n\ndef closeQps(host='127.0.0.1', port=9822):\n myQps = QpsInterface(host, port)\n myQps.sendCmdVerbose('$shutdown')\n del myQps\n\n\ndef GetQpsModuleSelection(QpsConnection, favouriteOnly=True, additionalOptions=[], scan=True):\n tableHeaders = ['Module']\n devList = QpsConnection.getDeviceList(scan=scan)\n if 'no device' in devList[0].lower() or 'no module' in devList[0].lower():\n favouriteOnly = False\n devList = [x for x in devList if 'rest' not in x]\n message = 'Select a quarch module'\n if favouriteOnly:\n index = 0\n sortedDevList = []\n conPref = ['USB', 'TCP', 'SERIAL', 'REST', 'TELNET']\n while len(sortedDevList) != len(devList):\n for device in devList:\n if conPref[index] in device.upper():\n sortedDevList.append(device)\n\n index += 1\n\n devList = sortedDevList\n favConDevList = []\n index = 0\n for device in sortedDevList:\n if favConDevList == [] or device.split('::')[1] not in str(favConDevList):\n favConDevList.append(device)\n\n devList = favConDevList\n myDeviceID = listSelection(title=message, message=message, 
selectionList=devList, additionalOptions=additionalOptions, nice=True, tableHeaders=tableHeaders, indexReq=True)\n return myDeviceID\n\n\ndef legacyAdjustTime(timestamp):\n return timestamp\n\n\ndef toQpsTimeStamp(timestamp):\n if type(timestamp) is datetime:\n newTime = time.mktime(timestamp.timetuple())\n return int(newTime * 1000)\n if type(timestamp) is float or type(timestamp) is int:\n return int(timestamp)\n try:\n timestamp = float(timestamp)\n return int(timestamp)\n except:\n newTime = time.mktime(datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S:%f').timetuple())\n return int(newTime * 1000)","sub_path":"pycfiles/quarchpy-2.0.14-py2.py3-none-any/qpsFuncs.cpython-37.py","file_name":"qpsFuncs.cpython-37.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"96870088","text":"import math\nfrom django.core import serializers\nfrom django.db import models\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom homepage.models import Slider, Blog, AppUpdate\n\n\n# 轮播图\ndef getSlider(request):\n # 获取前三个\n # sliders=Slider.objects.all()\n sliders = Slider.objects.values('id', 'title', 'img', 'url').order_by(\"-id\")[:4]\n\n data = {\n 'code': 200,\n 'detail': list(sliders),\n 'msg': 'success'\n }\n return JsonResponse(data, safe=False)\n\n\n# 轮播图详情\ndef getSliderContent(request):\n id = request.GET.get('id', 0)\n blog = Slider.objects.values('content').get(id=id)\n return render(request, 'blog.html', {'blog': blog})\n\n\n# 获取博客\ndef getBlog(request):\n page = int(request.GET.get('page', 0))\n page_size = int(request.GET.get('page_size', 15))\n\n allBlog = Blog.objects.values('id', 'type', 'title', 'url', 'origin', 'img', 'describe', 'readCount',\n 'create_date').order_by(\"-id\")\n\n current_page = page - 1\n start = current_page * page_size\n blogs = allBlog[start:start + page_size]\n\n detail = {\n 'currentPage': page,\n 'totalPage': math.ceil(len(allBlog) / int(page_size)),\n 'blog': list(blogs)\n }\n\n data = {\n 'code': 200,\n 'detail': detail,\n 'msg': 'success'\n }\n return JsonResponse(data, safe=False)\n\n\n# 获取blog内容\ndef getBlogContent(request):\n id = request.GET.get('id', 0)\n blog = Blog.objects.values('content').get(id=id)\n return render(request, 'blog.html', {'blog': blog})\n\n\n# 阅读量加1\ndef readBlog(request):\n blog_id = request.GET['id']\n target = Blog.objects.get(id=blog_id)\n currentRead = target.readCount + 1\n Blog.objects.filter(id=blog_id).update(readCount=currentRead)\n data = {\n 'code': 200,\n 'detail': blog_id + '阅读量' + str(currentRead),\n 'msg': 'success'\n }\n return JsonResponse(data, safe=False)\n\n\n# 检查版本\ndef checkVersion(request):\n app_code = request.GET.get('app_code', 0)\n last_version = AppUpdate.objects.last()\n data = {}\n if last_version.version_code > int(app_code):\n data['code'] = 201\n detail = {'url': last_version.path.url, 'versionName': last_version.version_name,\n 'describe': last_version.content}\n data['detail'] = detail\n data['msg'] = \"update\"\n else:\n data['code'] = 200\n data['detail'] = \"null\"\n data['msg'] = \"success\"\n\n return JsonResponse(data, safe=False)\n","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"626735245","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport 
sys\n\n# Complete the countingValleys function below.\ndef countingValleys(n, s):\n valleyCount = 0\n elevation = 0\n inValley = False\n for direction in s:\n if direction == 'U':\n elevation += 1\n if direction == 'D':\n elevation -= 1\n if elevation < 0:\n inValley = True\n if inValley == True and elevation == 0:\n valleyCount += 1\n inValley = False\n return valleyCount\n\nif __name__ == '__main__':\n s = 'UDDDUDUU'\n result = countingValleys(8, s)\n print(result)\n","sub_path":"Warm Ups/Counting_Valleys/Counting_Valleys.py","file_name":"Counting_Valleys.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"491824786","text":"# -*- coding: utf-8 -*-\n\n\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass Aluno (models.Model):\n\n matricula = models.CharField(\n 'matricula',\n max_length=50,\n )\n\n nome = models.CharField(\n 'nome',\n max_length=80)\n \n email = models.EmailField(\n 'email', \n max_length=254)\n\n curso = models.CharField(\n 'curso',\n max_length=50,\n blank = True,\n null = True)\n\n senha = models.CharField(\n 'senha',\n max_length=50,\n )\n\n def __str__(self):\n return self.nome\n\nclass Tipo(models.Model):\n nome = models.CharField(\n 'nome_tipo',\n max_length = 20\n )\n\n def __str__(self):\n return self.nome\n\nclass Setor(models.Model):\n nome = models.CharField(\n 'nome_setor',\n max_length = 30\n )\n\n def __str__(self):\n return self.nome\n\nclass Maquina(models.Model):\n \n numeracao = models.CharField(\n 'codigo-pc',\n max_length=50)\n\n setor = models.ForeignKey(\n Setor,\n verbose_name = 'setor',\n related_name = 'maquina'\n )\n\n\n def __str__(self):\n return self.numeracao\n \n\nclass Solicitar_servico(models.Model):\n \n matricula = models.ForeignKey(\n Aluno,\n verbose_name = 'Matricula',\n related_name = 'servico',\n )\n \n setor = models.ForeignKey(\n Setor,\n verbose_name = 'setor',\n related_name = 'servico',\n )\n\n image = models.ImageField(\n 'imagem',\n upload_to=None,\n max_length=100 ,\n blank = True,\n null = True\n )\n\n text = models.TextField() \n\n \n def __str__(self):\n return '%s - %s' % (self.matricula, self.setor)\n \n\nclass Mensagem(models.Model):\n matricula = models.ForeignKey(\n Aluno,\n verbose_name = 'Matricula',\n related_name = 'Mensagem',\n )\n\n text = models.TextField()\n\n critica = models.ForeignKey(\n Tipo,\n verbose_name = 'critica',\n related_name = 'Mensagem',\n )\n\n def __str__(self):\n return '%s - %s' % (self.matricula, self.critica)\n\n","sub_path":"app_ouvidoria/aplicacao/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"517317885","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Usage:\n#\n# $ python -m fixit.cli.run_rules --help\n# $ python -m fixit.cli.run_rules\n# $ python -m fixit.cli.run_rules --rules AvoidOrInExceptRule\n# $ python -m fixit.cli.run_rules . --rules AvoidOrInExceptRule NoUnnecessaryListComprehensionRule\n# $ python -m fixit.cli.run_rules . --rules AvoidOrInExceptRule my.custom.rules.package\n# $ python -m fixit.cli.run_rules . 
--rules fixit.rules\n\nimport argparse\nimport itertools\nimport shutil\nimport sys\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Iterable, Mapping, Optional, Sequence\n\nfrom libcst import ParserSyntaxError, parse_module\nfrom libcst.metadata import MetadataWrapper\n\nfrom fixit.cli import find_files, map_paths\nfrom fixit.cli.args import (\n get_compact_parser,\n get_multiprocessing_parser,\n get_paths_parser,\n get_rules_parser,\n get_skip_ignore_byte_marker_parser,\n get_use_ignore_comments_parser,\n)\nfrom fixit.cli.formatter import LintRuleReportFormatter\nfrom fixit.cli.full_repo_metadata import (\n get_metadata_caches,\n rules_require_metadata_cache,\n)\nfrom fixit.cli.utils import print_red\nfrom fixit.common.utils import LintRuleCollectionT\nfrom fixit.rule_lint_engine import lint_file\n\n\nif TYPE_CHECKING:\n from libcst.metadata.base_provider import ProviderT\n\n\n@dataclass(frozen=True)\nclass LintOpts:\n rules: LintRuleCollectionT\n use_ignore_byte_markers: bool\n use_ignore_comments: bool\n formatter: LintRuleReportFormatter\n\n\ndef get_formatted_reports_for_path(\n path: Path,\n opts: LintOpts,\n metadata_cache: Optional[Mapping[\"ProviderT\", object]] = None,\n) -> Iterable[str]:\n with open(path, \"rb\") as f:\n source = f.read()\n\n try:\n cst_wrapper = None\n if metadata_cache is not None:\n cst_wrapper = MetadataWrapper(parse_module(source), True, metadata_cache)\n raw_reports = lint_file(\n path,\n source,\n rules=opts.rules,\n use_ignore_byte_markers=opts.use_ignore_byte_markers,\n use_ignore_comments=opts.use_ignore_comments,\n cst_wrapper=cst_wrapper,\n )\n except (SyntaxError, ParserSyntaxError) as e:\n print_red(\n f\"Encountered the following error while parsing source code in file {path}:\"\n )\n print(e)\n return []\n\n # linter completed successfully\n return [opts.formatter.format(rr) for rr in raw_reports]\n\n\ndef main(raw_args: Sequence[str]) -> int:\n parser = argparse.ArgumentParser(\n description=(\n \"Validates your lint rules by running them against the specified, \"\n + \"directory or file(s). This is not a substitute for unit tests, \"\n + \"but it can provide additional confidence in your lint rules.\\n\"\n + \"If no lint rules or packages are specified, runs all lint rules \"\n + \"found in the packages specified in `fixit.config.yaml`.\"\n ),\n parents=[\n get_paths_parser(),\n get_rules_parser(),\n get_use_ignore_comments_parser(),\n get_skip_ignore_byte_marker_parser(),\n get_compact_parser(),\n get_multiprocessing_parser(),\n ],\n )\n\n parser.add_argument(\n \"--cache-timeout\",\n type=int,\n help=\"Timeout (seconds) for metadata cache fetching. 
Default is 2 seconds.\",\n default=2,\n )\n\n args = parser.parse_args(raw_args)\n width = shutil.get_terminal_size(fallback=(80, 24)).columns\n\n # expand path if it's a directory\n file_paths = tuple(find_files(args.paths))\n all_rules = args.rules\n\n if not args.compact:\n print(f\"Scanning {len(file_paths)} files\")\n print(f\"Testing {len(all_rules)} rules\")\n print()\n start_time = time.time()\n\n metadata_caches: Optional[Mapping[str, Mapping[\"ProviderT\", object]]] = None\n if rules_require_metadata_cache(all_rules):\n metadata_caches = get_metadata_caches(args.cache_timeout, file_paths)\n\n # opts is a more type-safe version of args that we pass around\n opts = LintOpts(\n rules=all_rules,\n use_ignore_byte_markers=args.use_ignore_byte_markers,\n use_ignore_comments=args.use_ignore_comments,\n formatter=LintRuleReportFormatter(width, args.compact),\n )\n\n formatted_reports_iter = itertools.chain.from_iterable(\n map_paths(\n get_formatted_reports_for_path,\n file_paths,\n opts,\n workers=args.workers,\n metadata_caches=metadata_caches,\n )\n )\n\n formatted_reports = []\n for formatted_report in formatted_reports_iter:\n # Reports are yielded as soon as they're available. Stream the output to the\n # terminal.\n print(formatted_report)\n # save the report from the iterator for later use\n formatted_reports.append(formatted_report)\n\n if not args.compact:\n print()\n print(\n f\"Found {len(formatted_reports)} reports in {len(file_paths)} files in \"\n + f\"{time.time() - start_time :.2f} seconds.\"\n )\n\n # Return with an exit code of 1 if there are any violations found.\n return int(bool(formatted_reports))\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n","sub_path":"fixit/cli/run_rules.py","file_name":"run_rules.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"227272099","text":"import json\nimport string\nimport time\n\nJSON_Base_Path = \"./static/JSON/\"\n\n# Paths\nCountries_JSON_path = JSON_Base_Path + \"Countries.json\"\nCountries_JSON_path_new = JSON_Base_Path + \"Countries_new.json\"\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# Utility Functions\n#\n\ndef Open_JSON( File_Location ):\n \n with open( File_Location, \"r\", encoding=\"utf-8\" ) as JSON_RAW_INFO:\n\n JSON_DATA = json.load( JSON_RAW_INFO )\n\n return JSON_DATA\n\ndef Save_JSON( JSON, filename, indent ):\n # open(filename, 'a', encoding=\"utf-8\")\n\n #print(json.dump(\n #JSON))\n\n out_file = open( filename, \"w\", encoding=\"utf-8\" ) \n \n # print( \"Before Dump\" )\n\n if( indent == None ):\n json.dump( JSON, out_file ) \n if( indent == 4 ):\n json.dump( JSON, out_file, indent = 4 ) \n \n # print( \"After Dump\" )\n \n out_file.close()\n \n return \"Success!!!\"\n\ndef Get_Countries( ):\n return Countries_JSON\n#\n#\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# Initilization\n#\n\n# Ran at begining of the Application to initilize the objects\ndef Initialize_OBJS( ):\n global Countries_JSON\n print( \"\\n\\nPre-Initilization\\n\\n\" )\n Countries_JSON = Open_JSON( Countries_JSON_path )\n \n print( \"Initilization Done!!!\" )\n # Check later!\n #global Milage_To_Vendors_JSON\n #Milage_To_Vendors_JSON = Open_JSON( Milage_To_Vendors_JSON_path )\n \n #global UPC_ITEMS_JSON \n #UPC_ITEMS_JSON = Open_JSON( 
UPC_ITEMS_JSON_path )\n\nInitialize_OBJS()\n#\n#\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# Use Functions / In application functions\n#\n\ndef Get_Country_Names( ):\n print( \"Inside: Get_Country_Names( )\" )\n All_Countries_Names = []\n for Country in Countries_JSON['Countries']:\n All_Countries_Names.append( Country['name'] )\n\n return All_Countries_Names\n#\n#\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# Test Area\n#\n\n\n#\n#\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n","sub_path":"scripts/IO_Stuff/JSON_Functions.py","file_name":"JSON_Functions.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"35333518","text":"import time\r\nimport numpy as np\r\nimport pyeeg\r\nimport scipy\r\nimport matplotlib.pyplot as plt\r\nimport sklearn\r\nfrom sklearn import svm\r\nfrom getdata import GetData\r\nfrom getdata2 import GetData2\r\nimport pickle\r\nfrom sklearn.externals import joblib\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom scipy import interp\r\n#from pythonosc import osc_message_builder\r\n#from pythonosc import udp_client\r\n\r\nfrom OSC import OSCClient\r\n\r\nfile = 'mbt-conc.txt' \r\nfile23 = 'mbt-dist.txt' \r\nfid = open(file, 'r')\r\n\r\nfile2 = '91t.txt'\r\nlines = fid.readlines()\r\nnof = len(lines)-6\r\nprint(nof)\r\n\r\n\r\nlastnum=0\r\npo=400\r\n\r\npfds = np.zeros((4,int(nof/po)))\r\ndfas = np.zeros((4,int(nof/po)))\r\nhursts = np.zeros((4,int(nof/po)))\r\nbins = np.zeros(((int(nof/po),4,2,4)))\r\n\r\n\r\ndef getdata(num,):\r\n\tglobal lastnum\r\n\tglobal pfds\r\n\tglobal dfas\r\n\tglobal hursts\r\n\tglobal bins\r\n\tglobal nof\r\n\tglobal po\r\n\t#file = 'C:\\\\Users\\\\Ammar Raufi\\\\Desktop\\\\openbci\\\\software\\\\application.windows64\\\\SavedData\\\\OpenBCI-RAW-2017-03-18_18-46-49.txt' \r\n\r\n\t#fid = open(file, 'r')\r\n\t\r\n\t#lines = fid.readlines()\r\n\r\n\t#numberOfFrames = len(lines)-6\r\n\t#print(numberOfFrames-lastnum)\r\n\t\r\n\t\r\n\tchannels = np.zeros((4,po))\r\n\t\r\n\t#alpha = np.zeros(4)\r\n\tfor x in range(0,po): #numberOfFrames-lastnum-6\r\n\t\t\r\n\t\tfor y in range(0,4):\r\n\t\t\t\r\n\t\t\tchannels[y,x] = float(lines[lastnum+x+6].split(',')[y+1])\r\n\t\r\n\t#alpha=[]\r\n\tif((nof-lastnum)!=0):\r\n\t\tfor x in range(0,4):\r\n\t\t\thursts[x,num] = pyeeg.hurst(channels[x])\r\n\t\t\t#pfds[x,num] = pyeeg.pfd(channels[x])\r\n\t\t\t#dfas[x,num] = pyeeg.dfa(channels[x])\t\t\t\r\n\t\t\tbins[num,x] = pyeeg.bin_power(channels[x], [0.5,4,7,12,30], 200)\t\r\n\t\t\tk=1\r\n\r\n\tprint (lastnum)\r\n\t\t#print (alpha)\r\n\t\t\r\n\tlastnum=lastnum+po\r\n\treturn channels[0]\r\n\r\n\t\r\n\t\r\ndef ml(hursts,bt,pfds,hfd,targets,nof):\r\n\r\n\tdata = np.zeros((nof,16))\t\r\n\tfor i in range (0,int(nof)):\r\n\t\tfor y in range (0,4):\r\n\t\t\r\n\t\t\tdata[i,y] = hursts[y,i]\r\n\t\t\tdata[i,y+4] = bt[y,i]\r\n\t\t\tdata[i,y+8]=pfds[y,i]\r\n\t\t\tdata[i,y+12]=hfd[y,i]\t\t\r\n\t\t\r\n\t#print(data)\r\n\tclf = svm.SVC(kernel='linear', C=100,class_weight={2:3}) #support v\r\n\tclf_lda = LinearDiscriminantAnalysis()\r\n\t#clf = 
joblib.load('classifier.pkl') \t\r\n\t\r\n\ttargets2=np.zeros((len(targets)))\r\n\tdata2=np.zeros((len(data)))\r\n\t\r\n\tfor i in range(0,len(data)):\t\r\n\t\ttargets2[i] = int(targets[i])\r\n#\tprint(targets2.ravel())\r\n#\ty = label_binarize(targets2.ravel(), classes=[1, 2])\r\n#\tprint(y)\r\n#\tn_classes = y.shape[1]\r\n\t\r\n#\tX_train, X_test, y_train, y_test = train_test_split(data,y.ravel(), test_size=.5)\r\n#\ty_score = clf.fit(X_train, y_train).decision_function(X_test)\r\n#\ty_score2 = clf_lda.fit(X_train, y_train).decision_function(X_test)\r\n\t\r\n#\tfpr = dict()\r\n#\ttpr = dict()\r\n#\troc_auc = dict()\r\n\r\n\t\r\n\t#\tfpr, tpr, _ = roc_curve(y_test, y_score)\r\n#\tfpr2, tpr2, _ = roc_curve(y_test, y_score2)\r\n#\troc_auc = auc(fpr, tpr)\r\n#\troc_auc2 = auc(fpr2, tpr2)\r\n\t\r\n\t\t\r\n\r\n\t#plt.figure()\r\n\t#lw = 2\r\n\t#plt.plot(fpr, tpr, color='darkorange',\r\n#\t\t\t lw=lw, label='ROC curve SVM (area = %0.2f)' % roc_auc)\r\n#\tplt.plot(fpr2, tpr2, color='green',\r\n#\t\t\t lw=lw, label='ROC curve LDA(area = %0.2f)' % roc_auc2)\r\n#\tplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n#\tplt.xlim([0.0, 1.0])\r\n#\tplt.ylim([0.0, 1.05])\r\n#\tplt.xlabel('False Positive Rate')\r\n#\tplt.ylabel('True Positive Rate')\r\n#\tplt.title('Receiver operating characteristic')\r\n#\tplt.legend(loc=\"lower right\")\t\r\n#\tplt.show()\r\n\t\r\n\t\r\n\r\n\t\r\n\t\r\n\t\t\r\n\t\r\n\ttargets2 = np.reshape(targets2,(len(data),1))\r\n\t#print (targets2)\r\n\t\r\n\t\r\n\tfor i in range (0,int(nof)):\r\n\t\tif(np.all(np.isfinite(data[i]))==False):\r\n\t\t\tfor y in range (0,len(data[i])):\r\n\t\t\t\tif(np.isnan(data[i,y])):\t\t\t\t\t\r\n\t\t\t\t\tdata[i,y] = 0.4 \r\n\t\t\r\n\t\t\r\n\t#parameters = {'kernel': ('linear', 'rbf'), 'C': [50,60,70,80,90,100,110,120,130,140,150,300,400]}\r\n\t#svr = svm.SVC()\r\n\t#clf8 = grid_search.GridSearchCV(svr, parameters)\r\n\t\r\n\tc, r = targets2.shape\r\n\ttargets2 = targets2.reshape(c,)\r\n\t#clf8.fit(data, targets2)\r\n\t#print(clf8.best_params_)\r\n\t#time.sleep(10)\r\n\t\t\r\n\tclf.fit(data, targets2)\r\n\tclf_lda.fit(data, targets2)\r\n\t\r\n#\tfor i in range (0,len(data)):\r\n\t\t#print(data[i].reshape(1,-1))\r\n#\t\ta=clf.predict(data[i].reshape(1,-1))\r\n#\t\tb=clf_lda.predict(data[i].reshape(1,-1))\r\n\t\t\r\n#\t\tif(a==[1.]):\r\n#\t\t\tprint('concentrated')\r\n#\t\telse:\r\n#\t\t\tprint('distracted')\r\n\t\t\r\n#\t\tif(b==[1.]):\r\n#\t\t\tprint('lda concentrated')\r\n#\t\telse:\r\n#\t\t\tprint('lda distracted')\r\n\t\t\r\n\tjoblib.dump(clf, 'classifier.pkl') \r\n\tjoblib.dump(clf_lda, 'classifier_lda.pkl') \r\n\r\n\t\r\n\t\r\n\t\r\ndef get_state_features(state):\r\n\t\r\n\tnof = len(state)\r\n\tpo = 600\r\n\t\r\n\tpfds = np.zeros((4,int(nof/po)))\r\n\t\r\n\tap_entropy = np.zeros((4,int(nof/po)))\r\n\thursts = np.zeros((4,int(nof/po)))\r\n\thfd = np.zeros((4,int(nof/po)))\r\n\tbins = np.zeros(((int(nof/po),4,2,5)))\r\n\t\r\n\tlastnum=0\r\n\r\n\tfor i in range (0,(int(nof/po))):\r\n\t\tchannels = np.zeros((4,po))\t\t\r\n\t\tchannels2 = np.zeros((4,po))\r\n\t\tchannels3 = np.zeros((4,po))\r\n\t\tchannels4 = np.zeros((4,po))\r\n\t\tchannels5 = np.zeros((4,po))\r\n\t\t\r\n\t\tfor x in range(0,po):\t\t\t\r\n\t\t\tfor y in range(0,4):\t\t\t\t\r\n\t\t\t\tchannels[y,x] = float(state[lastnum+x,y])\r\n\t\t\t\t\r\n\t\tfor y in range(0,4):\t\t\t\t\r\n\t\t\tchannels[y] = scipy.signal.savgol_filter(channels[y], 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\t\r\n\t\t\r\n\t\t#for y in range(0,4):\r\n\t\t\r\n\t\t\t#nyq = 0.5 
* 200\r\n\t\t\t#low = 1 / nyq\r\n\t\t\t#high = 50 / nyq\r\n\t\t\t#high2 = 70 / nyq\r\n\t\t\t#high3 = 90 / nyq\r\n\t\t\t#high4 = 95 / nyq\r\n\t\t\t#b, a = butter(5, [low, high], btype='band')\r\n\t\t\t#b2, a2 = butter(5, [low, high2], btype='band')\r\n\t\t\t#b3, a3 = butter(5, [low, high3], btype='band')\r\n\t\t\t#b4, a4 = butter(5, [low, high4], btype='band')\r\n\t\t\t\r\n\t\t\t#channels2[y] = lfilter(b, a, channels[y])\r\n\t\t\t#channels3[y] = lfilter(b2, a2, channels[y])\r\n\t\t\t#channels4[y] = lfilter(b3, a3, channels[y])\r\n\t\t\t#channels5[y] = lfilter(b4, a4, channels[y])\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t#x = np.linspace(0,len(channels[1]),len(channels[1]))\r\n\t\t#f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\r\n\t\t#f.suptitle(\"Time Series\")\r\n\t\t#ax1.set_ylabel('Amplitude (uV)')\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t#ax1.plot(x, channels2[0],color='red')\r\n\t\t#ax1.plot(x, channels3[0],color='blue')\r\n\t\t#ax1.plot(x, channels4[0],color='blue')\r\n\t\t\r\n\t\t#ax1.plot(x, channels[0])\r\n\t\t#ax1.plot(x, channels5[0],color='yellow')\r\n\t\t#ax1.plot(x, y4)\r\n\t\t#ax1.plot(x, y5,color='red')\r\n\t\t#ax1.plot(x, y4,color='green')\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t#ax1.set_title('Fp1')\r\n\t\t\r\n\t\t#ax2.plot(x, channels2[1],color='red')\r\n\t\t#ax2.plot(x, channels3[1],color='blue')\r\n\t\t\r\n\t\t#ax2.plot(x, channels4[1],color='blue')\r\n\t\t#ax2.plot(x, channels[1])\r\n\t\t#ax2.plot(x, y5)\r\n\t\t#ax2.set_title('Fp2')\r\n\t\t\r\n\t\t#ax3.plot(x, channels2[2],color='red')\r\n\t\t#ax3.plot(x, channels3[2],color='blue')\r\n\t\t#ax3.plot(x, channels4[2],color='blue')\r\n\t\t#ax3.plot(x,channels[2])\r\n\t\t#ax3.plot(x,y6)\r\n\t\t#ax3.set_title('O1')\r\n\t\t#ax3.set_xlabel('sample')\r\n\t\t#ax3.set_ylabel('Amplitude (uV)')\r\n\t\t\r\n\t\t#ax4.plot(x, channels2[3],color='red')\r\n\t\t#ax4.plot(x, channels3[3],color='blue')\r\n\t\t#ax4.plot(x, channels4[3],color='blue')\r\n\t\t#ax4.plot(x,channels[3])\r\n\t\t#ax4.plot(x,y6)\r\n\t\t#ax4.set_title('O2')\r\n\t\t#ax4.set_xlabel('sample')\r\n\t\t#plt.show()\r\n\t\t\r\n\t\tif((nof-lastnum)!=0):\r\n\t\t\tfor x in range(0,4):\r\n\t\t\t\thursts[x,i] = pyeeg.hurst(channels[x])\r\n\t\t\t\tpfds[x,i] = pyeeg.pfd(channels[x])\t\r\n\t\t\t\t#ap_entropy[x,i] = pyeeg.ap_entropy(X, M, R)\r\n\t\t\t\thfd[x,i] = pyeeg.hfd(channels[x],15)\r\n\t\t\t\tbins[i,x] = pyeeg.bin_power(channels[x], [0.5,4,7,12,15,18], 200)\t\t\t\t\r\n\t\t\t\tk=1\r\n\t\tlastnum=lastnum+po\r\n\t\r\n\treturn pfds,dfas,hursts,bins,hfd\t\r\n\t\r\n\t\r\nif __name__ == \"__main__\":\r\n\tglobal nof\r\n\tglobal po\r\n\tclient = OSCClient()\r\n\tclient.connect( (\"127.0.0.1\", 8000) )\t\r\n\r\n\tforever=True\r\n\t\r\n\tdata_getter = GetData()\r\n\tdata_getter2 = GetData2()\r\n\t\r\n\r\n\t#concentrated,resting,blinking,normal_pics,trippy_pics = data_getter.get_states(file,file2)\r\n\tconcentrated= data_getter2.get_states(file)\r\n\tresting= data_getter2.get_states(file23)\r\n\t\r\n\tprint(concentrated)\r\n\t\r\n\tpfds_c,dfas_c,hursts_c,bins_c,hfd_c=get_state_features(concentrated)\r\n\tpfds_r,dfas_r,hursts_r,bins_r,hfd_r=get_state_features(resting)\r\n\t\r\n\t\r\n\tdelta_c= np.zeros((4,len(bins_c)))\r\n\tbeta_c= np.zeros((4,len(bins_c)))\r\n\talpha_c= np.zeros((4,len(bins_c)))\r\n\ttheta_c= np.zeros((4,len(bins_c)))\r\n\tbt_c = np.zeros((4,len(bins_c)))\r\n\t\r\n\tdelta_r= np.zeros((4,len(bins_r)))\r\n\tbeta_r= np.zeros((4,len(bins_r)))\r\n\talpha_r= np.zeros((4,len(bins_r)))\r\n\ttheta_r= np.zeros((4,len(bins_r)))\r\n\tbt_r = 
np.zeros((4,len(bins_r)))\r\n\t\r\n\t\r\n\tfor i in range (0,len(bins_c)):\r\n\t\tfor y in range (0,4):\r\n\t\t\tdelta_c[y,i] = bins_c[i,y,0,0]\r\n\t\t\ttheta_c[y,i] = bins_c[i,y,0,1]\r\n\t\t\talpha_c[y,i] = bins_c[i,y,0,2]\r\n\t\t\tbeta_c[y,i] = bins_c[i,y,0,4]\r\n\t\t\tbt_c[y,i] = theta_c[y,i]/beta_c[y,i]\r\n\t\r\n\tfor i in range (0,len(bins_r)):\r\n\t\tfor y in range (0,4):\r\n\t\t\tdelta_r[y,i] = bins_r[i,y,0,0]\r\n\t\t\ttheta_r[y,i] = bins_r[i,y,0,1]\r\n\t\t\talpha_r[y,i] = bins_r[i,y,0,2]\r\n\t\t\tbeta_r[y,i] = bins_r[i,y,0,4]\r\n\t\t\tbt_r[y,i] = theta_r[y,i]/beta_r[y,i]\r\n\t\r\n\t\r\n\tall_hursts = np.zeros((4,len(hursts_c[0])+len(hursts_r[0])))\t\r\n\tall_bt = np.zeros((4,len(hursts_c[0])+len(hursts_r[0])))\r\n\tall_pfds = np.zeros((4,len(hursts_c[0])+len(hursts_r[0])))\t\r\n\tall_hfd = np.zeros((4,len(hursts_c[0])+len(hursts_r[0])))\r\n\tall_targets = np.zeros((len(hursts_c[0])+len(hursts_r[0])))\r\n\t\r\n\t\r\n\t\r\n\tprint(len(hursts_c[0]))\r\n\tprint(len(hursts_r[0]))\r\n\tfor i in range (0,len(hursts_c[0])):\r\n\t\tfor y in range (0,4):\r\n\t\t\tall_hursts[y,i] = hursts_c[y,i]\r\n\t\t\tall_bt[y,i] = bt_c[y,i]\r\n\t\t\tall_pfds[y,i] = pfds_c[y,i]\r\n\t\t\tall_hfd[y,i] = hfd_c[y,i]\r\n\t\t\t\r\n\t\tall_targets[i] = 1\r\n\t\t\t\r\n\tfor i in range (len(hursts_c[0]),len(hursts_c[0])+len(hursts_r[0])):\r\n\t\tfor y in range (0,4):\r\n\t\t\tall_hursts[y,i] = hursts_r[y,i%len(hursts_c[0])]\r\n\t\t\tall_bt[y,i] = bt_r[y,i%len(hursts_c[0])]\r\n\t\t\tall_pfds[y,i] = pfds_r[y,i%len(hursts_c[0])]\r\n\t\t\tall_hfd[y,i] = hfd_r[y,i%len(hursts_c[0])]\r\n\t\t\t\r\n\t\tall_targets[i] = 2\t\r\n\r\n\t#print(all_hursts[0])\r\n\tx = np.linspace(0,len(all_bt[0])*3,len(all_bt[0]))\r\n\t#x2 = np.linspace(0,len(all_tmps[0]),len(all_tmps[0]))\r\n\t#print (x)\r\n\t\r\n\ty=all_hursts[0]\r\n\ty1=all_bt[0]\r\n\ty2=all_pfds[0]\r\n\ty3=all_hfd[0]\r\n\ty4= scipy.signal.savgol_filter(y, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\r\n\tplt.plot(x, y)\r\n\tplt.title(\"Hurst Exponent\")\r\n\tplt.ylabel(\"Hurst Exponent\")\r\n\tplt.xlabel(\"Time(s)\")\r\n\tplt.show()\r\n\t\r\n\tplt.plot(x, y1)\r\n\tplt.title(\"Theta/Beta\")\r\n\tplt.ylabel(\"Theta/Beta\")\r\n\tplt.xlabel(\"Time(s)\")\r\n\tplt.show()\r\n\tplt.plot(x, y2)\r\n\tplt.title(\"Petrosian Fractal Dimensions\")\r\n\tplt.ylabel(\"PFD\")\r\n\tplt.xlabel(\"Time(s)\")\r\n\tplt.show()\r\n\tplt.plot(x, y3)\r\n\tplt.title(\"Higuchi Fractal Dimensions\")\r\n\tplt.ylabel(\"HFD\")\r\n\tplt.xlabel(\"Time(s)\")\r\n\tplt.show()\r\n\t\r\n\tplt.plot(x, y)\r\n\tplt.plot(x, y4,color='red')\r\n\tplt.title(\"Smoothed Hurst Exponent\")\r\n\tplt.ylabel(\"HFD\")\r\n\tplt.xlabel(\"Time(s)\")\r\n\tplt.show()\r\n\t\r\n\t#y=all_tmps[0]\r\n\t#y1=all_tmps[1]\r\n\t#y2=all_tmps[2]\r\n\t#y3=all_tmps[3]\r\n\t#y5= scipy.signal.savgol_filter(y, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\ty4= scipy.signal.savgol_filter(y, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\ty5= scipy.signal.savgol_filter(y1, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\ty6= scipy.signal.savgol_filter(y2, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\ty7= scipy.signal.savgol_filter(y3, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\r\n\t\r\n\t\r\n\t\r\n\t#plt.plot(x, y5,color='red')\r\n\t#plt.plot(x, y4,color='green')\r\n\tplt.show()\r\n\t\r\n\tf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\r\n\t\r\n\tax1.plot(x, 
y)\r\n\tf.suptitle(\"Features\")\r\n\tax1.set_ylabel(\"Hurst Exponent\")\r\n\tax3.set_ylabel(\"Hurst Exponent\")\r\n\tax3.set_xlabel(\"Time (s)\")\r\n\tax4.set_xlabel(\"Time (s)\")\r\n\t#ax1.plot(x, y4)\r\n\t#ax1.plot(x, y5,color='red')\r\n\t#ax1.plot(x, y4,color='red')\r\n\t\r\n\tax1.set_title('hurst - ch1')\r\n\tax2.plot(x, y1)\r\n\t#ax2.plot(x, y5,color='red')\r\n\tax2.set_title('ch2')\r\n\tax3.plot(x,y2)\r\n\t#ax3.plot(x,y6,color='red')\r\n\tax3.set_title('ch3')\r\n\tax4.plot(x,y3)\r\n\t#ax4.plot(x,y7,color='red')\r\n\tax4.set_title('ch4')\r\n\tplt.show()\r\n\t\r\n\t\r\n\ty=all_bt[0]\r\n\ty1=all_bt[1]\r\n\ty2=all_bt[2]\r\n\ty3=all_bt[3]\r\n\t#y= scipy.signal.savgol_filter(y, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y1= scipy.signal.savgol_filter(y1, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y2= scipy.signal.savgol_filter(y2, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y3= scipy.signal.savgol_filter(y3, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\r\n\t\r\n\tf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\r\n\tf.suptitle(\"Theta/Beta\")\r\n\tax1.set_ylabel(\"Theta/Beta\")\r\n\tax3.set_ylabel(\"Theta/Beta\")\r\n\tax3.set_xlabel(\"Time (s)\")\r\n\tax4.set_xlabel(\"Time (s)\")\r\n\t\r\n\tax1.plot(x, y)\r\n\tax1.set_title('Fp1')\r\n\tax2.plot(x, y1)\r\n\tax2.set_title('Fp2')\r\n\tax3.plot(x,y2)\r\n\tax3.set_title('O1')\r\n\tax4.plot(x,y3)\r\n\tax4.set_title('O2')\r\n\tplt.show()\r\n\t\r\n\t\r\n\ty=all_pfds[0]\r\n\ty1=all_pfds[1]\r\n\ty2=all_pfds[2]\r\n\ty3=all_pfds[3]\r\n\t#y= scipy.signal.savgol_filter(y, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y1= scipy.signal.savgol_filter(y1, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y2= scipy.signal.savgol_filter(y2, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y3= scipy.signal.savgol_filter(y3, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\r\n\t\r\n\tf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\r\n\t\r\n\t\r\n\tf.suptitle(\"PFD\")\r\n\tax1.set_ylabel(\"PFD\")\r\n\tax3.set_ylabel(\"PFD\")\r\n\tax3.set_xlabel(\"Time (s)\")\r\n\tax4.set_xlabel(\"Time (s)\")\r\n\t\r\n\t\r\n\tax1.plot(x, y)\r\n\tax1.set_title('Fp1')\r\n\tax2.plot(x, y1)\r\n\tax2.set_title('Fp2')\r\n\tax3.plot(x,y2)\r\n\tax3.set_title('O1')\r\n\tax4.plot(x,y3)\r\n\tax4.set_title('O2')\r\n\tplt.show()\r\n\t\r\n\ty=all_hfd[0]\r\n\ty1=all_hfd[1]\r\n\ty2=all_hfd[2]\r\n\ty3=all_hfd[3]\r\n\t#y= scipy.signal.savgol_filter(y, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y1= scipy.signal.savgol_filter(y1, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y2= scipy.signal.savgol_filter(y2, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t#y3= scipy.signal.savgol_filter(y3, 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\r\n\t\r\n\tf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\r\n\tf.suptitle(\"Higuchi Fractial Dimensions\")\r\n\tax1.set_ylabel(\"HFD\")\r\n\tax3.set_ylabel(\"HFD\")\r\n\tax3.set_xlabel(\"Time (s)\")\r\n\tax4.set_xlabel(\"Time (s)\")\r\n\t\r\n\t\r\n\tax1.plot(x, y)\r\n\tax1.set_title('Fp1')\r\n\tax2.plot(x, y1)\r\n\tax2.set_title('Fp2')\r\n\tax3.plot(x,y2)\r\n\tax3.set_title('O1')\r\n\tax4.plot(x,y3)\r\n\tax4.set_title('O2')\r\n\tplt.show()\t\r\n\t\t\r\n\tfor i in range (0,4):\r\n\t\tall_hursts[i]= 
scipy.signal.savgol_filter(all_hursts[i], 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\tall_bt[i]= scipy.signal.savgol_filter(all_bt[i], 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\tall_pfds[i]= scipy.signal.savgol_filter(all_pfds[i], 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\tall_hfd[i]= scipy.signal.savgol_filter(all_hfd[i], 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\r\n\t\r\n\t\r\n\tml(all_hursts,all_bt,all_pfds,all_hfd,all_targets,len(all_hursts[0]))\r\n\t\r\n\t\r\n\t\r\n\tabc = np.zeros((4,nof))\r\n\t\r\n\tfor x in range(0,nof): #numberOfFrames-lastnum-6\t\t\r\n\t\tfor y in range(0,4):\t\t\t\r\n\t\t\tabc[y,x] = float(lines[x+6].split(',')[y+1])\r\n\t\r\n\tPxx, freqs, bins2, im = plt.specgram(abc[0], Fs=200)\r\n\tplt.show()\r\n\t\r\n\t\r\n\t\r\n\t\r\n\tfor i in range (0,(int(nof/po))):\r\n\t\tok = getdata(i)\r\n\t\tforever= False\r\n\r\n\ta= np.zeros((int(nof/po))) # int casts added below: np.zeros, range and linspace require integer counts in Python 3\r\n\tb= np.zeros((int(nof/po)))\r\n\tc= np.zeros((int(nof/po)))\r\n\td = np.zeros((int(nof/po)))\r\n\te = np.zeros((int(nof/po)))\t\r\n\tfor i in range (0,int(nof/po)):\r\n\t\ta[i] = bins[i,0,1,0]\r\n\t\tb[i] = bins[i,0,1,1]\r\n\t\tc[i] = bins[i,0,1,2]\r\n\t\td[i] = bins[i,0,1,3]\r\n\t\te[i] = b[i]/d[i]\t\t\r\n\tx = np.linspace(0,nof,int(nof/po))\r\n\r\n\ty= a.copy()\t\r\n\ty1= b\t\r\n\ty2= c\r\n\ty3= d\t\r\n\tf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\r\n\tax1.plot(x, y)\r\n\tax1.set_title('delta (0.5-4)')\r\n\tax2.plot(x, y1)\r\n\tax2.set_title('theta (4-7)')\r\n\tax3.plot(x,y2)\r\n\tax3.set_title('alpha (7-12)')\r\n\tax4.plot(x,y3)\r\n\tax4.set_title('beta (12-30)')\r\n\tplt.show()\r\n\t\r\n\ty4 = hursts[1]\r\n\t\r\n\tf = plt.plot(x,y4)\r\n\tplt.title(\"hurst expo\")\r\n\tplt.show()\r\n\t\r\n\t\r\n\ty5 = pfds[1]\r\n\t\r\n\tf = plt.plot(x,y5)\r\n\tplt.title(\"pfds\")\r\n\tplt.show()\r\n\t\r\n\t\r\n\ty6 = dfas[1]\r\n\t\r\n\tf = plt.plot(x,y6)\r\n\tplt.title(\"dfas\")\r\n\tplt.show()\r\n\t\r\n\ty7 = e\r\n\t\r\n\tf = plt.plot(x,y7)\r\n\tplt.title(\"theta/beta\")\r\n\tplt.show()","sub_path":"proj/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":16435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"620831416","text":"def merge_sort(l,start,end):\n    if start<end:\n        mid=(start+end)//2\n        merge_sort(l,start,mid)\n        merge_sort(l,mid+1,end)\n        merge(l,start,mid,end)\n\n\ndef merge(l,start,mid,end):\n    a=l[start:mid+1]\n    b=l[mid+1:end+1]\n    n1=len(a)\n    n2=len(b)\n    i=0\n    j=0\n    k=start\n    while i!=n1 and j!=n2:\n        if a[i]>b[j]:\n            l[k]=b[j]\n            k=k+1\n            j=j+1\n        else:\n            l[k]=a[i]\n            k=k+1\n            i=i+1\n    while i!=n1:\n        l[k]=a[i]\n        k=k+1\n        i=i+1\n    while j!=n2:\n        l[k]=b[j]\n        k=k+1\n        j=j+1\n\n\nif __name__==\"__main__\":\n    l=[12,13,14,15,9,10,7,6]\n    merge_sort(l,0,7)\n    print(l)\n\n    \n    \n    ","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"23322573","text":"from hellperTool import *\n\n\n@timer()\ndef bubbleSort(nums):\n    for i in range(len(nums) - 1): # this outer loop sets how many bubble passes run (for n numbers, n-1 passes are enough to sort them)\n        for j in range(len(nums) - i - 1):\n            if nums[j] > nums[j + 1]:\n                nums[j], nums[j + 1] = nums[j + 1], nums[j]\n    return nums\n\n# Variant one\n\n\nif __name__ == '__main__':\n    arr = generateRandomArray(10000,0,10)\n    bubbleSort(arr)","sub_path":"LanguageDatastruct/PythonLearn/someSort/bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"154433745","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the 
ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom .db.db_engine import DBsession\nfrom .models.user import User\nimport redis\nfrom telnetlib import Telnet\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport datetime\nfrom .redis_pool import redisPool\nfrom redis import Redis\n\nclass UserspiderPipeline(object):\n    def process_item(self, item, spider):\n\n        if True:\n            # save the scraped user to MySQL\n            session = DBsession()\n            new_user = User(user_id=item.get('user_id', ''),\n                            user_nickname=item.get('user_nickname', ''),\n                            signature=item.get('signature', ''),\n                            location=item.get('location', ''),\n                            check_in_time=item.get('check_in_time', ''),\n                            user_intro=item.get('user_intro', ''),\n                            books_wanted=item.get('books_wanted', ''),\n                            books_red=item.get('books_red', ''),\n                            movies_wanted=item.get('movies_wanted', ''),\n                            movies_watched=item.get('movies_watched', ''),\n                            groups=item.get('groups', ''),\n                            dou_list=item.get('dou_list', ''))\n\n            session.add(new_user)\n            session.commit()\n            session.close()\n        if True:\n            # store the IDs of followed users in Redis\n            user_ids = item.get('follow_by', '').split(' ')\n            r = redis.Redis(connection_pool=redisPool)\n            # merge the data from 'userid_wanted' into 'userid_used'\n            # r.sunionstore('userid_used', 'userid_used', 'userid_wanted')\n            # clear 'userid_wanted'\n            # r.delete('userid_wanted')\n            # write the users this user follows into 'userid_wanted'\n            r.sadd('userid_wanted', *user_ids)\n            r.sadd('userid_used',item.get('user_id',''))\n        return item\n\n\nclass ProxyIpspiderPipeline(object):\n\n    def telnet_test(self, url):\n        ip = url.split('/')[2]\n        host, port = ip.split(':')\n        try:\n            Telnet(host, port=port, timeout=2)\n        except:\n            return 'unavail'\n        else:\n            return url\n\n    def process_item(self, item, spider):\n        all_ips = list(set(item.get('xici_ips',[])).union(item.get('data5u_ips',[]), item.get('ip66_ips',[])))\n        # test whether each proxy IP is reachable\n        pool = ThreadPool(10)\n        available_ips = list(set(pool.map(self.telnet_test,all_ips)))\n        pool.close()\n        pool.join()\n        if 'unavail' in available_ips: # fix: remove() raises ValueError when every proxy passes the telnet test\n            available_ips.remove('unavail')\n        print(available_ips)\n        # save the usable proxies to Redis\n        r = Redis(connection_pool=redisPool)\n        r.delete('ip_pool')\n        r.sadd('ip_pool', *available_ips)\n","sub_path":"userSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"15991433","text":"import os\nimport datetime\nimport time\nfrom multiprocessing import Process\n\nfrom flask import Flask, request, abort\nfrom linebot import LineBotApi, WebhookParser\nfrom linebot.exceptions import InvalidSignatureError, LineBotApiError\nfrom linebot.models import MessageEvent, TextMessage, TextSendMessage\n\n#from service.hardcode import send_menu\n#from service.blockchain.functionality import init_blockchain\n#from service.basic import send_text_message\n#from service.firebase import get_user_list, init_db\n#from machine import create_machine\nfrom dotenv import load_dotenv\n\nimport pandas as pd\nimport datetime as dt\nimport matplotlib.pyplot as plt\nfrom binance_f import RequestClient\nfrom binance_f.model import *\nfrom binance_f.constant.test import *\nfrom binance_f.base.printobject import *\nimport time\nimport threading\nimport math\n\n\nload_dotenv()\napp = Flask(__name__, static_url_path=\"\")\n\n# Get channel_secret and channel_access_token from environment variable\nchannel_access_token = os.getenv(\"LINE_CHANNEL_ACCESS_TOKEN\", None)\nchannel_secret = os.getenv(\"LINE_CHANNEL_SECRET\", None)\n\nline_bot_api = LineBotApi(channel_access_token)\nparser = 
WebhookParser(channel_secret)\n\n# Unique FSM for each user\nmachines = {}\n\n# get line user list\nuser_list = []#get_user_list()\n\n# the 'api_key_secret.txt' should be a text file which record your api_key on first line and api_secret on second line\n\nwith open('src/api_key_secret.txt') as f:\n lines = f.read().splitlines() \n f.close()\n# API Key (You need to get these from Binance account)\ng_api_key = lines[0]\ng_api_secret = lines[1]\n# connect to binance futures using api\nrequest_client = RequestClient(api_key=g_api_key, secret_key=g_api_secret)\n\ndef pair_price_ratio_bollinger_information(_symbol_1, _symbol_2):\n \n history_days = 30\n \n \n # fist coin\n result = request_client.get_candlestick_data(symbol=_symbol_1, interval=CandlestickInterval.DAY1, \n\t\t\t\t\t\t\t\t\t\t\t\tstartTime= None, endTime=None, limit =history_days)\n D = pd.DataFrame()\n D['open_date_time'] = [dt.datetime.fromtimestamp((result[x].closeTime / 1000)-86400) for x in range(len(result))]\n D['open'] = [result[x].open for x in range(len(result))]\n D['close'] = [result[x].close for x in range(len(result))]\n D['low'] = [result[x].low for x in range(len(result))]\n D['high'] = [result[x].high for x in range(len(result))]\n D['volume'] = [result[x].volume for x in range(len(result))]\n D['c_l_h_mean'] = (D['close'].astype(float)+D['low'].astype(float)+D['high'].astype(float))/3\n D['percentage_change'] = D['c_l_h_mean'].pct_change()\n \n # second coin\n result = request_client.get_candlestick_data(symbol=_symbol_2, interval=CandlestickInterval.DAY1, \n\t\t\t\t\t\t\t\t\t\t\t\tstartTime= None, endTime=None, limit =history_days)\n \n D_2 = pd.DataFrame()\n D_2['open_date_time'] = [dt.datetime.fromtimestamp((result[x].closeTime / 1000)-86400) for x in range(len(result))]\n D_2['open'] = [result[x].open for x in range(len(result))]\n D_2['close'] = [result[x].close for x in range(len(result))]\n D_2['low'] = [result[x].low for x in range(len(result))]\n D_2['high'] = [result[x].high for x in range(len(result))]\n D_2['volume'] = [result[x].volume for x in range(len(result))]\n D_2['c_l_h_mean'] = (D_2['close'].astype(float)+D_2['low'].astype(float)+D_2['high'].astype(float))/3\n D_2['percentage_change'] = D_2['c_l_h_mean'].pct_change()\n \n # caculate bollinger's band\n \n bands_width = 2 # staddard deviation(unit)\n \n pairs_df = pd.DataFrame()\n pairs_df['c_l_h_mean'] = D['c_l_h_mean']/D_2['c_l_h_mean']\n pairs_df['std'] = pairs_df['c_l_h_mean'].rolling(20).std(ddof=0)\n pairs_df['MA'] = pairs_df['c_l_h_mean'].rolling(20).mean()\n pairs_df['BOLU'] = pairs_df['MA'] + bands_width*pairs_df['std']\n pairs_df['BOLD'] = pairs_df['MA'] - bands_width*pairs_df['std']\n pairs_df = pairs_df.dropna()\n \n \n return pairs_df\n\n\n\n# Signaling function\n\nrun_flag = 1\nrenew_frequency = 60\nrenew_check_interval= 5\ntext_for_line_bot_later = ''\n\ndef pair_trading_signal(symbol_1 = \"BTCUSDT\", symbol_2 = \"ETHUSDT\" , leverage_ = 2, contract_amount = 0.001, lookback_ = 20, entryZ_ = 2, exitZ_ = 1 ):\n \n global text_for_line_bot_later\n global run_flag\n global renew_frequency\n global renew_check_interval\n \n '''\n now = dt.datetime.now()\n dt_string = now.strftime(\"%Y-%d-%m %H:%M:%S\")\n symbol_to_trade_futures = symbol_ ## \"BTCUSDT\"\n ## accountName='U123456'\n\n leverage_ratio = leverage_#2\n numContract= contract_amount #0.001\n lookback= lookback_ # Number of periods used to compute Bollinger band ex.20\n entryZ= entryZ_ #2 # The distance between the upper and the lower band is 2*entryZ ex. 
2\n exitZ= exitZ_ # Exit when price revert to 1 standard deviation from the mean ex.1\n\n\n result = request_client.change_initial_leverage(symbol=symbol_to_trade_futures, leverage=leverage_ratio)# adjust the leverage ratio\n '''\n pos = 0\n\n while run_flag == 1:\n\n result_symbol_1 = request_client.get_mark_price(symbol_1)\n p_1 = result_symbol_1.markPrice\n result_symbol_2 = request_client.get_mark_price(symbol_2)\n p_2 = result_symbol_2.markPrice\n p_distance = p_1/p_2 # define the distance to ratio\n time_of_signal = str(dt.datetime.fromtimestamp((result_symbol_2.time / 1000)-86400)) \n #Calculate deviation of ask or bid price from moving average\n temp = pair_price_ratio_bollinger_information(symbol_1, symbol_2)\n mstd = temp['std'].to_list()[-1]\n ma = temp['MA'].to_list()[-1]\n #zscoreAsk=(askPrice-ma)/mstd;\n #zscoreBid=(bidPrice-ma)/mstd;\n \n zscore_p_distance = (p_distance-ma)/mstd\n \n \n '''\n print('UB : ',(ma+(2*mstd)),\", Mean : \",ma,\", LB : \",(ma-(2*mstd)))\n print('distance now', p_distance,'zscore distance now :', zscore_p_distance)\n \n '''\n \n if zscore_p_distance > 2 and pos== 0 :\n print('Time of signal : ', time_of_signal)\n print('進場訊號! long : ', symbol_2,', short : ', symbol_1)\n pos = -1\n\n text_content = '\\n\\nTime of signal : ' + time_of_signal + '進場訊號! long : '+symbol_2+', short : '+symbol_1\n \n #text_for_line_bot_later\n text_for_line_bot_later += text_content\n \n \n for user_id in user_list:\n try:\n line_bot_api.push_message(user_id, TextSendMessage(text=text_content))\n except LineBotApiError as e:\n print(e) \n \n if zscore_p_distance < 1 and pos== -1 :\n print('Time of signal : ', time_of_signal)\n print('出場訊號! long : ', symbol_1,', short : ', symbol_2)\n pos = 0 \n \n text_content = 'Time of signal : ' + time_of_signal + '出場訊號! long : '+symbol_1+', short : '+symbol_2\n \n #text_for_line_bot_later\n text_for_line_bot_later += text_content\n \n for user_id in user_list:\n try:\n line_bot_api.push_message(user_id, TextSendMessage(text=text_content))\n except LineBotApiError as e:\n print(e) \n\n\n if zscore_p_distance < -2 and pos==0:\n print('Time of signal : ', time_of_signal)\n print('進場訊號! long : ', symbol_1,', short : ', symbol_2)\n pos = 1\n\n text_content = 'Time of signal : ' + time_of_signal + '進場訊號! long : '+symbol_1+', short : '+symbol_2\n \n #text_for_line_bot_later\n text_for_line_bot_later += text_content\n \n for user_id in user_list:\n try:\n line_bot_api.push_message(user_id, TextSendMessage(text=text_content))\n except LineBotApiError as e:\n print(e) \n \n if zscore_p_distance > -1 and pos==1:\n print('Time of signal : ', time_of_signal)\n print('出場訊號! long : ', symbol_2,', short : ', symbol_1)\n pos = 0\n\n text_content = 'Time of signal : ' + time_of_signal + '出場訊號! 
long : '+symbol_2+', short : '+symbol_1\n \n #text_for_line_bot_later\n text_for_line_bot_later += text_content\n \n for user_id in user_list:\n try:\n line_bot_api.push_message(user_id, TextSendMessage(text=text_content))\n except LineBotApiError as e:\n print(e) \n \n for i in range(0,math.ceil(renew_frequency/renew_check_interval)):\n \n if run_flag==0:\n break\n \n time.sleep(renew_check_interval)\n \n \n print('finish running')\n \ndef stop_robot():\n global run_flag\n global renew_frequency\n run_flag = 0\n time.sleep(renew_check_interval*(2))\n run_flag = 1\n\ndef symbol_pairs_generator():\n pair_main = 'BTCUSDT'\n pair_others = ['LTCUSDT','ETHUSDT','EOSUSDT','XRPUSDT','BCHUSDT','DOGEUSDT','DOTUSDT','BNBUSDT','ADAUSDT','UNIUSDT','SOLUSDT','LINKUSDT','MATICUSDT','ICPUSDT']\n pairs = []\n for i in pair_others :\n pairs.append((pair_main,i))\n return pairs\n\npairs_list = symbol_pairs_generator()\n\ndef run_robot(_pairs_list) :\n thread_list = []\n for i in _pairs_list:\n thread_list.append(threading.Thread(target = pair_trading_signal, args=(i[0], i[1], 2, 0.001, 20, 2, 1 ), daemon = True))\n for i in thread_list:\n i.start()\n\n\nline_is_running = 0\n\n@app.route(\"/webhook\", methods = [\"POST\"])\ndef webhook_handler():\n \n global line_is_running \n global text_for_line_bot_later\n \n \n signature = request.headers[\"X-Line-Signature\"]\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(f\"Request body: {body}\")\n\n # parse webhook body\n try:\n events = parser.parse(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n for event in events:\n if not isinstance(event, MessageEvent):\n continue\n if not isinstance(event.message, TextMessage):\n continue\n if not isinstance(event.message.text, str):\n continue\n \n \n \n \n if str(event.message.text).strip() == 'bot' or str(event.message.text).strip() == 'Bot' :\n \n\n if line_is_running ==0:\n run_robot(pairs_list)\n line_is_running = 1\n \n time.sleep(4) \n if str(event.source.type).strip() == \"group\":\n if (event.source.groupId not in user_list):\n user_list.append(event.source.groupId)\n \n \n #print(event.message.text)\n #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=event.message.text))\n line_bot_api.reply_message(event.reply_token,TextSendMessage(text='您好! 
我是您的投資助理,目前投資機會 : '+text_for_line_bot_later))\n            if (event.source.user_id not in user_list):\n                user_list.append(event.source.user_id)\n            \n        else:\n            #line_bot_api.reply_message(event.reply_token,TextSendMessage('Nothing detected....'))\n            pass # no-op placeholder so the else branch is not empty\n        \n        text_for_line_bot_later = '' ## do not resend signals that were already reported\n        \n    return "OK"\n\n\n\n\nif __name__ == "__main__":\n    port = os.environ.get("PORT", 8000)\n    # https://stackoverflow.com/questions/55436443/how-to-thread-a-flask-app-and-function-with-a-while-loop-to-run-simultaneously\n    # Process(target=app.run, kwargs=dict(host='0.0.0.0', port=port)).start()\n    # Process(target=loop_notify_users).start()\n    app.run(host="0.0.0.0", port=port)\n    \n    \n    \n    ","sub_path":"2021_07_26_linebot_pair_signal/linebot_pair_bollinger_signal.py","file_name":"linebot_pair_bollinger_signal.py","file_ext":"py","file_size_in_byte":11784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"31537962","text":"from datetime import datetime\r\nstart_real = datetime.now()\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.linear_model import Ridge\r\nfrom sklearn.linear_model import RidgeCV\r\nfrom sklearn.pipeline import FeatureUnion\r\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\r\n\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Input, Dropout, Dense, concatenate, GRU, Embedding, Flatten, Activation\r\n# from keras.layers import Bidirectional\r\nfrom keras.optimizers import Adam\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\nfrom nltk.corpus import stopwords\r\nimport math\r\n# set seed\r\nnp.random.seed(123)\r\n\r\n\r\ndef rmsle(Y, Y_pred):\r\n    assert Y.shape == Y_pred.shape\r\n    return np.sqrt(np.mean(np.square(Y_pred - Y )))\r\n\r\n\r\n# get name and description lengths\r\ndef wordCount(text):\r\n    try:\r\n        if text == 'No description yet':\r\n            return 0\r\n        else:\r\n            text = text.lower()\r\n            words = [w for w in text.split(\" \")]\r\n            return len(words)\r\n    except:\r\n        return 0\r\n\r\n\r\n# split category name into 3 parts\r\ndef split_cat(text):\r\n    try:\r\n        return text.split(\"/\")\r\n    except:\r\n        return \"No Label\", \"No Label\", \"No Label\"\r\n\r\n\r\n# ratio optimum finder for 3 models\r\ndef get_best_ratio(Y_dev_preds_rnn, Y_dev_preds_ridgeCV, Y_dev_preds_ridge):\r\n    best1 = 0\r\n    best2 = 0\r\n    lowest = 0.99\r\n    for i in range(100):\r\n        for j in range(100):\r\n            r = i*0.01\r\n            r2 = j*0.01\r\n            if r+r2 < 1.0:\r\n                Y_dev_preds = aggregate_predicts3(Y_dev_preds_rnn, Y_dev_preds_ridgeCV, Y_dev_preds_ridge, r, r2)\r\n                fpred = rmsle(Y_dev, Y_dev_preds)\r\n                if fpred < lowest:\r\n                    best1 = r\r\n                    best2 = r2\r\n                    lowest = fpred\r\n                # print(str(r)+\"-RMSL error for RNN + Ridge + RidgeCV on dev set:\", fpred)\r\n    return best1, best2\r\n\r\n\r\ndef aggregate_predicts3(Y1, Y2, Y3, ratio1, ratio2):\r\n    assert Y1.shape == Y2.shape\r\n    return Y1 * ratio1 + Y2 * ratio2 + Y3 * (1.0 - ratio1-ratio2)\r\n\r\n\r\n# Filling missing values\r\ndef fill_missing_values(df):\r\n    df.category_name.fillna(value=\"missing\", inplace=True)\r\n    df.brand_name.fillna(value=\"missing\", inplace=True)\r\n    df.item_description.fillna(value=\"missing\", inplace=True)\r\n    df.item_description.replace('No description yet',\"missing\", inplace=True)\r\n    return df\r\n\r\n\r\ndef brandfinder(line):\r\n    brand = line[0]\r\n    name = line[1]\r\n    namesplit = name.split(' ')\r\n    if brand == 'missing':\r\n        for x in namesplit:\r\n            if x in all_brands:\r\n                return name\r\n    if name in all_brands:\r\n        
return name\n return brand\n\n\ntimettl1 = time.time()\n\n# train_df = pd.read_table('../input/train.tsv')\n# test_df = pd.read_table('../input/test.tsv')\npath = \"D:/Project/Price/\"\ntrain_df = pd.read_table(path + \"train.tsv\", sep=None, engine='python')\ntest_df = pd.read_table(path + \"test.tsv\", sep=None, engine='python')\nprint(train_df.shape, test_df.shape)\n\n# 去除低价商品\ntrain_df = train_df.drop(train_df[(train_df.price < 3.0)].index)\nprint(train_df.shape)\n\n# stop = stopwords.words('english')\n# train_df.item_description.fillna(value='No description yet', inplace=True)\n# train_df['item_description'] = train_df['item_description'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n# train_df.name.fillna(value=\"missing\", inplace=True)\n# train_df['name'] = train_df['name'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n\n# test_df.item_description.fillna(value='No description yet', inplace=True)\n# test_df['item_description'] = test_df['item_description'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n# test_df.name.fillna(value=\"missing\", inplace=True)\n# test_df['name'] = test_df['name'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n\n# train_df.head()\n\ntrain_df['desc_len'] = train_df['item_description'].apply(lambda x: wordCount(x))\ntest_df['desc_len'] = test_df['item_description'].apply(lambda x: wordCount(x))\ntrain_df['name_len'] = train_df['name'].apply(lambda x: wordCount(x))\ntest_df['name_len'] = test_df['name'].apply(lambda x: wordCount(x))\nprint(train_df.head())\n\ntrain_df['subcat_0'], train_df['subcat_1'], train_df['subcat_2'] = zip(*train_df['category_name'].apply(lambda x: split_cat(x)))\ntest_df['subcat_0'], test_df['subcat_1'], test_df['subcat_2'] = zip(*test_df['category_name'].apply(lambda x: split_cat(x)))\n\n# %%time\n# attempt to find missing brand names\n# train_df['name'] = train_df.name.str.lower()\n# train_df['brand_name'] = train_df.brand_name.str.lower()\n# test_df['name'] = test_df.name.str.lower()\n# test_df['brand_name'] = test_df.brand_name.str.lower()\nfull_set = pd.concat([train_df, test_df])\nall_brands = set(full_set['brand_name'].values)\ntrain_df.brand_name.fillna(value=\"missing\", inplace=True)\ntest_df.brand_name.fillna(value=\"missing\", inplace=True)\n\n# get to finding!\npremissing = len(train_df.loc[train_df['brand_name'] == 'missing'])\n\ntrain_df['brand_name'] = train_df[['brand_name', 'name']].apply(brandfinder, axis=1)\ntest_df['brand_name'] = test_df[['brand_name', 'name']].apply(brandfinder, axis=1)\nfound = premissing-len(train_df.loc[train_df['brand_name'] == 'missing'])\nprint(found)\n\n# Scale target variable to log.\ntrain_df[\"target\"] = np.log1p(train_df.price)\n\n# Split training examples into train/dev examples.\ntrain_df, dev_df = train_test_split(train_df, random_state=123, train_size=0.99)\n\n# Calculate number of train/dev/test examples.\nn_trains = train_df.shape[0]\nn_devs = dev_df.shape[0]\nn_tests = test_df.shape[0]\nprint(\"Training on\", n_trains, \"examples\")\nprint(\"Validating on\", n_devs, \"examples\")\nprint(\"Testing on\", n_tests, \"examples\")\n\n# Concatenate train - dev - test data for easy to handle\nfull_df = pd.concat([train_df, dev_df, test_df])\n\nprint(\"Filling missing data...\")\nfull_df = fill_missing_values(full_df)\nprint(full_df.category_name[1])\n\nprint(\"Processing categorical data...\")\nle = LabelEncoder()\n# full_df.category = 
full_df.category_name\nle.fit(full_df.category_name)\nfull_df['category'] = le.transform(full_df.category_name)\n\nle.fit(full_df.brand_name)\nfull_df.brand_name = le.transform(full_df.brand_name)\n\nle.fit(full_df.subcat_0)\nfull_df.subcat_0 = le.transform(full_df.subcat_0)\n\nle.fit(full_df.subcat_1)\nfull_df.subcat_1 = le.transform(full_df.subcat_1)\n\nle.fit(full_df.subcat_2)\nfull_df.subcat_2 = le.transform(full_df.subcat_2)\n\ndel le\n\n# # Break category_name into parts\n# def catgsub(col):\n# col = col.str.replace(' ','')\n# col = col.str.replace('/',' ')\n# col = col.str.replace('&','')\n# return col\n# full_df['category_name'] = catgsub(full_df['category_name'])\n# print(full_df.category_name[1])\n\nprint(\"Transforming text data to sequences...\")\nraw_text = np.hstack([full_df.item_description.str.lower(), full_df.name.str.lower(), full_df.category_name.str.lower()])\n\nprint(\" Fitting tokenizer...\")\ntok_raw = Tokenizer()\ntok_raw.fit_on_texts(raw_text)\n\nprint(\" Transforming text to sequences...\")\nfull_df['seq_item_description'] = tok_raw.texts_to_sequences(full_df.item_description.str.lower())\nfull_df['seq_name'] = tok_raw.texts_to_sequences(full_df.name.str.lower())\n# full_df['seq_category'] = tok_raw.texts_to_sequences(full_df.category_name.str.lower())\n\ndel tok_raw\n\nprint(full_df['seq_name'][:5])\n\nMAX_NAME_SEQ = 10 #17\nMAX_ITEM_DESC_SEQ = 75 #269\nMAX_CATEGORY_SEQ = 8 #8\nMAX_TEXT = np.max([\n np.max(full_df.seq_name.max()),\n np.max(full_df.seq_item_description.max()),\n# np.max(full_df.seq_category.max()),\n]) + 100\nMAX_CATEGORY = np.max(full_df.category.max()) + 1\nMAX_BRAND = np.max(full_df.brand_name.max()) + 1\nMAX_CONDITION = np.max(full_df.item_condition_id.max()) + 1\nMAX_DESC_LEN = np.max(full_df.desc_len.max()) + 1\nMAX_NAME_LEN = np.max(full_df.name_len.max()) + 1\nMAX_SUBCAT_0 = np.max(full_df.subcat_0.max()) + 1\nMAX_SUBCAT_1 = np.max(full_df.subcat_1.max()) + 1\nMAX_SUBCAT_2 = np.max(full_df.subcat_2.max()) + 1\n\n\ndef get_rnn_data(dataset):\n X = {\n 'name': pad_sequences(dataset.seq_name, maxlen=MAX_NAME_SEQ),\n 'item_desc': pad_sequences(dataset.seq_item_description, maxlen=MAX_ITEM_DESC_SEQ),\n 'brand_name': np.array(dataset.brand_name),\n 'category': np.array(dataset.category),\n# 'category_name': pad_sequences(dataset.seq_category, maxlen=MAX_CATEGORY_SEQ),\n 'item_condition': np.array(dataset.item_condition_id),\n 'num_vars': np.array(dataset[[\"shipping\"]]),\n 'desc_len': np.array(dataset[[\"desc_len\"]]),\n 'name_len': np.array(dataset[[\"name_len\"]]),\n 'subcat_0': np.array(dataset.subcat_0),\n 'subcat_1': np.array(dataset.subcat_1),\n 'subcat_2': np.array(dataset.subcat_2),\n }\n return X\n\ntrain = full_df[:n_trains]\ndev = full_df[n_trains:n_trains+n_devs]\ntest = full_df[n_trains+n_devs:]\n\nX_train = get_rnn_data(train)\nY_train = train.target.values.reshape(-1, 1)\n\nX_dev = get_rnn_data(dev)\nY_dev = dev.target.values.reshape(-1, 1)\n\nX_test = get_rnn_data(test)\n\n\ndef root_mean_squared_logarithmic_error(y_true, y_pred):\n first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)\n second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)\n return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1)+0.0000001)\ndef root_mean_squared_error(y_true, y_pred):\n return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)+0.0000001)\n\n\n# set seed again in case testing models adjustments by looping next 2 blocks\nnp.random.seed(123)\n\n\ndef new_rnn_model(lr=0.001, decay=0.0):\n # Inputs\n name = 
Input(shape=[X_train[\"name\"].shape[1]], name=\"name\")\n item_desc = Input(shape=[X_train[\"item_desc\"].shape[1]], name=\"item_desc\")\n brand_name = Input(shape=[1], name=\"brand_name\")\n # category = Input(shape=[1], name=\"category\")\n # category_name = Input(shape=[X_train[\"category_name\"].shape[1]], name=\"category_name\")\n item_condition = Input(shape=[1], name=\"item_condition\")\n num_vars = Input(shape=[X_train[\"num_vars\"].shape[1]], name=\"num_vars\")\n desc_len = Input(shape=[1], name=\"desc_len\")\n name_len = Input(shape=[1], name=\"name_len\")\n subcat_0 = Input(shape=[1], name=\"subcat_0\")\n subcat_1 = Input(shape=[1], name=\"subcat_1\")\n subcat_2 = Input(shape=[1], name=\"subcat_2\")\n\n # Embeddings layers (adjust outputs to help model)\n emb_name = Embedding(MAX_TEXT, 20)(name)\n emb_item_desc = Embedding(MAX_TEXT, 60)(item_desc)\n emb_brand_name = Embedding(MAX_BRAND, 10)(brand_name)\n # emb_category_name = Embedding(MAX_TEXT, 20)(category_name)\n # emb_category = Embedding(MAX_CATEGORY, 10)(category)\n emb_item_condition = Embedding(MAX_CONDITION, 5)(item_condition)\n emb_desc_len = Embedding(MAX_DESC_LEN, 5)(desc_len)\n emb_name_len = Embedding(MAX_NAME_LEN, 5)(name_len)\n emb_subcat_0 = Embedding(MAX_SUBCAT_0, 10)(subcat_0)\n emb_subcat_1 = Embedding(MAX_SUBCAT_1, 10)(subcat_1)\n emb_subcat_2 = Embedding(MAX_SUBCAT_2, 10)(subcat_2)\n\n # rnn layers (GRUs are faster than LSTMs and speed is important here)\n rnn_layer1 = GRU(16)(emb_item_desc)\n rnn_layer2 = GRU(8)(emb_name)\n # rnn_layer3 = GRU(8) (emb_category_name)\n\n # main layers\n main_l = concatenate([\n Flatten()(emb_brand_name)\n # , Flatten() (emb_category)\n , Flatten()(emb_item_condition)\n , Flatten()(emb_desc_len)\n , Flatten()(emb_name_len)\n , Flatten()(emb_subcat_0)\n , Flatten()(emb_subcat_1)\n , Flatten()(emb_subcat_2)\n , rnn_layer1\n , rnn_layer2\n # , rnn_layer3\n , num_vars\n ])\n # (incressing the nodes or adding layers does not effect the time quite as much as the rnn layers)\n main_l = Dropout(0.1)(Dense(512, kernel_initializer='normal', activation='relu')(main_l))\n main_l = Dropout(0.1)(Dense(256, kernel_initializer='normal', activation='relu')(main_l))\n main_l = Dropout(0.1)(Dense(128, kernel_initializer='normal', activation='relu')(main_l))\n main_l = Dropout(0.1)(Dense(64, kernel_initializer='normal', activation='relu')(main_l))\n\n # the output layer.\n output = Dense(1, activation=\"linear\")(main_l)\n\n model = Model([name, item_desc, brand_name, item_condition,\n num_vars, desc_len, name_len, subcat_0, subcat_1, subcat_2], output)\n\n optimizer = Adam(lr=lr, decay=decay)\n # (mean squared error loss function works as well as custom functions)\n model.compile(loss='mse', optimizer=optimizer)\n\n return model\n\n\nmodel = new_rnn_model()\nmodel.summary()\ndel model\n\n\n# Set hyper parameters for the model.\nBATCH_SIZE = 512 * 3\nepochs = 2\n\n# Calculate learning rate decay.\nexp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\nsteps = int(len(X_train['name']) / BATCH_SIZE) * epochs\nlr_init, lr_fin = 0.005, 0.001\nlr_decay = exp_decay(lr_init, lr_fin, steps)\n\n# Create model and fit it with training dataset.\nrnn_model = new_rnn_model(lr=lr_init, decay=lr_decay)\nrnn_model.fit(\n X_train, Y_train, epochs=epochs, batch_size=BATCH_SIZE,\n validation_data=(X_dev, Y_dev), verbose=1,\n)\n\nprint(\"Evaluating the model on validation data...\")\nY_dev_preds_rnn = rnn_model.predict(X_dev, batch_size=BATCH_SIZE)\nprint(\" RMSLE error:\", rmsle(Y_dev, 
Y_dev_preds_rnn))\n\n\nrnn_preds = rnn_model.predict(X_test, batch_size=BATCH_SIZE, verbose=1)\nrnn_preds = np.expm1(rnn_preds)\n\ntimettl2 = time.time()\nprint(timettl2 - timettl1)\n","sub_path":"src/RNN42-RNN.py","file_name":"RNN42-RNN.py","file_ext":"py","file_size_in_byte":13656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"381292832","text":"import torch\nimport random\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n# custom module\nfrom config import *\n\n# Define cls model class\nclass GRU_ATT(nn.Module):\n\tdef __init__(self):\n\t\tsuper(GRU_ATT, self).__init__()\n\t\tself.rnn = nn.GRU(WORD_DIM,HIDDEN_DIM,NUM_LAYER, batch_first = True, bidirectional = BIDIRECTIONAL)\n\t\tself.att_layer = nn.Linear(HIDDEN_DIM*(int(BIDIRECTIONAL)+1), 1)\n\t\tself.fc = nn.Sequential(nn.Linear(HIDDEN_DIM*(int(BIDIRECTIONAL)+1), 5), nn.LogSoftmax(dim=1)) # definition, process, property, example, the others\n\t\tself.hidden = None\n\n\tdef forward(self, x):\n\t\t# x : batch x seq_len x word_dim\n\t\tmask = ~torch.eq(x[:,:,0], torch.zeros(*x.size()[:-1]))\n\t\tx = F.dropout(x, p = DROPOUT_RATE, training=self.training)\n\t\tout, ht = self.rnn(x, self.hidden[:,:x.size(0),:])\n\t\tatt_weight = F.softmax(self.att_layer(out), dim = 1)\n\t\tatt_weight = att_weight*mask.unsqueeze(2) / (att_weight*mask.unsqueeze(2)).sum(1,keepdim=True)\n\t\tatt_applied = (att_weight * out).sum(1)\n\t\tlogit = self.fc(att_applied)\n\t\treturn logit\n\n\tdef init_hidden(self):\n\t\treturn nn.Parameter(torch.zeros(NUM_LAYER*(int(BIDIRECTIONAL)+1), BATCH_SIZE, HIDDEN_DIM)) \n\nclass GRU_ATT_WRAP:\n\tdef __init__(self, model):\n\t\tself.model = model\n\t\tself.model.hidden = self.model.init_hidden()\n\t\tself.optimizer = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)\n\t\tself.scheduler = ReduceLROnPlateau(self.optimizer, mode='min', patience=10)\n\t\tself.criterion = nn.NLLLoss()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"30381887","text":"\"\"\"\nbackuper.py:\n\nBackup-, Verschlüsselungs- und Entschlüsselungs-Tool\n\"\"\"\nimport os\nimport sys\nimport argparse\nfrom subprocess import call\nfrom datetime import datetime\n\n\ndef backup_configs(config_backup_path):\n \"\"\"\n Erstellt Listen der installierten Programme, ein Backup der Softwarequellen\n und ein Backup der Keys für die Softwarequellen\n \"\"\"\n os.mkdir(config_backup_path)\n print(\"backup/ erstellt\")\n\n packages = \"dpkg --get-selections > /home/martin/backup/packages.list\"\n call(packages, shell=True)\n print(\"packages.list gespeichert\")\n\n sources = \"sudo cp -R /etc/apt/sources.list* /home/martin/backup/\"\n call(sources, shell=True)\n print(\"sources.list* gespeichert\")\n\n keys = \"sudo apt-key exportall > /home/martin/backup/repo.keys\"\n call(keys, shell=True)\n print(\"repo.keys gespeichert\")\n\n\n\ndef backup_files(destination_folder):\n \"\"\"\n Erstellt ein tar.gz Archiv mit unten aufgeführten Ausnahmen\n \"\"\"\n backuper_working_dir = os.getcwd()\n os.chdir(destination_folder)\n\n now = datetime.now()\n timestamp = now.strftime(\"%Y-%m-%d--%H-%M\")\n backup_filename = \"backup--{}.tar.gz\".format(timestamp)\n\n tar_backup = (\"sudo tar cvpzf {0} \"\n # exclude the backup tar itself\n \"--exclude={0} \"\n # exclude versioning systems\n \"--exclude=.git \"\n 
\"--exclude=.hg \"\n # exclude files not needed in /home/martin\n \"--exclude=/home/martin/Downloads \"\n \"--exclude=/home/martin/scripts \"\n \"--exclude=/home/martin/VirtualBox\\ VMs \"\n \"--exclude=/home/martin/Videos \"\n \"--exclude=/home/martin/workspace \"\n \"--exclude=/home/martin/.adobe \"\n \"--exclude=/home/martin/.aptitude \"\n \"--exclude=/home/martin/.audacity-data \"\n \"--exclude=/home/martin/.audacity_temp \"\n \"--exclude=/home/martin/.cache \"\n \"--exclude=/home/martin/.codeintel \"\n \"--exclude=/home/martin/.dbus \"\n \"--exclude=/home/martin/.distlib \"\n \"--exclude=/home/martin/.dropbox \"\n \"--exclude=/home/martin/.dropbox-dist \"\n \"--exclude=/home/martin/Dropbox/.dropbox.cache \"\n \"--exclude=/home/martin/.ecryptfs \"\n \"--exclude=/home/martin/.furiusisomount \"\n \"--exclude=/home/martin/.gconf \"\n \"--exclude=/home/martin/.gimp-2.8 \"\n \"--exclude=/home/martin/.gnome \"\n \"--exclude=/home/martin/.gnome2 \"\n \"--exclude=/home/martin/.gnome2_private \"\n \"--exclude=/home/martin/.gstreamer-0.10 \"\n \"--exclude=/home/martin/.gvfs \"\n \"--exclude=/home/martin/.kde \"\n \"--exclude=/home/martin/.kdenlive \"\n \"--exclude=/home/martin/.kivy \"\n \"--exclude=/home/martin/.local/share/Steam \"\n \"--exclude=/home/martin/.local/share/Trash \"\n \"--exclude=/home/martin/.macromedia \"\n \"--exclude=/home/martin/.miro \"\n \"--exclude=/home/martin/.nv \"\n \"--exclude=/home/martin/.openvr \"\n \"--exclude=/home/martin/.pip \"\n \"--exclude=/home/martin/.pki \"\n \"--exclude=/home/martin/.Private \"\n \"--exclude=/home/martin/.pyrenamer \"\n \"--exclude=/home/martin/.steam \"\n \"--exclude=/home/martin/.thumbnails \"\n \"--exclude=/home/martin/.steam \"\n \"--exclude=/home/martin/.gksu.lock \"\n \"--exclude=/home/martin/.steampath \"\n \"--exclude=/home/martin/.steampid \"\n # exclude files not needed in /\n \"--exclude=/bin \"\n \"--exclude=/boot \"\n \"--exclude=/cdrom \"\n \"--exclude=/dev \"\n \"--exclude=/etc \"\n \"--exclude=/home/downwind \"\n \"--exclude=/home/.ecryptfs \"\n \"--exclude=/home/lost+found \"\n \"--exclude=/lib \"\n \"--exclude=/lib32 \"\n \"--exclude=/lib64 \"\n \"--exclude=/lost+found \"\n \"--exclude=/media \"\n \"--exclude=/mnt \"\n \"--exclude=/opt \"\n \"--exclude=/proc \"\n \"--exclude=/root \"\n \"--exclude=/run \"\n \"--exclude=/sbin \"\n \"--exclude=/srv \"\n \"--exclude=/sys \"\n \"--exclude=/tmp \"\n \"--exclude=/usr \"\n \"--exclude=/var \"\n \"--exclude=/initrd.img \"\n \"--exclude=/initrd.img.old \"\n \"--exclude=/vmlinuz \"\n \"--exclude=/vmlinuz.old \"\n \"--exclude-backups \"\n \"--exclude-caches \"\n \"/\").format(backup_filename)\n\n call(tar_backup, shell=True)\n\n os.chdir(backuper_working_dir)\n\n\n\ndef remove_config_backups(config_backup_path):\n \"\"\"\n Löscht die temporären Config Backup Dateien und Ordner\n \"\"\"\n packages = \"rm -rf /home/martin/backup/packages.list\"\n call(packages, shell=True)\n print(\"packages.list gelöscht\")\n\n sources = \"sudo rm -rf /home/martin/backup/sources.list*\"\n call(sources, shell=True)\n print(\"sources.list* gelöscht\")\n\n keys = \"sudo rm -rf /home/martin/backup/repo.keys\"\n call(keys, shell=True)\n print(\"repo.keys gelöscht\")\n\n os.rmdir(config_backup_path)\n print(\"backup/ gelöscht\")\n\n\ndef help():\n \"\"\"\n Eine kleine Anleitung\n \"\"\"\n return \"\"\"\n backup:\n dpkg --get-selections > ~/packages.list\n sudo cp -R /etc/apt/sources.list* /home/martin/backup/\n sudo apt-key exportall > /home/martin/backup/repo.keys\n\n reinstall:\n sudo apt-key add 
/home/martin/backup/repo.keys\n sudo cp -R /home/martin/backup/sources.list* /etc/apt/\n sudo apt-get update\n sudo apt-get install dselect\n sudo dpkg --set-selections < /home/martin/backup/packages.list\n sudo apt-get dselect-upgrade -y\n \"\"\"\n\n\ndef encrypt_backup(file_to_encrypt):\n \"\"\"\n Verschlüsselt eine Datei mittels gpg\n \"\"\"\n encrypt = \"gpg -c --cipher-algo=twofish {}\".format(file_to_encrypt)\n call(encrypt, shell=True)\n\n\ndef decrypt_backup(file_to_decrypt, output_filepath):\n \"\"\"\n Entschlüsselt eine mit gpg verschlüsselte Datei\n \"\"\"\n decrypt = \"gpg -d --output {1} {0}\".format(file_to_decrypt, output_filepath)\n call(decrypt, shell=True)\n\n\nif __name__ == \"__main__\":\n # Parse Arguments\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n description=\"Save the backup in this directory\")\n parser.add_argument(\"-d\",\n \"--destination\",\n metavar=\"DESTINATION-FOLDERPATH\",\n nargs=1,\n help=help(),\n required=False,\n )\n\n parser.add_argument(\"--encrypt\",\n metavar=\"FILEPATH-TO-ENCRYPT\",\n nargs=1,\n help=\"encrypt a file with gpg -c --cipher-algo=twofish\",\n required=False,\n )\n\n parser.add_argument(\"--decrypt\",\n metavar=(\"FILEPATH-TO-DECRYPT\", \"FILEPATH-FOR-OUTPUTFILE\"),\n nargs=2,\n help=\"decrypt a file with gpg -d\",\n required=False,\n )\n\n args = parser.parse_args()\n\n # Wenn -d gegeben ist, dann wird ein Backup erstellt\n if args.destination:\n print(\"BACKUP\")\n config_backup_path = \"/home/martin/backup\"\n backup_configs(config_backup_path)\n\n destination_folder = args.destination[0]\n if not os.path.exists(destination_folder):\n print(\"ERROR: Destination folder does not exist. Exiting.\")\n sys.exit()\n\n backup_files(destination_folder)\n\n remove_config_backups(config_backup_path)\n\n # Wenn --encrypt gegeben ist, wird eine datei verschlüsselt\n if args.encrypt:\n file_to_encrypt = args.encrypt[0]\n encrypt_backup(file_to_encrypt)\n\n # Wenn --decrypt gegeben ist, wird eine datei entschlüsselt\n if args.decrypt:\n file_to_decrypt = args.decrypt[0]\n output_filepath = args.decrypt[1]\n decrypt_backup(file_to_decrypt, output_filepath)\n\n\n print(\"*** ALL DONE ***\")","sub_path":"backuper.py","file_name":"backuper.py","file_ext":"py","file_size_in_byte":7902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393747666","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib.auth import authenticate, login\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.models import User\r\nfrom .forms import UserRegisterForm\r\nfrom .models import Questions, Scores, ExtendedUser\r\n\r\n'''\r\nHome -> templates -> home -> ( base.html | home.html | about.html )\r\n'''\r\n\r\nposts = [\r\n {\r\n \"title\": \"Welcome to Trivia Quiz\",\r\n \"author\": \"Lorem Ipsum\",\r\n \"content\": \"Start by clicking Login\",\r\n \"date_posted\": \"27-AUG-2019\"\r\n },\r\n {\r\n \"title\": \"Want to post questions ?\",\r\n \"author\": \"Jane Doe\",\r\n \"content\": \"Login as Instructor\",\r\n \"date_posted\": \"28-AUG-2019\"\r\n }\r\n\r\n]\r\n\r\n\r\ndef home(request):\r\n context = {\r\n \"posts\": posts\r\n }\r\n return render(request, \"home/home.html\", context)\r\n\r\n\r\ndef about(request):\r\n return render(request, \"home/about.html\", {\"title\": \"About us !!\"})\r\n\r\n\r\n'''\r\nUsers -> ( login.html | logout.html | profile.html | register.html )\r\n'''\r\n\r\n\r\ndef register(request):\r\n if request.method == 
'POST':\r\n form = UserRegisterForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n email = form.cleaned_data.get('email')\r\n usertype = form.cleaned_data.get('usertype')\r\n password1 = form.cleaned_data.get('password1')\r\n password2 = form.cleaned_data.get('password2')\r\n\r\n if password1 == password2:\r\n user = User.objects.get(email=email)\r\n if usertype == \"student\":\r\n ext = ExtendedUser(user=user, user_type=usertype, user_points=30)\r\n ext.save()\r\n else:\r\n ext = ExtendedUser(user=user, user_type=usertype)\r\n ext.save()\r\n print(\"New User Registered.\")\r\n messages.success(request, f'Account created for {email}!')\r\n return redirect('login')\r\n\r\n else:\r\n form = UserRegisterForm()\r\n return render(request, \"home/register.html\", {'form': form})\r\n\r\n\r\ndef login_view(request):\r\n if request.method == 'POST':\r\n\r\n user_name = request.POST['uname']\r\n password = request.POST['pass']\r\n retr = authenticate(username=user_name, password=password)\r\n\r\n if retr:\r\n print(\"user found !!\")\r\n user = User.objects.get(username=user_name)\r\n email = User.objects.only('email').get(username=user_name).email\r\n points = ExtendedUser.objects.only('user_points').get(user=user).user_points\r\n utype = ExtendedUser.objects.only('user_type').get(user=user).user_type\r\n print(utype)\r\n request.session['user'] = user_name\r\n request.session['email'] = email\r\n request.session['type'] = utype\r\n request.session['points'] = points\r\n print(\"SESSION INVOKED -->Name = \" + request.session['user'] + \" Logged in.\")\r\n if utype == \"instructor\":\r\n # messages.success(request, f'Logged In as Instructor')\r\n print(\"Logged in as Instructor\")\r\n return render(request, \"home/instructor.html\")\r\n else:\r\n print(\"Logged in as student\")\r\n messages.success(request, f'Logged In as Student')\r\n return render(request, \"home/student.html\")\r\n else:\r\n print(\"No User found\")\r\n messages.success(request, f'Wrong credentials (or) User not found')\r\n return render(request, \"home/login.html\", {'error': 1})\r\n else:\r\n print(\"Not a request POST\")\r\n return render(request, \"home/login.html\")\r\n\r\n\r\ndef logout(request):\r\n try:\r\n del request.session['user']\r\n del request.session['email']\r\n del request.session['type']\r\n del request.session['points']\r\n return render(request, \"home/logout.html\")\r\n except Exception as e:\r\n raise e\r\n\r\n\r\ndef profile(request):\r\n if 'user' in request.session:\r\n return render(request, \"home/profile.html\")\r\n else:\r\n messages.success(request, f'Access Denied !!')\r\n return render(request, \"home/home.html\")\r\n\r\n\r\n\"\"\"\r\nInstructor level access\r\n\"\"\"\r\n\r\nquizzes = [\r\n {\r\n \"title\": \"Welcome Instructor\",\r\n \"author\": \"Instructor\",\r\n \"content\": \"Create Quiz\",\r\n \"date_posted\": \"30-AUG-2019\"\r\n }\r\n]\r\n\r\n\r\ndef instructor(request):\r\n context = {\r\n \"posts\": quizzes\r\n }\r\n print(\"instructor method invoked\")\r\n if 'user' in request.session:\r\n if request.session['type'] == 'instructor':\r\n print(\"Instructor Authenticated :)\")\r\n return render(request, \"home/instructor.html\", context)\r\n else:\r\n messages.success(request, f'Access Denied !! ')\r\n return render(request, \"home/home.html\")\r\n else:\r\n messages.success(request, f'Login to view page !! 
')\r\n return render(request, \"home/login.html\")\r\n\r\n\r\ndef create_quiz(request):\r\n if 'user' in request.session:\r\n if request.session['type'] == \"instructor\":\r\n print(\"Instructor Authenticated to create Quiz :)\")\r\n\r\n if request.method == 'POST':\r\n ques = request.POST['ques']\r\n qtype = request.POST['qtype']\r\n qtype = int(qtype)\r\n user = User.objects.get(email=request.session['email'])\r\n if qtype == 1:\r\n print(\"tf called\")\r\n tf_ans = request.POST['tf_ans']\r\n auth = Questions(author=user, question=ques, question_type=1, ans=tf_ans,\r\n weightage=10)\r\n auth.save()\r\n print(\"TRUE/FALSE Question submitted successfully !!\")\r\n elif qtype == 2:\r\n op1 = request.POST['opt1']\r\n op2 = request.POST['opt2']\r\n op3 = request.POST['opt3']\r\n op4 = request.POST['opt4']\r\n cl = {1: op1, 2: op2, 3: op3, 4: op4}\r\n ans = cl[int(request.POST['ans'])]\r\n auth = Questions(author=user, question=ques, question_type=2, op1=op1,\r\n op2=op2,\r\n op3=op3,\r\n op4=op4, ans=ans, weightage=20)\r\n auth.save()\r\n print(\"MCQ Question submitted successfully !!\")\r\n elif qtype == 3:\r\n auth = Questions(author=user, question=ques, question_type=3, weightage=30)\r\n auth.save()\r\n print(\"ESSAY Question submitted successfully !!\")\r\n\r\n messages.success(request, f'Question Added !!')\r\n return render(request, \"home/create_quiz.html\")\r\n\r\n else:\r\n print(\"Not a request POST\")\r\n return render(request, \"home/create_quiz.html\")\r\n\r\n else:\r\n messages.success(request, f'Access Denied !! ')\r\n return render(request, \"home/home.html\")\r\n else:\r\n messages.success(request, f'Login to view page !! ')\r\n return render(request, \"home/login.html\")\r\n\r\n\r\ndef display_questions(request):\r\n if 'user' in request.session:\r\n if request.session['type'] == \"instructor\":\r\n if request.method == 'POST':\r\n print(\"Deletion requested !!\")\r\n selection = request.POST.getlist('sel')\r\n user = User.objects.get(email=request.session['email'])\r\n for l in selection:\r\n Questions.objects.filter(author=user, question=l).delete()\r\n print(\"Deletion Complete :)\")\r\n que_cat1 = Questions.objects.all()\r\n else:\r\n que_cat1 = Questions.objects.all()\r\n context = {\r\n \"Questions\": que_cat1\r\n }\r\n return render(request, \"home/display_questions.html\", context)\r\n else:\r\n messages.success(request, f'Access Denied !! ')\r\n return render(request, \"home/home.html\")\r\n else:\r\n messages.success(request, f'Login to view page !! ')\r\n return render(request, \"home/login.html\")\r\n\r\n\r\ndef view_scores(request):\r\n if 'user' in request.session:\r\n if request.session['type'] == \"instructor\":\r\n\r\n results = Scores.objects.all()\r\n context = {\r\n \"Scores\": results\r\n }\r\n return render(request, \"home/view_scores.html\", context)\r\n else:\r\n messages.success(request, f'Access Denied !! ')\r\n return render(request, \"home/home.html\")\r\n else:\r\n messages.success(request, f'Login to view page !! ')\r\n return render(request, \"home/login.html\")\r\n\r\n\r\n\"\"\"\r\nStudent Level Access\r\n\"\"\"\r\n\r\n\r\ndef student(request):\r\n if 'user' in request.session:\r\n if request.session['type'] == \"student\":\r\n print(\"Student Authenticated :)\")\r\n return render(request, \"home/student.html\")\r\n else:\r\n messages.success(request, f'Login as Student to view page !! ')\r\n return render(request, \"home/instructor.html\")\r\n else:\r\n messages.success(request, f'Login to view page !! 
')\r\n return render(request, \"home/home.html\")\r\n\r\n\r\nref = []\r\n\r\n\r\ndef start_quiz(request):\r\n if 'user' in request.session:\r\n if request.method == 'POST':\r\n print(\"Quiz Complete !\")\r\n score = 0\r\n l = [request.POST['1'], request.POST['2'], request.POST['3'], request.POST['4'],\r\n request.POST['5']]\r\n\r\n for i, question in enumerate(ref):\r\n get = Questions.objects.filter(question=question)\r\n qtype = get[0].question_type\r\n print(qtype)\r\n actual_ans = get[0].ans\r\n user_ans = l[i]\r\n if qtype == 1 and user_ans == actual_ans:\r\n score += 10\r\n elif qtype == 2 and user_ans == actual_ans:\r\n score += 20\r\n elif qtype == 3:\r\n if len(user_ans) > 50:\r\n score += 30\r\n\r\n user = User.objects.get(email=request.session['email'])\r\n auth = Scores(user_name=request.session['user'], score=score)\r\n auth.save()\r\n current = ExtendedUser.objects.only('user_points').get(user=user).user_points + score\r\n u = ExtendedUser.objects.filter(user=user).update(user_points=current)\r\n messages.success(request, f'Trivia Complete ! Click view scores to see results')\r\n return render(request, \"home/student.html\")\r\n\r\n else:\r\n ques = Questions.objects.all().order_by('?')[:5]\r\n ref.clear()\r\n for j in ques:\r\n ref.append(j.question)\r\n context = {\r\n \"Questions\": ques\r\n }\r\n return render(request, \"home/start_quiz.html\", context)\r\n\r\n\r\n else:\r\n messages.success(request, f'Login to view page !! ')\r\n return render(request, \"home/home.html\")\r\n\r\n\r\ndef user_scores(request):\r\n if 'user' in request.session:\r\n if request.session['type'] == \"student\":\r\n print(\"Student Authenticated to view scores :)\")\r\n user = request.session['user']\r\n results = Scores.objects.filter(user_name=user)\r\n context = {\r\n \"Scores\": results\r\n }\r\n return render(request, \"home/user_scores.html\", context)\r\n else:\r\n messages.success(request, f'Login as student to view page !! ')\r\n return render(request, \"home/instructor.html\")\r\n else:\r\n messages.success(request, f'Login to view page !! ')\r\n return render(request, \"home/home.html\")\r\n","sub_path":"online_quiz/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"170114626","text":"import json\nimport queue\nfrom control.WorkerQueue import WorkerQueue as WQ\nfrom data.StorageIO import StorageIO\n\n'''\nThe WorkerControl coordinates workers and assigns jobs.\nWorker register themself at startup. The controller queues workers as well as jobs in two seperate queues.\nAs soon as a worker and a job are available, they are taken from the queues and the job_id is send to the worker\nvia MQTT. After the worker finishes its job, it will be put back into the queue\n'''\nclass WorkerControl:\n config_queue = queue.Queue(-1) # infinite size\n\n COMMAND_START = \"start\"\n COMMAND_STOP = \"stop\"\n\n commandIO = None\n storageIO: StorageIO = None\n worker_list = {} # \"worker_id\" : \"job_id\"\n\n worker_job_mapping = {}\n\n worker_queue = WQ()\n\n def get_worker_info(self):\n return self.worker_list\n\n # Function called by external Thread !!!\n def busy_changed_callback(self, worker_id, busy_message):\n try:\n if len(busy_message) == 0:\n print(\"Worker LOST: \" + worker_id)\n self.worker_queue.remove_worker(worker_id)\n self.worker_list.pop(worker_id, None)\n if not worker_id in self.worker_job_mapping:\n print(\"Unknown worker reported busy change! 
This should not happen\")\n else:\n self.update_status(worker_id, \"lost\")\n else:\n message = json.loads(busy_message)\n is_busy = message[\"busy\"] # either False or the job_id\n\n self.worker_list[worker_id] = is_busy\n if is_busy == False:\n if \"job_id\" in message:\n self.update_status(worker_id, message[\"status\"])\n\n if worker_id in self.worker_job_mapping:\n del self.worker_job_mapping[worker_id]\n self.worker_queue.add_to_queue(worker_id)\n else:\n job_id = message[\"job_id\"]\n self.worker_queue.remove_worker(worker_id)\n self.worker_job_mapping[worker_id] = job_id\n self.update_status(worker_id, message[\"status\"])\n print(\"Worker is busy: \" + worker_id)\n except Exception as e:\n print(\"An error occurred in MQTT callback: \" + str(e))\n\n\n def update_status(self, worker_id: str, status: str):\n if not worker_id in self.worker_job_mapping:\n print(\"ERROR. Tried to set status for unset worker!\")\n else:\n self.storageIO.update_job_status(self.worker_job_mapping[worker_id], status)\n\n def __init__(self, commandIO, storageIO: StorageIO):\n self.commandIO = commandIO\n self.storageIO = storageIO\n self.commandIO.on_busy_changed(self.busy_changed_callback)\n\n def modify_job_state(self, job_list, command: str):\n for job in job_list:\n config = {\"job_id\": job}\n\n if command == self.COMMAND_START:\n self.create_new_job(config)\n else:\n pass\n\n # Function called by external Thread !!!\n def create_new_job(self, job_config: dict):\n try:\n print(\"-> Job ready (ID=\" + job_config[\"job_id\"] + \")\")\n self.config_queue.put(job_config, timeout=1)\n except:\n return False\n return True\n\n def run(self):\n while (True):\n jsonConfig = self.config_queue.get()\n job_id = jsonConfig[\"job_id\"]\n print(\"<- Job selected (ID=\" + job_id + \")\")\n ready_worker = self.worker_queue.get_next_worker()\n\n print(\"Starting new job (id: \" + job_id + \")\")\n self.commandIO.start_new_job(ready_worker, json.dumps(jsonConfig))\n if ready_worker in self.worker_job_mapping:\n print(\"Removing orphaned job from worker job mapping\")\n del self.worker_job_mapping[ready_worker]\n self.worker_job_mapping[ready_worker] = job_id\n self.update_status(ready_worker, \"assigned\")\n\n","sub_path":"Controller/control/WorkerControl.py","file_name":"WorkerControl.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440381498","text":"import os\nimport h5py\nimport pytest\nimport numpy as np\nimport torch as tr\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom neural_wrappers.pytorch import device, FeedForwardNetwork\nfrom neural_wrappers.readers import MNISTReader, StaticBatchedDatasetReader, PercentDatasetReader\nfrom neural_wrappers.pytorch import FeedForwardNetwork\n\nclass ModelFC(FeedForwardNetwork):\n\t# (28, 28, 1) => (10, 1)\n\tdef __init__(self, inputShape, outputNumClasses):\n\t\tsuper().__init__()\n\n\t\tself.inputShapeProd = int(np.prod(np.array(inputShape)))\n\t\tself.fc1 = nn.Linear(self.inputShapeProd, 100)\n\t\tself.fc2 = nn.Linear(100, 100)\n\t\tself.fc3 = nn.Linear(100, outputNumClasses)\n\n\tdef forward(self, x):\n\t\tx = x[\"images\"].view(-1, self.inputShapeProd)\n\t\ty1 = F.relu(self.fc1(x))\n\t\ty2 = F.relu(self.fc2(y1))\n\t\ty3 = self.fc3(y2)\n\t\treturn y3\n\ndef lossFn(y, t):\n\t# Negative log-likeklihood (used for softmax+NLL for classification), expecting targets are one-hot encoded\n\ty = F.softmax(y, dim=1)\n\tt = 
t.type(tr.bool)\n\treturn (-tr.log(y[t] + 1e-5)).mean()\n\ntry:\n\t# This path must be supplied manually in order to pass these tests\n\tMNIST_READER_PATH = os.environ[\"MNIST_READER_PATH\"]\n\tpytestmark = pytest.mark.skipif(False, reason=\"Dataset path not found.\")\nexcept Exception:\n\tpytestmark = pytest.mark.skip(\"MNIST Dataset path must be set.\", allow_module_level=True)\n\nclass TestMNISTClassifier:\n\tdef test(self):\n\t\ttrainReader = PercentDatasetReader(\n\t\t\tStaticBatchedDatasetReader(\n\t\t\t\tMNISTReader(h5py.File(MNIST_READER_PATH, \"r\")[\"train\"]),\n\t\t\tbatchSize=10),\n\t\tpercent=1)\n\t\tmodel = ModelFC(inputShape=(28, 28, 1), outputNumClasses=10).to(device)\n\t\tmodel.setCriterion(lossFn)\n\t\tmodel.setOptimizer(optim.SGD, lr=0.01)\n\t\tmodel.trainGenerator(trainReader.iterate(), numEpochs=1)\n\ndef main():\n\tTestMNISTClassifier().test()\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"test/examples/mnist/test_mnist-classifier.py","file_name":"test_mnist-classifier.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216399413","text":"# Ivan Carvalho\n# Solution to https://www.beecrowd.com.br/judge/problems/view/1865\n#!/usr/bin/env python\n# encoding : utf-8\nordem = int(input())\nfor i in range(ordem):\n a, b = input().split(\" \")\n if a == \"Thor\":\n print(\"Y\")\n else:\n print(\"N\")\n","sub_path":"beecrowd/1865.py","file_name":"1865.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"114089870","text":"# -*- coding: utf-8 -*-\n\"\"\"The data type definition reader objects.\"\"\"\n\nimport abc\nimport glob\nimport os\nimport yaml\n\nfrom dtfabric import data_types\nfrom dtfabric import definitions\nfrom dtfabric import errors\n\n\n# TODO: complete _ReadFormatDefinition\n\n\nclass DataTypeDefinitionsReader(object):\n \"\"\"Data type definitions reader interface.\"\"\"\n\n @abc.abstractmethod\n def ReadDefinitionFromDict(self, definitions_registry, definition_values):\n \"\"\"Reads a data type definition from a dictionary.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n\n Returns:\n DataTypeDefinition: data type definition or None.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n\n\nclass DataTypeDefinitionsFileReader(DataTypeDefinitionsReader):\n \"\"\"Data type definitions file reader interface.\"\"\"\n\n _DATA_TYPE_CALLBACKS = {\n definitions.TYPE_INDICATOR_BOOLEAN: u'_ReadBooleanDataTypeDefinition',\n definitions.TYPE_INDICATOR_CHARACTER: u'_ReadCharacterDataTypeDefinition',\n definitions.TYPE_INDICATOR_CONSTANT: u'_ReadConstantDataTypeDefinition',\n definitions.TYPE_INDICATOR_ENUMERATION: (\n u'_ReadEnumerationDataTypeDefinition'),\n definitions.TYPE_INDICATOR_FLOATING_POINT: (\n u'_ReadFloatingPointDataTypeDefinition'),\n definitions.TYPE_INDICATOR_INTEGER: u'_ReadIntegerDataTypeDefinition',\n definitions.TYPE_INDICATOR_SEQUENCE: u'_ReadSequenceDataTypeDefinition',\n definitions.TYPE_INDICATOR_STRUCTURE: u'_ReadStructureDataTypeDefinition',\n definitions.TYPE_INDICATOR_UUID: u'_ReadUUIDDataTypeDefinition',\n }\n\n _INTEGER_FORMAT_ATTRIBUTES = frozenset([\n definitions.FORMAT_SIGNED,\n definitions.FORMAT_UNSIGNED])\n\n def _ReadFixedSizeDataTypeDefinition(\n self, 
unused_definitions_registry, definition_values,\n data_type_definition_class, definition_name, default_size=None,\n default_units=u'bytes'):\n \"\"\"Reads a fixed-size data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n data_type_definition_class (str): data type definition class.\n definition_name (str): name of the definition.\n default_size (Optional[int]): default size.\n default_units (Optional[str]): default units.\n\n Returns:\n FixedSizeDataTypeDefinition: fixed-size data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n aliases = definition_values.get(u'aliases', None)\n description = definition_values.get(u'description', None)\n urls = definition_values.get(u'urls', None)\n\n definition_object = data_type_definition_class(\n definition_name, aliases=aliases, description=description, urls=urls)\n\n attributes = definition_values.get(u'attributes')\n if attributes:\n byte_order = attributes.get(u'byte_order', definitions.BYTE_ORDER_NATIVE)\n if byte_order not in definitions.BYTE_ORDERS:\n error_message = u'unsupported byte-order attribute: {0!s}'.format(\n byte_order)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n size = attributes.get(u'size', default_size)\n try:\n int(size)\n except ValueError:\n error_message = u'unuspported size attribute: {0!s}'.format(size)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n definition_object.byte_order = byte_order\n definition_object.size = size\n definition_object.units = attributes.get(u'units', default_units)\n\n return definition_object\n\n def _ReadBooleanDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads a boolean data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n BooleanDataTypeDefinition: boolean data type definition.\n \"\"\"\n return self._ReadFixedSizeDataTypeDefinition(\n definitions_registry, definition_values,\n data_types.BooleanDefinition, definition_name)\n\n def _ReadCharacterDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads a character data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n CharacterDataTypeDefinition: character data type definition.\n \"\"\"\n return self._ReadFixedSizeDataTypeDefinition(\n definitions_registry, definition_values,\n data_types.CharacterDefinition, definition_name)\n\n def _ReadConstantDataTypeDefinition(\n self, unused_definitions_registry, definition_values, definition_name):\n \"\"\"Reads a constant data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n ConstantDataTypeDefinition: constant data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n value = definition_values.get(u'value', 
None)\n if value is None:\n error_message = u'missing value'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n aliases = definition_values.get(u'aliases', None)\n description = definition_values.get(u'description', None)\n urls = definition_values.get(u'urls', None)\n\n definition_object = data_types.ConstantDefinition(\n definition_name, aliases=aliases, description=description, urls=urls)\n definition_object.value = value\n\n return definition_object\n\n def _ReadEnumerationDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads an enumeration data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n EnumerationDataTypeDefinition: enumeration data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n values = definition_values.get(u'values')\n if not values:\n error_message = u'missing values'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n definition_object = self._ReadFixedSizeDataTypeDefinition(\n definitions_registry, definition_values,\n data_types.EnumerationDefinition, definition_name)\n\n last_name = None\n for enumeration_value in values:\n aliases = enumeration_value.get(u'aliases', None)\n description = enumeration_value.get(u'description', None)\n name = enumeration_value.get(u'name', None)\n value = enumeration_value.get(u'value', None)\n\n if not name or value is None:\n if last_name:\n error_location = u'after: {0:s}'.format(last_name)\n else:\n error_location = u'at start'\n\n error_message = u'{0:s} missing name or value'.format(error_location)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n else:\n try:\n definition_object.AddValue(\n name, value, aliases=aliases, description=description)\n except KeyError:\n error_message = u'value: {0:s} already exists'.format(name)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n last_name = name\n\n return definition_object\n\n def _ReadFloatingPointDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads a floating-point data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n FloatingPointDefinition floating-point data type definition.\n \"\"\"\n return self._ReadFixedSizeDataTypeDefinition(\n definitions_registry, definition_values,\n data_types.FloatingPointDefinition, definition_name)\n\n def _ReadFormatDefinition(self, definition_values, definition_name):\n \"\"\"Reads a format definition.\n\n Args:\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n FormatDefinition: format definition.\n \"\"\"\n description = definition_values.get(u'description', None)\n urls = definition_values.get(u'urls', None)\n\n definition_object = data_types.FormatDefinition(\n definition_name, description=description, urls=urls)\n\n # TODO: implement.\n\n return definition_object\n\n def _ReadIntegerDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads an integer data type definition.\n\n Args:\n 
definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n IntegerDataTypeDefinition: integer data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n definition_object = self._ReadFixedSizeDataTypeDefinition(\n definitions_registry, definition_values,\n data_types.IntegerDefinition, definition_name)\n\n attributes = definition_values.get(u'attributes')\n if attributes:\n format_attribute = attributes.get(u'format', definitions.FORMAT_SIGNED)\n if format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES:\n error_message = u'unsupported format attribute: {0!s}'.format(\n format_attribute)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n definition_object.format = format_attribute\n\n return definition_object\n\n def _ReadSequenceDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads a sequence data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n SequenceDefinition: sequence data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n attributes = definition_values.get(u'attributes')\n if attributes:\n error_message = u'attributes not supported by sequence data type'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n element_data_type = definition_values.get(u'element_data_type', None)\n if not element_data_type:\n error_message = u'missing element data type'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n number_of_elements = definition_values.get(u'number_of_elements', None)\n if not number_of_elements:\n error_message = u'missing number of elements'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n element_data_type_definition = definitions_registry.GetDefinitionByName(\n element_data_type)\n if not element_data_type_definition:\n error_message = u'undefined element data type: {0:s}.'.format(\n element_data_type)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n aliases = definition_values.get(u'aliases', None)\n description = definition_values.get(u'description', None)\n urls = definition_values.get(u'urls', None)\n\n definition_object = data_types.SequenceDefinition(\n definition_name, element_data_type_definition, aliases=aliases,\n data_type=element_data_type, description=description, urls=urls)\n\n try:\n definition_object.number_of_elements = int(number_of_elements)\n except ValueError:\n definition_object.number_of_elements_expression = number_of_elements\n\n return definition_object\n\n def _ReadStructureDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads a structure data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n StructureDefinition: structure data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n 
members = definition_values.get(u'members', None)\n if not members:\n error_message = u'missing members'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n aliases = definition_values.get(u'aliases', None)\n description = definition_values.get(u'description', None)\n urls = definition_values.get(u'urls', None)\n\n definition_object = data_types.StructureDefinition(\n definition_name, aliases=aliases, description=description, urls=urls)\n\n self._ReadStructureDataTypeDefinitionMembers(\n definitions_registry, members, definition_object)\n\n return definition_object\n\n def _ReadStructureDataTypeDefinitionMember(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads a structure data type definition member.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n DataTypeDefinition: structure member data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n name = definition_values.get(u'name', None)\n if not name:\n error_message = u'invalid structure member missing name'\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n if not definition_values:\n error_message = (\n u'invalid structure member: {0:s} missing definition values').format(\n name)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n data_type = definition_values.get(u'data_type', None)\n type_indicator = definition_values.get(u'type', None)\n\n if type_indicator is not None and data_type is not None:\n error_message = (\n u'invalid structure member: {0:s} both type: {1:s} and data '\n u'type: {2:s} are set').format(name, type_indicator, data_type)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n if type_indicator is not None:\n definition_object = self.ReadDefinitionFromDict(\n definitions_registry, definition_values)\n\n else:\n data_type_definition = definitions_registry.GetDefinitionByName(\n data_type)\n if not data_type_definition:\n error_message = (\n u'invalid structure member: {0:s} undefined data type: '\n u'{1:s}').format(name, data_type)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n aliases = definition_values.get(u'aliases', None)\n description = definition_values.get(u'description', None)\n\n definition_object = data_types.StructureMemberDefinition(\n name, data_type_definition, aliases=aliases, data_type=data_type,\n description=description)\n\n return definition_object\n\n def _ReadStructureDataTypeDefinitionMembers(\n self, definitions_registry, definition_values, data_type_definition):\n \"\"\"Reads structure data type definition members.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n data_type_definition (DataTypeDefinition): data type definition.\n \"\"\"\n for member in definition_values:\n structure_member = self._ReadStructureDataTypeDefinitionMember(\n definitions_registry, member, data_type_definition.name)\n data_type_definition.members.append(structure_member)\n\n def _ReadUUIDDataTypeDefinition(\n self, definitions_registry, definition_values, definition_name):\n \"\"\"Reads an UUID data type definition.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n 
definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n\n Returns:\n UUIDDataTypeDefinition: UUID data type definition.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n definition_object = self._ReadFixedSizeDataTypeDefinition(\n definitions_registry, definition_values,\n data_types.UUIDDefinition, definition_name, default_size=16)\n\n if definition_object.size != 16:\n error_message = u'unsupported size: {0:d}.'.format(definition_object.size)\n raise errors.DefinitionReaderError(definition_name, error_message)\n\n return definition_object\n\n def ReadDefinitionFromDict(self, definitions_registry, definition_values):\n \"\"\"Reads a data type definition from a dictionary.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n\n Returns:\n DataTypeDefinition: data type definition or None.\n\n Raises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.\n \"\"\"\n if not definition_values:\n error_message = u'missing definition values'\n raise errors.DefinitionReaderError(None, error_message)\n\n name = definition_values.get(u'name', None)\n if not name:\n error_message = u'missing name'\n raise errors.DefinitionReaderError(None, error_message)\n\n type_indicator = definition_values.get(u'type', None)\n if not type_indicator:\n error_message = u'invalid definition missing type'\n raise errors.DefinitionReaderError(name, error_message)\n\n if type_indicator == u'format':\n return self._ReadFormatDefinition(definition_values, name)\n\n data_type_callback = self._DATA_TYPE_CALLBACKS.get(type_indicator, None)\n if data_type_callback:\n data_type_callback = getattr(self, data_type_callback, None)\n if not data_type_callback:\n error_message = u'unuspported data type definition: {0:s}.'.format(\n type_indicator)\n raise errors.DefinitionReaderError(name, error_message)\n\n return data_type_callback(definitions_registry, definition_values, name)\n\n def ReadDirectory(self, definitions_registry, path, extension=None):\n \"\"\"Reads data type definitions from a directory.\n\n This function does not recurse sub directories into the registry.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n path (str): path of the directory to read from.\n extension (Optional[str]): extension of the filenames to read.\n \"\"\"\n if extension:\n glob_spec = os.path.join(path, u'*.{0:s}'.format(extension))\n else:\n glob_spec = os.path.join(path, u'*')\n\n for definition_file in glob.glob(glob_spec):\n self.ReadFile(definitions_registry, definition_file)\n\n def ReadFile(self, definitions_registry, path):\n \"\"\"Reads data type definitions from a file into the registry.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n path (str): path of the file to read from.\n \"\"\"\n with open(path, 'r') as file_object:\n self.ReadFileObject(definitions_registry, file_object)\n\n @abc.abstractmethod\n def ReadFileObject(self, definitions_registry, file_object):\n \"\"\"Reads data type definitions from a file-like object into the registry.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n file_object (file): file-like object to read from.\n \"\"\"\n\n\nclass 
YAMLDataTypeDefinitionsFileReader(DataTypeDefinitionsFileReader):\n \"\"\"YAML data type definitions file reader.\"\"\"\n\n def ReadDirectory(self, definitions_registry, path, extension=u'yaml'):\n \"\"\"Reads data type definitions from a directory.\n\n This function does not recurse sub directories into the registry.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n path (str): path of the directory to read from.\n extension (Optional[str]): extension of the filenames to read.\n \"\"\"\n super(YAMLDataTypeDefinitionsFileReader, self).ReadDirectory(\n definitions_registry, path, extension=extension)\n\n def ReadFileObject(self, definitions_registry, file_object):\n \"\"\"Reads data type definitions from a file-like object into the registry.\n\n Args:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n file_object (file): file-like object to read from.\n\n Raises:\n FormatError: if the definitions values are missing or if the format is\n incorrect.\n \"\"\"\n yaml_generator = yaml.safe_load_all(file_object)\n\n last_definition_object = None\n error_location = None\n error_message = None\n for yaml_definition in yaml_generator:\n try:\n definition_object = self.ReadDefinitionFromDict(\n definitions_registry, yaml_definition)\n\n except errors.DefinitionReaderError as exception:\n definition_object = None\n if exception.name:\n error_location = u'In: {0:s}'.format(exception.name)\n error_message = u''.join(exception.message)\n\n if not definition_object:\n if not error_location:\n name = yaml_definition.get(u'name', None)\n if name:\n error_location = u'In: {0:s}'.format(name)\n elif last_definition_object:\n error_location = u'After: {0:s}'.format(last_definition_object.name)\n else:\n error_location = u'At start'\n\n if not error_message:\n error_message = u'Missing definition object.'\n\n error_message = u'{0:s} {1:s}'.format(error_location, error_message)\n raise errors.FormatError(error_message)\n\n definitions_registry.RegisterDefinition(definition_object)\n last_definition_object = definition_object\n","sub_path":"dtfabric/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":23259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345860133","text":"from operator import itemgetter, setitem\r\nfrom cacheutils import CacheCollection, CacheItem, SimpleDefDictCache, SimpleDictCache\r\nfrom cache_sql import rad_sql, core_sql\r\nfrom collections import defaultdict\r\nfrom rad_class.AccountData import AccountData\r\nfrom rad_class.NasData import NasData\r\nfrom rad_class.SwitchData import SwitchData\r\nfrom rad_class.DefaultSpeedData import DefaultSpeedData\r\nfrom rad_class.SpeedData import SpeedData\r\nfrom rad_class.SpeedlimitData import SpeedlimitData\r\nfrom rad_class.RadiusAttrsData import RadiusAttrsData\r\nfrom rad_class.SubAccountsData import SubAccountsData\r\nfrom core_cache import TimePeriodAccessCache as PeriodCache\r\nfrom rad_class.IpPoolData import IpPoolData\r\nfrom common.AddonServiceData import AddonServiceData\r\n#from common.AddonServiceTarifData import AddonServiceTarifData\r\nfrom common.AccountAddonServiceData import AccountAddonServiceData\r\n\r\nfrom core_cache import AddonServiceCache, AddonServiceTarifCache, AccessParametersCache\r\n\r\nclass RadCaches(CacheCollection):\r\n __slots__ = ('account_cache', 'period_cache', 'nas_cache', 'defspeed_cache', 'speed_cache', 'speedlimit_cache', 'radattrs_cache', 
'addonservice_cache', 'accountaddonservice_cache', 'subaccount_cache','ippool_cache', 'switch_cache')\r\n \r\n def __init__(self, date, fMem):\r\n super(RadCaches, self).__init__(date)\r\n self.account_cache = AccountCache(date)\r\n self.period_cache = PeriodCache(date, fMem)\r\n self.nas_cache = NasCache()\r\n self.defspeed_cache = DefaultSpeedCache()\r\n self.speed_cache = SpeedCache()\r\n self.speedlimit_cache = SpeedlimitCache()\r\n self.radattrs_cache = RadiusAttrsCache()\r\n self.addonservice_cache = AddonServiceCache()\r\n self.accountaddonservice_cache = AccountAddonServiceCache()\r\n self.subaccount_cache = SubAccountsCache()\r\n self.ippool_cache = IpPoolCache()\r\n self.switch_cache = SwitchCache()\r\n self.caches = [self.account_cache, self.period_cache, self.nas_cache, self.defspeed_cache, self.speed_cache, self.speedlimit_cache, self.radattrs_cache, self.addonservice_cache, self.accountaddonservice_cache, self.subaccount_cache, self.ippool_cache, self.switch_cache]\r\n\r\n\r\nclass AccountCache(CacheItem):\r\n __slots__ = ('by_username', 'by_ipn_mac', 'by_ipn_ip_nas', 'by_id')\r\n \r\n datatype = AccountData\r\n sql = rad_sql['accounts']\r\n \r\n def __init__(self, date):\r\n super(AccountCache, self).__init__()\r\n self.vars = (date,)\r\n \r\n def reindex(self):\r\n self.by_username = {}\r\n self.by_ipn_mac = {}\r\n self.by_ipn_ip_nas = {}\r\n self.by_id = {}\r\n\r\n for acct in self.data:\r\n self.by_username[acct.username] = acct\r\n self.by_ipn_mac[str(acct.ipn_mac_address)] = acct\r\n self.by_ipn_ip_nas[(acct.ipn_ip_address, acct.nas_id)] = acct\r\n self.by_id[acct.account_id] = acct\r\n \r\nclass DefaultSpeedCache(SimpleDictCache):\r\n '''By tarif id'''\r\n __slots__ = ()\r\n datatype = DefaultSpeedData\r\n sql = rad_sql['defspeed']\r\n num = 11\r\n\r\nclass SpeedCache(SimpleDefDictCache):\r\n '''by tarif_id'''\r\n __slots__ = ()\r\n datatype = SpeedData\r\n sql = rad_sql['speed']\r\n num = 14 \r\n\r\nclass SpeedlimitCache(SimpleDictCache):\r\n '''By account_id'''\r\n __slots__ = ('by_account_id')\r\n datatype = SpeedlimitData\r\n sql = rad_sql['limit']\r\n num = 0\r\n \r\n def reindex(self):\r\n self.by_account_id = {}\r\n for speed_l in self.data:\r\n self.by_account_id[speed_l[1]] = speed_l[2:]\r\n \r\nclass NasCache(CacheItem):\r\n __slots__ = ('by_id', 'by_ip','by_ip_n_identify')\r\n datatype = NasData\r\n sql = rad_sql['nas']\r\n \r\n def __init__(self):\r\n super(NasCache, self).__init__()\r\n self.by_ip = {}\r\n self.by_ip_n_identify = {}\r\n self.by_id = {}\r\n \r\n def reindex(self):\r\n self.by_ip = {}\r\n self.by_ip_n_identify = {}\r\n for nas in self.data:\r\n \r\n if not self.by_ip_n_identify.get((str(nas.ipaddress), str(nas.identify))):\r\n self.by_ip_n_identify[(str(nas.ipaddress), str(nas.identify))] = []\r\n self.by_ip_n_identify[(str(nas.ipaddress), str(nas.identify))].append(nas)\r\n \r\n if not self.by_ip.get(str(nas.ipaddress)):\r\n self.by_ip[str(nas.ipaddress)] = []\r\n self.by_ip[str(nas.ipaddress)].append(nas)\r\n \r\n self.by_id[nas.id] = nas\r\n \r\nclass RadiusAttrsCache(CacheItem):\r\n '''by tarif_id'''\r\n __slots__ = ('by_tarif_id','by_nas_id')\r\n datatype = RadiusAttrsData\r\n sql = rad_sql['attrs']\r\n\r\n def reindex(self):\r\n self.by_tarif_id={}\r\n self.by_nas_id={}\r\n for item in self.data:\r\n if item.tarif_id:\r\n if item.tarif_id not in self.by_tarif_id:\r\n self.by_tarif_id[item.tarif_id]=[]\r\n self.by_tarif_id[item.tarif_id].append(item)\r\n if item.nas_id:\r\n if item.tarif_id not in self.by_tarif_id:\r\n 
self.by_nas_id[item.nas_id]=[]\r\n self.by_nas_id[item.nas_id].append(item)\r\n\r\n\r\nclass AddonServiceCache(SimpleDictCache):\r\n '''By id'''\r\n __slots__ = ()\r\n datatype = AddonServiceData\r\n sql = core_sql['addon_service']\r\n \r\nclass SwitchCache(CacheItem):\r\n '''By id'''\r\n __slots__ = ('by_remote_id','by_id')\r\n datatype = SwitchData\r\n sql = rad_sql['switch']\r\n\r\n def __init__(self):\r\n super(SwitchCache, self).__init__()\r\n self.by_remote_id = {}\r\n self.by_id = {}\r\n \r\n def reindex(self):\r\n\r\n for switch in self.data:\r\n self.by_id[switch.id]=switch\r\n if switch.remote_id:\r\n self.by_remote_id[switch.remote_id]=switch\r\n \r\nclass IpPoolCache(SimpleDictCache):\r\n '''By id'''\r\n __slots__ = ()\r\n datatype = IpPoolData\r\n sql = rad_sql['ippool']\r\n \r\n \r\nclass AccountAddonServiceCache(CacheItem):\r\n __slots__ = ('by_id', 'by_account', 'by_subaccount', 'by_service')\r\n \r\n datatype = AccountAddonServiceData\r\n sql = core_sql['addon_account']\r\n \r\n def __init__(self):\r\n super(AccountAddonServiceCache, self).__init__()\r\n self.by_id = {}\r\n self.by_service = defaultdict(list)\r\n #index on tariff_id\r\n self.by_account = defaultdict(list)\r\n self.by_subaccount = defaultdict(list)\r\n \r\n def reindex(self):\r\n self.by_id.clear()\r\n #index on accounttarif.id\r\n self.by_service.clear()\r\n #index on tariff_id\r\n self.by_account.clear()\r\n self.by_subaccount.clear()\r\n for addon in self.data:\r\n self.by_id[addon.id] = addon\r\n if addon.account_id:\r\n self.by_account[addon.account_id].append(addon)\r\n if addon.subaccount_id:\r\n self.by_subaccount[addon.subaccount_id].append(addon)\r\n self.by_service[addon.service_id].append(addon)\r\n \r\n \r\nclass SubAccountsCache(CacheItem):\r\n __slots__ = ('by_id', 'by_username', 'by_username_w_ipn_vpn_link', 'by_mac', 'by_ipn_ip', 'by_vpn_ip', 'by_ipn_ip_nas_id', 'by_switch_port')\r\n \r\n datatype = SubAccountsData\r\n sql = rad_sql['subaccounts']\r\n \r\n def __init__(self):\r\n super(SubAccountsCache, self).__init__()\r\n \r\n def reindex(self):\r\n self.by_id = {}\r\n self.by_username = {}\r\n self.by_mac = {}\r\n self.by_ipn_ip = {}\r\n self.by_vpn_ip = {}\r\n self.by_ipn_ip_nas_id = {}\r\n self.by_username_w_ipn_vpn_link = {}\r\n self.by_switch_port = {}\r\n #self.by_username_w_pppoe_mac = {}\r\n \r\n for item in self.data:\r\n self.by_id[item.id] = item\r\n if item.username:\r\n self.by_username[item.username] = item\r\n if item.ipn_mac_address:\r\n self.by_mac[item.ipn_mac_address.lower()] = item\r\n if item.ipn_ip_address and item.ipn_ip_address is not \"0.0.0.0\" :\r\n self.by_ipn_ip[item.ipn_ip_address] = item\r\n #self.by_ipn_ip_nas_id[(item.ipn_ip_address, item.nas_id)] = item \r\n if item.vpn_ip_address and item.vpn_ip_address is not \"0.0.0.0\" :\r\n self.by_vpn_ip[item.vpn_ip_address] = item\r\n if item.ipn_ip_address and item.ipn_ip_address is not \"0.0.0.0\" and item.associate_pptp_ipn_ip==True:\r\n self.by_username_w_ipn_vpn_link[(item.username, item.ipn_ip_address)]=item\r\n\r\n if item.ipn_mac_address and item.associate_pppoe_ipn_mac==True:\r\n self.by_username_w_ipn_vpn_link[(item.username, item.ipn_mac_address)]=item\r\n \r\n if item.switch_id and item.switch_port:\r\n self.by_switch_port[(item.switch_id,item.switch_port)]=item\r\n \r\n \r\n \r\n","sub_path":"classes/rad_cache.py","file_name":"rad_cache.py","file_ext":"py","file_size_in_byte":9001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"212127495","text":"import json\nimport uuid\nimport time\nimport boto3\nimport botocore\n\nclass AmazonSQS(object):\n\n def __init__(self, queue_name=None):\n # Create SQS client\n self.sqs = boto3.client('sqs')\n if queue_name is None:\n pass\n else:\n self.createQueue(queue_name)\n\n def listQueues(self):\n ''' List all available queues\n '''\n rs = self.sqs.list_queues()\n return rs['QueueUrls']\n\n def createQueue(self, queue_name, delay_seconds='60', message_retention_period='86400'):\n try:\n rsp = self.sqs.create_queue(QueueName=queue_name,\n Attributes={'DelaySeconds': delay_seconds,\n 'MessageRetentionPeriod': message_retention_period\n }\n )\n return rsp['QueueUrl']\n except botocore.exceptions.ClientError:\n return None\n\n\n def getQueueUrl(self, queue_name):\n rsp = self.sqs.get_queue_url(QueueName=queue_name)\n return rsp['QueueUrl']\n\n\n def deleteQueue(self, queue_url):\n self.sqs.delete_queue(QueueUrl=queue_url)\n\n\n def addMessageToQueue(self, queue_url, message):\n ''' Pushes a message onto the queue\n '''\n # Data required by the API\n data = {'created_at': time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime()),\n 'key': str(uuid.uuid1()),\n 'msg': str(message)\n }\n\n # Put the message in the queue\n rsp = self.sqs.send_message(QueueUrl=queue_url, DelaySeconds=1,\n MessageBody = json.dumps(data))\n return rsp['MessageId']\n\n\n\n def readMessageFromQueue(self, queue_url, deleteAfterRead=False):\n ''' Polls the queue for messages\n '''\n rsp = self.sqs.receive_message(QueueUrl=queue_url,\n MaxNumberOfMessages=1,\n MessageAttributeNames=['All'],\n VisibilityTimeout=0,\n WaitTimeSeconds=0\n )\n try:\n message = rsp['Messages'][0]\n if deleteAfterRead is True:\n receipt_handle = message['ReceiptHandle']\n self.sqs.delete_message(QueueUrl=queue_url,\n ReceiptHandle=receipt_handle)\n except KeyError:\n message = None\n\n return message\n","sub_path":"src/awstools/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"76100297","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom pandas.api.types import is_string_dtype\r\nfrom pandas.api.types import is_numeric_dtype\r\n\r\ndata_file = \"E:\\\\USvideos.csv\"\r\n\r\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\r\nplt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\r\n\r\n# 读取数据集并保留需要的属性列\r\ndataset = pd.read_csv(data_file, usecols=[3, 4, 7, 8, 9, 10, 12, 13, 14])\r\n\r\n# 频数统计\r\n# params:\r\n# data:数据集\r\n# name:数据列名称\r\ndef data_summary(data, name):\r\n print(data[name].value_counts())\r\n\r\n# 五数概括\r\n# params:\r\n# data:数据集\r\n# name:数据列名称\r\ndef five_number_summary(data, name):\r\n datacol = data[name]\r\n min, max = datacol.min(), datacol.max()\r\n q1, q2, q3 = datacol.quantile(0.25), datacol.quantile(0.5), datacol.quantile(0.75)\r\n nan = datacol.isnull().sum()\r\n \r\n print(\"Five-number summary of data attribute {}:\".format(name))\r\n print(\"Min value: \" + str(min))\r\n print(\"Q1 value: \" + str(q1))\r\n print(\"Q2 value: \" + str(q2))\r\n print(\"Q3 value: \" + str(q3))\r\n print(\"Max value: \" + str(max))\r\n print(\"Number of missing value: \" + nan)\r\n \r\n# 条形图绘制\r\n# params:\r\n# data:数据集\r\n# name:数据列名称\r\ndef bar_plot(data, name):\r\n summ = data[name].value_counts()\r\n summ.plot(color='r', kind='bar')\r\n plt.show()\r\n \r\n# 盒图绘制\r\n# params:\r\n# data:数据集\r\n# name:数据列名称\r\ndef box_plot(data, name):\r\n 
# Bar chart plotting\r\n# params:\r\n#   data: the dataset\r\n#   name: name of the data column\r\ndef bar_plot(data, name):\r\n    summ = data[name].value_counts()\r\n    summ.plot(color='r', kind='bar')\r\n    plt.show()\r\n    \r\n# Box plot plotting\r\n# params:\r\n#   data: the dataset\r\n#   name: name of the data column\r\ndef box_plot(data, name):\r\n    data[name].plot(kind='box')\r\n    plt.show()\r\n    \r\n# Bar chart comparison\r\n# params:\r\n#   olddata: the old dataset\r\n#   newdata: the new dataset\r\n#   name: name of the data column\r\ndef barplot_comp(olddata, newdata, name):\r\n    old_summ = olddata[name].value_counts()\r\n    new_summ = newdata[name].value_counts()\r\n    summs = pd.concat([old_summ, new_summ], axis=1)\r\n    ax = summs.plot(colormap='Paired', kind='bar')\r\n    ax.legend(['old', 'new'])\r\n    plt.show()\r\n    \r\n# Box plot comparison\r\n# params:\r\n#   olddata: the old dataset\r\n#   newdata: the new dataset\r\n#   name: name of the data column\r\ndef boxplot_comp(olddata, newdata, name):\r\n    summs = pd.concat([olddata[name], newdata[name]], axis=1)\r\n    summs.columns = ['old', 'new']\r\n    ax = summs.plot(kind='box')\r\n    plt.show()\r\n    \r\n\r\n# Drop the rows that contain missing values\r\n# params:\r\n#   data: the dataset\r\ndef remove_missing_value(data):\r\n    return data.dropna()\r\n\r\n# Fill missing values with the most frequent value of each column\r\n# params:\r\n#   data: the dataset\r\ndef fill_with_most_frequent(data):\r\n    new_data = data.copy()\r\n    for column in new_data.columns:\r\n        most_frequent = new_data[column].value_counts().index[0]\r\n        new_data[column].fillna(most_frequent, inplace=True)\r\n    return new_data\r\n\r\n# Euclidean distance between two data points\r\n# params:\r\n#   data: the dataset\r\n#   i: data point 1\r\n#   j: data point 2\r\ndef distance(data, i, j):\r\n    i_row = data.iloc[i]\r\n    j_row = data.iloc[j]\r\n    dist_factor = []\r\n    if j_row.isna().any():\r\n        return float('inf')\r\n    \r\n    for k in range(len(i_row)):\r\n        if is_numeric_dtype(i_row[k]):\r\n            dist_factor.append(abs(i_row[k] - j_row[k]))\r\n        else:\r\n            dist_factor.append(0 if i_row[k] == j_row[k] else 1)  # equal categorical values add no distance\r\n    dist = 0\r\n    for k in range(len(dist_factor)):\r\n        dist += dist_factor[k] * dist_factor[k]\r\n    dist = pow(dist, 0.5)\r\n    return dist\r\n\r\n# Fill incomplete rows from the most similar complete row\r\n# params:\r\n#   data: the dataset\r\ndef fill_with_similarity(data):\r\n    index = []\r\n    new_data = data.copy()\r\n    nans = new_data.isna().any(axis=1)\r\n    for i in range(len(nans)):\r\n        if nans[i]:\r\n            index.append(i)\r\n    \r\n    for i in index:\r\n        dist = float('inf')\r\n        sim_row = 0\r\n        for j in range(new_data.shape[0]):\r\n            if i == j:\r\n                continue\r\n            tmp_dist = distance(new_data, i, j)\r\n            if tmp_dist < dist:\r\n                dist = tmp_dist\r\n                sim_row = j\r\n        new_data.iloc[i] = new_data.iloc[sim_row]\r\n    return new_data\r\n\r\n# Fill based on attribute type: numeric columns get the mean, other columns\r\n# get the most frequent value\r\n# params:\r\n#   data: the dataset\r\ndef fill_with_corr(data):\r\n    new_data = data.copy()\r\n    numeric_col = ['points', 'price']\r\n    for column in new_data.columns:\r\n        if column in numeric_col:\r\n            new_data[column].fillna(new_data[column].mean(), inplace=True)\r\n        else:\r\n            most_frequent = new_data[column].value_counts().index[0]\r\n            new_data[column].fillna(most_frequent, inplace=True)\r\n    return new_data\r\n\r\n\r\n    ","sub_path":"Trending YouTube Video Statistics.py","file_name":"Trending YouTube Video Statistics.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"10903285","text":"\"\"\"\nEarlier we saw the difference between list comprehensions and generators,\nbut what exactly is a GENERATOR?\n\nBasically, when we create a generator we can apply functions that avoid\nexplicit FOR loops and CONDITIONS, for example:\n\n- map() -> (EQUIVALENT TO A LIST COMPREHENSION WITH A FOR) lets us apply a\nfunction to each element of a list, returning a new list containing the\nelements produced by applying that function\n\n- filter() - (EQUIVALENT TO A LIST COMPREHENSION WITH AN IF) does filtering...\n\n- lambda() - (EQUIVALENT TO A NAMELESS FUNCTION, IT ONLY LIVES WHILE IT RUNS)\n\n- reduce() - **I DID NOT FIND IT USEFUL!!!** it is a kind of running total...\n\"\"\"\nimport math\nlist1 = [1, 4, 9, 100]\n\n#-----Map(function, list)------\nraiz = map(math.sqrt, list1)\nprint(raiz) # This will not print the list, but the OBJECT... you need to convert it:\n# example: dict(raiz) ... tuple(raiz) ... list(raiz)\nlist_raiz = list(raiz)\nprint(list_raiz)\n\n# or we can simplify everything (THE RIGHT WAY)\n\nraiz = list(map(math.sqrt, list1))\nprint(raiz)\n#---------- WITHOUT MAP(), HOW WOULD WE DO IT?----------\n\n# We would use a list comprehension \"[]\"\nraiz = [math.sqrt(element) for element in list1]\nprint(raiz)\n\n\n#--------Filter(function, list)----------\n# let's create a function with a condition\ndef nossa_condicao(x):\n    return x > 5\nfiltrar = filter(nossa_condicao, list1)\nprint(list(filtrar))\n\n# or we can do it with a list comprehension \"[]\" (NO FUNCTION NEEDED)\nfiltrar = [element for element in list1 if element > 5]\nprint(filtrar)\n\n# -------- Lambda()---------------------\n\"\"\"\nIn the filter() example we had to define a new function\n(called \"nossa_condicao\") just to use it inside filter(),\nwhere it is called once for each element. Instead of defining a new\nfunction that way, we could define a function that is valid only while\nthe filter is running. Such a function does not even need a name,\nso it is called an anonymous function or lambda function. Consider the\nexample below:\"\"\"\n\nfiltrar = filter(lambda x: x > 5, list1)\nprint(list(filtrar))\n\n\n# We can also do it with a list comprehension, but the lambda has to be\n# CALLED for each element; the original attempt,\n# [lambda element: element > 5 for element in list1], \"didn't work haha\"\n# because it only builds a list of lambda objects:\nfiltrar = [element for element in list1 if (lambda e: e > 5)(element)]\nprint(filtrar)
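\n\n# -------- Reduce()---------------------\n# A minimal sketch of reduce(), which the notes above mention but never show\n# (added as an illustration; in Python 3 it has to be imported from functools):\nfrom functools import reduce\nsoma = reduce(lambda acc, x: acc + x, list1)\nprint(soma)  # 114, the 'running total' of [1, 4, 9, 100]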
","sub_path":"2019/01_Curso_Geek_basico_avancado/01_Meu_Flat_is_better_than_nested/04_Generators.py","file_name":"04_Generators.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"114528603","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"EDMNtuple\")\n\nprocess.source = cms.Source(\"PoolSource\",\n    fileNames = cms.untracked.vstring('/store/user/meloam/SingleMu/meloam_feb12_tlbsm53x2_Run2012C_24Aug2012_v1/20130212222033/00000/4463A7D0-9075-E211-95CC-003048F2E8C2.root')\n)\nprocess.cleanElectronTriggerMatchHLTEle27CaloIdVTCaloIsoTTrkIdTTrkIsoT = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n    matchedCuts = cms.string('path( \"HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_v*\" )'),\n    src = cms.InputTag(\"cleanPatElectrons\"),\n    maxDPtRel = cms.double(0.5),\n    resolveByMatchQuality = cms.bool(True),\n    maxDeltaR = cms.double(0.5),\n    resolveAmbiguities = cms.bool(True),\n    matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanElectronTriggerMatchL1EGammaCollection = cms.EDProducer(\"PATTriggerMatcherDRLessByR\",\n    matchedCuts = cms.string('coll( \"l1extraParticles:NonIsolated\" ) || coll( \"l1extraParticles:Isolated\" )'),\n    src = cms.InputTag(\"cleanPatElectrons\"),\n    maxDPtRel = cms.double(0.5),\n    resolveByMatchQuality = cms.bool(False),\n    maxDeltaR = cms.double(0.5),\n    resolveAmbiguities = cms.bool(True),\n    matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanJetTriggerMatchHLTJet240 = cms.EDProducer(\"PATTriggerMatcherDRLessByR\",\n    matchedCuts = cms.string('path( \"HLT_Jet240_v*\" )'),\n    src = cms.InputTag(\"cleanPatJets\"),\n    maxDPtRel = cms.double(3.0),\n    resolveByMatchQuality = cms.bool(True),\n    maxDeltaR = cms.double(0.4),\n    resolveAmbiguities = cms.bool(True),\n    matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanJetTriggerMatchHLTMu17CentralJet30 = 
cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('type( \"TriggerJet\" ) && path( \"HLT_Mu17_CentralJet30_v*\" )'),\n src = cms.InputTag(\"cleanPatJets\"),\n maxDPtRel = cms.double(3.0),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.4),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanMuonTriggerMatchHLTDoubleMu6 = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('path( \"HLT_DoubleMu6_v*\" )'),\n src = cms.InputTag(\"cleanPatMuons\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanMuonTriggerMatchHLTMu17CentralJet30 = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('type( \"TriggerMuon\" ) && path( \"HLT_Mu17_CentralJet30_v*\" )'),\n src = cms.InputTag(\"cleanPatMuons\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanMuonTriggerMatchHLTMu20 = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('path( \"HLT_Mu20_v*\" )'),\n src = cms.InputTag(\"cleanPatMuons\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanMuonTriggerMatchPDSingleMu = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('path( \"HLT_IsoMu12_v*\" ) || path( \"HLT_IsoMu15_v*\" ) || path( \"HLT_IsoMu17_v*\" ) || path( \"HLT_IsoMu24_v*\" ) || path( \"HLT_IsoMu30_v*\" ) || path( \"HLT_L1SingleMu10_v*\" ) || path( \"HLT_L1SingleMu20_v*\" ) || path( \"HLT_L2Mu10_v*\" ) || path( \"HLT_L2Mu20_v*\" ) || path( \"HLT_Mu3_v*\" ) || path( \"HLT_Mu5_v*\" ) || path( \"HLT_Mu8_v*\" ) || path( \"HLT_Mu12_v*\" ) || path( \"HLT_Mu15_v*\" ) || path( \"HLT_Mu20_v*\" ) || path( \"HLT_Mu24_v*\" ) || path( \"HLT_Mu30_v*\" )'),\n src = cms.InputTag(\"cleanPatMuons\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanMuonTriggerMatchTriggerMuon = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('type( \"TriggerL1Mu\" ) || type( \"TriggerMuon\" )'),\n src = cms.InputTag(\"cleanPatMuons\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(False),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanPatElectronsTriggerMatch = cms.EDProducer(\"PATTriggerMatchElectronEmbedder\",\n matches = cms.VInputTag(\"cleanElectronTriggerMatchHLTEle27CaloIdVTCaloIsoTTrkIdTTrkIsoT\"),\n src = cms.InputTag(\"cleanPatElectrons\")\n)\n\n\nprocess.cleanPatJetsTriggerMatch = cms.EDProducer(\"PATTriggerMatchJetEmbedder\",\n matches = cms.VInputTag(\"cleanJetTriggerMatchHLTJet240\"),\n src = cms.InputTag(\"cleanPatJets\")\n)\n\n\nprocess.cleanPatMuonsTriggerMatch = cms.EDProducer(\"PATTriggerMatchMuonEmbedder\",\n matches = cms.VInputTag(\"cleanMuonTriggerMatchHLTMu20\", \"cleanMuonTriggerMatchHLTDoubleMu6\"),\n src = cms.InputTag(\"cleanPatMuons\")\n)\n\n\nprocess.cleanPatPhotonsTriggerMatch = 
cms.EDProducer(\"PATTriggerMatchPhotonEmbedder\",\n matches = cms.VInputTag(\"cleanPhotonTriggerMatchHLTPhoton26IsoVLPhoton18\"),\n src = cms.InputTag(\"cleanPatPhotons\")\n)\n\n\nprocess.cleanPatTausTriggerMatch = cms.EDProducer(\"PATTriggerMatchTauEmbedder\",\n matches = cms.VInputTag(\"cleanTauTriggerMatchHLTDoubleIsoPFTau20Trk5\"),\n src = cms.InputTag(\"cleanPatTaus\")\n)\n\n\nprocess.cleanPhotonTriggerMatchHLTPhoton26IsoVLPhoton18 = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('path( \"HLT_Photon26_IsoVL_Photon18_v*\" )'),\n src = cms.InputTag(\"cleanPatPhotons\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.cleanTauTriggerMatchHLTDoubleIsoPFTau20Trk5 = cms.EDProducer(\"PATTriggerMatcherDRDPtLessByR\",\n matchedCuts = cms.string('path( \"HLT_DoubleIsoPFTau20_Trk5_v*\" )'),\n src = cms.InputTag(\"cleanPatTaus\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.metTriggerMatchHLTMET100 = cms.EDProducer(\"PATTriggerMatcherDRLessByR\",\n matchedCuts = cms.string('path( \"HLT_MET100_v*\" )'),\n src = cms.InputTag(\"patMETs\"),\n maxDPtRel = cms.double(3.0),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.4),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.metTriggerMatchHLTMu20 = cms.EDProducer(\"PATTriggerMatcherDRLessByR\",\n matchedCuts = cms.string('path( \"HLT_Mu20_v*\" )'),\n src = cms.InputTag(\"patMETs\"),\n maxDPtRel = cms.double(0.5),\n resolveByMatchQuality = cms.bool(True),\n maxDeltaR = cms.double(0.5),\n resolveAmbiguities = cms.bool(True),\n matched = cms.InputTag(\"patTrigger\")\n)\n\n\nprocess.patMETsTriggerMatch = cms.EDProducer(\"PATTriggerMatchMETEmbedder\",\n matches = cms.VInputTag(\"metTriggerMatchHLTMET100\"),\n src = cms.InputTag(\"patMETs\")\n)\n\n\nprocess.patTrigger = cms.EDProducer(\"PATTriggerProducer\",\n processName = cms.string('HLT'),\n onlyStandAlone = cms.bool(False)\n)\n\n\nprocess.patTriggerEvent = cms.EDProducer(\"PATTriggerEventProducer\",\n patTriggerMatches = cms.VInputTag(),\n processName = cms.string('HLT')\n)\n\n\nprocess.pfTupleEle = cms.EDFilter(\"EDSHyFTSelector\",\n matchByHand = cms.bool(False),\n shyftSelection = cms.PSet(\n muonSrc = cms.InputTag(\"selectedPatMuonsPFlow\"),\n dxy = cms.double(0.02),\n eEt = cms.double(30.0),\n eleEtaMaxLoose = cms.double(2.5),\n tauTrig = cms.string('butz'),\n unclMetScale = cms.double(0.0),\n jecPayloads = cms.vstring('Jec12_V2_L1FastJet_AK5PFchs.txt', \n 'Jec12_V2_L2Relative_AK5PFchs.txt', \n 'Jec12_V2_L3Absolute_AK5PFchs.txt', \n 'Jec12_V2_L2L3Residual_AK5PFchs.txt', \n 'Jec12_V2_Uncertainty_AK5PFchs.txt'),\n muPtMin = cms.double(30.0),\n rawJetPtCut = cms.double(0.0),\n eleEtaMax = cms.double(2.4),\n useNoPFIso = cms.bool(True),\n elDcot = cms.double(0.02),\n rhoSrc = cms.InputTag(\"kt6PFJets\",\"rho\"),\n jetScale = cms.double(0.0),\n trigSrc = cms.InputTag(\"patTriggerEvent\"),\n ePlusJets = cms.bool(True),\n metSrc = cms.InputTag(\"patMETs\"),\n pfCandidateMap = cms.InputTag(\"particleFlow\",\"electrons\"),\n pvSelector = cms.PSet(\n maxZ = cms.double(24.0),\n minNdof = cms.double(4.0),\n NPV = cms.int32(1),\n maxRho = cms.double(2.0),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n 
tauEtaMax = cms.double(2.4),\n elDist = cms.double(0.02),\n jetSmear = cms.double(0.0),\n tauPtMin = cms.double(20.0),\n useData = cms.bool(True),\n electronIdVeto = cms.PSet(\n vtxFitConv = cms.bool(True),\n sihih_EE = cms.double(0.03),\n sihih_EB = cms.double(0.01),\n ooemoop_EB = cms.double(0.05),\n ooemoop_EE = cms.double(0.05),\n d0_EB = cms.double(0.02),\n d0_EE = cms.double(0.02),\n version = cms.string('VETO'),\n deta_EB = cms.double(0.004),\n deta_EE = cms.double(0.005),\n rhoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n dZ_EB = cms.double(0.1),\n dphi_EB = cms.double(0.03),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\"),\n dZ_EE = cms.double(0.1),\n mHits = cms.int32(0),\n reliso_EE = cms.double(0.1),\n cutsToIgnore = cms.vstring(''),\n reliso_EB = cms.double(0.1),\n hoe_EE = cms.double(0.01),\n hoe_EB = cms.double(0.12),\n dphi_EE = cms.double(0.02)\n ),\n pfEleSrc = cms.InputTag(\"particleFlow\"),\n cutsToIgnore = cms.vstring('Trigger', \n 'Dilepton Veto'),\n tauSrc = cms.InputTag(\"selectedPatTausPFlow\"),\n muEtaMax = cms.double(2.1),\n muJetDR = cms.double(0.3),\n electronSrc = cms.InputTag(\"selectedPatElectronsPFlow\"),\n eleEtMinLoose = cms.double(15.0),\n eleJetDR = cms.double(0.5),\n muPtMinLoose = cms.double(10.0),\n jetEtaMax = cms.double(2.4),\n muEtaMaxLoose = cms.double(2.5),\n useNoID = cms.bool(True),\n eleEtMin = cms.double(20.0),\n ePtScale = cms.double(0.0),\n muPlusJets = cms.bool(False),\n eRelIso = cms.double(0.1),\n vertexCut = cms.double(1.0),\n ePtUncertaintyEE = cms.double(0.025),\n rhoIsoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n eleTrig = cms.string('HLT_Ele10_LW_L1R'),\n muRelIso = cms.double(0.125),\n jetPtMin = cms.double(30.0),\n minJets = cms.int32(1),\n jetUncertainty = cms.double(0.0),\n muTrig = cms.string('HLT_Mu9'),\n jetSrc = cms.InputTag(\"goodPatJetsPFlow\"),\n identifier = cms.string('AK5 PF'),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n )\n)\n\n\nprocess.pfTupleEleCA8Pruned = cms.EDFilter(\"EDSHyFTSelector\",\n matchByHand = cms.bool(True),\n shyftSelection = cms.PSet(\n muonSrc = cms.InputTag(\"selectedPatMuonsPFlow\"),\n dxy = cms.double(0.02),\n eEt = cms.double(30.0),\n eleEtaMaxLoose = cms.double(2.5),\n tauTrig = cms.string('butz'),\n unclMetScale = cms.double(0.0),\n jecPayloads = cms.vstring('Jec12_V2_L1FastJet_AK5PFchs.txt', \n 'Jec12_V2_L2Relative_AK5PFchs.txt', \n 'Jec12_V2_L3Absolute_AK5PFchs.txt', \n 'Jec12_V2_L2L3Residual_AK5PFchs.txt', \n 'Jec12_V2_Uncertainty_AK5PFchs.txt'),\n muPtMin = cms.double(30.0),\n rawJetPtCut = cms.double(0.0),\n eleEtaMax = cms.double(2.4),\n useNoPFIso = cms.bool(True),\n elDcot = cms.double(0.02),\n rhoSrc = cms.InputTag(\"kt6PFJets\",\"rho\"),\n jetScale = cms.double(0.0),\n trigSrc = cms.InputTag(\"patTriggerEvent\"),\n ePlusJets = cms.bool(True),\n metSrc = cms.InputTag(\"patMETs\"),\n pfCandidateMap = cms.InputTag(\"particleFlow\",\"electrons\"),\n pvSelector = cms.PSet(\n maxZ = cms.double(24.0),\n minNdof = cms.double(4.0),\n NPV = cms.int32(1),\n maxRho = cms.double(2.0),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n tauEtaMax = cms.double(2.4),\n elDist = cms.double(0.02),\n jetSmear = cms.double(0.0),\n tauPtMin = cms.double(20.0),\n useData = cms.bool(True),\n electronIdVeto = cms.PSet(\n vtxFitConv = cms.bool(True),\n sihih_EE = cms.double(0.03),\n sihih_EB = cms.double(0.01),\n ooemoop_EB = cms.double(0.05),\n ooemoop_EE = cms.double(0.05),\n d0_EB = cms.double(0.02),\n d0_EE = cms.double(0.02),\n version = cms.string('VETO'),\n 
deta_EB = cms.double(0.004),\n deta_EE = cms.double(0.005),\n rhoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n dZ_EB = cms.double(0.1),\n dphi_EB = cms.double(0.03),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\"),\n dZ_EE = cms.double(0.1),\n mHits = cms.int32(0),\n reliso_EE = cms.double(0.1),\n cutsToIgnore = cms.vstring(''),\n reliso_EB = cms.double(0.1),\n hoe_EE = cms.double(0.01),\n hoe_EB = cms.double(0.12),\n dphi_EE = cms.double(0.02)\n ),\n pfEleSrc = cms.InputTag(\"particleFlow\"),\n cutsToIgnore = cms.vstring('Trigger', \n 'Dilepton Veto'),\n tauSrc = cms.InputTag(\"selectedPatTausPFlow\"),\n muEtaMax = cms.double(2.1),\n muJetDR = cms.double(0.3),\n electronSrc = cms.InputTag(\"selectedPatElectronsPFlow\"),\n eleEtMinLoose = cms.double(15.0),\n eleJetDR = cms.double(0.5),\n muPtMinLoose = cms.double(10.0),\n jetEtaMax = cms.double(2.4),\n muEtaMaxLoose = cms.double(2.5),\n useNoID = cms.bool(True),\n eleEtMin = cms.double(20.0),\n ePtScale = cms.double(0.0),\n muPlusJets = cms.bool(False),\n eRelIso = cms.double(0.1),\n vertexCut = cms.double(1.0),\n ePtUncertaintyEE = cms.double(0.025),\n rhoIsoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n eleTrig = cms.string('HLT_Ele10_LW_L1R'),\n muRelIso = cms.double(0.125),\n jetPtMin = cms.double(30.0),\n minJets = cms.int32(1),\n jetUncertainty = cms.double(0.0),\n muTrig = cms.string('HLT_Mu9'),\n jetSrc = cms.InputTag(\"goodPatJetsCA8PrunedPF\"),\n identifier = cms.string('CA8 Prunded PF'),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n ePlusJets = cms.bool(True),\n muPlusJuets = cms.bool(False)\n)\n\n\nprocess.pfTupleEleSelectedJets = cms.EDFilter(\"EDSHyFTSelector\",\n matchByHand = cms.bool(True),\n shyftSelection = cms.PSet(\n muonSrc = cms.InputTag(\"selectedPatMuonsPFlow\"),\n dxy = cms.double(0.02),\n eEt = cms.double(30.0),\n eleEtaMaxLoose = cms.double(2.5),\n tauTrig = cms.string('butz'),\n unclMetScale = cms.double(0.0),\n jecPayloads = cms.vstring('Jec12_V2_L1FastJet_AK5PFchs.txt', \n 'Jec12_V2_L2Relative_AK5PFchs.txt', \n 'Jec12_V2_L3Absolute_AK5PFchs.txt', \n 'Jec12_V2_L2L3Residual_AK5PFchs.txt', \n 'Jec12_V2_Uncertainty_AK5PFchs.txt'),\n muPtMin = cms.double(30.0),\n rawJetPtCut = cms.double(0.0),\n eleEtaMax = cms.double(2.4),\n useNoPFIso = cms.bool(True),\n elDcot = cms.double(0.02),\n rhoSrc = cms.InputTag(\"kt6PFJets\",\"rho\"),\n jetScale = cms.double(0.0),\n trigSrc = cms.InputTag(\"patTriggerEvent\"),\n ePlusJets = cms.bool(True),\n metSrc = cms.InputTag(\"patMETs\"),\n pfCandidateMap = cms.InputTag(\"particleFlow\",\"electrons\"),\n pvSelector = cms.PSet(\n maxZ = cms.double(24.0),\n minNdof = cms.double(4.0),\n NPV = cms.int32(1),\n maxRho = cms.double(2.0),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n tauEtaMax = cms.double(2.4),\n elDist = cms.double(0.02),\n jetSmear = cms.double(0.0),\n tauPtMin = cms.double(20.0),\n useData = cms.bool(True),\n electronIdVeto = cms.PSet(\n vtxFitConv = cms.bool(True),\n sihih_EE = cms.double(0.03),\n sihih_EB = cms.double(0.01),\n ooemoop_EB = cms.double(0.05),\n ooemoop_EE = cms.double(0.05),\n d0_EB = cms.double(0.02),\n d0_EE = cms.double(0.02),\n version = cms.string('VETO'),\n deta_EB = cms.double(0.004),\n deta_EE = cms.double(0.005),\n rhoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n dZ_EB = cms.double(0.1),\n dphi_EB = cms.double(0.03),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\"),\n dZ_EE = cms.double(0.1),\n mHits = cms.int32(0),\n reliso_EE = cms.double(0.1),\n cutsToIgnore = cms.vstring(''),\n 
reliso_EB = cms.double(0.1),\n hoe_EE = cms.double(0.01),\n hoe_EB = cms.double(0.12),\n dphi_EE = cms.double(0.02)\n ),\n pfEleSrc = cms.InputTag(\"particleFlow\"),\n cutsToIgnore = cms.vstring('Trigger', \n 'Dilepton Veto'),\n tauSrc = cms.InputTag(\"selectedPatTausPFlow\"),\n muEtaMax = cms.double(2.1),\n muJetDR = cms.double(0.3),\n electronSrc = cms.InputTag(\"selectedPatElectronsPFlow\"),\n eleEtMinLoose = cms.double(15.0),\n eleJetDR = cms.double(0.5),\n muPtMinLoose = cms.double(10.0),\n jetEtaMax = cms.double(2.4),\n muEtaMaxLoose = cms.double(2.5),\n useNoID = cms.bool(True),\n eleEtMin = cms.double(20.0),\n ePtScale = cms.double(0.0),\n muPlusJets = cms.bool(False),\n eRelIso = cms.double(0.1),\n vertexCut = cms.double(1.0),\n ePtUncertaintyEE = cms.double(0.025),\n rhoIsoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n eleTrig = cms.string('HLT_Ele10_LW_L1R'),\n muRelIso = cms.double(0.125),\n jetPtMin = cms.double(30.0),\n minJets = cms.int32(1),\n jetUncertainty = cms.double(0.0),\n muTrig = cms.string('HLT_Mu9'),\n jetSrc = cms.InputTag(\"goodPatJetsPFlow\"),\n identifier = cms.string('Good PAT PF'),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n ePlusJets = cms.bool(True),\n muPlusJuets = cms.bool(False)\n)\n\n\nprocess.pfTupleMuCA8Pruned = cms.EDFilter(\"EDSHyFTSelector\",\n matchByHand = cms.bool(True),\n shyftSelection = cms.PSet(\n muonSrc = cms.InputTag(\"selectedPatMuonsPFlow\"),\n dxy = cms.double(0.02),\n eEt = cms.double(30.0),\n eleEtaMaxLoose = cms.double(2.5),\n tauTrig = cms.string('butz'),\n unclMetScale = cms.double(0.0),\n jecPayloads = cms.vstring('Jec12_V2_L1FastJet_AK5PFchs.txt', \n 'Jec12_V2_L2Relative_AK5PFchs.txt', \n 'Jec12_V2_L3Absolute_AK5PFchs.txt', \n 'Jec12_V2_L2L3Residual_AK5PFchs.txt', \n 'Jec12_V2_Uncertainty_AK5PFchs.txt'),\n muPtMin = cms.double(30.0),\n rawJetPtCut = cms.double(0.0),\n eleEtaMax = cms.double(2.4),\n useNoPFIso = cms.bool(True),\n elDcot = cms.double(0.02),\n rhoSrc = cms.InputTag(\"kt6PFJets\",\"rho\"),\n jetScale = cms.double(0.0),\n trigSrc = cms.InputTag(\"patTriggerEvent\"),\n ePlusJets = cms.bool(True),\n metSrc = cms.InputTag(\"patMETs\"),\n pfCandidateMap = cms.InputTag(\"particleFlow\",\"electrons\"),\n pvSelector = cms.PSet(\n maxZ = cms.double(24.0),\n minNdof = cms.double(4.0),\n NPV = cms.int32(1),\n maxRho = cms.double(2.0),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n tauEtaMax = cms.double(2.4),\n elDist = cms.double(0.02),\n jetSmear = cms.double(0.0),\n tauPtMin = cms.double(20.0),\n useData = cms.bool(True),\n electronIdVeto = cms.PSet(\n vtxFitConv = cms.bool(True),\n sihih_EE = cms.double(0.03),\n sihih_EB = cms.double(0.01),\n ooemoop_EB = cms.double(0.05),\n ooemoop_EE = cms.double(0.05),\n d0_EB = cms.double(0.02),\n d0_EE = cms.double(0.02),\n version = cms.string('VETO'),\n deta_EB = cms.double(0.004),\n deta_EE = cms.double(0.005),\n rhoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n dZ_EB = cms.double(0.1),\n dphi_EB = cms.double(0.03),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\"),\n dZ_EE = cms.double(0.1),\n mHits = cms.int32(0),\n reliso_EE = cms.double(0.1),\n cutsToIgnore = cms.vstring(''),\n reliso_EB = cms.double(0.1),\n hoe_EE = cms.double(0.01),\n hoe_EB = cms.double(0.12),\n dphi_EE = cms.double(0.02)\n ),\n pfEleSrc = cms.InputTag(\"particleFlow\"),\n cutsToIgnore = cms.vstring('Trigger', \n 'Dilepton Veto'),\n tauSrc = cms.InputTag(\"selectedPatTausPFlow\"),\n muEtaMax = cms.double(2.1),\n muJetDR = cms.double(0.3),\n electronSrc = 
cms.InputTag(\"selectedPatElectronsPFlow\"),\n eleEtMinLoose = cms.double(15.0),\n eleJetDR = cms.double(0.5),\n muPtMinLoose = cms.double(10.0),\n jetEtaMax = cms.double(2.4),\n muEtaMaxLoose = cms.double(2.5),\n useNoID = cms.bool(True),\n eleEtMin = cms.double(20.0),\n ePtScale = cms.double(0.0),\n muPlusJets = cms.bool(False),\n eRelIso = cms.double(0.1),\n vertexCut = cms.double(1.0),\n ePtUncertaintyEE = cms.double(0.025),\n rhoIsoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n eleTrig = cms.string('HLT_Ele10_LW_L1R'),\n muRelIso = cms.double(0.125),\n jetPtMin = cms.double(30.0),\n minJets = cms.int32(1),\n jetUncertainty = cms.double(0.0),\n muTrig = cms.string('HLT_Mu9'),\n jetSrc = cms.InputTag(\"goodPatJetsCA8PrunedPF\"),\n identifier = cms.string('CA8 Prunded PF'),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n ePlusJets = cms.bool(False),\n muPlusJets = cms.bool(True)\n)\n\n\nprocess.pfTupleMuSelectedJets = cms.EDFilter(\"EDSHyFTSelector\",\n matchByHand = cms.bool(True),\n shyftSelection = cms.PSet(\n muonSrc = cms.InputTag(\"selectedPatMuonsPFlow\"),\n dxy = cms.double(0.02),\n eEt = cms.double(30.0),\n eleEtaMaxLoose = cms.double(2.5),\n tauTrig = cms.string('butz'),\n unclMetScale = cms.double(0.0),\n jecPayloads = cms.vstring('Jec12_V2_L1FastJet_AK5PFchs.txt', \n 'Jec12_V2_L2Relative_AK5PFchs.txt', \n 'Jec12_V2_L3Absolute_AK5PFchs.txt', \n 'Jec12_V2_L2L3Residual_AK5PFchs.txt', \n 'Jec12_V2_Uncertainty_AK5PFchs.txt'),\n muPtMin = cms.double(30.0),\n rawJetPtCut = cms.double(0.0),\n eleEtaMax = cms.double(2.4),\n useNoPFIso = cms.bool(True),\n elDcot = cms.double(0.02),\n rhoSrc = cms.InputTag(\"kt6PFJets\",\"rho\"),\n jetScale = cms.double(0.0),\n trigSrc = cms.InputTag(\"patTriggerEvent\"),\n ePlusJets = cms.bool(True),\n metSrc = cms.InputTag(\"patMETs\"),\n pfCandidateMap = cms.InputTag(\"particleFlow\",\"electrons\"),\n pvSelector = cms.PSet(\n maxZ = cms.double(24.0),\n minNdof = cms.double(4.0),\n NPV = cms.int32(1),\n maxRho = cms.double(2.0),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n tauEtaMax = cms.double(2.4),\n elDist = cms.double(0.02),\n jetSmear = cms.double(0.0),\n tauPtMin = cms.double(20.0),\n useData = cms.bool(True),\n electronIdVeto = cms.PSet(\n vtxFitConv = cms.bool(True),\n sihih_EE = cms.double(0.03),\n sihih_EB = cms.double(0.01),\n ooemoop_EB = cms.double(0.05),\n ooemoop_EE = cms.double(0.05),\n d0_EB = cms.double(0.02),\n d0_EE = cms.double(0.02),\n version = cms.string('VETO'),\n deta_EB = cms.double(0.004),\n deta_EE = cms.double(0.005),\n rhoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n dZ_EB = cms.double(0.1),\n dphi_EB = cms.double(0.03),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\"),\n dZ_EE = cms.double(0.1),\n mHits = cms.int32(0),\n reliso_EE = cms.double(0.1),\n cutsToIgnore = cms.vstring(''),\n reliso_EB = cms.double(0.1),\n hoe_EE = cms.double(0.01),\n hoe_EB = cms.double(0.12),\n dphi_EE = cms.double(0.02)\n ),\n pfEleSrc = cms.InputTag(\"particleFlow\"),\n cutsToIgnore = cms.vstring('Trigger', \n 'Dilepton Veto'),\n tauSrc = cms.InputTag(\"selectedPatTausPFlow\"),\n muEtaMax = cms.double(2.1),\n muJetDR = cms.double(0.3),\n electronSrc = cms.InputTag(\"selectedPatElectronsPFlow\"),\n eleEtMinLoose = cms.double(15.0),\n eleJetDR = cms.double(0.5),\n muPtMinLoose = cms.double(10.0),\n jetEtaMax = cms.double(2.4),\n muEtaMaxLoose = cms.double(2.5),\n useNoID = cms.bool(True),\n eleEtMin = cms.double(20.0),\n ePtScale = cms.double(0.0),\n muPlusJets = cms.bool(False),\n eRelIso = 
cms.double(0.1),\n vertexCut = cms.double(1.0),\n ePtUncertaintyEE = cms.double(0.025),\n rhoIsoSrc = cms.InputTag(\"kt6PFJetsForIsolation\",\"rho\"),\n eleTrig = cms.string('HLT_Ele10_LW_L1R'),\n muRelIso = cms.double(0.125),\n jetPtMin = cms.double(30.0),\n minJets = cms.int32(1),\n jetUncertainty = cms.double(0.0),\n muTrig = cms.string('HLT_Mu9'),\n jetSrc = cms.InputTag(\"goodPatJetsPFlow\"),\n identifier = cms.string('Good PAT PF'),\n pvSrc = cms.InputTag(\"offlinePrimaryVertices\")\n ),\n ePlusJets = cms.bool(False),\n muPlusJets = cms.bool(True)\n)\n\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string('edmTest.root'),\n SelectEvents = cms.untracked.PSet(\n SelectEvents = cms.vstring('p0', \n 'p1', \n 'p2', \n 'p3')\n ),\n outputCommands = cms.untracked.vstring('drop *', \n 'keep *_pfTuple*_*_*', \n 'keep *_patTriggerEvent_*_*', \n 'keep *_patTrigger_*_*', \n 'keep *_offlinePrimaryVertices_*_*', \n 'keep *_caPrunedPFlow_SubJets_*')\n)\n\n\nprocess.patTriggerDefaultSequence = cms.Sequence(process.patTrigger+process.patTriggerEvent)\n\n\nprocess.patTriggerMatchEmbedderDefaultSequence = cms.Sequence(process.cleanPatPhotonsTriggerMatch+process.cleanPatElectronsTriggerMatch+process.cleanPatMuonsTriggerMatch+process.cleanPatTausTriggerMatch+process.cleanPatJetsTriggerMatch+process.patMETsTriggerMatch)\n\n\nprocess.triggerMatchingDefaultSequence = cms.Sequence(process.cleanMuonTriggerMatchHLTMu20+process.cleanMuonTriggerMatchHLTDoubleMu6+process.cleanPhotonTriggerMatchHLTPhoton26IsoVLPhoton18+process.cleanElectronTriggerMatchHLTEle27CaloIdVTCaloIsoTTrkIdTTrkIsoT+process.cleanTauTriggerMatchHLTDoubleIsoPFTau20Trk5+process.cleanJetTriggerMatchHLTJet240+process.metTriggerMatchHLTMET100+process.cleanMuonTriggerMatchHLTMu17CentralJet30+process.cleanJetTriggerMatchHLTMu17CentralJet30)\n\n\nprocess.p0 = cms.Path(process.patTriggerDefaultSequence)\n\n\nprocess.p1 = cms.Path(process.pfTupleEleCA8Pruned)\n\n\nprocess.p2 = cms.Path(process.pfTupleMuCA8Pruned)\n\n\nprocess.p3 = cms.Path()\n\n\nprocess.p4 = cms.Path(process.pfTupleEleSelectedJets)\n\n\nprocess.p5 = cms.Path(process.pfTupleMuSelectedJets)\n\n\nprocess.outpath = cms.EndPath(process.out)\n\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n suppressInfo = cms.untracked.vstring(),\n debugs = cms.untracked.PSet(\n placeholder = cms.untracked.bool(True)\n ),\n suppressDebug = cms.untracked.vstring(),\n cout = cms.untracked.PSet(\n placeholder = cms.untracked.bool(True)\n ),\n cerr_stats = cms.untracked.PSet(\n threshold = cms.untracked.string('WARNING'),\n output = cms.untracked.string('cerr'),\n optionalPSet = cms.untracked.bool(True)\n ),\n warnings = cms.untracked.PSet(\n placeholder = cms.untracked.bool(True)\n ),\n default = cms.untracked.PSet(\n\n ),\n statistics = cms.untracked.vstring('cerr_stats'),\n cerr = cms.untracked.PSet(\n INFO = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n noTimeStamps = cms.untracked.bool(False),\n FwkReport = cms.untracked.PSet(\n reportEvery = cms.untracked.int32(100),\n optionalPSet = cms.untracked.bool(True),\n limit = cms.untracked.int32(10000000)\n ),\n default = cms.untracked.PSet(\n limit = cms.untracked.int32(10000000)\n ),\n Root_NoDictionary = cms.untracked.PSet(\n optionalPSet = cms.untracked.bool(True),\n limit = cms.untracked.int32(0)\n ),\n threshold = cms.untracked.string('INFO'),\n FwkJob = cms.untracked.PSet(\n optionalPSet = cms.untracked.bool(True),\n limit = cms.untracked.int32(1)\n ),\n FwkSummary = cms.untracked.PSet(\n 
reportEvery = cms.untracked.int32(1),\n optionalPSet = cms.untracked.bool(True),\n limit = cms.untracked.int32(10000000)\n ),\n optionalPSet = cms.untracked.bool(True),\n ERROR = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n )\n ),\n FrameworkJobReport = cms.untracked.PSet(\n default = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n optionalPSet = cms.untracked.bool(True),\n FwkJob = cms.untracked.PSet(\n optionalPSet = cms.untracked.bool(True),\n limit = cms.untracked.int32(10000000)\n )\n ),\n suppressWarning = cms.untracked.vstring('patTrigger'),\n errors = cms.untracked.PSet(\n placeholder = cms.untracked.bool(True)\n ),\n destinations = cms.untracked.vstring('warnings', \n 'errors', \n 'infos', \n 'debugs', \n 'cout', \n 'cerr'),\n debugModules = cms.untracked.vstring(),\n infos = cms.untracked.PSet(\n optionalPSet = cms.untracked.bool(True),\n Root_NoDictionary = cms.untracked.PSet(\n optionalPSet = cms.untracked.bool(True),\n limit = cms.untracked.int32(0)\n ),\n placeholder = cms.untracked.bool(True)\n ),\n categories = cms.untracked.vstring('FwkJob', \n 'FwkReport', \n 'FwkSummary', \n 'Root_NoDictionary'),\n fwkJobReports = cms.untracked.vstring('FrameworkJobReport')\n)\n\n\nprocess.CSCGeometryESModule = cms.ESProducer(\"CSCGeometryESModule\",\n appendToDataLabel = cms.string(''),\n useDDD = cms.bool(True),\n debugV = cms.untracked.bool(False),\n useGangedStripsInME1a = cms.bool(True),\n alignmentsLabel = cms.string(''),\n useOnlyWiresInME1a = cms.bool(False),\n useRealWireGeometry = cms.bool(True),\n useCentreTIOffsets = cms.bool(False),\n applyAlignment = cms.bool(True)\n)\n\n\nprocess.CaloGeometryBuilder = cms.ESProducer(\"CaloGeometryBuilder\",\n SelectedCalos = cms.vstring('HCAL', \n 'ZDC', \n 'CASTOR', \n 'EcalBarrel', \n 'EcalEndcap', \n 'EcalPreshower', \n 'TOWER')\n)\n\n\nprocess.CaloTopologyBuilder = cms.ESProducer(\"CaloTopologyBuilder\")\n\n\nprocess.CaloTowerHardcodeGeometryEP = cms.ESProducer(\"CaloTowerHardcodeGeometryEP\")\n\n\nprocess.CastorDbProducer = cms.ESProducer(\"CastorDbProducer\")\n\n\nprocess.CastorHardcodeGeometryEP = cms.ESProducer(\"CastorHardcodeGeometryEP\")\n\n\nprocess.DTGeometryESModule = cms.ESProducer(\"DTGeometryESModule\",\n appendToDataLabel = cms.string(''),\n fromDDD = cms.bool(True),\n applyAlignment = cms.bool(True),\n alignmentsLabel = cms.string('')\n)\n\n\nprocess.EcalBarrelGeometryEP = cms.ESProducer(\"EcalBarrelGeometryEP\",\n applyAlignment = cms.bool(False)\n)\n\n\nprocess.EcalElectronicsMappingBuilder = cms.ESProducer(\"EcalElectronicsMappingBuilder\")\n\n\nprocess.EcalEndcapGeometryEP = cms.ESProducer(\"EcalEndcapGeometryEP\",\n applyAlignment = cms.bool(False)\n)\n\n\nprocess.EcalLaserCorrectionService = cms.ESProducer(\"EcalLaserCorrectionService\")\n\n\nprocess.EcalPreshowerGeometryEP = cms.ESProducer(\"EcalPreshowerGeometryEP\",\n applyAlignment = cms.bool(False)\n)\n\n\nprocess.EcalTrigTowerConstituentsMapBuilder = cms.ESProducer(\"EcalTrigTowerConstituentsMapBuilder\",\n MapFile = cms.untracked.string('Geometry/EcalMapping/data/EndCap_TTMap.txt')\n)\n\n\nprocess.GlobalTrackingGeometryESProducer = cms.ESProducer(\"GlobalTrackingGeometryESProducer\")\n\n\nprocess.HcalHardcodeGeometryEP = cms.ESProducer(\"HcalHardcodeGeometryEP\")\n\n\nprocess.HcalTopologyIdealEP = cms.ESProducer(\"HcalTopologyIdealEP\")\n\n\nprocess.MuonDetLayerGeometryESProducer = cms.ESProducer(\"MuonDetLayerGeometryESProducer\")\n\n\nprocess.MuonNumberingInitialization = 
cms.ESProducer(\"MuonNumberingInitialization\")\n\n\nprocess.ParametrizedMagneticFieldProducer = cms.ESProducer(\"ParametrizedMagneticFieldProducer\",\n version = cms.string('OAE_1103l_071212'),\n parameters = cms.PSet(\n BValue = cms.string('3_8T')\n ),\n label = cms.untracked.string('parametrizedField')\n)\n\n\nprocess.RPCGeometryESModule = cms.ESProducer(\"RPCGeometryESModule\",\n useDDD = cms.untracked.bool(True),\n compatibiltyWith11 = cms.untracked.bool(True)\n)\n\n\nprocess.SiStripRecHitMatcherESProducer = cms.ESProducer(\"SiStripRecHitMatcherESProducer\",\n ComponentName = cms.string('StandardMatcher'),\n NSigmaInside = cms.double(3.0)\n)\n\n\nprocess.StripCPEfromTrackAngleESProducer = cms.ESProducer(\"StripCPEESProducer\",\n ComponentName = cms.string('StripCPEfromTrackAngle')\n)\n\n\nprocess.TrackerDigiGeometryESModule = cms.ESProducer(\"TrackerDigiGeometryESModule\",\n appendToDataLabel = cms.string(''),\n fromDDD = cms.bool(True),\n applyAlignment = cms.bool(True),\n alignmentsLabel = cms.string('')\n)\n\n\nprocess.TrackerGeometricDetESModule = cms.ESProducer(\"TrackerGeometricDetESModule\",\n fromDDD = cms.bool(True)\n)\n\n\nprocess.TrackerRecoGeometryESProducer = cms.ESProducer(\"TrackerRecoGeometryESProducer\")\n\n\nprocess.VolumeBasedMagneticFieldESProducer = cms.ESProducer(\"VolumeBasedMagneticFieldESProducer\",\n scalingVolumes = cms.vint32(14100, 14200, 17600, 17800, 17900, \n 18100, 18300, 18400, 18600, 23100, \n 23300, 23400, 23600, 23800, 23900, \n 24100, 28600, 28800, 28900, 29100, \n 29300, 29400, 29600, 28609, 28809, \n 28909, 29109, 29309, 29409, 29609, \n 28610, 28810, 28910, 29110, 29310, \n 29410, 29610, 28611, 28811, 28911, \n 29111, 29311, 29411, 29611),\n scalingFactors = cms.vdouble(1, 1, 0.994, 1.004, 1.004, \n 1.005, 1.004, 1.004, 0.994, 0.965, \n 0.958, 0.958, 0.953, 0.958, 0.958, \n 0.965, 0.918, 0.924, 0.924, 0.906, \n 0.924, 0.924, 0.918, 0.991, 0.998, \n 0.998, 0.978, 0.998, 0.998, 0.991, \n 0.991, 0.998, 0.998, 0.978, 0.998, \n 0.998, 0.991, 0.991, 0.998, 0.998, \n 0.978, 0.998, 0.998, 0.991),\n overrideMasterSector = cms.bool(False),\n useParametrizedTrackerField = cms.bool(True),\n label = cms.untracked.string(''),\n version = cms.string('grid_1103l_090322_3_8t'),\n debugBuilder = cms.untracked.bool(False),\n paramLabel = cms.string('parametrizedField'),\n geometryVersion = cms.int32(90322),\n cacheLastVolume = cms.untracked.bool(True)\n)\n\n\nprocess.ZdcHardcodeGeometryEP = cms.ESProducer(\"ZdcHardcodeGeometryEP\")\n\n\nprocess.fakeForIdealAlignment = cms.ESProducer(\"FakeAlignmentProducer\",\n appendToDataLabel = cms.string('fakeForIdeal')\n)\n\n\nprocess.hcal_db_producer = cms.ESProducer(\"HcalDbProducer\",\n file = cms.untracked.string(''),\n dump = cms.untracked.vstring('')\n)\n\n\nprocess.idealForDigiCSCGeometry = cms.ESProducer(\"CSCGeometryESModule\",\n appendToDataLabel = cms.string('idealForDigi'),\n useDDD = cms.bool(True),\n debugV = cms.untracked.bool(False),\n useGangedStripsInME1a = cms.bool(True),\n alignmentsLabel = cms.string('fakeForIdeal'),\n useOnlyWiresInME1a = cms.bool(False),\n useRealWireGeometry = cms.bool(True),\n useCentreTIOffsets = cms.bool(False),\n applyAlignment = cms.bool(False)\n)\n\n\nprocess.idealForDigiDTGeometry = cms.ESProducer(\"DTGeometryESModule\",\n appendToDataLabel = cms.string('idealForDigi'),\n fromDDD = cms.bool(True),\n applyAlignment = cms.bool(False),\n alignmentsLabel = cms.string('fakeForIdeal')\n)\n\n\nprocess.idealForDigiTrackerGeometry = cms.ESProducer(\"TrackerDigiGeometryESModule\",\n 
appendToDataLabel = cms.string('idealForDigi'),\n fromDDD = cms.bool(True),\n applyAlignment = cms.bool(False),\n alignmentsLabel = cms.string('fakeForIdeal')\n)\n\n\nprocess.siPixelQualityESProducer = cms.ESProducer(\"SiPixelQualityESProducer\",\n ListOfRecordToMerge = cms.VPSet(cms.PSet(\n record = cms.string('SiPixelQualityFromDbRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('SiPixelDetVOffRcd'),\n tag = cms.string('')\n ))\n)\n\n\nprocess.siStripGainESProducer = cms.ESProducer(\"SiStripGainESProducer\",\n printDebug = cms.untracked.bool(False),\n appendToDataLabel = cms.string(''),\n APVGain = cms.VPSet(cms.PSet(\n Record = cms.string('SiStripApvGainRcd'),\n NormalizationFactor = cms.untracked.double(1.0),\n Label = cms.untracked.string('')\n ), \n cms.PSet(\n Record = cms.string('SiStripApvGain2Rcd'),\n NormalizationFactor = cms.untracked.double(1.0),\n Label = cms.untracked.string('')\n )),\n AutomaticNormalization = cms.bool(False)\n)\n\n\nprocess.siStripLorentzAngleDepESProducer = cms.ESProducer(\"SiStripLorentzAngleDepESProducer\",\n LatencyRecord = cms.PSet(\n record = cms.string('SiStripLatencyRcd'),\n label = cms.untracked.string('')\n ),\n LorentzAngleDeconvMode = cms.PSet(\n record = cms.string('SiStripLorentzAngleRcd'),\n label = cms.untracked.string('deconvolution')\n ),\n LorentzAnglePeakMode = cms.PSet(\n record = cms.string('SiStripLorentzAngleRcd'),\n label = cms.untracked.string('peak')\n )\n)\n\n\nprocess.siStripQualityESProducer = cms.ESProducer(\"SiStripQualityESProducer\",\n appendToDataLabel = cms.string(''),\n PrintDebugOutput = cms.bool(False),\n ThresholdForReducedGranularity = cms.double(0.3),\n UseEmptyRunInfo = cms.bool(False),\n ReduceGranularity = cms.bool(False),\n ListOfRecordToMerge = cms.VPSet(cms.PSet(\n record = cms.string('SiStripDetVOffRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('SiStripDetCablingRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('RunInfoRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('SiStripBadChannelRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('SiStripBadFiberRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('SiStripBadModuleRcd'),\n tag = cms.string('')\n ), \n cms.PSet(\n record = cms.string('SiStripBadStripRcd'),\n tag = cms.string('')\n ))\n)\n\n\nprocess.sistripconn = cms.ESProducer(\"SiStripConnectivity\")\n\n\nprocess.GlobalTag = cms.ESSource(\"PoolDBESSource\",\n DBParameters = cms.PSet(\n authenticationPath = cms.untracked.string(''),\n enableReadOnlySessionOnUpdateConnection = cms.untracked.bool(False),\n idleConnectionCleanupPeriod = cms.untracked.int32(10),\n messageLevel = cms.untracked.int32(0),\n enablePoolAutomaticCleanUp = cms.untracked.bool(False),\n enableConnectionSharing = cms.untracked.bool(True),\n connectionRetrialTimeOut = cms.untracked.int32(60),\n connectionTimeOut = cms.untracked.int32(60),\n authenticationSystem = cms.untracked.int32(0),\n connectionRetrialPeriod = cms.untracked.int32(10)\n ),\n BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),\n toGet = cms.VPSet(),\n connect = cms.string('frontier://FrontierProd/CMS_COND_31X_GLOBALTAG'),\n globaltag = cms.string('GR_P_V40_AN1::All')\n)\n\n\nprocess.XMLIdealGeometryESSource = cms.ESSource(\"XMLIdealGeometryESSource\",\n geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml', \n 'Geometry/CMSCommonData/data/rotations.xml', \n 'Geometry/CMSCommonData/data/normal/cmsextent.xml', \n 
'Geometry/CMSCommonData/data/cms.xml', \n 'Geometry/CMSCommonData/data/cmsMother.xml', \n 'Geometry/CMSCommonData/data/cmsTracker.xml', \n 'Geometry/CMSCommonData/data/caloBase.xml', \n 'Geometry/CMSCommonData/data/cmsCalo.xml', \n 'Geometry/CMSCommonData/data/muonBase.xml', \n 'Geometry/CMSCommonData/data/cmsMuon.xml', \n 'Geometry/CMSCommonData/data/mgnt.xml', \n 'Geometry/CMSCommonData/data/beampipe.xml', \n 'Geometry/CMSCommonData/data/cmsBeam.xml', \n 'Geometry/CMSCommonData/data/muonMB.xml', \n 'Geometry/CMSCommonData/data/muonMagnet.xml', \n 'Geometry/TrackerCommonData/data/pixfwdMaterials.xml', \n 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPlaq.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPlaq1x2.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPlaq1x5.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPlaq2x3.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPlaq2x4.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPlaq2x5.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPanelBase.xml', \n 'Geometry/TrackerCommonData/data/pixfwdPanel.xml', \n 'Geometry/TrackerCommonData/data/pixfwdBlade.xml', \n 'Geometry/TrackerCommonData/data/pixfwdNipple.xml', \n 'Geometry/TrackerCommonData/data/pixfwdDisk.xml', \n 'Geometry/TrackerCommonData/data/pixfwdCylinder.xml', \n 'Geometry/TrackerCommonData/data/pixfwd.xml', \n 'Geometry/TrackerCommonData/data/pixbarmaterial.xml', \n 'Geometry/TrackerCommonData/data/pixbarladder.xml', \n 'Geometry/TrackerCommonData/data/pixbarladderfull.xml', \n 'Geometry/TrackerCommonData/data/pixbarladderhalf.xml', \n 'Geometry/TrackerCommonData/data/pixbarlayer.xml', \n 'Geometry/TrackerCommonData/data/pixbarlayer0.xml', \n 'Geometry/TrackerCommonData/data/pixbarlayer1.xml', \n 'Geometry/TrackerCommonData/data/pixbarlayer2.xml', \n 'Geometry/TrackerCommonData/data/pixbar.xml', \n 'Geometry/TrackerCommonData/data/tibtidcommonmaterial.xml', \n 'Geometry/TrackerCommonData/data/tibmaterial.xml', \n 'Geometry/TrackerCommonData/data/tibmodpar.xml', \n 'Geometry/TrackerCommonData/data/tibmodule0.xml', \n 'Geometry/TrackerCommonData/data/tibmodule0a.xml', \n 'Geometry/TrackerCommonData/data/tibmodule0b.xml', \n 'Geometry/TrackerCommonData/data/tibmodule2.xml', \n 'Geometry/TrackerCommonData/data/tibstringpar.xml', \n 'Geometry/TrackerCommonData/data/tibstring0ll.xml', \n 'Geometry/TrackerCommonData/data/tibstring0lr.xml', \n 'Geometry/TrackerCommonData/data/tibstring0ul.xml', \n 'Geometry/TrackerCommonData/data/tibstring0ur.xml', \n 'Geometry/TrackerCommonData/data/tibstring0.xml', \n 'Geometry/TrackerCommonData/data/tibstring1ll.xml', \n 'Geometry/TrackerCommonData/data/tibstring1lr.xml', \n 'Geometry/TrackerCommonData/data/tibstring1ul.xml', \n 'Geometry/TrackerCommonData/data/tibstring1ur.xml', \n 'Geometry/TrackerCommonData/data/tibstring1.xml', \n 'Geometry/TrackerCommonData/data/tibstring2ll.xml', \n 'Geometry/TrackerCommonData/data/tibstring2lr.xml', \n 'Geometry/TrackerCommonData/data/tibstring2ul.xml', \n 'Geometry/TrackerCommonData/data/tibstring2ur.xml', \n 'Geometry/TrackerCommonData/data/tibstring2.xml', \n 'Geometry/TrackerCommonData/data/tibstring3ll.xml', \n 'Geometry/TrackerCommonData/data/tibstring3lr.xml', \n 'Geometry/TrackerCommonData/data/tibstring3ul.xml', \n 'Geometry/TrackerCommonData/data/tibstring3ur.xml', \n 'Geometry/TrackerCommonData/data/tibstring3.xml', \n 'Geometry/TrackerCommonData/data/tiblayerpar.xml', \n 'Geometry/TrackerCommonData/data/tiblayer0.xml', \n 
'Geometry/TrackerCommonData/data/tiblayer1.xml', \n 'Geometry/TrackerCommonData/data/tiblayer2.xml', \n 'Geometry/TrackerCommonData/data/tiblayer3.xml', \n 'Geometry/TrackerCommonData/data/tib.xml', \n 'Geometry/TrackerCommonData/data/tidmaterial.xml', \n 'Geometry/TrackerCommonData/data/tidmodpar.xml', \n 'Geometry/TrackerCommonData/data/tidmodule0.xml', \n 'Geometry/TrackerCommonData/data/tidmodule0r.xml', \n 'Geometry/TrackerCommonData/data/tidmodule0l.xml', \n 'Geometry/TrackerCommonData/data/tidmodule1.xml', \n 'Geometry/TrackerCommonData/data/tidmodule1r.xml', \n 'Geometry/TrackerCommonData/data/tidmodule1l.xml', \n 'Geometry/TrackerCommonData/data/tidmodule2.xml', \n 'Geometry/TrackerCommonData/data/tidringpar.xml', \n 'Geometry/TrackerCommonData/data/tidring0.xml', \n 'Geometry/TrackerCommonData/data/tidring0f.xml', \n 'Geometry/TrackerCommonData/data/tidring0b.xml', \n 'Geometry/TrackerCommonData/data/tidring1.xml', \n 'Geometry/TrackerCommonData/data/tidring1f.xml', \n 'Geometry/TrackerCommonData/data/tidring1b.xml', \n 'Geometry/TrackerCommonData/data/tidring2.xml', \n 'Geometry/TrackerCommonData/data/tid.xml', \n 'Geometry/TrackerCommonData/data/tidf.xml', \n 'Geometry/TrackerCommonData/data/tidb.xml', \n 'Geometry/TrackerCommonData/data/tibtidservices.xml', \n 'Geometry/TrackerCommonData/data/tibtidservicesf.xml', \n 'Geometry/TrackerCommonData/data/tibtidservicesb.xml', \n 'Geometry/TrackerCommonData/data/tobmaterial.xml', \n 'Geometry/TrackerCommonData/data/tobmodpar.xml', \n 'Geometry/TrackerCommonData/data/tobmodule0.xml', \n 'Geometry/TrackerCommonData/data/tobmodule2.xml', \n 'Geometry/TrackerCommonData/data/tobmodule4.xml', \n 'Geometry/TrackerCommonData/data/tobrodpar.xml', \n 'Geometry/TrackerCommonData/data/tobrod0c.xml', \n 'Geometry/TrackerCommonData/data/tobrod0l.xml', \n 'Geometry/TrackerCommonData/data/tobrod0h.xml', \n 'Geometry/TrackerCommonData/data/tobrod0.xml', \n 'Geometry/TrackerCommonData/data/tobrod1l.xml', \n 'Geometry/TrackerCommonData/data/tobrod1h.xml', \n 'Geometry/TrackerCommonData/data/tobrod1.xml', \n 'Geometry/TrackerCommonData/data/tobrod2c.xml', \n 'Geometry/TrackerCommonData/data/tobrod2l.xml', \n 'Geometry/TrackerCommonData/data/tobrod2h.xml', \n 'Geometry/TrackerCommonData/data/tobrod2.xml', \n 'Geometry/TrackerCommonData/data/tobrod3l.xml', \n 'Geometry/TrackerCommonData/data/tobrod3h.xml', \n 'Geometry/TrackerCommonData/data/tobrod3.xml', \n 'Geometry/TrackerCommonData/data/tobrod4c.xml', \n 'Geometry/TrackerCommonData/data/tobrod4l.xml', \n 'Geometry/TrackerCommonData/data/tobrod4h.xml', \n 'Geometry/TrackerCommonData/data/tobrod4.xml', \n 'Geometry/TrackerCommonData/data/tobrod5l.xml', \n 'Geometry/TrackerCommonData/data/tobrod5h.xml', \n 'Geometry/TrackerCommonData/data/tobrod5.xml', \n 'Geometry/TrackerCommonData/data/tob.xml', \n 'Geometry/TrackerCommonData/data/tecmaterial.xml', \n 'Geometry/TrackerCommonData/data/tecmodpar.xml', \n 'Geometry/TrackerCommonData/data/tecmodule0.xml', \n 'Geometry/TrackerCommonData/data/tecmodule0r.xml', \n 'Geometry/TrackerCommonData/data/tecmodule0s.xml', \n 'Geometry/TrackerCommonData/data/tecmodule1.xml', \n 'Geometry/TrackerCommonData/data/tecmodule1r.xml', \n 'Geometry/TrackerCommonData/data/tecmodule1s.xml', \n 'Geometry/TrackerCommonData/data/tecmodule2.xml', \n 'Geometry/TrackerCommonData/data/tecmodule3.xml', \n 'Geometry/TrackerCommonData/data/tecmodule4.xml', \n 'Geometry/TrackerCommonData/data/tecmodule4r.xml', \n 'Geometry/TrackerCommonData/data/tecmodule4s.xml', \n 
'Geometry/TrackerCommonData/data/tecmodule5.xml', \n 'Geometry/TrackerCommonData/data/tecmodule6.xml', \n 'Geometry/TrackerCommonData/data/tecpetpar.xml', \n 'Geometry/TrackerCommonData/data/tecring0.xml', \n 'Geometry/TrackerCommonData/data/tecring1.xml', \n 'Geometry/TrackerCommonData/data/tecring2.xml', \n 'Geometry/TrackerCommonData/data/tecring3.xml', \n 'Geometry/TrackerCommonData/data/tecring4.xml', \n 'Geometry/TrackerCommonData/data/tecring5.xml', \n 'Geometry/TrackerCommonData/data/tecring6.xml', \n 'Geometry/TrackerCommonData/data/tecring0f.xml', \n 'Geometry/TrackerCommonData/data/tecring1f.xml', \n 'Geometry/TrackerCommonData/data/tecring2f.xml', \n 'Geometry/TrackerCommonData/data/tecring3f.xml', \n 'Geometry/TrackerCommonData/data/tecring4f.xml', \n 'Geometry/TrackerCommonData/data/tecring5f.xml', \n 'Geometry/TrackerCommonData/data/tecring6f.xml', \n 'Geometry/TrackerCommonData/data/tecring0b.xml', \n 'Geometry/TrackerCommonData/data/tecring1b.xml', \n 'Geometry/TrackerCommonData/data/tecring2b.xml', \n 'Geometry/TrackerCommonData/data/tecring3b.xml', \n 'Geometry/TrackerCommonData/data/tecring4b.xml', \n 'Geometry/TrackerCommonData/data/tecring5b.xml', \n 'Geometry/TrackerCommonData/data/tecring6b.xml', \n 'Geometry/TrackerCommonData/data/tecpetalf.xml', \n 'Geometry/TrackerCommonData/data/tecpetalb.xml', \n 'Geometry/TrackerCommonData/data/tecpetal0.xml', \n 'Geometry/TrackerCommonData/data/tecpetal0f.xml', \n 'Geometry/TrackerCommonData/data/tecpetal0b.xml', \n 'Geometry/TrackerCommonData/data/tecpetal3.xml', \n 'Geometry/TrackerCommonData/data/tecpetal3f.xml', \n 'Geometry/TrackerCommonData/data/tecpetal3b.xml', \n 'Geometry/TrackerCommonData/data/tecpetal6f.xml', \n 'Geometry/TrackerCommonData/data/tecpetal6b.xml', \n 'Geometry/TrackerCommonData/data/tecpetal8f.xml', \n 'Geometry/TrackerCommonData/data/tecpetal8b.xml', \n 'Geometry/TrackerCommonData/data/tecwheel.xml', \n 'Geometry/TrackerCommonData/data/tecwheela.xml', \n 'Geometry/TrackerCommonData/data/tecwheelb.xml', \n 'Geometry/TrackerCommonData/data/tecwheelc.xml', \n 'Geometry/TrackerCommonData/data/tecwheeld.xml', \n 'Geometry/TrackerCommonData/data/tecwheel6.xml', \n 'Geometry/TrackerCommonData/data/tecservices.xml', \n 'Geometry/TrackerCommonData/data/tecbackplate.xml', \n 'Geometry/TrackerCommonData/data/tec.xml', \n 'Geometry/TrackerCommonData/data/trackermaterial.xml', \n 'Geometry/TrackerCommonData/data/tracker.xml', \n 'Geometry/TrackerCommonData/data/trackerpixbar.xml', \n 'Geometry/TrackerCommonData/data/trackerpixfwd.xml', \n 'Geometry/TrackerCommonData/data/trackertibtidservices.xml', \n 'Geometry/TrackerCommonData/data/trackertib.xml', \n 'Geometry/TrackerCommonData/data/trackertid.xml', \n 'Geometry/TrackerCommonData/data/trackertob.xml', \n 'Geometry/TrackerCommonData/data/trackertec.xml', \n 'Geometry/TrackerCommonData/data/trackerbulkhead.xml', \n 'Geometry/TrackerCommonData/data/trackerother.xml', \n 'Geometry/EcalCommonData/data/eregalgo.xml', \n 'Geometry/EcalCommonData/data/ebalgo.xml', \n 'Geometry/EcalCommonData/data/ebcon.xml', \n 'Geometry/EcalCommonData/data/ebrot.xml', \n 'Geometry/EcalCommonData/data/eecon.xml', \n 'Geometry/EcalCommonData/data/eefixed.xml', \n 'Geometry/EcalCommonData/data/eehier.xml', \n 'Geometry/EcalCommonData/data/eealgo.xml', \n 'Geometry/EcalCommonData/data/escon.xml', \n 'Geometry/EcalCommonData/data/esalgo.xml', \n 'Geometry/EcalCommonData/data/eeF.xml', \n 'Geometry/EcalCommonData/data/eeB.xml', \n 'Geometry/HcalCommonData/data/hcalrotations.xml', \n 
'Geometry/HcalCommonData/data/hcalalgo.xml', \n 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', \n 'Geometry/HcalCommonData/data/hcalendcapalgo.xml', \n 'Geometry/HcalCommonData/data/hcalouteralgo.xml', \n 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', \n 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', \n 'Geometry/MuonCommonData/data/mbCommon.xml', \n 'Geometry/MuonCommonData/data/mb1.xml', \n 'Geometry/MuonCommonData/data/mb2.xml', \n 'Geometry/MuonCommonData/data/mb3.xml', \n 'Geometry/MuonCommonData/data/mb4.xml', \n 'Geometry/MuonCommonData/data/muonYoke.xml', \n 'Geometry/MuonCommonData/data/mf.xml', \n 'Geometry/ForwardCommonData/data/forward.xml', \n 'Geometry/ForwardCommonData/data/bundle/forwardshield.xml', \n 'Geometry/ForwardCommonData/data/brmrotations.xml', \n 'Geometry/ForwardCommonData/data/brm.xml', \n 'Geometry/ForwardCommonData/data/totemMaterials.xml', \n 'Geometry/ForwardCommonData/data/totemRotations.xml', \n 'Geometry/ForwardCommonData/data/totemt1.xml', \n 'Geometry/ForwardCommonData/data/totemt2.xml', \n 'Geometry/ForwardCommonData/data/ionpump.xml', \n 'Geometry/MuonCommonData/data/muonNumbering.xml', \n 'Geometry/TrackerCommonData/data/trackerStructureTopology.xml', \n 'Geometry/TrackerSimData/data/trackersens.xml', \n 'Geometry/TrackerRecoData/data/trackerRecoMaterial.xml', \n 'Geometry/EcalSimData/data/ecalsens.xml', \n 'Geometry/HcalCommonData/data/hcalsenspmf.xml', \n 'Geometry/HcalSimData/data/hf.xml', \n 'Geometry/HcalSimData/data/hfpmt.xml', \n 'Geometry/HcalSimData/data/hffibrebundle.xml', \n 'Geometry/HcalSimData/data/CaloUtil.xml', \n 'Geometry/MuonSimData/data/muonSens.xml', \n 'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml', \n 'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml', \n 'Geometry/CSCGeometryBuilder/data/cscSpecs.xml', \n 'Geometry/RPCGeometryBuilder/data/RPCSpecs.xml', \n 'Geometry/ForwardCommonData/data/brmsens.xml', \n 'Geometry/HcalSimData/data/HcalProdCuts.xml', \n 'Geometry/EcalSimData/data/EcalProdCuts.xml', \n 'Geometry/EcalSimData/data/ESProdCuts.xml', \n 'Geometry/TrackerSimData/data/trackerProdCuts.xml', \n 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', \n 'Geometry/MuonSimData/data/muonProdCuts.xml', \n 'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml', \n 'Geometry/CMSCommonData/data/FieldParameters.xml'),\n rootNodeName = cms.string('cms:OCMS')\n)\n\n\nprocess.eegeom = cms.ESSource(\"EmptyESSource\",\n iovIsRunNotTime = cms.bool(True),\n recordName = cms.string('EcalMappingRcd'),\n firstValid = cms.vuint32(1)\n)\n\n\nprocess.es_hardcode = cms.ESSource(\"HcalHardcodeCalibrations\",\n toGet = cms.untracked.vstring('GainWidths')\n)\n\n\nprocess.magfield = cms.ESSource(\"XMLIdealGeometryESSource\",\n geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/normal/cmsextent.xml', \n 'Geometry/CMSCommonData/data/cms.xml', \n 'Geometry/CMSCommonData/data/cmsMagneticField.xml', \n 'MagneticField/GeomBuilder/data/MagneticFieldVolumes_1103l.xml', \n 'MagneticField/GeomBuilder/data/MagneticFieldParameters_07_2pi.xml', \n 'Geometry/CMSCommonData/data/materials.xml'),\n rootNodeName = cms.string('cmsMagneticField:MAGF')\n)\n\n\nprocess.prefer(\"magfield\")\n\nprocess.CondDBSetup = cms.PSet(\n DBParameters = cms.PSet(\n authenticationPath = cms.untracked.string(''),\n enableReadOnlySessionOnUpdateConnection = cms.untracked.bool(False),\n idleConnectionCleanupPeriod = cms.untracked.int32(10),\n messageLevel = cms.untracked.int32(0),\n enablePoolAutomaticCleanUp = cms.untracked.bool(False),\n 
enableConnectionSharing = cms.untracked.bool(True),\n connectionRetrialTimeOut = cms.untracked.int32(60),\n connectionTimeOut = cms.untracked.int32(60),\n authenticationSystem = cms.untracked.int32(0),\n connectionRetrialPeriod = cms.untracked.int32(10)\n )\n)\n\nprocess.fieldScaling = cms.PSet(\n scalingVolumes = cms.vint32(14100, 14200, 17600, 17800, 17900, \n 18100, 18300, 18400, 18600, 23100, \n 23300, 23400, 23600, 23800, 23900, \n 24100, 28600, 28800, 28900, 29100, \n 29300, 29400, 29600, 28609, 28809, \n 28909, 29109, 29309, 29409, 29609, \n 28610, 28810, 28910, 29110, 29310, \n 29410, 29610, 28611, 28811, 28911, \n 29111, 29311, 29411, 29611),\n scalingFactors = cms.vdouble(1, 1, 0.994, 1.004, 1.004, \n 1.005, 1.004, 1.004, 0.994, 0.965, \n 0.958, 0.958, 0.953, 0.958, 0.958, \n 0.965, 0.918, 0.924, 0.924, 0.906, \n 0.924, 0.924, 0.918, 0.991, 0.998, \n 0.998, 0.978, 0.998, 0.998, 0.991, \n 0.991, 0.998, 0.998, 0.978, 0.998, \n 0.998, 0.991, 0.991, 0.998, 0.998, \n 0.978, 0.998, 0.998, 0.991)\n)\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1000)\n)\n\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool(True)\n)\n\n","sub_path":"EDSHyFT/test/SUSHyFT/pset.py","file_name":"pset.py","file_ext":"py","file_size_in_byte":58144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"189014378","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport imageio\nimport utils\nimport math\nimport glob\nimageio.plugins.ffmpeg.download()\n\n\ndef process_image(path, out):\n imageraw = cv2.imread(path)\n image = cv2.cvtColor(imageraw, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n # mag = utils.mag_thresh(gray)\n # dir = utils.dir_threshold(gray)\n # abs = utils.abs_sobel_thresh(gray)\n correct = utils.preprocess(image)\n #plt.imshow(correct, plt.gray())\n #plt.show()\n # plt.imshow(dir, plt.gray())\n # plt.show()\n\n x = cv2.Sobel(gray, cv2.CV_16S, 1, 0)\n y = cv2.Sobel(gray, cv2.CV_16S, 0, 1)\n\n absX = cv2.convertScaleAbs(x)\n absY = cv2.convertScaleAbs(y)\n\n dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n plt.imshow(dst, plt.gray())\n plt.show()\n\n # Define a kernel size and apply Gaussian smoothing\n kernel_size = 5\n blur_gray = cv2.GaussianBlur(correct, (kernel_size, kernel_size), 0)\n # bilateral = cv2.bilateralFilter(image, -1, 0.3, 10)\n\n # Define our parameters for Canny and apply\n canny = utils.canny_thresh(blur_gray)\n # canny2 = utils.canny_thresh(bilateral)\n\n # cv2.imwrite(\"canny.png\", canny)\n # plt.subplot(211)\n #plt.imshow(canny, plt.gray())\n # plt.subplot(212)\n # plt.imshow(canny2)\n #plt.show()\n\n mask_whole = np.zeros_like(canny)\n mask_base = np.zeros_like(canny)\n mask_least = np.zeros_like(canny)\n mask_left = np.zeros_like(canny)\n mask_right = np.zeros_like(canny)\n ignore_mask_color = 255\n\n '''\n Using Multi-masks to Define ROI\n '''\n imshape = image.shape\n vertices_base_roi = np.array([[(0, imshape[0]), (imshape[1] / 3, 1.7 * imshape[0] / 3),\n (2 * imshape[1] / 3, 1.7 * imshape[0] / 3), (imshape[1], imshape[0])]],\n dtype=np.int32)\n vertices_least = np.array([[(imshape[1] / 9, imshape[0]), (imshape[1] * 1.2 / 3, 1.7 * imshape[0] / 3),\n (1.8 * imshape[1] / 3, 1.7 * imshape[0] / 3), (8 * imshape[1] / 9, imshape[0])]],\n dtype=np.int32)\n vertices_middle_triangle = np.array(\n [[(imshape[1] / 3, imshape[0]), (imshape[1] / 2, 1.7 * imshape[0] / 3), (2 * imshape[1] / 3, 
imshape[0])]],\n dtype=np.int32)\n vertices_left_triangle = np.array([[(0, imshape[0]), (imshape[1] / 3, 1.7 * imshape[0] / 3),\n (0, 7 * imshape[0] / 8)]], dtype=np.int32)\n vertices_right_triangle = np.array([[(2 * imshape[1] / 3, 1.7 * imshape[0] / 3), (imshape[1], imshape[0]),\n (imshape[1], 7 * imshape[0] / 8)]], dtype=np.int32)\n\n cv2.fillPoly(mask_whole, vertices_base_roi, ignore_mask_color)\n cv2.fillPoly(mask_whole, vertices_right_triangle, ignore_mask_color)\n cv2.fillPoly(mask_whole, vertices_middle_triangle, 0)\n cv2.fillPoly(mask_whole, vertices_left_triangle, ignore_mask_color)\n cv2.fillPoly(mask_base, vertices_base_roi, ignore_mask_color)\n cv2.fillPoly(mask_base, vertices_middle_triangle, 0)\n cv2.fillPoly(mask_least, vertices_least, ignore_mask_color)\n cv2.fillPoly(mask_least, vertices_middle_triangle, 0)\n cv2.fillPoly(mask_left, vertices_base_roi, ignore_mask_color)\n cv2.fillPoly(mask_left, vertices_left_triangle, ignore_mask_color)\n cv2.fillPoly(mask_left, vertices_middle_triangle, 0)\n cv2.fillPoly(mask_right, vertices_base_roi, ignore_mask_color)\n cv2.fillPoly(mask_right, vertices_middle_triangle, 0)\n cv2.fillPoly(mask_right, vertices_right_triangle, ignore_mask_color)\n\n masked_edges_whole = cv2.bitwise_and(canny, mask_whole)\n masked_edges_base = cv2.bitwise_and(canny, mask_base)\n masked_edges_least = cv2.bitwise_and(canny, mask_least)\n masked_edges_left = cv2.bitwise_and(canny, mask_left)\n masked_edges_right = cv2.bitwise_and(canny, mask_right)\n\n #plt.subplot(211)\n # plt.imshow(masked_edges_least)\n # plt.subplot(212)\n # plt.imshow(masked_edges_base)\n # plt.show()\n\n # Define the Hough transform parameters\n # Make a blank the same size as our image to draw on\n rho = 2 # distance resolution in pixels of the Hough grid\n theta = np.pi / 180 # angular resolution in radians of the Hough grid\n threshold = 30 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 35 # minimum number of pixels making up a line\n max_line_gap = 10 # maximum gap in pixels between connectable line segments\n line_image = np.copy(image) * 0 # creating a blank to draw lines_whole on\n\n # Run Hough on edge detected image\n # Output \"lines_whole\" is an array containing endpoints of detected line segments\n lines_whole = cv2.HoughLinesP(masked_edges_whole, rho, theta, threshold, np.array([]),\n 30, max_line_gap)\n lines_base = cv2.HoughLinesP(masked_edges_base, 1, theta, threshold, np.array([]),\n min_line_length, max_line_gap)\n lines_least = cv2.HoughLinesP(masked_edges_least, 1, theta, 35, np.array([]),\n 40, max_line_gap)\n lines_left = cv2.HoughLinesP(masked_edges_left, rho, theta, threshold, np.array([]),\n min_line_length, max_line_gap)\n lines_right = cv2.HoughLinesP(masked_edges_right, rho, theta, threshold, np.array([]),\n min_line_length, max_line_gap)\n\n # norms = utils.hough_filter(lines_least)\n # for t in norms.keys():\n # for norm in norms[t]:\n # for x1, x2, y1, y2 in norm[1:]:\n # cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)\n\n l, r = utils.detect_lines(lines_base, imshape)\n if l and r:\n line_image = utils.draw_lines(lines_base, line_image, imshape)\n line_result = lines_least\n mask_result = mask_base\n elif (not l) and r:\n # using base mask to try if OK\n l, r = utils.detect_lines(lines_base, imshape)\n if l & r:\n line_image = utils.draw_lines(lines_base, line_image, imshape)\n line_result = lines_base\n mask_result = mask_base\n else:\n line_image = utils.draw_lines(lines_left, line_image, imshape)\n 
line_result = lines_left\n mask_result = mask_left\n elif (not r) and l:\n # using base mask to try if OK\n l, r = utils.detect_lines(lines_base, imshape)\n if l & r:\n line_image = utils.draw_lines(lines_base, line_image, imshape)\n line_result = lines_base\n mask_result = mask_base\n else:\n line_image = utils.draw_lines(lines_right, line_image, imshape)\n line_result = lines_right\n mask_result = mask_right\n else:\n # using base mask to try if OK\n l, r = utils.detect_lines(lines_base, imshape)\n if l & r:\n line_image = utils.draw_lines(lines_base, line_image, imshape)\n line_result = lines_base\n mask_result = mask_base\n else:\n line_image = utils.draw_lines(lines_whole, line_image, imshape)\n line_result = lines_whole\n mask_result = mask_whole\n\n # Iterate over the output \"lines_whole\" and draw lines_whole on a blank image\n if line_image is not None:\n image_result = cv2.addWeighted(imageraw, 0.8, line_image, 1, 0)\n else:\n image_result = image\n\n # Framework.is_courtesy(image_result, line_result, [(0, 100), (100, 400)])\n '''\n plt.subplot(221)\n plt.imshow(mask_result)\n plt.subplot(222)\n plt.imshow(image_result)\n plt.subplot(223)\n plt.imshow(masked_edges_base)\n plt.subplot(224)\n plt.imshow(masked_edges_base)\n plt.show()\n\n plt.imshow(image_result)\n plt.show()\n '''\n cv2.imwrite(out, image_result)\n\n return image_result\n\nprocess_image('./test_pages/test_curb.jpg', './out.jpg')\n","sub_path":"PROTOTYPE/laneline_detection/Lane_Lines_Image.py","file_name":"Lane_Lines_Image.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"214167220","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\nurl = 'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/'\next = 'gz'\n\ndef listdb(url, ext=''):\n page = requests.get(url).text\n soup = BeautifulSoup(page, 'html.parser')\n return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]\n\nbigdf = pd.DataFrame()\nfor file in listdb(url, ext):\n print(file)\n data = pd.read_csv(file, compression='gzip', error_bad_lines=False)\n bigdf = pd.concat([bigdf,data],ignore_index=True)\nbigdf.to_csv('events_datasets.csv', index=False)\n","sub_path":"FASA/events/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"117532825","text":"# Setup file for unificontrol\nimport setuptools\n\nimport unificontrol\n\nwith open(\"README.rst\", \"r\") as fh:\n desc_lines = fh.readlines()\n stops = [i for i,l in enumerate(desc_lines) if \"PyPI STOP\" in l]\n if stops:\n desc_lines = desc_lines[:stops[0]]\n long_description = \"\".join(desc_lines)\n\nsetuptools.setup(\n name=\"unificontrol\",\n version=unificontrol.__version__,\n author=\"Nicko van Someren\",\n author_email=\"nicko@nicko.org\",\n description=\"Secure access to Ubiquiti Unifi network controllers\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n url=\"https://github.com/nickovs/unificontrol\",\n packages=setuptools.find_packages(),\n classifiers=(\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ),\n install_requires=['requests'],\n python_requires='>=3.4',\n keywords=['unifi', 'wifi', 'network', 
'mamangement'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"646294047","text":"import FWCore.ParameterSet.Config as cms\nprocess = cms.Process('ANALYSIS')\n\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nfrom Configuration.AlCa.autoCond import autoCond\nprocess.load(\"Configuration.Geometry.GeometryIdeal_cff\")\nprocess.GlobalTag.globaltag=autoCond['startup']\n\n#process.load('RecoJets.Configuration.RecoPFJets_cff')\n#process.kt6PFJets = process.kt6PFJets.clone(rParam = 0.6, doRhoFastjet = True) \n\n#load the response corrections calculator\nprocess.load('HcalClosureTest.Analyzers.calcrespcorrphotonplusjet_cfi')\n\n# run over files\nprocess.calcrespcorrphotonplusjet.rootHistFilename = cms.string('PhoJet_tree.root')\nprocess.calcrespcorrphotonplusjet.doCaloJets = cms.bool(False)\nprocess.calcrespcorrphotonplusjet.doPFJets = cms.bool(True)\nprocess.calcrespcorrphotonplusjet.doGenJets = cms.bool(True)\nprocess.calcrespcorrphotonplusjet.photonTriggers = cms.vstring(\n 'HLT_Photon20_CaloIdVL_IsoL','HLT_Photon30_CaloIdVL_IsoL',\n 'HLT_Photon50_CaloIdVL_IsoL','HLT_Photon75_CaloIdVL_IsoL',\n 'HLT_Photon90_CaloIdVL_IsoL','HLT_Photon135',\n 'HLT_Photon150','HLT_Photon160')\n\n# Load file list\n# Summer12_DR53X production G_Pt_XtoY\nimport FWCore.Utilities.FileUtils as FileUtils\nlistFileName='fileinfo_GJet/makepy_Summer12_DR53X_G_Pt_170to300.txt'\n#listFileName='selection_tmp.txt'\nmylist = FileUtils.loadListFromFile(listFileName)\nmylist.extend( FileUtils.loadListFromFile(listFileName) )\nreadFiles = cms.untracked.vstring( *mylist )\n\n\n##process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring('/store/relval/CMSSW_5_3_16/RelValPyquen_GammaJet_pt20_2760GeV/GEN-SIM-RECO/PU_STARTHI53_LV1_mar03-v2/00000/20FE26F4-65A3-E311-B12C-0025904C6378.root'))\n\nprocess.source = cms.Source(\"PoolSource\", \n#fileNames = cms.untracked.vstring('file:/uscms/home/lovedeep/eos/RelValPhotonJets_Pt_10_CMSSW_5_3_12_patch2_A4609359-9E2B-E311-B331-0025905964A6.root')\n# fileNames= readFiles\n\nfileNames = cms.untracked.vstring(\n '/store/mc/Summer12_DR53X/G_Pt-170to300_TuneZ2star_8TeV_pythia6/GEN-SIM-RECO/PU_S10_START53_V7A-v1/00000/5846302F-1A18-E211-A060-00266CF2AE10.root',\n## '/store/mc/Summer12_DR53X/G_Pt-170to300_TuneZ2star_8TeV_pythia6/GEN-SIM-RECO/PU_S10_START53_V7A-v1/00000/586126E2-0F18-E211-9323-0030487D864B.root',\n## '/store/mc/Summer12_DR53X/G_Pt-170to300_TuneZ2star_8TeV_pythia6/GEN-SIM-RECO/PU_S10_START53_V7A-v1/00000/A80FB82E-1018-E211-B444-0025904B130E.root',\n## '/store/mc/Summer12_DR53X/G_Pt-170to300_TuneZ2star_8TeV_pythia6/GEN-SIM-RECO/PU_S10_START53_V7A-v1/00000/5809400A-F917-E211-8D3D-0030487F1C51.root',\n## '/store/mc/Summer12_DR53X/G_Pt-170to300_TuneZ2star_8TeV_pythia6/GEN-SIM-RECO/PU_S10_START53_V7A-v1/00000/A40C5492-F917-E211-AB13-002481E0DC82.root'\n )\n\n)\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery=cms.untracked.int32(10)\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )\n\n# timing\n#process.Timing = cms.Service('Timing')\n\n# Load 
pfNoPileUP\nprocess.load(\"CommonTools.ParticleFlow.pfNoPileUp_cff\")\nprocess.load(\"CommonTools.ParticleFlow.PF2PAT_cff\")\nfrom RecoJets.JetProducers.ak5PFJets_cfi import *\nprocess.ak5PFJetsCHS = ak5PFJets.clone(\n src = cms.InputTag(\"pfNoPileUp\")\n)\n\nprocess.load('HcalClosureTest.Analyzers.calcrespcorr_CHSJECs_cff')\n\nprocess.p = cms.Path(\nprocess.pfNoPileUpSequence\n+process.PF2PAT\n+process.ak5PFJetsCHS\n+process.calcrespcorrphotonplusjet)\n\n","sub_path":"Analyzers/test/testRespCorrPhotonPlusJet_cfg.py","file_name":"testRespCorrPhotonPlusJet_cfg.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"333741169","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# -*- coding: utf-8 -*-\n\n\"\"\"Google Cloud function code to extract periodic transactions from data source.\"\"\"\n\nimport base64\nimport json\nimport os\n\nfrom typing import Any, Dict, Optional\nfrom google.cloud.functions_v1.context import Context\nfrom google.cloud import bigquery\n\nfrom custom_functions import hook_get_load_predictions_query\nfrom custom_functions import hook_get_bq_schema\nfrom custom_functions import hook_apply_formulas\nfrom custom_functions import hook_on_completion\n\nBQ_LTV_GCP_PROJECT = str(os.getenv(\"BQ_LTV_GCP_PROJECT\", \"\"))\nBQ_LTV_DATASET = str(os.getenv(\"BQ_LTV_DATASET\", \"\"))\nBQ_LTV_PREDICTIONS_TABLE = str(\n os.getenv(\"BQ_LTV_PREDICTIONS_TABLE\", \"\"))\n\n\ndef _load_data_from_bq(query):\n \"\"\"Loads all the transactions from the table.\n\n Args:\n query: A string with the query to run on the table\n\n Returns:\n A dataframe with all the table data\n \"\"\"\n job_config = bigquery.job.QueryJobConfig()\n\n return bigquery.Client().query(query, job_config=job_config).to_dataframe()\n\n\ndef _write_to_bigquery(df, table_name):\n \"\"\"Writes the given dataframe into the BQ table.\n\n Args:\n df: A pandas dataframe representing the data to be written\n table_name: A string representing the full path of the metadata BQ table\n \"\"\"\n\n dataframe = df\n\n client = bigquery.Client()\n\n job_config = bigquery.LoadJobConfig()\n job_config.write_disposition = \"WRITE_TRUNCATE\"\n job_config.schema = hook_get_bq_schema()\n\n job = client.load_table_from_dataframe(\n dataframe, table_name, job_config=job_config)\n job.result()\n\n table = client.get_table(table_name)\n print(\"Loaded {} rows and {} columns to {}\".format(table.num_rows,\n len(table.schema),\n table_name))\n\ndef _delete_dataset(dataset):\n \"\"\"Deletes the dataset specified by the dataset parameter.\n\n Args:\n dataset: The name of the dataset to be deleted.\n \"\"\"\n\n client = bigquery.Client()\n client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True\n )\n\ndef main(event: Dict[str, Any], context=Optional[Context]):\n \"\"\"Checks if the data source table is available & no extract table generated.\n\n Depending on the existence it will trigger the data 
transfer.\n\n Args:\n event (dict): The dictionary with data specific to this type of event.\n The `data` field contains the PubsubMessage message. The `attributes`\n field will contain custom attributes if there are any.\n context (google.cloud.functions.Context): The Cloud Functions event\n metadata. The `event_id` field contains the Pub/Sub message ID. The\n `timestamp` field contains the publish time.\n \"\"\"\n del context\n\n data = base64.b64decode(event[\"data\"]).decode(\"utf-8\")\n msg = json.loads(data)\n\n input_dataset = (msg['operation']['metadata']\n ['batchPredictDetails']['outputInfo']['bigqueryOutputDataset']).split(\"://\")[1]\n input_table = f\"\"\"{input_dataset}.predictions\"\"\"\n\n output_table = f\"{BQ_LTV_GCP_PROJECT}.{BQ_LTV_DATASET}.{BQ_LTV_PREDICTIONS_TABLE}_{msg['date']}\"\n\n query = hook_get_load_predictions_query(input_table)\n _write_to_bigquery(\n hook_apply_formulas(_load_data_from_bq(query)), output_table)\n _delete_dataset(input_dataset)\n hook_on_completion()\n\n\ndef _test():\n message = {\n \"bq_output_table\":\n \"ltv-framework\",\n \"bq_input_to_predict_table\":\n \"ltv-framework.ltv_jaimemm.prepared_new_customers_periodic_transactions\",\n \"date\":\n \"20210303\",\n \"operation\": {\n \"name\":\n \"projects/988912752389/locations/eu/operations/TBL8979557532418703360\",\n \"metadata\": {\n \"@type\":\n \"type.googleapis.com/google.cloud.automl.v1beta1.OperationMetadata\",\n \"createTime\":\n \"2021-03-05T17:57:54.251058Z\",\n \"updateTime\":\n \"2021-03-05T18:02:43.797899Z\",\n \"batchPredictDetails\": {\n \"inputConfig\": {\n \"bigquerySource\": {\n \"inputUri\":\n \"bq://ltv-framework.ltv_jaimemm.prepared_new_customers_periodic_transactions_20210303\"\n }\n },\n \"outputInfo\": {\n \"bigqueryOutputDataset\":\n \"bq://ltv-framework.prediction_training_data_20200605_0608_2021_03_05T09_57_54_169Z\"\n }\n }\n },\n \"done\":\n \"true\",\n \"response\": {\n \"@type\":\n \"type.googleapis.com/google.cloud.automl.v1beta1.BatchPredictResult\"\n }\n }\n }\n msg_data = base64.b64encode(bytes(json.dumps(message).encode(\"utf-8\")))\n\n main(\n event={\n \"data\": msg_data,\n \"attributes\": {\n \"forwarded\": \"true\"\n }\n },\n context=None)\n\n\nif __name__ == \"__main__\":\n _test()\n","sub_path":"cfs/post_process_batch_predictions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"534813661","text":"import sys\nimport math\nimport numpy as np\nimport sklearn.linear_model as lm\n\nfrom scipy.io import loadmat\n\nfrom tqdm import tqdm\n\n# For JSB_Chorales, notes 27 through 75 are the ones which are actually played\n# For Nottingham, it is 10 through 72, with some missing in between\n\ndef get_dataset(dataname: str,\n key: str,\n low_off_notes: int,\n high_off_notes: int,\n lag=1,\n window=1,\n format='flattened'):\n \"\"\"\n :param dataname: which dataset is to be used\n :param key: 'traindata', 'testdata', 'validdata'\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n num_notes = high_off_notes - low_off_notes\n\n data_dict = loadmat('data/' + dataname)\n arrays = data_dict[key][0]\n\n # this much will have to be chopped off at the beginning and end of each sequence\n offset = lag + window - 1\n\n # store sequences separately here\n xlist = []\n ylist = []\n\n # record each array, reformatted appropriately\n for array in arrays:\n\n T = len(array)\n\n 
newx = np.zeros((T - offset, num_notes*window))\n for t in range(T - offset):\n for i in range(window):\n newx[t, num_notes*i : num_notes*(i + 1)] = array[t + i, low_off_notes : high_off_notes]\n xlist.append(newx)\n\n ylist.append(array[offset:, low_off_notes : high_off_notes])\n\n # this format is needed for computing average loss and accuracy over time and sequences\n if format == 'listofarrays':\n\n return xlist, ylist\n\n # this format is needed for training\n elif format == 'flattened':\n\n # count how big the whole array needs to be\n size = 0\n for xseq in xlist:\n size += len(xseq)\n\n # initialize the flattened inputs and targets\n x = np.zeros((size, num_notes*window))\n y = np.zeros((size, num_notes))\n\n # keep track of where we are\n ix = 0\n\n # put every sequence together into one array\n for xseq, yseq in zip(xlist, ylist):\n\n T = len(xseq)\n\n x[ix : ix + T] = xseq\n y[ix : ix + T] = yseq\n\n ix += T\n\n return x, y\n\n else:\n raise ValueError(\"Format {} not recognized\".format(format))\n\n\n# some of the notes might be off the entire time, find them!\ndef find_off_notes(x):\n\n off_notes = []\n\n num_notes = x.shape[1]\n\n for note in range(num_notes):\n\n if not 1 in x[:, note]:\n off_notes.append(note)\n\n return off_notes\n\n\ndef train_models(dataname: str,\n num_epochs: int,\n low_off_notes: int,\n high_off_notes: int,\n _seed,\n lag=1,\n window=1):\n \"\"\"\n :param dataname: which dataset to use for training\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n num_notes = high_off_notes - low_off_notes\n\n # load the data\n x, y = get_dataset(dataname,\n 'traindata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n\n off_notes = find_off_notes(x)\n\n # model is needed for every channel (note)\n model_list = []\n\n # train every model\n for channel in tqdm(range(num_notes)):\n\n # append a placeholder to the model list if this note is not played\n if channel in off_notes:\n model_list.append(None)\n\n # otherwise train the model on this particular note and append it\n else:\n model = lm.LogisticRegression(solver='saga', penalty='elasticnet', l1_ratio=0.9, random_state=_seed, max_iter=num_epochs)\n\n model.fit(x, y[:, channel])\n\n model_list.append(model)\n\n return model_list\n\n\ndef compute_accuracy(model_list,\n dataname: str,\n key: str,\n low_off_notes: int,\n high_off_notes: int,\n lag=1,\n window=1):\n \"\"\"\n :param model_list: the trained regression model for every note\n :param dataname: dataname of the dataset to be used\n :param key: 'traindata', 'testdata', 'validdata'\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n # how many notes we are predicting\n num_notes = len(model_list)\n\n # load the data\n x, y = get_dataset(dataname,\n key,\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window,\n format='listofarrays')\n\n # accumulate accuracy over all sequences\n tot_over_seqs = 0\n\n for xarr, yarr in tqdm(zip(x, y)):\n\n # accumulate accuracy over time\n tot_over_time = 0\n\n for xt, yt in zip(xarr, yarr):\n\n # true positives, false positives, false negatives\n tp = 0\n fp = 0\n fn = 0\n\n # compute for each note\n for channel in range(num_notes):\n\n # get the appropriate model and prediction\n model = model_list[channel]\n\n if model != None:\n pred = model.predict(xt.reshape(1, -1))[0]\n\n tp += yt[channel]*pred\n fp += (1 - yt[channel])*pred\n fn += yt[channel]*(1 - 
pred)\n\n            # avoid nans\n            if tp == 0 and fp == 0 and fn == 0:\n                tot_over_time += 0\n            else:\n                tot_over_time += tp/(tp + fp + fn)\n\n        tot_over_seqs += tot_over_time/len(xarr)\n\n    return tot_over_seqs/len(x)\n\n\ndef compute_loss(model_list,\n                 dataname: str,\n                 key: str,\n                 low_off_notes: int,\n                 high_off_notes: int,\n                 lag=1,\n                 window=1):\n    \"\"\"\n    :param model_list: the trained regression model for every note\n    :param dataname: dataname of the dataset to be used\n    :param key: 'traindata', 'testdata', 'validdata'\n    :param lag: how many steps into the future are we predicting\n    :param window: how many steps are we predicting\n    \"\"\"\n\n    # how many notes we are predicting\n    num_notes = len(model_list)\n\n    # load the data\n    x, y = get_dataset(dataname,\n                       key,\n                       low_off_notes,\n                       high_off_notes,\n                       lag=lag,\n                       window=window,\n                       format='listofarrays')\n\n    # accumulate loss over all sequences\n    tot_over_seqs = 0\n\n    for xarr, yarr in tqdm(zip(x, y)):\n\n        # accumulate loss over time\n        tot_over_time = 0\n\n        for xt, yt in zip(xarr, yarr):\n\n            # accumulate over each note\n            tot = 0\n\n            for channel in range(num_notes):\n\n                model = model_list[channel]\n\n                # skip the placeholder entries for notes that are never played\n                if model is None:\n                    continue\n\n                # sigmoid of the trained affine transformation\n                pred = 1.0/(1.0 + np.exp(-(model.coef_ @ xt + model.intercept_)))\n\n                # binary cross entropy with logits\n                tot -= yt[channel]*math.log(pred) + (1 - yt[channel])*math.log(1 - pred)\n\n            tot_over_time += tot\n\n        tot_over_seqs += tot_over_time/len(xarr)\n\n    return tot_over_seqs/len(x)\n\n","sub_path":"models/_sources/logistic_regression_7f55c04f45a613310c543e8078fe1851.py","file_name":"logistic_regression_7f55c04f45a613310c543e8078fe1851.py","file_ext":"py","file_size_in_byte":7484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} {"seq_id":"239860221","text":"import pandas as pd\nimport matplotlib.pylab as plt\n\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 15, 6\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\n\ndateparse = lambda dates: pd.to_datetime(dates, format='%Y-%m-%d')\ndatas = pd.read_csv('state Data/Washington.csv', header=0, usecols=['Date Local', 'NO2 Mean'], parse_dates=['Date Local'], index_col='Date Local', date_parser=dateparse)\nprint(datas)\n\n\ndataValues = datas.values\ntrain, test = dataValues[0:int(len(dataValues) * 0.80)], dataValues[int(len(dataValues) * 0.80):len(dataValues)]\ntrainingData = [x for x in train]\npredictions = []\n\nfor item in range(len(test)):\n    model = ARIMA(trainingData, order=(5,1,0))\n    model_fit = model.fit(disp=0)\n    output = model_fit.forecast()\n    predicted = output[0]\n    predictions.append(predicted)\n    observation = test[item]\n    trainingData.append(observation)\n    print('predicted=%f, expected=%f' % (predicted, observation))\n\n# score against the full test split rather than a hardcoded slice\nerror = mean_squared_error(test, predictions)\nprint('Test MSE: %.3f' % error)\n\nplt.plot(test, label='Actual')\nplt.title('ARIMA Predictions DC')\nplt.ylabel('NO2 Mean')\nplt.xlabel('Number of Months')\nplt.plot(predictions, color='red', label='Prediction')\nplt.legend(loc='best')\n# save the figure before show(), which clears the current figure\nplt.savefig('Test_MSE.png')\nplt.show()\n","sub_path":"ARIMA_NO2.py","file_name":"ARIMA_NO2.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} {"seq_id":"402883982","text":"configs = dict(\n    auto_label_dir = \"/home/sss/UOS-SSaS Dropbox/05. Data/06. &#13;
Auto Labeling\",\n    dataset_name = 'Road Crack',\n    rsz_img_size = 1024,\n\n    model_config_file = '/home/sss/UOS-SSaS Dropbox/05. Data/03. Checkpoints/2020.09.02_deeplabv3plus_r101-d8_769x769_40k_concrete_damage_cs/deeplabv3plus_r101-d8_769x769_40k_concrete_damage_cs.py',\n    model_checkpoint_file = '/home/sss/UOS-SSaS Dropbox/05. Data/03. Checkpoints/2020.09.02_deeplabv3plus_r101-d8_769x769_40k_concrete_damage_cs/iter_40000.pth',\n\n    num_of_class = 1,  # does not include the background class\n    erode_size = 2,\n\n    palette = [\n        [255, 0, 0],  # crack\n        ],\n\n    classes = ['crack',],\n    )\n","sub_path":"road_crack_config.py","file_name":"road_crack_config.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} {"seq_id":"447079477","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\n#\nfrom inventory.models import Item\nfrom .forms import ItemForm\nfrom django.shortcuts import redirect\n\n\ndef index(request):\n\titems = Item.objects.exclude(amount = 0)\n\treturn render(request, 'inventory/index.html', {\n\t\t'items': items,\n\n\t\t})\n\ndef item_detail(request,id):\n\ttry:\n\t\titem = Item.objects.get(id=id)\n\texcept Item.DoesNotExist:\n\t\traise Http404('This item does not exist')\n\treturn render(request, 'inventory/item_detail.html',{\n\t\t'item': item,\n\t\t})\n\ndef update_item(request):\n    # if this is a POST request we need to process the form data\n    if request.method == 'POST':\n        # create a form instance and populate it with data from the request:\n        form = ItemForm(request.POST)\n        # check whether it's valid:\n        if form.is_valid():\n            # process the data in form.cleaned_data as required\n            # ...\n            # redirect to a new URL:\n            return HttpResponseRedirect('/thanks/')\n\n    # if a GET (or any other method) we'll create a blank form\n    else:\n        form = ItemForm()\n\n    return render(request, 'name.html', {'form': form})\n\ndef new_item(request):\n    # if this is a POST request we need to process the form data\n    if request.method == 'POST':\n        # create a form instance and populate it with data from the request:\n        form = ItemForm(request.POST)\n        # check whether it's valid:\n        if form.is_valid():\n            item = form.save(commit=False)\n            item.save()\n            return redirect('index')\n    else:\n        form = ItemForm()\n    return render(request,'inventory/add_new_item.html',{'form':form})\n\n\ndef item_edit(request,id):\n    item = get_object_or_404(Item,id = id)\n    # if this is a POST request we need to process the form data\n    if request.method == 'POST':\n        # create a form instance and populate it with data from the request:\n        form = ItemForm(request.POST,instance=item)\n        # check whether it's valid:\n        if form.is_valid():\n            item = form.save(commit=False)\n            item.save()\n            return redirect('index')\n    else:\n        form = ItemForm(instance=item)\n    return render(request,'inventory/item_edit.html',{'form':form})","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} {"seq_id":"293615319","text":"# U08_EX15_ImageNegative.py\n#\n# Author: Will Baschab\n# Course: Coding for OOP\n# Section: A2\n# Date: 1 April 2019\n# IDE: PyCharm\n#\n# Assignment Info\n# Exercise: 15\n# Source: Python Programming\n# Chapter: 08\n#\n# Program Description\n# This program allows the user to upload an image and\n# edit it with a photo negative function. &#13;
The image can be saved\n# or remove after it is edited.\n#\n# Algorithm (pseudocode)\n\"\"\"\n*** Refer to main() function for algorithm ***\n\"\"\"\nfrom graphics import *\n\n\n# ==================================================\n# Array/Object Creation Methods\n# ==================================================\n\n\ndef create_new_win(title, width, height):\n window = GraphWin(title, width, height) # make the window according to arguments\n window.setCoords(0.0, 0.0, 10.0, 10.0) # set coords on a 10x10 grid\n return window # return the window object to caller\n\n\ndef create_textbox(lx, ly, ux, uy, color, text):\n # l values are coords of the lower left point of rectangle\n # u values are coords of the upper right point of rectangle\n r = Rectangle(Point(lx, ly), Point(ux, uy)) # uses l and y values to make rectangle\n r.setFill(color) # takes color argument\n t = Text(r.getCenter(), text) # draws text in the center of textbox\n\n return [r, t] # in order to call on it as one object instead of two, return rectangle and text in array\n\n\ndef create_entreebox(lower, upper, display_text):\n \"\"\"\n This method generates an object (in the form of an array) for entering text into\n\n ALG:\n - define rectangle using the lower and upper arguments (lower left and upper right points) and fill rectangle gray\n\n - define a second rectangle (going inside the first) to hold the text using and .5 off of for it's\n lower left point, and for it's upper right point\n - so that text can be seen, fill this rectangle with white\n\n - create text object in center of second rectangle using display_text argument\n\n - create entree object in center of first rectangle with a 20 character max, with a white fill\n\n - return the first and second rectangle, the text, and the entree as an array to caller\n \"\"\"\n r = Rectangle(lower, upper)\n r.setFill('gray')\n\n display_rect = Rectangle(Point(lower.getX(), upper.getY() - .5), upper)\n display_rect.setFill('white')\n\n t = Text(display_rect.getCenter(), display_text)\n\n e = Entry(r.getCenter(), 20)\n e.setFill('white')\n\n return [r, display_rect, t, e]\n\n\ndef create_buttons(starting_point, width, height, spacing, titles_list, starting_color_list):\n \"\"\"\n This function combines text boxes into one large list of buttons to the caller\n \"\"\"\n buttons = [] # list of buttons to be returned\n\n for i in range(len(titles_list)): # each title represents a button, and so if there are 4 titles, 4 buttons\n buttons.append(\n create_textbox(starting_point.getX() + (spacing * i), # lx will start off at starting point\n # but is iterable with spacing\n starting_point.getY(), # the buttons all have same ly values\n starting_point.getX() + width + (spacing * i), # upper right x adds width too\n starting_point.getY() + height, # add the height to ly so it is now uy\n starting_color_list[i], # if in correct order, color and title will match up\n titles_list[i])) # if in correct order, title with match up with title list\n return buttons\n\n\n# ==================================================\n# Drawing Methods\n# ==================================================\n\n\ndef draw_objs(win, obj_list):\n # this method allows multiple objects in the same list to be drawn\n for i in obj_list: # the list represents a textbox object usually\n i.draw(win)\n\n\ndef undraw_objs(obj_list):\n # this method allows multiple objects in the same list to be undrawn\n for i in obj_list: # the list represents a textbox object usually\n i.undraw()\n\n\ndef draw_buttons(win, 
obj_list):\n # this method allows a list of text boxes (a list of lists) to be drawn with one command\n for i in obj_list:\n draw_objs(win, i) # new layer of abstraction is represented by the use of draw_buttons()\n\n\ndef draw_exitbox(win, lowerbound, upperbound):\n upperline = upperbound - .125 # upperline can be used as the upper right coordinate (the x shape is in middle)\n lowerline = lowerbound + .125 # lowerline can be used as the lower left coordinate (x is in middle\n\n # creation and drawing of the exit box shape using lower/upper bound twice bc it is a square\n exit_box = Rectangle(Point(lowerbound, lowerbound), Point(upperbound, upperbound))\n exit_box.setFill('red') # rarely have I seen an exit box that isn't red\n exit_box.draw(win)\n\n # creation of an x shape using the upper/lower line described for coords above\n left_right_line = Line(Point(lowerline, lowerline), Point(upperline, upperline))\n right_left_line = Line(Point(lowerline, upperline), Point(upperline, lowerline))\n\n # colorizing of lines\n left_right_line.setFill(\"white\")\n right_left_line.setFill(\"white\")\n\n # fixed width of lines after seeing what they looked like (they need to be larger)\n left_right_line.setWidth(4)\n right_left_line.setWidth(4)\n\n # both lines are drawn after exit box so that they show up\n left_right_line.draw(win)\n right_left_line.draw(win)\n\n\ndef draw_menu(win, buttons):\n # Create a rectangle to serve as backdrop for for buttons\n menu_area = Rectangle(Point(0, 8), Point(10, 10))\n menu_area.setFill(color_rgb(211, 211, 211))\n menu_area.draw(win)\n\n draw_exitbox(win, 9.5, 10) # draws exit box in upper right corner on top of menu\n draw_buttons(win, buttons) # draws the buttons onto the menu area\n\n\n# ==================================================\n# Logic Methods\n# ==================================================\n\ndef change_color(tbox, color):\n tbox[0].setFill(color) # refers to the rectangle in a textbox array and changes the fill based on arguments\n\n\ndef negative(win, img):\n \"\"\"\n This method handles the transformation of an image into a negative of itself and returns the\n new image back to caller\n\n ALG:\n - get the number of rows and columns from the width and height of the image\n - a chunk the size a tenth of the image will be represented by the widths (number of rows) integer divided by 10\n\n - for each row in in the range of rows and for each column in the range of columns\n - get the pixel rgb data of specific row and column\n - set the pixel of same row and column to have the rgb value of (255 - (r value),\n 255 - (g value),\n 255 - (b value))\n\n - (outside of column loop) check if the current row divides with no remainder into a tenth of the image width\n - if it does, redraw the image with new pixel values\n\n - return image object to caller\n \"\"\"\n rows = img.getWidth()\n columns = img.getHeight()\n chunksize = rows // 10\n\n for row in range(rows):\n for column in range(columns):\n rgb = img.getPixel(row, column)\n img.setPixel(row, column, color_rgb(255 - rgb[0], 255 - rgb[1], 255 - rgb[2]))\n\n if row % chunksize == 0:\n img.undraw()\n img.draw(win)\n\n return img\n\n\ndef is_inbounds_rectangle(mouse, lower, upper):\n lx, ly = lower.getX(), lower.getY() # lx and ly represent the lower left corner of rectangle\n ux, uy = upper.getX(), upper.getY() # ux and uy represent the upper right corner of rectangle\n\n if lx < mouse.getX() < ux and ly < mouse.getY() < uy: # check if mouse is in-between the x's and y's\n return True # if the 
mouse clicked inside the area, than return true\n\n\ndef is_inbounds_button(mouse, button):\n # because a button stores it's rectangle in an array, a different method is need for accessing it\n lower = button[0].getP1() # button[0] is the button's rectangle object\n upper = button[0].getP2()\n\n # this part is same as is_inbounds_rectangle()\n lx, ly = lower.getX(), lower.getY()\n ux, uy = upper.getX(), upper.getY()\n\n if lx < mouse.getX() < ux and ly < mouse.getY() < uy:\n return True\n\n\ndef check_option(mouse, button, photo_state, state):\n if is_inbounds_button(mouse, button) and photo_state % 2 == state:\n # photo_state represents whether or not a photo has been put on the canvas\n # this method allows for the result of the modulus (1, or 0) to be chosen, which is useful later\n return True\n else:\n return False\n\n\n# ==================================================\n# Active State Methods\n# ==================================================\n\n\ndef draw_upload_dialog(win, upload_state, buttons, upload_entreebox):\n \"\"\"\n This method controls the drawing of the upload box, allowing the user to close the dialog box\n by toggling the state of the upload dialog box with each start\n\n buttons[0] is the upload photo text box\n \"\"\"\n if upload_state % 2 == 0: # the state starts on 1, so by clicking it will become 2 and be drawn\n change_color(buttons[0], 'gray') # the gray means that the dialog box is active\n draw_objs(win, upload_entreebox) # draws entree dialog\n else:\n change_color(buttons[0], 'white') # having been clicked again, the text box becomes white\n undraw_objs(upload_entreebox) # and the entree dialogue closes\n\n\ndef draw_save_dialog(win, save_state, buttons, save_entreebox):\n \"\"\"\n This method controls the drawing of the save dialog box, allowing the user to close the dialog box\n by toggling the state of the save dialog box with each start\n\n buttons[3] is the save photo text box\n \"\"\"\n if save_state % 2 == 0: # the state starts on 1, so by clicking it will become 2 and be drawn\n change_color(buttons[3], 'gray') # the gray means that the dialog box is active\n draw_objs(win, save_entreebox) # draws entree dialog\n else:\n change_color(buttons[3], 'white') # having been clicked again, the text box becomes white\n undraw_objs(save_entreebox) # and the entree dialogue closes\n\n\ndef display_image(win, buttons, upload_entreebox):\n \"\"\"\n This method draws the photo to the window and returns the photo as an object to the caller\n\n upload_entreebox[3] is the user entree field object provided by graphics.py\n \"\"\"\n upload_entreebox[3].setText(upload_entreebox[3].getText()) # overcautious of making sure user input is read\n undraw_objs(upload_entreebox) # remove the entree field after input is received\n photo = Image(Point(5, 4), upload_entreebox[3].getText()) # 5,4 is middle of area that is not menu\n # photo gets filename from user entree field\n photo.draw(win)\n for i in range(1, 4): # now that there is a photo, the other 3 buttons are able to be interacted with\n change_color(buttons[i], 'white') # this is indicated by them being white\n\n return photo # returns the photo to the caller so that other methods can use it\n\n\ndef save_image(win, photo, button, save_entreebox, save_text):\n \"\"\"\n This method handles the process of saving the image and changing buttons after save\n\n save_entreebox[3] is the user entree field object provided by graphics.py\n \"\"\"\n save_entreebox[3].setText(save_entreebox[3].getText()) # overcautious of 
making sure user input is read\n undraw_objs(save_entreebox) # remove the entree field after input is received\n photo.save(save_entreebox[3].getText()) # this saves the photo to local folder with whatever name was given\n change_color(button, 'white') # returns the save button to white after it has finished\n\n # Replacement of text stating save progress\n save_text.undraw()\n save_text.setText(\"Image Saved! Viewable after exit.\")\n save_text.setFill('green')\n save_text.draw(win)\n\n\n# ==================================================\n# Main Function\n# ==================================================\n\n\ndef main():\n \"\"\"\n ALG:\n - make a new window\n\n - declare large scale variables such as lists, states, and photo variable\n\n - create save text at 1.5, 8.25 with initial message 'Not saved' in red (the program has only started)\n\n - create entree dialog boxes in center of edit area for saving and uploading\n - create buttons using list of titles and colors inside the menu area\n\n - draw the menu and the initial save status message\n\n - In while True state:\n - set checking of any key and mouse click to variables (key and mouse)\n\n - if the mouse variable returns anything:\n - break out of loop (ending program) if it is in the bounds of exit box\n\n - if click was in upload box:\n - add one to the upload state\n - run upload dialog method\n\n - if click was in remove box:\n - undraw the photo\n - add one to photo_state (photo is no longer present)\n - add one to upoad_state (with no photo, ability to upload is not blocked)\n - set the negative_state to 0 (with last image removed, the next one will not have been negative yet)\n\n - change upload text box to white and other three to gray\n\n -reset save text to unsaved\n\n - if click was in save textbox:\n - add one to the save state\n - run save dialog method\n\n - if click was in negative box and the photo has not been color negatived yet:\n - turn all buttons grey while this one is active\n\n - set photo equal to negative() method (it returns the photo after editing it)\n\n - add one to the negative_state (so that image can't be color negatived twice)\n - change the remove and save buttons back to white\n\n - if the key variable returns 'return':\n - if the upload_state is toggled on (even) and the save_state is not on (odd):\n - photo object created from display_image() method\n - the photo is present, so the photo_state turns on to 1 (photo_state adds one to itself)\n - if saving the photo (save state should be even, thus on):\n - run method save_image()\n\n \"\"\"\n # Window creation\n win = create_new_win('Image Editor', 1000, 1000)\n\n # Main Variables\n titles = ['Upload Photo', 'Remove Photo', 'Negative of Photo', 'Save Photo']\n colors = ['white', 'gray', 'gray', 'gray']\n # buttons[0] is upload box\n # buttons[1] is remove box\n # buttons[2] is negative box\n upload_state = 1\n negative_state = 0\n save_state = 1\n photo_state = 0\n photo = None # if removed, python will complain that photo is used before assignment\n\n # Save Text\n save_text = Text(Point(1.5, 8.25), \"Not Saved\")\n save_text.setFill('red')\n\n # Buttons and Entree Boxes\n upload_entreebox = create_entreebox(Point(3, 4.25), Point(7, 5.75),\n \"Enter Filename (.gif, .png, or .ppm) in entree field:\")\n save_entreebox = create_entreebox(Point(3, 4.25), Point(7, 5.75),\n \"Enter Filename to save by (same type as upload)\\nin entree field:\")\n buttons = create_buttons(Point(.5, 9.25), 1.75, .5, 2, titles, colors)\n\n # draws buttons and menu 
template\n    draw_menu(win, buttons)\n    save_text.draw(win)\n\n    while True:\n        key = win.checkKey()\n        mouse = win.checkMouse()\n\n        if mouse:\n            if is_inbounds_rectangle(mouse, Point(9.5, 9.5), Point(10, 10)): # if in exit box\n                break\n\n            elif check_option(mouse, buttons[0], photo_state, 0): # if in upload text box (and no photo)\n                upload_state += 1\n                draw_upload_dialog(win, upload_state, buttons, upload_entreebox)\n\n            elif check_option(mouse, buttons[1], photo_state, 1): # if in remove text box (and there is photo)\n                photo.undraw()\n                photo_state += 1\n                upload_state += 1\n                negative_state = 0\n\n                change_color(buttons[0], 'white')\n                for i in range(1, 4):\n                    change_color(buttons[i], 'gray')\n\n                # After removing photo, save text resets to unsaved work of next photo\n                save_text.undraw()\n                save_text.setText(\"Not Saved\")\n                save_text.setFill('red')\n                save_text.draw(win)\n\n            elif check_option(mouse, buttons[3], photo_state, 1): # if in save text box (and there is photo)\n                save_state += 1\n                draw_save_dialog(win, save_state, buttons, save_entreebox)\n\n            elif check_option(mouse, buttons[2], photo_state, 1) and negative_state % 2 == 0: # if in negative text box\n                # and the image has not been color negatived yet (to prevent it from happening again)\n                for i in range(1, 4):\n                    change_color(buttons[i], 'gray')\n\n                photo = negative(win, photo)\n\n                negative_state += 1\n                change_color(buttons[1], 'white')\n                change_color(buttons[3], 'white')\n\n        if key == 'Return':\n            if upload_state % 2 == 0 and save_state % 2 == 1: # if the user presses enter\n                # after writing filename to display photo (to make sure it is only upload, check save and upload state)\n                photo = display_image(win, buttons, upload_entreebox)\n                photo_state += 1\n\n            if save_state % 2 == 0: # if key is hit and the user meant to save,\n                save_image(win, photo, buttons[3], save_entreebox, save_text)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Chapter08/U08_EX15_ImageNegative.py","file_name":"U08_EX15_ImageNegative.py","file_ext":"py","file_size_in_byte":18115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} {"seq_id":"173879914","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020 BuildGroup Data Services Inc.\nimport copy\n\nimport logging\nfrom caravaggio_rest_api.tests import CaravaggioBaseTest\nfrom davinci_crawling.entity_diff.diff import make_diff\n\n_logger = logging.getLogger(\"davinci_crawling.testing\")\n\n\nclass TestDiff(CaravaggioBaseTest):\n    \"\"\"\n    Test the entity diff logic: compare an original and a modified version\n    of a document and check the result returned by make_diff.\n    \"\"\"\n\n    all_files_count = 0\n    original_json = None\n\n    @classmethod\n    def setUpTestData(cls):\n        cls.original_json = {\n            \"list_strings\": [\"abc\", \"def\"],\n            \"list_objects\": [\n                {\"uuid\": \"f8626a86-a9ca-4b3c-958b-5f2c71442a3c\", \"name\": \"John\", \"age\": 24, \"country\": \"Brazil\"},\n                {\"uuid\": \"b0652153-000d-4100-85e5-1d25c7793542\", \"name\": \"Mary\", \"age\": 31, \"country\": \"USA\"},\n            ],\n        }\n\n    def _compare_diff(self, original_json, modified_json, expected, should_fail=False):\n        \"\"\"\n        Call the diff using the original and the modified json and compare with\n        the expected.\n        Args:\n            original_json: the original json\n            modified_json: the modified version that will be compared with the\n                original\n            expected: the expected result of the diff\n        \"\"\"\n        result = make_diff(original_json, modified_json)\n\n        if expected:\n            result[\"inserts\"] = sorted(result[\"inserts\"])\n            expected[\"inserts\"] = &#13;
sorted(expected[\"inserts\"])\n result[\"updates\"] = sorted(result[\"updates\"])\n expected[\"updates\"] = sorted(expected[\"updates\"])\n result[\"deletes\"] = sorted(result[\"deletes\"])\n expected[\"deletes\"] = sorted(expected[\"deletes\"])\n\n try:\n self.assertDictEqual(expected, result)\n except AssertionError as e:\n if not should_fail:\n raise e\n\n def test_simple_list_insert(self):\n modified_json = copy.deepcopy(self.original_json)\n # insert on first position\n modified_json[\"list_strings\"].insert(0, \"new_string\")\n\n expected = {\n \"all\": {\"inserts\": {\"list_strings[0]\": {\"new_value\": \"new_string\"}}},\n \"inserts\": [\"list_strings*\"],\n \"updates\": [],\n \"deletes\": [],\n }\n\n self._compare_diff(self.original_json, modified_json, expected)\n\n modified_json = copy.deepcopy(self.original_json)\n # insert on the second position\n modified_json[\"list_strings\"].insert(1, \"new_string\")\n\n expected = {\n \"all\": {\"inserts\": {\"list_strings[1]\": {\"new_value\": \"new_string\"}}},\n \"inserts\": [\"list_strings*\"],\n \"updates\": [],\n \"deletes\": [],\n }\n\n self._compare_diff(self.original_json, modified_json, expected)\n\n modified_json = copy.deepcopy(self.original_json)\n # insert on the first position same value\n modified_json[\"list_strings\"].insert(0, \"abc\")\n\n expected = {\n \"all\": {\"inserts\": {\"list_strings[0]\": {\"new_value\": \"abc\"}}},\n \"inserts\": [\"list_strings*\"],\n \"updates\": [],\n \"deletes\": [],\n }\n\n self._compare_diff(self.original_json, modified_json, expected)\n\n def test_object_list_update(self):\n modified_json = copy.deepcopy(self.original_json)\n # invert the order of the elements\n modified_json[\"list_objects\"][0], modified_json[\"list_objects\"][1] = (\n modified_json[\"list_objects\"][1],\n modified_json[\"list_objects\"][0],\n )\n\n expected = {\"all\": {}, \"inserts\": [], \"updates\": [], \"deletes\": []}\n\n # this should fail because we are not in the same order on the two\n # dictionaries\n self._compare_diff(self.original_json, modified_json, expected, should_fail=True)\n\n sorted_current = sorted(self.original_json[\"list_objects\"], key=lambda i: i[\"uuid\"])\n sorted_previous = sorted(modified_json[\"list_objects\"], key=lambda i: i[\"uuid\"])\n\n self._compare_diff(sorted_current, sorted_previous, expected)\n\n def test_object_list_update_many(self):\n original_json = {\"many\": [{\"uuid\": 1}, {\"uuid\": 2}]}\n modified_json = {\"many\": [{\"uuid\": 2}, {\"uuid\": 1}]}\n\n expected = {\n \"all\": {\n \"updates\": {\n \"many[0].uuid\": {\"new_value\": 2, \"old_value\": 1},\n \"many[1].uuid\": {\"new_value\": 1, \"old_value\": 2},\n }\n },\n \"inserts\": [],\n \"updates\": [\"many*\"],\n \"deletes\": [],\n }\n\n # this should fail because we are not in the same order on the two\n # dictionaries\n self._compare_diff(original_json, modified_json, expected)\n","sub_path":"src/davinci_crawling/entity_diff/tests/test_diff.py","file_name":"test_diff.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"358362127","text":"#!/usr/bin/python\n\n## ======================================================================== ##\n## Copyright 2009-2012 Intel Corporation ##\n## ##\n## Licensed under the Apache License, Version 2.0 (the \"License\"); ##\n## you may not use this file except in compliance with the License. 
##\n## You may obtain a copy of the License at ##\n## ##\n## http://www.apache.org/licenses/LICENSE-2.0 ##\n## ##\n## Unless required by applicable law or agreed to in writing, software ##\n## distributed under the License is distributed on an \"AS IS\" BASIS, ##\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ##\n## See the License for the specific language governing permissions and ##\n## limitations under the License. ##\n## ======================================================================== ##\n\nimport sys\nimport os\nimport re\n\n########################## configuration ##########################\n\ndash = '/'\nmodel = ''\nstatDir = ''\n\nhair_builder_modes_uncompressed = [ \n ('tri_accel=bvh4.triangle4 --tessellate-hair 0 4', 'bvh4.triangle4.P0aO'),\n# ('tri_accel=bvh4.triangle4 --tessellate-hair 1 4', 'bvh4.triangle4.P1aO'),\n# ('tri_accel=bvh4.triangle4 --tessellate-hair 2 4', 'bvh4.triangle4.P2aO'),\n ('tri_accel=bvh4.triangle4 --tessellate-hair 3 4', 'bvh4.triangle4.P3aO'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aO', 'bvh4hair.bezier1i.P0aO'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P1aO', 'bvh4hair.bezier1i.P1aO'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P2aO', 'bvh4hair.bezier1i.P2aO'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P3aO', 'bvh4hair.bezier1i.P3aO'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuO', 'bvh4hair.bezier1i.P0aOuO'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuST', 'bvh4hair.bezier1i.P0aOuOuST'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOaSP,hair_builder_replication_factor=1', 'bvh4hair.bezier1i.P0aOaSP.R1'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOaSP,hair_builder_replication_factor=3', 'bvh4hair.bezier1i.P0aOaSP.R3'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOaSP,hair_builder_replication_factor=7', 'bvh4hair.bezier1i.P0aOaSP.R7'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuSTaSPuSP,hair_builder_replication_factor=1', 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R1'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuSTaSPuSP,hair_builder_replication_factor=3', 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R3'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuSTaSPuSP,hair_builder_replication_factor=7', 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R7')\n ];\n\nhair_builder_modes_compressed_aligned = [\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aO', 'cbvh4hair.bezier1i.P0aO'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P1aO', 'cbvh4hair.bezier1i.P1aO'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P2aO', 'cbvh4hair.bezier1i.P2aO'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P3aO', 'cbvh4hair.bezier1i.P3aO'),\n# ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOaSP,hair_builder_replication_factor=7', 'cbvh4hair.bezier1i.P0aOaSP.R7'),\n]\n\nhair_builder_modes_compressed_unaligned = [\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuST', 'cbvh4hair.bezier1i.P0aOuOuST'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuSTaSPuSP,hair_builder_replication_factor=1', 'cbvh4hair.bezier1i.P0aOuOuSTaSPuSP.R1'),\n ('hair_accel=bvh4hair.bezier1i,hair_builder_mode=P0aOuOuSTaSPuSP,hair_builder_replication_factor=3', 'cbvh4hair.bezier1i.P0aOuOuSTaSPuSP.R3')\n]\n\nhair_builder_modes_measure = hair_builder_modes_uncompressed\n#hair_builder_modes_measure = hair_builder_modes_compressed_aligned\n#hair_builder_modes_measure = hair_builder_modes_compressed_unaligned 
\n\n#keep_triangles = [\n# 'buddha',\n# 'hairdragon' \n#]\n\nhair_builder_ignore = [\n# 'sophie_bvh4.triangle4.P1aO',\n# 'sophie_bvh4.triangle4.P2aO',\n# 'sophie_bvh4.triangle4.P3aO',\n# 'buddha_bvh4.triangle4.P1aO',\n# 'buddha_bvh4.triangle4.P2aO',\n# 'buddha_bvh4.triangle4.P3aO',\n# 'hairdragon_bvh4.triangle4.P1aO',\n# 'hairdragon_bvh4.triangle4.P2aO',\n# 'hairdragon_bvh4.triangle4.P3aO'\n]\n\nhair_builder_modes_print = [\n 'bvh4.triangle4.P0aO',\n# 'bvh4.triangle4.P1aO',\n# 'bvh4.triangle4.P2aO',\n 'bvh4.triangle4.P3aO',\n '',\n 'bvh4hair.bezier1i.P0aO',\n# 'bvh4hair.bezier1i.P1aO',\n# 'bvh4hair.bezier1i.P2aO',\n# 'bvh4hair.bezier1i.P3aO',\n# 'cbvh4hair.bezier1i.P3aO',\n '',\n# 'bvh4hair.bezier1i.P0aOaSP.R1',\n# 'bvh4hair.bezier1i.P0aOaSP.R3',\n# 'bvh4hair.bezier1i.P0aOaSP.R7',\n# 'cbvh4hair.bezier1i.P0aOaSP.R7',\n '',\n 'bvh4hair.bezier1i.P0aOuO',\n 'bvh4hair.bezier1i.P0aOuOuST',\n# 'cbvh4hair.bezier1i.P0aOuOuST',\n '',\n 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R1',\n 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R3',\n# 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R7',\n 'cbvh4hair.bezier1i.P0aOuOuSTaSPuSP.R1',\n 'cbvh4hair.bezier1i.P0aOuOuSTaSPuSP.R3',\n# '',\n# 'bvh4.triangle4.P3aO',\n# 'bvh4hair.bezier1i.P3aO',\n# 'bvh4hair.bezier1i.P0aOaSP.R7',\n# 'bvh4hair.bezier1i.P0aOuOuST',\n# 'bvh4hair.bezier1i.P0aOuOuSTaSPuSP.R7',\n '',\n# 'cbvh4hair.bezier1i.P3aO',\n# 'cbvh4hair.bezier1i.P0aOaSP.R7',\n# 'cbvh4hair.bezier1i.P0aOuOuST',\n# 'cbvh4hair.bezier1i.P0aOuOuSTaSPuSP.R7'\n]\n\ndef modelname(model):\n return os.path.splitext(os.path.basename(model))[0]\ndef name(model,mode):\n return modelname(model) + '_' + mode[1]\ndef name2(model,mode):\n return modelname(model) + '_' + mode\n \n########################## compiling ##########################\n\ndef compile():\n command = 'mkdir -p build; cd build; cmake ..; make clean; make -j 8';\n os.system(command)\n\n########################## rendering ##########################\n\ndef render(mode):\n #executable = 'build' + '/' + 'tutorial07'\n executable = './tutorial07'\n base = name(model,mode)\n os.system('mkdir -p ' + statDir)\n logFile = statDir + dash + base + '.log'\n imgFile = statDir + dash + base + '.tga'\n if not os.path.exists(logFile):\n command = executable\n command += ' -rtcore verbose=2,benchmark=1,' + mode[0]\n command += ' -c ' + model\n #if not modelname(model) in keep_triangles:\n # command += ' -i none' # disable triangle geometry\n command += ' -size 1024 1024 -frames 4 8'\n command += ' -o ' + imgFile + ' > ' + logFile\n os.system(command)\n\ndef renderLoop():\n for mode in hair_builder_modes_measure:\n base = name(model,mode)\n print(base)\n if (base in hair_builder_ignore):\n continue;\n render(mode)\n\n########################## data extraction ##########################\n\ntri_sah = {}\ntri_memory = {}\nhair_sah = {}\nhair_memory = {}\nfps = {}\n\ndef extract(mode):\n base = name2(model,mode)\n logFileName = statDir + dash + base + '.log'\n tri_sah [base] = 0\n tri_memory[base] = 0\n hair_sah [base] = 0\n hair_memory[base] = 0\n fps [base] = 0\n try:\n logFile = open(logFileName, 'r')\n for line in logFile:\n if line.count('BENCHMARK_HAIR_ACCEL ') == 1:\n numbers = map(float, line[21:].split(\" \"))\n hair_sah [base] += numbers[0]\n hair_memory[base] += numbers[1]\n if line.count('BENCHMARK_TRIANGLE_ACCEL ') == 1:\n numbers = map(float, line[25:].split(\" \"))\n tri_sah [base] += numbers[0]\n tri_memory[base] += numbers[1]\n if line.count('BENCHMARK_RENDER ') == 1:\n numbers = map(float, line[17:].split(\" \"))\n fps[base] += numbers[0]\n 
except IOError :\n print('cannot open ' + logFileName)\n\n# Extract all data\ndef extractLoop():\n for mode in hair_builder_modes_print:\n if mode != '': extract(mode)\n\ndef printData(mode):\n base = name2(model,mode)\n line = ' ' + '{0:<40}'.format(mode) + ' | '\n line += (' %#6.1f' % tri_sah[base])\n line += (' %#6.1f MB' % (1E-6*tri_memory[base]))\n line += (' %#6.1f' % hair_sah[base])\n line += (' %#6.1f MB' % (1E-6*hair_memory[base]))\n line += (' %#6.3f' % fps[base])\n line += (' %#6.3f' % (fps[base]/(1E-9*(tri_memory[base]+hair_memory[base]+0.0001))))\n print(line)\n\ndef printDataLoop():\n tableWidth = 40 + 60\n\n print('')\n \n title = os.path.splitext(os.path.basename(model))[0]\n line = ' ' + '{0:<40}'.format(title) + ' | TriSAH TriMemory HairSAH HairMemory Fps Fps/GB'\n print(line)\n\n line = ''\n while (len(line) < tableWidth): line = line + '-'\n print(line)\n\n for mode in hair_builder_modes_print:\n if mode == '': print('')\n else: printData(mode)\n\n print('')\n\n########################## command line parsing ##########################\n\ndef printUsage():\n sys.stderr.write('Usage: ' + sys.argv[0] + ' measure ...\\n')\n sys.stderr.write(' ' + sys.argv[0] + ' print ...\\n')\n sys.exit(1)\n\nif len(sys.argv) < 3:\n printUsage()\n sys.exit(1)\n\nmodel = 'none' \nstatDir = 'stat'\n\nif sys.argv[1] == 'measure':\n statDir = sys.argv[2]\n for i in range(3, len(sys.argv)):\n model = sys.argv[i]\n renderLoop()\n for i in range(3, len(sys.argv)):\n model = sys.argv[i]\n extractLoop()\n printDataLoop()\n sys.exit(1)\n\nif sys.argv[1] == 'print':\n print(os.path.splitext(os.path.basename(model))[0])\n statDir = sys.argv[2]\n for i in range(3, len(sys.argv)):\n model = sys.argv[i]\n extractLoop()\n printDataLoop()\n sys.exit(1)\n\nprintUsage()\nsys.exit(1)\n","sub_path":"scripts/benchmark-bvh4hair.py","file_name":"benchmark-bvh4hair.py","file_ext":"py","file_size_in_byte":9755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"85829220","text":"from django.forms import ModelForm\nfrom .models import FormBlueprint\nfrom workflows.models import Workflow\nfrom django import forms\n\nclass FormBlueprintForm(ModelForm):\n class Meta:\n model = FormBlueprint\n fields = ['title', 'workflow', 'active']\n labels = {\n 'title': ('Title'),\n 'workflow': ('Workflow'),\n 'active': ('Active')\n }\n help_texts = {\n 'title': ('Title of the form'),\n 'workflow': ('Select the workflow to be used by this form'),\n 'active': ('Form will be visible to others only if active')\n }\n # field_classes = {\n # 'title': forms.CharField(max_length=128, required=True),\n # 'workflow': forms.ChoiceField(required=True),\n # }","sub_path":"src/forms/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"432047610","text":"import os\n\nimport json, yaml\nimport pytest\nfrom jsonschema import validate\n\nSCHEMA_DIR = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))),\n \"network_wrangler\",\n \"schemas\",\n)\nSMALL_EX_DIR = os.path.join(os.getcwd(), \"examples\", \"single\")\nSTPAUL_EX_DIR = os.path.join(os.getcwd(), \"examples\", \"stpaul\")\n\nSTPAUL_PC_DIR = os.path.join(os.getcwd(), \"examples\", \"stpaul\", \"project_cards\")\n\n\n@pytest.mark.schema\n@pytest.mark.skip(reason=\"need to work on this\")\ndef test_roadway_link_schema():\n schema_filename = os.path.join(SCHEMA_DIR, \"roadway_network_link.json\")\n 
link_file = os.path.join(SMALL_EX_DIR, \"link.json\")\n\n with open(schema_filename) as schema_json_file:\n schema = json.load(schema_json_file)\n\n with open(link_file, \"r\") as links:\n link_json = yaml.safe_load(links)\n\n validate(link_json, schema)\n\n\n@pytest.mark.schema\n@pytest.mark.skip(reason=\"need to work on this\")\ndef test_project_card_schema():\n schema_filename = os.path.join(SCHEMA_DIR, \"project_card.json\")\n card_file = os.path.join(STPAUL_PC_DIR, \"1_simple_roadway_attribute_change.yml\")\n\n with open(schema_filename) as schema_json_file:\n schema = json.load(schema_json_file)\n\n with open(card_file, \"r\") as card:\n card_json = yaml.safe_load(card)\n\n validate(card_json, schema)\n","sub_path":"tests/test_schemas.py","file_name":"test_schemas.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"168608377","text":"import os\nfrom os import sys\nimport random\n\nimport time\nimport datetime\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nfrom options import args\nfrom MiscTools import get_logger, makedirs, add_noise_tensor, count_parameters\nfrom networks import ODENet_MNIST\n\nif args.isRandom == False:\n random.seed(0)\n np.random.seed(seed=0)\n torch.manual_seed(0)\n\ndef add_noise_tensor_random(x):\n isAdd = random.choice([True, False])\n noise_level = random.choice([50, 75, 100])\n if isAdd:\n return add_noise_tensor(x, ['G', noise_level])\n else:\n return x\n\ndef get_mnist_loaders(isTrain=False, batch_size=128, test_batch_size=1000):\n if isTrain:\n # train\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: add_noise_tensor_random(x))\n ])\n train_loader = DataLoader(\n datasets.MNIST(root='../../data/mnist', train=True, download=True, transform=transform), \n batch_size=batch_size, shuffle=True, num_workers=6, drop_last=True)\n train_eval_loader = DataLoader(\n datasets.MNIST(root='../../data/mnist', train=True, download=True, transform=transform),\n batch_size=test_batch_size, shuffle=False, num_workers=2, drop_last=True)\n test_loader = DataLoader(\n datasets.MNIST(root='../../data/mnist', train=False, download=True, transform=transform),\n batch_size=test_batch_size, shuffle=False, num_workers=2, drop_last=True)\n\n return train_loader, test_loader, train_eval_loader\n else:\n transform = transforms.Compose([\n transforms.ToTensor()\n ])\n transform_noisy = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: add_noise_tensor(x, ['G', args.noise_level]))\n ])\n test_loader = DataLoader(\n datasets.MNIST(root='../../data/mnist', train=False, download=True, transform=transform),\n batch_size=test_batch_size, shuffle=False, num_workers=2, drop_last=True\n )\n test_loader_noisy = DataLoader(\n datasets.MNIST(root='../../data/mnist', train=False, download=True, transform=transform_noisy),\n batch_size=test_batch_size, shuffle=False, num_workers=2, drop_last=True\n )\n return test_loader, test_loader_noisy \n\ndef one_hot(x, K):\n return np.array(x[:, None] == np.arange(K)[None, :], dtype=int)\n\ndef accuracy(model, dataset_loader):\n \n total_correct = 0\n for x, y in dataset_loader:\n x = x.cuda()\n y = one_hot(np.array(y.numpy()), 10)\n\n target_class = np.argmax(y, axis=1)\n 
predicted_class = np.argmax(model(x).cpu().detach().numpy(), axis=1)\n total_correct += np.sum(predicted_class == target_class)\n return total_correct / len(dataset_loader.dataset)\n\n\ndef accuracy_withRef(model, RefDL, PertbDL):\n target_class = np.array([])\n pred_class_ref = np.array([])\n pred_class_pertb = np.array([])\n for x, y in RefDL:\n x = x.cuda(args.device_ids[0])\n pred_class_ref = np.concatenate((pred_class_ref, np.argmax(model(x).cpu().detach().numpy(), axis=1)), axis=None)\n # pred_class_ref.append(np.argmax(model(x).cpu().detach().numpy(), axis=1))\n\n for x, y in PertbDL:\n x = x.cuda(args.device_ids[0])\n y = one_hot(np.array(y.numpy()), 10)\n target_class = np.concatenate((target_class, np.argmax(y, axis=1)), axis=None)\n pred_class_pertb = np.concatenate((pred_class_pertb, np.argmax(model(x).cpu().detach().numpy(), axis=1)), axis=None)\n\n accu_ref = np.sum(pred_class_ref == target_class) / len(target_class)\n accu_pertb_target = np.sum(pred_class_pertb == target_class)/len(target_class)\n accu_pertb_ref = np.sum((pred_class_ref == target_class) & (pred_class_pertb == target_class)) / np.sum(pred_class_ref == target_class)\n return accu_ref, accu_pertb_target, accu_pertb_ref\n\n\nif __name__ == '__main__':\n if args.isTrain:\n # Add logs\n save_dir = os.path.join(args.dir_logging, args.exp_name); makedirs(save_dir)\n logger = get_logger(logpath=os.path.join(save_dir,'train_result.txt')) \n logger.info(os.path.abspath(__file__))\n for arg in vars(args):\n logger.info('{}: {}'.format(arg, getattr(args, arg)))\n\n # Build model\n model = ODENet_MNIST()\n criterion = nn.CrossEntropyLoss()\n \n if torch.cuda.is_available():\n model = model.cuda()\n criterion = criterion.cuda()\n if len(args.device_ids) > 1:\n model = nn.DataParallel(model, args.device_ids)\n\n # optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-4)\n # scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=0.2)\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)\n\n if not args.resume:\n logger.info('---- Model ----')\n logger.info(model)\n logger.info('Number of parameters: {}'.format(count_parameters(model))) \n\n # Construct datasets\n train_loader, test_loader, train_eval_loader = get_mnist_loaders(\n isTrain=True, batch_size=128, test_batch_size=1000)\n\n # Training\n logger.info('---- Training ----')\n best_epoch = {'epoch':0, 'acc':0}\n\n # Resume model\n if args.resume:\n print(\"=> loading checkpoint '{}'\".format(save_dir))\n checkpoint = torch.load(os.path.join(save_dir, 'checkpoint.pth'))\n args.start_epoch = checkpoint['epoch']+1\n best_epoch = checkpoint['best_epoch']\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(save_dir, checkpoint['epoch']))\n\n for epoch in range(args.start_epoch, args.end_epoch):\n tic = time.time()\n scheduler.step(epoch)\n model.train()\n for _, batch_tr in enumerate(train_loader):\n optimizer.zero_grad()\n loss = criterion(model(batch_tr[0].cuda()), batch_tr[1].cuda())\n loss.backward()\n optimizer.step()\n\n # evaluation\n with torch.no_grad():\n if epoch%10==9:\n train_acc = accuracy(model, train_eval_loader)\n else:\n train_acc = 0\n test_acc = accuracy(model, test_loader)\n\n if test_acc >= best_epoch['acc']:\n best_epoch['epoch'] = epoch\n best_epoch['acc'] = test_acc\n torch.save(model, 
os.path.join(save_dir, 'model_best.pth'))\n\n                checkpoint = {\n                    'epoch': epoch,\n                    'model': model.state_dict(),\n                    'best_epoch': best_epoch,\n                    'optimizer': optimizer.state_dict()\n                }\n                torch.save(checkpoint, os.path.join(save_dir, 'checkpoint.pth'))\n\n            logger.info(\n                \"Epoch {:04d}/{:04d} | Time {:.3f}s | \"\n                \"Train Acc {:.4f} | Test Acc {:.4f} | Best epoch @ {:04d} with Acc {:.4f} | lr: {:.6f}\".format(\n                    epoch, args.end_epoch, time.time()-tic, train_acc, test_acc, best_epoch['epoch'],\n                    best_epoch['acc'], optimizer.state_dict()['param_groups'][0]['lr'])\n            )\n    else:\n        # Build model\n        print('===> Building model ...')\n        # Add logs\n        save_dir = os.path.join(args.dir_logging); makedirs(save_dir)\n        logger = get_logger(logpath=os.path.join(save_dir, args.logging_file))\n        logger.info(os.path.abspath(__file__))\n        # for arg in vars(args):\n        #     logger.info('{}: {}'.format(arg, getattr(args, arg)))\n        logger.info('{}: {}'.format('dir_model', getattr(args, 'dir_model')))\n        logger.info('{}: {}'.format('noise_level', getattr(args, 'noise_level')))\n        model = ODENet_MNIST()\n        if torch.cuda.is_available():\n            model = model.cuda(args.device_ids[0])\n            if len(args.device_ids) >= 2:\n                model = nn.DataParallel(model, args.device_ids)\n\n        model.load_state_dict(torch.load(args.dir_model).state_dict())\n\n        # Construct datasets\n        test_loader, test_loader_noisy = get_mnist_loaders(\n            isTrain=False, batch_size=128, test_batch_size=1000)\n        # Testing\n        model.eval()\n        with torch.no_grad():\n            accu_origin, accu_pertb, accu_pertb_origin = accuracy_withRef(model, test_loader, test_loader_noisy)\n            logger.info(\n                \"Test Acc: {}, Pertb Acc: {}, Pertb_wrt_origin: {}\".format(accu_origin, accu_pertb, accu_pertb_origin)\n            )\n","sub_path":"mnist/main_ode_gaus.py","file_name":"main_ode_gaus.py","file_ext":"py","file_size_in_byte":9102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} {"seq_id":"258030603","text":"import urllib.parse\n\nimport requests\nimport uvicorn\nfrom fastapi import FastAPI, Request, Response\nfrom fastapi.responses import JSONResponse\nfrom .crypto import rsa, encrypt_params\n\napp = FastAPI()\n\n\n@app.middleware(\"http\")\nasync def cors(req: Request, call_next):\n    if req.method.lower() == \"options\":\n        return Response(\n            status_code=204,\n            headers={\n                \"Access-Control-Allow-Credentials\": \"true\",  # header values must be strings\n                \"Access-Control-Allow-Origin\": \"*\",\n                \"Access-Control-Allow-Headers\": \"Content-Type\",\n                \"Access-Control-Allow-Methods\": &#13;
\"PUT,POST,GET,DELETE,OPTIONS\"\n }\n )\n\n response = await call_next(req)\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n\n return response\n\n\n@app.middleware(\"http\")\nasync def proxy(req: Request, call_next):\n path = req.url.path\n\n if path.startswith(\"/api\"):\n body = await req.body()\n params, key = encrypt_params(body)\n enc_sec_key = rsa(key)\n api_path = path.replace(\"/api\", \"/weapi\")\n ret = requests.post(\n f\"https://music.163.com{api_path}?csrf_token=\",\n data=urllib.parse.urlencode({\n \"params\": params,\n \"encSecKey\": enc_sec_key\n }),\n headers={\n \"referer\": \"https://music.163.com/\",\n \"content-type\": \"application/x-www-form-urlencoded\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36\"\n }\n )\n\n return JSONResponse(\n content=ret.json(),\n status_code=200,\n headers={\n \"Access-Control-Allow-Origin\": \"*\",\n }\n )\n\n response = await call_next(req)\n\n return response\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\ndef run_server():\n uvicorn.run(app, port=8888)\n","sub_path":"music/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"489304289","text":"# 6.00 Problem Set 3\n# \n# Hangman\n#\n# -----------------------------------\n# Helper code\n# (you don't need to understand this helper code)\nimport random\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = str.split(line)\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\n\ndef choose_word(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n\n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n\n# end of helper code\n# -----------------------------------\n\n# actually load the dictionary of words and point to it with \n# the wordlist variable so that it can be accessed from anywhere\n# in the program\nwordlist = load_words()\n\n# your code begins here!\n\ndef partial_word(word, guessed_letter):\n result = ''\n for letter in word:\n if letter in guessed_letter:\n result = result + letter\n else:\n result = result + ' '\n\n return result\n\ndef hangman():\n random_word = choose_word(wordlist)\n word = random_word.lower()\n print('I am thinking of a word that is', len(word), 'letters long.')\n chance = 8\n guessed_letter = ''\n word_guessed = False\n available = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n while chance > 0 and not word_guessed :\n print('________________')\n print('You have', chance, 'chances.')\n print('Available letters : ', ''.join(available))\n guess = input('Enter your guessing letter : ')\n if guess not in available:\n print('Oops! You\\'ve already guessed that letter : ', partial_word(word, guessed_letter))\n elif guess not in word:\n chance = chance - 1\n print('Oops! 
That letter is not in my word : ', partial_word(word, guessed_letter))\n available.remove(guess)\n else:\n available.remove(guess)\n guessed_letter = guessed_letter + guess\n print('Good guess : ', partial_word(word, guessed_letter))\n\n if word == guessed_letter:\n word_guessed = True\n\n if word_guessed:\n print('Congratulation!')\n else:\n print('You are hanged!!')\n\n\n\n\n\n\n\n\n\n","sub_path":"Recursion/ps2/ps2_hangman.py","file_name":"ps2_hangman.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328877894","text":"# Copyright (C) 2022 CVAT.ai Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport csv\nimport json\nimport os\nimport random\nimport sys\nfrom itertools import product\n\nNAME = \"projects\"\n\n\ndef read_rules(name):\n rules = []\n with open(os.path.join(sys.argv[1], f\"{name}.csv\")) as f:\n reader = csv.DictReader(f)\n for row in reader:\n row = {k.lower(): v.lower().replace(\"n/a\", \"na\") for k, v in row.items()}\n row[\"limit\"] = row[\"limit\"].replace(\"none\", \"None\")\n found = False\n for col, val in row.items():\n if col in [\"limit\", \"method\", \"url\"]:\n continue\n complex_val = [v.strip() for v in val.split(\",\")]\n if len(complex_val) > 1:\n found = True\n for item in complex_val:\n new_row = row.copy()\n new_row[col] = item\n rules.append(new_row)\n if not found:\n rules.append(row)\n\n return rules\n\n\nsimple_rules = read_rules(NAME)\n\nSCOPES = {rule[\"scope\"] for rule in simple_rules}\nCONTEXTS = [\"sandbox\", \"organization\"]\nOWNERSHIPS = [\"owner\", \"assignee\", \"none\"]\nGROUPS = [\"admin\", \"business\", \"user\", \"worker\", \"none\"]\nORG_ROLES = [\"owner\", \"maintainer\", \"supervisor\", \"worker\", None]\nSAME_ORG = [False, True]\n\n\ndef RESOURCES(scope):\n if scope == \"list\":\n return [None]\n elif scope in [\"create\", \"import:backup\"]:\n return [\n {\n \"owner\": {\"id\": random.randrange(400, 500)},\n \"assignee\": {\"id\": random.randrange(500, 600)},\n \"organization\": {\"id\": random.randrange(600, 700)},\n \"user\": {\"num_resources\": count},\n }\n for count in (0, 1, 3, 10)\n ]\n else:\n return [\n {\n \"id\": random.randrange(300, 400),\n \"owner\": {\"id\": random.randrange(400, 500)},\n \"assignee\": {\"id\": random.randrange(500, 600)},\n \"organization\": {\"id\": random.randrange(600, 700)},\n }\n ]\n\n\ndef is_same_org(org1, org2):\n if org1 is not None and org2 is not None:\n return org1[\"id\"] == org2[\"id\"]\n elif org1 is None and org2 is None:\n return True\n else:\n return False\n\n\ndef eval_rule(scope, context, ownership, privilege, membership, data):\n if privilege == \"admin\":\n return True\n\n rules = list(filter(lambda r: scope == r[\"scope\"], simple_rules))\n rules = list(filter(lambda r: r[\"context\"] == \"na\" or context == r[\"context\"], rules))\n rules = list(filter(lambda r: r[\"ownership\"] == \"na\" or ownership == r[\"ownership\"], rules))\n rules = list(\n filter(\n lambda r: r[\"membership\"] == \"na\"\n or ORG_ROLES.index(membership) <= ORG_ROLES.index(r[\"membership\"]),\n rules,\n )\n )\n rules = list(filter(lambda r: GROUPS.index(privilege) <= GROUPS.index(r[\"privilege\"]), rules))\n resource = data[\"resource\"]\n rules = list(\n filter(lambda r: not r[\"limit\"] or eval(r[\"limit\"], {\"resource\": resource}), rules)\n )\n if (\n not is_same_org(data[\"auth\"][\"organization\"], data[\"resource\"][\"organization\"])\n and context != \"sandbox\"\n ):\n return False\n\n 
return bool(rules)\n\n\ndef get_data(scope, context, ownership, privilege, membership, resource, same_org):\n data = {\n \"scope\": scope,\n \"auth\": {\n \"user\": {\"id\": random.randrange(0, 100), \"privilege\": privilege},\n \"organization\": {\n \"id\": random.randrange(100, 200),\n \"owner\": {\"id\": random.randrange(200, 300)},\n \"user\": {\"role\": membership},\n }\n if context == \"organization\"\n else None,\n },\n \"resource\": resource,\n }\n\n user_id = data[\"auth\"][\"user\"][\"id\"]\n if context == \"organization\":\n org_id = data[\"auth\"][\"organization\"][\"id\"]\n if data[\"auth\"][\"organization\"][\"user\"][\"role\"] == \"owner\":\n data[\"auth\"][\"organization\"][\"owner\"][\"id\"] = user_id\n\n if same_org:\n data[\"resource\"][\"organization\"][\"id\"] = org_id\n\n if ownership == \"owner\":\n data[\"resource\"][\"owner\"][\"id\"] = user_id\n\n if ownership == \"assignee\":\n data[\"resource\"][\"assignee\"][\"id\"] = user_id\n\n return data\n\n\ndef _get_name(prefix, **kwargs):\n name = prefix\n for k, v in kwargs.items():\n prefix = \"_\" + str(k)\n if isinstance(v, dict):\n if \"id\" in v:\n v = v.copy()\n v.pop(\"id\")\n if v:\n name += _get_name(prefix, **v)\n else:\n name += f'{prefix}_{str(v).upper().replace(\":\", \"_\")}'\n\n return name\n\n\ndef get_name(scope, context, ownership, privilege, membership, resource, same_org):\n return _get_name(\"test\", **locals())\n\n\ndef is_valid(scope, context, ownership, privilege, membership, resource, same_org):\n if context == \"sandbox\" and membership:\n return False\n if scope == \"list\" and ownership != \"None\":\n return False\n if context == \"sandbox\" and same_org is False:\n return False\n\n return True\n\n\ndef gen_test_rego(name):\n with open(f\"{name}_test.gen.rego\", \"wt\") as f:\n f.write(f\"package {name}\\n\\n\")\n for scope, context, ownership, privilege, membership, same_org in product(\n SCOPES, CONTEXTS, OWNERSHIPS, GROUPS, ORG_ROLES, SAME_ORG\n ):\n for resource in RESOURCES(scope):\n if not is_valid(\n scope, context, ownership, privilege, membership, resource, same_org\n ):\n continue\n\n data = get_data(\n scope, context, ownership, privilege, membership, resource, same_org\n )\n test_name = get_name(\n scope, context, ownership, privilege, membership, resource, same_org\n )\n result = eval_rule(scope, context, ownership, privilege, membership, data)\n f.write(\n \"{test_name} {{\\n {allow} with input as {data}\\n}}\\n\\n\".format(\n test_name=test_name,\n allow=\"allow\" if result else \"not allow\",\n data=json.dumps(data),\n )\n )\n\n # Write the script which is used to generate the file\n with open(sys.argv[0]) as this_file:\n f.write(f\"\\n\\n# {os.path.split(sys.argv[0])[1]}\\n\")\n for line in this_file:\n if line.strip():\n f.write(f\"# {line}\")\n else:\n f.write(f\"#\\n\")\n\n # Write rules which are used to generate the file\n with open(os.path.join(sys.argv[1], f\"{name}.csv\")) as csv_file:\n f.write(f\"\\n\\n# {name}.csv\\n\")\n for line in csv_file:\n if line.strip():\n f.write(f\"# {line}\")\n else:\n f.write(f\"#\\n\")\n\n\ngen_test_rego(NAME)\n","sub_path":"cvat/apps/iam/rules/tests/generators/projects_test.gen.rego.py","file_name":"projects_test.gen.rego.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"562001864","text":"#!/usr/bin/env python\n# coding: utf-8\nimport pandas as pd\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio import 
SeqIO\nimport os\n\n\ndef main(input_path, dir_data, path_features, dir_pseinone):\n idata = pd.read_csv(input_path)\n Hchain, Lchain = idata.Hchain, idata.Lchain\n data = pd.DataFrame()\n\n # Build Fasta files to process data\n print('Building fasta files...')\n records = []\n i = 1\n for seq in Hchain:\n records.append(SeqRecord(Seq(seq), id='sequence #' + str(i),\n description = 'Heavy'))\n i += 1\n SeqIO.write(records, dir_data/\"fasta_heavy.fa\", \"fasta\")\n\n records = []\n i = 1\n for seq in Lchain:\n records.append(SeqRecord(Seq(seq), id='sequence #'+str(i), description='Light'))\n i += 1\n SeqIO.write(records, dir_data/\"fasta_light.fa\", \"fasta\")\n\n path_acc = dir_pseinone/'acc.py'\n print('Running Pse-in-One 2.0 for HEAVY chains...')\n os.system(f\"python3 {path_acc} {dir_data}/fasta_heavy.fa Protein ACC -out {dir_data}/fasta_heavy_ACC.out\")\n print('Running Pse-in-One 2.0 for LIGHT chains...')\n os.system(f\"python3 {path_acc} {dir_data}/fasta_light.fa Protein ACC -out {dir_data}/fasta_light_ACC.out\")\n\n ACC_heavy = pd.read_csv(dir_data/\"fasta_heavy_ACC.out\", sep='\\t', header=None)\n ACC_light = pd.read_csv(dir_data/\"fasta_light_ACC.out\", sep='\\t', header=None)\n\n d = {}\n for i in range(len(ACC_heavy.iloc[0])):\n d[i] = 'ACC_heavy_' + str(i)\n ACC_heavy = ACC_heavy.rename(columns=d, inplace=False)\n\n d = {}\n for i in range(len(ACC_light.iloc[0])):\n d[i] = 'ACC_light_' + str(i)\n ACC_light = ACC_light.rename(columns=d, inplace=False)\n\n print('Counting AA by polarity...')\n data['Heavy_Length'] = Hchain.str.len()\n data['Light_Length'] = Lchain.str.len()\n\n print(' Counting non-polar AA...')\n data['Heavy_SmallNonpolar'] = Hchain.str.count('[GAST]')\n data['Light_SmallNonpolar'] = Lchain.str.count('[GAST]')\n print(' Counting hydrophobic AA...')\n data['Heavy_Hydrophobic'] = Hchain.str.count('[CVILPFYMW]')\n data['Light_Hydrophobic'] = Lchain.str.count('[CVILPFYMW]')\n\n print(' Counting polar AA...')\n data['Heavy_Polar'] = Hchain.str.count('[NQH]')\n data['Light_Polar'] = Lchain.str.count('[NQH]')\n\n print(' Counting charged AA...')\n data['Heavy_Negative'] = Hchain.str.count('[DE]')\n data['Light_Negative'] = Lchain.str.count('[DE]')\n data['Heavy_Positive'] = Hchain.str.count('[KR]')\n data['Light_Positive'] = Lchain.str.count('[KR]')\n\n print(' Counting cysteins...')\n data['Heavy_Cystein'] = Hchain.str.count('C')\n data['Light_Cystein'] = Lchain.str.count('C')\n\n data = pd.concat([data, ACC_heavy], axis=1)\n data = pd.concat([data, ACC_light], axis=1)\n data.to_csv(path_features, index=False)\n return data\n","sub_path":"src/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247794048","text":"# Copyright (C) 2008 Jason Cozens\n# Authors: Jason Cozens \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
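# Aside (annotation): the feature-extraction script above counts residue
# classes with pandas' str.count over a regex character class. A minimal,
# self-contained sketch with made-up sequences:
import pandas as pd
seqs = pd.Series(["GASTCV", "KRDE"])
print(seqs.str.count("[GAST]"))  # small nonpolar residues per sequence -> 4, 0
print(seqs.str.count("[KR]"))    # positively charged residues -> 0, 2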
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\n\nimport string\n\nfrom eqp import(\n    EQState,\n    Simulator,\n    QCell,\n    )\n\n\ndef join_exit():\n    \"\"\"a.Join() ... a.Exit()\"\"\"\n    interval = 8\n    ids = ['a', 'b', 'c', 'd', 'e', 'f']\n    eqstate = EQState(map(lambda id: QCell(id), ids))\n    sim = Simulator(eqstate, {1: [('a', 'Join')],\n                              6: [('a', 'Exit')],})\n    sim.run(interval)\n    trace = ''\n    for t in range(0, interval):\n        trace += '%s:\\n%s' % (t, sim.step(t).to_str(0,1,1,0,1,1))\n    print (\"\"\"\nExample: A Single QCell Setting Up an Eager Queue and Exiting\n-------------------------------------------------------------\"\"\")\n    print (trace + str(interval) + ':')\n    print ('-' * 80 + \"\"\"\n 0 - 1: All 6 QCells are Idling.\n 1 - 2: A Join command is issued, the cell 'a' goes into the Listening state.\n        There is no broadcast so the QCell increments its NoMessage count.\n 2 - 3: Cell 'a' listens to the channel and as there is no broadcast moves\n        into the Joining state.\n 3 - 4: Cell 'a' broadcasts a JOIN message and starts an eager queue.\n 4 - 5: Cell 'a' listens to the channel and increments its NoMessage count.\n 5 - 6: As Cell 'a' is the only QCell in the queue it broadcasts an UPDATE.\n 6 - 7: An Exit command is issued and Cell 'a' broadcasts an EXIT message\n        immediately.\n 7 - 8: All 6 QCells are Idling again.\"\"\")\n    \ndef join_join():\n    \"\"\"a.Join() ... b.Join()\"\"\"\n    interval = 9\n    ids = ['a', 'b', 'c', 'd', 'e', 'f']\n    eqstate = EQState(map(lambda id: QCell(id), ids))\n    sim = Simulator(eqstate, {1: [('a', 'Join')],\n                              2: [('b', 'Join')],})\n    sim.run(interval)\n    trace = ''\n    for t in range(0, interval):\n        trace += '%s:\\n%s' % (t, sim.step(t).to_str(0,1,1,0,1,1))\n    print (\"\"\"\nExample: Two QCells Setting Up an Eager Queue\n---------------------------------------------\"\"\")\n    print (trace + str(interval) + ':')\n    print ('-' * 80 + \"\"\"\n 0 - 1: All 6 QCells are Idling.\n 1 - 2: A Join command is issued and cell 'a' goes into the Listening state.\n        There is no broadcast so the QCell increments its NoMessage count.\n 2 - 3: A Join command is issued and cell 'b' goes into the Listening state.\n        Cell 'a' listens to the channel and as there is no broadcast moves\n        into the Joining state.\n 3 - 4: Cell 'a' broadcasts a JOIN message and starts an eager queue.\n 4 - 5: Cell 'b' broadcasts a JOIN message and joins the eager queue.\n 5 - 6: Cell 'a' and 'b' listen to the channel\n        and increment their NoMessage counts.\n 6 - 7: Cell 'a' is at the back of the LKT vector so it broadcasts an UPDATE.\n 7 - 8: Cell 'a' and 'b' listen to the channel\n        and increment their NoMessage counts.\n 8 - 9: Cell 'b' is at the back of the LKT vector so it broadcasts an UPDATE.\n    \"\"\")\n\n    \ndef update_cycle_broadcasts():\n    interval = 22\n    ids = ['a', 'b', 'c', 'd', 'e', 'f']\n    eqstate = EQState(map(lambda id: QCell(id), ids))\n    sim = Simulator(eqstate, {0: [('a', 'Join')],\n                              2: [('b', 'Join')],\n                              3: [('c', 'Join')],\n                              7: [('d', 'Join')],\n                              8: [('e', 'Join')],\n                              })\n    sim.run(interval)\n    trace = ''\n    for t in range(0, interval):\n        step = sim.step(t).to_str(0,0,0,0,1,0)\n        if step.find('-.-') == -1:\n            step = sim.step(t).to_str(0,0,0,1,1,0)\n        trace += '%s:\\n%s' % (t, step)\n    \n    print (\"\"\"\nExample: Messages and Broadcasts\n--------------------------------\"\"\")\n    print (trace)\n    print
 (\"\"\"\nThis shows Cells 'a' - 'e' joining an eager queue. When 'd' and 'e' join there\nis a collision \"*.*\" and one of the cells backs off.\"\"\")\n\ndef join_request():\n    \"\"\"a.Join() ... b.Join()\"\"\"\n    interval = 9\n    ids = ['a', 'b', 'c', 'd', 'e', 'f']\n    eqstate = EQState(map(lambda id: QCell(id), ids))\n    sim = Simulator(eqstate, {1: [('a', 'Join')],\n                              4: [('f', 'Request')],})\n    sim.run(interval)\n    trace = ''\n    for t in range(0, interval):\n        trace += '%s:\\n%s' % (t, sim.step(t).to_str(0,1,1,0,1,1))\n    print (\"\"\"\nExample: One Cell sets up an Eager Queue, Another makes a Request\n-----------------------------------------------------------------\"\"\")\n    print (trace)\n    \ndef requests(): \n    interval = 21\n    ids = ['a', 'b', 'c', 'd', 'e', 'f']\n    eqstate = EQState(map(lambda id: QCell(id), ids))\n    sim = Simulator(eqstate, {0: [('a', 'Join')],\n                              2: [('d', 'Join')],\n                              3: [('c', 'Join')],\n                              5: [('b', 'Join')],\n                              10: [('e', 'Request')],\n                              15: [('f', 'Request')],\n                              })\n    sim.run(interval)\n    trace = ''\n    for t in range(9, interval):\n        trace += '%s:\\n%s' % (t, sim.step(t).to_str(0,1,0,0,1,0))\n    print (\"\"\"\nExample: Two Requests started on Different \"Phases\"\n--------------------------------------------------\"\"\")\n    print (trace)\n    \ndef request_with_decline(): \n    interval = 19\n    ids = ['a', 'b', 'c', 'd', 'e', 'f']\n    eqstate = EQState(map(lambda id: QCell(id), ids))\n    sim = Simulator(eqstate, {0: [('e', 'Join')],\n                              2: [('d', 'Join')],\n                              3: [('c', 'Join')],\n                              5: [('f', 'Join')],\n                              10: [('a', 'Request')],\n                              13: [('c', 'Exit')],\n                              })\n    sim.run(interval)\n    trace = ''\n    for t in range(9, interval):\n        trace += '%s:\\n%s' % (t, sim.step(t).to_str(0,1,0,0,1,0))\n    print (\"\"\"\nExample: Request with Decline\n-----------------------------\"\"\")\n    print (trace)\n    \ndef main():\n    # join_exit()\n    join_join()\n    update_cycle_broadcasts()\n    join_request()\n    requests()\n    request_with_decline()\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"C/res/report01.py","file_name":"report01.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"561238128","text":"def quick_sort(in_arr):\n\tif len(in_arr) <= 1:\n\t\treturn in_arr\n\telse:\n\t\t# recurse on both partitions and exclude the pivot from them\n\t\treturn (quick_sort([x for x in in_arr[1:] if x < in_arr[0]]) +\n\t\t        [in_arr[0]] +\n\t\t        quick_sort([y for y in in_arr[1:] if y >= in_arr[0]]))\n\ndef binary_search(in_arr, targ):\n\thi = len(in_arr) - 1\n\tlo = 0\n\twhile lo <= hi:\n\t\tmiddy = (lo + hi) // 2\n\t\tif in_arr[middy] == targ:\n\t\t\treturn middy\n\t\telif in_arr[middy] < targ:\n\t\t\tlo = middy + 1\n\t\telse:\n\t\t\thi = middy - 1\n\treturn -1\n\ndef find_adders(in_arr, targ_sum):\n\tsorted_arr = quick_sort(in_arr)\n\tind = 0\n\tfor x in sorted_arr:\n\t\tres = binary_search(sorted_arr, (targ_sum - x))\n\t\tif res != -1 and res != ind:\n\t\t\treturn 1\n\t\tind += 1\n\treturn 0\n","sub_path":"find_adders.py","file_name":"find_adders.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216324489","text":"\"\"\"add table task\n\nRevision ID: 8feb052e1f89\nRevises: 76e28793186f\nCreate Date: 2021-04-30 13:07:05.352935\n\n\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '8feb052e1f89'\ndown_revision = '76e28793186f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###
    task = op.create_table('periodic_task',\n        sa.Column('name', sa.Enum('NEWS_PUSH', name='periodictask'), nullable=False),\n        sa.Column('executed_at', sa.DateTime(), nullable=True),\n        sa.PrimaryKeyConstraint('name')\n    )\n\n    op.bulk_insert(task, [\n        {\"name\": 'NEWS_PUSH', \"executed_at\": datetime.now()},\n    ])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('periodic_task')\n    # ### end Alembic commands ###\n","sub_path":"db/migrate/versions/8feb052e1f89_add_table_task.py","file_name":"8feb052e1f89_add_table_task.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535052047","text":"import keyboard\r\nimport pyautogui as pa\r\nimport time\r\nimport random\r\n\r\n\r\ndef get_time():\r\n    return round(time.time())\r\n\r\n\r\nprint(\"Press q or esc to exit.\")\r\ntime.sleep(1)\r\nprint(\"Start in:\")\r\nfor i in reversed(range(10)):\r\n    print(i)\r\n    time.sleep(1)\r\n    if keyboard.is_pressed('esc') or keyboard.is_pressed('q'):\r\n        print(\"The end\")\r\n        print(\"GREETINGS FROM \\~ZIOMBELEK~/\")\r\n        time.sleep(5)\r\n        exit()\r\nstart_time = time.time()\r\nrandom.seed()\r\nscreen = pa.size()  # pyautogui.size() returns (width, height)\r\ncounter = 1\r\nscreen_width = screen[0]\r\nscreen_height = screen[1]\r\nseconds = random.randint(1, 15)\r\nprevious_time = get_time()\r\nwhile 1:\r\n    if keyboard.is_pressed('esc') or keyboard.is_pressed('q'):\r\n        break\r\n    if get_time() - previous_time > seconds:\r\n        previous_time = time.time()\r\n        seconds = random.randint(1, 15)\r\n        x = random.randint(7, screen_width - 195)\r\n        y = random.randint(7, int(screen_height/2))\r\n        movement_time = random.random() + random.random() + random.random()\r\n        pa.moveTo(x, y, movement_time)\r\n        czas = time.time() - start_time\r\n        print(\"Timer:: \", int(czas/60/60), \"h \", int(czas/60) % 60, \"m \", int(czas) % 60, \"s\")\r\n        if 1 <= counter <= 5:\r\n            pa.scroll(113)\r\n        if 6 <= counter <= 10:\r\n            pa.scroll(-113)\r\n        if counter == 10:\r\n            counter = 0\r\n        counter += 1\r\nprint(\"The end\")\r\npa.moveTo(1, 1, 0.1)\r\nprint(\"GREETINGS FROM \\~ZIOMBELEK~/\")\r\ntime.sleep(5)\r\n\r\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166816939","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.utils import translation\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom macadjan import models\nfrom macadjan_ecozoom import models as models_ecozoom\nfrom treemenus import models as models_menus\n#from themes import models as models_themes\n\nclass Command(BaseCommand):\n    args = ' '\n    help = 'Initialize a newly created instance, by setting Site and SiteInfo, creating a menu and a theme.\\n' \\\n           'You need to give the slug of the instance and also the public domain. For example:\\n' \\\n           '    ./manage.py initialize_instance demo demo.mapunto.net'\n\n    def handle(self, *args, **options):\n\n        if len(args) != 2:\n            return 'You need to give the slug of the instance and also the public domain. 
For example:\\n' \\\n ' ./manage.py initialize_instance demo demo.mapunto.net\\n'\n\n self.instance_slug = args[0]\n self.instance_domain = args[1]\n\n translation.activate(settings.LANGUAGE_CODE)\n\n current_site = self.init_current_site()\n current_site_info = self.init_current_site_info(current_site)\n main_menu = self.init_main_menu()\n #theme = self.init_theme(current_site)\n\n translation.deactivate()\n\n\n def init_current_site(self):\n current_site = Site.objects.get_current()\n current_site.name = self.instance_domain\n current_site.domain = self.instance_domain\n current_site.save()\n return current_site\n\n\n def init_current_site_info(self, current_site):\n try:\n current_site_info = current_site.site_info\n except models.SiteInfo.DoesNotExist:\n current_site_info = models.SiteInfo(site = current_site)\n current_site_info.website_name = self.instance_domain\n current_site_info.website_subtitle = ''\n current_site_info.website_description = ''\n current_site_info.footer_line = self.instance_domain\n current_site_info.map_bounds_left = -20037508.34\n current_site_info.map_bounds_right = 20037508.34\n current_site_info.map_bounds_bottom = -20037508.34\n current_site_info.map_bounds_top = 20037508.34\n current_site_info.map_zoom_levels = 18\n current_site_info.map_max_resolution = 156543\n current_site_info.map_units = 'meters'\n current_site_info.map_initial_lon = -3.86\n current_site_info.map_initial_lat = 40.38\n current_site_info.map_initial_zoom = 6\n current_site_info.new_entity_proposal_enabled = False\n current_site_info.entity_change_proposal_enabled = False\n current_site_info.description_hints = ''\n current_site_info.goals_hints = ''\n current_site_info.finances_hints = ''\n current_site_info.social_values_hints = ''\n current_site_info.how_to_access_hints = ''\n current_site_info.networks_member_hints = ''\n current_site_info.networks_works_with_hints = ''\n current_site_info.ongoing_projects_hints = ''\n current_site_info.needs_hints = ''\n current_site_info.offerings_hints = ''\n current_site_info.additional_info_hints = ''\n current_site_info.save()\n return current_site_info\n\n\n def init_main_menu(self):\n try:\n main_menu = models_menus.Menu.objects.get(name = 'main_menu')\n except models_menus.Menu.DoesNotExist:\n main_menu = models_menus.Menu(name = 'main_menu')\n\n root_menu_item = main_menu.root_item\n if not root_menu_item:\n root_menu_item = models_menus.MenuItem(menu = main_menu)\n root_menu_item.parent = None\n root_menu_item.caption = u'Root'\n root_menu_item.url = u''\n root_menu_item.named_url = u''\n root_menu_item.level = 0\n root_menu_item.rank = 0\n root_menu_item.save()\n\n main_menu.root_item = root_menu_item\n main_menu.save()\n\n try:\n menu_item = root_menu_item.menuitem_set.get(url = '/map/')\n except models_menus.MenuItem.DoesNotExist:\n menu_item = models_menus.MenuItem(parent = root_menu_item,\n menu = main_menu,\n url = '/map/')\n menu_item.caption = u'Mapa'\n menu_item.named_url = u''\n menu_item.level = 0\n menu_item.rank = 0\n menu_item.save()\n\n return main_menu\n\n# def init_theme(self, current_site):\n# themes = models_themes.Theme.objects.all()\n# if len(themes) > 1:\n# raise ValueError(\"There are more than 1 theme, i don't know how to handle this\")\n# if len(themes) == 1:\n# theme = themes[0]\n# else:\n# theme = models_themes.Theme()\n#\n# theme.name = self.instance_slug\n# theme.directory = self.instance_slug\n# theme.save()\n#\n# theme.sites_available.clear()\n# theme.sites_available.add(current_site)\n# 
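# Aside (annotation): the fetch-or-construct blocks in this command are close
# to Django's get_or_create shortcut; one difference is that get_or_create
# saves the new row immediately, while the command above fills in fields first
# and saves at the end. Hypothetical sketch (not runnable outside the app):
# current_site_info, created = models.SiteInfo.objects.get_or_create(
#     site=current_site,
#     defaults={'website_name': self.instance_domain},
# )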
theme.sites_enabled.clear()\n# theme.sites_enabled.add(current_site)\n\n","sub_path":"src/macadjan_ecozoom/management/commands/initialize_instance.py","file_name":"initialize_instance.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"239876088","text":"#!/usr/bin/env python\n'''\nCI、CD流程控制代码\n 针对app服务器,在app服务器上运行的\n 目录规划\n /var/www/html/current 连接文件\n /var/www/download/ 存储下载tar包的目录\n /var/www/deploy/ 存储各个版本代码的目录\n /var/www/deploy/mysite-1.0\n /var/www/deploy/mysite-2.0\n /var/www/version_con/ 存储deploy文件和本地版本记录文件\n 主要功能:\n 更新和回滚\n - 更新\n 1、检查是否有新版本\n 2、下载新版本的tar包\n 3、对于下载的tar包进行md5校验\n 4、解压新版本的tar包\n 5、更新web连接\n 6、更新本地版本记录文件\n - 回滚\n 1、检查本地是否有上一版本记录文件\n 2、回滚至上一版本\n 3、更新本地版本记录文件\n'''\nimport os\nimport sys\nimport wget\nimport requests\nimport tarfile\nimport hashlib\n\n\ndef update(app_name, local_version_file, local_last_version_file,\n local_store_dir, local_deploy_dir, local_current_link,\n remote_version_url, remote_file_url):\n # 检查是否有新版本\n if not os.path.exists(local_version_file):\n print('开始更新')\n download(app_name, local_store_dir, local_deploy_dir, local_last_version_file,\n local_current_link, remote_version_url, remote_file_url)\n else:\n with open(local_version_file, 'r') as fobj:\n local_version = fobj.read()\n remote_version = get_remote_version(remote_version_url)\n if local_version != remote_version:\n print('开始更新')\n download(app_name, local_store_dir, local_deploy_dir, local_last_version_file,\n local_current_link, remote_version_url, remote_file_url)\n else:\n print('无新版本可用')\n\n\n# 下载新版本的函数\ndef download(app_name, local_store_dir, local_deploy_dir, local_last_version_file,\n local_current_link, remote_version_url, remote_file_url):\n remote_version = get_remote_version(remote_version_url) # 1.0\n app_version_name = '%s-%s.tar.gz' % (app_name, remote_version)\n remote_file_url = remote_file_url + app_version_name # 组装最新版按本的tar包的路径\n remote_file_md5 = remote_file_url + '.md5' # 组装的是远程新版本的tar包的md5文件\n\n # 下载新版本tar包\n wget.download(remote_file_url, local_store_dir)\n filename = local_store_dir + app_version_name # 组装/var/www/download/mysite-2.0.tar.gz\n\n # 校验md5\n local_md5 = get_local_md5(filename)\n remote_md5 = get_remote_md5(remote_file_md5)\n\n if local_md5 == remote_md5:\n local_deploy(filename, local_deploy_dir, local_version_file,\n local_last_version_file, local_current_link)\n else:\n print('新版本文件下载过程中损坏,请联系运维')\n os.remove(filename)\n\n\n# 本地发布新版本函数\ndef local_deploy(filename, local_deploy_dir, local_version_file,\n local_last_version_file, local_current_link):\n # 1、解压新版本的tar包\n tar = tarfile.open(filename) # filename=/var/www/download/mysite-2.0.tar.gz\n tar.extractall(path=local_deploy_dir)\n tar.close()\n # 2、更新本地版本记录文件\n if os.path.exists(local_version_file):\n os.rename(local_version_file, local_last_version_file)\n app_name = os.path.basename(filename)\n new_version = app_name.split('-')[1].replace('.tar.gz', '')\n with open(local_version_file, 'w') as fobj:\n fobj.write(new_version)\n # 3、更新web连接\n if os.path.exists(local_current_link):\n os.remove(local_current_link)\n # 新版本本地目录 /var/www/deploy/mysite-2.0/\n app_dir = local_deploy_dir + app_name.replace('.tar.gz', '')\n os.symlink(app_dir, local_current_link)\n\n\n# 获取本地文件的md5值\ndef get_local_md5(filename):\n m = hashlib.md5()\n with open(filename, 'rb') as fobj:\n while True:\n data = fobj.read(1024)\n if not data:\n break\n m.update(data)\n local_md5 = m.hexdigest()\n return local_md5\n\n\n# 
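# Aside (annotation): the surrounding deploy.py script verifies downloads by
# hashing in fixed 1024-byte blocks, which keeps memory flat for large
# tarballs. A standalone sketch of the same idiom (the path argument is
# illustrative only):
import hashlib
def file_md5(path, block_size=1024):
    m = hashlib.md5()
    with open(path, 'rb') as fobj:
        while True:
            data = fobj.read(block_size)
            if not data:
                break
            m.update(data)
    return m.hexdigest()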
获取远端tar包的md5值\ndef get_remote_md5(remote_file_md5):\n r = requests.get(remote_file_md5)\n remote_md5 = r.text.strip()\n return remote_md5\n\n\n# 获取Jenkins服务器上最新的代码版本\ndef get_remote_version(remote_version_url):\n r = requests.get(remote_version_url)\n remote_version = r.text.strip() # 防止远端服务器版本文件有换行符\n\n return remote_version\n\n\ndef rollback(app_name, local_version_file, local_last_version_file, local_deploy_dir, local_current_link):\n # 判断是否可以进行回滚\n if not os.path.exists(local_last_version_file):\n print('本地无旧版本可回滚')\n return False\n if os.path.exists(local_version_file) and os.path.exists(local_last_version_file):\n with open(local_version_file)as fobj:\n local_version = fobj.read()\n with open(local_last_version_file) as fobj:\n last_version = fobj.read()\n\n if local_version != last_version:\n app_curr_ver = '%s-%s' % (app_name, local_version)\n app_last_ver = '%s-%s' % (app_name, last_version)\n app_curr_dir = local_deploy_dir + app_curr_ver\n app_last_dir = local_deploy_dir + app_last_ver\n\n if not os.path.exists(app_last_dir):\n print('上版本代码目录不存在,无法回滚')\n return False\n # 回滚\n if os.path.exists(local_current_link):\n os.remove(local_current_link)\n os.symlink(app_last_dir, local_current_link)\n\n # 更新本地版本记录文件\n with open(local_version_file, 'w') as fobj:\n fobj.write(last_version)\n with open(local_last_version_file, 'w') as fobj:\n fobj.write(local_version)\n\n print('回滚成功')\n return True\n else:\n print('本地版本记录文件有误,无法回滚')\n return False\n\n\ndef main(app_name, local_version_file, local_last_version_file,\n local_store_dir, local_deploy_dir, local_current_link,\n remote_version_url, remote_file_url):\n if len(sys.argv) != 2:\n print('%s u|r or update|rollback' % sys.argv[0])\n elif sys.argv[1] == 'u' or sys.argv[1] == 'update':\n update(app_name, local_version_file, local_last_version_file,\n local_store_dir, local_deploy_dir, local_current_link,\n remote_version_url, remote_file_url)\n elif sys.argv[1] == 'r' or sys.argv[1] == 'rollback':\n rollback(app_name, local_version_file, local_last_version_file, local_deploy_dir, local_current_link)\n\n\nif __name__ == '__main__':\n app_name = 'mysite'\n local_version_file = \"/var/www/version_con/local_version\"\n local_last_version_file = \"/var/www/version_con/last_version\"\n local_store_dir = \"/var/www/download/\"\n local_deploy_dir = \"/var/www/deploy/\"\n local_current_link = \"/var/www/html/current\"\n\n remote_version_url = \"http://192.168.1.3/deploy/live_version\"\n remote_file_url = \"http://192.168.1.3/deploy/pkgs/\"\n\n main(app_name, local_version_file, local_last_version_file,\n local_store_dir, local_deploy_dir, local_current_link,\n remote_version_url, remote_file_url)\n","sub_path":"exec/day25-28/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"133786363","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 kirmani \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nSampler.\n\"\"\"\n\nimport math\n\nkVerbose = False\n\nclass Sampler:\n def __init__(self):\n self.started_ = False\n self.expectation_ = 0\n self.m2_ = 0\n self.variance_ = 0\n self.population_variance_ = 0\n self.num_samples_ = 0\n self.error_count_ = 0\n\n def IsConfidentValue(self, value):\n if self.num_samples_ < 2:\n return 0\n\n # determine confidence z-value\n z_score = (value - self.expectation_) / math.sqrt(self.variance_)\n z_cutoff = 1.96 # 95%\n confidence_range = z_cutoff * math.sqrt(self.variance_)\n lower_bound = self.expectation_ - confidence_range\n upper_bound = self.expectation_ + confidence_range\n # if kVerbose:\n # print(\"Confidence range: (%.9f, %.9f)\"\n # % (lower_bound, upper_bound))\n if value < lower_bound:\n if kVerbose:\n print(\"Value below confidence range\")\n return -1\n if value > upper_bound:\n if kVerbose:\n print(\"Value above confidence range\")\n return 1\n return 0\n\n def Expectation(self):\n return self.expectation_\n\n def SampleVariance(self):\n return self.variance_\n\n def GetSaveData(self):\n return {\n 'expectation': self.expectation_,\n 'm2': self.m2_,\n 'num_samples': self.num_samples_\n }\n\n def LoadFromSaveData(self, data):\n self.started_ = True\n self.expectation_ = data['expectation']\n self.m2_ = data['m2']\n self.num_samples_ = data['num_samples']\n self.variance_ = (self.m2_ / (self.num_samples_ - 1)\n if self.num_samples_ > 1 else 0)\n self.population_variance_ = self.m2_ / (self.num_samples_)\n\n def Sample(self, value):\n self.num_samples_ += 1\n error = value - self.expectation_\n self.expectation_ += error / self.num_samples_\n self.m2_ += error * (value - self.expectation_)\n\n self.variance_ = (self.m2_ / (self.num_samples_ - 1)\n if self.num_samples_ > 1 else 0)\n self.population_variance_ = self.m2_ / (self.num_samples_)\n # print(\"Error ratio: %.9f\" % (float(self.error_count_) / float(self.num_samples_)))\n\n # print(\"Expectation: %.9f\" % self.expectation_)\n # print(\"Sample Variance: %.9f\" % self.variance_)\n # print(\"Population Variance: %.9f\" % self.population_variance_)\n","sub_path":"src/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"385159681","text":"#!/usr/bin/python\n\n# importing libraries\nimport requests \nimport sys\nimport re\nimport os\nfrom dateutil.parser import parse\nimport geopy.distance\n\ndef name2num(s):\n return re.match(r'\\d+', os.path.basename(s)).group()\n\n# correct usage\nif (len(sys.argv) != 2 or not sys.argv[1].endswith('.ttl')):\n exit('usage: ' + sys.argv[0] + ' ttl_file instread of ' + sys.argv[0] + ' ' + sys.argv[1])\n\n# file with traces\ntry:\n f = open(sys.argv[1], 'r')\n content = f.read()\n f.close()\nexcept IOError:\n exit('Error while reading ' + sys.argv[1])\n\n# regular expression pattern\nreg_exp = r'<(.*)> :hasTimestamp ([^ ]+) ;\\n([ \\t]*):lat (\\d*(\\.\\d*)?) ;\\n[ \\t]*:lon (\\d*(\\.\\d*)?) ;\\n[ \\t]*:elv (\\d*(\\.\\d*)?) 
;[^.]*\\.'\npattern = re.compile(reg_exp)\n\nend = 0\nnew_content = ''\nline_wkt = '\"LINESTRING ZM('\nstart_dt = 0\ndistance = 0\n\n# for all the points in the file\nfor m in pattern.finditer(content):\n dt = parse(m.group(2).replace('^^xsd:dateTime', '')[1:-1])\n if start_dt != 0:\n x_prev, y_prev = x, y\n\n y, x, z = m.group(4), m.group(6), m.group(8)\n\n if start_dt == 0:\n start_dt = dt\n else:\n distance += geopy.distance.vincenty((y_prev, x_prev), (y, x)).km\n \n new_content += content[end:m.start()]\n new_content += m.group() + '\\n'\n\n point = '<' + m.group(1) + '>'\n pointGeomID = '<' + m.group(1) + '_geom>' \n wkt = '\"POINT ZM(' + x + ' ' + y + ' ' + z + ' ' + str(distance) + ')\"^^geo:wktLiteral'\n new_content += point + ' :mileage ' + str(distance) + ' . \\n'\n new_content += point + ' a geo:Feature . \\n'\n new_content += point + ' a :TracePoint . \\n'\n new_content += point + ' geo:hasGeometry ' + pointGeomID + ' . \\n'\n new_content += pointGeomID + ' a sf:Point . \\n'\n new_content += pointGeomID + ' geo:asWKT ' + wkt + ' . '\n\n line_wkt += x + ' ' + y + ' ' + z + ' ' + str(distance) + ', '\n \n end = m.end()\n #print elevation\nnew_content += content[end:] + '\\n'\nline_wkt = line_wkt[:-2] + ')\"'\n\nnew_prefixes = ''\nnew_prefixes += '@prefix : .\\n'\nnew_prefixes += '@prefix geo: .\\n'\nnew_prefixes += '@prefix sf: .\\n'\n\nnew_content = new_content.replace('@prefix : .\\n', new_prefixes, 1)\n\n# addtional enhancements\nduration = dt - start_dt\nnew_content += '<#trace> :numID \"' + name2num(sys.argv[1]) + '\"^^xsd:integer .\\n'\nnew_content += '<#trace> :hasStartPoint <#point0> .\\n'\nnew_content += '<#trace> :hasEndPoint ' + point + ' .\\n'\nnew_content += '<#trace> :hasDuration \"' + str(duration.days * 24 * 60 * 60 + duration.seconds) + '\"^^xsd:integer .\\n'\n\n\n# trace enhancements\nnew_content += '<#trace> a geo:Feature .\\n'\nnew_content += '<#trace> geo:hasGeometry <#trace_geom> .\\n'\nnew_content += '<#trace_geom> a sf:LineString .\\n'\nnew_content += '<#trace_geom> geo:asWKT ' + line_wkt + ' .\\n'\n\n# new file with elevation\ntry:\n new_name = sys.argv[1].replace('.ttl', '_g.ttl')\n g = open(new_name, 'w')\n g.write(new_content)\n g.close()\nexcept IOError:\n exit('Error while writing to the file ' + new_name)\n","sub_path":"code/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"554232130","text":"import pytest\n\nfrom tests.helpers import assert_df_equal\nfrom pyranges.pyranges import PyRanges\nfrom pyranges.methods import _overlap_write_both\n\nimport pandas as pd\n\nfrom io import StringIO\n\n\n@pytest.fixture\ndef simple_gr1():\n\n c = \"\"\"Chromosome Start End Score Strand\nchr1 3 6 5 +\nchr1 5 7 7 -\nchr1 8 9 1 +\"\"\"\n\n df = pd.read_table(StringIO(c), sep=\"\\s+\", header=0)\n return PyRanges(df)\n\n\n\n@pytest.fixture\ndef simple_gr2():\n\n c = \"\"\"Chromosome Start End Score Strand\nchr1 1 2 1 +\nchr1 6 7 2 -\"\"\"\n df = pd.read_table(StringIO(c), sep=\"\\s+\", header=0)\n return PyRanges(df)\n\n\n@pytest.fixture\ndef expected_result_subtract_simple_granges():\n\n c = \"\"\"Chromosome Start End Strand Score\nchr1\t3\t6\t+ 5\nchr1\t8\t9\t+ 1\"\"\"\n df = pd.read_table(StringIO(c), sep=\"\\s+\", header=0)\n return PyRanges(df)\n\n\n\n\n@pytest.fixture\ndef expected_result_overlap_same_strand_simple_granges():\n\n c = \"\"\"Chromosome Start End Score Strand Start_b End_b Score_b 
Strand_b\nchr1\t5\t7\t7\t-\t6\t7\t2\t-\"\"\"\n df = pd.read_table(StringIO(c), sep=\"\\s+\", header=0)\n return PyRanges(df)\n\n\ndef test_overlap_same_strand_simple_granges(simple_gr1, simple_gr2, expected_result_overlap_same_strand_simple_granges):\n\n print(\"gr1\")\n print(simple_gr1)\n\n print(\"gr2\")\n print(simple_gr2)\n\n print(\"expected\")\n print(expected_result_overlap_same_strand_simple_granges)\n result = simple_gr1.join(simple_gr2, strandedness=\"same\")\n\n print(\"actual\")\n print(result)\n\n assert_df_equal(result.df, expected_result_overlap_same_strand_simple_granges.df)\n\n\n\ndef test_overlap_opposite_strand_simple_granges(simple_gr1, simple_gr2):\n\n result = simple_gr1.join(simple_gr2, strandedness=\"opposite\")\n\n print(result)\n\n assert result.df.empty\n\n\ndef test_default_overlap_simple_granges(simple_gr1, simple_gr2, expected_result_overlap_same_strand_simple_granges):\n\n print(simple_gr1)\n print(simple_gr2)\n\n result = simple_gr1.join(simple_gr2)\n\n assert_df_equal(result.df, expected_result_overlap_same_strand_simple_granges.df)\n","sub_path":"tests/test_overlap_keep_both.py","file_name":"test_overlap_keep_both.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"404767018","text":"from multiprocessing import Manager,Process\r\ndef add_data(p_dict, key, value):\r\n p_dict[key] = value\r\n\r\nif __name__ == \"__main__\":\r\n progress_dict = Manager().dict()\r\n from queue import PriorityQueue\r\n\r\n first_progress = Process(target=add_data, args=(progress_dict, \"bobby1\", 22))\r\n second_progress = Process(target=add_data, args=(progress_dict, \"bobby2\", 23))\r\n\r\n first_progress.start()\r\n second_progress.start()\r\n first_progress.join()\r\n second_progress.join()\r\n\r\n print(progress_dict)\r\n","sub_path":"async/multiprocessing/mp_manager.py","file_name":"mp_manager.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556098642","text":"#FTP Client Side Code\r\n\r\nimport socket\r\n\r\ns = socket.socket() #declare socket variable\r\n\r\nport = 8888\r\n\r\ns.connect(('192.168.56.101', port)) #established connection to the server\r\nprint(\"connected to server:\")\r\n\r\nfilename = input(str(\"Rename file as : \")) #enter what name file you want to save, you can rename it\r\nfile = open(filename, 'wb')\r\nfile_data = s.recv(1024) #will receive file\r\nfile.write(file_data) #will write it on client storage\r\n\r\nfile.close() #close file function\r\nprint(\"File has been received successfully.\")\r\n\r\ns.close() #close socket\r\nprint('Server-Client Connection End')\r\n","sub_path":"ftp_client.py","file_name":"ftp_client.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"375099857","text":"def iii():\n s=input()\n row=-1\n for x in s:\n if x=='[':\n row+=1;\n s=s.replace('[','')\n s=s.replace(']','')\n s=s.replace(',',' ')\n l=s.split()\n l= list(map(int, l))\n\n gap=int(len(l)/row)\n\n d1=[]\n for i in range(0, len(l), gap):\n dd=[]\n for j in range(gap):\n dd.append(l[i+j])\n d1.append(dd)\n\n return d1\ndef hitBricks(grid, hits):\n m,n = len(grid),len(grid[0])\n for i,j in hits:\n grid[i][j] -= 1\n flag = [[0]*n for i in range(m)]\n queue = []\n for j in range(n):\n flag[0][j] = 1\n if grid[0][j] == 1:\n queue.append((0,j))\n tmp = 
[(0,1),(1,0),(0,-1),(-1,0)]\n while queue:\n i,j = queue.pop(0)\n for di,dj in tmp:\n a,b = i+di,j+dj\n if 0<=a proxy)\n resource_name = collection_name[:-1]\n parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')\n params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(\n 'parameters')\n\n controller = base.create_resource(collection_name, resource_name,\n plugin, params,\n allow_bulk=True,\n parent=parent)\n\n resource = extensions.ResourceExtension(\n collection_name,\n controller, parent,\n path_prefix=FLAVORS_PREFIX,\n attr_map=params)\n resources.append(resource)\n\n return resources\n\n def update_attributes_map(self, attributes):\n super(Flavors, self).update_attributes_map(\n attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)\n\n def get_extended_resources(self, version):\n if version == \"2.0\":\n return RESOURCE_ATTRIBUTE_MAP\n else:\n return {}\n","sub_path":"neutron/extensions/flavors.py","file_name":"flavors.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67749288","text":"from infer_node import *\nimport time\n\n\ndef main():\n target = Formula.from_str(input('Please input the target formula: '))\n premise_set = []\n while True:\n premise = input('Please input the premise set one by one(enter to ignore): ')\n if not premise:\n break\n premise_set.append(Formula.from_str(premise, state=FormulaState.known))\n\n premise_strs = [str(i) for i in premise_set]\n print('Your target is: {}'.format(target))\n print('Your given premise set: {{{}}}'.format(', '.join(premise_strs)))\n\n if not target.get_truth_value_with_premise(premise_set):\n print(\"The Truth Value isn't True!\")\n return\n\n print('Computing Begins!')\n begin = time.time()\n\n final_infer_node = InferNode.infer([InferNode(target, premise_set)])\n final_infer_node.print_process()\n\n end = time.time()\n print('\\nTime Spent: {} s'.format(end - begin))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"470245901","text":"import pandas_datareader.data as web\nimport datetime\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\napp = dash.Dash()\nstart = datetime.datetime(2015, 1, 1)\nend = datetime.datetime.now()\nticker = \"TSLA\"\ndf = web.DataReader(ticker, 'yahoo', start, end)\ndf.reset_index(inplace=True)\n\ndf.set_index(\"Date\", inplace=True)\n# df = df.drop(\"Symbol\", axis=1)\n\nprint(df.head())\n\napp.layout = html.Div(children=[\n html.H1(children='Hello Dash'),\n\n html.Div(children='''\n Stock Graph!\n '''),\n\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {'x': df.index, 'y': df.Close, 'type': 'line', 'name': ticker},\n ],\n 'layout': {\n 'title': ticker\n }\n }\n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n\n\n\n","sub_path":"Dash_Projects/Data_Visualization_pt_3.py","file_name":"Data_Visualization_pt_3.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530062689","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mainpage', '0014_msaereq_msae_slug'),\n ]\n\n 
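# Aside (annotation): the inference script above brackets its search with
# time.time() to report wall-clock duration; for short spans,
# time.perf_counter() offers better resolution. Self-contained sketch:
import time
begin = time.perf_counter()
total = sum(range(1_000_000))  # stand-in for the real computation
print('Time Spent: {:.3f} s'.format(time.perf_counter() - begin))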
operations = [\n migrations.RemoveField(\n model_name='msaereq',\n name='msae_slug',\n ),\n migrations.AlterField(\n model_name='msaereq',\n name='user_name',\n field=models.CharField(unique=True, max_length=100),\n ),\n ]\n","sub_path":"mainpage/migrations/0015_auto_20150713_2011.py","file_name":"0015_auto_20150713_2011.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"44916232","text":"import nltk\nfrom sklearn.datasets import fetch_20newsgroups\nimport pandas as pd\nimport pymongo\nimport csv\n\n\n# converting fetch_20newsgroups to csv file\ndef twenty_newsgroup_to_csv():\n newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))\n\n df = pd.DataFrame([newsgroups_train.data, newsgroups_train.target.tolist()]).T\n df.columns = ['text', 'target']\n\n targets = pd.DataFrame(newsgroups_train.target_names)\n targets.columns = ['title']\n\n out = pd.merge(df, targets, left_on='target', right_index=True)\n out['date'] = pd.to_datetime('now')\n out.to_csv('20_newsgroup.csv')\n\n\ntwenty_newsgroup_to_csv()\n\n# Removing special characters in dataset\nspec_chars = [\"!\", '\"', \"#\", \"%\", \"&\", \"'\", \"(\", \")\",\n \"*\", \"+\", \",\", \"-\", \".\", \"/\", \":\", \";\", \"<\",\n \"=\", \">\", \"?\", \"@\", \"[\", \"\\\\\", \"]\", \"^\", \"_\",\n \"`\", \"{\", \"|\", \"}\", \"~\", \"–\", \" \", \"*\"]\ndf = pd.read_csv('20_newsgroup.csv')\nfor char in spec_chars:\n df['title'] = df['title'].str.replace(char, ' ')\n\n# fetching the words\ndata = []\nwith open(r'20_newsgroup.csv') as f:\n for row in csv.reader(f, delimiter=' ', quoting=csv.QUOTE_NONE):\n data += row\n\n# Extracting the noun and inserting it into DB table\nnoun_table = []\nfor sentence in data:\n for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):\n if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS'):\n noun_table.append(word)\n\n# retriveing stored data from MongoDB\nfor document in noun_table:\n print(document)\n","sub_path":"noun.py","file_name":"noun.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"512199164","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nimport re\n\nfrom .models import Profile, Skill, Group\nfrom .forms import ProfileModelFormUser, ProfileModelFormMember, ProfileModelFormAdmin\n\n\ndef members(request):\n profiles = Profile.objects.exclude(group__isnull=True)\n if request.method == 'POST':\n text = request.POST['searchBar'].lower()\n tokens = re.split('; |, | |\\n', text)\n\n name_results = User.objects.none()\n skill_results = Skill.objects.none()\n group_results = Group.objects.none()\n\n for token in tokens:\n name_results |= User.objects.filter(first_name__icontains=token) | User.objects.filter(\n last_name__icontains=token)\n skill_results |= Skill.objects.filter(title__icontains=token)\n group_results |= Group.objects.filter(title__icontains=token)\n\n result_profiles = [user_profile for user_profile in profiles if\n user_profile.user in name_results or\n any(skill in skill_results for skill in user_profile.skills.all()) or\n any(group in group_results for group in user_profile.group.all())\n ]\n\n return render(request, \"members.html\", context={\"profiles\": result_profiles})\n\n return render(request, \"members.html\", context={\"profiles\": profiles})\n\n\ndef skill(request, 
skill_title):\n skill = Skill.objects.get(title=skill_title)\n profiles = Profile.objects.filter(skills__title__icontains=skill_title, group__isnull=False)\n context = {'skill': skill, 'profiles': profiles}\n return render(request, 'skill.html', context)\n\n\n\"\"\"\ndef group(request):\n context = {'group': request.path.split(\"/\")[-1]}\n return render(request, \"group.html\", context)\n\"\"\"\n\n\ndef profile(request, profileID):\n try:\n profile = Profile.objects.get(user_id=profileID)\n profile.update()\n return render(request, 'profile.html', {'profile': profile, 'user': request.user})\n except Profile.DoesNotExist:\n return render(request, '404.html')\n\n\ndef edit_profile(request):\n return edit_profile_id(request, request.user.id)\n\n\ndef edit_profile_id(request, profileID):\n try:\n user = request.user\n profile = Profile.objects.get(user_id=profileID)\n if user != profile.user and not user.is_superuser:\n return redirect('/members/profile/' + str(profileID))\n if user.is_superuser:\n form = ProfileModelFormAdmin(request.POST or None, request.FILES or None, instance=profile)\n elif Profile.objects.filter(user=user, group__isnull=False):\n form = ProfileModelFormMember(request.POST or None, request.FILES or None, instance=profile)\n else:\n form = ProfileModelFormUser(request.POST or None, request.FILES or None, instance=profile)\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('/members/profile/' + str(profileID))\n return render(request, 'edit_profile.html', {'form': form, 'profile': profile})\n except Profile.DoesNotExist:\n return render(request, '404.html')\n","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"124084303","text":"#This code designed to fetch all the trajectory data from the alfred folder in an organized fashion\nimport os\nimport sys\nos.environ['ALFRED_ROOT'] = '/alfred'\n\nsys.path.append(os.path.join(os.environ['ALFRED_ROOT']))\nsys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'gen'))\nsys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'data/json_2.1.0/train'))\n\nimport json\nimport glob\nimport os\nimport constants\nimport cv2\nimport shutil\nimport numpy as np\nimport argparse\nimport threading\nimport time\nimport copy\nimport random\nfrom utils.video_util import VideoSaver\nfrom utils.py_util import walklevel\nfrom env.thor_env import ThorEnv\nimport time as t\n\n\n#my imports \nimport math\n#import instruction_parser as ip\n#import graph_viz as gv\n\n\n\n#Data parameters\nIMAGE_WIDTH = 300 #rendering\nIMAGE_HEIGHT = 300\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_path', type=str, default=\"data\")\nparser.add_argument('--smooth_nav', dest='smooth_nav', action='store_false') #can also try 'store_false' to see smooth trajectories\nparser.add_argument('--time_delays', dest='time_delays', action='store_true')\nparser.add_argument('--shuffle', dest='shuffle', action='store_true') \nparser.add_argument('--num_threads', type=int, default=1)\nparser.add_argument('--reward_config', type=str, default='data/config/rewards.json')\n\n#my arguments\nparser.add_argument('--room', type=int, default=301)\nparser.add_argument('--task', type=int, default=1)\nparser.add_argument('--gendata', dest='gendata', action='store_true')\nparser.add_argument('--numexec', type=int, default=-1)\nargs = parser.parse_args()\n\n\nrender_settings = 
dict()\nrender_settings['renderImage'] = True\nrender_settings['renderDepthImage'] = True\nrender_settings['renderObjectImage'] = True\nrender_settings['renderClassImage'] = True\n\n\n#data generation control parameters\n#args.task = 6\n'''\nroom number 301- #0-15 1-18 2-16 6-6 7-13 8-13 9-10\n'''\n#for 7-13 dont know if its Chair ir ArmChair\nnumexec = args.numexec #make it -1 for finding out the number of actions after which it starts the 2nd instruction\n\ndef get_file(rn = 302, task_index = 1, trial_num = 0):\n folders = sorted(glob.glob('/alfred/data/json_2.1.0/train/*-'+repr(rn)))\n print(\"Number of demonstrated tasks for this room \",len(folders))\n trials = glob.glob(folders[task_index]+'/*') #there would be len(folders) number of different tasks \n traj = glob.glob(trials[trial_num]+'/*.json')\n\n print(\"got trajectory file \",traj)\n return traj\n\ndef parse_instr(lang):\n parse_tree = ip.parse(lang,weights = \"parse_weights\")\n print(\"Got parse tree \",parse_tree)\n print(\"Take a look at the constructed parse tree\")\n gv.visualize_projection_tree(language = parse_tree)\n\ndef inspect_lang_dict(rn = 301, task = 0):\n d = np.load('panorama_data/language_data/room_number_'+repr(rn)+'_task'+repr(task)+'.npy',allow_pickle = 'TRUE').item()\n print(\"loaded Dictionary \")\n print(d)\n\ndef example_run_from_traj(json_file, numexec = numexec):\n env = ThorEnv(player_screen_width=IMAGE_WIDTH,player_screen_height=IMAGE_HEIGHT)\n #open the expert demonstration trajectory file\n nav_dict = {}\n with open(json_file) as f:\n traj_data = json.load(f)\n\n # scene setup\n scene_num = traj_data['scene']['scene_num']\n object_poses = traj_data['scene']['object_poses']\n object_toggles = traj_data['scene']['object_toggles']\n dirty_and_empty = traj_data['scene']['dirty_and_empty']\n\n # reset\n scene_name = 'FloorPlan%d' % scene_num\n env.reset(scene_name)\n env.restore_scene(object_poses, object_toggles, dirty_and_empty)\n\n env.step(dict(traj_data['scene']['init_action']))\n #print(\"Task: %s\" % (traj_data['template']['task_desc']))\n print(\"Task: %s\" % (traj_data['turk_annotations']['anns'][0][\"high_descs\"]))\n\n nav_dict[\"instructions\"] = traj_data['turk_annotations']['anns'][0][\"high_descs\"]\n nav_dict[\"commands\"] = []\n nav_dict[\"grids\"]=[]\n nav_dict[\"orts\"]=[]\n #print(\"First navigation instruction: %s\" % (traj_data['turk_annotations']['anns'][0][\"high_descs\"][0]))\n\n #parse_instr(traj_data['turk_annotations']['anns'][0][\"high_descs\"][0])\n\n # setup task\n env.set_task(traj_data, args, reward_type='dense')\n rewards = []\n grids = []\n orts = []\n\n lang_dict = {}\n lang_dict[\"instr\"] = traj_data['turk_annotations']['anns'][0][\"high_descs\"][0]\n\n event = env.step(dict(action = 'GetReachablePositions'))\n reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene\n reach_x = [i['x'] for i in reach_pos]\n reach_z = [i['z'] for i in reach_pos]\n\n m_x = min(reach_x)\n m_z = min(reach_z)\n\n\n #print(enumerate(traj_data['plan']['low_actions']))\n n = 0\n for ll_idx, ll_action in enumerate(traj_data['plan']['low_actions']):\n # next cmd under the current hl_action\n cmd = ll_action['api_action']\n hl_action = traj_data['plan']['high_pddl'][ll_action['high_idx']]\n\n # remove unnecessary keys\n cmd = {k: cmd[k] for k in ['action', 'objectId', 'receptacleObjectId', 'placeStationary', 'forceAction'] if k in cmd}\n print(\"command \",cmd)\n nav_dict[\"commands\"].append(cmd)\n \n\n x = event.metadata['agent']['position']['x']\n y 
= event.metadata['agent']['position']['y']\n z = event.metadata['agent']['position']['z']\n #print(\"x \",x,\" y \",y,\" z \",z)\n a = int(math.fabs((x - m_x)/0.25))\n b = int(math.fabs((z - m_z)/0.25))\n\n nav_dict[\"grids\"].append(repr(a)+\"_\"+repr(b))\n nav_dict[\"orts\"].append(event.metadata['agent']['rotation'])\n\n if \"MoveAhead\" in cmd['action']:\n if args.smooth_nav:\n #save_image(env.last_event, root_dir) #will do our own function\n events = env.smooth_move_ahead(cmd, render_settings)\n #save_images_in_events(events, root_dir)\n event = events[-1]\n else:\n event = env.step(cmd)\n #save_image(event, root_dir)\n\n elif \"Rotate\" in cmd['action']:\n if args.smooth_nav:\n #save_image(env.last_event, root_dir)\n events = env.smooth_rotate(cmd, render_settings)\n #save_images_in_events(events, root_dir)\n event = events[-1]\n else:\n event = env.step(cmd)\n #save_image(event, root_dir)\n\n elif \"Look\" in cmd['action']:\n if args.smooth_nav:\n #save_image(env.last_event, root_dir)\n events = env.smooth_look(cmd, render_settings)\n #save_images_in_events(events, root_dir)\n event = events[-1]\n else:\n event = env.step(cmd)\n #save_image(event, root_dir)\n\n # handle the exception for CoolObject tasks where the actual 'CoolObject' action is actually 'CloseObject'\n # TODO: a proper fix for this issue\n elif \"CloseObject\" in cmd['action'] and \\\n \"CoolObject\" in hl_action['planner_action']['action'] and \\\n \"OpenObject\" in traj_data['plan']['low_actions'][ll_idx + 1]['api_action']['action']:\n if args.time_delays:\n cool_action = hl_action['planner_action']\n #save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.BEFORE)\n event = env.step(cmd)\n #save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.MIDDLE)\n #save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.AFTER)\n else:\n event = env.step(cmd)\n #save_image(event, root_dir)\n\n else:\n if args.time_delays:\n #save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.BEFORE)\n event = env.step(cmd)\n #save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.MIDDLE)\n #save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.AFTER)\n else:\n event = env.step(cmd)\n #save_image(event, root_dir)\n\n # update image list\n '''\n new_img_idx = get_image_index(high_res_images_dir)\n last_img_idx = len(traj_data['images'])\n num_new_images = new_img_idx - last_img_idx\n for j in range(num_new_images):\n traj_data['images'].append({\n 'low_idx': ll_idx,\n 'high_idx': ll_action['high_idx'],\n 'image_name': '%09d.png' % int(last_img_idx + j)\n })\n '''\n\n if not event.metadata['lastActionSuccess']:\n raise Exception(\"Replay Failed: %s\" % (env.last_event.metadata['errorMessage']))\n\n n+=1\n if n>=numexec and numexec!=-1: #-1 means testing \n break\n x = event.metadata['agent']['position']['x']\n y = event.metadata['agent']['position']['y']\n z = event.metadata['agent']['position']['z']\n #print(\"x \",x,\" y \",y,\" z \",z)\n a = int(math.fabs((x - m_x)/0.25))\n b = int(math.fabs((z - m_z)/0.25))\n\n grids.append(repr(a)+\"_\"+repr(b))\n orts.append(event.metadata['agent']['rotation'])\n\n\n reward, _ = env.get_transition_reward()\n rewards.append(reward)\n\n goalsat1 = env.get_subgoal_idx()\n goalsat2 = env.get_goal_conditions_met() #env.get_postconditions_met()\n print(\"subgoal index \",goalsat1)\n print(\"postconditions met \",goalsat2)\n print(\"reward \",reward)\n\n 
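# Aside (annotation): a worked example (all values invented) of the
# world-to-grid mapping used in this script -- reachable positions sit on a
# 0.25 m lattice, so offsetting by the minimum reachable coordinate and
# dividing by the grid pitch yields integer indices.
import math
m_x, m_z = -1.5, 0.25                 # hypothetical minimum reachable coordinates
x, z = -0.75, 1.25                    # hypothetical agent position
a = int(math.fabs((x - m_x) / 0.25))  # -> 3
b = int(math.fabs((z - m_z) / 0.25))  # -> 4
print(a, b)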
#np.save('expert_demo_'+repr()+'_'+repr()+'.npy',event.metadata)\n\n    #print(\"This is the list of rewards for each action \",rewards)\n    #print(\"This is the list of grids \",grids)\n    lang_dict[\"grids\"] = grids\n    lang_dict[\"orts\"] = orts\n    if numexec != -1:\n        x = input(\"Enter the refinement object1 (press enter if it doesn't exist): \")\n        rf1 = x\n        y = input(\"Enter the refinement object2 (press enter if it doesn't exist): \")\n        rf2 = y\n        z = input(\"Enter the target object (press enter if it doesn't exist): \")\n        tgt = z\n        #ex- Turn around and walk over to the white desk on your right.\n        #ex(navigation cues)- turn around,walk right\n        #no refinement objects 1 and 2\n        #target object is Desk\n        #sometimes there is no worthy navigation cue for ex- Move to the bottom right side of the large wood dresser (no cues here)\n        c = input(\"Enter the navigation cues implied from the sentence (use keyword pairs separated by comma): \")\n\n        lang_dict[\"objects\"] = {}\n        lang_dict[\"objects\"][\"ref1\"] = x\n        lang_dict[\"objects\"][\"ref2\"] = y\n        lang_dict[\"objects\"][\"target\"] = z\n        lang_dict[\"nav_cues\"] = c\n        lang_dict[\"traj_file_name\"] = json_file\n\n\n    #np.save('panorama_data/language_data/room_number_'+repr(scene_num)+'_task'+repr(args.task)+'.npy',lang_dict)\n\n\nif __name__ == '__main__':\n    gen_data = args.gendata\n    print(\"got gendata \", gen_data)\n\n    if gen_data:\n        traj_file = get_file(rn = args.room, task_index = args.task)\n        example_run_from_traj(traj_file[0])\n    else:\n        inspect_lang_dict(rn = args.room, task = args.task)\n\n'''\n#simple run whole trajectory\npython3 master_execution.py --room 1 --task 1 --gendata\n\n#to check the complete trajectory\npython annotate_traj.py --room 301 --task 1 --numexec -1 --gendata\n\n#to generate and store language dictionary data\npython annotate_traj.py --room 301 --task 1 --numexec 10 --gendata\n\n#to verify stored dictionary\npython annotate_traj.py --room 301 --task 1\n'''","sub_path":"robot/master_execution.py","file_name":"master_execution.py","file_ext":"py","file_size_in_byte":11629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"631435877","text":"import sys\nfrom statistics import mean, median\n\ndef main():\n    f_name = sys.argv[1]\n    with open(f_name, 'r') as f:\n        times = [float(line.split()[-2]) for line in f]\n\n    print(\"minimum: {0}, maximum: {1}, average: {2}, median: {3}\".format(min(times),\n                                                                       max(times),\n                                                                       '{:.3f}'.format(mean(times)),\n                                                                       '{:.3f}'.format(median(times))))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"time-stats.py","file_name":"time-stats.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"262923489","text":"import pyperclip, re\r\n\r\nphoneRegex = re.compile(r'1[35678]\\d{9}')\r\n\r\n# (?:com|cn|net) matches a whole TLD; a character class like [com,cn,net] would not\r\nemailRegex = re.compile(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9]{2,}\\.(?:com|cn|net)')\r\n\r\ntext = str(pyperclip.paste())\r\n\r\nmatches = []\r\n\r\nfor groups in phoneRegex.findall(text):\r\n\tmatches.append(groups)\r\n\r\nfor groups in emailRegex.findall(text):\r\n\tmatches.append(groups)\r\n\r\nif len(matches) > 0:\r\n\tpyperclip.copy('\\n'.join(matches))\r\n\tprint('Copied to the clipboard:')\r\n\tprint('\\n'.join(matches))\r\nelse:\r\n\tprint('No phone numbers or email addresses found.')\r\n","sub_path":"电话号码和Email提取程序.py","file_name":"电话号码和Email提取程序.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
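# Aside (annotation): a quick self-contained check (sample text invented) of
# the corrected e-mail pattern above -- the non-capturing group (?:com|cn|net)
# matches an entire TLD, where the original character class [com,cn,net]{2,4}
# would have matched any short run of those letters.
import re
emailRegex = re.compile(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9]{2,}\.(?:com|cn|net)')
print(emailRegex.findall('mail me at bob@example.com or sue@test.cn'))
# -> ['bob@example.com', 'sue@test.cn']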
+{"seq_id":"33743979","text":"from rest_framework import serializers, exceptions\nfrom django.conf import settings\n\n# DateTimeField与DateField(2020-11-22T16:14:46.107960Z)显示年月日时分秒\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.serializers import ListSerializer\n\nfrom pimordial_drf import models\n\n\n# 重点:ListSerializer与ModelSerializer建立关联的是: 在ModelSerializer的Meta类中设置 list_serializer_class\nclass V2BookListSerializer(ListSerializer):\n def update(self, instance, validated_data): # print(instance) # 要更新的对象们\n # print(validated_data) # 更新的对象对应的数据们\n # print(self.child) # 服务的模型序列化类 - V2BookModelSerializer\n for index, obj in enumerate(instance):\n self.child.update(obj, validated_data[index])\n return instance\n\n\nclass V2BookModelSerializer(serializers.ModelSerializer):\n class Meta:\n # 序列化关联的model类\n model = models.Book\n list_serializer_class = V2BookListSerializer\n # 参与序列化的字段 —— 没有默认值的字段必须序列化,为其传值\n fields = ('name', 'price', 'img', 'author_list', 'publish_name', 'publish', 'authors')\n # extra_kwargs 用来完成反序列化字段的 系统校验规则\n # write_only:只反序列化\n # read_only:只序列化\n # 自定义字段默认只序列化(read_only)\n # 如果字段没设置write_only或者read_only,那么该字段可以序列化和反序列化\n extra_kwargs = {\n 'name': {\n 'required': True,\n 'min_length': 1,\n 'error_messages': {\n 'required': '必填项',\n 'min_length': '太短',\n }\n },\n 'publish': {\n 'write_only': True\n },\n 'authors': {\n 'write_only': True\n },\n 'img': {\n 'read_only': True,\n },\n 'author_list': {\n 'read_only': True,\n },\n 'publish_name': {\n 'read_only': True,\n }\n }\n\n # 局部钩子校验单个字段 validate_字段名\n def validate_name(self, value): # value是字段name的值\n # 书名不能包含 g 字符\n if 'g' in value.lower():\n raise ValidationError('该g书不能出版')\n return value\n\n # 全局钩子\n def validate(self, attrs):\n publish = attrs.get('publish') # publish如果是外键字段,这个就是publish对象\n name = attrs.get('name')\n if models.Book.objects.filter(name=name, publish=publish):\n raise ValidationError({'book': '该书已存在'})\n return attrs\n\n\nclass PublishModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Publish\n fields = ('name', 'address')\n\n\nclass BookModelSerializer(serializers.ModelSerializer):\n # 1.还可以自定义设置序列化字段,但是必须在fields中声明,在fields中写publish_address\n # publish_address = serializers.SerializerMethodField\n #\n # def get_publish_address(self, obj):\n # return obj.Publish.address\n\n # 2. 
自定义连表深度---子序列化方式\n publish = PublishModelSerializer\n\n class Meta:\n # 序列化关联的model类\n model = models.Book\n # 参与序列化的字段\n fields = ('name', 'price', 'img', 'author_list', 'publish_name', 'publish')\n\n\nclass BookModelDeserializer(serializers.ModelSerializer):\n class Meta:\n model = models.Book\n # 没有默认值的字段必须序列化,为其传值\n fields = ('name', 'price', 'img', 'publish', 'authors')\n # extra_kwargs 用来完成反序列化字段的 系统校验规则\n extra_kwargs = {\n 'img': {\n 'required': True, # 设置img字段必填\n 'min_length': 10,\n 'error_messages': {\n 'required': '必填项',\n 'min_length': '最少输入10位'\n }\n }\n }\n\n # 局部钩子校验单个字段 validate_字段名\n def validate_name(self, value): # value是字段name的值\n # 书名不能包含 g 字符\n if 'g' in value.lower():\n raise ValidationError('该g书不能出版')\n return value\n\n # 全局钩子\n def validate(self, attrs):\n publish = attrs.get('publish') # publish如果是外键字段,这个就是publish对象\n name = attrs.get('name')\n if models.Book.objects.filter(name=name, publish=publish):\n raise ValidationError({'book': '该书已存在'})\n return attrs\n\n # 注意:ModelSerializer类已经帮我们实现了 create 与 update 方法,不需要写create就能创建\n\n\n'''\n方式一:\n created_time = serializers.DateTimeField(format=\"%Y-%m-%d - %H:%M:%S\")\n方式二:\n REST_FRAMEWORK = {\n 'DATETIME_FORMAT': \"%Y-%m-%d - %H:%M:%S\",\n }\n方式三:\n views中\n def list(self, request, *args, **kwargs):\n response = super().list(request, *args, **kwargs)\n response.data['results'] = handle_env(response.data['results'])\n return response\n 定义\n import re\n def handle_env(datas):\n datas_list = []\n for item in datas:\n mtch = re.search(r'(.*)T(.*)\\..*?', item['created_time'])\n # 时间格式化\n item['created_time'] = mtch.group(1) + ' ' + mtch.group(2)\n datas_list.append(item)\n return datas_list\n\n'''\n\n\n# 序列化\nclass UserSerializer(serializers.Serializer): # 创建一个序列化类\n name = serializers.CharField()\n phone = serializers.CharField()\n sex = serializers.IntegerField()\n icon = serializers.ImageField()\n created_time = serializers.DateTimeField()\n # created_time = serializers.DateTimeField(format=\"%Y-%m-%d - %H:%M:%S\")\n\n # 自定义序列化属性\n '''\n 属性名随意,值由固定的命名规范方法提供\n def get_属性名(self, 参与序列化的model对象):\n 返回值就是自定义序列化属性的值\n '''\n\n gender = serializers.SerializerMethodField()\n\n def get_gender(self, obj):\n # choice类型的解释型值 get_字段_display() 来访问\n return obj.get_sex_display()\n\n icon1 = serializers.SerializerMethodField()\n\n def get_icon1(self, obj):\n # settings.MEDIA_URL: 自己配置的 /media/,给后面高级序列化与视图类准备的\n # obj.icon不能直接作为数据返回,因为内容虽然是字符串,但是类型是ImageFieldFile类型\n return '%s%s%s' % (r'http://127.0.0.1:8000', settings.MEDIA_URL, str(obj.icon))\n\n\n# 反序列化\nclass UserDeserializer(serializers.Serializer):\n # 1) 哪些字段必须反序列化\n # 2) 字段都有哪些安全校验\n # 3) 哪些字段需要额外提供校验 钩子函数\n # 4) 哪些字段间存在联合校验\n # 注:反序列化字段都是用来入库的,不会出现自定义方法属性,会出现可以设置校验规则的自定义属性,不入数据库的\n name = serializers.CharField(\n max_length=64,\n min_length=3,\n\n error_messages={\n 'max_length': '太长',\n 'min_length': '太短'\n }\n )\n pwd = serializers.CharField()\n phone = serializers.CharField(required=False)\n sex = serializers.IntegerField(required=False)\n # 自定义有校验规则的反序列化字段,例如确认密码字段re_pwd\n re_pwd = serializers.CharField(required=True)\n\n # 局部钩子:validate_要校验的字段名(self, 当前要校验字段的值)\n # 校验规则:校验通过返回原值,校验失败,抛出异常\n def validate_name(self, value):\n if 'g' in value.lower():\n raise exceptions.ValidationError('名字非法')\n return value\n\n # 全局钩子:validate(self, 通过系统与局部钩子校验之后的所有数据)\n def validate(self, attrs): # attrs是字典格式\n pwd = attrs.get('pwd')\n re_pwd = attrs.pop('re_pwd') # 因为re_pwd不需要存入数据库,所以在全局钩子校验中删除掉这个字段\n if pwd != re_pwd:\n raise exceptions.ValidationError({'pwd&re_pwd': '两次密码不一致'})\n 
return attrs\n\n    # To support creation, the create() method must be overridden; validated_data holds the validated fields\n    def create(self, validated_data):\n        # Ideally, once every validation rule has run, the data can go straight into the database\n        return models.User.objects.create(**validated_data)\n    ","sub_path":"pimordial_drf/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"586674020","text":"class Pair:\n    min = int()\n    max = int()\n\n\ndef getMaxMin(lista, n):\n    minmax = Pair()\n    i = int()\n    if (n == 1):\n        minmax.max = lista[0]\n        minmax.min = lista[0]\n        return minmax\n    if (lista[0] > lista[1]):\n        minmax.max = lista[0]\n        minmax.min = lista[1]\n    else:\n        minmax.max = lista[1]\n        minmax.min = lista[0]\n    for i in range(2, n):\n        if (lista[i] > minmax.max):\n            minmax.max = lista[i]\n        elif (lista[i] < minmax.min):\n            minmax.min = lista[i]\n    return minmax\n\n\nif __name__ == '__main__':\n    lista = [4, -1, 6, 23, 7, 78, 2]\n    tamanio = int(len(lista))\n    resultado = getMaxMin(lista, tamanio)\n    print('The minimum is ', resultado.min)\n    print('The maximum is ', resultado.max)\n","sub_path":"Codigos estudiantes por lenguaje/PY/Bryann Valderrama/Algoritmos de Busqueda/MinMaxSearch.py","file_name":"MinMaxSearch.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"7532404","text":"from utils.torch.rand import *\nfrom utils.torch.modules import ImageNet\nfrom model.mnist_train import Model\nfrom torch.utils.data import *\nfrom discretization import *\nfrom torchvision import datasets, transforms\nimport random\nimport time\nimport argparse\nfrom tqdm import tqdm\nimport pickle\nfrom utils.ans import NORM_CONST, ANS, VectorizedANS as ANS\nfrom copy import deepcopy\n\ndef compress(quantbits, nz, bitswap, gpu):\n    # model and compression params\n    zdim = 1 * 16 * 16\n    zrange = torch.arange(zdim)\n    xdim = 32 ** 2 * 1\n    xrange = torch.arange(xdim)\n    ansbits = NORM_CONST - 1 # ANS precision\n    type = torch.float64 # datatype throughout compression\n    device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n    ans_device = device #\"cuda:0\"\n\n    # set up the different channel dimension for different latent depths\n    if nz == 8:\n        reswidth = 61\n    elif nz == 4:\n        reswidth = 62\n    elif nz == 2:\n        reswidth = 63\n    else:\n        reswidth = 64\n    assert nz > 0\n\n    print(f\"{'Bit-Swap' if bitswap else 'BB-ANS'} - MNIST - {nz} latent layers - {quantbits} bits quantization\")\n\n    # seed for replicating experiment and stability\n    np.random.seed(100)\n    random.seed(50)\n    torch.manual_seed(50)\n    torch.cuda.manual_seed(50)\n    torch.backends.cudnn.deterministic = True\n    torch.backends.cudnn.benchmark = True\n\n    # compression experiment params\n    experiments = 20\n    ndatapoints = 100\n    decompress = True\n\n    # <=== MODEL ===>\n    model = Model(\n        xs = (1, 32, 32), nz=nz, zchannels=1, \n        nprocessing=4, kernel_size=3, resdepth=8, \n        reswidth=reswidth,\n        tag=\"batch\"\n    ).to(device)\n    model.load_state_dict(\n        torch.load(f'model/params/mnist/nz{nz}',\n            map_location=lambda storage, location: storage\n        )\n    )\n    model.eval()\n\n    print(\"Discretizing\")\n    # get discretization bins for latent variables\n    zendpoints, zcentres = discretize(nz, quantbits, type, device, model, \"mnist\")\n\n    #### priors\n    prior_cdfs = logistic_cdf(zendpoints[-1].t(), torch.zeros(1, device=device, dtype=type), torch.ones(1, device=device, dtype=type)).t()\n    prior_pmfs = prior_cdfs[:, 1:] - prior_cdfs[:, :-1]\n    prior_pmfs = torch.cat((prior_cdfs[:, 0].unsqueeze(1), &#13;
prior_pmfs, 1. - prior_cdfs[:, -1].unsqueeze(1)), dim=1)\n\n ####\n\n # get discretization bins for discretized logistic\n xbins = ImageBins(type, device, xdim)\n xendpoints = xbins.endpoints()\n xcentres = xbins.centres()\n\n print(\"Load data..\")\n # <=== DATA ===>\n class ToInt:\n def __call__(self, pic):\n return pic * 255\n transform_ops = transforms.Compose([transforms.Pad(2), transforms.ToTensor(), ToInt()])\n test_set = datasets.MNIST(root=\"model/data/mnist\", train=False, transform=transform_ops, download=True)\n\n # sample (experiments, ndatapoints) from test set with replacement\n print(len(test_set.data))\n if not os.path.exists(\"bitstreams/mnist/indices\"):\n randindices = np.random.choice(len(test_set.data), size=(experiments, ndatapoints), replace=False)\n np.save(\"bitstreams/mnist/indices\", randindices)\n else:\n randindices = np.load(\"bitstreams/mnist/indices\")\n\n print(\"Setting up metrics..\")\n # metrics for the results\n nets = np.zeros((experiments, ndatapoints), dtype=np.float)\n elbos = np.zeros((experiments, ndatapoints), dtype=np.float)\n cma = np.zeros((experiments, ndatapoints), dtype=np.float)\n total = np.zeros((experiments, ndatapoints), dtype=np.float)\n\n print(\"Compression..\")\n for ei in range(experiments):\n experiment_start_time = time.time()\n print(f\"Experiment {ei + 1}\")\n subset = Subset(test_set, randindices[ei])\n test_loader = DataLoader(\n dataset=subset,\n batch_size=1, shuffle=False, drop_last=True)\n datapoints = list(test_loader)\n\n # < ===== COMPRESSION ===>\n # initialize compression\n model.compress()\n state = list(map(int, np.random.randint(low=1 << 16, high=(1 << NORM_CONST) - 1, size=(200), dtype=np.uint32))) # fill state list with 'random' bits\n state[-1] = state[-1] << 16 #NORM_CONST\n \n states = [\n state.copy()\n for _ in range(len(datapoints))\n ]\n\n initialstates = deepcopy(states)\n reststates = None\n\n state_init = time.time()\n\n \n iterator = tqdm(range(len(datapoints)), desc=\"Sender\")\n\n # <===== SENDER =====>\n\n ####\n xs = []\n for xi in range(len(datapoints)):\n (x, _) = datapoints[xi]\n x = x.to(device).view(xdim)\n xs.append(x)\n\n for zi in range(nz):\n mus = []\n scales = []\n for xi in tqdm(range(len(datapoints))):\n input = zcentres[zi - 1, zrange, zsyms[xi]] if zi > 0 else xcentres[xrange, xs[xi].long()]\n mu, scale = model.infer(zi)(given=input)\n mus.append(mu)\n scales.append(scale)\n\n\n s = time.time()\n cdfs_b = logistic_cdf(\n torch.stack(\n [zendpoints[zi]]*len(datapoints)\n ).permute(2, 0, 1), \n torch.stack(mus), \n torch.stack(scales)\n ).permute(1, 2, 0)\n\n pmfs_b = torch.cat((\n cdfs_b[:, :, 0].unsqueeze(2), \n cdfs_b[:, :, 1:] - cdfs_b[:, :, :-1], \n 1. 
- cdfs_b[:, :, -1].unsqueeze(2)\n ), dim=2)\n \n ans = ANS(\n pmfs_b.to(ans_device),\n bits=ansbits, quantbits=quantbits\n )\n t1 = time.time()\n states, zsymtops = ans.batch_decode(states)\n t2 = time.time()\n zsymtops = zsymtops.to(device)\n\n if zi == 0:\n reststates = states.copy()\n assert all([\n len(rb) > 1\n for rb in reststates\n ]), \"too few initial bits\" # otherwise initial state consists of too few bits\n\n z_dec_pmfs = []\n mus = []\n scales = []\n for zsymtop in tqdm(zsymtops):\n z = zcentres[zi, zrange, zsymtop]\n mu, scale = model.generate(zi)(given=z)\n mus.append(mu)\n scales.append(scale)\n \n cdfs_b = logistic_cdf(\n torch.stack(\n [\n (zendpoints[zi - 1] if zi > 0 else xendpoints)\n ]*len(datapoints)\n ).permute(2, 0, 1), \n torch.stack(mus), \n torch.stack(scales)\n ).permute(1, 2, 0)\n\n pmfs_b = torch.cat((\n cdfs_b[:, :, 0].unsqueeze(2), \n cdfs_b[:, :, 1:] - cdfs_b[:, :, :-1], \n 1. - cdfs_b[:, :, -1].unsqueeze(2)\n ), dim=2)\n \n ans = ANS(\n pmfs_b.to(ans_device),\n bits=ansbits, quantbits=quantbits\n )\n\n to_encode = zsyms if zi > 0 else torch.stack(xs).long()\n states = ans.batch_encode(\n states,\n to_encode\n )\n\n zsyms = zsymtops\n\n states = ANS(\n torch.stack([\n prior_pmfs\n for _ in range(len(datapoints))\n ]).to(ans_device), \n bits=ansbits, quantbits=quantbits\n ).batch_encode(states, zsymtops)\n \n\n totaladdedbits_for_xs = [\n (len(state) - len(initialstate)) * 32\n for (state, initialstate)\n in zip(states, initialstates)\n ]\n\n totalbits_for_xs = [\n (len(state) - (len(restbits) - 1)) * 32\n for (state, restbits)\n in zip(states, reststates)\n ]\n\n iterator = tqdm(\n enumerate(\n zip(totaladdedbits_for_xs, totalbits_for_xs)\n ))\n with torch.no_grad():\n for xi, (totaladdedbits, totalbits) in iterator:\n x = xs[xi]\n model.compress(False)\n logrecon, logdec, logenc, _ = model.loss(x.view((-1,) + model.xs))\n elbo = -logrecon + torch.sum(-logdec + logenc)\n model.compress(True)\n\n nets[ei, xi] = (totaladdedbits / xdim) - nets[ei, :xi].sum()\n elbos[ei, xi] = elbo.item() / xdim\n cma[ei, xi] = totalbits / (xdim * (xi + 1))\n total[ei, xi] = totalbits\n\n iterator.set_postfix_str(s=f\"N:{nets[ei,:xi+1].mean():.2f}±{nets[ei,:xi+1].std():.2f}, D:{nets[ei,:xi+1].mean()-elbos[ei,:xi+1].mean():.4f}, C: {cma[ei,:xi+1].mean():.2f}, T: {totalbits:.0f}\", refresh=False)\n\n\n state_file = f\"bitstreams/mnist/nz{nz}/{'Bit-Swap' if bitswap else 'BB-ANS'}/{'Bit-Swap' if bitswap else 'BB-ANS'}_{quantbits}bits_nz{nz}_experiment{ei + 1}_batch\"\n print(state_file)\n # write state to file\n # print(len(states))\n # print([len(s) for s in states])\n\n max_common_len = min([len(s) for s in states])\n common_len = 0\n \n for pref in range(max_common_len):\n if len(set(s[pref] for s in states)) > 1:\n break\n common_len = pref + 1\n\n print(\"common len:\", common_len)\n states_to_dump = (\n states[0][:common_len],\n [\n s[common_len:]\n for s in states\n ]\n )\n with open(state_file, \"wb\") as fp:\n pickle.dump(states_to_dump, fp)\n\n state = None\n # open state file\n with open(state_file, \"rb\") as fp:\n states_prefix, states_postfixes = pickle.load(fp)\n states = [\n states_prefix + sp \n for sp in states_postfixes\n ]\n \n print([len(s) for s in states])\n print(sum([\n len(s) - len(inits)\n for (s, inits) in zip(states, initialstates)\n ]))\n\n # <===== RECEIVER =====>\n\n # priors\n states, zsymtops = ANS(\n torch.stack([\n prior_pmfs\n for _ in range(len(datapoints))\n ]).to(ans_device), \n bits=ansbits, quantbits=quantbits\n ).batch_decode(states)\n 
zsymtops = zsymtops.to(device)\n\n for zi in reversed(range(nz)):\n zs = z = zcentres[zi, zrange, zsymtops]\n\n z_dec_pmfs = []\n mus = []\n scales = []\n for xi in tqdm(range(len(datapoints))):\n\n z = zs[xi]\n mu, scale = model.generate(zi)(given=z)\n mus.append(mu)\n scales.append(scale)\n \n cdfs_b = logistic_cdf(\n torch.stack(\n [(zendpoints[zi - 1] if zi > 0 else xendpoints)]*len(datapoints)\n ).permute(2, 0, 1), \n torch.stack(mus), \n torch.stack(scales)\n ).permute(1, 2, 0)\n\n pmfs_b = torch.cat((\n cdfs_b[:, :, 0].unsqueeze(2), \n cdfs_b[:, :, 1:] - cdfs_b[:, :, :-1], \n 1. - cdfs_b[:, :, -1].unsqueeze(2)\n ), dim=2)\n \n ans = ANS(\n pmfs_b.to(ans_device),\n bits=ansbits, quantbits=quantbits\n )\n \n states, symbols = ans.batch_decode(states)\n symbols = symbols.to(device)\n\n inputs = zcentres[zi - 1, zrange, symbols] if zi > 0 else xcentres[xrange, symbols]\n\n mus = []\n scales = []\n\n for input in tqdm(inputs):\n mu, scale = model.infer(zi)(given=input)\n mus.append(mu)\n scales.append(scale)\n\n cdfs_b = logistic_cdf(\n torch.stack(\n [zendpoints[zi]]*len(datapoints)\n ).permute(2, 0, 1), \n torch.stack(mus), \n torch.stack(scales)\n ).permute(1, 2, 0)\n\n pmfs_b = torch.cat((\n cdfs_b[:, :, 0].unsqueeze(2), \n cdfs_b[:, :, 1:] - cdfs_b[:, :, :-1], \n 1. - cdfs_b[:, :, -1].unsqueeze(2)\n ), dim=2)\n\n ans = ANS(\n pmfs_b.to(ans_device),\n bits=ansbits, quantbits=quantbits\n )\n\n states = ans.batch_encode(states, zsymtops)\n zsymtops = symbols\n\n assert all([\n torch.all(datapoints[xi][0].view(xdim).long().to(device) == zsymtops[xi].to(device))\n for xi in range(len(datapoints))\n ])\n\n assert initialstates == states\n experiment_end_time = time.time()\n print(\"Experiment time\", experiment_end_time - experiment_start_time)\n\n print(f\"N:{nets.mean():.4f}±{nets.std():.2f}, E:{elbos.mean():.4f}±{elbos.std():.2f}, D:{nets.mean() - elbos.mean():.6f}\")\n\n # save experiments\n np.save(f\"plots/mnist{nz}/{'bitswap' if bitswap else 'bbans'}_{quantbits}bits_nets\",nets)\n np.save(f\"plots/mnist{nz}/{'bitswap' if bitswap else 'bbans'}_{quantbits}bits_elbos\", elbos)\n np.save(f\"plots/mnist{nz}/{'bitswap' if bitswap else 'bbans'}_{quantbits}bits_cmas\",cma)\n np.save(f\"plots/mnist{nz}/{'bitswap' if bitswap else 'bbans'}_{quantbits}bits_total\", total)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default=0, type=int) # assign to gpu\n parser.add_argument('--nz', default=2, type=int) # choose number of latent variables\n parser.add_argument('--quantbits', default=10, type=int) # choose discretization precision\n parser.add_argument('--bitswap', default=1, type=int) # choose whether to use Bit-Swap or not\n\n args = parser.parse_args()\n print(args)\n\n gpu = args.gpu\n nz = args.nz\n quantbits = args.quantbits\n bitswap = args.bitswap\n\n for nz in [nz]:\n for bits in [quantbits]:\n for bitswap in [bitswap]:\n compress(bits, nz, bitswap, gpu)","sub_path":"mnist_batch_compress.py","file_name":"mnist_batch_compress.py","file_ext":"py","file_size_in_byte":14120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205748309","text":"import base64, zlib\n\ndef Update(Data, FirstGid = 1, TileMapWidth = 8, MapWidth = 100, NewGid = 0):\n OutData = [0] * (len(Data) * 4);\n if(NewGid == 0):\n NewGid = FirstGid;\n for [i,val] in enumerate(Data):\n Byte = i%4;\n if((Byte % 4) != 0):\n continue;\n i = i // 4;\n val -= FirstGid;\n OutTile = ((val % TileMapWidth)*2) + ((val // 
TileMapWidth)*TileMapWidth*4) + NewGid;\n        OutPos = ((i % MapWidth)*2) + ((i // MapWidth)*MapWidth*4);\n        OutData[(OutPos)*4] = OutTile;\n        OutData[(OutPos + 1)*4] = OutTile + 1;\n        OutData[(OutPos + 2*MapWidth)*4] = OutTile + 2*TileMapWidth;\n        OutData[(OutPos + 2*MapWidth + 1)*4] = OutTile + 2*TileMapWidth + 1;\n    for [i,val] in enumerate(OutData):\n        if(val >= 256):\n            OutData[i-1] = val // 256;\n            OutData[i] = val % 256;\n    return OutData;  # the callers below rely on this return value\n\ndef ExpandMap(RawIn, FirstGid = 1, TileMapWidth = 8, MapWidth = 100, NewGid = 0):\n    Data = zlib.decompress(base64.decodestring(RawIn));\n    OutData = Update(Data,FirstGid,TileMapWidth,MapWidth,NewGid);\n    return base64.encodestring(zlib.compress(bytes(OutData)))\n\ndef ExpandBlockers(RawIn, FirstGid = 1, TileMapWidth = 8, MapWidth = 100, NewGid = 0):\n    Data = zlib.decompress(base64.decodestring(RawIn));\n    \n    OutData = Update(Data,FirstGid,TileMapWidth,MapWidth,NewGid);\n    return base64.encodestring(zlib.compress(bytes(OutData)))\n","sub_path":"Utilities/ExpandMap.py","file_name":"ExpandMap.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"309034976","text":"import pyglet\nfrom game import player, monster, resources\nfrom random import randint\n\nfrom config import WIDTH, HEIGHT\n\n# Set up a window\ngame_window = pyglet.window.Window(WIDTH, HEIGHT)\n\n# Create the container for our graphics\nmain_batch = pyglet.graphics.Batch()\n\n# Load the main music\ntheme_song = pyglet.media.load('./resources/music.wav')\nmusic = pyglet.media.Player()\nmusic.queue(theme_song)\n\n# Set up the two top labels\nscore_label = pyglet.text.Label(text=\"Caught 0\", x=15, y=75, batch=main_batch)\n\n# Initialize the player sprite\nhero = player.Player(x=400, y=300, batch=main_batch)\ngoblin = monster.Monster(x=randint(0, WIDTH), y=randint(0,HEIGHT), batch=main_batch)\n\n# Store all objects that update each frame in a list\ngame_objects = [hero, goblin]\n\n# Tell the main window that the player object responds to events\ngame_window.push_handlers(hero.key_handler)\n\n@game_window.event\ndef on_draw():\n    game_window.clear()\n    resources.background.blit(0, 0)\n    main_batch.draw()\n\n\nscore = 0\nis_drawing = True # Controls whether to show movement\n\n\ndef game_over():\n    global is_drawing\n\n    is_drawing = False\n    music.pause()\n\n\ndef update(dt):\n\n    global score\n\n    if is_drawing:\n\n        for obj in game_objects:\n            obj.update(dt)\n\n        # To avoid handling collisions twice, we employ nested loops of ranges.\n        # This method also avoids the problem of colliding an object with itself.\n        for i in range(len(game_objects)):\n            for j in range(i + 1, len(game_objects)):\n\n                obj_1 = game_objects[i]\n                obj_2 = game_objects[j]\n\n                # Make sure the objects haven't already been killed\n                if not obj_1.dead and not obj_2.dead:\n                    if obj_1.collides_with(obj_2):\n                        obj_1.handle_collision_with(obj_2)\n                        obj_2.handle_collision_with(obj_1)\n\n        # Get rid of dead objects\n        for to_remove in [obj for obj in game_objects if obj.dead]:\n            # Remove the object from any batches it is a member of\n            to_remove.delete()\n\n            # Remove the object from our list\n            game_objects.remove(to_remove)\n\n            score += 1\n            score_label.text = f\"Caught {score}\"\n\n            gotcha_sound_effect = pyglet.media.load('./resources/bullet.wav', streaming=False)\n            gotcha_sound_effect.play()\n\n            # Add a new monster\n            new_goblin = monster.Monster(x=randint(0, WIDTH), y=randint(0, HEIGHT), batch=main_batch)\n            game_objects.append(new_goblin)\n\n\nif __name__ == \"__main__\":\n    # Update &#13;
the game 120 times per second\n pyglet.clock.schedule_interval(update, 1 / 120.0)\n\n music.play()\n # Tell pyglet to do its thing\n pyglet.app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"582768991","text":"list=[]\n\ndef primero(x):\n storage=0\n count=2\n voyager=1\n while(x):\n if x % count == 0:\n list.append(count)\n print (list)\n voyager*=count\n x=x/count\n print (voyager)\n else:\n # print (\"hello2\")\n count+=1\n return list\n\n\nprint (primero(600851475143))\n\n\n##check=[1,2,3,5,7,9,13]\n##def primefactor(x):\n## count=0\n## while(count0:\n f, s = name.split('.', 1)\n if not f in data:\n data[f] = {}\n data[f][s] = m.group(2)\n else:\n data[name] = m.group(2)\n\n return data\n","sub_path":"reppy/Args.py","file_name":"Args.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"620292507","text":"\n\"\"\"\nhttps://www.youtube.com/watch?v=3M8q-wB2tmw\nA = [1,15,7,9, 2,5,10]\n i\nK = 3\n\ndp[i] = dp[j] + max(dp[j:i]) * (i-j) for j = i-1, i, i-K+1\n\n\n1043.Partition-Array-for-Maximum-Sum\n这是一个基本款的DP。我们考虑当前正在处理的最后一个元素A[i],必然会想到它归为哪个subarray?因为A[i]只可能归为最后一个subarray,\n我们自然会联想到这最后一个subarray的长度可能是1,2,...,直至K。于是我们显然会挨个尝试一遍。只要确定最后一个subarray的范围(比如说从j到i,\n那么最后一个subarray的sum就能轻易知道(就是这个subarray中的最大值乘以元素个数i-j+1),并且这个subarray前面的所有元素之和恰好就是dp[j-1].\n\n所以状态转移方程就是:\n\ndp[i] = max{ dp[j-1], Max_element over A[j,..i] * (i-j+1)}, for j=i, i-1, ... , i-K+1\n另外需要注意一下,j不可能小于0。\n\n\n\"\"\"\n\n\nclass Solution:\n def maxSumAfterPartitioning(self, A, K) -> int:\n n = len(A)\n dp = [0] * (n + 1)\n\n for i in range(1, n + 1):\n maxi = float('-inf')\n for k in range(1, min(i, K) + 1):\n maxi = max(maxi, A[i - k])\n dp[i] = max(dp[i], dp[i - k] + maxi * k)\n\n return dp[n]\n\n\n\n\n","sub_path":"LeetcodeNew/python2/LC_1043.py","file_name":"LC_1043.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"213412154","text":"import configparser\nimport sys\nfrom pathlib import Path\nimport os\nfrom PyQt5 import QtWidgets, QtCore, uic\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtGui import QPainter, QBrush, QPen\nimport pandas as pd\nimport random\nfrom PyQt5.QtCore import Qt\nimport time\nimport pyautogui\n\n\"\"\"\nInitiating all global variables (the values remain unused and are \nonly suggestions for the configuration file and arguments at running the script)\n\"\"\"\nconfig_url = 'setup.ini'\ncanvas_min_width = 200\ncanvas_min_height = 100\ntargets_per_row = 15\ntargets_per_column = 7\ntargetsize = 40\ntargetspace = 60\nrepetitions = 3\nid = 345\ncondition_selection = [\"normal\"]\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\n\nclass Test:\n \"\"\"\n The Test-class is a structure-esk-pre-set of the actual test, additionally it predetermines the\n test to a large extend and is capable of writing csv files\n \"\"\"\n\n def __init__(self):\n # Initializes test & dataframe\n self.column_names = [\"ID\", \"Condition\", \"Repetition\", \"Target Index\",\n \"Target Position(relative)\", \"Target Size(absolute)\",\n \"Timestamp(Teststart)\", \"Timestamp(Rep_load)\", \"Timestamp(clicked)\",\n \"Pointer Postition(start, absolute)\", \"Pointer Postition(end, absolute)\",\n \"Pointer Postition(end, relative)\"]\n self.log_data = 
pd.DataFrame(columns=self.column_names)\n self.path_results = \"result.csv\"\n self.participant_ID = 0\n self.pos_conditons = condition_selection\n self.participant_Condition = [\"normal\"] * repetitions\n\n @staticmethod\n def setup_target():\n # returns random index in array of target possibilities\n index = random.randrange(0, targets_per_row * targets_per_column)\n return index\n\n def create_test(self, p_id):\n # creates test on command and fills the table\n self.participant_ID = p_id\n self.set_res_path()\n index = 0\n while index < repetitions:\n for j in range(0, len(self.pos_conditons)):\n self.participant_Condition[index + j] = str(self.pos_conditons[j])\n if index == repetitions - 1:\n break\n index = index + j + 1\n self.condition_roulette()\n target = [None] * repetitions\n self.log_data[self.column_names[2]] = target\n for i in range(0, repetitions):\n target[i] = self.setup_target()\n self.log_data[self.column_names[0]] = self.participant_ID\n print(self.participant_Condition)\n self.log_data[self.column_names[1]] = self.participant_Condition\n self.log_data[self.column_names[2]] = self.log_data.index\n self.log_data[self.column_names[3]] = target\n self.log_data[self.column_names[5]] = targetsize\n\n def condition_roulette(self):\n # algorithm in for counter-balanced sorting\n temp_cond = self.pos_conditons[0]\n for i in range(0, len(self.pos_conditons)):\n if i == (len(self.pos_conditons) - 1):\n self.pos_conditons[i] = temp_cond\n break\n self.pos_conditons[i] = self.pos_conditons[i + 1]\n\n def set_res_path(self):\n # sets save file path for the results\n self.path_results = \"result_ID\" + self.participant_ID + \".csv\"\n\n def save_test(self):\n # saves table to \"results_ID[..].csv\"\n path_results = self.path_results\n file = Path(path_results)\n if file.is_file():\n self.path_results = self.rename_filepath(path_results)\n self.save_test()\n return\n self.log_data.to_csv(path_results, index=False)\n\n @staticmethod\n def rename_filepath(fpath):\n fpath = fpath.split(\".\")\n return fpath[0] + \"~.\" + fpath[1]\n\n # getter and setter:\n\n def set_timestamp(self, rep_status, case):\n if case == 0:\n self.log_data[self.column_names[6 + case]] = time.time()\n return\n self.log_data.loc[rep_status, self.column_names[6 + case]] = time.time()\n\n def set_target_rel_pos(self, rep_status, position):\n self.log_data.loc[rep_status, self.column_names[4]] = position\n\n def set_po_pos(self, rep_status, case, position):\n self.log_data.loc[rep_status, self.column_names[9 + case]] = position\n\n def get_current_target(self, rep_status):\n return self.log_data.loc[rep_status, self.column_names[3]]\n\n def get_current_con(self, rep_status):\n return self.log_data.loc[rep_status, self.column_names[1]]\n\n\nclass PointingExperiment(QDialog):\n \"\"\"\n all ui-management in one class...\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.timer = QtCore.QTime()\n self.canvas_margin_top = 70\n self.canvas_margin_default = 15\n self.test_started = False\n self.canvas_updated = False\n self.current_target_pos_x = 0\n self.current_target_pos_y = 0\n self.current_target_index = 0\n self.current_repetition = 0\n self.test = Test()\n self.test.create_test(str(id))\n self.initUI()\n\n def start_test(self):\n self.test_started = True\n self.test.set_timestamp(self.current_repetition, 0)\n self.start_Button.hide()\n return\n\n def paintEvent(self, event):\n # updates the view\n self.label.setText(\"ID: \" + str(id) + \" - \" + str(self.current_repetition + 1) + \"/\" + 
str(repetitions))\n if self.test_started & (not self.canvas_updated):\n condition = str(self.test.get_current_con(self.current_repetition))\n painter_color = Qt.black\n painter_color_fill = Qt.gray\n # condition triggers:\n if condition == \"Color: green\":\n painter_color = Qt.green\n painter_color_fill = Qt.darkGreen\n if condition == \"Color: red\":\n painter_color = Qt.red\n painter_color_fill = Qt.darkRed\n painter = QPainter(self)\n painter.setPen(QPen(painter_color, 2, Qt.SolidLine))\n index = 0\n # draws array of random circles:\n for i in range(0, targets_per_row):\n for j in range(0, targets_per_column):\n painter.setBrush(QBrush(Qt.transparent, Qt.SolidPattern))\n pos_x = random.randrange(0, targetspace - targetsize) + i * targetspace + self.canvas_margin_default\n pos_y = random.randrange(0, targetspace - targetsize) + j * targetspace + self.canvas_margin_top\n # if index is the index of the target the drawn target data will be saved\n if index == self.test.get_current_target(self.current_repetition):\n self.current_target_pos_x = pos_x\n self.current_target_pos_y = pos_y\n painter.setBrush(QBrush(painter_color_fill, Qt.SolidPattern))\n self.test.set_target_rel_pos(self.current_repetition, pyautogui.Point(pos_x, pos_y))\n painter.drawEllipse(pos_x, pos_y, targetsize, targetsize)\n index = index + 1\n self.test.set_timestamp(self.current_repetition, 1)\n self.test.set_po_pos(self.current_repetition, 0, pyautogui.position())\n self.canvas_updated = True\n\n def check_input(self, m_pos_x, m_pos_y):\n # check if pointer is in the target's space:\n if self.current_target_pos_x > m_pos_x:\n return\n if self.current_target_pos_y > m_pos_y:\n return\n if (self.current_target_pos_y + targetsize) < m_pos_y:\n return\n if (self.current_target_pos_x + targetsize) < m_pos_x:\n return\n print(\"success!!\")\n self.test.set_timestamp(self.current_repetition, 2)\n self.test.set_po_pos(self.current_repetition, 1, pyautogui.position())\n self.test.set_po_pos(self.current_repetition, 2, pyautogui.Point(m_pos_x, m_pos_y))\n print(self.test.log_data)\n self.update()\n\n def update(self):\n super().update()\n if self.test_started:\n if self.current_repetition + 1 < repetitions:\n self.current_repetition = self.current_repetition + 1\n self.canvas_updated = False\n return\n self.test_started = False\n self.test.save_test()\n self.close()\n\n def mousePressEvent(self, event):\n # gets mouse press\n if self.test_started:\n if event.button() == QtCore.Qt.LeftButton:\n self.check_input(event.x(), event.y())\n\n def initUI(self):\n # initialize important ui-components\n uic.loadUi(\"Pointing_exp.ui\", self)\n self.setWindowTitle('Pointing Experiment')\n width = targets_per_row * targetspace\n height = targets_per_column * targetspace\n if width < canvas_min_width:\n width = canvas_min_width\n if height < canvas_min_height:\n height = canvas_min_height\n self.setFixedSize(width + 2 * self.canvas_margin_default,\n height + 2 * self.canvas_margin_default + self.canvas_margin_top)\n self.label.setText(\"ID: \" + str(id) + \" - 0/\" + str(repetitions))\n self.start_Button.clicked.connect(lambda: self.start_test())\n\n\ndef init_args_handler():\n # how to handle the possible arguments (dialog tree u.a)\n global id\n global config_url\n if len(sys.argv) != 3:\n exception_handler(\"NoArgs\")\n if not (os.path.isfile(sys.argv[1])):\n exception_handler(\"noFile\")\n if not isinstance(int(sys.argv[2]), int):\n exception_handler(\"noID\")\n config_url = sys.argv[1]\n id = sys.argv[2]\n return\n\n\ndef 
exception_handler(case):\n # exiting earlier due to .. reasons\n print(dialog(case))\n sys.exit()\n\n\ndef dialog(case):\n # a simple dialog manager\n switch = {\n \"NoArgs\": \"Please provide a configuration file & an ID as arguments!\",\n \"noFile\": \"Couldn't open file!\",\n \"noID\": \"Please provide participant ID!\",\n \"noInput\": \"Missing input from configuration file!\\n\\n\"\n \"Please Provide following settings in category \\'Canvas_Settings\\':\\n\"\n \"\\'CanvasMinWidth\\', \\'CanvasMinHeight\\'\\n\\n\"\n \"...and the following settings in category \\'Test_Settings\\':\\n\"\n \"\\'TargetsPerRow\\', \\'TargetsPerColumn\\', \\'TargetSize\\', \\'TargetSpace\\', \\'Repetitions\\', \\'Conditions\\'\\n\"\n }\n return switch.get(case)\n\n\ndef get_presets():\n # reading the configurations and writing them into the predicated globule counterparts\n global canvas_min_width, canvas_min_height, targets_per_row, targets_per_column, targetsize, targetspace, repetitions, condition_selection\n init_args_handler()\n config = configparser.ConfigParser()\n config.read(config_url)\n try:\n canvas_min_width = int(config['Canvas_Settings']['CanvasMinWidth'])\n canvas_min_height = int(config['Canvas_Settings']['CanvasMinHeight'])\n targets_per_row = int(config['Test_Settings']['TargetsPerRow'])\n targets_per_column = int(config['Test_Settings']['TargetsPerColumn'])\n targetsize = int(config['Test_Settings']['TargetSize'])\n targetspace = int(config['Test_Settings']['TargetSpace'])\n repetitions = int(config['Test_Settings']['Repetitions'])\n condition_selection = config['Test_Settings']['Conditions'].split(\", \")\n except KeyError:\n exception_handler(\"noInput\")\n\n\ndef main():\n get_presets()\n app = QtWidgets.QApplication(sys.argv)\n win = PointingExperiment()\n\n win.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pointing_experiment.py","file_name":"pointing_experiment.py","file_ext":"py","file_size_in_byte":11804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279996526","text":"import math\n## IHRA_ENGINE.HRA_FUNC\ndef stress_score( physical_health, hours_sleep,life_satis, personal_loss,marital_status,social_tie ):\n \"function stress score\"\n # [old] physical_health, hours_sleep,life_satis, personal_loss,marital_status,social_tie\n # [new] record.ql_overallhealth, record.sc_sleepduration, record.ql_lifesatisfaction,record.ql_personalloss, record.demo_maritalstatus, record.ql_socialtiestrength\n stress_sum = 0;\n addto = 0;\n\n if physical_health == 1: # excellent\n addto = 1;\n elif physical_health == 2: # very good\n addto = 1;\n elif physical_health == 3:\n addto = 2;\n elif physical_health == 4:\n addto = 3;\n elif physical_health == 5:\n addto = 5;\n elif physical_health == 9: # do not count\n addto = 1;\n\n else:\n addto = 3; # others\n\n stress_sum += addto;\n # --- add other items here ---->\n\n\n #SC_SLEEPDURATION\n\n #15 hours or less\n #26 hours\n #37 hours\n #48 hours\n #59 hours or more\n\n # --- Hours Sleep new coding ---->\n if hours_sleep == 1: # 5 or less\n addto = 4;\n elif hours_sleep == 2: # 6 hrs\n addto = 4;\n elif hours_sleep == 3: #7 hrs \n addto = 2;\n elif hours_sleep == 4: #8 hrs\n addto = 2;\n elif hours_sleep == 5: # 9 or more\n addto = 4;\n elif hours_sleep == 9: # do not count\n addto = 2;\n else:\n addto = 3; # others\n\n stress_sum += addto;\n\n #QL_LIFESATISFACTION\n #1Completely satisfied\n #2Mostly satisfied\n #3Partly satisfied\n #4Not satisfied\n # 1,3, 
5, 9, 1, 5\n    if life_satis == 1: # 1Completely satisfied\n        addto = 1;\n    elif life_satis == 2: # \n        addto = 3;\n    elif life_satis == 3: #\n        addto = 5;\n    elif life_satis == 4: # 4 Not satisfied\n        addto = 9;\n    elif life_satis == 9: # do not count\n        addto = 1;\n\n    else:\n        addto = 5; # others\n    stress_sum += addto;\n\n    # QL_PERSONALLOSS\n    #1Yes, two or more serious losses\n    #2Yes, one serious loss\n    #3No\n    # 9,6,3,3,5\n    if personal_loss == 1: # 1Yes, two or more serious losses\n        addto = 9;\n    elif personal_loss == 2: # Yes, one serious loss\n        addto = 6;\n    elif personal_loss == 3: # No\n        addto = 3;\n    elif personal_loss == 9: # do not count\n        addto = 3;\n\n    else:\n        addto = 5; # others\n    stress_sum += addto;\n\n\n    # DEMO_MARITALSTATUS\n\n    #1Single (never married)\n    #2Married\n    #3Widowed\n    #4Divorced\n    #5Separated\n    #6Other\n    # 2,4,4,1,5,3,1,3\n    if marital_status == 1: # Single (never married)\n        addto = 2;\n    elif marital_status == 2: # Married\n        addto = 1;\n    elif marital_status == 3: #Widowed\n        addto = 5;\n    elif marital_status == 4: #Divorced\n        addto = 4;\n    elif marital_status == 5: #Separated\n        addto = 4;\n    elif marital_status == 6: # Other\n        addto = 3;\n    elif marital_status == 9: # do not count\n        addto = 1;\n\n    else:\n        addto = 3; # others\n    stress_sum += addto;\n\n    # social_tie\n    #QL_SOCIALTIESTRENGTH\n\n    #1Very strong\n    #2About average\n    #3Weaker than average\n    #4I don't know\n    # 2,5,8,5,2,5\n    if social_tie == 1: # 1Very strong\n        addto = 2;\n    elif social_tie == 2: # 2 About average\n        addto = 5;\n    elif social_tie == 3: # #3Weaker than average\n        addto = 8;\n    elif social_tie == 4: # 4 I don't know\n        addto = 5;\n    elif social_tie == 9: # do not count\n        addto = 2;\n\n    else:\n        addto = 5; # others\n    stress_sum += addto;\n\n    return stress_sum\n# stress_score end of function\n\n\n\n# !! function ideal_weight(sex varchar2, height_inches pls_integer, frame varchar2) return pls_integer;\n# --default sex: male\n\ndef ideal_weight(sex, height_inches, frame ) :\n    # stub not yet ported from the PL/SQL signature above\n    raise NotImplementedError;\n\n# !! function bmi(wgt pls_integer, hgt pls_integer) return number;\ndef bmi(wgt, hgt) :\n    if wgt> 0 and hgt > 0:\n        #bmi = math.trunc(wgt/2.2046/pow(.0254*hgt,2));\n        bmi = float('%.1f'%(wgt/2.2046/pow(.0254*hgt,2)));\n        \n        if bmi > 99.9:\n            bmi = 99.9;\n        return bmi;\n    else:\n        return -1;\n\n# !! function bmi_ideal_weight(hgt_inch pls_integer, bmi number:=24.9) return pls_integer;\ndef bmi_ideal_weight(hgt, bmi) :\n    #return round(bmi*power(hgt_inch*.0254,2)*2.2046);\n    return round(bmi*pow(hgt*.0254,2)*2.2046,1) ;\n\n#!! function bmi_weight_range(hgt number, bmi_low number:=19, bmi_high number:=24.9) return varchar2;\ndef bmi_weight_range(hgt, bmi_low , bmi_high ) :\n    # stub not yet ported from the PL/SQL signature above\n    raise NotImplementedError;\n\n# !! function mental_prob (stress_score in number,\n#                          absent_days in number,\n#                          use_drug_medication in number,\n#                          physical_health in number,\n#                          sex in number,\n#                          age in number,\n#                          feel_stress in number,\n#                          stress_effect in number) return number;\n\ndef mental_prob(stress_score, absent_days, use_drug_medication,physical_health, sex, age, feel_stress, stress_effect ) :\n    # stub not yet ported from the PL/SQL signature above\n    raise NotImplementedError;\n\n# !! function bmi_kgcm(wgt pls_integer, hgt pls_integer) return number;\n#!! function bmi_ideal_weight_kgcm(hgt_cm pls_integer, bmi number:=24.9) return pls_integer;\n#!! function bmi_ideal_height(wt_lb pls_integer, bmi number:=24.9) return pls_integer;\n#!! function bmi_ideal_height_kgcm(wt_kg pls_integer, bmi number:=24.9) return pls_integer;\n#!! &#13;
function bmi_weight_range_kgcm(hgt number, bmi_low number:=19, bmi_high number:=24.9) return varchar2;\n\n\n\n\n","sub_path":"hra2016_func.py","file_name":"hra2016_func.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"137215732","text":"import math\r\n\r\ndef applyKNN(query):\r\n fileg=open(\"C:/Users/DELL/Desktop/Project/Google/score/\"+query+\".txt\",'r',encoding=\"utf-8-sig\")\r\n fileb=open(\"C:/Users/DELL/Desktop/Project/Bing/score/\"+query+\".txt\",'r',encoding=\"utf-8-sig\")\r\n filed=open(\"C:/Users/DELL/Desktop/Project/DuckDuckGo/score/\"+query+\".txt\",'r',encoding=\"utf-8-sig\")\r\n filec=open(\"C:/Users/DELL/Desktop/Project/Combined/score/\"+query+\".txt\",'r',encoding=\"utf-8-sig\")\r\n \r\n wordg={}\r\n wordb={}\r\n wordd={}\r\n wordc=[]\r\n \r\n while True:\r\n x=fileg.readline()\r\n if(x==''):\r\n break\r\n y=x.split()\r\n wordg[y[0]]=float(y[1])\r\n \r\n while True:\r\n x=fileb.readline()\r\n if(x==''):\r\n break\r\n y=x.split()\r\n wordb[y[0]]=float(y[1])\r\n \r\n while True:\r\n x=filed.readline()\r\n if(x==''):\r\n break\r\n y=x.split()\r\n wordd[y[0]]=float(y[1])\r\n \r\n while True:\r\n x=filec.readline()\r\n if(x==''):\r\n break\r\n y=x.split()\r\n t=(y[0],y[1])\r\n wordc.append(t)\r\n \r\n fileg.close()\r\n fileb.close()\r\n filed.close()\r\n filec.close()\r\n \r\n filer=open(\"C:/Users/DELL/Desktop/Project/Result/\"+query+\".txt\",'w',encoding=\"utf-8\")\r\n \r\n for i in range(1):\r\n j=0\r\n wordr={}\r\n for obj in wordc:\r\n if j==0:\r\n j+=1\r\n t1=obj[0]\r\n print(obj)\r\n filer.write(obj[0]+\" \"+str(obj[1])+\"\\n\")\r\n wordc=wordc[1:]\r\n continue\r\n \r\n try:\r\n wt1g=wordg[t1]\r\n wt1b=wordb[t1]\r\n wt1d=wordd[t1]\r\n \r\n ti=obj[0]\r\n wtig=wordg[ti]\r\n wtib=wordb[ti]\r\n wtid=wordd[ti]\r\n print(wt1g,wt1b,wt1d,wtig,wtib,wtid);\r\n num=(wt1g*wtig + wt1b*wtib + wt1d*wtid)\r\n den=math.sqrt((wt1g*wt1g + wt1b*wt1b + wt1d*wt1d)*(wtig*wtig + wtib*wtib + wtid*wtid))\r\n cs=num/den\r\n print(num,den,cs)\r\n wordr[ti]=cs\r\n except KeyError:\r\n continue\r\n \r\n wordc=sorted(wordr.items(), key = lambda kv:(kv[1], kv[0]),reverse=True) \r\n \r\n for obj in wordc:\r\n filer.write(obj[0]+\" \"+str(obj[1])+\"\\n\")\r\n\r\n#============================================================================\r\n\r\napplyKNN('xyz')\r\n\r\n'''\r\nfile=open(\"C:\\\\Users\\\\DELL\\\\Desktop\\\\Project\\\\query_lists.txt\",'r')\r\nwhile(True):\r\n query=file.readline()\r\n if(query==\"\"):\r\n break\r\n l=len(query)\r\n query=query[:l-1]\r\n \r\n applyKNN(query)\r\n \r\n'''\r\n","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344367471","text":"import sys\nimport os\nimport csv\nimport pymysql\n\n\ndef list_to_csv(FileName,FileList):\n for row_index, list in enumerate(FileList):\n for column_index, string in enumerate(list):\n FileList[row_index][column_index] = FileList[row_index][column_index]\n with open(FileName, 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerows(FileList)\n\n\ndef csv_to_list(fileName):\n data = []\n with open(fileName, newline='') as csvfile:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n tmp = (', '.join(row)).replace(' ','').replace('%','').split(',')\n data.append(tmp)\n return data\n\n\n# 把列出資料夾的程式碼寫成一個函式\ndef 
show_folder_content(folder_path,isWindows=False,isDesignation=False,Designation=[]):\n data = []\n # 判別是否是資料夾\n if os.path.isdir(folder_path):\n print(folder_path + '讀取資料夾內容')\n else:\n print(\"error,{} is not dir\".format(folder_path))\n return \"error,folder_path is not dir\"\n \n # 設定路徑的分隔符號\n IdentificationSymbol = '/'\n if isWindows:\n IdentificationSymbol = '\\\\'\n \n # 開始裝填檔案路徑名稱\n folder_content = os.listdir(folder_path)\n for item in folder_content:\n if os.path.isdir(folder_path + IdentificationSymbol + item):\n\n # 呼叫自己處理這個子資料夾\n tmp_data = show_folder_content(folder_path + IdentificationSymbol + item)\n for line in tmp_data:\n data.append(line)\n elif os.path.isfile(folder_path + IdentificationSymbol + item):\n tmp_file_path = folder_path + IdentificationSymbol + item\n if isDesignation:\n for line in Designation:\n if( item.find(line) != -1):\n data.append(tmp_file_path)\n else:\n data.append(tmp_file_path)\n else:\n print('無法辨識: ' + folder_path + IdentificationSymbol + item)\n return data \n\ndef get_onFilePathOfdate(file_path):\n tmp = file_path.split('/')\n tmp = tmp[-1].split('.')\n return tmp[0]\n\ndef update_TimeofList(date,index,origin_list,deletes):\n data = []\n for line in origin_list:\n for column in line:\n for delete in deletes:\n if (column.find(delete) != -1):\n origin_list.remove(line)\n for line in origin_list:\n line[index] = '{} {}'.format(date,line[index])\n data.append(tuple(line))\n return data\n\n \n\n# 驗證參數指令數目\nif (len(sys.argv) != 2):\n print(\"error, parameter. it's like```python3 main.py ```\")\n sys.exit()\n\n# 驗證設定檔案路徑是否正確\nmain_setting_path = sys.argv[1]\nif (not os.path.isfile(main_setting_path)):\n print(\"error, {} is a dir or not a ture path\".format(main_setting_path))\n sys.exit()\nif (main_setting_path.find(\".csv\") == -1):\n print(\"error, {} is not a csv file\")\n sys.exit()\n\n# \nmain_setting = csv_to_list(main_setting_path)\nobj_title = [\n 'db_table',\n 'db_column',\n 'file_tag',\n 'data_dir',\n 'deletes',\n]\nobj_setting = {}\nif (len(main_setting) != len(obj_title)):\n print(\"error, main_setting amd obj_title are len not match\")\n sys.exit()\n\nfor index,line in enumerate(main_setting):\n obj_setting[obj_title[index]] = line[1:]\n\n\ndata_list = show_folder_content(obj_setting['data_dir'][0],False,True,obj_setting['file_tag'])\n\n\nconnection = pymysql.connect(host='172.16.3.3',\n port=3306,\n user='greenhouser',\n password='greenhouser_test',\n db='green_house',)\n\n\nexecutemany_string = 'INSERT INTO `{}` ({}) VALUES ({})'\nexecutemany_string = executemany_string.format(obj_setting['db_table'][0],'{}','{}')\nmax_len = len(obj_setting['db_column'])-1\n\nfor index,value in enumerate(obj_setting['db_column']):\n if index == max_len:\n executemany_string = executemany_string.format(value,'%s')\n else:\n executemany_string = executemany_string.format(('{},{}'.format(value,{})),('{},{}'.format('%s','{}')))\n\nfor data_file_path in data_list:\n executemany_list = None\n tmp_csv_data_list = csv_to_list(data_file_path)\n tmp_csv_date = get_onFilePathOfdate(data_file_path)\n executemany_list = update_TimeofList(tmp_csv_date,0,tmp_csv_data_list,obj_setting['deletes'])\n\n cursor = connection.cursor()\n effect_row = cursor.executemany(executemany_string,executemany_list)\n try:\n connection.commit()\n except:\n print(\"error,connection.commit\")\n else:\n print('insert done {}'.format(data_file_path))\n\n# data_file_path = '/Users/dingding/Documents/get_fileData_to_mySQL/data/2018-10-08.csv'\n# tmp_csv_data_list = 
csv_to_list(data_file_path)\n# tmp_csv_date = get_onFilePathOfdate(data_file_path)\n# executemany_list = update_TimeofList(tmp_csv_date,0,tmp_csv_data_list)\n# cursor = connection.cursor()\n# effect_row = cursor.executemany(executemany_string,executemany_list)\n# connection.commit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386085855","text":"import tornadis\nimport json\n\nimport tornado.gen\n\n\nclass SessionManager:\n \"\"\" 与 app 绑定的 session client, 一个 app 一个 redis client\"\"\"\n\n def __init__(self, app=None):\n self.app = app\n self.client = tornadis.Client(host=\"localhost\", port=6379, autoconnect=True)\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.session = self\n\n\nclass Session:\n \"\"\" 与 handler 绑定的 session, 一个 handler 一个 session 对象, 公用一个 client\"\"\"\n\n def __init__(self, handler):\n self.handler = handler\n self.client = handler.application.session.client\n\n @tornado.gen.coroutine\n def get_all(self):\n items = yield self.client.call(\"get\", 'session_%s' % self.handler.user_id)\n if items:\n return json.loads(items.decode())\n return dict()\n\n @tornado.gen.coroutine\n def get(self, key):\n items = self.handler.sessions\n if items:\n return items.get(key)\n return None\n\n @tornado.gen.coroutine\n def set(self, **kwargs):\n \"\"\" session 默认的 TTL 是 1 天 \"\"\"\n self.handler.sessions.update(kwargs)\n new_items = json.dumps(self.handler.sessions)\n yield self.client.call(\"set\", 'session_%s' % self.handler.user_id, new_items, 'ex', 79200)\n\n @tornado.gen.coroutine\n def rename(self, new_key):\n \"\"\" 用户登陆后, 将未登录时的 session 数据转移到用户的 session 中\"\"\"\n items = yield self.get(self.handler.user_id)\n if items:\n self.handler.sessions = items\n yield self.client.call(\"rename\", 'session_%s' % self.handler.user_id, 'session_%s' % new_key)\n\n @tornado.gen.coroutine\n def delete(self):\n \"\"\" 用户注销后, 删除所有 session 数据 \"\"\"\n self.handler.sessions = dict()\n yield self.client.call(\"del\", 'session_%s' % self.handler.user_id)\n\n\nclass Cache:\n \"\"\" cache 与 session 都是保存在 redis 中的缓存, session 保存用户关键信息,\n cache 保存 statuses, 避免短时间内重复请求, cache 的 TTL 为 3 min\"\"\"\n\n def __init__(self, handler):\n self.handler = handler\n self.client = handler.application.session.client\n\n @tornado.gen.coroutine\n def get_all(self, is_anonymous=False):\n if not is_anonymous:\n key = 'cache_%s' % self.handler.user_id\n else:\n key = 'cache_anonymous'\n\n items = yield self.client.call(\"get\", key)\n if items:\n items = json.loads(items.decode())\n return items\n return dict()\n\n @tornado.gen.coroutine\n def get(self, key, is_anonymous=False):\n items = yield self.get_all(is_anonymous)\n return items.get(key, [])\n\n @tornado.gen.coroutine\n def set(self, is_anonymous=False, ttl=180, **kwargs):\n \"\"\"因为在 redis 中使用 set 命令时会覆盖原有 TTL\n 所以可以在更新时指定 TTL=0, 这样会在原有时间上加 1 秒\"\"\"\n if kwargs:\n if not is_anonymous:\n key = 'cache_%s' % self.handler.user_id\n else:\n key = 'cache_anonymous'\n items = yield self.get_all(is_anonymous)\n items.update(**kwargs)\n new_items = json.dumps(items)\n if ttl == 0:\n ttl = yield self.client.call('ttl', key)\n ttl += 1\n yield self.client.call(\"set\", key, new_items, 'ex', ttl)\n\n @tornado.gen.coroutine\n def add(self, type, value):\n key = 'cache_%s' % self.handler.user_id\n items = yield self.get_all()\n old_value = items.setdefault(type, [])\n old_value.extend(value)\n new_items = 
json.dumps(items)\n        ttl = yield self.client.call('ttl', key)\n        yield self.client.call(\"set\", key, new_items, 'ex', ttl + 10)\n\n    @tornado.gen.coroutine\n    def clear(self):\n        key = 'cache_%s' % self.handler.user_id\n        yield self.client.call('del', key)\n\n\nsession = SessionManager()\n","sub_path":"ext/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"371390235","text":"\n\"\"\"\nGiven an integer array nums, find a contiguous non-empty subarray within the\narray that has the largest product, and return the product.\n\nIt is guaranteed that the answer will fit in a 32-bit integer.\n\nA subarray is a contiguous subsequence of the array.\n\"\"\"\nfrom typing import List\nfrom math import inf\n\n\nclass Solution:\n    def maxProduct(self, nums: List[int]) -> int:\n        # track both the max and the min product ending at each position,\n        # since a negative number can turn the smallest product into the largest\n        cur_max = cur_min = res = nums[0]\n        for num in nums[1:]:\n            candidates = (num, num * cur_max, num * cur_min)\n            cur_max, cur_min = max(candidates), min(candidates)\n            res = max(res, cur_max)\n        return res\n\n\ns = Solution()\nprint(s.maxProduct([-2,0,-1,-2,-3]))  # -> 6, from the subarray [-2,-3]\n","sub_path":"Maximum Product Subarray.py","file_name":"Maximum Product Subarray.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"118698460","text":"# This Python file contains a few functions and classes used across the project.\nimport numpy as np\n\n# Sigmoid activation function\nsigmoid = lambda x: (1 / (1 + np.exp(-x)))\n\n# ReLu activation function\nrelu = lambda x: np.maximum(0, x)\n\n# Softplus activation function\nsoftplus = lambda x: np.log1p(np.exp(x))\n\n# Tanh activation function\ntanh = lambda x: (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\n\n# Our neural network class\nclass NeuralNetwork:\n\tdef __init__(self, layer_units):\n\t\tself.layer_units = layer_units\n\n\tdef get_parameter_count(self):\n\t\tweight_params_count = sum([\n\t\t\tself.layer_units[i] * self.layer_units[i+1]\n\t\t\tfor i in range(0, len(self.layer_units)-1)\n\t\t])\n\t\tbias_params_count = len(self.layer_units)-1\n\t\treturn (weight_params_count + bias_params_count)\n\n\t# Feed-forwards the state vector through the network, using the given individual's genotype as the network parameters\n\tdef forward(self, individual, state):\n\t\t# Make sure state is a rank-2 tensor: [?, observation_space_dim]\n\t\tif len(state.shape) == 1: state = np.array([state])\n\t\tassert len(state.shape) == 2\n\n\t\t# Extract the neural network parameters from the given gene sequence\n\t\tgeneCurrentIndex = 0\n\t\tlayer_weights_n_biases = []\n\t\tfor layer_i in range(0, len(self.layer_units)-1):\n\t\t\t## Extract the layer weights\n\t\t\t# (previous layer neuron count x next layer neuron count)\n\t\t\tweightParamCount = self.layer_units[layer_i] * self.layer_units[layer_i+1]\n\n\t\t\t# Pull them from the gene sequence\n\t\t\tlayer_weights = np.array(individual[geneCurrentIndex: geneCurrentIndex+weightParamCount])\n\t\t\tgeneCurrentIndex += weightParamCount\n\n\t\t\t## Extract the layer bias value\n\t\t\tlayer_bias = individual[geneCurrentIndex]\n\t\t\tgeneCurrentIndex += 1\n\n\t\t\t# Reshape the weight values from a 1D vector into the required shape\n\t\t\tlayer_weights = &#13;
np.reshape(\n\t\t\t\tlayer_weights,\n\t\t\t\t(\n\t\t\t\t\tself.layer_units[layer_i],\n\t\t\t\t\tself.layer_units[layer_i+1]\n\t\t\t\t)\n\t\t\t)\n\t\t\tlayer_weights_n_biases.append((layer_weights, layer_bias))\n\n\t\t# Starting from the input vector, multiply the layer outputs by the weights in turn and add the biases: i.e. feed-forward!\n\t\thidden_output = state\n\t\tfor weight, bias in layer_weights_n_biases:\n\t\t\t# Activation\n\t\t\thidden_output = sigmoid(\n\t\t\t\t# x*w + b\n\t\t\t\tnp.dot(hidden_output, weight) + bias\n\t\t\t)\n\n\t\t# At this point hidden_output holds the network's output; take the corresponding action via argmax.\n\t\treturn np.argmax(hidden_output)\n\n## A shared helper for running the given gym environment with the given action function\ndef run_gym_environment(env, act_fn, n_episode=1, render=False, n_timestep=None):\n\ttotalRewards = 0\n\n\t# Episode loop\n\tfor episode in range(n_episode):\n\t\ttimestep = 0\n\t\tstate = env.reset()\n\t\tdone = False\n\n\t\tdef shouldStop():\n\t\t\t# If a timestep limit was given, honor only that\n\t\t\tif (n_timestep != None):\n\t\t\t\treturn (timestep >= n_timestep)\n\t\t\t# Otherwise use the done flag provided by the environment\n\t\t\telse:\n\t\t\t\treturn done\n\n\t\t# Timestep loop\n\t\tepisodeReward = 0\n\t\twhile not shouldStop():\n\t\t\tif render: env.render()\n\n\t\t\t# Decide on an action\n\t\t\taction = act_fn(state)\n\n\t\t\t# Apply it in the environment\n\t\t\tstate, reward, done, _ = env.step(action)\n\t\t\tepisodeReward += reward\n\t\t\ttimestep += 1\n\n\t\tif render: print(\"Episode finished, reward {}\".format(episodeReward))\n\t\ttotalRewards += episodeReward\n\n\t# Returns the total reward collected from the simulation\n\treturn totalRewards\n","sub_path":"Article 3/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"619690891","text":"class Pociag:\n    def __init__(self, val=None, next=None):\n        \"\"\"\n        :param val: Value\n        :param next: Next element\n        :type next: Pociag\n        \"\"\"\n        self.val = val\n        self.next = next\n\n    # Insertion\n\n    def insert_after(self, val):\n        \"\"\"\n        :param val: Element to insert\n        :returns: Inserted element\n        :rtype: Pociag\n        \"\"\"\n        self.next = Pociag(val, self.next)\n        return self.next\n\n    def insert_elements_after(self, t):\n        \"\"\"\n        :param t: Iterable of elements to insert\n        :returns: Last inserted element\n        :rtype: Pociag\n        \"\"\"\n        q = self\n        for i in t:\n            q = q.insert_after(i)\n        return q\n\n    def insert_pociag_after(self, pociag):\n        \"\"\"\n        :type pociag: Pociag\n        :return: Last element inserted\n        :rtype: Pociag\n        \"\"\"\n        last = pociag.last()\n        last.next = self.next\n        self.next = pociag\n        return last\n\n    # Deletion\n\n    def remove_after(self):\n        \"\"\"\n        Remove next element\n\n        :returns: Removed element value\n        \"\"\"\n        if self.next is None:\n            return None\n        v = self.next.val\n        self.next = self.next.next\n        return v\n\n    # Utility\n\n    def __iter__(self):\n        q = self\n        while q is not None:\n            yield q\n            q = q.next\n\n    def last(self):\n        \"\"\"\n        Returns last element\n\n        :returns: Last element\n        :rtype: Pociag\n        \"\"\"\n        q = self\n        while q.next is not None:\n            q = q.next\n        return q\n\n    def print(self):\n        print(list(map(lambda i: i.val, iter(self))))\n\n\nif __name__ == \"__main__\":\n    pociag = Pociag(1)\n    pociag\\\n        .insert_after(2)\\\n        .insert_after(3)\\\n        .insert_after(4)\\\n        .insert_after(5)\n\n    pociag.insert_after(5).remove_after()\n\n    &#13;
pociag.print()\n\n\n","sub_path":"pociag.py","file_name":"pociag.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"562108369","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nProblem 8: Python code for computing two-dimensional Fourier transform of the Gaussian function using numpy.fft.fft2\n\n\n@author: krishnendu\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef fun(x,y): #defining the function\n    return np.exp(-(x**2+y**2))\n\ndef fun1(kx,ky):\n    X,Y=np.meshgrid(kx,ky)\n    return np.exp(-(X**2+Y**2)/4)/2**1.5\nxmin=-50\nxmax=50\nymin=-50\nymax=50\n\nnumpoints=512 #number of sampling points\n\ndx=(xmax-xmin)/(numpoints-1)\ndy=dx\nxarr=np.linspace(xmin,xmax,numpoints) #creating the x array\n    \nyarr=np.copy(xarr) #taking the y array same as the x array\n\nkarrX=np.fft.fftfreq(numpoints,d=dx) #computing the frequencies corresponding to the x axis\n\nkarrY=np.fft.fftfreq(numpoints,d=dy) #computing the frequencies corresponding to the y axis\n\n\nX,Y=np.meshgrid(xarr,yarr)\n\nf=fun(X,Y) #sampling the function\n\nnft=np.fft.fft2(f,norm='ortho') #computing the DFT of the function\n\nkarrX=2*np.pi*karrX #computing the frequencies corresponding to the x array\n\nkarrY=2*np.pi*karrY #computing the frequencies corresponding to the y array\n    \n\nfactorX=np.exp(-1j*karrX*xmin)\nfactorY=np.exp(-1j*karrY*ymin)\n\nZ=dx*dy*(numpoints/(2.0*np.pi))*factorX*factorY*nft #computing the fft of the function\nZ=np.fft.fftshift(Z)\nkarrX=np.fft.fftshift(karrX)\n\nkarrY=np.fft.fftshift(karrY)\n\nKX,KY=np.meshgrid(karrX,karrY)\n\nfig=plt.figure(figsize=plt.figaspect(0.4))\nax1=fig.add_subplot(1,2,1,projection='3d')\n\nax1.contour3D(karrX,karrY,abs(Z),100) #plotting the FFT of the function computed using numpy.fft.fft2\nax1.set_title(\"using numpy.fft.fft2\")\nax1.set_xlabel(\"kx\")\nax1.set_ylabel(\"ky\")\n\nax1=fig.add_subplot(1,2,2,projection='3d')\n\n\nax1.contour3D(karrX,karrY,fun1(karrX,karrY),100) #plotting the analytical Fourier transform of the function\n\nax1.set_title(\"analytical\")\nax1.set_xlabel(\"kx\")\nax1.set_ylabel(\"ky\")\n\n\nplt.show()","sub_path":"Problem_8_Assignment_3.py","file_name":"Problem_8_Assignment_3.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"641263554","text":"import datetime\nimport smtplib\n\ndef enviar_email(texto):\n\teemail = 'projetosimoni@gmail.com'\n\tssenha = 'ivetesangalo'\n\n\tdata = datetime.datetime.now()\n\tassunto = str(data.day)+'-'+str(data.month)+'-'+str(data.year)+'-'+str(data.hour)\n\n\tsmtpObj = smtplib.SMTP('smtp.gmail.com', 587)\n\tsmtpObj.ehlo()\n\tsmtpObj.starttls()\n\n\tsmtpObj.login(eemail, ssenha)\n\n\tconteudo = texto\n\n\tsmtpObj.sendmail(eemail, eemail,'Subject:'+assunto+'.\\n'+conteudo)\n\n\tsmtpObj.quit()\n","sub_path":"enviar_email.py","file_name":"enviar_email.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"340708463","text":"#!python3\n\nimport sys, os\n# from yahoo_finance import get_price\nfrom yahoo_finance import Share\n\nclass Portfolio:\n    def __init__(self, portfolio):\n        self.portfolio = portfolio\n        with open(portfolio) as f:\n            self.symbols = f.read().splitlines()\n    \n    def print_all_prices(self):\n        for element in self.symbols:\n            # cur = &#13;
Share(element)\n            cur = Share(element)\n            price = cur.get_price()\n            print(price)\n\n\n    def printvars(self):\n        print(self.portfolio)\n        print(self.symbols)\n        \n\ndef main(argv):\n    pyfile = sys.argv[0]\n    try:\n        portfile = sys.argv[1]\n    except:\n        print('USAGE: scrolling_ticker.py <portfolio file>')\n        sys.exit(2)\n    \n    MainPort = Portfolio(portfile)\n    MainPort.printvars()\n    MainPort.print_all_prices()\n\n\n    \nif __name__ == '__main__':\n    main(sys.argv)\n","sub_path":"scrolling_ticker.py","file_name":"scrolling_ticker.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"409035598","text":"#! /usr/bin/env python\nfrom __future__ import absolute_import\n\nimport sys\nimport threading\nfrom optparse import OptionParser\nimport time\n\nfrom joinmarket import OrderbookWatch, load_program_config, IRCMessageChannel\nfrom joinmarket import jm_single, MessageChannelCollection\nfrom joinmarket import random_nick\nfrom joinmarket import get_log, debug_dump_object, get_irc_mchannels\n\nlog = get_log()\n\nclass BroadcastThread(threading.Thread):\n\n    def __init__(self, taker):\n        threading.Thread.__init__(self)\n        self.daemon = True\n        self.taker = taker\n\n    def run(self):\n        print('waiting for all orders to certainly arrive')\n        time.sleep(self.taker.waittime)\n        crow = self.taker.db.execute(\n            'SELECT DISTINCT counterparty FROM orderbook ORDER BY RANDOM() LIMIT 1;'\n        ).fetchone()\n        counterparty = crow['counterparty']\n        log.debug('sending tx to ' + counterparty)\n        self.taker.msgchan.push_tx(counterparty, self.taker.txhex)\n        time.sleep(30) #wait for the irc throttle thread to send everything\n        #when the tx notify callback is written, use that instead of a hardcoded wait\n        self.taker.msgchan.shutdown()\n\nclass Broadcaster(OrderbookWatch):\n\n    def __init__(self, msgchan, waittime, txhex):\n        OrderbookWatch.__init__(self, msgchan)\n        self.waittime = waittime\n        self.txhex = txhex\n\n    def on_welcome(self):\n        OrderbookWatch.on_welcome(self)\n        BroadcastThread(self).start()\n\ndef main():\n    parser = OptionParser(\n        usage=\n        'usage: %prog [options] [tx hex]',\n        description='Sends a transaction to a random market maker requesting that they broadcast it '\n        +\n        'to the wider bitcoin network. 
Used to add a layer between your own IP address and the network '\n +\n 'where other methods are not possible.')\n parser.add_option(\n '-w',\n '--wait-time',\n action='store',\n type='float',\n dest='waittime',\n help='wait time in seconds to allow orders to arrive, default=5',\n default=10)\n (options, args) = parser.parse_args()\n\n if len(args) < 1:\n parser.error('Needs a transaction hex string')\n sys.exit(0)\n txhex = args[0]\n\n load_program_config()\n jm_single().nickname = random_nick()\n log.debug('starting broadcast-tx')\n mcs = [IRCMessageChannel(c, jm_single().nickname) for c in get_irc_mchannels()]\n mcc = MessageChannelCollection(mcs)\n taker = Broadcaster(mcc, options.waittime, txhex)\n try:\n log.debug('starting message channels')\n mcc.run()\n except:\n log.debug('CRASHING, DUMPING EVERYTHING')\n debug_dump_object(taker)\n import traceback\n log.debug(traceback.format_exc())\n\nif __name__ == \"__main__\":\n main()\n print('done')\n","sub_path":"broadcast-tx.py","file_name":"broadcast-tx.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"531177204","text":"\n# coding: utf-8\n\n# In[1]:\n\n##Includes\nfrom flask import request, url_for\nfrom flask_api import FlaskAPI, status, exceptions\nfrom flask_cors import CORS, cross_origin\nfrom flask import Blueprint, render_template, abort\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport pickle\nimport json\n\n\n\nfrom google.cloud import bigquery\n\napi_common = Blueprint('api_common', __name__)\nCORS(api_common)\nCORS(api_common,resources={r\"/api_common/*/\": {\"origins\": \"*\"}})\n#cors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n######flask_api\n\n\n\n#http://localhost:5000/api_common/get_tables/\n@api_common.route(\"/api_common/get_tables/\", methods=['GET','POST'])\ndef get_tables():\n \n dataset = request.json['dataset']\n client = bigquery.Client()\n sql = \"SELECT * FROM \"+dataset+\".__TABLES__\"\n print (sql)\n df = client.query(sql).to_dataframe()\n json = df.to_json(orient='records')\n \n return json\n\n\t\n #http://localhost:5000/api_common/get_fields/\n@api_common.route(\"/api_common/get_fields/\", methods=['GET','POST'])\ndef get_fields():\n \n dataset = request.json['dataset']\n tablename = request.json['tablename']\n client = bigquery.Client()\n sql = \"SELECT * EXCEPT(is_generated, generation_expression, is_stored, is_updatable) FROM \" \\\n + dataset+\".INFORMATION_SCHEMA.COLUMNS where table_name='\"+tablename+\"'\"\n \n print (sql)\n df = client.query(sql).to_dataframe()\n json = df.to_json(orient='records')\n \n return json\n\n#process()\n\n\n\n#\n","sub_path":"apps/0_aas_v1/middle/flask_api/api_common.py","file_name":"api_common.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"190569419","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom TransformLayer import ColorJitterLayer\n\n\ndef random_crop(imgs, out=84):\n \"\"\"\n args:\n imgs: np.array shape (B,C,H,W)\n out: output size (e.g. 
84)\n    returns np.array\n    \"\"\"\n    h, w, c = imgs.shape\n    n = 1\n    crop_max = h - out + 1\n    w1 = np.random.randint(0, crop_max, n)\n    h1 = np.random.randint(0, crop_max, n)\n    w11 = int(w1)\n    h11 = int(h1)\n    cropped = imgs[h11:h11 + out, w11:w11 + out,:]\n    return cropped\n\n\ndef grayscale(imgs):\n    # imgs: h x w x c\n    h, w, c = imgs.shape\n    b = 1\n    frames = c // 3\n    out = imgs.copy()\n    \n    # luminosity-weighted average of the RGB channels, broadcast back to all three channels\n    imgs = imgs[:, :, 0] * 0.2989 + imgs[:, :, 1] * 0.587 + imgs[:, :, 2] * 0.114\n    out[:,:,0] = imgs\n    out[:, :, 1] = imgs\n    out[:, :, 2] = imgs\n\n    return out\n\ndef random_grayscale(images,p=.3):\n    \"\"\"\n    args:\n        images: np.array shape (H,W,C)\n        p: probability of applying the aug (currently unused)\n    returns np.array\n    \"\"\"\n    images = images * 255.\n\n    # images: [H, W, C]\n    h, w, channels = images.shape\n    bs = 1\n    gray_images = grayscale(images)\n\n    return gray_images\n\n# random cutout\n# TODO: should mask this \n\ndef random_cutout(imgs, min_cut=10,max_cut=30):\n    \"\"\"\n    args:\n        imgs: np.array shape (H,W,C)\n        min / max cut: int, min / max size of cutout \n    returns np.array\n    \"\"\"\n\n    h, w, c = imgs.shape\n    n = 1\n    w1 = np.random.randint(min_cut, max_cut, n)\n    w11 = int(w1)\n    h1 = np.random.randint(min_cut, max_cut, n)\n    h11 = int(h1)\n    cutouts = imgs.copy()  # keep the original pixels outside the cutout box\n    cutouts[h11:h11 + h11, w11:w11 + w11, :] = 0\n\n    return cutouts\n\ndef random_cutout_color(imgs, min_cut=10,max_cut=30):\n    \"\"\"\n    args:\n        imgs: shape (H,W,C)\n        out: output size (e.g. 
84)\n \"\"\"\n\n h, w, c = imgs.shape\n n = 1\n w1 = np.random.randint(min_cut, max_cut, n)\n w11 = int(w1)\n h1 = np.random.randint(min_cut, max_cut, n)\n h11 = int(h1)\n #cutouts = np.empty((n, c, h, w), dtype=imgs.dtype)\n rand_box = np.random.randint(0, 255, size=(c,n)) / 255.\n\n cut_img = imgs.copy()\n a = rand_box.reshape(-1,1,1)\n b = cut_img[h11:h11 + h11, w11:w11 + w11].shape[:2]\n # add random box\n d = cut_img[h11:h11 + h11, w11:w11 + w11, :]\n temp = np.tile(\n rand_box.reshape(-1,1,1),\n cut_img[h11:h11 + h11, w11:w11 + w11].shape[:2])\n cut_img[h11:h11 + h11, w11:w11 + w11, 0] =temp[0,:,:]\n cut_img[h11:h11 + h11, w11:w11 + w11, 1] = temp[1, :, :]\n cut_img[h11:h11 + h11, w11:w11 + w11, 2] = temp[2, :, :]\n #cutouts[i] = cut_img\n return cut_img\n\n# random flip\n\ndef random_flip(images,p=.2):\n \n #args:\n #imgs: torch.tensor shape (B,C,H,W)\n #device: cpu or gpu, \n #p: prob of applying aug,\n #returns torch.tensor\n \n # images: [B, C, H, W]\n\n h, w, channels = images.shape\n bs = 1\n \n #images = images.to(device)\n\n flipped_images = np.flip(images)\n \n rnd = np.random.uniform(0., 1., size=(channels,))\n mask = rnd <= p\n mask = torch.from_numpy(mask)\n frames = images.shape[1] #// 3\n images = images.view(*flipped_images.shape)\n mask = mask[:, None] * torch.ones([1, frames]).type(mask.dtype)\n \n mask = mask.type(images.dtype)\n mask = mask[:, :, None, None]\n \n out = mask * flipped_images + (1 - mask) * images\n\n out = out.view([ h, w,bs])\n return out\n\n# random rotation\n\ndef random_rotation(images,p=.3):\n \"\"\"\n args:\n imgs: torch.tensor shape (B,C,H,W)\n device: str, cpu or gpu, \n p: float, prob of applying aug,\n returns torch.tensor\n \"\"\"\n #device = images.device\n # images: [B, C, H, W]\n #bs, channels, h, w = images.shape\n h, w, channels = images.shape\n bs = 1\n #images = images.to(device)\n\n rot90_images = np.rot90(images,1,[0,1])\n rot180_images = np.rot90(images,2,[0,1])\n rot270_images = np.rot90(images,3,[0,1])\n \n rnd = np.random.uniform(0., 1., size=(256,))\n rnd_rot = np.random.randint(1, 4, size=(256,))\n mask = rnd <= p\n mask = rnd_rot * mask\n mask = torch.from_numpy(mask)\n \n frames = 256\n masks = [torch.zeros_like(mask) for _ in range(4)]\n for i,m in enumerate(masks):\n m[torch.where(mask==i)] = 1\n m = m[:, None] * torch.ones([frames])#.type(mask.dtype).type(images.dtype)\n m = m[:,:,None]*torch.ones([3])\n masks[i] = m\n\n\n \n \n out = masks[0] * images + masks[1] * rot90_images + masks[2] * rot180_images + masks[3] * rot270_images\n\n out = out.view([h, w, 3])\n return out\n\n\n# random color\n\n \n\n\n\n\n\n\ndef random_translate(imgs, size=300, return_random_idxs=False, h1s=None, w1s=None):\n h, w, c = imgs.shape\n n = 1\n assert size >= h and size >= w\n outs = np.zeros((size, size,c ), dtype=imgs.dtype)\n h1s = np.random.randint(0, size - h + 1, n) if h1s is None else h1s\n w1s = np.random.randint(0, size - w + 1, n) if w1s is None else w1s\n h1s = int(h1s)\n w1s = int(w1s)\n outs[h1s:h1s + h, w1s:w1s + w,:] = imgs\n if return_random_idxs: # So can do the same to another set of imgs.\n return outs, dict(h1s=h1s, w1s=w1s)\n return outs\n\n\ndef no_aug(x):\n return x\n\n\nif __name__ == '__main__':\n import time \n from tabulate import tabulate\n def now():\n return time.time()\n def secs(t):\n s = now() - t\n tot = round((1e5 * s)/60,1)\n return round(s,3),tot\n\n x = np.load('data_sample.npy',allow_pickle=True)\n x = np.concatenate([x,x,x],1)\n #device = torch.device('cuda' if torch.cuda.is_available() else 
'cpu')\n\n x = torch.from_numpy(x)\n x = x.float() / 255.\n\n # crop\n t = now()\n random_crop(x.cpu().numpy(),64)\n s1,tot1 = secs(t)\n # grayscale \n t = now()\n random_grayscale(x,p=.5)\n s2,tot2 = secs(t)\n # normal cutout \n t = now()\n random_cutout(x.cpu().numpy(),10,30)\n s3,tot3 = secs(t)\n # color cutout \n t = now()\n random_cutout_color(x.cpu().numpy(),10,30)\n s4,tot4 = secs(t)\n # flip \n t = now()\n random_flip(x,p=.5)\n s5,tot5 = secs(t)\n # rotate \n t = now()\n random_rotation(x,p=.5)\n s6,tot6 = secs(t)\n # rand conv \n t = now()\n random_convolution(x)\n s7,tot7 = secs(t)\n # rand color jitter \n t = now()\n random_color_jitter(x)\n s8,tot8 = secs(t)\n \n print(tabulate([['Crop', s1,tot1], \n ['Grayscale', s2,tot2], \n ['Normal Cutout', s3,tot3], \n ['Color Cutout', s4,tot4], \n ['Flip', s5,tot5], \n ['Rotate', s6,tot6], \n ['Rand Conv', s7,tot7], \n ['Color Jitter', s8,tot8]], \n headers=['Data Aug', 'Time / batch (secs)', 'Time / 100k steps (mins)']))\n\n","sub_path":"data_augs.py","file_name":"data_augs.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"647395824","text":"\"\"\"\r\nGiven a linked list of size N. The task is to reverse every k nodes (where k is an input to the function) in the linked list.\r\n\r\nInput:\r\nFirst line of input contains number of testcases T. For each testcase, first line contains length of linked list and next line contains the linked list elements.\r\n\r\nOutput:\r\nFor each testcase, there will be a single line of output which contains the linked list with every k group elements reversed.\r\n\r\nExample:\r\nInput:\r\n1\r\n8\r\n1 2 2 4 5 6 7 8\r\n4\r\n\r\nOutput:\r\n4 2 2 1 8 7 6 5\r\n\r\nExplanation:\r\nTestcase 2: Since, k = 4. So, we have to reverse everty group of two elements. 
Modified linked list is as 4, 2, 2, 1, 8, 7, 6, 5.\"\"\"\r\n\r\nclass Node:\r\n    def __init__(self, val, next=None):\r\n        self.val = val\r\n        self.next = next\r\n\r\n    def __str__(self):\r\n        return str(self.val)\r\n\r\nclass LinkedList:\r\n    def __init__(self, head=None):\r\n        self.head = head\r\n\r\n    def push(self, val):\r\n        new_node = Node(val)\r\n        new_node.next = self.head\r\n        self.head = new_node\r\n\r\n    def printList(self):\r\n        temp = self.head\r\n        while temp is not None:\r\n            print(temp)\r\n            temp = temp.next\r\n\r\n    def reverse(self):\r\n        prev = None\r\n        curr = self.head\r\n        while curr is not None:\r\n            next = curr.next\r\n            curr.next = prev\r\n            prev = curr\r\n            curr = next\r\n        self.head = prev\r\n\r\n    def reverseK(self, k):\r\n        new_head = None\r\n        prev_group_tail = None\r\n        curr = self.head\r\n        while curr is not None:\r\n            group_head = curr\r\n            prev = None\r\n            for i in range(k):\r\n                if curr is None:\r\n                    break\r\n                next = curr.next\r\n                curr.next = prev\r\n                prev = curr\r\n                curr = next\r\n            # prev now heads the reversed group; group_head is its tail\r\n            if new_head is None:\r\n                new_head = prev\r\n            if prev_group_tail is not None:\r\n                prev_group_tail.next = prev\r\n            prev_group_tail = group_head\r\n        self.head = new_head\r\n\r\nllist = LinkedList()\r\nllist.push(8)\r\nllist.push(7)\r\nllist.push(6)\r\nllist.push(5)\r\nllist.push(4)\r\nllist.push(3)\r\nllist.push(2)\r\nllist.push(1)\r\nprint(\"input\")\r\nllist.printList()\r\nllist.reverseK(3)\r\nprint(\"reverse\")\r\nllist.printList()","sub_path":"LinkedListTest2.py","file_name":"LinkedListTest2.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"113345634","text":"from numpy import genfromtxt\nimport numpy as np\nfrom sklearn import datasets, linear_model\n\ndatapath = r\"DiliveryDummy.csv\"\ndeliveryData = genfromtxt(datapath, delimiter=',')\n\n# print(deliveryData)\n\nX = deliveryData[:, :-1]\nY = deliveryData[:, -1]\nprint(\"X:\",X,\"Y\",Y)\nprint(deliveryData)\n# print(X)\n\nregr = linear_model.LinearRegression()\nregr.fit(X,Y)\n#\n# print(regr.coef_, regr.intercept_)\n\nxPred = [[106, 6, 1, 1, 0, 2]]\nyPred = regr.predict(xPred)\nprint(yPred)","sub_path":"regression_training/multipleRegDummy.py","file_name":"multipleRegDummy.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"159294050","text":"my_dict = {\"a\":1, \"b\":2, \"c\":3}\n \ntry:\n    value = my_dict[\"d\"]\nexcept IndexError:\n    print(\"This index does not exist!\")\nexcept KeyError:\n    print(\"This key is not in the dictionary!\")\nexcept:\n    print(\"Some other error occurred!\")\n\ntry:\n    your_code\nexcept (IOError, Exception) as e:\n    print(e)\n\n \ntry:\n    f = open(\"no-file\")\nexcept IOError as err:\n    print(\"Ошибка:\", err)\n    print(\"Код:\", err.errno)\n","sub_path":"keyError.py","file_name":"keyError.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"174406425","text":"\n# coding: utf-8\n\n\"\"\"\nDefinição de valores e menus\n\"\"\"\n\n\nfrom file_ops import *\nfrom image_ops import *\n\n\n# Controlo de actualização da imagem afixada\nNAO_ACTUALIZA = 0\nACTUALIZA = 1\n\n# Tamanho máximo de mensagem ocultada em imagem\nMAX_MSG = 100\n\n\n# Definições para opcções do menu principal\nMAIN_FIM = 0\nMAIN_CARREGA_FICH = 1\nMAIN_GUARDA_FICH = 2\nMAIN_MOLDURA = 3\nMAIN_NEGATIVO = 4\nMAIN_CINZA = 5\nMAIN_RODA = 6\nMAIN_MIRRORING = 7\nMAIN_CORTE = 8\nMAIN_AJUSTES = 9\nMAIN_ENCRIPTA = 10\nMAIN_ESTEGANO = 11\n\n\n# Definições para opcções do menu de rotações\nROT_FIM = 0\nROT_90 = 1\nROT_180 = 2\n\n\n# 
Definições para opcções do menu de moldura\nMOD_FIM = 0\nMOD_5 = 1\nMOD_10 = 2\nMOD_15 = 3\nMOD_20 = 4\n\n\n# Definições para opcções do menu de flip\nFLIP_FIM = 0\nFLIP_HOR = 1\nFLIP_VER = 2\n\n\n# Definições para opcções do menu de ajustes RGB\nAJU_FIM = 0\nAJU_RED = 1\nAJU_GREEN = 2\nAJU_BLUE = 3\n\n\n# Definições para opcções do menu de encriptação\nENC_FIM = 0\nENC_ENC = 1\nENC_DEC = 2\n\n\n# Definições para opcções do menu de ocultação\nOCU_FIM = 0\nOCU_LER = 1\nOCU_ESC = 2\n\n\n# Opcções do menu principal\nmenu_principal = { MAIN_CARREGA_FICH : \"Carregar imagem de ficheiro\", \\\n MAIN_GUARDA_FICH : \"Guardar imagem em ficheiro\", \\\n MAIN_MOLDURA : \"Criacao de moldura em imagem\", \\\n MAIN_NEGATIVO : \"Passagem a negativo de imagem\",\n MAIN_CINZA: \"Passagem de imagem a tons de cinza\",\\\n MAIN_RODA : \"Rotacao da imagem\", \\\n MAIN_MIRRORING : \"Mirroring horizontal ou vertical da imagem\", \\\n MAIN_CORTE : \"Corte da imagem\",\\\n MAIN_AJUSTES : \"Alteracao de vermelhos, verdes e azuis\", \\\n MAIN_ENCRIPTA : \"Encriptacao de imagem\", \\\n MAIN_ESTEGANO : \"Ocultacao de mensagem em imagem\", \\\n MAIN_FIM : \"*Terminar programa*\", \\\n }\n\n\n# Funções correspondentes às opcções do menu principal\naccoes_menu_principal = { MAIN_CARREGA_FICH : read_image_from_file, \\\n MAIN_GUARDA_FICH : write_image_to_file, \\\n MAIN_MOLDURA : cria_moldura, \\\n MAIN_NEGATIVO : transforma_negativo, \\\n MAIN_CINZA : transforma_cinza, \\\n MAIN_RODA : roda_imagem, \\\n MAIN_MIRRORING : flip_imagem, \\\n MAIN_CORTE : corta_imagem, \\\n MAIN_AJUSTES : ajusta_imagem, \\\n MAIN_ENCRIPTA : encripta_imagem, \\\n MAIN_ESTEGANO : oculta_mensagem\n }\n\n\n# Opcções do menu rotação\nmenu_rotacao = { ROT_90 : \"Rotacao (clockwise) de 90 graus\", \\\n ROT_180 : \"Rotacao (clockwise) de 180 graus\", \\\n ROT_FIM : \"*Regressar ao menu principal\", \\\n }\n\n\n# Opcções do menu moldura\nmenu_moldura = { MOD_5 : \"Cria moldura com 5 pixes\", \\\n MOD_10 : \"Cria moldura com 10 pixeis\", \\\n MOD_15 : \"Cria moldura com 15 pixeis\", \\\n MOD_20 : \"Cria moldura com 20 pixeis\", \\\n MOD_FIM : \"*Regressa ao menu principal\", \\\n }\n \n\n\n# Opcções do menu flip\nmenu_flip = { FLIP_HOR : \"Flip horizontal da imagem\", \\\n FLIP_VER : \"Flip vertical da imagem\", \\\n FLIP_FIM : \"*Regressa ao menu principal\", \\\n }\n\n\n# Opcções do menu flip\nmenu_ajustes = { AJU_RED : \"Ajuste do componente RED\", \\\n AJU_GREEN : \"Ajuste do componente GREEN\", \\\n AJU_BLUE : \"Ajuste do componente BLUE\", \\\n AJU_FIM : \"*Regressa ao menu principal\", \\\n }\n\n\n# Opcções do menu encriptação\nmenu_encriptacao = { ENC_ENC : \"Cifrar imagem activa em memoria\", \\\n ENC_DEC : \"Decifrar imagem activa em memoria\", \\\n ENC_FIM : \"*Regressa ao menu principal\", \\\n }\n\n\n# Opcções do menu ocultar\nmenu_ocultar = { OCU_LER : \"Ler mensagem oculta em imagem\", \\\n OCU_ESC : \"Escrever mensagem oculta em imagem\", \\\n OCU_FIM : \"*Regressa ao menu principal\", \\\n }\n\n\n\n# Devolve informação acerca do módulo se chamado individualmente\ndef main():\n print (\"Informação acerca da utilização deste módulo:\")\n print (__doc__)\n \nif __name__ == '__main__':\n main()\nelse:\n print (\"defines loaded as a module\")\n \n ","sub_path":"visoes_2/programas/Projecto/programas/defines.py","file_name":"defines.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"610999258","text":"# %%\r\nimport rasterio\r\nimport 
rasterio.plot\r\nimport pyproj\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\n' Indicador - NDVI '\r\ndef calc_ndvi(nir,red):\r\n '''Calcular NDVI a partir de matrizes inteiras'''\r\n nir = nir.astype('f4')\r\n red = red.astype('f4')\r\n ndvi = (nir - red) / (nir + red)\r\n return ndvi\r\n\r\n# Imagens de 2016\r\ndate = '2017-06-16'\r\nurl = 'http://landsat-pds.s3.amazonaws.com/c1/L8/042/034/LC08_L1TP_042034_20170616_20170629_01_T1/'\r\nredband = 'LC08_L1TP_042034_20170616_20170629_01_T1_B{}.TIF'.format(4)\r\nnirband = 'LC08_L1TP_042034_20170616_20170629_01_T1_B{}.TIF'.format(5)\r\n\r\n# Imagens de 2019\r\ndate2 = '2018-06-19'\r\nurl2 = 'http://landsat-pds.s3.amazonaws.com/c1/L8/042/034/LC08_L1TP_042034_20180619_20180703_01_T1/'\r\nredband2 = 'LC08_L1TP_042034_20180619_20180703_01_T1_B{}.TIF'.format(4)\r\nnirband2 = 'LC08_L1TP_042034_20180619_20180703_01_T1_B{}.TIF'.format(5)\r\n\r\n\r\n' BANDA 4 - Vermelha'\r\n# 2016\r\nwith rasterio.open(url+redband) as src:\r\n profile = src.profile\r\n oviews = src.overviews(1) # lista de visões gerais do maior ao menor\r\n oview = oviews[1] # Use a visão geral de segunda maior resolução\r\n print('Fator de dizimação = {}'.format(oview))\r\n red = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))\r\n\r\n# 2019\r\nfilepath = url2+redband2\r\nwith rasterio.open(filepath) as src:\r\n print('Opening:', filepath)\r\n oviews = src.overviews(1) # lista de visões gerais do maior ao menor\r\n oview = oviews[1] # Use a visão geral de segunda maior resolução\r\n print('Decimation factor= {}'.format(oview))\r\n red2 = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))\r\n\r\n\r\n' BANDA 5 - Infravermelho próximo'\r\n# 2016\r\nwith rasterio.open(url+nirband) as src:\r\n oviews = src.overviews(1) # lista de visões gerais do maior ao menor\r\n oview = oviews[1] # Use a visão geral de segunda maior resolução\r\n nir = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))\r\n\r\n# 2019\r\nfilepath = url2+nirband2\r\nwith rasterio.open(filepath) as src:\r\n print('Opening:', filepath)\r\n oviews = src.overviews(1) # lista de visões gerais do maior ao menor\r\n oview = oviews[1] # Use a visão geral de segunda maior resolução\r\n print('Decimation factor= {}'.format(oview))\r\n nir2 = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))\r\n\r\n# Calculo do NDVI\r\nndvi = calc_ndvi(nir,red)\r\nndvi2 = calc_ndvi(nir2, red2)\r\n\r\n# Plotando os resultados:\r\nfig, axes = plt.subplots(1,3, figsize=(14,6), sharex=True, sharey=True)\r\n\r\nplt.sca(axes[0])\r\nplt.imshow(ndvi, cmap='RdYlGn', vmin=-1, vmax=1)\r\nplt.colorbar(shrink=0.5)\r\nplt.title('NDVI {}'.format(date))\r\nplt.xlabel('Coluna #')\r\nplt.ylabel('Linha #')\r\n\r\nplt.sca(axes[1])\r\nplt.imshow(ndvi2, cmap='RdYlGn', vmin=-1, vmax=1)\r\nplt.colorbar(shrink=0.5)\r\nplt.title('NDVI {}'.format(date2))\r\n\r\nplt.sca(axes[2])\r\nplt.imshow(ndvi2 - ndvi, cmap='bwr', vmin=-1, vmax=1)\r\nplt.colorbar(shrink=0.5)\r\nplt.title('Diferença entre datas:\\n ({} - {})'.format(date2, date))\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"2_mudanca_NDVI_tempo.py","file_name":"2_mudanca_NDVI_tempo.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"531143070","text":"import urllib\nimport json\n\napiurl = \"https://maps.googleapis.com/maps/api/geocode/json?\"\n\nwhile 
True:\n\taddress = raw_input('Enter location: ') # Get the address from the user\n\tif len(address) < 1 : break # Exit if no address was entered\n\t\n\t# Encodes some data for the website parameters\n\turl = apiurl + urllib.urlencode({'sensor' : 'false', 'address':address})\n\t\n\t#print (\"Retrieving the data at\" + url)\n\t\n\t# Open the url and read it\n\tdata = urllib.urlopen(url).read()\n\t\n\t#print (\"Retrieved \", len(data), \" characters\")\n\t\n\t# Attempt to parse the data\n\ttry: js = json.loads(str(data))\n\texcept: js = None\n\t\n\t#Quit if the data is bad or could not be parsed\n\tif js is None or 'status' not in js or js[\"status\"] != \"OK\":\n\t\tprint(\"Bad data... it was:\")\n\t\tprint(data)\n\t\tcontinue\n\t\n\t#print(\"The data : \")\n\t#print(json.dumps(js, indent=4))\n\t\n\tlatitude = js[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n\tlongitude = js[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\t\n\tprint(\"Latitude: \" + str(latitude) + \", Longitude: \" + str(longitude))","sub_path":"Python/Networking/apitest.py","file_name":"apitest.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"610991893","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom modular import click, exports, imports\nfrom os.path import join\nimport sys\n\n\n@exports('gen')\ndef gen(target, context=None, options=None):\n\n    context = context or {}\n    options = options or {}\n\n    targets = {\n        'apb3': {\n            'files': [{\n                'expand': True,\n                'cwd': 'apb3/',\n                'src': [\n                    'sim/**/*.sv', 'sim/**/*.tcl', 'sim/**/*.f',\n                    'uvc/**/*.sv', 'sim/**/*.svh', 'sim/**/*.f'\n                ],\n                'dest': './'\n            }]\n        },\n        'axi3': {\n            'files': [{\n                'expand': True,\n                'cwd': 'axi3/',\n                'src': [\n                    'sim/**/*.sv', 'sim/**/*.tcl', 'sim/**/*.f',\n                    'uvc/**/*.sv', 'sim/**/*.svh', 'sim/**/*.f'\n                ],\n                'dest': './'\n            }]\n        }\n    }\n\n    imports('orbit-uvc-gen:gen')(**{\n        'context': {\n            'addr_bits': context['addr_bits'],\n            'data_bits': context['data_bits'],\n            'modules': {'uvm-1.2': imports('uvm-1.2', config='path')}\n        },\n        'files': {target: targets[target]},\n        'options': options\n    })\n\n\n@click.command('gen', short_help='Parameterize module')\n@click.argument('target', required=True)\n@click.option('--txid_bits', default=8, type=int)\n@click.option('--addr_bits', default=32, type=int)\n@click.option('--data_bits', default=32, type=int)\ndef main(target, txid_bits, addr_bits, data_bits):\n    \"\"\"Parameterize module\"\"\"\n\n    if target == 'apb3':\n        context = {\n            'addr_bits': addr_bits,\n            'data_bits': data_bits\n        }\n        to = join('.modular', 'orbit-apb3-%d-%d' % (\n            addr_bits, data_bits\n        ))\n\n    if target == 'axi3':\n        context = {\n            'txid_bits': txid_bits,\n            'addr_bits': addr_bits,\n            'data_bits': data_bits\n        }\n        to = join('.modular', 'orbit-axi3-%d-%d-%d' % (\n            txid_bits, addr_bits, data_bits\n        ))\n\n    gen(target, context, options={'from': '.', 'to': to})\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"256336558","text":"#!/usr/bin/python\n\n# File: run_job.py\n#\n# Author: Suchandra Thapa\n# e-mail: sthapa@ci.uchicago.edu\n#\n#\n# Copyright (c) University of Chicago. 
2013\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os, subprocess, shutil, optparse, platform, tempfile, urllib2\nimport tarfile, urlparse, time, re\n\nTICKET_CONTENTS = \"\"\"\n\"\"\"\nOSGC_PROXY = 'http://squid.osgconnect.net:3128'\nCATALOG_HOST = 'stash.osgconnect.net'\nCHIRP_MOUNT = ''\nWEB_PROXY= ''\nUSER_PROXY = \"\"\"\"\"\"\nAPP_URL = 'http://stash.osgconnect.net/+sthapa/parrot-root.tar.gz'\nPARROT_URL = 'http://stash.osgconnect.net/+sthapa/parrot4.1'\nJOB_SCRIPT = './parrot-root/read-multiple.sh'\nJOB_ARGS = \"\"\nCVMFS_INFO = {'atlas.cern.ch': {'options': 'url=http://cvmfs.racf.bnl.gov:8000/opt/atlas;http://cvmfs-stratum-one.cern.ch:8000/opt/atlas,pubkey=cern.ch.pub,proxies=squid.osgconnect.net:3128', 'key': 'http://stash.osgconnect.net/+sthapa/cern.ch.pub'}, 'oasis.opensciencegrid.org': {'options': 'url=http://oasis-replica.opensciencegrid.org:8000/cvmfs/oasis,pubkey=opensciencegrid.org.pub,proxies=squid.osgconnect.net:3128', 'key': 'http://stash.osgconnect.net/keys/opensciencegrid.org.pub'}, 'atlas-nightlies.cern.ch': {'options': \"url=http://cvmfs-atlas-nightlies.cern.ch/cvmfs/atlas-nightlies.cern.ch,pubkey=cern.ch.pub,proxies=squid.osgconnect.net:3128'\", 'key': 'http://stash.osgconnect.net/+sthapa/cern.ch.pub'}, 'atlas-condb.cern.ch': {'options': 'url=http://cvmfs.racf.bnl.gov:8000/opt/atlas-condb;http://cvmfs-stratum-one.cern.ch:8000/opt/atlas-condb,pubkey=cern.ch.pub,proxies=squid.osgconnect.net:3128', 'key': 'http://stash.osgconnect.net/+sthapa/cern.ch.pub'}}\nVERSION = '0.16-osgc'\n\ndef write_ticket(directory):\n \"\"\"\n Write out ticket information in directory specified\n \"\"\"\n if not os.path.exists(directory) or not os.path.isdir(directory):\n return None\n try:\n ticket = open(os.path.join(directory, 'chirp.ticket'), 'w')\n ticket.write(TICKET_CONTENTS)\n ticket.close()\n return True\n except IOError:\n return None\n\ndef write_proxy(directory):\n \"\"\"\n Extract and create user proxy file for application\n \"\"\"\n if not os.path.exists(directory) or not os.path.isdir(directory):\n return None\n try:\n ticket = open(os.path.join(directory, 'user_proxy'), 'w')\n ticket.write(USER_PROXY)\n ticket.close()\n return True\n except IOError:\n return None\n\ndef ticket_valid(): \n \"\"\"\n Check a ticket to see if it's still valid\n \"\"\"\n if TICKET_CONTENTS == \"\":\n # Don't need to worry about ticket expiration if the ticket is not present\n return True\n ticket_expiration = re.compile(r'Expires on (\\w+\\s+\\w+\\s+\\d{1,2}\\s+\\d\\d:\\d\\d:\\d\\d\\s+\\d{4})')\n match = ticket_expiration.search(TICKET_CONTENTS)\n if match is None:\n # if no expiration written, assume ticket doesn't expire\n return True\n expiration = time.strptime(match.group(1),\n \"%a %b %d %H:%M:%S %Y\")\n return time.mktime(expiration) > time.time() \n\ndef download_tarball(url, path):\n \"\"\"Download a tarball from a given url and extract it to specified path\"\"\"\n\n (fhandle, download_file) = tempfile.mkstemp(dir=path)\n url_handle = urllib2.urlopen(url)\n url_data = 
url_handle.read(2048)\n while url_data:\n os.write(fhandle, url_data)\n url_data = url_handle.read(2048)\n os.close(fhandle)\n downloaded_tarfile = tarfile.open(download_file)\n cur_dir = os.getcwd()\n os.chdir(path)\n extract_path = os.path.join(path,\n downloaded_tarfile.getmembers()[0].name)\n for tar_info in downloaded_tarfile:\n downloaded_tarfile.extract(tar_info)\n os.unlink(download_file)\n os.chdir(cur_dir)\n return extract_path\n\ndef setup_application(directory):\n \"\"\"\n Download application binaries and setup in temp directory\n \"\"\"\n app_path = download_tarball(APP_URL, directory)\n return app_path\n\ndef setup_parrot(directory):\n \"\"\"\n Download correct parrot binaries and setup in temp directory\n \"\"\"\n sys_ver = platform.dist()[1][0]\n parrot_url = PARROT_URL + \"/parrot-sl%s.tar.gz\" % sys_ver\n parrot_path = download_tarball(parrot_url, directory)\n return parrot_path\n\ndef generate_env(parrot_path, debug = False):\n \"\"\"\n Create a dict with the environment variables for binary + parrot\n \"\"\"\n job_env = os.environ.copy()\n \n if WEB_PROXY != \"\":\n job_env['http_proxy'] = WEB_PROXY\n job_env['HTTP_PROXY'] = WEB_PROXY\n else:\n job_env['http_proxy'] = OSGC_PROXY\n job_env['HTTP_PROXY'] = OSGC_PROXY\n \n if job_env.has_key('OSG_SQUID_LOCATION') and job_env['OSG_SQUID_LOCATION'] != 'UNAVAILABLE':\n job_env['http_proxy'] = job_env['OSG_SQUID_LOCATION']\n job_env['HTTP_PROXY'] = job_env['OSG_SQUID_LOCATION']\n \n job_env['PARROT_ALLOW_SWITCHING_CVMFS_REPOSITORIES'] = '1'\n job_env['PARROT_HELPER'] = os.path.join(parrot_path,\n 'parrot',\n 'lib',\n 'libparrot_helper.so')\n job_env['CHIRP_MOUNT'] = \"/remote_data\"\n job_env['CATALOG_HOST'] = CATALOG_HOST\n if debug:\n sys.stdout.write(\"Added env vars:\\n\")\n for i in ['http_proxy',\n 'HTTP_PROXY',\n 'PARROT_ALLOW_SWITCHING_CVMFS_REPOSITORIES',\n 'PARROT_HELPER',\n 'CHIRP_MOUNT',\n 'CATALOG_HOST']:\n sys.stdout.write(\"%s=%s\\n\" % (i, job_env[i]))\n return job_env \n\ndef update_proxy(cvmfs_options):\n \"\"\"\n Update cvmfs options to use local proxy if available\n \"\"\"\n new_proxies = \"\"\n if WEB_PROXY != \"\": \n new_proxies = WEB_PROXY + \";\"\n if os.environ.has_key('OSG_SQUID_LOCATION') and os.environ['OSG_SQUID_LOCATION'] != 'UNAVAILABLE':\n new_proxies += \"%s;\" % os.environ['OSG_SQUID_LOCATION']\n proxy_re = re.compile(r'proxies=(.*?)(,|$)')\n return proxy_re.sub(r'proxies=' + new_proxies + r'\\1\\2', cvmfs_options)\n\ndef create_cvmfs_options():\n \"\"\"\n Create CVMFS options for parrot\n \"\"\"\n if len(CVMFS_INFO) == 0:\n return ' '\n cvmfs_opts = ''\n for k in CVMFS_INFO:\n# if os.path.isdir(os.path.join('/', 'cvmfs', k)):\n# continue\n cvmfs_options = update_proxy(CVMFS_INFO[k]['options'])\n cvmfs_opts += \"%s:%s \" % (k, cvmfs_options)\n return cvmfs_opts[:-1]\n\ndef get_cvmfs_keys(temp_dir):\n \"\"\"\n Download cvmfs keys for repositories that have been defined\n \"\"\"\n for k in CVMFS_INFO:\n key_url = CVMFS_INFO[k]['key']\n url_handle = urllib2.urlopen(key_url)\n key_name = urlparse.urlparse(key_url)[2].split('/')[-1]\n key_file = open(os.path.join(temp_dir, key_name), 'w')\n url_data = url_handle.read(2048)\n while url_data:\n key_file.write(url_data)\n url_data = url_handle.read(2048)\n key_file.close()\n\ndef run_application(temp_dir, debug = False):\n \"\"\"\n Run specified user application in a parrot environment\n \"\"\"\n job_env = generate_env(temp_dir, debug)\n get_cvmfs_keys(temp_dir)\n job_args = ['./parrot/bin/parrot_run', \n '-F',\n '-t',\n '/tmp/root-shared',\n 
'-r',\n create_cvmfs_options()]\n if TICKET_CONTENTS != \"\":\n job_args.extend(['-i', 'chirp.ticket'])\n job_args.append(JOB_SCRIPT)\n if JOB_ARGS != \"\":\n job_args.extend(JOB_ARGS.split(' '))\n os.chdir(temp_dir)\n if len(sys.argv) > 1:\n job_args.extend(sys.argv[1:])\n if debug:\n sys.stdout.write(\"Parrot call:\\n %s\\n\" % (\" \".join(job_args)))\n return subprocess.call(job_args, env=job_env)\n\ndef main():\n \"\"\"Setup and run application\"\"\"\n parser = optparse.OptionParser(version=\"%prog \" + VERSION)\n parser.add_option(\"-d\", \"--debug\", \n dest=\"debug\",\n help=\"Enabling debugging\",\n action=\"store_true\", \n default=False)\n parser.add_option(\"--preserve-dir\", \n dest=\"preserve_dir\",\n help=\"Preserve working directory for debugging\",\n action=\"store_true\", \n default=False)\n (options, args) = parser.parse_args() \n try:\n temp_dir = tempfile.mkdtemp()\n except IOError:\n sys.stderr.write(\"Can't create temporary directory, exiting...\\n\")\n sys.exit(1)\n\n\n if TICKET_CONTENTS != \"\":\n if not ticket_valid():\n sys.stderr.write(\"ERROR: Ticket expired, exiting...\\n\")\n sys.exit(1)\n if not write_ticket(temp_dir):\n sys.stderr.write(\"Can't create ticket, exiting...\\n\")\n sys.exit(1)\n \n if USER_PROXY != \"\":\n if not write_proxy(temp_dir):\n sys.stderr.write(\"Can't create user proxy, exiting...\\n\")\n sys.exit(1)\n \n if not setup_parrot(temp_dir):\n sys.stderr.write(\"Can't download parrot binaries, exiting...\\n\")\n sys.exit(1)\n if APP_URL != '':\n if not setup_application(temp_dir): \n sys.stderr.write(\"Can't download application binaries, exiting...\\n\")\n sys.exit(1)\n exit_code = run_application(temp_dir, options.debug)\n if exit_code != 0:\n sys.stderr.write(\"Application exited with error\\n\")\n if options.debug:\n sys.stderr.write(\"Exit code: %d\\n\" % exit_code)\n sys.exit(exit_code)\n \n if options.preserve_dir:\n sys.stdout.write(\"Temp directory at %s\\n\" % temp_dir)\n else:\n shutil.rmtree(temp_dir)\n sys.exit(exit_code)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Root/cache-testing/shared/multiple_read_shared.py","file_name":"multiple_read_shared.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486504990","text":"from graph import Graph\n\ngraph = {\n 'A': ['B', 'E', 'H'],\n 'B': ['C', 'D'],\n 'E': ['F', 'G'],\n 'H': ['G', 'I'],\n 'C': [],\n 'D': [],\n 'F': [],\n 'I': [],\n 'G': []\n}\nif __name__ == '__main__':\n g = Graph()\n g.set_graph(graph)\n g.print_graph()\n print(\"Visited Nodes :\", g.BFS('A', 'F'))\n print(\"path to goal :\", g.path_to_goal('A', 'F'))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"461721631","text":"#!/usr/bin/python\n#\n# Copyright 2015 The Cluster-Insight Authors. 
All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Collects context metadata from Docker.\n\nAssumes the Docker daemon's remote API is enabled on port\nglobal_state.get_docker_port() on the Docker host in the master and\nminion nodes.\n\"\"\"\n\nimport json\nimport os\nimport re\nimport sys\nimport time\nimport types\n\nimport requests\n\n# local imports\nimport collector_error\nimport global_state\nimport kubernetes\nimport metrics\nimport utilities\n\n## Docker APIs\n\n\n# No decorator for this function signature.\ndef fetch_data(gs, url, base_name, expect_missing=False):\n \"\"\"Fetch the named URL from Kubernetes (in production) or a file (in a test).\n\n The input is always JSON. It is converted to an internal representation\n by this routine.\n\n Args:\n gs: global state.\n url: the URL to fetch the data from when running in production.\n base_name: fetch the data from the file\n 'testdata/' + base_name + '.input.json'\n when running in test mode.\n expect_missing: if True, then do not die in test mode when the test file\n is missing. Just raise ValueError. If False and the test file is not\n found in test mode, raise CollectorError.\n\n Returns:\n The data after converting it from JSON.\n\n Raises:\n ValueError: when 'expect_missing' is True and failed to open the file.\n CollectorError: if any other exception occured or 'expect_missing' is False.\n other exceptions which may be raised by fetching the URL in production mode.\n \"\"\"\n assert isinstance(gs, global_state.GlobalState)\n assert isinstance(url, types.StringTypes)\n assert isinstance(base_name, types.StringTypes)\n start_time = time.time()\n if gs.get_testing():\n # Read the data from a file.\n fname = 'testdata/' + base_name + '.input.json'\n try:\n f = open(fname, 'r')\n v = json.loads(f.read())\n f.close()\n gs.add_elapsed(start_time, fname, time.time() - start_time)\n return v\n except IOError:\n # File not found\n if expect_missing:\n raise ValueError\n else:\n msg = 'failed to read %s' % fname\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n except:\n msg = 'reading %s failed with exception %s' % (fname, sys.exc_info()[0])\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n else:\n # Send the request to Kubernetes\n v = requests.get(url).json()\n gs.add_elapsed(start_time, url, time.time() - start_time)\n return v\n\n\n@utilities.global_state_two_string_args\ndef _inspect_container(gs, docker_host, container_id):\n \"\"\"Fetch detailed information about the given container in the given host.\n\n Args:\n gs: global state.\n docker_host: Docker host name. Must not be empty.\n container_id: container ID. 
Must not be empty.\n\n Returns:\n (container_information, timestamp_in_seconds) if the container was found.\n (None, None) if the container was not found.\n\n Raises:\n CollectorError in case of failure to fetch data from Docker.\n Other exceptions may be raised due to exectution errors.\n \"\"\"\n url = 'http://{docker_host}:{port}/containers/{container_id}/json'.format(\n docker_host=docker_host, port=gs.get_docker_port(),\n container_id=container_id)\n fname = utilities.container_id_to_fname(\n docker_host, 'container', container_id)\n try:\n result = fetch_data(gs, url, fname, expect_missing=True)\n except ValueError:\n # TODO(vasbala): this container does not exist anymore.\n # What should we do here?\n return (None, time.time())\n except collector_error.CollectorError:\n raise\n except:\n msg = 'fetching %s failed with exception %s' % (url, sys.exc_info()[0])\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n if not isinstance(result, types.DictType):\n msg = 'fetching %s returns invalid data' % url\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n # Sort the \"Env\" attribute because it tends to contain elements in\n # a different order each time you fetch the container information.\n if isinstance(utilities.get_attribute(result, ['Config', 'Env']),\n types.ListType):\n # Sort the contents of the 'Env' list in place.\n result['Config']['Env'].sort()\n\n return (result, time.time())\n\n\n@utilities.global_state_string_args\ndef get_containers(gs, docker_host):\n \"\"\"Gets the list of all containers in 'docker_host'.\n\n Args:\n gs: global state.\n docker_host: the Docker host running the containers.\n\n Returns:\n list of wrapped container objects.\n Each element in the list is the result of\n utilities.wrap_object(container, 'Container', ...)\n\n Raises:\n CollectorError: in case of failure to fetch data from Docker.\n Other exceptions may be raised due to exectution errors.\n \"\"\"\n containers, timestamp = gs.get_containers_cache().lookup(docker_host)\n if timestamp is not None:\n gs.logger_info(\n 'get_containers(docker_host=%s) cache hit returns '\n '%d containers', docker_host, len(containers))\n return containers\n\n url = 'http://{docker_host}:{port}/containers/json'.format(\n docker_host=docker_host, port=gs.get_docker_port())\n # A typical value of 'docker_host' is:\n # k8s-guestbook-node-3.c.rising-apricot-840.internal\n # Use only the first period-seperated element for the test file name.\n fname = '{host}-containers'.format(host=docker_host.split('.')[0])\n try:\n containers_list = fetch_data(gs, url, fname)\n except collector_error.CollectorError:\n raise\n except:\n msg = ('fetching %s or %s failed with exception %s' %\n (url, fname, sys.exc_info()[0]))\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n if not isinstance(containers_list, types.ListType):\n msg = 'invalid response from fetching %s' % url\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n containers = []\n timestamps = []\n for container_info in containers_list:\n # NOTE: container 'Name' is stable across container re-starts whereas\n # container 'Id' is not.\n # This may be because Kubernertes assigns the Name while Docker assigns\n # the Id (?)\n # The container Name is the only element of the array 'Names' -\n # why is Names an array here?\n # skip the leading / in the Name\n if not (isinstance(container_info.get('Names'), types.ListType) and\n container_info['Names'] and\n 
utilities.valid_string(container_info['Names'][0]) and\n container_info['Names'][0][0] == '/'):\n msg = 'invalid containers data format. docker_host=%s' % docker_host\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n container_id = container_info['Names'][0][1:]\n container, ts = _inspect_container(gs, docker_host, container_id)\n if container is None:\n continue\n\n if not utilities.valid_string(container.get('Name')):\n msg = ('missing or invalid \"Name\" attribute in container %s' %\n container_id)\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n if container['Name'] != ('/' + container_id):\n msg = ('container %s\\'s Name attribute is \"%s\"; expecting \"%s\"' %\n (container_id, container['Name'], '/' + container_id))\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n # The 'container_id' is most often unique, because it contains long\n # unique hex numbers. However, in some cases the 'container_id' is simply\n # the image name, such as \"cluster-insight\". In this case 'container_id'\n # is not unique in the context graph, so we make it unique by appending\n # the a prefix of the Docker ID of the container.\n hex_id = utilities.object_to_hex_id(container)\n if hex_id is None:\n msg = 'Could not compute short hex ID of container %s' % container_id\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n if utilities.contains_long_hex_number(container_id):\n short_label = hex_id\n unique_id = container_id\n else:\n # The short label is descriptive when 'container_id' does not contain\n # long hex numbers.\n short_label = container_id\n unique_id = '{container_id}-{hex_id}'.format(\n container_id=container_id, hex_id=hex_id)\n\n wrapped_container = utilities.wrap_object(\n container, 'Container', unique_id, ts, label=short_label)\n containers.append(wrapped_container)\n timestamps.append(ts)\n\n # If the container's label does not contain long hex fields, it is\n # good enough. 
It should not be replaced with anything else.\n if not utilities.contains_long_hex_number(short_label):\n continue\n\n # Modify the container's label after the wrapped container was added\n # to the containers list.\n # Compute the container's short name to create a better container label:\n # short_container_name/short_hex_id.\n # For example: \"cassandra/d85b599c17d8\".\n parent_pod_id = utilities.get_parent_pod_id(wrapped_container)\n if parent_pod_id is None:\n continue\n parent_pod = kubernetes.get_one_pod(gs, docker_host, parent_pod_id)\n if parent_pod is None:\n continue\n short_container_name = utilities.get_short_container_name(\n wrapped_container, parent_pod)\n if not utilities.valid_string(short_container_name):\n continue\n wrapped_container['annotations']['label'] = (short_container_name + '/' +\n hex_id)\n\n ret_value = gs.get_containers_cache().update(\n docker_host, containers,\n min(timestamps) if timestamps else time.time())\n gs.logger_info(\n 'get_containers(docker_host=%s) returns %d containers',\n docker_host, len(containers))\n return ret_value\n\n\n@utilities.global_state_string_args\ndef get_containers_with_metrics(gs, docker_host):\n \"\"\"Gets the list of all containers in 'docker_host' with metric annotations.\n\n Args:\n gs: global state.\n docker_host: the Docker host running the containers.\n\n Returns:\n list of wrapped container objects.\n Each element in the list is the result of\n utilities.wrap_object(container, 'Container', ...)\n\n Raises:\n CollectorError: in case of failure to fetch data from Docker.\n Other exceptions may be raised due to exectution errors.\n \"\"\"\n # Create a lookup table from pod IDs to pods.\n # This lookup table is needed when annotating containers with\n # metrics. Also compute the project's name.\n containers_list = get_containers(gs, docker_host)\n if not containers_list:\n return []\n\n pod_id_to_pod = {}\n project_id = '_unknown_'\n\n # Populate the pod ID to pod lookup table.\n # Compute the project_id from the name of the first pod.\n for pod in kubernetes.get_pods(gs, docker_host):\n assert utilities.is_wrapped_object(pod, 'Pod')\n pod_id_to_pod[pod['id']] = pod\n if project_id != '_unknown_':\n continue\n pod_hostname = utilities.get_attribute(\n pod, ['properties', 'spec', 'nodeName'])\n if utilities.valid_string(pod_hostname):\n project_id = utilities.node_id_to_project_id(pod_hostname)\n\n # We know that there are containers in this docker_host.\n if not pod_id_to_pod:\n # there are no pods in this docker_host.\n msg = 'Docker host %s has containers but no pods' % docker_host\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n # Annotate the containers with their metrics.\n for container in containers_list:\n assert utilities.is_wrapped_object(container, 'Container')\n\n parent_pod_id = utilities.get_parent_pod_id(container)\n if not utilities.valid_string(parent_pod_id):\n msg = ('missing or invalid parent pod ID in container %s' %\n container['id'])\n metrics.annotate_container_error(container, msg)\n continue\n\n if parent_pod_id not in pod_id_to_pod:\n msg = ('could not locate parent pod %s for container %s' %\n (parent_pod_id, container['id']))\n metrics.annotate_container_error(container, msg)\n continue\n\n # Note that the project ID may be '_unknown_'.\n # This is not a big deal, because the aggregator knows the project ID.\n metrics.annotate_container(\n project_id, container, pod_id_to_pod[parent_pod_id])\n\n return containers_list\n\n\n@utilities.global_state_two_string_args\ndef 
get_one_container(gs, docker_host, container_id):\n \"\"\"Gets the given container that runs in the given Docker host.\n\n Note that the 'container_id' is the value in container['id'].\n It is a symbolic name, such as:\n k8s_POD.cc4afd21_kibana-logging-controller-fn98y_default_06b28f3f-dd5a-11e4-8a61-42010af0c46c_a1a2515e\n -or-\n cluster-insight-9c1e7820fd4c\n This should not be confused with the Docker ID of the container, which\n is a long hexadecimal string. It is stored in container['properties']['Id'].\n\n Args:\n gs: global state.\n docker_host: the Docker host running the container. Must not be empty.\n container_id: the container ID (in the wrapped object). Must not be empty.\n\n Returns:\n The wrapped container object if it was found.\n The wrapped container object is the result of\n utilities.wrap_object(container, 'Container', ...)\n None is the container is not found.\n\n Raises:\n Passes through all exceptions from lower-level routines.\n May raise exceptions due to run-time errors.\n \"\"\"\n for container in get_containers(gs, docker_host):\n if container['id'] == container_id:\n return container\n\n return None\n\n\n@utilities.global_state_string_args\ndef invalid_processes(gs, url):\n \"\"\"Raise the CollectorError exception because the response is invalid.\n\n Args:\n gs: global state.\n url: the source of the invalid data is this URL.\n\n Raises:\n CollectorError: always raises this exception.\n \"\"\"\n msg = 'process information from URL %s is invalid' % url\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n\n@utilities.global_state_two_string_args\ndef get_processes(gs, docker_host, container_id):\n \"\"\"Gets the list of all processes in the 'docker_host' and 'container_id'.\n\n If the container is not found, returns an empty list of processes.\n\n Args:\n gs: global state.\n docker_host: the Docker host running the container.\n container_id: the container running the processes.\n\n Returns:\n list of wrapped process objects.\n Each element in the list is the result of\n utilities.wrap_object(process, 'Process', ...)\n\n Raises:\n CollectorError in case of failure to fetch data from Docker.\n Other exceptions may be raised due to exectution errors.\n \"\"\"\n processes_label = '%s/%s' % (docker_host, container_id)\n processes, timestamp_secs = gs.get_processes_cache().lookup(\n processes_label)\n if timestamp_secs is not None:\n gs.logger_info(\n 'get_processes(docker_host=%s, container_id=%s) cache hit',\n docker_host, container_id)\n return processes\n\n container = get_one_container(gs, docker_host, container_id)\n if container is not None:\n assert utilities.is_wrapped_object(container, 'Container')\n container_short_hex_id = utilities.object_to_hex_id(container['properties'])\n assert utilities.valid_string(container_short_hex_id)\n else:\n # Parent container not found. 
Container might have crashed while we were\n # looking for it.\n return []\n\n container_name = utilities.get_container_name(container)\n if not utilities.valid_string(container_name):\n msg = 'Invalid container \"Name\" attribute in container %s' % container_id\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n # NOTE: there is no trailing /json in this URL - this looks like a bug in the\n # Docker API\n # Note that the {container_id} in the URL must be the internal container\n # name in container['properties']['Name'][1:]\n # and not the container name in container['id'] which may contain an extra\n # suffix.\n url = ('http://{docker_host}:{port}/containers/{container_name}/top?'\n 'ps_args=aux'.format(docker_host=docker_host,\n port=gs.get_docker_port(),\n container_name=container_name))\n fname = utilities.container_id_to_fname(\n docker_host, 'processes', container_name)\n\n try:\n # TODO(vasbala): what should we do in cases where the container is gone\n # (and replaced by a different one)?\n result = fetch_data(gs, url, fname, expect_missing=True)\n except ValueError:\n # this container does not exist anymore\n return []\n except collector_error.CollectorError:\n raise\n except:\n msg = 'fetching %s failed with exception %s' % (url, sys.exc_info()[0])\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n if not isinstance(utilities.get_attribute(result, ['Titles']),\n types.ListType):\n invalid_processes(gs, url)\n if not isinstance(utilities.get_attribute(result, ['Processes']),\n types.ListType):\n invalid_processes(gs, url)\n\n pstats = result['Titles']\n processes = []\n now = time.time()\n for pvalues in result['Processes']:\n process = {}\n if not isinstance(pvalues, types.ListType):\n invalid_processes(gs, url)\n if len(pstats) != len(pvalues):\n invalid_processes(gs, url)\n for pstat, pvalue in zip(pstats, pvalues):\n process[pstat] = pvalue\n\n # Prefix with container Id to ensure uniqueness across the whole graph.\n process_id = '%s/%s' % (container_short_hex_id, process['PID'])\n processes.append(utilities.wrap_object(\n process, 'Process', process_id, now, label=process['PID']))\n\n ret_value = gs.get_processes_cache().update(\n processes_label, processes, now)\n gs.logger_info(\n 'get_processes(docker_host=%s, container_id=%s) returns %d processes',\n docker_host, container_id, len(processes))\n return ret_value\n\n\n@utilities.global_state_string_dict_args\ndef get_image(gs, docker_host, container):\n \"\"\"Gets the information of the given image in the given host.\n\n Args:\n gs: global state.\n docker_host: Docker host name. 
Must not be empty.\n container: the container which runs the image.\n\n Returns:\n If image was found, returns the wrapped image object, which is the result of\n utilities.wrap_object(image, 'Image', ...)\n If the image was not found, returns None.\n\n Raises:\n CollectorError: in case of failure to fetch data from Docker.\n ValueError: in case the container does not contain a valid image ID.\n Other exceptions may be raised due to exectution errors.\n \"\"\"\n assert utilities.is_wrapped_object(container, 'Container')\n # The 'image_id' should be a long hexadecimal string.\n image_id = utilities.get_attribute(container, ['properties', 'Image'])\n if not utilities.valid_hex_id(image_id):\n msg = 'missing or invalid image ID in container ID=%s' % container['id']\n gs.logger_error(msg)\n raise ValueError(msg)\n\n # The 'image_name' should be a symbolic name (not a hexadecimal string).\n image_name = utilities.get_attribute(\n container, ['properties', 'Config', 'Image'])\n\n if ((not utilities.valid_string(image_name)) or\n utilities.valid_hex_id(image_name)):\n msg = 'missing or invalid image name in container ID=%s' % container['id']\n gs.logger_error(msg)\n raise ValueError(msg)\n\n cache_key = '%s|%s' % (docker_host, image_id)\n image, timestamp_secs = gs.get_images_cache().lookup(cache_key)\n if timestamp_secs is not None:\n gs.logger_info('get_image(docker_host=%s, image_id=%s) cache hit',\n docker_host, image_id)\n return image\n\n # A typical value of 'docker_host' is:\n # k8s-guestbook-node-3.c.rising-apricot-840.internal\n # Use only the first period-seperated element for the test file name.\n # The typical value of 'image_name' is:\n # brendanburns/php-redis\n # We convert embedded '/' and ':' characters to '-' to avoid interference with\n # the directory structure or file system.\n url = 'http://{docker_host}:{port}/images/{image_id}/json'.format(\n docker_host=docker_host, port=gs.get_docker_port(), image_id=image_id)\n fname = '{host}-image-{id}'.format(\n host=docker_host.split('.')[0],\n id=image_name.replace('/', '-').replace(':', '-'))\n\n try:\n image = fetch_data(gs, url, fname, expect_missing=True)\n except ValueError:\n # image not found.\n msg = 'image not found for image_id: %s' % image_id\n gs.logger_info(msg)\n return None\n except collector_error.CollectorError:\n raise\n except:\n msg = 'fetching %s failed with exception %s' % (url, sys.exc_info()[0])\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n now = time.time()\n # compute the two labels of the image.\n # The first is a 12-digit hexadecimal number shown by \"docker images\".\n # The second is the symbolic name of the image.\n full_hex_label = image.get('Id')\n if not (isinstance(full_hex_label, types.StringTypes) and full_hex_label):\n msg = 'Image id=%s has an invalid \"Id\" attribute value' % image_id\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n short_hex_label = utilities.object_to_hex_id(image)\n if short_hex_label is None:\n msg = 'Could not compute short hex ID of image %s' % image_id\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n wrapped_image = utilities.wrap_object(\n image, 'Image', full_hex_label, now,\n label=short_hex_label, alt_label=image_name)\n\n ret_value = gs.get_images_cache().update(cache_key, wrapped_image, now)\n gs.logger_info('get_image(docker_host=%s, image_id=%s, image_name=%s)',\n docker_host, image_id, image_name)\n return ret_value\n\n\n@utilities.global_state_string_args\ndef get_images(gs, 
docker_host):\n \"\"\"Gets the list of all images in 'docker_host'.\n\n Args:\n gs: global state.\n docker_host: Docker host name. Must not be empty.\n\n Returns:\n list of wrapped image objects.\n Each element in the list is the result of\n utilities.wrap_object(image, 'Image', ...)\n\n Raises:\n CollectorError in case of failure to fetch data from Docker.\n Other exceptions may be raised due to exectution errors.\n \"\"\"\n # The images are already cached by get_image(), so there is no need to\n # check the cache on entry to this method.\n\n # docker_host is the same as node_id\n images_list = []\n image_id_set = set()\n\n # All containers in this 'docker_host'.\n for container in get_containers(gs, docker_host):\n # Image from which this Container was created\n image = get_image(gs, docker_host, container)\n if (image is not None) and (image['id'] not in image_id_set):\n images_list.append(image)\n image_id_set.add(image['id'])\n\n gs.logger_info('get_images(docker_host=%s) returns %d images',\n docker_host, len(images_list))\n return images_list\n\n\n@utilities.global_state_string_args\ndef get_minion_status(gs, docker_host):\n \"\"\"Returns the status of the collector minion running on 'docker_host'.\n\n Args:\n gs: global state.\n docker_host: Docker host name. Must not be empty.\n\n Returns:\n 'OK': the collector minion is active.\n 'ERROR': the collecor minion is inactive or an error occured while\n communicating with it.\n \"\"\"\n try:\n containers_list = get_containers(gs, docker_host)\n except:\n gs.logger_error('failed to communicate with collector minion on %s',\n docker_host)\n return 'ERROR'\n\n # In testing mode, an empty containers list is also considered an error.\n if gs.get_testing() and (not containers_list):\n return 'ERROR'\n return 'OK'\n\n\n@utilities.global_state_arg\ndef get_version(gs):\n \"\"\"Returns a human-readable information of the currently running image.\n\n Args:\n gs: global state.\n\n Returns:\n A string of the form:\n \n\n Raises:\n CollectorError: in case of any error to compute the running image\n information.\n \"\"\"\n version, timestamp_secs = gs.get_version_cache().lookup('')\n if timestamp_secs is not None:\n assert utilities.valid_string(version)\n gs.logger_info('get_version() cache hit')\n return version\n\n if gs.get_testing():\n fname = 'testdata/proc-self-cgroup.txt'\n else:\n fname = '/proc/self/cgroup'\n\n try:\n f = open(fname, 'r')\n cgroup = f.read()\n f.close()\n except IOError:\n # file not found\n msg = 'failed to open or read %s' % fname\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n except:\n msg = 'reading %s failed with exception %s' % (fname, sys.exc_info()[0])\n gs.logger_exception(msg)\n raise collector_error.CollectorError(msg)\n\n # The file must contain an entry for '\\d+:cpu:/...'.\n m = re.search(r'\\b\\d+:cpu:/([0-9a-fA-F]+)\\b', cgroup)\n if not m:\n msg = 'could not find an entry for \"cpu:/docker/...\" in %s' % fname\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n hex_container_id = m.group(1)\n if gs.get_testing():\n # This pod name is guaranteed to match a pod in the testdata directory.\n my_pod_name = 'kube-dns-bqw5e'\n else:\n my_pod_name = os.uname()[1]\n assert utilities.valid_string(my_pod_name)\n\n # Find my node name from my pod.\n my_node_name = None\n for pod in kubernetes.get_pods(gs):\n assert utilities.is_wrapped_object(pod, 'Pod')\n if pod['id'] == my_pod_name:\n my_node_name = utilities.get_attribute(\n pod, ['properties', 'spec', 'nodeName'])\n 
break\n\n if not utilities.valid_string(my_node_name):\n msg = ('could not find pod %s or this pod does not contain a valid '\n 'node name' % my_pod_name)\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n # inspect the running container.\n # Must specify an explicit host name (not \"localhost\").\n url = 'http://{host}:{port}/containers/{container_id}/json'.format(\n host=my_node_name, port=gs.get_docker_port(),\n container_id=hex_container_id)\n container = fetch_data(gs, url, 'container-' + hex_container_id[:12])\n\n # Fetch the image symbolic name and hex ID from the container information.\n symbolic_image_id = utilities.get_attribute(container, ['Config', 'Image'])\n hex_image_id = utilities.get_attribute(container, ['Image'])\n\n # Verify the image symbolic name and the image hex ID.\n if not (utilities.valid_string(symbolic_image_id) and\n not utilities.valid_hex_id(symbolic_image_id) and\n utilities.valid_hex_id(hex_image_id)):\n msg = 'could not find or invalid image information in container %s' % url\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n # Fetch image information.\n # Must specify an explicit host name (not \"localhost\").\n url = 'http://{host}:{port}/images/{image_id}/json'.format(\n host=my_node_name, port=gs.get_docker_port(),\n image_id=hex_image_id)\n image = fetch_data(gs, url, 'image-' + hex_image_id[:12])\n\n # Fetch the image creation timestamp.\n created = utilities.get_attribute(image, ['Created'])\n if not utilities.valid_string(created):\n msg = 'could not find image creation timestamp in %s' % url\n gs.logger_error(msg)\n raise collector_error.CollectorError(msg)\n\n # Remove the trailing subsecond part of the creation timestamp.\n created = re.sub(r'\\.[0-9]+Z$', '', created)\n\n version = '%s %s %s' % (symbolic_image_id, hex_image_id[:12], created)\n ret_value = gs.get_version_cache().update('', version)\n gs.logger_info('get_version() returns: %s', ret_value)\n return ret_value\n","sub_path":"kube-system-plugins/cluster-insight/collector/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":27948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"59570786","text":"#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom nvtabular import Workflow, ops\nfrom nvtabular.columns import ColumnSelector, Schema\n\n\ndef test_fit_schema():\n schema = Schema([\"x\", \"y\", \"id\"])\n\n cont_features = (\n ColumnSelector(schema.column_names)\n >> ops.FillMissing()\n >> ops.Clip(min_value=0)\n >> ops.LogOp\n >> ops.Normalize()\n >> ops.Rename(postfix=\"_renamed\")\n )\n\n workflow = Workflow(cont_features)\n workflow.fit_schema(schema)\n\n assert workflow.output_schema.column_names == [\"x_renamed\", \"y_renamed\", \"id_renamed\"]\n\n\ndef test_fit_schema_works_with_addition_nodes():\n schema = Schema([\"x\", \"y\", \"id\"])\n\n x_node = ColumnSelector([\"x\"]) >> ops.Rename(postfix=\"_renamed\")\n\n 
workflow = Workflow(x_node + \"y\")\n workflow.fit_schema(schema)\n\n assert workflow.output_schema.column_names == [\"x_renamed\", \"y\"]\n\n x_node = ColumnSelector([\"x\"]) >> ops.Rename(postfix=\"_renamed\")\n y_node = ColumnSelector([\"y\"]) >> ops.Rename(postfix=\"_renamed\")\n\n workflow = Workflow(x_node + y_node)\n workflow.fit_schema(schema)\n\n assert workflow.output_schema.column_names == [\"x_renamed\", \"y_renamed\"]\n\n\ndef test_fit_schema_works_with_subtraction_nodes():\n schema = Schema([\"x\", \"y\", \"id\"])\n\n cont_features = (\n ColumnSelector([\"x\", \"y\"])\n >> ops.FillMissing()\n >> ops.Clip(min_value=0)\n >> ops.LogOp\n >> ops.Normalize()\n >> ops.Rename(postfix=\"_renamed\")\n )\n\n workflow1 = Workflow(cont_features - \"y_renamed\")\n workflow1.fit_schema(schema)\n\n assert workflow1.output_schema.column_names == [\"x_renamed\"]\n\n\ndef test_fit_schema_works_with_selection_nodes():\n schema = Schema([\"x\", \"y\", \"id\"])\n\n cont_features = (\n ColumnSelector([\"x\", \"y\"])\n >> ops.FillMissing()\n >> ops.Clip(min_value=0)\n >> ops.LogOp\n >> ops.Normalize()\n >> ops.Rename(postfix=\"_renamed\")\n )\n\n workflow1 = Workflow(cont_features[\"x_renamed\"])\n workflow1.fit_schema(schema)\n\n assert workflow1.output_schema.column_names == [\"x_renamed\"]\n","sub_path":"tests/unit/workflow/test_workflow_schemas.py","file_name":"test_workflow_schemas.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"367735645","text":"import operator as op\n\n\nclass SuffixArray:\n def __init__(self, strings):\n \"\"\" Saves the strings and builds the Suffix Array.\n\n :param strings: list of strings to build the Suffix Array on\n \"\"\"\n self.strings = strings\n self.suffixes = None\n self.build()\n\n def build(self):\n \"\"\" Builds the Suffix Array based on saved strings \"\"\"\n self.suffixes = [(self.strings[i][j:], i, j)\n for i in range(len(self.strings))\n for j in range(len(self.strings[i]))]\n self.suffixes.sort()\n\n def binary_search(self, string, lower=True):\n \"\"\" Searches for lower or upper bound for string\n on self.suffixes with a binary search.\n\n Method based on stringMatching function from\n Competitive Programming 3 book (page 259).\n\n :param string: string to search for\n :param lower: True to return the lower bound or\n False to return the upper bound\n :return: the lower or upper bound, according to lower\n \"\"\"\n if lower:\n cmp = op.ge\n else:\n cmp = op.gt\n lo, hi = 0, len(self.suffixes) - 1\n while lo < hi:\n mid = int(lo + (hi - lo) / 2)\n suffix = self.suffixes[mid][0]\n # print(lo, mid, hi, suffix)\n if cmp(suffix[:len(string)], string):\n hi = mid\n else:\n lo = mid + 1\n # special case: if searching for upper bound and\n # last suffix is not equal to the query string,\n # then decrease upper bound (should we?)\n if not lower:\n if self.suffixes[hi][0][:len(string)] != string:\n hi -= 1\n return lo if lower else hi\n\n def suffix_bounds(self, string):\n \"\"\" Searches for both lower and upper bounds\n for string on self.suffixes.\n\n :param string: string to search for\n :return: tuple containing lower and upper bounds of self.suffixes\n \"\"\"\n lower_bound = self.binary_search(string)\n upper_bound = self.binary_search(string, lower=False)\n if lower_bound > upper_bound:\n lower_bound, upper_bound = upper_bound, lower_bound\n return lower_bound, upper_bound\n\n def indices_between_bounds(self, string):\n \"\"\" Given the lower and upper 
bounds on self.suffixes,\n returns a list of indices of self.strings which point\n to the strings that own the suffixes between the bounds.\n\n (What a good explanation, huh)\n\n :param string: string to search for\n :return: list of indices pointing to filtered strings of self.strings\n \"\"\"\n lower_bound, upper_bound = self.suffix_bounds(string)\n return [self.suffixes[i][1]\n for i in range(lower_bound, upper_bound + 1)]\n\n def __call__(self, string):\n return self.indices_between_bounds(string)\n","sub_path":"SuffixArray.py","file_name":"SuffixArray.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"5602522","text":"from rest_framework.views import APIView\nfrom rest_framework import viewsets\nfrom rest_framework.viewsets import ViewSetMixin\nfrom ..models import *\n\nfrom api.serializer.course import *\nfrom api.serializer.article import *\nfrom rest_framework.response import Response\nfrom rest_framework.versioning import URLPathVersioning\n\n\n\nclass ArticleModelView(viewsets.ModelViewSet):\n queryset = Article.objects.all()\n serializer_class = ArticleModelSerializers\n\n def list(self, request, *args, **kwargs):\n ret = {'code':1000, 'data':None}\n try:\n article_list = Article.objects.all()\n article_list = ArticleModelSerializers(article_list,many=True) #记得queryset用many=True\n print(article_list.data)\n ret['data'] = article_list.data\n except Exception as e:\n ret['code'] = 1001\n ret['error'] = '获取数据失败'\n\n return Response(ret)\n\n def retrieve(self, request, *args, **kwargs):\n\n ret = {'code': 1000, 'data': None}\n try:\n pk = kwargs.get('pk')\n\n obj = Article.objects.filter(course_id=pk).first()\n\n obj_ser = CourseDetailModelSerializers(obj)\n print(obj_ser.data)\n ret['data'] = obj_ser.data\n except Exception as e:\n ret['code'] = 1001\n ret['error'] = '获取数据失败'\n\n return Response(ret)","sub_path":"api/views/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"383669774","text":"import pandas as pd\nimport numpy as np\nimport sys, os, time\nimport dill as pickle\nimport numba\nimport math\n\nclass CountFeature:\n \"\"\"\"\"\"\n @classmethod\n @numba.jit\n def __ApplyCateRatio(cls, ColumnValues, d, dt):\n \"\"\"\"\"\"\n n = len(ColumnValues)\n result = np.zeros((n), dtype='float32')\n if(dt == 'object'):\n for i in range(n):\n v = ColumnValues[i]\n if (pd.isnull(v)):\n result[i] = d['missing']\n else:\n result[i] = d[v]\n else:\n for i in range(n):\n v = ColumnValues[i]\n if(math.isnan(v)):\n result[i] = d['missing']\n else:\n result[i] = d[v]\n\n return result\n\n @classmethod\n def GenerateCountFeature(cls, InputFile, OutputFile):\n \"\"\"\"\"\"\n start = time.time()\n\n prop = pd.read_csv(InputFile)\n #prop = prop.sample(frac = 0.01)\n\n CategoryCols = ['hashottuborspa', 'taxdelinquencyflag', 'airconditioningtypeid', 'architecturalstyletypeid',\n 'buildingqualitytypeid', 'decktypeid', 'heatingorsystemtypeid', 'pooltypeid10', 'pooltypeid2',\n 'pooltypeid7', 'propertylandusetypeid', 'regionidcity', 'regionidcounty','regionidneighborhood','regionidzip']\n\n df_cf = pd.DataFrame(index= prop.index)\n df_cf['parcelid'] = prop['parcelid']\n\n for col in CategoryCols:\n start0 = time.time()\n\n N = len(prop)\n vcs = prop[col].value_counts()\n total = np.sum(vcs)\n d_vcs = dict(vcs)\n for vc in d_vcs:\n d_vcs[vc] = d_vcs[vc] / N\n if (total < N):\n 
d_vcs['missing'] = (N - total) / N\n\n dt = prop[col].dtype.name\n CateRatio = cls.__ApplyCateRatio(prop[col].values, d_vcs, dt)\n df_tmp = pd.DataFrame(data=CateRatio, index= df_cf.index, columns=['%sratio' % col])\n df_cf = pd.concat([df_cf, df_tmp], axis=1)\n\n end0 = time.time()\n print('%s was added, time consumed %ds.' % (col,(end0 - start0)))\n\n with open(OutputFile, 'wb') as o_file:\n pickle.dump(df_cf, o_file, -1)\n o_file.close()\n\n end = time.time()\n print('Add count features done, time consumed %ds' % (end - start))\n","sub_path":"Zillow/src/feat/CountFeature.py","file_name":"CountFeature.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"464779427","text":"\"\"\"\nThis will import whatever there is in pack1.__init__ as well\n\"\"\"\n\n\ndef main() -> None:\n pack1 = __import__('pack1.mod1')\n print(pack1)\n pack1.mod1.do_this()\n pack1.mod2.do_that()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"importing/07_01_manual_dunder_import/02_import_absolute_submod.py","file_name":"02_import_absolute_submod.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"129446297","text":"#guangduyouxian.py\nimport os\nmap=(\n(0,1,0,0,0,1,1,1,1,1),\n(0,1,0,1,0,0,0,0,0,1),\n(0,1,0,1,1,1,1,1,0,1),\n(0,1,0,1,0,0,0,1,0,1),\n(0,1,0,1,0,1,0,1,0,1),\n(0,1,0,1,0,1,0,1,0,1),\n(0,1,0,1,0,1,0,1,0,1),\n(0,1,0,1,0,1,0,0,0,1),\n(0,1,0,1,0,1,1,1,1,1),\n(0,0,0,1,0,0,0,0,0,0),)\nprint(len(map))\n\nstartpoint,endpoint=(0,0),(9,9)\n\nopen=[]\nfdome={}\nclose=[]\nnowpoint=()\nopen.append(startpoint)\nwhile open:\n\tpoint=open.pop(0)\n\tprint(point)\n\tfor x,y in ((-1,0),(1,0),(0,-1),(0,1)):\n\t\tnowpoint=(point[0]+x,point[1]+y)\n\t\tif nowpoint[0]<0 or nowpoint[0]>9 \\\n\t\tor nowpoint[1]<0 or nowpoint[1]>9:\n\t\t\tcontinue\n\t\tprint(nowpoint)\n\t\t#print(x,y)\n\t\tif map[nowpoint[0]][nowpoint[1]] or nowpoint in close:\n\t\t\tcontinue\n\t\telse:\n\t\t\tfdome[nowpoint]=point\n\t\t\tclose.append(nowpoint)\n\t\t\topen.append(nowpoint)\n\tprint('')\n\tif nowpoint == endpoint:\n\t\tbreak\n\tclose.append(point)\n\nelse:\n\tprint('无法到达!')\n\texit()\n\n#print(fdome)\nechomap = []\nfor m in map:\n\techomap.append(list(m))\nwhile nowpoint != startpoint:\n\techomap[nowpoint[0]][nowpoint[1]]='8'\n\t\n\tnowpoint=fdome.get(nowpoint)\nechomap[startpoint[0]][startpoint[1]]='#'\nechomap[endpoint[0]][endpoint[1]]='@'\nfor x in echomap:\n\tfor y in x:\n\t\tif y ==0 :\n\t\t\tprint(' ',end=' ')\n\t\telif y == 1:\n\t\t\tprint('|',end=' ')\n\t\telse:\n\t\t\tprint(y,end=' ')\n\tprint('')\n\n\n\n\n","sub_path":"guangduyouxiansousuo.py","file_name":"guangduyouxiansousuo.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"299401073","text":"from guide.models import Guide, GuideCategory\nfrom rest_framework import serializers\n\nclass GuideCategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = GuideCategory\n fields = (\n 'id',\n 'name'\n )\n\nclass GuideSerializer(serializers.ModelSerializer):\n category = GuideCategorySerializer(read_only=True)\n class Meta:\n model = Guide\n fields = (\n 'id',\n 'name',\n 'category',\n 'created'\n 
)\n","sub_path":"guide/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"74147849","text":"#!/usr/bin/env python3\nimport json\nimport petname\nimport random\nimport sys\nhead_generator = ['bull', 'lion', 'raven', 'bunny']\narm_generator = [2,4,6,8,10]\nleg_generator = [3,6,9,12]\nanimals = {}\n\nfor i in range (20):\n\tanimals[i] = {}\n\tanimals[i]['head'] = random.choice(head_generator) \n\t\n\tbody1 = petname.name()\n\tbody2 = petname.name()\n\tanimals[i]['body'] = ('{}-{}').format(body1, body2)\n\t\n\tanimals[i]['arms'] = random.choice(arm_generator)\n\tanimals[i]['legs'] = random.choice(leg_generator)\n\tanimals[i]['tail'] = animals[i]['arms']+animals[i]['legs']\n\nwith open(sys.argv[1], 'w') as out:\n\tjson.dump(animals, out, indent=2)\n\n\t\n\n\n\n","sub_path":"homework03/generate_animals.py","file_name":"generate_animals.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435045128","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\nimport csv\nimport codecs\nimport datetime\nimport asyncio\nimport shutil\nimport pandas as pd\nimport json\nfrom Hive.DealRules import *\nfrom Hive.ConfigHandler import *\nfrom threading import Thread\nfrom os.path import realpath,dirname\nfrom Hive.SQL import Mission_BLL,MissionInfo_BLL,Model,CSV2Mysql,CSV2Sqlserver\n\n#写入本地文件\nasync def do_some_write(fp,item,dpF):\n data=[]\n if dpF:\n R_csvFile = open(fp, \"r\",encoding='utf-8')\n reader = csv.DictReader(R_csvFile)\n data=[row[dpF] for row in reader]#获取去重字段的所有值\n print('data of dpf:',data)\n R_csvFile.close()\n if not dpF or item[dpF] not in data:#如果值不存在,或者不需要去重\n print('Item of dpf:',item)\n csvFile = open(fp, \"a\",encoding='utf-8')\n writer = csv.writer(csvFile)\n writer.writerow(item.values()) \n csvFile.close()\n\n#推入数据库:dp为根目录,\nasync def put_sql_data(sqlConfig,result_step,dp,_ste,_ste_key):\n print('Result:推数据中。。。。')\n path=dp+'/dataFlag.json'\n table_name=dp.split('/')[-1]#拿到spider.name\n with open(path,'r',encoding='utf-8') as f:\n dataFlag=json.loads(f.read())#各个文件的推送标志位\n #推数据至数据库\n for oneResult in result_step:\n pushflag=True\n step_num=oneResult.split('_')[1]#步骤名\n step_flag=dataFlag[step_num]\n rs_fp=dp+'/'+oneResult#得到结果文件路径\n csv_data_all=pd.read_csv(rs_fp)#读取文件所有内容\n if step_flag < len(csv_data_all):#该文件未推送完毕\n csv_data_rs=csv_data_all.loc[step_flag:]\n for mdResult in _ste[step_num]:#得到需要关联的步骤\n md_key=_ste_key[step_num][_ste[step_num].index(mdResult)]#得到关联字段\n md_fp=dp+'/'+'step_'+mdResult+'_middle'+'.csv'#得到关联文件路径\n \n if os.path.exists(md_fp):\n \n csv_data_md=pd.read_csv(md_fp)#读取文件所有内容\n csv_data_rs=pd.merge(csv_data_rs,csv_data_md,on=md_key)#按照关联字段合并数据\n else:#说明需要关联的文件尚未生成好,跳过此次同步\n pushflag=False\n break\n if pushflag:\n \n dataFlag[step_num]=step_flag+len(csv_data_rs)#记录推送位置\n data=dict(csv_data_rs)# 处理数据格式\n datas=list(map( dict, zip(*([(key, val) for val in data[key]] for key in data.keys()))))\n table_name=table_name+'_step_'+step_num+'_result'#表名\n print(\"##################datas#########################\")\n #print('datas',datas)\n print('dataFlag',dataFlag)\n table_keys=list(data.keys())#键名\n #数据库交互\n sqltype=sqlConfig[\"sqltype\"]\n if sqltype=='MySql':\n 
print('同步数据至MySql')\n cm=CSV2Mysql.CSV2Mysql(sqlConfig)\n cm.create_table(table_keys,table_name)\n cm.insert_MoreData(table_name,datas)\n elif sqltype=='SqlServer':\n print('同步数据至SqlServer')\n cs=CSV2Sqlserver.CSV2Sqlserver(sqlConfig)\n cs.create_table(table_keys,table_name)\n cs.insert_MoreData(table_name,datas)\n #更新标志位\n jsondata = json.dumps(dataFlag, ensure_ascii=False)\n with open(path, 'wb') as f:\n f.write(jsondata.encode('utf8'))\n \n\n\nclass HivePipeline(object):\n def process_item(self, item, spider):\n return item\n\nclass Pipeline_ToCSV(object): \n def __init__(self):\n print(\"初始化Pipeline_ToCSV\")\n self.failCount_savedata=0 \n self.MI=MissionInfo_BLL.MissionInfo()\n self.new_loop = asyncio.new_event_loop()\n self.lock_push=False\n #推数据库相关\n self.sTe={}#一个字典,包含数据需要关联的文件,如:{'4':['2','3']},将step2,step3的数据关联到4\n self.sTe_key={}#一个字典,包含数据需要关联的文件的关联字段,需要结合self.sTe使用,如:{'4':['a','b']},将step2,step3的数据关联到4,关联字段分别为a,b \n \n #写数据时每一步对应的去重字段\n self.dpFields={} \n t = Thread(target=self.start_loop, args=(self.new_loop,))\n t.setDaemon(True) # 设置子线程为守护线程\n t.start()\n\n def _relevance(self,rules):#提取关联信息\n for key in rules.keys():\n rule=rules[key]\n for ru in rule:\n if ru.relevance:#如果存在关联字段\n rels = ru.relevance\n print(rels)\n if rels:\n for rel in rels:#rel:[包含规则,关联字段,解析归则,关联步骤]\n e=rel[3].split('t')[1]\n s=rel[3].split('t')[0]\n k=rel[1]\n print(e,s,k)\n if e not in self.sTe.keys():\n self.sTe[e] =[s]\n self.sTe_key[e]=[k]\n else:\n if s not in self.sTe[e]:#去除重复的\n self.sTe[e].append(s)\n self.sTe_key[e].append(k)\n\n def get_DpField(self,rules):\n for key in rules.keys():\n rule=rules[key]\n for ru in rule:\n #print('-------------dpf',ru.dpField)\n if key not in self.dpFields.keys():\n self.dpFields[key]=ru.dpField\n elif ru.dpField not in self.dpFields[key]:#去除重复的\n self.dpFields[key]=ru.dpField\n\n\n def start_loop(self,loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()\n\n def open_spider(self, spider):\n print('进入key')\n self._relevance(spider.rules)#创建最终结果关联信息\n self.get_DpField(spider.rules)#创建去重字段信息\n print('--------------------sTe',self.sTe)\n print('--------------------dpField',self.dpFields)\n #当任务被开启时,往missioninfo表中插入默认数据\n self.m=Mission_BLL.Mission()\n try:\n self.sqlConfig=get_Sql(spider.name)\n except Exception as e:\n spider.logger.info('%s异常,pipeline获取sqlconfig!%s'%(spider.name,e))\n spider.EI_BLL._update_exceptionInfo('%s异常,pipeline获取sqlconfig!%s'%(spider.name,e))\n self.cm=CSV2Mysql.CSV2Mysql(self.sqlConfig)\n self.cs=CSV2Sqlserver.CSV2Sqlserver(self.sqlConfig)\n\n _id=self.m._select_ID(spider.name)[0][0]\n self.st= datetime.datetime.now()\n MID=Model.missionInfo(spider.name,0,0,0,0,0,0,self.st,self.st,_id)\n \n flag=self.MI._get_missionInfo(spider.name)\n if flag:#该信息已经存在了\n self.MI._delete(spider.name)#删除该条记录\n self.MI._add_missionInfo(MID)\n #创建数据存储文件夹\n dp=dirname(realpath(__file__))+'/Honey_CSV/'+spider.name\n if not os.path.exists(dp):\n print('创建存储目录',dp)\n os.mkdir(dp)\n else:\n print(dp,'存储目录已经存在!')\n\n #初始化包含关联标志位的json文件\n \n item_sTe=dict.fromkeys(spider.rules.keys(),0)\n jsondata = json.dumps(item_sTe, ensure_ascii=False)\n fp=dp+'/dataFlag.json'\n print('写入dataFlag.json',fp)\n with open(fp, 'wb') as f:\n f.write(jsondata.encode('utf8'))\n \n def process_item(self,item,spider):\n try:\n #写入本地CSV\n print('正在写入csv:'+spider.name)\n dp=dirname(realpath(__file__))+'/Honey_CSV/'+spider.name\n if not os.path.exists(dp):\n print('创建存储目录',dp)\n os.mkdir(dp)\n else:\n print(dp,'存储目录已经存在!')\n #csv文件的位置,无需事先创建\n store_file = 
dp+'/'+spider.honeyName+'.csv'\n if not os.path.exists(store_file):\n # 写入数据\n csvFile = open(store_file, \"a\",encoding='utf-8')\n writer = csv.writer(csvFile)\n print('写入键')\n writer.writerow(item.keys())\n writer.writerow(item.values()) \n else: \n #获得该步的去重字段\n dpF=self.dpFields[int(spider.honeyName.split('_')[1])]\n print('传入dpf',dpF)\n asyncio.run_coroutine_threadsafe(do_some_write(store_file,item,dpF), self.new_loop)#异步写入,最后一位为需要去重的字段\n except Exception as e:\n print (\"保存解析数据至本地异常!\",e)\n self.failCount_savedata=self.failCount_savedata+1\n self.MI._update_SaveData(self.failCount_savedata,spider.name)\n\n #判断时间间隔,大于5s,同步数据\n end_t=datetime.datetime.now()\n val=end_t-self.st\n if val.seconds>10 :\n #self.lockflga=False\n self.st=end_t#重置起始时间\n #middle_step=[]\n result_step=[]\n #查找路径下文件数\n for f_name in os.listdir(dp+'/'):\n #print(f_name)\n #找到需要关联的CSV文件\n #if 'middle' in f_name:\n #middle_step.append(f_name.replace('.csv',''))\n #找到最终结果文件\n if 'result' in f_name:\n result_step.append(f_name)\n if self.lock_push:#技术不成熟,暂不开放基于协程的异步io \n asyncio.run_coroutine_threadsafe(put_sql_data(self.sqlConfig,result_step,dp,self.sTe,self.sTe_key), self.new_loop)#推入数据库 \n else:#同步阻塞io\n self.old_push_data(self.sqlConfig,result_step,dp,self.sTe,self.sTe_key)\n return item\n\n def close_spider(self,spider):\n try:\n #将csv文件移动至备份文件夹\n dp=dirname(realpath(__file__))+'/Honey_CSV/'+spider.name+'/'\n des=str(datetime.datetime.now())+'_bak'\n os.mkdir(dp+des)\n for f_name in os.listdir(dp):\n if '.csv' in f_name or '.json' in f_name:\n shutil.move(dp+f_name,dp+des+'/')\n #停止异步循环\n self.new_loop.stop()\n except Exception as e:\n spider.logger.info('关闭%s异常!%s'%(spider.name,e))\n spider.EI_BLL._update_exceptionInfo(spider.name,str(e))\n #爬虫退出,修改数据库任务状态\n sta=self.m._select_Status(spider.name)[0][0]\n if(sta ==\"working\"):#确认为正在运行的任务\n self.m._change_Status(spider.name,'complete')\n\n '''\n def push_data(self,sqlConfig,result_step,dp,_ste,_ste_key):\n print(\"开始推数据\")\n '''\n\n\n def old_push_data(self,sqlConfig,result_step,dp,_ste,_ste_key):\n print('Result:推数据中。。。。')\n path=dp+'/dataFlag.json'\n table_name=dp.split('/')[-1]#拿到spider.name\n with open(path,'r',encoding='utf-8') as f:\n dataFlag=json.loads(f.read())#各个文件的推送标志位\n #推数据至数据库\n for oneResult in result_step:\n pushflag=True\n step_num=oneResult.split('_')[1]#步骤名\n step_flag=dataFlag[step_num]\n rs_fp=dp+'/'+oneResult#得到结果文件路径\n csv_data_all=pd.read_csv(rs_fp)#读取文件所有内容\n if step_flag < len(csv_data_all):#该文件未推送完毕\n csv_data_rs=csv_data_all.loc[step_flag:]\n if step_num in _ste.keys():#如果该步骤需要关联\n for mdResult in _ste[step_num]:#得到需要关联的步骤\n md_key=_ste_key[step_num][_ste[step_num].index(mdResult)]#得到关联字段\n md_fp=dp+'/'+'step_'+mdResult+'_middle'+'.csv'#得到关联文件路径\n try:\n if os.path.exists(md_fp):\n csv_data_md=pd.read_csv(md_fp)#读取文件所有内容\n csv_data_md.pop('files')#删除多余的下载文件键\n csv_data_temp=pd.merge(csv_data_rs,csv_data_md,on=md_key)#按照关联字段合并数据\n else:#说明需要关联的文件尚未生成好,跳过此次同步\n pushflag=False\n break\n if not csv_data_temp.empty:#如果不为空,说明关联正常,如果为空,说明关联失败,只推原数据,不能不推数据\n csv_data_rs=csv_data_temp\n except Exception as e:\n print(\"合并异常%s\"%(e))\n if pushflag:\n dataFlag[step_num]=step_flag+len(csv_data_rs)#记录推送位置\n csv_data_rs=csv_data_rs.astype(object).where(pd.notnull(csv_data_rs), None)#去除缺页项nan数据\n #csv_data_rs.rename(columns={'files_x':'files'}, inplace=True)#替换下载文件的键名\n data=dict(csv_data_rs)# 处理数据格式\n datas=list(map( dict, zip(*([(key, val) for val in data[key]] for key in data.keys()))))\n table_name=table_name+'_step_'+step_num+'_result'#表名\n 
print(\"##################datas#########################\")\n #print('datas',datas)\n print('dataFlag',dataFlag)\n table_keys=list(data.keys())#键名\n #数据库交互\n sqltype=sqlConfig[\"sqltype\"]\n if sqltype=='MySql':\n print('同步数据至MySql')\n \n self.cm.create_table(table_keys,table_name)\n self.cm.insert_MoreData(table_name,datas)\n elif sqltype=='SqlServer':\n print('同步数据至SqlServer')\n \n self.cs.create_table(table_keys,table_name)\n self.cs.insert_MoreData(table_name,datas)\n #更新标志位\n jsondata = json.dumps(dataFlag, ensure_ascii=False)\n with open(path, 'wb') as f:\n f.write(jsondata.encode('utf8'))\n \n","sub_path":"Hive_Server/Hive/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":15171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"318543203","text":"###########################################################################\n# Created by: Jianan Yang\n# Email: u7083746@anu.edu.au\n# Copyright (c) 2020\n###########################################################################\n\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .fcn import FCNHead\nfrom .base import BaseNet\n\n#from ssd.SSD import _SSD\n__all__ = ['DeepLabV3', 'get_deeplab']\n\nclass DeepLabV3(BaseNet):\n\tdef __init__(self,nclass, model_name,backbone, aux=True, se_loss=False, norm_layer=nn.BatchNorm2d, **kwargs):\n\t\tsuper(DeepLabV3, self).__init__(nclass, model_name,backbone, aux, se_loss, norm_layer=norm_layer, **kwargs)\n\n\t\tself.head = DeepLabV3Head(2048, nclass, norm_layer, self._up_kwargs)\n\n\t\tself.low_level_1 = nn.Sequential( #2x\n\t\t\tnn.Conv2d(128,32,1,bias=False),\n\t\t\tnorm_layer(32),\n\t\t\tnn.ReLU(True)\n\t\t\t)\n\n\t\tself.low_level_2 = nn.Sequential( #4x\n\t\t\tnn.Conv2d(256,48,1,bias=False),\n\t\t\tnorm_layer(48),\n\t\t\tnn.ReLU(True)\n\t\t\t)\n\t\t\n\t\tself.concat_conv_1 = nn.Sequential(\n\t\t\tnn.Conv2d(304,128,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\tnorm_layer(128),\n\t\t\tnn.ReLU(True),\n\t\t\tnn.Conv2d(128,128,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\tnorm_layer(128),\n\t\t\tnn.ReLU(True),\n\t\t\t)\n\n\t\tself.concat_conv_2 = nn.Sequential(\n\t\t\tnn.Conv2d(160,64,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\tnorm_layer(64),\n\t\t\tnn.ReLU(True),\n\t\t\tnn.Conv2d(64,32,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\tnorm_layer(32),\n\t\t\tnn.Conv2d(32,nclass,kernel_size=1,stride=1)\n\t\t\t)\n\n\t\t# self.feature_penultimate = nn.Sequential(\n\t\t# \tnn.Conv2d(160,32,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t# \tnorm_layer(32),\n\t\t# \tnn.ReLU(True)\n\t\t# \t)\n\n\t\t# self.category = nn.Conv2d(32,nclass,kernel_size=1,stride=1)\n\t\tself.edge_conv = nn.Sequential(\n\t\t\tnn.Conv2d(160,64,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\tnorm_layer(64),\n\t\t\tnn.ReLU(True),\n\t\t\t# nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\t# norm_layer(64),\n\t\t\t# nn.Conv2d(64,2,kernel_size=3,stride=1,padding=1,bias=False)\n\t\t\tnn.Conv2d(64,2,kernel_size=1,stride=1)\n\t\t\t)\n\n\t\tself.objectness = nn.Sequential(\n\t\t\tnn.Conv2d(160,64,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\tnorm_layer(64),\n\t\t\tnn.ReLU(True),\n\t\t\t# nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,bias=False),\n\t\t\t# norm_layer(64),\n\t\t\t# nn.Conv2d(64,2,kernel_size=3,stride=1,padding=1,bias=False)\n\t\t\tnn.Conv2d(64,2,kernel_size=1,stride=1)\n\t\t\t) #foreground or background\n\n\tdef 
forward(self, x,im_info,gt_boxes,num_boxes):\n\t\t#Space for decoder\n\t\t_, _, h, w = x.size()\n\t\tc0,c1, c2, c3, c4 = self.base_forward(x)\n#\t\tprint (c0.size())\n\t\t#detection head\n\t\tfeature_maps = [c0,c1,c4]\n\n\n\t\t#decoder\n\t\tlow_level_features1 = self.low_level_1(c0) #2x\n\t\tlow_level_features2 = self.low_level_2(c1) #4x\n\t\t#print (low_level_features1.shape,low_level_features2.shape)\n\t\tx = self.head(c4)\n\t\t#print (x.shape)\n\t\tx = F.interpolate(x,(h//4,w//4),**self._up_kwargs)\n\t\t\n\t\tconcated = torch.cat((low_level_features2,x),1)\n\t\t#print (concated.shape)\n\t\t# objects = F.interpolate(concated,(h,w),**self._up_kwargs)\n\n\t\tconcated = self.concat_conv_1(concated)\n\t\t#print (concated.shape)\n\t\tx = F.interpolate(concated, (h//2,w//2), **self._up_kwargs)\n\n\t\t\n\t\tconcated = torch.cat((low_level_features1,x),1)\n\n\t\t#print (concated.shape)\n\t\t\n\t\tobject_edge = F.interpolate(concated,(h,w),**self._up_kwargs)\n\t\t# #print (object_edge.shape)\n\t\t# concated = self.concat_conv_2(concated)\n\n\n\t\t# x = F.interpolate(concated,(h,w), **self._up_kwargs)\n\n\t\t# feature_map = self.feature_penultimate(object_edge)\n\t\t# x = self.category(feature_map)\n\t\tx = self.concat_conv_2(object_edge)\n\t\tedge = self.edge_conv(object_edge)\n\t\tobjectness_score = self.objectness(object_edge) #batch_size x 2 x H x W\n\n\t\t# rois,rpn_loss_cls,rpn_loss_box = self._SSD(feature_maps,im_info,gt_boxes,num_boxes)\n\t\treturn objectness_score,x,edge\n\n\tdef val_forward(self,x):\n\t\t_, _, h, w = x.size()\n\t\tc0,c1, c2, c3, c4 = self.base_forward(x)\n\n\t\tlow_level_features1 = self.low_level_1(c0)\n\t\tlow_level_features2 = self.low_level_2(c1)\n\n\t\tx = self.head(c4)\n\n\t\tx = F.interpolate(x,(h//4,w//4),**self._up_kwargs)\n\n\t\tconcated = torch.cat((low_level_features2,x),1)\n\n\t\t# feature_vectors = F.interpolate(concated,(h,w),**self._up_kwargs)\n\t\tconcated = self.concat_conv_1(concated)\n\n\t\tx = F.interpolate(concated, (h//2,w//2), **self._up_kwargs)\n\n\t\tconcated = torch.cat((low_level_features1,x),1)\n\n\t\tobject_edge = F.interpolate(concated,(h,w),**self._up_kwargs)\n\t\t# concated = self.concat_conv_2(concated)\n\n\t\t# x = F.interpolate(concated,(h,w),**self._up_kwargs)\n\t\tx = self.concat_conv_2(object_edge)\n\t\t# feature_map = self.feature_penultimate(object_edge)\n\t\t# x = self.category(feature_map)\n\t\tedge = self.edge_conv(object_edge)\n\t\tobjectness_score = self.objectness(object_edge) #whether the pixel is fg/bg, batch_size x 2 x H x W\n\t\treturn x,objectness_score,edge\n\nclass DeepLabV3Head(nn.Module):\n\tdef __init__(self, in_channels, out_channels, norm_layer, up_kwargs, atrous_rates=(12, 24, 36)):\n\t\tsuper(DeepLabV3Head, self).__init__()\n\t\tinter_channels = in_channels // 8\n\t\tself.aspp = ASPP_Module(in_channels, atrous_rates, norm_layer, up_kwargs)\n\t\tself.block = nn.Sequential(\n\t\t\tnn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n\t\t\tnorm_layer(inter_channels),\n\t\t\tnn.ReLU(True),\n\t\t\tnn.Dropout2d(0.1, False),\n\t\t\tnn.Conv2d(inter_channels, out_channels, 1))\n\n\tdef forward(self, x):\n\t\tx = self.aspp(x)\n\t\t# x = self.block(x)\n\t\treturn x\n\n\ndef ASPPConv(in_channels, out_channels, atrous_rate, norm_layer):\n\tblock = nn.Sequential(\n\t\tnn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate,\n\t\t\t\t dilation=atrous_rate, bias=False),\n\t\tnorm_layer(out_channels),\n\t\tnn.ReLU(True))\n\treturn block\n\nclass AsppPooling(nn.Module):\n\tdef __init__(self, in_channels, 
out_channels, norm_layer, up_kwargs):\n\t\tsuper(AsppPooling, self).__init__()\n\t\tself._up_kwargs = up_kwargs\n\t\tself.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1),\n\t\t\t\t\t\t\t\t nn.Conv2d(in_channels, out_channels, 1, bias=False),\n\t\t\t\t\t\t\t\t norm_layer(out_channels),\n\t\t\t\t\t\t\t\t nn.ReLU(True))\n\n\tdef forward(self, x):\n\t\t_, _, h, w = x.size()\n\t\tpool = self.gap(x)\n\n\t\treturn F.interpolate(pool, (h,w), **self._up_kwargs)\n\nclass ASPP_Module(nn.Module):\n\tdef __init__(self, in_channels, atrous_rates, norm_layer, up_kwargs):\n\t\tsuper(ASPP_Module, self).__init__()\n\t\tout_channels = in_channels // 8\n\t\trate1, rate2, rate3 = tuple(atrous_rates)\n\t\tself.b0 = nn.Sequential(\n\t\t\tnn.Conv2d(in_channels, out_channels, 1, bias=False),\n\t\t\tnorm_layer(out_channels),\n\t\t\tnn.ReLU(True))\n\t\tself.b1 = ASPPConv(in_channels, out_channels, rate1, norm_layer)\n\t\tself.b2 = ASPPConv(in_channels, out_channels, rate2, norm_layer)\n\t\tself.b3 = ASPPConv(in_channels, out_channels, rate3, norm_layer)\n\t\tself.b4 = AsppPooling(in_channels, out_channels, norm_layer, up_kwargs)\n\n\t\tself.project = nn.Sequential(\n\t\t\tnn.Conv2d(5*out_channels, out_channels, 1, bias=False),\n\t\t\tnorm_layer(out_channels),\n\t\t\tnn.ReLU(True),\n\t\t\tnn.Dropout2d(0.5, False)\n\t\t\t)\n\n\tdef forward(self, x):\n\t\tfeat0 = self.b0(x)\n\t\tfeat1 = self.b1(x)\n\t\tfeat2 = self.b2(x)\n\t\tfeat3 = self.b3(x)\n\t\tfeat4 = self.b4(x)\n\n\t\ty = torch.cat((feat0, feat1, feat2, feat3, feat4), 1)\n\n\t\treturn self.project(y)\n\n\ndef get_deeplab(nclass,model_name,dataset='sciencebirds', backbone='resnet50', pretrained=False,\n\t\t\t\troot='~/.encoding/models', **kwargs):\n\t# infer number of classes\n\tfrom ..datasets import datasets\n\tmodel = DeepLabV3(nclass,model_name, backbone=backbone, root=root, **kwargs)\n\tif pretrained:\n\t\traise NotImplementedError\n\n\treturn model\n","sub_path":"encoding/models/deeplabv3.py","file_name":"deeplabv3.py","file_ext":"py","file_size_in_byte":7512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"629057226","text":"N = int(input())\nS = input()\n\ndef main():\n s = list(S)\n ans = 0\n for i in range(N):\n x = s[:i]\n y = s[i:]\n intersection = set(x) & set(y)\n ans = max(ans, len(intersection))\n print(ans)\n\nif __name__ == \"__main__\":\n main()","sub_path":"beginner_98/cut_and_count.py","file_name":"cut_and_count.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"560101041","text":"#THOMAS Charles\n#ROSSIGNOL Adelin\n#Programme du jeu de la vie\nfrom upemtk import *\nfrom time import sleep\n\ndef init_plateau (nb_case):\n\n '''Initialise le plateau composé d'une liste de liste à \n valeures initiales False.'''\n \n plateau = [[False]*nb_case]*nb_case \n \n for i in range(1,nb_case):\n plateau[i]=plateau[i-1].copy()\n \n return plateau\n\ndef affichage_plateau(plateau):\n \n '''Dessine le plateau de jeu et permet le changement\n de couleur des cases.'''\n \n for i in range (len(plateau)):\n for j in range (len(plateau)):\n \n if plateau [i][j]:\n couleur ='black'\n else:\n couleur ='white'\n \n rectangle(50*i,50*j,50*(i+1),50*(j+1),couleur='black',remplissage=couleur)\n \ndef saisie_coord(saisie):\n '''Créer la liste des coordonnées des cellules vivantes initiales.'''\n \n coor_saisie=[]\n on = True \n \n saisie_souris()\n iterations=input(\"Combien voulez vous d'itérations ? 
\")\n if iterations != 'inf':\n ite_intermediaires=str(input(\"Voulez-vous voir les itérations intermédiaires ? (o/n) \"))\n else:\n ite_intermediaires = 'o'\n return iterations,ite_intermediaires\n\ndef pixel_vers_case(x,y):\n '''convertit les pixels vers l'unité des cases'''\n i=x//50\n j=y//50\n return (i,j)\n\ndef saisie_souris():\n '''Fonction gérant la saisie à la souris des cases initilaes'''\n \n var=True\n while var:\n coor_saisie=[]\n affichage_plateau(plateau)\n texte(0,0,compt_tour,couleur='red',taille=30)\n mise_a_jour()\n ev = donne_ev()\n ty = type_ev(ev)\n \n if ty==\"ClicGauche\":\n x,y=abscisse(ev),ordonnee(ev)\n \n i,j=pixel_vers_case(x,y)\n coor_saisie.append((i,j))\n inversion_intiale(plateau,coor_saisie)\n\n if ty==\"ClicDroit\":\n var=False\n\ndef survie(plateau):\n \n '''Définie les règles de mort ou de vie d'une case. \n Retourne une liste de coordonnées.'''\n \n inverse=[]\n \n for i in range (0,len(plateau)): #\n for j in range (0,len(plateau[i])): # Boucles imbriquées permettant la lecture des cases du plateau.\n \n cmpt = voisine(i,j,plateau) # Comptage du nombre de voisines de chaque case du plateau.\n case = plateau[i][j] # Définition de la case verifiée.\n \n if cmpt<2 and case == True: # Mort de la case si elle possède moins de 2 voisines.\n inverse.append((i,j))\n elif cmpt > 3 and case == True: # Mort si les voisines > 3.\n inverse.append((i,j))\n elif cmpt == 3 and case == False: # S'il y a 3 voisines apparition d'une case.\n inverse.append((i,j))\n return inverse \n \ndef voisine(i,j,plateau):\n \n '''Comptage du nombre de voisines vivantes d'une case.'''\n \n cmpt=0\n \n for a in range (i-1,i+2): #\n for b in range (j-1,j+2): # Boucles imbriquées parcourant les cases adjacentes.\n \n if 0<=a19:\n valeur=int(input('Valeur incorrecte, ressaisir la valeur : '))\n \n return valeur\n\ndef inversion_intiale(plateau,coor_saisie):\n \n '''Permet l'actualisation des cases vivantes initiales.'''\n \n for (k,l) in coor_saisie:\n plateau[k][l]= not plateau[k][l]\n affichage_plateau(plateau)\n \ndef affiche_tout(plateau,compt_tour):\n efface_tout()\n affichage_plateau(plateau)\n \n compt_tour+=1\n texte(0,0,compt_tour,couleur='red',taille=30)\n mise_a_jour()\n return compt_tour\n\ndef affichage_menu(menu):\n '''Fonction gérant le clic dans le menu jouer'''\n ev = donne_ev()\n tev = type_ev(ev)\n \n if tev == \"ClicGauche\":\n if 320<=abscisse(ev)<=680 and 350<=ordonnee(ev)<=460 :\n menu= False\n \n if 320<=abscisse(ev)<=680 and 490<=ordonnee(ev)<=590 : \n #case quitter\n ferme_fenetre()\n\n mise_a_jour()\n return menu\n\ndef affichage_rejouer(rejouer,menu,jouer):\n '''Fonction gérant le clic dans le menu rejouer'''\n ev = attend_ev()\n tev = type_ev(ev)\n\n if tev == \"ClicGauche\":\n if 320<=abscisse(ev)<=680 and 350<=ordonnee(ev)<=460 :\n rejouer = True\n menu = True\n jouer = True \n\n if 320<=abscisse(ev)<=680 and 490<=ordonnee(ev)<=590 : \n #case quitter\n ferme_fenetre()\n\n mise_a_jour()\n return rejouer,menu,jouer\n\ndef affiche_menu(taille_fenetre,menu):\n '''Gére l'affichage du menu'''\n if menu==True:\n string='Jouer'\n else:\n string='Rejouer'\n x1,y1 = taille_texte(texte(500,400,string,taille=40,ancrage='center'))\n x3,y3 = taille_texte(texte(500,530,'Quitter',taille=40,ancrage='center'))\n\n rectangle(320, 350, 680, 460)\n rectangle(320, 490, 680, 590)\n\n###################################\n# BOUCLE DE JEU #\n###################################\n\nnb_case=50\ntaille_case=20\nsaisie='auto'\njouer = True\ncompt_tour = 
0\ncree_fenetre(1000,1000)\n\n\n\njeu = True\nmenu = True\nrejouer = True\n\nefface_tout()\naffiche_menu((1000,1000),menu)\n\nwhile rejouer:\n plateau = init_plateau(nb_case)\n\n while menu :\n menu=affichage_menu(menu)\n \n efface_tout()\n affichage_plateau(plateau)\n texte(0,0,compt_tour,couleur='red',taille=30)\n \n saisie='clavier'\n if jouer==True:\n if saisie == 'clavier':\n print('clavier')\n coor_saisie,iterations,ite_intermediaires=saisie_coord(saisie)\n inversion_intiale(plateau,coor_saisie)\n else:\n iterations,ite_intermediaires=saisie_coord(saisie)\n\n lst_mod = ['inactif','inactif']\n\n if iterations == 'inf':\n while jouer:\n ev=donne_ev()\n tev=type_ev(ev)\n \n if tev == None:\n inverse(plateau)\n compt_tour=affiche_tout(plateau,compt_tour)\n sleep(0.5)\n else:\n jouer=False\n \n \n elif ite_intermediaires == 'o':\n\n iterations=int(iterations)\n for i in range (iterations):\n \n iterations_visibles(ite_intermediaires)\n inverse(plateau)\n compt_tour=affiche_tout(plateau,compt_tour)\n attend_ev()\n\n elif ite_intermediaires == 'n':\n iterations=int(iterations)\n for i in range (iterations):\n \n iterations_visibles(ite_intermediaires)\n inverse(plateau)\n compt_tour+=1\n \n compt_tour=affiche_tout(plateau,compt_tour) \n attend_ev()\n \n efface_tout()\n affiche_menu((1000,1000),menu)\n\n rejouer,menu,jouer = affichage_rejouer(rejouer,menu,jouer)\n compt_tour=0\n\nferme_fenetre()","sub_path":"le_jeu_de_la_vie.py","file_name":"le_jeu_de_la_vie.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"434021462","text":"##############################################################################\n#\n# Copyright (C) Zenoss, Inc. 2013, all rights reserved.\n#\n# This content is made available according to terms specified in\n# License.zenoss under the directory where your Zenoss product is installed.\n#\n##############################################################################\n\nfrom Products.ZenRelations.ToManyRelationship import ToManyRelationshipBase\nfrom Products.ZenRelations.ToOneRelationship import ToOneRelationship\nfrom Products.ZenUtils.guid.interfaces import IGlobalIdentifier\n\nfrom ZenPacks.zenoss.Impact.impactd.relations import ImpactEdge\n\nfrom .macs_catalog import CatalogAPI\n\n\nRP = 'ZenPacks.zenoss.Layer2'\nAVAILABILITY = 'AVAILABILITY'\nPERCENT = 'policyPercentageTrigger'\nTHRESHOLD = 'policyThresholdTrigger'\n\n\ndef guid(obj):\n return IGlobalIdentifier(obj).getGUID()\n\n\ndef edge(source, target):\n return ImpactEdge(source, target, RP)\n\n\nclass BaseRelationsProvider(object):\n '''\n Basic class for impact relations\n '''\n relationship_provider = RP\n\n impact_relationships = None\n impacted_by_relationships = None\n\n def __init__(self, adapted):\n self._object = adapted\n\n def belongsInImpactGraph(self):\n return True\n\n def guid(self):\n if not hasattr(self, '_guid'):\n self._guid = guid(self._object)\n\n return self._guid\n\nclass DeviceRelationsProvider(BaseRelationsProvider):\n '''\n Adds upstream router(s) as dependency to device on impact graph\n '''\n def getEdges(self):\n cat = CatalogAPI(self._object.zport)\n for brain in cat.get_upstream_devices(self._object.id):\n router = brain.getObject()\n yield edge(guid(router), self.guid())\n","sub_path":"ZenPacks/zenoss/Layer2/impact.py","file_name":"impact.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
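The impact.py record above closes with DeviceRelationsProvider, which turns each upstream router found in the Layer2 MAC catalog into an impact edge pointing at the device. A minimal, self-contained sketch of that edge-building pattern follows; the namedtuple, the in-memory catalog dict, and the guid_of lambda are hypothetical stand-ins for ImpactEdge, CatalogAPI and IGlobalIdentifier, not part of the real ZenPack API.

from collections import namedtuple

# Stand-in for ZenPacks.zenoss.Impact's ImpactEdge(source, target, provider).
ImpactEdge = namedtuple('ImpactEdge', ['source', 'target', 'provider'])

RP = 'ZenPacks.zenoss.Layer2'

def upstream_edges(device_id, catalog, guid_of):
    """Yield one impact edge per upstream router of device_id.

    catalog maps a device id to the routers it depends on; guid_of
    converts an object to its globally unique id, playing the role
    IGlobalIdentifier plays in the real provider.
    """
    for router in catalog.get(device_id, []):
        # The router is the impacting source; the device is the target.
        yield ImpactEdge(guid_of(router), guid_of(device_id), RP)

# Toy usage: one access switch that depends on two core routers.
catalog = {'switch-access-01': ['router-core-01', 'router-core-02']}
for e in upstream_edges('switch-access-01', catalog, guid_of=lambda obj: 'guid/' + obj):
    print(e)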
+{"seq_id":"484943200","text":"import numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\ndef preprocessing_data(image,mask):\n image=np.array(image)\n mask=np.array(mask)\n image=image/255\n mask=mask/255\n mask[mask<=0.5]=0\n mask[mask>0.5]=0\n return image,mask\n\ndef generate_data(train_path, data_folder, aug_dict, data_type=None,save_to_dir=None, seed=1):\n datagen=ImageDataGenerator(**aug_dict)\n data_generator=datagen.flow_from_directory(\n train_path,\n classes=[data_folder],\n class_mode=None,\n color_mode=\"grayscale\",\n target_size=[256, 256],\n save_to_dir=save_to_dir,\n save_prefix=data_type,\n seed=seed\n )\n return data_generator\n\ndef zip_data(image_generator, mask_generator):\n train_generator=zip(image_generator, mask_generator)\n for (image, mask) in train_generator:\n image,mask=preprocessing_data(image, mask)\n yield(image, mask)","sub_path":"preparedata.py","file_name":"preparedata.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"320756572","text":"path = './data/lst/'\n\ndef get_candidates():\n candidates = dict()\n # scrap.n::discarded item;album;clipping;piece;crumb;fragment;recycling;morsel;collage;shred;odds and ends;rubbish;waste metal;leftover;bit;waste;remainder;trade in\n with open(path + '/lst.gold.candidates', 'r') as f:\n for line in f:\n s = line.strip().split('::')\n identifier = s[0]\n candidates[identifier] = [word for word in s[1].split(';') if ' ' not in word and '-' not in word]\n\n return candidates\n\ndef create_lst_file(score_fn, fn_out='/lst.out'):\n\n candidates = get_candidates()\n\n # side.n\t301\t29\ton sunday at craven cottage , jose mourinho and his all stars exhibited all of the above symptoms and they were made to pay the price by a fulham side that had in previous weeks woken up after matches with their heads kicked in .\n with open(path + '/lst_test.preprocessed', 'r') as f, open(fn_out, 'w') as f_out:\n for line in f:\n id_word, id, index, sentence= line.strip().split('\\t')\n \n sentence = sentence.split()\n scores = score_fn(candidates[id_word], sentence, index)\n scores = sorted(scores.items(), key=lambda x:-x[1])\n\n # RANKED| side.n 301| hand -2.7787| part -5.9708| against -7.5731|shore -11.5640| surface -11.5733| faction -12.7584| facet -13.6011| aspect -13.6443|bank -14.4334| perspective -1 5.0994| area -15.1572| standpoint -15.1597|view -15.4184| divide -16.0836|conversely -16.7894|ally -18.5698| edge -19.0853| contingent -19.3950|team -21.0785| boundary -21.1545| fringe -22.5431|flank -24.0384| position -32.6632| instead -33.1494| you -35.0130| responsibility -35.7164\n to_write = 'RANKED\\t{} {}'.format(id_word, id)\n for candidate, score in scores:\n to_write += '\\t{} {}'.format(candidate, score)\n to_write += '\\n'\n f_out.write(to_write)\n\n\n# Example of score function\ndef random_score_function(candidates, sentence, index):\n from random import uniform\n\n # just predict a random score for each candidate\n scores = [uniform(-1,1) for _ in candidates]\n return dict(zip(candidates, scores))\n\nif __name__ == '__main__':\n create_lst_file(random_score_function)\n","sub_path":"assignment2/embedalign/lst.py","file_name":"lst.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"335734160","text":"#!/usr/bin/env python3\n\nfrom hpecp import ContainerPlatformClient\nimport json,sys,subprocess\nimport 
os\n\n# Disable the SSL warnings - don't do this on productions! \nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nos.environ[\"LOG_LEVEL\"] = \"INFO\"\n\ntry:\n with open('./generated/output.json') as f:\n j = json.load(f)\nexcept: \n print(80 * \"*\")\n print(\"ERROR: Can't parse: './generated/output.json'\")\n print(80 * \"*\")\n sys.exit(1)\n\ncontroller_public_ip = j[\"controller_public_ip\"][\"value\"]\nad_server_private_ip = j[\"ad_server_private_ip\"][\"value\"]\n\n\nclient = ContainerPlatformClient(username='admin', \n password='admin123', \n api_host=controller_public_ip, \n api_port=8080,\n use_ssl=True,\n verify_ssl=False)\n\nclient.create_session()\n\n################\n# Add Gateway # \n################\n\ngateway_host_ip = j[\"gateway_private_ip\"][\"value\"]\ngateway_host_dns = j[\"gateway_private_dns\"][\"value\"]\n\nwith open('/certs/controller.prv_key', 'r') as f:\n prvkey = f.read()\n\ngw_id = client.worker.add_gateway(\n data ={\n \"ip\": gateway_host_ip,\n \"credentials\":{\n \"type\":\"ssh_key_access\",\n \"ssh_key_data\":prvkey\n },\n \"tags\":[],\n \"proxy_nodes_hostname\":gateway_host_dns,\n \"purpose\":\"proxy\"\n }\n )\n\n# wait 10 minutes for gateway to have state of 'installed'\nclient.worker.wait_for_gateway_state(id=gw_id, timeout_secs=600, state=['installed'])\n","sub_path":"scripts/end_user_scripts/hpe_admin/add_gateway.py","file_name":"add_gateway.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"346813977","text":"def long_paths(tree, n):\r\n \"\"\"Return a list of all paths in tree with length at least n.\r\n The path is represented as a linked list of node values that starts from root and ends at leaf.\r\n The length of a path is the number of edges in the path (i.e. one less than the number of nodes in the path).\r\n Paths are listed in order from left to right.\r\n\r\n >>> t = Tree(3, [Tree(4), Tree(4), Tree(5)])\r\n >>> left = Tree(1, [Tree(2), t])\r\n >>> mid = Tree(6, [Tree(7, [Tree(8)]), Tree(9)])\r\n >>> right = Tree(11, [Tree(12, [Tree(13, [Tree(14)])])])\r\n >>> whole = Tree(0, [left, Tree(13), mid, right])\r\n >>> for path in long_paths(whole, 2):\r\n ... print(path)\r\n ...\r\n <0 1 2>\r\n <0 1 3 4>\r\n <0 1 3 4>\r\n <0 1 3 5>\r\n <0 6 7 8>\r\n <0 6 9>\r\n <0 11 12 13 14>\r\n >>> for path in long_paths(whole, 3):\r\n ... 
print(path)\r\n ...\r\n <0 1 3 4>\r\n <0 1 3 4>\r\n <0 1 3 5>\r\n <0 6 7 8>\r\n <0 11 12 13 14>\r\n >>> long_paths(whole, 4)\r\n [Link(0, Link(11, Link(12, Link(13, Link(14)))))]\r\n \"\"\"\r\n \"*** YOUR CODE HERE ***\"\r\n if tree.is_leaf():\r\n if n <= 0: # if n <= 0, which means the longth of paths is greater than n, because everytime you add a node, n = n-1\r\n return [Link(tree.label)]\r\n return [] # if n > 0, return a [].\r\n else:\r\n paths = []\r\n for b in tree.branches:\r\n paths += long_paths(b, n-1)\r\n return [Link(tree.label, path) for path in paths] # if paths is [], then this will return []\r\n #Official answer:\r\n \"*** YOUR CODE HERE ***\" # the answer is much conciser than mine.\r\n paths = []\r\n if n <= 0 and tree.is_leaf():\r\n paths.append(Link(tree.label))\r\n for b in tree.branches:\r\n for path in long_paths(b, n-1):\r\n paths.append(Link(tree.label, path))\r\n return paths # if no condition satisfied, return [].\r\n","sub_path":"functions/processing_tree/tree/long_paths.py","file_name":"long_paths.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"294343825","text":"import Item\nfrom datetime import date\n\ndef main():\n print(\"Main\")\n Item.createTables()\n owner_steff = Item.Owner(name='Steffen')\n owner_steff.save()\n item = Item.Item(owner=owner_steff, name='Mælk', date=date(2016, 9, 25))\n item.save()\n item2 = Item.Item(owner=owner_steff, name='Ost', date=date(2016, 9, 25))\n item2.save()\n\nif __name__ == \"__main__\": main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557203924","text":"import numpy as np\nimport pandas as pd\nimport cashflow\n\n# This file contains all the calculations for the cashflow tab\n\ndef CashFlowFunction(Yr1COE, UseLife, Yr1TariffRateEsc, CostBasedTEscR, GenEqCost, BOPCost,\\\n\tInterconCost, DevCFCost, PercDebt, DebtTerm, IntRateDebt, production, Royal, PayDur, LastDay,\\\n\tOMcostinfl, OMcostinflafter, FixedOandM, genNPC, VarOandM, Insuryr1, ProjMan, PILOT, PropTaxAd,\\\n\tLandLease, monthsDebt, monthsOM, firstRep, secondRep, OneEqRepl, TwoEqRepl, ReserveReq,\\\n\tintReserve, LenderFee, IntConst, ClosingCosts, macrs, macrs_halfyear, depY1, OneReplCost,\\\n\tTwoReplCost, EffIncomeTaxRate, StateTaxRate, FedTaxRate, AfterTaxEquity, ITC, ITCutilization):\n\n\t# Tariff Rate & Cash Incentives (assume no PBI)\n\ttariff = cashflow.TariffRate(UseLife, Yr1TariffRateEsc, CostBasedTEscR, Yr1COE)\n\t# print('Tariff Info')\n\t# print(tariff)\n\n\t# Simplified inputs for DebtLoan\n\tTotValGrants = 0 #SIMPLIFIED! 
Can calculate Total Value of Grants\n\n\t# Debt Service, Loan Repayment, and Loan Amortization\n\tloanRep, sizeDebt = cashflow.DebtLoan(GenEqCost, BOPCost, InterconCost, DevCFCost, \\\n\t\tTotValGrants, PercDebt, UseLife, DebtTerm, IntRateDebt)\n\t# print('Loan Information Matrix')\n\t# print(loanRep)\n\t# print('Size of Debt')\n\t# print(sizeDebt)\n\n\t# Inputs needed for Royalties\n\tTariffRate = tariff[5,:]\n\tRevfromTar = np.multiply(TariffRate,production[1,:])/100\n\tPTMarValue = np.zeros((1,UseLife)) # SIMPLIFIED,but doesn't matter\n\tFedCashIncRate = np.zeros((1,UseLife)) #SIMPLIFIED, but doesn't matter\n\tStateCashIncRate = np.zeros((1,UseLife)) #SIMPLIFIED, but doesn't matter\n\n\t# Royalties\n\troyalties = cashflow.Royalties(UseLife, Royal, PayDur, RevfromTar, PTMarValue, \\\n\tFedCashIncRate, StateCashIncRate, production)\n\n\t# Project Expenses\n\tTotalOpExp = cashflow.TotalOpExpenses(LastDay, UseLife, OMcostinfl, \\\n\t\tOMcostinflafter, FixedOandM, genNPC, VarOandM, Insuryr1, ProjMan, \\\n\t\tPILOT, PropTaxAd, LandLease, royalties, production)\n\t# print('Total Operating Expenses Matrix')\n\t# print(TotalOpExp)\n\n\n\t#_________________INTERLUDE OF CALCULATIONS______________________\n\t#************Calculation: Initial Debt Service Reserve************\n\t# Units: $ \n\tInitialDebtServRes = -loanRep[2,0]/12 * monthsDebt\n\t#******************************************\n\t#************Calculation: Initial Debt Service Reserve************\n\t# Units: $ \n\tInitialDebtServRes = -loanRep[2,0]/12 * monthsDebt\n\t#******************************************\n\n\t#************Calculation: Initial O&M and WC Reserve************\n\t# Units: $\n\tInitialOMandWCRes = -(np.average(TotalOpExp[7,:]) / 12 * monthsOM)\n\t#******************************************\n\t#_________________________________________________________________\n\n\n\t# Reserve Accounts\n\treserveAcc = cashflow.ReserveAccounts(UseLife, firstRep, secondRep,InitialDebtServRes, \\\n\t\tInitialOMandWCRes,DebtTerm, OneEqRepl, TwoEqRepl, ReserveReq, PayDur,intReserve)\n\t# print(reserveAcc)\n\t\n\t# Inputs needed for Project Revenue\n\tInterestEarned = reserveAcc[6,:]\n\t# print(reserveAcc[6,:])\n\n\t# Project Revenue, All Sources\n\tprojectRevenue = cashflow.ProjectRevenue(UseLife, PayDur, RevfromTar, \\\n\t\tPTMarValue, FedCashIncRate, StateCashIncRate, InterestEarned, production)\n\n\t# EBITDA\n\tebitda = cashflow.EBITDA(TotalOpExp[7,:], projectRevenue, UseLife)\n\t# print('EBITDA')\n\t# print(ebitda)\n\n\n\t#_________________INTERLUDE OF CALCULATIONS______________________\n\n\t#************Calculation: Reserves & Financing Costs************\n\t# Units: $\n\tResFinCost = LenderFee * PercDebt * (GenEqCost + BOPCost + InterconCost + DevCFCost) + \\\n\tIntConst+ ClosingCosts + InitialDebtServRes +InitialOMandWCRes\n\t#***********************************************************\n\t#************Calculation: Installed Costs (before rebates/grants)************\n\t# Units: $\tTotal Installed Cost\n\tTotalInstallCost = GenEqCost + BOPCost + InterconCost + DevCFCost + ResFinCost\n\t# Installed Costs in $/Wattdc\n\tInstallperwatt = TotalInstallCost / genNPC / 1000\n\t#***********************************************************\n\t#_________________________________________________________________\n\n\t# Inputs required for Depreciation and Tax Calculations (assume Yes tax entity, ITC)\n\tFederalITC = cashflow.PreDepreciation(macrs, macrs_halfyear, GenEqCost, BOPCost, \\\n\t\tInterconCost,DevCFCost, ResFinCost) * ITC * 
ITCutilization\n\n\t# Depreciation\n\t# depsch: Project Cost Allocation table\n\t# annualDepExp: Depreciation Expense, Initial Installation\n\t# depReplRepairs: Depreciation Expense, REpairs & Replacements\n\t# firstRep: Cost for 1st replacement\n\t# secondRep: Cost for 2nd replacement\n\tdepsch, annualDepExp, depReplRepairs = cashflow.Depreciation(macrs, \\\n\t\tmacrs_halfyear, GenEqCost, BOPCost, InterconCost,DevCFCost, ResFinCost, depY1, \\\n\t\tUseLife, FederalITC, genNPC, OneEqRepl, OneReplCost,TwoEqRepl, TwoReplCost, \\\n\t\tEffIncomeTaxRate, firstRep, secondRep)\n\t# print('The table for Depreciation Schedules, Half Year Convention is: ')\n\t# print(depsch)\n\t# print('The table for Annual DepreciationExpense, Initial Installation is: ')\n\t# print(annualDepExp)\n\t# Check the values:\n\t# print(np.sum(annualDepExp, axis=1))\n\t# print('The table for Annual DepreciationExpense, Initial Installation is: ')\n\t# print(depReplRepairs)\n\n\t# Operating Income After Interest Expense\n\tOpIncAftIntExp = loanRep[1,:] + ebitda\n\t# print('Operating Income After Interest Expense')\n\t# print(OpIncAftIntExp)\n\n\t# Pre-Tax CashFlow to Equity\n\t\t# Repayment of Loan Principle: loanRep, row 0\n\t\t# (Contributions to), and Liquidation of, Reserve Accounts: -reserveacc, row 7\n\t\t# Adjustment(s) for Major Equipment Replacement(s): min of 0 and reserveacc, row 3\n\t\t# Also needs Operating Income after Interest Expense\n\tpreTaxCashFlowEquity = loanRep[0,:] - reserveAcc[7,:] + np.minimum(reserveAcc[3,:], 0) \\\n\t+ OpIncAftIntExp\n\t# print(preTaxCashFlowEquity)\n\n\t# Net Pre-Tax Cash Flow to Equity\n\t# Assume initial equity investment, then $0 for rest of time\n\t# Thus, \"Net Pre-Tax Cash Flow to Equity\" is the same as\n\t# \"Pre-Tax Cash Flow to Equity\" just with a year 0 as the initial\n\t# equity investment\n\tinitialEquityInv = -(TotalInstallCost - TotValGrants - sizeDebt)\n\tNetPreTaxCashFlowEquity = np.append(initialEquityInv, preTaxCashFlowEquity)\n\n\t# Running IRR (Cash Only)\n\trunningIRRcashonly = cashflow.RunningIRR(NetPreTaxCashFlowEquity, UseLife)\n\t# print(runningIRR)\n\n\t# Taxes\n\ttaxInfo = cashflow.Taxes(OpIncAftIntExp, depReplRepairs, UseLife, StateTaxRate, \\\n\t\tFedTaxRate, FederalITC, NetPreTaxCashFlowEquity)\n\t# np.set_printoptions(formatter={'float_kind':'{:f}'.format})\n\t# print(taxInfo)\n\n\t# Add on initial equity investment before calculating IRR\n\tAfterTaxCashFlowEquity = np.append(initialEquityInv, taxInfo[4,:])\n\n\t# Running IRR (After Tax)\n\trunningIRRaftertax = cashflow.RunningIRR(AfterTaxCashFlowEquity, UseLife)\n\n\t#******************Calculations for IRR/NPV (blue box in spreadsheet)\n\t# Pre-Tax (Cash-only) Equity IRR (over defined Useful Life)\n\tPreTaxCashEquityIRR = runningIRRcashonly[0,UseLife-1]\n\t# print(PreTaxCashEquityIRR)\n\t# After Tax Equity IRR (over defined Useful Life)\n\t# AfterTaxEquityIRR = runningIRRaftertax[0,UseLife-1]\n\t# Net Present Value @ 12% (over defined Useful Life)\n\t# (needed to add zero to beginning because numpy NPV is calculated differently)\n\tAfterTaxCashFlowEquityMOD = np.append([[0]],AfterTaxCashFlowEquity)\n\tNPV = np.npv(AfterTaxEquity, AfterTaxCashFlowEquityMOD)\n\n\treturn Yr1COE, NPV, TotalInstallCost, TotalOpExp[7,:]","sub_path":"mainfunctions.py","file_name":"mainfunctions.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14299890","text":"# -*- coding: utf-8 -*-\n# Copyright: Ankitects Pty Ltd and 
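# --- Added portability note ---
# np.npv (used in the NPV line above) was deprecated in NumPy 1.18 and removed
# in NumPy 1.20; it now lives in the separate numpy-financial package as
# numpy_financial.npv. A drop-in pure-NumPy sketch with the same semantics,
# where values[0] is treated as time 0 and not discounted (which is why the
# code above prepends a 0 before calling it):
import numpy as np

def npv(rate, values):
    values = np.asarray(values, dtype=float)
    return float((values / (1 + rate) ** np.arange(len(values))).sum())

assert abs(npv(0.12, [0, -1000, 300, 500, 700]) - 147.05) < 0.01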
contributors\n# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html\n#\n# Feed field HTML through BeautifulSoup to fix things like unbalanced div tags.\n#\n\nfrom anki.hooks import addHook\nfrom aqt import mw\nfrom aqt.qt import *\nfrom aqt.utils import showInfo\nfrom bs4 import BeautifulSoup\n\n\ndef onFixHTML(browser):\n nids = browser.selected_notes()\n if not nids:\n showInfo(\"Please select some notes.\")\n return\n\n mw.checkpoint(\"Fix Invalid HTML\")\n\n mw.progress.start(immediate=True)\n try:\n changed = _onFixHTML(browser, nids)\n finally:\n mw.progress.finish()\n\n browser.note_type.reset()\n mw.requireReset()\n\n showInfo(\"Updated %d/%d notes.\" % (changed, len(nids)), parent=browser)\n\n\ndef _onFixHTML(browser, nids):\n changed = 0\n for c, nid in enumerate(nids):\n note = mw.col.getNote(nid)\n if _fixNoteHTML(note):\n changed += 1\n mw.progress.update(label=\"Processed %d/%d notes\" % (c + 1, len(nids)))\n return changed\n\n\n# true on change\ndef _fixNoteHTML(note):\n changed = False\n for fld, val in note.items():\n parsed = str(BeautifulSoup(val, \"html.parser\"))\n if parsed != val:\n note[fld] = parsed\n changed = True\n\n if changed:\n note.flush()\n\n return changed\n\n\ndef onMenuSetup(browser):\n act = QAction(browser)\n act.setText(\"Fix Invalid HTML\")\n mn = browser.form.menu_Notes\n mn.addSeparator()\n mn.addAction(act)\n act.triggered.connect(lambda b=browser: onFixHTML(browser))\n\n\naddHook(\"browser.setupMenus\", onMenuSetup)\n","sub_path":"code/fixinvalidhtml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"430454525","text":"# -*- coding: utf-8 -*-#\n\n# Name: Map_xian.py\n# Author: jiaocheng\n# Date: 2018/11/22\n# Description:\n\n\nfrom pyecharts import Map, Geo, Style\nimport pandas as pd\n\n# 导入excel表举例\ndf = pd.read_excel('D:\\hadoop_test\\\\1.xlsx')\ndf.head()\n\n\n# 导入自定义的地点经纬度\ngeo_cities_coords = {df.iloc[i]['地点']: [df.iloc[i]['经度'], df.iloc[i]['纬度']] for i in range(len(df))}\nattr = list(df['地点'])\nvalue = list(df['数量'])\nstyle = Style(title_color=\"#fff\", title_pos=\"center\",\n width=1200, height=800, background_color=\"#404a59\")\n\n\n\n# 可视化\ngeo = Geo('西安租房', **style.init_style)\ngeo.add(\"\", attr, value, visual_range=[0, 100], symbol_size=10,\n visual_text_color=\"#fff\", is_piecewise=True,\n is_visualmap=True, maptype='西安', visual_split_number=10,\n geo_cities_coords=geo_cities_coords,is_legend_show=True,label_emphasis_textsize=15,label_emphasis_pos='right')\n\ngeo.render('./西安租房分布.html')\n\n\n\n\n\n# 省和直辖市\n# province_distribution = {'河南': 45.23, '北京': 37.56, '河北': 21, '辽宁': 12, '江西': 6, '上海': 20, '安徽': 10, '江苏': 16, '湖南': 9, '浙江': 13, '海南': 2, '广东': 22, '湖北': 8, '黑龙江': 11, '澳门': 1, '陕西': 11, '四川': 7, '内蒙古': 3, '重庆': 3, '云南': 6, '贵州': 2, '吉林': 3, '山西': 12, '山东': 11, '福建': 4, '青海': 1, '舵主科技,质量保证': 1, '天津': 1, '其他': 1}\n# provice=list(province_distribution.keys())\n# values=list(province_distribution.values())\n#\n# # 城市 -- 指定省的城市 xx市\n# city = ['郑州市', '安阳市', '洛阳市', '濮阳市', '南阳市', '开封市', '商丘市', '信阳市', '新乡市']\n# values2 = [1.07, 3.85, 6.38, 8.21, 2.53, 4.37, 9.38, 4.29, 6.1]\n#\n# # 区县 -- 具体城市内的区县 xx县\n# quxian = ['夏邑县', '民权县', '梁园区', '睢阳区', '柘城县', '宁陵县']\n# values3 = [3, 5, 7, 8, 2, 4]\n\n# maptype='china' 只显示全国直辖市和省级\n# 数据只能是省名和直辖市的名称\n# map = Map(\"中国地图\",'中国地图', width=1200, height=600)\n# map.add(\"2121\", provice, values, visual_range=[0, 50], maptype='china', 
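# --- Added example (sample strings are made up) ---
# The core repair idea of the add-on above, in isolation: round-tripping a
# fragment through BeautifulSoup's "html.parser" closes unbalanced tags, and
# the before/after comparison is exactly what _fixNoteHTML uses to decide
# whether a note changed.
from bs4 import BeautifulSoup

broken = "<div><b>bold text</div>"
fixed = str(BeautifulSoup(broken, "html.parser"))
print(fixed)             # expected something like "<div><b>bold text</b></div>"
print(fixed != broken)   # True -> the note would be flagged as changed and flushed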
is_visualmap=True,visual_text_color='#000')\n# #map.show_config()\n# map.render(path=\"./04-01中国���图.html\")\n\n\n# map2 = Map(\"陕西地图\",'陕西', width=1200, height=600)\n# map2.add('陕西', city, values2, visual_range=[1, 10], maptype='陕西', is_visualmap=True, visual_text_color='#000')\n# map2.show_config()\n# map2.render(path=\"./04-02陕西地图.html\")\n\n# map3 = Map(\"西安地图\",'西安', width=1200, height=600)\n# map3.add(\"西安\", quxian, values3, visual_range=[1, 10], maptype='西安', is_visualmap=True,\n# visual_text_color='#000')\n# map3.render(path=\"./04-03西安地图.html\")\n\n\n\n\n","sub_path":"super_collector/fangtianxia/Map_xian.py","file_name":"Map_xian.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"250114374","text":"import requests\nimport json\nfrom flask import Flask\n\napp = Flask(__name__)\n\nAPI = \"https://api.exchangeratesapi.io/latest?base={}&symbols={}\"\n\n@app.route(\"//\")\ndef mostrar_moeda(moeda, valor):\n\tdestino = \"BRL\"\n\turl = API.format(moeda.upper(), destino)\n\t\n\tresposta = requests.get(url)\n\tdicionario = resposta.json()\n\ttotal = dicionario[\"rates\"][\"BRL\"] * valor\n\treturn f\"Ovalor é {total:.2f}\"\n\napp.run(debug=True)\n\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330204618","text":"from aliyunsdkcore.client import AcsClient\nfrom aliyunsdkecs.request.v20140526 import DescribeInvocationResultsRequest, StopInstanceRequest, RebootInstanceRequest, RunCommandRequest, DeleteInstanceRequest, StartInstanceRequest, AllocatePublicIpAddressRequest, CreateInstanceRequest, DescribePriceRequest, DescribeAvailableResourceRequest, DescribeInstanceStatusRequest\nfrom conf import getcfg\nfrom fn.common import getObject\nfrom models.instance import getIId, writeCommandHistory, writeIp\n\necs = getcfg()['ecs']\n\nclient = AcsClient(\n ecs['access_key'], \n ecs['access_secret'],\n ecs['region'],\n timeout=20\n)\n\ndef describePrice():\n request = DescribePriceRequest.DescribePriceRequest()\n request.set_InstanceType(ecs['type'])\n request.set_InstanceNetworkType(ecs['network'])\n request.set_InternetChargeType(ecs['i_chargetype'])\n request.set_InternetMaxBandwidthOut(ecs['i_bandwidth'])\n request.set_SystemDiskCategory(ecs['systemdisk']['type'])\n request.set_SystemDiskSize(ecs['systemdisk']['size'])\n request.set_DataDisk1Category(ecs['datadisk']['type'])\n request.set_DataDisk1Size(ecs['datadisk']['size'])\n request.set_ZoneId(ecs['zone'])\n request.set_SpotStrategy(ecs['strategy'])\n response = client.do_action_with_exception(request)\n try:\n s = str(response, encoding='utf-8') #type: ignore\n return s\n except:\n return None\n \n\ndef describeAvailable():\n request = DescribeAvailableResourceRequest.DescribeAvailableResourceRequest()\n request.set_InstanceType(ecs['type'])\n request.set_SystemDiskCategory(ecs['systemdisk']['type'])\n request.set_DataDiskCategory(ecs['datadisk']['type'])\n request.set_ZoneId(ecs['zone'])\n request.set_SpotStrategy(ecs['strategy'])\n request.set_DestinationResource('InstanceType')\n response = client.do_action_with_exception(request)\n try:\n s = str(response, encoding='utf-8') #type: ignore\n return s\n except:\n return None\n \n\ndef describeInstanceStatus(id):\n request = DescribeInstanceStatusRequest.DescribeInstanceStatusRequest()\n request.set_InstanceIds([id])\n response = 
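# --- Added hardening sketch for the Flask currency view above ---
# The original handler trusts the network call completely; adding a timeout
# plus raise_for_status() keeps a slow or failing API from hanging the worker
# or surfacing later as an opaque KeyError. fetch_rate is a new helper name
# introduced here for illustration, not part of the original file.
import requests

API = "https://api.exchangeratesapi.io/latest?base={}&symbols={}"

def fetch_rate(base, symbol="BRL"):
    resposta = requests.get(API.format(base.upper(), symbol), timeout=5)
    resposta.raise_for_status()          # turn HTTP errors into exceptions here
    return resposta.json()["rates"][symbol]

# usage inside the route: total = fetch_rate(moeda) * valor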
client.do_action_with_exception(request)\n try:\n s = str(response, encoding='utf-8') #type: ignore\n return s\n except:\n return None\n \ndef createInstance():\n request = CreateInstanceRequest.CreateInstanceRequest()\n request.set_InstanceType(ecs['type'])\n request.set_InternetChargeType(ecs['i_chargetype'])\n request.set_InternetMaxBandwidthOut(ecs['i_bandwidth'])\n request.set_SystemDiskCategory(ecs['systemdisk']['type'])\n request.set_SystemDiskSize(ecs['systemdisk']['size'])\n request.set_DataDisks([{\n \"Size\": ecs['datadisk']['size'],\n \"Category\": ecs['datadisk']['type'],\n \"DeleteWithInstance\": True\n }])\n request.set_ZoneId(ecs['zone'])\n request.set_SpotStrategy(ecs['strategy'])\n request.set_ImageId(ecs['image'])\n if ecs['password']:\n request.set_Password(ecs['password'])\n request.set_SpotPriceLimit(0)\n response = client.do_action_with_exception(request)\n try:\n s = str(response, encoding='utf-8') #type: ignore\n except:\n return None\n return s\n\ndef allocateIp(id):\n request = AllocatePublicIpAddressRequest.AllocatePublicIpAddressRequest()\n request.set_InstanceId(id)\n r = getObject(client.do_action_with_exception(request))\n writeIp(r.get('IpAddress'))\n \ndef startInstance(id):\n request = StartInstanceRequest.StartInstanceRequest()\n request.set_InstanceId(id)\n client.do_action_with_exception(request)\n \ndef deleteInstance(id):\n request = DeleteInstanceRequest.DeleteInstanceRequest()\n request.set_Force(True)\n request.set_InstanceId(id)\n client.do_action_with_exception(request)\n \ndef deploy(id, token):\n request = RunCommandRequest.RunCommandRequest()\n try:\n f = open('run.sh')\n cmd = f.read()\n f.close()\n except Exception as e:\n print('Cannot open run.sh: ' + str(e))\n cmd = 'echo No run.sh content.'\n request.set_InstanceIds([id])\n request.set_CommandContent(cmd)\n request.set_Type('RunShellScript')\n request.set_Timeout(99999)\n r = getObject(client.do_action_with_exception(request))\n writeCommandHistory(token, r.get('CommandId'), r.get('InvokeId'))\n \ndef rebootInstance(id):\n request = RebootInstanceRequest.RebootInstanceRequest()\n request.set_InstanceId(id)\n client.do_action_with_exception(request)\n \ndef stopInstance(id):\n request = StopInstanceRequest.StopInstanceRequest()\n request.set_InstanceId(id)\n client.do_action_with_exception(request)\n \ndef describeInvocationResult(id):\n request = DescribeInvocationResultsRequest.DescribeInvocationResultsRequest()\n iid = getIId()\n if not iid:\n return None\n request.set_InvokeId(id)\n request.set_InstanceId(iid)\n return client.do_action_with_exception(request)","sub_path":"src/sdk.py","file_name":"sdk.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"614181899","text":"from random import randint\nalunos = []\nfor i in range(0, int(input('Quantidade de alunos: '))):\n alunos.append(input('{}° aluno: '.format(i+1)))\nprint('\\nOrdem de aprensentação:')\nwhile len(alunos) > 0:\n random_index = randint(0, len(alunos)-1)\n print(' >> ', end='')\n print(alunos[random_index], end='')\n alunos.pop(random_index)\n","sub_path":"ex019.py","file_name":"ex019.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637100827","text":"#!/usr/bin/env python3\n\"\"\"\nDownload mp3 from YouTube Video\n\"\"\"\nfrom pytube import YouTube\nimport subprocess\nimport os\nimport argparse\nimport re\n\n\n__author__ = 
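# --- Added alternative sketch for the presentation-order script above ---
# The draw-and-pop loop works, but random.sample(seq, len(seq)) returns the
# whole list in random order in a single call, with no index bookkeeping.
import random

alunos = ['Ana', 'Bruno', 'Carla', 'Davi']   # sample data for illustration
for aluno in random.sample(alunos, len(alunos)):
    print(' >>', aluno)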
\"Alessio Scarfone\"\n__version__ = \"0.1.0\"\n__license__ = \"MIT\"\n\n# ----------------------------------------\n\nfile_size = 0\nprinted = -1;\n\ndef main(args):\n parent_dir = r\".\\download\"\n original = r\"\\original\"\n converted = r\"\\mp3\"\n\n check_output_folder(parent_dir+original)\n check_output_folder(parent_dir+converted)\n\n yt_url = input(\" >>> Copy and paste your YouTube URL here: \")\n print(yt_url)\n print (\"Accessing YouTube URL...\")\n yt = YouTube(yt_url)\n title = yt.title\n vids = []\n print (\"Fetching: {}...\".format(title))\n vids= yt.streams.all()\n\n for i in range(len(vids)):\n if vids[i].mime_type.startswith(\"audio\") == True:\n print(i,'. ',vids[i])\n\n vnum = int(input(\" >>> Enter vid num: \"))\n while vnum < 0 or vnum >= len(vids): \n print(\"No valid number\")\n vnum = int(input(\"Enter vid num: \"))\n \n global file_size\n file_size = vids[vnum].filesize\n\n #add callback for print progress \n yt.register_on_progress_callback(progress_Check)\n \n #Starts the download process\n print (\"Download from: \\n \"+str(vids[vnum])+\"\\n File Size: \"+str(round(file_size/1024/1024,3))+\" MB\")\n vids[vnum].download(parent_dir+original)\n\n\n default_filename = vids[vnum].default_filename # get default name using pytube API\n \n if args.modify_name == True:\n new_filename = input(\"Enter filename (including extension): \")\n\n if args.modify_name == False or new_filename == \"\":\n print(\"Use original title!\");\n new_filename = os.path.splitext(default_filename)[0]+\".mp3\"\n\n if os.path.exists(os.path.join(parent_dir+original, default_filename)) == True:\n print(\" >>> START MP3 CONVERSION!\")\n subprocess.check_call(['ffmpeg','-hide_banner','-loglevel','quiet','-i', \n os.path.join(parent_dir+original, default_filename),\n os.path.join(parent_dir+converted, new_filename)\n ])\n\n #DELETE ORIGINAL FILE\n if args.not_delete_original == False:\n print(\" >>> DELETE ORIGINAL\")\n os.remove(os.path.join(parent_dir+original, default_filename))\n\n else:\n print(\" >>> FILE NOT EXIST\");\n \n \n \n print('Done')\n\ndef progress_Check(stream = None, chunk = None, file_handle = None, remaining = None):\n #Gets the percentage of the file that has been downloaded.\n global printed\n percent = (100*(file_size-remaining))/file_size\n if int(percent) % 10 == 0 and int(percent) != printed:\n print(str(int(percent))+\" % downloaded\")\n printed = int(percent)\n\ndef check_output_folder(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n# ----------------------------------------------------------\n \nif __name__ == \"__main__\":\n \"\"\" This is executed when run from the command line \"\"\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-d\", \"--not-delete-original\", help='not delete original file',action='store_true')\n parser.add_argument(\"-i\", \"--modify-name\", help='ask for a new name for converted file',action='store_true')\n\n # Specify output of \"--version\"\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=\"%(prog)s (version {version})\".format(version=__version__))\n\n args = parser.parse_args()\n main(args)","sub_path":"YouPy.py","file_name":"YouPy.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"586928500","text":"import os\nimport time\nimport random\n\ndef main():\n path=os.path.abspath(os.path.dirname(__file__))\n images=[]\n for name in os.listdir(path):\n if 
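# --- Added refactoring sketch ---
# The module-level globals above (file_size, printed) can be replaced with a
# closure. The callback signature mirrors the one used in this file, which
# matches older pytube releases (newer releases drop the file_handle
# argument, so adjust if needed).
def make_progress_callback(total_bytes):
    state = {'printed': -1}

    def progress_check(stream=None, chunk=None, file_handle=None, remaining=None):
        percent = int(100 * (total_bytes - remaining) / total_bytes)
        if percent % 10 == 0 and percent != state['printed']:
            print(str(percent) + " % downloaded")
            state['printed'] = percent

    return progress_check

# usage: yt.register_on_progress_callback(make_progress_callback(vids[vnum].filesize))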
name.endswith('.png') or name.endswith('.jpg'):\n images.append(path+'/'+name)\n\n setbg='xsetbg -onroot -fullscreen -tile '\n delay=600 # seconds\n\n print(images)\n \n while True:\n os.system(setbg + random.choice(images))\n time.sleep(delay)\n\nmain()\n","sub_path":".xmonad/backgrounds/bgdaemon.py","file_name":"bgdaemon.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"351161225","text":"import urllib\nimport datetime\nfrom pytz import utc\nimport requests\nfrom django.conf import settings\nfrom rest_framework import exceptions\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom wander.serializers import TripSerializer, CreateTripSerializer, ViewTripSerializer, CancelTripSerializer, \\\n TwilioVoiceSerializer\nfrom wander.models import Traveler, Trip, Guide, Counter\nfrom rest_framework.reverse import reverse\nfrom collections import OrderedDict\nfrom django.http import HttpResponse\nimport re\n\nalphanumeric_only = re.compile('[\\W_]+')\nphone_pattern = re.compile(r\"^[\\d\\+\\-\\(\\) ]+$\")\n\n\n@api_view(['GET'])\n@authentication_classes([])\n@permission_classes([])\ndef api_root(request, format=None):\n \"\"\"\n ### API documentation for the Rehive digital currency platform.\n ---\n \"\"\"\n return Response(\n [\n {'Guide': OrderedDict(\n [(\n 'View, Accept, Decline and Cancel Trips',\n reverse('wander-api:trips', request=request, format=format)),\n ]\n )},\n {'Traveler': OrderedDict(\n [('View Trip', reverse('wander-api:view_trip', request=request, format=format)),\n ('Create Trip', reverse('wander-api:create_trip', request=request, format=format)),\n ('Cancel Trip', reverse('wander-api:cancel_trip', request=request, format=format)),\n ]\n )},\n {'Twilio': OrderedDict(\n [('Twilio Token', reverse('wander-api:twilio_token_view', request=request, format=format)),\n ]\n )}\n ])\n\n\nfrom twilio.util import TwilioCapability\nimport twilio.twiml\n\n\nclass TripView(GenericAPIView):\n \"\"\"\n ### Get the available trips.\n\n \"\"\"\n permission_classes = ()\n allowed_methods = ('POST', 'GET')\n serializer_class = TripSerializer\n\n def post(self, request, *args, **kwargs):\n\n action = request.data.get('action')\n trip_id = request.data.get('trip_id')\n guide = request.data.get('username')\n\n guide, created = Guide.objects.get_or_create(username=guide)\n\n if action == 'accept':\n Trip.objects.filter(id=trip_id).update(guide=guide, status='live')\n Counter.objects.filter(id=1).update(counter=0)\n elif action == 'cancel':\n Trip.objects.filter(id=trip_id).update(guide=guide, status='cancelled',\n end_time=datetime.datetime.now(tz=utc))\n\n return Response({'status': 'success'})\n\n def get(self, request, *args, **kwargs):\n\n r = requests.get(\"https://thingspace.io/get/latest/dweet/for/arthur\")\n current_location = r.json()['with'][0]['content']\n\n # Hard code get latest trip created.\n # trip = Trip.objects.filter(traveler__username='arthur').latest('id')\n trip = Trip.objects.latest('id')\n\n if trip.status == 'cancelled':\n trip_status = 'no_trips'\n else:\n trip_status = trip.status\n\n if trip.guide:\n guide = {'name': 'Fort Prefect',\n 'age': 25,\n 'gender': 'Male',\n 'occupation': 'Tour Guide',\n 'rating': 5,\n }\n\n else:\n guide = {}\n\n if trip.facts:\n facts = trip.facts\n 
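# --- Added alternative sketch ---
# str.endswith accepts a tuple of suffixes, which shortens the extension test
# in the wallpaper daemon above and makes adding formats a one-token change.
import os

def list_images(path):
    exts = ('.png', '.jpg', '.jpeg')
    return [os.path.join(path, name)
            for name in os.listdir(path)
            if name.lower().endswith(exts)]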
else:\n facts = {'list': []}\n\n data = {'trip': {'id': trip.id,\n 'start_time': trip.start_time,\n 'guide': guide,\n 'traveler':\n {'name': trip.traveler.name,\n 'age': trip.traveler.age,\n 'occupation': trip.traveler.occupation,\n 'country': trip.traveler.country,\n 'city': trip.traveler.city,\n 'interest': trip.traveler.interest,\n 'bio': trip.traveler.bio,\n 'gender': \"Male\",\n 'profile': trip.traveler.profile},\n 'start_location': {'lat': '37.7786', 'lon': '122.3893'},\n 'end_location': {'lat': '37.7786', 'lon': '122.3893'},\n 'facts': facts,\n 'current_location': current_location,\n 'status': trip_status,\n }\n }\n\n return Response({'status': 'success', 'data': data})\n\n\nclass CreateTripView(GenericAPIView):\n \"\"\"\n ### Get the available trips.\n\n \"\"\"\n permission_classes = ()\n allowed_methods = ('POST',)\n serializer_class = CreateTripSerializer\n\n def post(self, request, *args, **kwargs):\n # If user creates a trip, check if there is more than one trip entry for the user. If so, then cancel previous\n # and create a new trip.\n username = request.data.get('username')\n traveler, created = Traveler.objects.get_or_create(username=username)\n\n # Create trip\n if Trip.objects.filter(traveler=traveler, status='waiting').exists():\n Trip.objects.filter(traveler=traveler).update(status='cancelled', end_time=datetime.datetime.now(tz=utc))\n\n trip = Trip.objects.create(traveler=traveler)\n\n data = {'trip_id': trip.id}\n\n return Response({'status': 'success', 'data': data})\n\n\nclass ViewTripView(GenericAPIView):\n \"\"\"\n ### Get the trip info for Traveler.\n\n \"\"\"\n permission_classes = ()\n allowed_methods = ('POST',)\n serializer_class = ViewTripSerializer\n\n def post(self, request, *args, **kwargs):\n\n # If user creates a trip, check if there is more than one trip entry for the user. If so, then cancel previous\n # and create a new trip.\n trip_id = request.data.get('trip_id')\n\n if Trip.objects.filter(id=trip_id).exists():\n trip = Trip.objects.get(id=trip_id)\n data = {'trip_id': trip_id, 'status': trip.status}\n return Response({'status': 'success', 'data': data})\n else:\n return Response({'status': 'error', 'message': 'Trip does not exist.'})\n\n\nclass CancelTripView(GenericAPIView):\n \"\"\"\n ### Get the trip info for Traveler.\n\n \"\"\"\n permission_classes = ()\n allowed_methods = ('POST',)\n serializer_class = CancelTripSerializer\n\n def post(self, request, *args, **kwargs):\n\n # If user creates a trip, check if there is more than one trip entry for the user. 
If so, then cancel previous\n # and create a new trip.\n trip_id = request.data.get('trip_id')\n\n trip = Trip.objects.latest('id')\n trip.status = 'cancelled'\n trip.save()\n\n return Response({'status': 'success'})\n\n # if Trip.objects.filter(id=trip_id).exists():\n # trip = Trip.objects.get(id=trip_id)\n # trip.end_time = datetime.datetime.now(tz=utc)\n # trip.status = 'cancelled'\n # trip.save()\n # data = {'trip_id': trip_id, 'status': trip.status}\n # return Response({'status': 'success', 'data': data})\n # else:\n # return Response({'status': 'error', 'message': 'Trip does not exist.'})\n\n\nclass TwilioTokenView(GenericAPIView):\n \"\"\"\n ### Twilio token.\n\n \"\"\"\n permission_classes = ()\n allowed_methods = ('GET',)\n\n def get(self, request, *args, **kwargs):\n\n # get credentials for environment variables\n account_sid = getattr(settings, 'TWILIO_ACCOUNT_SID')\n auth_token = getattr(settings, 'TWILIO_AUTH_TOKEN')\n application_sid = getattr(settings, 'TWILIO_TWIML_APP_SID')\n\n # Generate a random user name\n user = self.request.query_params.get('user', '')\n if user:\n identity = alphanumeric_only.sub('', user)\n else:\n identity = alphanumeric_only.sub('', 'default')\n\n # Create a Capability Token\n capability = TwilioCapability(account_sid, auth_token)\n capability.allow_client_outgoing(application_sid)\n capability.allow_client_incoming(identity)\n token = capability.generate()\n\n return Response(OrderedDict([('identity', identity), ('token', token)]))\n\n\nclass TwilioVoiceView(GenericAPIView):\n \"\"\"\n ### Twilio token.\n\n \"\"\"\n permission_classes = ()\n allowed_methods = ('POST',)\n serializer_class = TwilioVoiceSerializer\n\n def post(self, request, *args, **kwargs):\n resp = twilio.twiml.Response()\n if \"To\" in request.data and request.data[\"To\"] != '':\n dial = resp.dial(callerId=getattr(settings, 'TWILIO_CALLER_ID'))\n # wrap the phone number or client name in the appropriate TwiML verb\n # by checking if the number given has only digits and format symbols\n if phone_pattern.match(request.data[\"To\"]):\n dial.number(request.data[\"To\"])\n else:\n dial.client(request.data[\"To\"])\n else:\n resp.say(\"Thanks for calling!\")\n\n return HttpResponse(str(resp), content_type='text/xml')\n","sub_path":"src/wander/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"455016494","text":"# coding: utf-8\n\n\"\"\"\nCopyright 2016 SmartBear Software\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Ref: https://github.com/swagger-api/swagger-codegen\n\"\"\"\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ReplaceResponse(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self):\n \"\"\"\n ReplaceResponse - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute 
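# --- Added illustration ---
# How the dialing branch in TwilioVoiceView above decides between a phone
# number and a client name: phone_pattern only matches digits, +, -, (, )
# and spaces, so anything with letters falls through to dial.client(...).
import re

phone_pattern = re.compile(r"^[\d\+\-\(\) ]+$")

for target in ["+1 (555) 123-4567", "client-bob", "support_agent"]:
    kind = "number" if phone_pattern.match(target) else "client"
    print(target, "->", kind)
# "+1 (555) 123-4567" -> number; the other two are treated as client names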
type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'change_number': 'int',\n 'upload_status': 'UriReference',\n 'upload_destination_uri': 'str',\n 'upload_method': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'change_number': 'changeNumber',\n 'upload_status': 'uploadStatus',\n 'upload_destination_uri': 'uploadDestinationUri',\n 'upload_method': 'uploadMethod'\n }\n\n self._id = None\n self._name = None\n self._change_number = None\n self._upload_status = None\n self._upload_destination_uri = None\n self._upload_method = None\n\n @property\n def id(self):\n \"\"\"\n Gets the id of this ReplaceResponse.\n\n\n :return: The id of this ReplaceResponse.\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"\n Sets the id of this ReplaceResponse.\n\n\n :param id: The id of this ReplaceResponse.\n :type: str\n \"\"\"\n \n self._id = id\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this ReplaceResponse.\n\n\n :return: The name of this ReplaceResponse.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this ReplaceResponse.\n\n\n :param name: The name of this ReplaceResponse.\n :type: str\n \"\"\"\n \n self._name = name\n\n @property\n def change_number(self):\n \"\"\"\n Gets the change_number of this ReplaceResponse.\n\n\n :return: The change_number of this ReplaceResponse.\n :rtype: int\n \"\"\"\n return self._change_number\n\n @change_number.setter\n def change_number(self, change_number):\n \"\"\"\n Sets the change_number of this ReplaceResponse.\n\n\n :param change_number: The change_number of this ReplaceResponse.\n :type: int\n \"\"\"\n \n self._change_number = change_number\n\n @property\n def upload_status(self):\n \"\"\"\n Gets the upload_status of this ReplaceResponse.\n\n\n :return: The upload_status of this ReplaceResponse.\n :rtype: UriReference\n \"\"\"\n return self._upload_status\n\n @upload_status.setter\n def upload_status(self, upload_status):\n \"\"\"\n Sets the upload_status of this ReplaceResponse.\n\n\n :param upload_status: The upload_status of this ReplaceResponse.\n :type: UriReference\n \"\"\"\n \n self._upload_status = upload_status\n\n @property\n def upload_destination_uri(self):\n \"\"\"\n Gets the upload_destination_uri of this ReplaceResponse.\n\n\n :return: The upload_destination_uri of this ReplaceResponse.\n :rtype: str\n \"\"\"\n return self._upload_destination_uri\n\n @upload_destination_uri.setter\n def upload_destination_uri(self, upload_destination_uri):\n \"\"\"\n Sets the upload_destination_uri of this ReplaceResponse.\n\n\n :param upload_destination_uri: The upload_destination_uri of this ReplaceResponse.\n :type: str\n \"\"\"\n \n self._upload_destination_uri = upload_destination_uri\n\n @property\n def upload_method(self):\n \"\"\"\n Gets the upload_method of this ReplaceResponse.\n\n\n :return: The upload_method of this ReplaceResponse.\n :rtype: str\n \"\"\"\n return self._upload_method\n\n @upload_method.setter\n def upload_method(self, upload_method):\n \"\"\"\n Sets the upload_method of this ReplaceResponse.\n\n\n :param upload_method: The upload_method of this ReplaceResponse.\n :type: str\n \"\"\"\n allowed_values = [\"SINGLE_PUT\", \"MULTIPART_POST\"]\n if upload_method not in allowed_values:\n raise ValueError(\n \"Invalid value for `upload_method`, must be one of {0}\"\n .format(allowed_values)\n 
)\n\n self._upload_method = upload_method\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n\n","sub_path":"build/PureCloudPlatformApiSdk/models/replace_response.py","file_name":"replace_response.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"513688973","text":"#\n# @lc app=leetcode.cn id=33 lang=python\n#\n# [33] 搜索旋转排序数组\n#\n# https://leetcode-cn.com/problems/search-in-rotated-sorted-array/description/\n#\n# algorithms\n# Medium (36.08%)\n# Likes: 486\n# Dislikes: 0\n# Total Accepted: 68.2K\n# Total Submissions: 187.9K\n# Testcase Example: '[4,5,6,7,0,1,2]\\n0'\n#\n# 假设按照升序排序的数组在预先未知的某个点上进行了旋转。\n# \n# ( 例如,数组 [0,1,2,4,5,6,7] 可能变为 [4,5,6,7,0,1,2] )。\n# \n# 搜索一个给定的目标值,如果数组中存在这个目标值,则返回它的索引,否则返回 -1 。\n# \n# 你可以假设数组中不存在重复的元素。\n# \n# 你的算法时间复杂度必须是 O(log n) 级别。\n# \n# 示例 1:\n# \n# 输入: nums = [4,5,6,7,0,1,2], target = 0\n# 输出: 4\n# \n# \n# 示例 2:\n# \n# 输入: nums = [4,5,6,7,0,1,2], target = 3\n# 输出: -1\n# \n#\n\n# @lc code=start\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n\n def find_half(nums,target,start):\n\n length = len(nums)\n if length == 1 and not nums[0] == target:\n return -1\n # 递归\n\n direction = 0\n # 判断截断位置与目标关系\n if nums[0] > target:\n # 小数��边\n direction = -1\n elif nums[0] < target:\n # 大数那边\n direction = 1\n else:\n return start\n\n if nums[-1] == target:\n return len(nums)-1+start\n if nums[length//2] == target:\n return start+length//2\n ans = -1\n\n left = 0\n right = 0\n if direction == -1:\n if nums[length//2] < target:\n right = 1\n elif nums[length//2] > nums[0]:\n right = 1\n else:\n left = 1\n else:\n if nums[length//2] > target:\n left = 1\n elif nums[length//2] < nums[0]:\n left = 1\n else:\n right = 1\n if right == 1:\n start = start + length//2\n ans = find_half(nums[length//2:],target,start)\n else:\n # 返回左边\n start = start + 0\n ans = find_half(nums[:length//2],target,start)\n \n return ans\n\n if len(nums) == 0:\n return -1\n ans = find_half(nums,target,0)\n return ans\n \n\n \n# @lc code=end\n\nnums = [4,5,6,7,0,1,2]\ntarget = 3\n# nums = [8,1,2,3,4,5,6,7]\n# target = 6\nsolu = Solution()\nprint(solu.search(nums,target))\n","sub_path":"33.搜索旋转排序数组.py","file_name":"33.搜索旋转排序数组.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"350237748","text":"#import json and os libraries so that you can 
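# --- Added alternative implementation for the rotated-array search above ---
# The recursive solution slices nums, which copies memory and obscures the
# invariant. The standard iterative formulation keeps O(log n) time and O(1)
# space: at every step at least one half of [lo, hi] is sorted, so test
# whether target lies inside the sorted half and discard the other half.
def search(nums, target):
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[lo] <= nums[mid]:                # left half is sorted
            if nums[lo] <= target < nums[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:                                    # right half is sorted
            if nums[mid] < target <= nums[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return -1

assert search([4, 5, 6, 7, 0, 1, 2], 0) == 4
assert search([4, 5, 6, 7, 0, 1, 2], 3) == -1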
use the methods in those libraries\nimport json\nimport os\n\n#import the User class from the user.py file to use the User class\nfrom user import User\n\n#menu runs your program\ndef menu():\n #get users name; if the user file already exists, load that file, if it doesn't, create a new user\n name = input(\"Enter your name: \")\n filename = \"{}.txt\".format(name)\n if file_exists(filename):\n with open(filename, 'r') as f:\n json_data = json.load(f)\n user = User.from_json(json_data)\n else:\n user = User(name)\n\n #give user options to add movies, see list of movies, set movie as watched, delete movies, see watched movies, save a text json file or quit\n user_input = input(\"Enter 'a' to add a movie, 's' to see the list of movies, 'w' to set a movie as watched, 'd' to delete a movie, 'l' to see the list of watched movies, 'f' to save or 'q' to quit: \")\n \n while user_input != 'q':\n #if a, use the user class add movie object to add a user\n if user_input == 'a':\n movie_name = input(\"Enter the movie name: \")\n movie_genre = input(\"Enter the movie genre: \")\n user.add_movie(movie_name, movie_genre)\n \n #if s, pull up the user's movie list which is a list of movie objects and pull the movie properities from that list\n elif user_input == 's':\n for movie in user.movies:\n print(\"Name: {} Genre: {} Watched: {}\".format(movie.name, movie.genre, movie.watched))\n \n #if w, use the user method, set_watched, to change the value to set watched\n elif user_input == 'w':\n movie_name = input(\"Enter the movie name to set as watched: \")\n user.set_watched(movie_name)\n \n #if d, use the user method, delete_movie, to change delete the movie\n elif user_input == 'd':\n movie_name = input(\"Enter the movie name to delete: \")\n user.delete_movie(movie_name)\n\n #if l, cycle through the user list of watched movies (pulled up in the user method watched_movies) and print each movie object\n elif user_input == 'l':\n for movie in user.watched_movies():\n print(\"Name: {} Genre: {} Watched: {}\".format(movie.name, movie.genre, movie.watched))\n \n #if f, save user file as a new text file using JSON to pass the data through the text file\n elif user_input == 'f':\n with open(filename, 'w') as f:\n json.dump(user.json(), f)\n\n # keep giving the user options until they quit\n user_input = input(\"Enter 'a' to add a movie, 's' to see the list of movies, 'w' to set a movie as watched, 'd' to delete a movie, 'l' to see the list of watched movies, 'f' to save or 'q' to quit: \")\n\n#check to see if the file exists\ndef file_exists(filename):\n return os.path.isfile(filename)\n\n#run the program\nmenu()\n","sub_path":"4Python/4MovieRental2.0/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"333475227","text":"#!/usr/bin/env python\n# -*- mode: python; indent-tabs-mode: nil; -*- coding: iso-8859-1 -*-\n\n\"\"\"\n\nDublinCoreI18n.py\n\nCopyright 2009-2010 by Marcello Perathoner\n\nDistributable under the GNU General Public License Version 3 or newer.\n\nTranslate a DublinCore struct with Babel.\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport cherrypy\nimport babel\n\n\nclass DublinCoreI18nMixin (object):\n \"\"\" Translator Mixin for GutenbergDatabaseDublinCore class. 
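# --- Added sketch of the interface app.py assumes ---
# The menu above relies on User.json() / User.from_json() round-tripping
# through a plain dict, plus add_movie / set_watched / delete_movie /
# watched_movies. This is a guess at a minimal user.py consistent with those
# calls, not the project's actual code.
class Movie:
    def __init__(self, name, genre, watched=False):
        self.name, self.genre, self.watched = name, genre, watched

    def json(self):
        return {'name': self.name, 'genre': self.genre, 'watched': self.watched}


class User:
    def __init__(self, name, movies=None):
        self.name = name
        self.movies = movies or []

    def add_movie(self, name, genre):
        self.movies.append(Movie(name, genre))

    def set_watched(self, name):
        for movie in self.movies:
            if movie.name == name:
                movie.watched = True

    def delete_movie(self, name):
        self.movies = [m for m in self.movies if m.name != name]

    def watched_movies(self):
        return [m for m in self.movies if m.watched]

    def json(self):
        return {'name': self.name, 'movies': [m.json() for m in self.movies]}

    @classmethod
    def from_json(cls, data):
        movies = [Movie(m['name'], m['genre'], m['watched']) for m in data['movies']]
        return cls(data['name'], movies)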
\"\"\"\n\n def __init__ (self):\n self.translated = False\n self.hr_release_date = None\n self.rights = None\n\n\n @staticmethod\n def dummy_text_holder ():\n \"\"\"Never gets called.\n\n Only holds some gettext messages to translate. Keep this in\n sync with GutenbergDatabaseDublinCore.\n\n \"\"\"\n _('Copyrighted. Read the copyright notice inside this book for details.')\n _('Public domain in the USA.')\n\n\n def translate (self):\n \"\"\" Translate DublinCore struct. \"\"\"\n\n if self.translated:\n # already translated\n return\n\n self.hr_release_date = babel.dates.format_date (\n self.release_date, locale = str (cherrypy.response.i18n.locale))\n\n if cherrypy.response.i18n.locale.language == 'en':\n # no translation required\n return\n\n self.rights = _(self.rights)\n for author in self.authors:\n author.role = _(author.role)\n for marc in self.marcs:\n marc.caption = _(marc.caption)\n for dcmitype in self.dcmitypes:\n dcmitype.description = _(dcmitype.description)\n for lang in self.languages:\n if lang.id in cherrypy.response.i18n.locale.languages:\n lang.language = cherrypy.response.i18n.locale.languages[lang.id].capitalize ()\n for file_ in self.files:\n file_.hr_filetype = _(file_.hr_filetype)\n for file_ in self.generated_files:\n file_.hr_filetype = _(file_.hr_filetype)\n\n self.translated = True\n","sub_path":"DublinCoreI18n.py","file_name":"DublinCoreI18n.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344460027","text":"from multiprocessing import Process\nimport os\nimport prodigy\n\ndef info(title, port):\n print(title)\n print(f\"module name: {__name__}\")\n print(f\"parent process: {os.getppid()}\")\n print(f\"process id: {os.getpid()} \\n\")\n prodigy.serve(\n \"ner.manual\", \n \"gsr_is_protest\", # db\n \"en_core_web_sm\",\n \"data/raw_github-issue-titles.jsonl\", # input file\n port=port # port\n )\n\ndef f(name, port):\n info(\"function f\", port)\n print('hello', name)\n\nif __name__ == '__main__':\n # info(\"main line\")\n for i in range(3):\n p = Process(target=f, args=('bob', 9010+i))\n p.start()\n ","sub_path":"multi-prodigy/learn_mutiprocessing.py","file_name":"learn_mutiprocessing.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"183309502","text":"#-*-coding:utf-8-*-\nfrom sqlalchemy import func,distinct\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface as SAI\n\nfrom . 
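# --- Added variant of the spawning loop above ---
# The original starts three Prodigy servers and lets the parent return
# immediately; keeping the Process handles and joining them shuts the workers
# down deterministically. serve() here is a stand-in for prodigy.serve so the
# example runs anywhere.
from multiprocessing import Process

def serve(port):
    print('would serve on port', port)

if __name__ == '__main__':
    procs = [Process(target=serve, args=(9010 + i,)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()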
import filters\nfrom superset.fab.models.filters import SupersetFilters\n\n\nclass SupersetSQLAInterface(SAI):\n\n filter_converter_class = filters.SuperSetSQLAFilterConverter\n\n def get_pk_column(self):\n pk_name = self.get_pk_name()\n pk_column = None\n for c in self.obj.__mapper__.columns:\n if pk_name == c.name:\n pk_column = c\n break\n return pk_column\n\n def query(self, filters=None, order_column='', order_direction='',\n page=None, page_size=None):\n \"\"\" \n QUERY\n :param filters:\n dict with filters {:\n :param page:\n the current page\n :param page_size:\n the current page size\n\n \"\"\"\n query = self.session.query(self.obj)\n if len(order_column.split('.')) >= 2:\n tmp_order_column = ''\n for join_relation in order_column.split('.')[:-1]:\n model_relation = self.get_related_model(join_relation)\n query = query.join(model_relation)\n # redefine order column name, because relationship can have a different name\n # from the related table name.\n tmp_order_column = tmp_order_column + model_relation.__tablename__ + '.' \n order_column = tmp_order_column + order_column.split('.')[-1]\n\n pk_column = self.get_pk_column()\n query_count = self.session.query(pk_column)\n query_count = self._get_base_query(query=query_count,\n filters=filters)\n query_count = self.session.query(func.count('*')).select_from(query_count.distinct().subquery())\n count = query_count.scalar()\n query = self._get_base_query(query=query,\n filters=filters,\n order_column=order_column,\n order_direction=order_direction)\n\n\n if page:\n query = query.offset(page * page_size)\n if page_size:\n query = query.limit(page_size)\n\n return count, query.all()\n\n def get_filters(self, search_columns=None):\n search_columns = search_columns or []\n return SupersetFilters(self.filter_converter_class, self, search_columns)\n\n\n","sub_path":"superset/fab/models/sqla/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"459289333","text":"'''\nCreated on November 09, 2018\n\n@author: Alejandro Molina\n@author: Robert Peharz\n'''\n\nfrom scipy.special import logsumexp\n\nfrom spn.algorithms.Inference import log_likelihood\n\nfrom spn.structure.leaves.parametric.Parametric import Gaussian\n\nfrom spn.structure.Base import eval_spn_top_down, Sum, Product, get_nodes_by_type, get_number_of_nodes, Leaf\nimport numpy as np\n\n\ndef gradient_backward(spn, data, lls_per_node):\n node_gradients = {}\n node_gradients[Sum] = sum_gradient_backward\n node_gradients[Product] = prod_gradient_backward\n node_gradients[Leaf] = leaf_gradient_backward\n\n gradient_result = np.zeros_like(lls_per_node)\n\n eval_spn_top_down(spn, node_gradients, parent_result=np.zeros((data.shape[0])), gradient_result=gradient_result,\n lls_per_node=lls_per_node)\n\n return gradient_result\n\n\ndef leaf_gradient_backward(node, parent_result, gradient_result=None, lls_per_node=None):\n gradients = np.zeros((parent_result.shape[0]))\n gradients[:] = parent_result # log_sum_exp\n\n gradient_result[:, node.id] = gradients\n\n\ndef sum_gradient_backward(node, parent_result, gradient_result=None, lls_per_node=None):\n gradients = np.zeros((parent_result.shape[0]))\n gradients[:] = parent_result # log_sum_exp\n\n gradient_result[:, node.id] = gradients\n\n messages_to_children = []\n\n for i, c in enumerate(node.children):\n messages_to_children.append(gradients + np.log(node.weights[i]))\n\n return messages_to_children\n\n\ndef 
prod_gradient_backward(node, parent_result, gradient_result=None, lls_per_node=None):\n gradients = np.zeros((parent_result.shape[0]))\n gradients[:] = parent_result # log_sum_exp\n\n gradient_result[:, node.id] = gradients\n\n messages_to_children = []\n\n # TODO handle zeros for efficiency, darwiche 2003\n\n output_ll = lls_per_node[:, node.id]\n\n for i, c in enumerate(node.children):\n messages_to_children.append(output_ll - lls_per_node[:, c.id])\n\n return messages_to_children\n\n\ndef gaussian_em_update(node, lls, gradients, root_lls):\n p = (gradients - root_lls) + lls\n new_mean = p + np.log(node.mean)\n node.mean = logsumexp(new_mean) - logsumexp(p)\n\n\n\n_leaf_node_updates = {Gaussian: gaussian_em_update}\n\n\ndef EM_optimization(spn, data, iterations=5, leaf_node_updates=_leaf_node_updates):\n for _ in range(iterations):\n lls_per_node = np.zeros((data.shape[0], get_number_of_nodes(spn)))\n\n # one pass bottom up evaluating the likelihoods\n log_likelihood(spn, data, dtype=data.dtype, lls_matrix=lls_per_node)\n\n gradients = gradient_backward(spn, data, lls_per_node)\n\n R = lls_per_node[:, 0]\n\n for sum_node in get_nodes_by_type(spn, Sum):\n RinvGrad = (gradients[:, sum_node.id] - R)\n for i, c in enumerate(sum_node.children):\n new_w = RinvGrad + lls_per_node[:, c.id] + np.log(sum_node.weights[i])\n sum_node.weights[i] = logsumexp(new_w)\n total_weight = np.sum(sum_node.weights)\n sum_node.weights = (sum_node.weights / total_weight).tolist()\n\n for leaf_node in get_nodes_by_type(spn, Leaf):\n f = leaf_node_updates[leaf_node.__class__]\n f(leaf_node, lls_per_node[:, leaf_node.id], gradients[:, leaf_node.id], R)\n","sub_path":"src/spn/algorithms/EM.py","file_name":"EM.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"288252528","text":"#!/usr/bin/env python\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
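# --- Added illustration ---
# Why the EM updates above stay in log space: multiplying many small
# likelihoods underflows float64, whereas scipy's logsumexp computes
# log(sum(exp(x))) stably by factoring out the maximum first.
import numpy as np
from scipy.special import logsumexp

log_p = np.array([-1000.0, -1000.5, -999.7])    # log-likelihoods far below exp() range
with np.errstate(divide='ignore'):
    naive = np.log(np.sum(np.exp(log_p)))       # exp() underflows to 0 -> log(0) = -inf
stable = logsumexp(log_p)
print(naive, stable)                            # -inf  vs.  approx -998.92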
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Revision $Id$\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport time\nimport traceback\nimport collections\n\nimport roslib\nroslib.load_manifest('rosdoc_rosorg')\n\nimport rospkg\n\nimport rosdoc\nimport rosdoc.upload\n\nfrom .core import load_repos, Repo, repo_packages, repo_stacks\nfrom . import package_header\nfrom . import stack_header\nfrom . import repo_header\nfrom . import megamanifest\nfrom . import megastack\n\n\ndef generate_docs(ctx, repos, checkout_dir, test=False):\n \"\"\"\n For each repo and each stack and package in each repo generate header html etc.\n \"\"\"\n artifacts = []\n\n stack_dirs = []\n\n timings = ctx.timings\n timings['package-header'] = 0.\n timings['stack-header'] = 0.\n timings['repo-header'] = 0.\n\n # first package/stack to be documented wins\n completed_packages = set()\n completed_stacks = set()\n\n meta_repos = collections.defaultdict(list)\n\n rosstack = rospkg.RosStack()\n for repo_name, repo in repos:\n print(\"repo\", repo_name)\n\n # Packages\n start = time.time()\n packages = repo_packages(repo, checkout_dir)\n print(\"repo packages: %s\"%(packages))\n\n # ros-repo doesn't include the ros stack, so we have to add it back in\n if repo_name == 'ros':\n packages = packages + rosstack.packages_of('ros')\n # only document requested packages, and don't document anything already documented (name collisions)\n packages = list((set(packages) & set(ctx.packages)) - completed_packages)\n\n print(\"[%s] Generating manifest.yaml files for [%s]\"%(repo_name, ','.join(packages)))\n package_files = package_header.generate_package_headers(ctx, repo, packages)\n for p_f in package_files:\n # workaround bug in the indexer where 'external' packages\n # don't generate artifacts, and thus the manifest.yaml is\n # not updated. We generally assume that artifacts is\n # redundant because most packages generate a full\n # directory during the normal rosdoc. In this case, we\n # manually add to the artifacts so that it will get\n # uploaded.\n pfd = os.path.dirname(p_f)\n package = os.path.basename(pfd)\n # check both full path and package name\n if not pfd in artifacts and package not in artifacts:\n artifacts.append(pfd)\n timings['package-header'] += time.time() - start\n\n # Stacks\n start = time.time()\n stacks = repo_stacks(repo, checkout_dir)\n # NOTE: we ignore ctx.stacks as we assume we document\n # everything in the repo. 
Don't document anything already\n # documented (name collisions)\n stacks = list(set(stacks) - completed_stacks)\n print(\"repo stacks: %s\"%(stacks))\n timings['stack-header'] += time.time() - start\n\n # - generate\n print(\"[%s] Generating stack.yaml files for [%s]\"%(repo_name, ','.join(stacks)))\n stack_files = stack_header.generate_stack_headers(ctx, repo, stacks)\n # - simplify artifacts to the directory name\n stack_dirs.extend([os.path.dirname(f) for f in stack_files])\n\n # Repos\n start = time.time()\n artifacts.extend(repo_header.generate_repo_header(ctx, repo, stack_files, package_files))\n if repo.aggregate_name is not None and repo.name != repo.aggregate_name:\n meta_repos[repo.aggregate_name].append((repo, stack_files, package_files))\n timings['repo-header'] += time.time() - start\n\n completed_packages.update(packages)\n completed_stacks.update(stacks)\n\n #print(\"completed packages: %s\"%(completed_packages))\n #print(\"completed stacks: %s\"%(completed_stacks))\n\n # meta repo generation\n start = time.time()\n for aggregate_name, repo_assets in meta_repos.iteritems():\n print(\"processing meta repo [%s]\"%(aggregate_name))\n\n # generate union of sub-repos\n stack_files = []\n package_files = []\n for _, stack_asset, p in repo_assets:\n stack_files.extend(stack_asset)\n package_files.extend(stack_asset)\n\n uri = type_ = ''\n rosinstall = {}\n name = local_path = aggregate_name\n repo = Repo(name, type_, uri, rosinstall, local_path)\n artifacts.extend(repo_header.generate_repo_header(ctx, repo, stack_files, package_files))\n timings['repo-header'] += time.time() - start\n\n # we don't include package artifacts because they are already covered elsewhere\n return artifacts + stack_dirs\n\n\ndef generate_rosbrowse(ctx, repos, checkout_dir, test=False):\n timings = ctx.timings\n timings['megamanifest'] = 0.\n timings['megastack'] = 0.\n\n artifacts = []\n try:\n start = time.time()\n megamani = megamanifest.generate_megamanifest(\n ctx.docdir,\n ctx.manifests,\n repos,\n checkout_dir) or []\n artifacts.extend(megamani)\n timings['megamanifest'] += time.time() - start\n\n except:\n sys.stderr.write(\"megamanifest generation failed\\n\")\n traceback.print_exc()\n\n try:\n start = time.time()\n mega_stack = megastack.generate_megastack(\n ctx.docdir,\n ctx.stack_manifests,\n repos,\n checkout_dir) or []\n\n artifacts.extend(mega_stack)\n timings['megastack'] += time.time() - start\n except:\n sys.stderr.write(\"megastack generation failed\\n\")\n traceback.print_exc()\n\n return artifacts\n\n\ndef rosorg_main():\n parser = rosdoc.get_optparse('rosdoc_rosorg')\n parser.add_option(\"--repos\", default=None,\n dest=\"repos\", metavar=\"ROSBROWSE_REPOS_FILE\",\n help=\"rosinstall file with repos list for determining repository names/roots\")\n parser.add_option(\"--distro-index\", default=None,\n dest=\"distro_index\", metavar=\"DISTRO_INDEX_FILE\",\n help=\"distro index yaml file for determining repository contents for binary installs\")\n parser.add_option(\"--checkout\", default='checkouts',\n dest=\"checkout_dir\", metavar=\"CHECKOUT_DIR\",\n help=\"path to directory where entries of --repos file have been checked out\")\n parser.add_option(\"--test\", default=False,\n dest=\"test\", action=\"store_true\",\n help=\"run in test mode\")\n parser.add_option(\"--rosbrowse\", default=False,\n dest=\"rosbrowse\", action=\"store_true\",\n help=\"run rosbrowse indexer instead\")\n\n options, package_filters = parser.parse_args()\n\n # Load the repository file\n if options.repos:\n 
repos_file = options.repos\n else:\n parser.error(\"please specify a --repos file\")\n repos = load_repos(repos_file, options.distro_index)\n\n # Load the rosdoc environment\n ctx = rosdoc.rdcore.RosdocContext(\n options.name,\n options.docdir,\n package_filters=package_filters,\n path_filters=options.paths)\n\n try:\n ctx.quiet = options.quiet\n ctx.init()\n\n if not options.test:\n # when we are testing just rosdoc_rosorg, we don't want to do\n # the more expensive rosdoc build\n artifacts = rosdoc.generate_docs(ctx)\n else:\n artifacts = []\n\n if repos is None or repos == []:\n parser.error(\"No repos found for repos file %s\" % repos_file)\n\n\n if not options.rosbrowse:\n artifacts.extend(generate_docs(ctx, repos, options.checkout_dir, options.test))\n else:\n artifacts.extend(generate_rosbrowse(ctx, repos, options.checkout_dir))\n\n if options.upload:\n rosdoc.upload.upload(ctx, artifacts, options.upload)\n\n print(\"Timings\")\n for k, v in ctx.timings.iteritems():\n print(\" * %.2f %s\"%(v, k))\n\n except KeyboardInterrupt:\n pass\n except:\n traceback.print_exc()\n sys.exit(1)\n","sub_path":"rosdoc_rosorg/src/rosdoc_rosorg/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"66066441","text":"'''\n File name: KBandit.py\n Author: Jaquim Cadogan\n Date created: 5-10-18\n Python Version: 3.7 (!)\n'''\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\nfrom dataclasses import dataclass\n\nsns.set()\nnpr.seed(seed=42)\nplt.style.use('ggplot')\n\n# painstakinly filling figure parameters (obtained by playing with sliders)\n# for matplotlib in by hand (no excuse not to look fly)\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = 'Ubuntu'\nplt.rcParams['font.monospace'] = 'Ubuntu Mono'\nplt.rcParams['font.size'] = 10\nplt.rcParams['axes.labelsize'] = 10\nplt.rcParams['axes.labelweight'] = 'bold'\nplt.rcParams['xtick.labelsize'] = 8\nplt.rcParams['ytick.labelsize'] = 8\nplt.rcParams['legend.fontsize'] = 10\nplt.rcParams['figure.titlesize'] = 12\n\n@dataclass\nclass KBandit:\n\t# in Python 3.7 we can type annotate variable for increased readility and get hints \n\t# on what you are dealing with, note that nothing is enforced on byte level however\n\tQmu: float = 5\n\tscale: float = 2\n\tbandit: int = 10\n\tQsigma: float = 2\n\tRsigma: float = 0.5\n\talgorithm: str = None\n\tepsilon: float = None\n\toptimism: float = None\n\ttimesteps: int = 20000\n\tstationary: bool = False\n\tapproximation: pd.DataFrame() = None\n\texpanded_mean: pd.DataFrame() = None\n\n\tdef initialize(self):\n\t\t# if we are dealing with the optimitic egreedy algorithm we initialize the Q-values with some \n\t\t# custom to be passed value, otherwise init with zero\n\t\tself.Q = np.ones((self.timesteps, self.bandit)) * self.optimism if bool(self.optimism) else np.zeros((self.timesteps, self.bandit))\n\t\tself.N = np.zeros((self.timesteps, self.bandit))\n\t\tself.r_over_time = np.zeros((self.timesteps, self.bandit))\n\t\t# let's define arms with true means that are also normally distributed\n\t\tself.Qtrue = npr.normal(self.Qmu, self.Qsigma, self.bandit)\n\t\t# retrieve the reward for a given arm, with fixed true mean for the respective arm, fixed variance for all arms\n\t\tself.rewards = lambda x: np.array([npr.normal(mu, self.Rsigma) for mu in self.Qtrue])[x]\n\t\t# egreedy random tie breaking 
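# --- Added illustration ---
# The meta_repos bookkeeping in generate_docs above is the classic
# defaultdict(list) grouping pattern: append under a key without first
# checking that the key exists (repo names here are made up).
import collections

meta_repos = collections.defaultdict(list)
for repo, aggregate in [('ros_comm', 'ros'), ('ros_tutorials', 'ros'), ('navigation', 'nav')]:
    meta_repos[aggregate].append(repo)
print(dict(meta_repos))  # {'ros': ['ros_comm', 'ros_tutorials'], 'nav': ['navigation']}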
when multiple equal max (disregard the *args for egreedy)\n\t\tself.egreedy = lambda q, *args: npr.randint(0, self.bandit) if npr.rand() < self.epsilon else npr.choice(np.flatnonzero(q == q.max()))\n\t\t# compute an argmax among the upper confidence bounds for every arm of the bandit\n\t\tself.ucb = lambda Q, c, t, n: np.argmax([q + (self.scale *(np.sqrt(np.log(t)/np.sum(n[0:t,i], axis=0)))) for i, q in enumerate(Q)])\n\t\t# neat trick to avoid case statement and easily generalizable, as this dict is easily extendable and modular\n\t\tself.alg_map = {'egreedy': self.egreedy, 'ucb': self.ucb}\n\t\t# we can pass functions as variables, and we can even pass\n\t\t# variables to these functions as variables (very cool)\n\t\tself.func = lambda *args: self.alg_map.get(self.algorithm)(*args)\n\n\tdef approximate(self):\n\t\t\"\"\"\n\t\tThe implementation of the actual algorithm and the updates of the Q-values over time.\n\t\tI tried to stay as close as possible to the pseudo-algorithms as described in Sutton and Barto, Ch. 2\n\t\tUltimately we are interested in how the average reward grows for the respective algorithms. \n\t\tI use a neat trick from pandas to do this.\n\t\t\"\"\"\n\t\tfor t in range(1, self.timesteps):\n\t\t\tAt = self.func(self.Q[t-1,], 2, t, self.N) # according to specified algorithm (self.algorithm): choose arm to lever\n\t\n\t\t\tR = self.rewards(At) # retrieve normally distributed reward for chosen action\n\n\t\t\tself.N[t, ] = self.N[t-1] # get action counts up until time t\n\t\t\tself.N[t, At] += 1 # update action count for action played\n\n\t\t\tself.Q[t,] = self.Q[t-1, ] # get q values up until time t\n\t\t\tself.Q[t, At] = self.Q[t-1, At] + ((R - self.Q[t-1, At])/self.N[t, At]) # update Q for action played\n\n\t\t\tself.r_over_time[t, At] = R # this is what we are ultimately interested in\n\n\t\t# note: sum first to wrap all rewards into 1 column, then compute its expanded mean\n\t\tself.r_over_time = pd.DataFrame(self.r_over_time).sum(axis=1).expanding().mean() \n\t\tself.approximation = pd.DataFrame(self.Q)\n\t\tself.expanded_mean = self.approximation.expanding().mean()\n\n\n\tdef plot_mean(self, last: bool = False):\n\t\t\"\"\"Fill in a 4x4 subplot figure, 1 for every k (4 < k < 21)\"\"\"\n\n\tdef return_mean(self) -> float:\n\t\t\"\"\"Retrieve the expanded average mean value at termination of the iteration cycle\"\"\"\n\t\treturn self.r_over_time.sum(axis=1).expanding().mean().iloc[-1]\n\ndef plot_param_dependence():\n\t\"\"\"\n\tRecreate the plot for the three algorithms and their dependence on\n\tthe value for their controlling parameter as seen in Sutton and Barto, Ch.2\n\n\t\"\"\"\n\tparam_values = [1/128, 1/64, 1/32, 1/16, 1/8, 1/4, 1/2, 1, 2, 4]\n\tucb_values, egreedy_values, optEGreedy_values = list(), list(), list()\n\ttimesteps = 1000\n\n\tfor param in param_values:\n\t\tUCB = KBandit(algorithm='ucb', bandit=10, scale=param, timesteps=timesteps)\n\t\tUCB.initialize()\n\t\tUCB.approximate()\n\t\tucb_values.append(UCB.return_mean())\n\n\t\tEGreedy = KBandit(algorithm='egreedy', bandit=10, epsilon=param, timesteps=timesteps)\n\t\tEGreedy.initialize()\n\t\tEGreedy.approximate()\n\t\tegreedy_values.append(EGreedy.return_mean())\n\n\t\toptEGreedy = KBandit(algorithm='egreedy', bandit=10, epsilon=0.1, optimism=param, timesteps=timesteps)\n\t\toptEGreedy.initialize()\n\t\toptEGreedy.approximate()\n\t\toptEGreedy_values.append(optEGreedy.return_mean())\n\n\tplt.clf()\n\n\t# formatting stuff\n\tplt.rcParams[\"figure.figsize\"] = [15, 10]\n\tplt.plot(np.arange(len(param_values)), ucb_values, label=r'UCB 
(c)')\n\tplt.plot(np.arange(len(param_values)), optEGreedy_values, label=r'Optimistic $\\epsilon$-greedy ($\\epsilon$=0.1) ($Q_{0}$)')\n\tplt.plot(np.arange(len(param_values)), egreedy_values, label=r'$\\epsilon$-greedy ($\\epsilon$)')\n\tplt.xticks(np.arange(len(param_values)), [r'$\\frac{1}{128}$', r'$\\frac{1}{64}$', r'$\\frac{1}{32}$', r'$\\frac{1}{16}$',\\\n\t\t\tr'$\\frac{1}{8}$', r'$\\frac{1}{4}$', r'$\\frac{1}{2}$', '1', '2', '4'])\n\tplt.xlabel(r'$\\epsilon$, $c$, $Q_{0}$')\n\tplt.suptitle(r'Average reward over first' + '\\n' + '1000 iterations ' + r'($k$=10)' , fontweight='bold')\n\tplt.ylabel(r'Average reward')\n\tplt.legend()\n\tplt.ylim(0, 12)\n\n\tplt.show()\n\ndef plot_expanding_mean(param=1/4, timesteps=10000):\n\t\"\"\"\n\t\tPlot the growth of the average reward for a specified parameter and timesteps\n\t\"\"\"\n\tplt.clf()\n\t# formatting stuff\n\tplt.rcParams['figure.subplot.left'] = 0.09\n\tplt.rcParams['figure.subplot.bottom'] = 0.08\n\tplt.rcParams['figure.subplot.right'] = 0.90\n\tplt.rcParams['figure.subplot.top'] = 0.91\n\tplt.rcParams['figure.subplot.wspace'] = 0.25\n\tplt.rcParams['figure.subplot.hspace'] = 0.34\n\tplt.rcParams[\"figure.figsize\"] = [20, 10]\n\n\tfor k in range(5, 21):\n\t\t\n\t\tprint(k)\n\n\t\tUCB = KBandit(algorithm='ucb', bandit=k, scale=param, timesteps=timesteps)\n\t\tUCB.initialize()\n\t\tUCB.approximate()\n\t\tUCB.plot_mean()\n\n\t\toptEGreedy = KBandit(algorithm='egreedy', bandit=k, epsilon=param, optimism=param, timesteps=timesteps)\n\t\toptEGreedy.initialize()\n\t\toptEGreedy.approximate()\n\t\toptEGreedy.plot_mean()\n\n\t\tEGreedy = KBandit(algorithm='egreedy', bandit=k, epsilon=param, timesteps=timesteps)\n\t\tEGreedy.initialize()\n\t\tEGreedy.approximate()\n\t\tEGreedy.plot_mean()\n\n\t# formatting stuff\n\tplt.suptitle(r'Average reward for varying $K$'+ '\\n' + r'($\\epsilon$, $c$, $Q_{0}$)=$\\frac{1}{4}$', fontweight='bold')\n\n\tplt.text(-timesteps*1.55, -2, 'Iterations',\n rotation=0,\n size=16,\n horizontalalignment='center',\n verticalalignment='top',\n multialignment='center')\n\n\tplt.text(-timesteps*4.5, 35, 'Average reward',\n rotation=90,\n size=16,\n horizontalalignment='center',\n verticalalignment='top',\n multialignment='center')\n\t\t\n\tplt.show()\n\ndef plot_rewards_distribution():\n\t\"\"\"\n\t\tIn order to visualize what the distributions of the arms look like for varying K.\n\t\tNote that the closer the means for certain arms are, the lower the KL-divergence is.\n\t\tA nice way to explain for example an equal performance of UCB and optimistic egreedy,\n\t\tor even an outperformance of optimistic egreedy.\n\t\n\t\"\"\"\n\tplt.clf()\n\n\t# format stuff\n\tplt.suptitle(r'Distribution of rewards for varying k' + '\\n'+ r'Sample size = $10^{5}$', fontweight='bold')\n\tplt.rcParams['figure.subplot.left'] = 0.09\n\tplt.rcParams['figure.subplot.bottom'] = 0.08\n\tplt.rcParams['figure.subplot.right'] = 0.90\n\tplt.rcParams['figure.subplot.top'] = 0.91\n\tplt.rcParams['figure.subplot.wspace'] = 0.25\n\tplt.rcParams['figure.subplot.hspace'] = 0.34\n\tplt.rcParams[\"figure.figsize\"] = [35, 10]\n\n\t# sample for every arm, for every k \n\t# to estimate its distribution\n\tfor k in range(5, 21):\n\t\tprint(k) # to indicate how far we are (takes a while, leaving it here delibaretly)\n\t\tplt.subplot(4, 4, (k-5)+1)\n\n\t\tUCB = KBandit(algorithm='ucb', bandit=k, scale=1/4, timesteps=10000)\n\t\tUCB.initialize()\n\n\t\tdraws = list()\n\t\tfor draw in range(100000):\n\t\t\tdraw = list()\n\t\t\tfor arm in 
range(0,k):\n\t\t\t\tdraw.append(UCB.rewards(arm))\n\t\t\tdraws.append(draw)\n\n\t\tplt.xticks(visible=False)\n\t\tsns.violinplot(data=pd.DataFrame(draws), inner=\"points\")\n\t\tplt.title(r'$K$=%s'%k)\n\n\t# format stuff\n\tplt.text(-27, -2, 'Bandits',\n rotation=0,\n size=16,\n horizontalalignment='center',\n verticalalignment='top',\n multialignment='center',\n fontweight='bold')\n\n\tplt.text(-80, 35, 'Reward distribution',\n rotation=90,\n size=16,\n horizontalalignment='center',\n verticalalignment='top',\n multialignment='center',\n fontweight='bold')\n\t\n\tplt.show()\n\n\n\n\n","sub_path":"KBandit.py","file_name":"KBandit.py","file_ext":"py","file_size_in_byte":9870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393433653","text":"import requests\nimport csv\nimport os\nimport json\nimport boto3\nfrom datetime import datetime\nfrom datetime import timedelta \n\nTOKEN_CSV = os.environ['FOLDERPATH'] + os.environ['TOKEN_CSV']\nUSER_CSV = os.environ['FOLDERPATH'] + os.environ['USER_CSV']\nAPPSYNC_API_KEY = os.environ['APPSYNC_API_KEY']\nAPPSYNC_URL = os.environ['APPSYNC_URL']\nS3_BUCKET = os.environ['S3_BUCKET']\nSTART_DATE = os.getenv('START_DATE') or datetime.now()\nEND_DATE = os.getenv('END_DATE') or datetime.now() + timedelta(days=7)\n\ns3 = boto3.client('s3')\n\ntry:\n s3.download_file(S3_BUCKET, os.environ['TOKEN_CSV'], TOKEN_CSV)\n\n with open(TOKEN_CSV, mode='r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader, None)\n for row in csv_reader:\n\n headers = {\n 'Content-Type' : \"application/graphql\",\n 'x-api-key' : APPSYNC_API_KEY,\n 'cache-control' : \"no-cache\",\n 'Authorization' : row[1]\n }\n break\n\n def execute_gql(query):\n payload_obj = {\"query\": query}\n print(payload_obj)\n payload = json.dumps(payload_obj)\n response = requests.request(\"POST\", APPSYNC_URL, data=payload, headers=headers)\n return response\n\n if __name__ == '__main__':\n DATE_TIME_STRING_FORMAT = '%Y-%m-%dT%H:%M+0000'\n while START_DATE <= END_DATE:\n START_DATE = START_DATE + timedelta(hours=1)\n depart_date = datetime.strftime(START_DATE, DATE_TIME_STRING_FORMAT)\n arrival_date = datetime.strftime(START_DATE + timedelta(days=2), DATE_TIME_STRING_FORMAT)\n \n query = f'mutation {{createFlight(input:{{departureDate: \"{depart_date}\", departureAirportCode: \\\"LGW\\\",departureAirportName: \\\"London Gatwick\\\",departureCity: \\\"London\\\", departureLocale: \\\"Europe/London\\\", arrivalDate: \"{arrival_date}\", arrivalAirportCode: \\\"MAD\\\", arrivalAirportName: \\\"Madrid Barajas\\\", arrivalCity: \\\"Madrid\\\", arrivalLocale: \\\"Europe/Madrid\\\", ticketPrice: 100, ticketCurrency: \\\"EUR\\\",flightNumber: 1830, seatCapacity: 2000}}) {{id}}}}'\n print(execute_gql(query)) \n\nexcept Exception as error:\n print(f'Exception - {error}')","sub_path":"src/perf-tests/setup/mock-scripts/load-flight-data.py","file_name":"load-flight-data.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80769027","text":"import numpy as np\nimport multiprocessing as mp\nfrom contextlib import contextmanager\nfrom hdf5storage import loadmat, savemat\nfrom os.path import join, isfile, isdir\nimport os\n\nfrom util import *\n\n# Pool the a function across multiple inputs and wait for them to complete\n@contextmanager\ndef poolcontext(num_proc):\n pool = mp.Pool(num_proc)\n yield pool\n pool.terminate()\n\n# Get the confograms 
for the given logits and ground truth labels\ndef confs_for_pixels(logits, gt, slices, args):\n\t# If we are not taking the softmax by slice, take the softmax once and be done with it\n\tif not args.sm_by_slice:\n\t\tsm = sm_of_logits(logits, start_idx=1, zero_pad=True)\n\n\tfor i, slc in enumerate(slices):\n\t\t# Remap the ground truth to the local labels of the current slice\n\t\tslc_gt = np.array([remap_gt(lab, slc) for lab in gt])\n\t\t\n\t\t# If we are taking the softmax by slice, remap the logits then take the softmax\n\t\t#\n\t\t# Otherwise, just remap the softmax previously computed\n\t\tif args.sm_by_slice:\n\t\t\tslc_logits = np.array([remap_scores(logit_vec, slc) for logit_vec in logits])\n\t\t\tslc_sm = sm_of_logits(slc_logits)\n\t\telse:\n\t\t\tslc_sm = np.array([remap_scores(sm_vec, slc) for sm_vec in sm])\n\n\t\t\t\n\t\tfor j, node in enumerate(slc):\n\t\t\t# Since we are measuring precision in the calibration confograms, mask the ground truth and softmax by where the current node is softmax\n\t\t\tpred_labels = np.argmax(slc_sm, axis=-1)\n\t\t\targmax_mask = pred_labels == j\n\n\t\t\tslc_gt_masked = slc_gt[argmax_mask]\n\t\t\tslc_sm_masked = slc_sm[argmax_mask]\n\n\t\t\t# Because of the previous mask, the j-th softmax value will always be the max\n\t\t\tsm_conf = slc_sm_masked[:,j]\n\n\t\t\t# Save the confidence of each pixel as well as whether it was correct to disk\n\t\t\tnode.append_confs(sm_conf, slc_gt_masked == j)\n\n\treturn slices\n\n\n# Return the correct and count confograms given the hierarchy specified by slices\ndef get_confs_for_idxs(idxs, slices, args):\n\tfor slc in slices:\n\t\tfor node in slc:\n\t\t\tnode.__init__(node.name, node.node_idx, node.terminals, data_dir=args.data_dir)\n\n\t# If we are computing the confograms on the fly, load each image individually and accumulate all the confograms\n\tfor idx in idxs:\n\t\tlogits = load_logits(args.imset, idx, reshape=True)\n\t\tgt = load_gt(args.imset, idx, reshape=True)\n\n\t\tfg_mask = fg_mask_for(gt)\n\t\tlogits = logits[fg_mask]\n\t\tgt = gt[fg_mask]\n\n\t\tslices = confs_for_pixels(logits, gt, slices, args)\n\n\treturn slices\n\ndef get_confs_for_idxs_unpack(params):\n\treturn get_confs_for_idxs(*params)\n\ndef aggregate_proc_confs(proc_slices, slices, data_dir):\n\tfor i, slc in enumerate(slices):\n\t\tfor j, node in enumerate(slc):\n\t\t\tnode.__init__(node.name, node.node_idx, node.terminals, data_dir=args.data_dir, is_main=True)\n\t\t\n\t\t\tconf_f = open(node.conf_file, 'a')\n\t\t\tcorr_f = open(node.corr_file, 'a')\n\n\t\t\tfor proc_slice in proc_slices:\n\t\t\t\tproc_node = proc_slice[i][j]\n\t\t\t\t\n\t\t\t\tconf, corr_mask = proc_node.get_file_contents()\n\t\t\t\tproc_node.reset()\n\t\t\t\t\n\t\t\t\tif conf is None:\n\t\t\t\t\tcontinue\n\n\t\t\t\tnp.savetxt(conf_f, conf)\n\t\t\t\tnp.savetxt(corr_f, corr_mask)\n\n\t\t\tconf_f.close()\n\t\t\tcorr_f.close()\n\n\treturn slices\n\n\nfrom argparse import ArgumentParser\nparser = ArgumentParser(description='Build the calibration hierarchy using multiprocessing.')\nparser.add_argument('--slice_file', dest='slice_file', type=str, default='slices.pkl', help='The pickle file that specifies the hierarchy.')\nparser.add_argument('--imset', dest='imset', type=str, default='val', help='The image set to build the calibration confograms from. 
Either val or test')\nparser.add_argument('--num_proc', dest='num_proc', type=int, default=1, help='The number of processes to spawn to parallelize calibration.')\nparser.add_argument('--output_file', dest='output_file', type=str, default=None, help='The pickle file to output the calibration hierarchy to. None if slice_file to be overwritten.')\nparser.add_argument('--dont_reset', dest='reset', action='store_false', help='Pass if you want to accumulate calibration confograms. Normally they are reset when this script is run.')\nparser.add_argument('--sm_by_slice', dest='sm_by_slice', action='store_true', help='Whether or not to take the softmax of the logits at each slice of the hierarchy. True by default.')\nparser.add_argument('--data_dir', dest='data_dir', type=str, default='calib_data', help='The data to store confidences in')\nparser.add_argument('--test', dest='test', action='store_true', help='Whether or not to test the calibration script. Takes the first 2*num_proc from the imset.')\n\n\nif __name__ == '__main__':\n\targs = parser.parse_args()\n\t\n\tif not isdir(args.data_dir):\n\t\tos.mkdir(args.data_dir)\n\n\t# Load the slices from the specified file\n\n\tslices = read_slices(args.slice_file, reset=args.reset)\n\n\t# Load the index ordering -- indexes are ordered by number of foreground pixels in descending order\n\t#\n\t# This way, if multiprocessing is used, all processes will be given approximately the same workload\n\n\tidx_ordering = None\n\tidx_ordering_fname = args.imset.lower() + '_ordered.txt'\n\n\tif not isfile(idx_ordering_fname):\n\t\tfrom order_by_num_fg import order_imset_by_num_fg\n\t\tidx_ordering = order_imset_by_num_fg(args.imset, save=True)\n\telse:\n\t\twith open(idx_ordering_fname) as f:\n\t\t\tidx_ordering = [int(idx) for idx in f.read().split('\\n')]\n\t\t\t\n\t\t\t\n\tif args.test:\n\t\tidx_ordering = idx_ordering[:args.num_proc]\n\n\t# Split the indexes up between processed to try and spread the work evenly\n\n\tidx_ordering = np.array(idx_ordering)\n\tparam_batches = []\n\n\tfor procno in range(args.num_proc):\n\t\tidx_batch = idx_ordering[procno::args.num_proc]\n\t\tparam_batches.append((idx_batch, slices.copy(), args))\n\n\twith poolcontext(args.num_proc) as p:\n\t\tproc_slices = p.map(get_confs_for_idxs_unpack, param_batches)\n\n\tslices = aggregate_proc_confs(proc_slices, slices, args.data_dir)\n\n\t# Save the calibration data\n\n\toutput_fname = args.output_file\n\tif output_fname is None:\n\t\toutput_fname = args.slice_file\n\n\tsave_slices(output_fname, slices)\n\t\n\tif args.test:\n\t\tos.remove(args.data_dir)\n","sub_path":"build_calib_hier_par.py","file_name":"build_calib_hier_par.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60491014","text":"T=int(input())\nfor i in range(T):\n H, W, N=map(int, input().split())\n floor=0\n ho=0\n if N%H==0:\n floor=H*100\n ho=N//H\n else:\n floor=(N%H)*100\n ho=1+N//H\n print(floor+ho)","sub_path":"BaeckJoon/No10250.py","file_name":"No10250.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"277948705","text":"# Enter script code\n# This code work only just after copying, not later.\nimport time\ntime.sleep(0.25)\n\n\nnotecontent = clipboard.get_selection()\n\n\nnotepath = \"/home/bhishan/Dropbox/AutoKey/clips/\"\nnotename = \"note\" + time.strftime(\"%Y%m%d%H%M%S\") + \".txt\"\n\n# write note\nfo = 
open(notepath+notename,'w')\nfo.write(notecontent)\nfo.close()","sub_path":"Tutorial_Ubuntu/AutoKey/New Note.py","file_name":"New Note.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"424949547","text":"import logic\nimport random\n\n\"\"\"\nThis algorithm tests which direction results in the highest score. If multiple directions have same score, they are ordered by priority.\n\"\"\"\n\ndef getNextMoves(matrix):\n \"\"\" alrogithm to determine which moves to do next.\n\n return either a list of allowed moves (i.e. either 1,2,3 or 4, or as string \"left\", \"right, \"up\", \"down\") or only the next move\n \"\"\"\n\n max_score = logic.score(matrix)\n same_score = []\n for i in [\"right\",\"down\",\"left\",\"up\"]:\n temp, done = logic.direction(matrix, i)\n if not done: continue\n this_score = logic.score(temp)\n if this_score > max_score:\n max_score = this_score\n same_score = []\n if this_score == max_score:\n same_score.append(i)\n\n # pick one of possible directions with highest scored according to priorities\n return same_score[0]\n","sub_path":"algorithms/scoreMaximizer.py","file_name":"scoreMaximizer.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456598606","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nCode to calculate KS 1 sample test and accept/reject hypothesis(2c)\nCode to calculate Permutation test and accept/reject hypothesis(2c)\nTakes cleaned state 4 dataframe as input \nInput to be passed to KS_1_sample_main and Permutation_main\n\"\"\"\nimport copy\nimport pandas as pd\nimport numpy as np\nfrom decimal import *\nfrom scipy.stats import poisson\nfrom scipy.stats import geom\nfrom scipy.stats import binom\nimport matplotlib.pyplot as plt\nimport math\nimport clean\n\n\n# Calculate CDF for the number of cases/deaths on each day\ndef get_cdf_list(num_samples):\n cdf_list = []\n cumulative_pr = 0\n for _ in range(num_samples):\n cumulative_pr += 1 / num_samples\n cdf_list.append(cumulative_pr)\n return cdf_list\n\n#Calculate max cdf to the left of the point\ndef get_left_cdf(state_df, col_name, x, eCDF_col):\n if state_df[col_name].max() < x:\n return 1\n elif state_df[col_name].min() > x:\n return 0\n else:\n left_cdf = state_df.loc[state_df[col_name] < x, eCDF_col]\n F_cap_left = 0.0 if left_cdf.empty else left_cdf.max()\n return F_cap_left\n\n#Calculate min cdf to the right of the point\ndef get_right_cdf(state_df, col_name, x, eCDF_col):\n if state_df[col_name].max() < x:\n return 1\n elif state_df[col_name].min() > x:\n return 0\n else:\n right_cdf = state_df.loc[state_df[col_name] >= x, eCDF_col]\n F_cap_right = 0.0 if right_cdf.empty else right_cdf.min()\n return F_cap_right\n \n#Calculate sample mean for given data and column\ndef sample_mean(data, CT_col_name):\n x_points = data[CT_col_name].to_list()\n total = 0\n mean = 0\n for x in x_points:\n total = total + x\n mean = total/ len(data)\n \n return mean\n\n#Calculate sample variance for given data and column and mean\ndef sample_variance(data, sample_mean, CT_col_name):\n x_points = data[CT_col_name].to_list()\n total = 0\n for x in x_points:\n total = total + pow((x-sample_mean),2)\n variance = total/len(data)\n \n return variance\n\n#Calculates Poisson MME Parameters\ndef poisson_para(mean):\n lambda_mme = mean\n return lambda_mme\n\n#Calculates Geometric MME Parameters\ndef geometric_para(mean):\n p_mme = 1/mean\n 
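# For a Geometric(p) distribution on {1, 2, ...} (the convention scipy.stats.geom\n    # follows later in this file), E[X] = 1/p, so matching the first moment to the\n    # sample mean gives the method-of-moments estimate p_mme = 1/mean.\n    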
return p_mme\n\n#Calculates Binomial MME Parameters\ndef binomial_para(mean,variance):\n n_mme = pow(mean,2)/(mean - variance)\n p_mme = mean/n_mme\n \n return n_mme,p_mme\n\ndef plot_KS_1_Sample_eCDF(DC_df, DC_col_name, max_diff_x, x_label, distribution_type):\n \n y_ecdf_mme = DC_df['DC_eCDF_mme'].to_numpy()\n x_points_DC = DC_df[DC_col_name].to_numpy()\n y_ecdf_DC = DC_df['DC_eCDF'].to_numpy()\n\n plt.figure('KS 1-Sample Test eCDF', figsize=(6,6))\n plt.step(x_points_DC, y_ecdf_mme, where='post', lw = 1.5, label=distribution_type+ ' CDF')\n plt.step(x_points_DC, y_ecdf_DC, where='post', lw = 1.5, label='DC eCDF')\n for x in max_diff_x:\n plt.axvline(x, linestyle=\"dashed\", lw=1)\n\n plt.xlabel(x_label)\n plt.ylabel('eCDF')\n plt.legend(loc='best')\n plt.grid()\n plt.show()\n\n\n\n#Calculates KS Statistic Values\ndef calc_KS_1_sample_test(x_points, parameters_list, data, distribution_name, column_type , column_name):\n dict_name = 'KS_' + distribution_name +'_cols'\n dict_name = ['x', 'F_cap_x', 'F_cap_DC_left', 'F_cap_DC_right', 'left_diff_abs', 'right_diff_abs']\n row_list = []\n for x in x_points:\n if(distribution_name == 'binomial'):\n #Find cdf of binomial at given point x\n F_cap_x = binom.cdf(x, parameters_list[0], parameters_list[1])\n if(distribution_name == 'poisson'):\n #Find cdf of poisson at given point x\n F_cap_x = poisson.cdf(x, parameters_list[0])\n if(distribution_name == 'geometric'):\n #Find cdf of geometric at given point x\n F_cap_x = geom.cdf(x, parameters_list[0])\n # Find CDF to the left of point x in the sorted DC dataset\n F_cap_DC_left = get_left_cdf(data ,column_name, x, 'DC_eCDF')\n # Find CDF to the right of point x in the sorted DC dataset\n F_cap_DC_right = get_right_cdf(data, column_name, x, 'DC_eCDF')\n # Find absolute difference between left CDFs of x points and DC datasets\n left_diff_abs = round(abs(F_cap_x - F_cap_DC_left), 4)\n # Find absolute difference between right CDFs of x points and DC datasets\n right_diff_abs = round(abs(F_cap_x - F_cap_DC_right), 4)\n \n row = [x, F_cap_x, F_cap_DC_left, F_cap_DC_right, left_diff_abs, right_diff_abs]\n row_dict = dict(zip(dict_name, row))\n row_list.append(row_dict)\n \n # Build KS Test Table (represented as a dataframe) \n df_name = 'KS_' + distribution_name +'_df'\n df_name = pd.DataFrame(row_list, columns=dict_name)\n \n # Calculate KS statistic value\n max_diff_x = []\n d_right = df_name.iloc[df_name['right_diff_abs'].idxmax(axis=1)][['x', 'right_diff_abs']]\n d_left = df_name.iloc[df_name['left_diff_abs'].idxmax(axis=1)][['x', 'left_diff_abs']]\n if d_right['right_diff_abs'] == d_left['left_diff_abs']:\n print(\"KS Statistic is {0} at x = {1} and {2}\".format(d_right['right_diff_abs'], d_left['x'], d_right['x']))\n max_diff_x.append(d_right['x'])\n max_diff_x.append(d_left['x'])\n elif d_right['right_diff_abs'] > d_left['left_diff_abs']:\n print(\"KS Statistic is {0} at x = {1}\".format(d_right['right_diff_abs'], d_right['x']))\n max_diff_x.append(d_right['x'])\n else:\n print(\"KS Statistic is {0} at x = {1}\".format(d_left['left_diff_abs'], d_left['x']))\n max_diff_x.append(d_left['x'])\n\n # Reject/Accept Null Hypothesis based on calculated KS Statistic d and given threshold=0.05\n d = max(d_right['right_diff_abs'], d_left['left_diff_abs'])\n critical_value = 0.05\n hypothesis_type = 'confirmed positive cases' if column_type == 'confirmed' else column_type\n\n if d > critical_value:\n print(\"Rejected Null Hypothesis: We reject the hypothesis that the distribution of daily {0} in DC is {3}, as KS 
Statistic d = {1} exceeds threshold {2}\".format(hypothesis_type, d, critical_value, distribution_name))\n print()\n else:\n print(\"Failed to reject Null Hypothesis: We accept the hypothesis that the distribution of daily {0} is same in both CT and DC, as KS Statistic d = {1} does not exceed threshold {2}\".format(hypothesis_type, d, critical_value))\n print()\n \n \n return max_diff_x\n\n#Generates required data before calling calculation of KS 1 sample test statistic \ndef KS_1_sample_test(states_data,column_type, distribution_type):\n CT_col_name = 'CT ' + column_type\n DC_col_name = 'DC ' + column_type\n\n # Split the dataset per state and sort the 2 state-specific columns on which we need to perform the KS Test (#cases/#confirmed)\n CT_sorted_df = states_data.loc[(states_data['Date'] >= '2020-10-01') & (states_data['Date'] <= '2020-12-31')][[CT_col_name]].sort_values(CT_col_name).reset_index(drop=True)\n DC_sorted_df = states_data.loc[(states_data['Date'] >= '2020-10-01') & (states_data['Date'] <= '2020-12-31')][[DC_col_name]].sort_values(DC_col_name).reset_index(drop=True)\n\n # Add a new column denoting the CDF at each point in the DC cases/deaths columns\n DC_sorted_df['DC_eCDF'] = get_cdf_list(DC_sorted_df.shape[0])\n \n # Find distinct datapoints and their corresponding CDFs at each of the points\n DC_distinct_df = DC_sorted_df.drop_duplicates(subset=DC_col_name, keep=\"last\").reset_index(drop=True)\n \n # points for x column for KS test\n x_points = DC_distinct_df[DC_col_name].to_list()\n \n if (distribution_type == 'poisson'):\n #Poisson distribution\n mean = sample_mean(CT_sorted_df, CT_col_name)\n #MME parameters\n lambda_mme = poisson_para(mean)\n #Calls calculation of KS 1 sample statistic \n max_diff_x = calc_KS_1_sample_test(x_points, [lambda_mme] , DC_distinct_df , 'poisson', column_type,DC_col_name ) \n \n DC_sorted_df['DC_eCDF_mme'] = DC_sorted_df.apply(lambda row : poisson.cdf(row[DC_col_name], lambda_mme),axis =1)\n plot_KS_1_Sample_eCDF( DC_sorted_df, DC_col_name, max_diff_x, column_type.capitalize(),distribution_type)\n \n \n if (distribution_type == 'geometric'):\n #Geometric distribution\n mean = sample_mean(CT_sorted_df,CT_col_name)\n #MME parameters\n p_mme = geometric_para(mean)\n #Calls calculation of KS 1 sample statistic \n max_diff_x = calc_KS_1_sample_test(x_points, [p_mme] , DC_distinct_df , 'geometric', column_type, DC_col_name)\n \n DC_sorted_df['DC_eCDF_mme'] = DC_sorted_df.apply(lambda row : geom.cdf(row[DC_col_name], p_mme),axis =1)\n plot_KS_1_Sample_eCDF( DC_sorted_df, DC_col_name, max_diff_x, column_type.capitalize(),distribution_type)\n \n if (distribution_type == 'binomial'):\n #Binomial distribution\n mean = sample_mean(CT_sorted_df,CT_col_name)\n variance = sample_variance(CT_sorted_df,mean,CT_col_name)\n #MME parameters\n n_mme, p_mme = binomial_para(mean,variance)\n #Calls calculation of KS 1 sample statistic c\n max_diff_x = calc_KS_1_sample_test(x_points, [n_mme, p_mme] , DC_distinct_df , 'binomial', column_type, DC_col_name)\n \n DC_sorted_df['DC_eCDF_mme'] = DC_sorted_df.apply(lambda row : binom.cdf(row[DC_col_name], n_mme, p_mme),axis =1)\n plot_KS_1_Sample_eCDF(DC_sorted_df, DC_col_name, max_diff_x, column_type.capitalize(),distribution_type)\n\n \n#Calculates p value for permutaiton test\ndef calc_permutation_test(sample_size, data_points, t_obs , len_points):\n outlier_count = 0\n for i in range(sample_size):\n #Generates a random array of data_points\n perm_data = np.random.permutation(data_points)\n CT_mean = 0\n DC_mean = 0\n 
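# Under H0 (CT and DC share one distribution) the pooled observations are\n        # exchangeable, so every shuffle yields a valid draw of the null statistic:\n        # the first len_points shuffled entries play the CT sample, the rest DC.\n        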
for index in range(len(perm_data)):\n if index < len_points:\n #Calculates sum of X points\n CT_mean += float(perm_data[index])\n else:\n #Calculates sum of Y points\n DC_mean += float(perm_data[index])\n #Calculates mean of X and Y points\n CT_mean /= len_points\n DC_mean /= (len(data_points) - len_points)\n #Add 1 if difference in mean greater than T observed\n if abs(CT_mean - DC_mean) > t_obs:\n outlier_count += 1\n #Calculates and returns p value\n return outlier_count / sample_size\n\n#Generates required data before calling calculation of permutation test p value\ndef Permutation_test(states_data,column_type):\n \n CT_col_name = 'CT ' + column_type\n DC_col_name = 'DC ' + column_type\n\n # Split the dataset per state and sort the 2 state-specific columns on which we need to perform the KS Test (#cases/#confirmed)\n CT_df = states_data.loc[(states_data['Date'] >= '2020-10-01') & (states_data['Date'] <= '2020-12-31')][[CT_col_name]]\n DC_df = states_data.loc[(states_data['Date'] >= '2020-10-01') & (states_data['Date'] <= '2020-12-31')][[DC_col_name]]\n \n #Calculates sample mean of X and Y points\n CT_mean = sample_mean(CT_df, CT_col_name)\n DC_mean = sample_mean(DC_df, DC_col_name)\n \n #Calculates T observed \n T_obs = abs(CT_mean - DC_mean)\n \n #Converts dataframe points into list\n x_points = CT_df[CT_col_name].to_list()\n y_points = DC_df[DC_col_name].to_list()\n\n #Adds both lists to create a single list for random permutation\n data_points = x_points + y_points\n \n #Calls calculation of p value function with 1000 permutations\n p_value_1000 = calc_permutation_test(1000, data_points, T_obs , len(x_points))\n \n # Reject/Accept Null Hypothesis based on calculated p value and given threshold=0.05\n critical_value = 0.05\n hypothesis_type = 'confirmed positive cases' if column_type == 'confirmed' else column_type\n \n if p_value_1000 <= critical_value:\n print(\"Rejected Null Hypothesis: We reject the hypothesis that the distribution of daily {0} is same in both CT and DC, as Permutation test p-value = {1} does not exceed threshold {2}\".format(hypothesis_type, p_value_1000, critical_value))\n print()\n else:\n print(\"Failed to reject Null Hypothesis: We accept the hypothesis that the distribution of daily {0} is same in both CT and DC, as Permutation test p-value = {1} exceeds threshold {2}\".format(hypothesis_type, p_value_1000, critical_value))\n print()\n \n#KS 1 sample test main function \ndef KS_1_sample_main(states_data):\n print(\"{0} 2c) KS_1_sample test starts here {0}\".format(20*\"-\"))\n KS_1_sample_test(states_data,'confirmed', 'poisson')\n KS_1_sample_test(states_data,'confirmed', 'geometric')\n KS_1_sample_test(states_data,'confirmed', 'binomial')\n KS_1_sample_test(states_data,'deaths', 'poisson')\n KS_1_sample_test(states_data,'deaths', 'geometric')\n KS_1_sample_test(states_data,'deaths', 'binomial')\n\n#Permutation test main function \ndef Permutation_main(states_data):\n print(\"{0} 2c) Permutation test starts here {0}\".format(20*\"-\"))\n Permutation_test(states_data,'confirmed')\n Permutation_test(states_data,'deaths')\n","sub_path":"cse-544-project/src/one_sample_ks_perm.py","file_name":"one_sample_ks_perm.py","file_ext":"py","file_size_in_byte":13218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"95837893","text":"def returnMedian(a, b):\n countstop = int((len(a) + len(b)) / 2) - 1\n lp = 0\n sp = 0\n\n if len(a) < len(b):\n a, b = b, a\n\n data = None\n while countstop:\n countstop -= 1\n\n if 
a[lp] <= b[sp]:\n data = a[lp]\n lp += 1\n elif sp <= len(b) - 1 and a[lp] > b[sp]:\n data = b[sp]\n if sp >= len(b) - 1:\n pass\n else:\n sp += 1\n\n print(data)\n\n\na = [1, 4, 5, 6, 8]\nb = [2, 3, 7]\n\nreturnMedian(a, b)\n","sub_path":"Archive/P/ArrayStrings/medianof2sortedarrays1.py","file_name":"medianof2sortedarrays1.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"55876349","text":"\"\"\"Project_books URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom books.views import home_view, create_view, edit_view, view_view, delete_view, edit_global_view, view_global_view, delete_global_view, edit_list_view, view_list_view, delete_list_view\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', home_view),\n path('create/', create_view),\n path('edit/', edit_view),\n path('view/', view_view),\n path('delete/', delete_view),\n\n path('edit//', edit_global_view),\n path('view/', view_global_view),\n path('delete/', delete_global_view),\n \n path('edit_list/', edit_list_view),\n path('view_list/', view_list_view),\n path('delete_list/', delete_list_view)\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"Project_books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601840313","text":"from tornado.httpclient import HTTPClient, HTTPError\n\nfrom lxml import etree\n\nfrom hkauth.exceptions import (\n InvalidAPIKey\n)\n\nfrom hkauth.helpers import cached\n\n@cached(time=1800)\ndef character(character_id):\n http_client = HTTPClient()\n\n try:\n response = http_client.fetch(\"https://api.eveonline.com/eve/CharacterInfo.xml.aspx?characterID={}\".format(character_id))\n except HTTPError as e:\n if e.code in (400, 403):\n raise InvalidAPIKey()\n else:\n raise\n\n document = etree.fromstring(response.body)\n\n http_client.close()\n\n result = document.xpath(\"/eveapi/result\")[0]\n\n # Special cases that might not exist in the result\n alliance_id = None\n alliance_name = None\n\n if len(result.xpath(\"allianceID\")):\n alliance_id = int(result.xpath(\"allianceID\")[0].text)\n\n if len(result.xpath(\"alliance\")):\n alliance_name = result.xpath(\"alliance\")[0].text\n\n return {\n \"character_id\": int(result.xpath(\"characterID\")[0].text),\n \"character_name\": result.xpath(\"characterName\")[0].text,\n \"corporation_id\": int(result.xpath(\"corporationID\")[0].text),\n \"corporation_name\": result.xpath(\"corporation\")[0].text,\n \"alliance_id\": alliance_id,\n \"alliance_name\": alliance_name\n 
}\n","sub_path":"hkauth/eve/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"446574686","text":"import bpy\nimport bmesh\nimport mathutils\nfrom mathutils import *\nfrom math import *\n\nfrom . import DelaunayVoronoi\n\n# utility properties & functions\ndefaultResolutionSynthesis = 16\n\n# list of safe functions for eval()\nsafe_list = ['math', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh',\n 'degrees', 'e', 'exp', 'fabs', 'floor', 'fmod', 'frexp', 'hypot',\n 'ldexp', 'log', 'log10', 'modf', 'pi', 'pow', 'radians',\n 'sin', 'sinh', 'sqrt', 'tan', 'tanh']\n\n# use the list to filter the local namespace\nsafe_dict = dict((k, globals().get(k, None)) for k in safe_list)\n\n# A very simple \"bridge\" tool.\n# Connects two equally long vertex rows with faces.\n# Returns a list of the new faces (list of lists)\n#\n# vertIdx1 ... First vertex list (list of vertex indices).\n# vertIdx2 ... Second vertex list (list of vertex indices).\n# closed ... Creates a loop (first & last are closed).\n# flipped ... Invert the normal of the face(s).\n#\n# Note: You can set vertIdx1 to a single vertex index to create\n# a fan/star of faces.\n# Note: If both vertex idx list are the same length they have\n# to have at least 2 vertices.\ndef createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):\n faces = []\n\n if not vertIdx1 or not vertIdx2:\n return None\n\n if len(vertIdx1) < 2 and len(vertIdx2) < 2:\n return None\n\n fan = False\n if (len(vertIdx1) != len(vertIdx2)):\n if (len(vertIdx1) == 1 and len(vertIdx2) > 1):\n fan = True\n else:\n return None\n\n total = len(vertIdx2)\n\n if closed:\n # Bridge the start with the end.\n if flipped:\n face = [\n vertIdx1[0],\n vertIdx2[0],\n vertIdx2[total - 1]]\n if not fan:\n face.append(vertIdx1[total - 1])\n faces.append(face)\n\n else:\n face = [vertIdx2[0], vertIdx1[0]]\n if not fan:\n face.append(vertIdx1[total - 1])\n face.append(vertIdx2[total - 1])\n faces.append(face)\n\n # Bridge the rest of the faces.\n for num in range(total - 1):\n if flipped:\n if fan:\n face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]\n else:\n face = [vertIdx2[num], vertIdx1[num],\n vertIdx1[num + 1], vertIdx2[num + 1]]\n faces.append(face)\n else:\n if fan:\n face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]\n else:\n face = [vertIdx1[num], vertIdx2[num],\n vertIdx2[num + 1], vertIdx1[num + 1]]\n faces.append(face)\n\n return faces\n\n \nclass VoronoiSurface:\n def __init__(self, locations, xbuff, ybuff):\n self.locations = locations\n self.xbuff = xbuff\n self.ybuff = ybuff\n\n def Calculate(self):\n rvVertices = []\n rvFaces = []\n \n rawVertices, rawFacesDict = DelaunayVoronoi.computeVoronoiDiagram(self.locations, self.xbuff, self.ybuff, polygonsOutput=True, formatOutput=True, closePoly=False)\n \n for rawVert in rawVertices: \n rvVertices.append(mathutils.Vector((rawVert[0], rawVert[1], 0.0))) # TODO: is it possible to keep/calc z?\n \n # need to remove possible doubles, for now\n for key, val in rawFacesDict.items(): \n newVal = []\n for currI in val:\n if not currI in newVal: newVal.append(currI)\n if len(newVal) >= 3: rvFaces.append(newVal)\n\n return rvVertices, rvFaces\n\n \nclass HeightFunctionCartesianSurface:\n def __init__(self, equation, startX, endX, startY, endY):\n self.equation = equation\n self.startX = startX\n self.endX = endX\n self.startY = startY\n self.endY = endY\n\n def Calculate(self, resX, resY):\n 
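# Sample z = f(x, y) on a resX-by-resY grid spanning [startX, endX] x [startY, endY].\n        # Vertices are appended row by row and consecutive edge loops are bridged into\n        # quad faces via createFaces; any compile/eval failure returns empty lists.\n        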
rvVertices = []\n rvFaces = []\n \n try: expr_args = (compile(self.equation, __file__, 'eval'), {\"__builtins__\": None}, safe_dict)\n except: return rvVertices, rvFaces\n\n delta_x = (self.endX - self.startX) / float(resX - 1)\n delta_y = (self.endY - self.startY) / float(resY - 1)\n \n edgeloop_prev = []\n for row_x in range(resX):\n edgeloop_cur = []\n x = self.startX + row_x * delta_x\n\n for row_y in range(resY):\n y = self.startY + row_y * delta_y\n z = 0.0\n\n safe_dict['x'] = x\n safe_dict['y'] = y\n\n try: z = float(eval(*expr_args))\n except: return [], []\n\n edgeloop_cur.append(len(rvVertices))\n rvVertices.append(Vector((x, y, z)))\n\n if len(edgeloop_prev) > 0:\n try: faces_row = createFaces(edgeloop_prev, edgeloop_cur)\n except: return [], []\n rvFaces.extend(faces_row)\n\n edgeloop_prev = edgeloop_cur\n\n return rvVertices, rvFaces\n\n \nclass HeightFunctionPolarSurface:\n def __init__(self, equation, startR, endR, startA, endA):\n self.equation = equation\n self.startR = startR\n self.endR = endR\n self.startA = startA\n self.endA = endA\n\n def Calculate(self, resR, resA):\n rvVertices = []\n rvFaces = []\n \n try: expr_args = (compile(self.equation, __file__, 'eval'), {\"__builtins__\": None}, safe_dict)\n except: return rvVertices, rvFaces\n\n delta_r = (self.endR - self.startR) / float(resR - 1)\n delta_a = (self.endA - self.startA) / float(resA - 1)\n \n edgeloop_prev = []\n for row_r in range(resR):\n edgeloop_cur = []\n r = self.startR + row_r * delta_r\n\n for row_a in range(resA):\n a = self.startA + row_a * delta_a\n z = 0.0\n\n safe_dict['r'] = r\n safe_dict['a'] = a\n\n try: z = float(eval(*expr_args))\n except: return [], []\n\n x = r * cos(a)\n y = r * sin(a)\n\n edgeloop_cur.append(len(rvVertices))\n rvVertices.append(Vector((x, y, z)))\n\n if len(edgeloop_prev) > 0:\n try: faces_row = createFaces(edgeloop_prev, edgeloop_cur)\n except: return [], []\n rvFaces.extend(faces_row)\n\n edgeloop_prev = edgeloop_cur\n \n return rvVertices, rvFaces\n ","sub_path":"nodes/mesh_generators/Surfaces.py","file_name":"Surfaces.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"390974392","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nClassical Density Functional Theory Package.\nCopyright Mary Coe m.k.coe@bristol.ac.uk\n\nCreated March 2019. Last Update March 2019.\n\nClassical Density Functional Theory (cDFT) is a method to find the equilibrium\ndensity profile of fluid subjected to an external potential. A good review of\nthe method can be found in Roth R. 2010. J. Phys.:Condens. Matter 22 063102.\n\nThis program provides a quick tutorial on some of the basic functions of the \npackage. It acts as the documentation until the full documentation is uploaded\nin the next few weeks.\n\nThis package is not complete. This is a working version, and therefore does\nnot include error messages. Therefore, caution is advised. The most common \nerror, is making the grid too small. The program requires the grid to be\ngreater than the number of points in the wall plus four times the number of\npoints in the radii of the particles. Also, some sum rules only work if the \nradii is 1.0.\n\nThe package currently supports:\n 1. Planar and Spherical Hard Walls\n 2. Rosenfeld and White-Bear functionals\n 3. Planar adsorption and contact density sum rules\n 4. Spherical adsorption, surface tension, and grand potential sum rules.\nThese are all described below. 
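As a concrete illustration of the grid-size rule above: with the discretisation\nof 0.005 and radius 1.0 used in the examples below, one particle radius spans\n1.0/0.005 = 200 grid points, so the grid must exceed the wall width plus\n4*200 = 800 points -- a bound that the 2**14 = 16384 points used here comfortably\nsatisfy for any reasonably sized wall.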
\n\nOver the coming months, this package will be improved to also support:\n 1. Cylindrical Hard Walls\n 2. White-Bear Mark II functional\n 3. Lennard-Jones Potentials\n 4. Multiprocessing\n\nThe required python modules to run this package are:\n 1. pyfftw\n 2. numpy\n 3. matplotlib\n\nIn the mean time, enjoy playing around with cDFT!\n\"\"\"\n\n# To use the package, we import the various files\nimport cDFT.minimisation as minimisation\nimport cDFT.output as output\nimport cDFT.standard_routines as standard_routines\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# The DFT object is specified as packing fraction, interaction, radius of spheres, \n# temperature, number of grid points (recommended to be a power of 2), grid discretisation \\\n# and functional.\nRF = minimisation.DFT(0.2,'Hard Wall',1.0,1.0,2**14,0.005,'Rosenfeld')\n\n# To initiate a planar minimisation, you must give a DFT, a mixing parameter (recommended\n# to be between 0.01 and 0.1) and an output filepath. To minimise, then just call the\n# minimise routine.\nplanar = minimisation.planar(RF, 0.1, './RF_planar_example/')\nplanar.minimise()\n\n# To see the density profile, send the minimisation object to output. This is\n# saved to a pdf in the output directory './RF_planar_example/'.\noutput.plot_single_density(planar)\n\n# For the planar geometry, the surface tension, grand potential and adsorption can be found.\n# We can also explore the accuracy of the minimisation using some sum rules relating to these.\n# To find the surface tension, or the excess grand potential, use the standard_routines package, \n# specifying first the geometry as a string, and then sending the minimisation object.\nsurface_tension = standard_routines.surface_tension('planar', planar)\nprint(f'\\nSurface tension is {surface_tension:.6f}')\n\n# The excess grand potential returns are array, so must be summed to find the full excess\n# grand potential.\ngrand_potential = np.sum(standard_routines.excess_grand_potential('planar', planar))\nprint(f'\\nExcess Grand Potential is {grand_potential:.6f}')\n\n# For the planar geometry, the supported sum rules are the contact sum rule and the\n# adsorption sum rule. The contact sum rule is beta*p = rho[contact], so can be found\n# by comparing the pressure and contact density. The minimisation object, when initiated,\n# creates a copy of the DFT, hence to access properties of the minimised object we use\n# planar.DFT. For the adsorption sum rule we again use the standard_routines library, \n# specifying the DFT object, the mixing parameter, the output file path and the geometry.\nprint(f'\\n---------------------------------------')\nprint(f'Contact Sum Rule:\\nPressure = {planar.DFT.pressure:.6f} rho[Contact] = {planar.DFT.rho[planar.NiW]}')\nprint(f'Relative error is {abs(planar.DFT.pressure-planar.DFT.rho[planar.NiW])/planar.DFT.pressure:.6f}')\nprint(f'---------------------------------------\\n')\n\nstandard_routines.adsorption_sum_rule(RF,0.1,'./RF_planar_example/','planar')\n\n# The package also supports the White-Bear functional\nWB = minimisation.DFT(0.2,'Hard Wall',1.0,1.0,2**14,0.005,'Whitebear')\n\n# As well as a spherical wall. Here, the arguments are DFT, bulk wall radius \n# (note that the density profile is measured from the centre of the hard spheres\n# hence the first non-zero density point will be at R_s = R_bulk + R), the mixing\n# paramater and the output file path. 
We minimise in the same way and output in the\n# same way.\nspherical = minimisation.spherical(WB, 2.0, 0.1,'./WB_spherical_example/')\nspherical.minimise()\noutput.plot_single_density(spherical)\n\n# Like before, we can find the surface tension and excess grand potential. We \n# also have access to a wider range of sum rules. For example, the adsorption\n# sum rule is supported. Note there is now an extra argument, R_bulk.\nstandard_routines.adsorption_sum_rule(WB, 0.1,'./WB_spherical_example/','spherical', 2.0)\n\n# As well as a surface tension (gamma) sum rule beta*(pressure + 2*gamma/Rs +dgamma/dRs = rho[contact])\n# and an excess grand potential (omega) sum rule (beta*domega/dRs = 4*pi*Rs^2*rho[contact]). \n# Note, these are currently only supported for R = 1.0. We can access them individually\n# using\nstandard_routines.spherical_surface_tension_sum_rule(WB, 2.0, 0.1, './WB_spherical_example/')\nstandard_routines. spherical_omega_sum_rule(WB, 2.0, 0.1, './WB_spherical_example/')\n\n# or both using\nstandard_routines.spherical_surface_tension_and_omega_rule(WB, 2.0, 0.1, './WB_spherical_example/')\n\n# There is also a routine to see how the 'surface tension', 'excess grand potential'or\n# 'contact' value varies with wall radius. This returns an array of results which can be\n# plotted. Note: This will take a few minutes to run and will output a lot of data to \n# the terminal which can be ignored.\nresults_1 = standard_routines.plot_by_wall_radius(WB, 0.1, './WB_spherical_example/', 'surface tension')\nresults_2 = standard_routines.plot_by_wall_radius(WB, 0.1, './WB_spherical_example/', 'contact')\n\nplt.figure(1)\nax1 = plt.subplot(211)\nax1.plot(results_1[:,0], results_1[:,1], color='plum', marker='o', markerfacecolor='purple', \\\n markeredgecolor='purple', markersize=3)\nax1.set_title('Surface Tension')\nax1.set_ylabel(r'$\\beta\\gamma(R_{bulk}) (R^2)$')\nax1.set_xlabel(r'$R/R_{bulk}$')\nax1.set_xlim(min(results_1[:,0]), max(results_1[:,0]))\n\nax2 = plt.subplot(212)\nax2.plot(results_2[:,0], results_2[:,1], color='turquoise', marker='*', markerfacecolor='teal', \\\n markeredgecolor='teal', markersize=3)\nax2.set_title('Density at Contact')\nax2.set_ylabel(r'$\\rho(R_s)(R^3)$')\nax2.set_xlabel(r'$R/R_s$')\nax2.set_xlim(min(results_2[:,0]), max(results_2[:,0]))\nplt.tight_layout()\nplt.savefig('./WB_spherical_example/spherical_radii_examples.pdf')\nplt.close(1)\n\n# Finally, we can compare density profiles.\nRF_2 = minimisation.DFT(0.3,'Hard Wall',1.0,1.0,2**14,0.005,'Rosenfeld')\nplanar_2 = minimisation.planar(RF_2,0.05,'./RF_planar_example/')\nplanar_2.minimise()\n\noutput.plot_multiple_density([planar,planar_2], './RF_planar_example/planar_comparison.pdf')\n\n# Over the next few months, this package will be getting a lot of new features, so check\n# back for the latest version! 
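\n# As a final minimal sketch, the routines above compose directly -- for example,\n# comparing the Rosenfeld and White-Bear functionals at a common state point.\n# The packing fraction, mixing parameter and output folder below are arbitrary\n# illustrative choices, not values prescribed by the package.\nRF_cmp = minimisation.DFT(0.35, 'Hard Wall', 1.0, 1.0, 2**14, 0.005, 'Rosenfeld')\nWB_cmp = minimisation.DFT(0.35, 'Hard Wall', 1.0, 1.0, 2**14, 0.005, 'Whitebear')\nplanar_RF = minimisation.planar(RF_cmp, 0.05, './functional_comparison/')\nplanar_WB = minimisation.planar(WB_cmp, 0.05, './functional_comparison/')\nplanar_RF.minimise()\nplanar_WB.minimise()\noutput.plot_multiple_density([planar_RF, planar_WB], './functional_comparison/RF_vs_WB.pdf')\ngamma_RF = standard_routines.surface_tension('planar', planar_RF)\ngamma_WB = standard_routines.surface_tension('planar', planar_WB)\nprint(f'RF surface tension: {gamma_RF:.6f}, WB surface tension: {gamma_WB:.6f}')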
","sub_path":"how_to_use.py","file_name":"how_to_use.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"293492118","text":"import json\n\nconfig_path = 'new_config.json'\nconfig = json.load(open(config_path))\nall_fingers = set([i for i in range(1, 11)])\n\nfor i in config:\n if not i.startswith('flip'):\n img = sorted(config[i])\n flip_img = sorted(11 - i for i in config[f'flip_{i}'])\n if img != flip_img:\n print(f'{i}: real {img}, flip {flip_img}')\n good = int(input(f'good real(1)/flip(2)'))\n while good != 1 and good != 2:\n good = int(input('good real(1)/flip(2) '))\n\n if good == 2:\n config[i] = flip_img\n else:\n config[f'flip_{i}'] = sorted(11 - i for i in img)\n json.dump(config, open(config_path, 'w+'))","sub_path":"compareFlips.py","file_name":"compareFlips.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"585258751","text":"\nfrom sc2ai.envs.sc2env import SingleAgentSC2Env\nfrom sc2ai.envs.actions import *\nfrom sc2ai.envs.observations import *\n\n\nclass CollectMineralAndGasEnv(SingleAgentSC2Env):\n \"\"\"A class containing specifications for the CollectMineralsAndGas Minimap\n \"\"\"\n def __init__(self, **kwargs):\n action_set = DefaultActionSet([\n NoOpAction(),\n SelectPointAction(select_point_act=\"select\"),\n SelectRectAction(select_add=\"select\"),\n SelectArmyAction(select_add=\"select\"),\n MoveScreenAction(queued=\"now\"),\n ])\n\n observation_set = ObservationSet([\n MapCategory(\"feature_screen\", [\n FeatureScreenSelfUnitFilter(),\n FeatureScreenNeutralUnitFilter()])\n ])\n\n super().__init__(\"CollectMineralAndGas\", action_set, observation_set, num_players=1, **kwargs)\n","sub_path":"sc2ai/envs/minigames/collect_minerals_and_gas.py","file_name":"collect_minerals_and_gas.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"581808340","text":"import datetime\n\nfrom application.model.attendance import Attendance\nfrom application.model.codecooler import Student\n\n\nclass AttendanceController:\n \"\"\"\n Creates AttendanceController obj.\n\n Instance attributes:\n user_input: UserInput obj\n view: View obj\n \"\"\"\n\n def __init__(self, user_input, view):\n \"\"\"\n Creates AttendanceController obj.\n Parameters:\n user_input: UserInput obj\n view_input: View obj\n \"\"\"\n\n self.user_input = user_input\n self.view = view\n\n def check_attendance_action(self):\n \"\"\"\n Creates Attendance obj based on user input and adds to attendaces\n attribute (list).\n \"\"\"\n\n students = Student.get_students()\n today_date = datetime.date.today()\n for student in students:\n self.view.show_codecooler(student)\n is_present = self.user_input.get_boolean_input()\n new_attendance = Attendance(today_date, student, is_present)\n if not any(new_attendance.get_date() == attendance.get_date() for attendance in student.get_attendance()):\n new_attendance.add_to_attendances()\n student.add_attendance(new_attendance)\n else:\n for attendance in student.get_attendance():\n if attendance.get_date() == new_attendance.get_date():\n attendance.set_is_present(new_attendance.get_is_present())\n\n\n self.view.show_message(\"There are no more students!\")\n self.user_input.press_enter_to_continue()\n\n def view_attendance_action(self):\n \"\"\"\n View students percetage attendance and, if user wants, 
view\n attendance details for choosen student.\n \"\"\"\n\n for index, student in enumerate(Student.get_students()):\n present = 0\n for attendance in student.get_attendance():\n if attendance.get_is_present():\n present += 1\n\n percent_attendance = int(100 * (present/len(student.get_attendance())))\n self.view.show_attendance(index + 1, student, percent_attendance)\n\n group_name = \"student's\"\n user_aux_menu_decision = self.user_input.get_aux_menu_input(len(Student.get_students()), group_name)\n\n if isinstance(user_aux_menu_decision, int):\n choosen_student = Student.get_students()[user_aux_menu_decision]\n self.view.clear()\n self.view.show_attendance_details(choosen_student)\n self.user_input.press_enter_to_continue()\n","sub_path":"application/controller/attendance_controller.py","file_name":"attendance_controller.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"237657600","text":"options = {}\nall_ingredients = []\n\nwith open ('021.txt') as input_file:\n labels = [(ingredients.split(' '), allergins.strip(')').split(', ')) for ingredients, allergins in [line.strip().split(' (contains ') for line in input_file.read().split('\\n')]]\n \n # work out which are options\n for ingredients, allergins in labels:\n all_ingredients += ingredients\n for allergin in allergins:\n current_options = options.get(allergin, set(ingredients))\n options[allergin] = current_options.intersection(ingredients)\n\n confirmed = {}\n # if there is only one option, it is confirmed. Remove it from other options and repeat\n while options:\n for allergin, ingredients in options.items():\n if len(ingredients) == 1:\n confirmed[allergin] = list(ingredients)[0]\n del options[allergin]\n for i in options.values():\n i.difference_update(ingredients)\n break\n\n print(f'part one: {len([i for i in all_ingredients if i not in confirmed.values()])}')\n print(f\"part two: {','.join([ingredient for _, ingredient in sorted(confirmed.items(), key=lambda a: a[0])])}\")\n\n\n\n","sub_path":"021.py","file_name":"021.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"135530662","text":"\"\"\"\nCrie um programa que leia uma frase qualquer e diga se ela é um palindromo, desconsiderando os espaços\nEx: APOS A SOPA\nA SACADA DA CASA\nA TORRE DA DERROTA\nO LOBO AMA O BOLO\nANOTARAM A DATA DA MARATONA\n\"\"\"\n\n\"\"\"\nfrase = ''.join(str(input(\"Digite uma frase: \").strip()).split())\npalindromo = ''.join(frase[::-1].split())\nprint(frase)\nprint(palindromo)\nif frase == palindromo:\n print(\"É um palíndromo\")\nelse:\n print(\"Não é um palíndromo\")\n\"\"\"\n\n\"\"\"\nfrase = str(input(\"Digite uma frase: \").strip())\nlista = []\n\nfor letra in range(0, len(frase)):\n if frase[letra] != ' ':\n lista.append(frase[letra])\npalindromo = ''.join(lista)\nif palindromo == palindromo[::-1]:\n print(\"É um palíndromo\")\nelse:\n print(\"Não é um palíndromo\")\n\"\"\"\nfrase = str(input(\"Digite uma frase: \")).strip().upper()\npalavras = frase.split()\njunto = \"\".join(palavras)\nprint(\"Você digitou a frase {}\".format(junto))\ninverso = ''\nfor letra in range(len(junto) - 1, -1, -1):\n inverso += junto[letra]\nprint(\"O inverso de {} é {}\".format(junto, inverso))\nif inverso == junto:\n print(\"Temos um palíndromo!\")\nelse:\n print(\"A frase digitada não é um 
palíndromo!\")\n","sub_path":"PythonExercicios/Mundo_2/ex053.py","file_name":"ex053.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"223182925","text":"from ew.backend import core as bknd_core\nfrom ew.backend import item as bknd_item\nfrom ew.backend.item import EwItem\nfrom ew.static import cfg as ewcfg\nfrom ew.static import cosmetics as cosmetics_static\nfrom ew.utils import core as ewutils\nfrom ew.utils.item import gen_item_props\n\n\nasync def dedorn_all_costumes():\n costume_count = 0\n # Grab costumes from the cache if enabled\n item_cache = bknd_core.get_cache(obj_type = \"EwItem\")\n if item_cache is not False:\n # separate search criteria for adorned or slimeoided\n p1 = {\"context\": \"costume\", \"adorned\": \"true\"}\n p2 = {\"context\": \"costume\", \"slimeoid\": \"true\"}\n # compile both results\n costumes_data = item_cache.find_entries(criteria={\"item_props\": p1})\n costumes_data += item_cache.find_entries(criteria={\"item_props\": p2})\n\n # Build a list that'll be handled in the same way\n costumes = list(map(lambda dat: dat.get(\"id_item\"), costumes_data))\n else:\n costumes = bknd_core.execute_sql_query(\"SELECT id_item FROM items_prop WHERE name = 'context' AND value = 'costume' AND id_item IN (SELECT id_item FROM items_prop WHERE (name = 'adorned' OR name = 'slimeoid') AND value = 'true')\")\n\n for costume_id in costumes:\n costume_item = EwItem(id_item=costume_id)\n\n costume_item.item_props['adorned'] = 'false'\n costume_item.item_props['slimeoid'] = 'false'\n\n costume_item.persist()\n\n costume_count += 1\n\n ewutils.logMsg(\"Dedorned {} costumes after full moon ended.\".format(costume_count))\n\n\ndef get_cosmetic_max_durability(item_data: EwItem) -> int:\n \"\"\" Determine the max durability of a cosmetic item. \"\"\"\n # Special item_props override comes first\n if item_data.item_props.get(\"original_durability\"):\n return int(item_data.item_props[\"original_durability\"])\n\n # Souls and scalps have preset durabilities and don't appear in the cosm map\n if item_data.item_props['id_cosmetic'] == 'soul':\n return ewcfg.soul_durability\n if item_data.item_props['id_cosmetic'] == 'scalp':\n return ewcfg.generic_scalp_durability\n\n # If there's no special rules, just look up the durability in the cosmetics list\n cosmetic_definition = cosmetics_static.cosmetic_map.get(item_data.item_props[\"id_cosmetic\"])\n if cosmetic_definition:\n max_durability = cosmetic_definition.durability\n else:\n # Otherwise use the base cosmetic durability\n if item_data.item_props.get('rarity') == ewcfg.rarity_princeps:\n # Princeps have a different base durability for reasons\n max_durability = ewcfg.base_durability * 100\n else:\n max_durability = ewcfg.base_durability\n\n return max_durability\n\n\ndef repair_cosmetic(item_data: EwItem, new_durability: int = None) -> EwItem:\n \"\"\" Repair a cosmetic to its original durability. \"\"\"\n if new_durability is None:\n new_durability = get_cosmetic_max_durability(item_data)\n\n item_data.item_props[\"durability\"] = new_durability\n\n return item_data\n\n\ndef update_cosmetic(item_data: EwItem) -> EwItem:\n \"\"\" Update a cosmetic to the current item definition in the code base. \"\"\"\n # TODO: Update cosmetics to save less properties. 
Do we really need to keep saving stats if they aren't coming back?\n cosm_def = cosmetics_static.cosmetic_map.get(item_data.item_props[\"id_cosmetic\"])\n if cosm_def is None:\n ewutils.logMsg(f\"WARNING: Couldn't find cosmetic definition under {item_data.item_props['id_cosmetic']} for item id {item_data.id_item}.\")\n return item_data\n # TODO no. 2: Eventually refactor this into a simpler update_item. Probably as part of a wider item_props refactor.\n cosm_props = gen_item_props(cosm_def)\n item_data.item_props.update(cosm_props)\n\n return item_data\n\n\ndef restyle_cosmetic(item_data: EwItem, new_style: str) -> EwItem:\n \"\"\" Change a cosmetic item's style property.\"\"\"\n if new_style not in ewcfg.valid_styles:\n return item_data\n\n item_data.item_props[\"fashion_style\"] = new_style\n\n return item_data\n\n\nasync def has_cosmetic(user_data, search_cosmetic, ignore_adorned = False, ignore_slimeoid = False):\n \"\"\" See if a player has a matching cosmetic. \"\"\"\n cosmetic_items = bknd_item.inventory(\n id_user=user_data.id_user,\n id_server=user_data.id_server,\n item_type_filter=ewcfg.it_cosmetic\n )\n\n item_sought = None\n item_data = None\n item_from_slimeoid = None\n search_id = int(search_cosmetic) if search_cosmetic.isnumeric() else -2\n\n for item in cosmetic_items:\n item_props = item[\"item_props\"]\n if item.get('id_item') == search_id or search_cosmetic in ewutils.flattenTokenListToString(item.get('name')):\n\n if item_props.get('adorned') and ignore_adorned:\n continue\n\n if item_props.get('slimeoid') and ignore_slimeoid:\n continue\n\n if item_from_slimeoid is None and item_props.get(\"slimeoid\") == 'true':\n item_from_slimeoid = item\n continue\n else:\n item_sought = item\n break\n\n if item_sought is None:\n item_sought = item_from_slimeoid\n\n if item_sought is not None:\n item_data = EwItem(id_item=item_sought[\"id_item\"])\n\n return item_data\n","sub_path":"ew/utils/cosmeticitem.py","file_name":"cosmeticitem.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"542759212","text":"\"\"\"\nFile IO: I made it so that the user enters the name of a text file (input.txt) and that contains the menu that\ngets printed. 
There is also an output log that gets created that stores changes and what time they happened at.\n\n\nTests:\nEnter a number other than 0-4 and see if the program breaks or if anything happens (including a helpful error\nmessage).\nEnter an invalid file name for the input file.\n\n\"\"\"\n\nfrom datetime import datetime\n\nclass Radio(object):\n default_modes = [\"FM\", \"AM\", \"CD\", \"AUX\"]\n\n # Initial volume is set to 20%, initial mode is set to FM\n def __init__(self):\n self.volume = 20\n self.saved_volume = self.volume \n self.modes = list(self.default_modes)\n self.current_mode = self.modes.pop()\n\n # Cycle through modes\n def change_mode(self):\n if len(self.modes) == 0:\n # reload modes when list is empty\n self.modes = list(self.default_modes)\n\n self.current_mode = self.modes.pop()\n\n # Increase the volume by 10% if not already at max volume\n def increase_volume(self):\n if self.volume <= 90:\n self.volume += 10\n\n # Decrease the volume by 10% if not already at min volume\n def decrease_volume(self):\n if self.volume >= 10:\n self.volume -= 10\n\n # Muting sets the volume to 0%, and saves the volume separately so it can be restored\n def toggle_mute(self):\n if self.volume > 0:\n self.saved_volume = self.volume\n self.volume = 0\n else:\n self.volume = self.saved_volume\n\n\n# If module is run directly, show demo\nif __name__ == \"__main__\":\n my_radio = Radio()\n\n radio_log = open('radio_log.txt', 'a')\n\n bad_file = True\n while bad_file:\n try:\n menu_file_name = input(\"Enter file to read menu in from: \")\n with open(menu_file_name) as in_file:\n # for line in in_file:\n # print(line.strip('\\n'))\n list_menu = in_file.readlines() # menu is a list of strings\n menu = \"\".join(list_menu) # menu is the elements of the list, but as one big string\n bad_file = False\n except FileNotFoundError:\n print(\"You entered a filename that doesn't exist in this folder. Try again. \")\n while True:\n\n print(\"Volume: {}% | {}\".format(my_radio.volume, my_radio.current_mode))\n print(menu)\n\n option = input(\"Select option: \").strip()\n\n if option == \"1\":\n my_radio.change_mode()\n radio_log.write(f\"\\n Current Time: {datetime.now()}. Radio mode changed to: {my_radio.current_mode} \\n\")\n elif option == \"2\":\n my_radio.increase_volume()\n radio_log.write(f\"\\n Current Time: {datetime.now()}. Radio volume changed to: {my_radio.volume} \\n\")\n elif option == \"3\":\n my_radio.decrease_volume()\n radio_log.write(f\"\\n Current Time: {datetime.now()}. Radio volume: {my_radio.volume} \\n\")\n elif option == \"4\":\n my_radio.toggle_mute()\n radio_log.write(f\"\\n Current Time: {datetime.now()}. 
Radio volume: {my_radio.volume} \\n\")\n elif option == \"0\":\n radio_log.close()\n break\n else:\n print(\"Invalid option\")\n","sub_path":"Week2/week2assignment.py","file_name":"week2assignment.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"22496059","text":"import os\nimport string\nimport json\nimport urllib3\nimport requests \nimport random\nfrom model.config import stopwords\n\nclass Model:\n\n def __init__(self, ignoreArticles=None, collection=\"words/\", debug=False):\n self.baseurl = 'https://sproj.api.colehollant.com/thesaurus/api/v1/'\n self.ignoreArticles = ignoreArticles if ignoreArticles is not None else True\n self.articles = stopwords\n self.debug = debug\n\n def entryExists(self, word, collection='senselevel'):\n word = word.strip().lower()\n if not word:\n if self.debug:\n print(\"Word is empty\")\n return False, word, {}\n if self.ignoreArticles and word in self.articles:\n if self.debug:\n print(\"Ignoring article\")\n return False, word, {}\n code, response = self.requestWord(word, collection)\n if code != 200: # we've got an error!\n if self.debug:\n print(\"Not present in db:\", word)\n return False, word, {}\n return True, word, response['data']\n\n def cleanInput(self, text):\n return text\n\n def stripPunctuation(self, word):\n return word.translate(str.maketrans('', '', string.punctuation))\n\n def replaceWordPunctuation(self, originalWord, newWord):\n originalStripped = self.stripPunctuation(originalWord).lower()\n return originalWord.replace(originalStripped, newWord.lower())\n \n def requestWord(self, word, collection='senselevel'):\n headers = {\n 'Content-Type': 'application/json', \n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',\n }\n url = self.baseurl + collection + '/' + word\n response = requests.get(url, headers=headers)\n return response.status_code, response.json()\n ","sub_path":"thesaurus/model/model/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"468549404","text":"import numpy as np\nimport talib as ta\nimport matplotlib.pyplot as plt\n\n#close = np.random.random(100)\n\nclose = np.random.randint(50000, 60000, 100)\nclose = np.array(close, dtype=float)\nprint(close)\n\nma5 = ta.SMA(close, 5)\nma10 = ta.SMA(close, 10)\n\nrsi14 = ta.RSI(close, timeperiod=14)\nmacd, macdsignal, macdhist = ta.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)\n\nprint(ma5)\nprint(ma10)\n\nprint(ma5[0])\nprint(ma10[0])\n\n# last array\nprint(ma5[-1])\nprint(ma10[-1])\nprint(rsi14[-1])\n\nfor i in range(len(macd)) :\n print(\"macd macdsignal macdhist\", i, close[i], macd[i], macdsignal[i], macdhist[i])\n\nplt.figure(figsize=(11,3))\nplt.plot(close, \"r-\")\nplt.plot(ma5, \"b-\")\nplt.show()\n","sub_path":"Study And Project/DAT-I LAB Study and Project/creon dat/ex_talib.py","file_name":"ex_talib.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"583827867","text":"\n\nfrom xai.brain.wordbase.verbs._fondle import _FONDLE\n\n#class header\nclass _FONDLED(_FONDLE, ):\n\tdef __init__(self,): \n\t\t_FONDLE.__init__(self)\n\t\tself.name = \"FONDLED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"fondle\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/verbs/_fondled.py","file_name":"_fondled.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"340245865","text":"## Time - O(n**2)\n## Space - O(n)\n## Description - since the decision-making parameter is 1, maintain a 1-D array(dp) and iterate through the array to know the number of values that are lesser and increase the value at dp array position by considering the max of those two values\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n # dp=[1]*len(nums)\n # for i in range(len(nums)):\n # for j in range(0,i):\n # if nums[i]>nums[j]:\n # dp[i]=max(dp[i],dp[j]+1)\n # return max(dp)\n\n ############# Binary Search #############\n ### time - O(nlogn)\n ### space-O(n)\n # arr=[0]*len(nums)\n # arr[0]=nums[0]\n # l=1\n # def binarysearch(arr,low,high,target):\n # while low<=high:\n # mid=low+(high-low)//2\n # if target==arr[mid]:\n # return mid\n # elif target>arr[mid]:\n # low=mid+1\n # else:\n # high=mid-1\n # return low\n\n # for i in range(1,len(nums)):\n # if nums[i]>arr[l-1]:\n # arr[l]=nums[i]\n # l+=1\n # else:\n # val=binarysearch(arr,0,l-1,nums[i])\n # arr[val]=nums[i]\n # return l\n\n ######### Using bisect in python similar to BS #########\n ### time - O(nlogn)\n ### space-O(n)\n from bisect import bisect_left\n arr = []\n for i in range(len(nums)):\n l = bisect_left(arr, nums[i])\n if l == len(arr):\n arr.append(nums[i])\n else:\n arr[l] = nums[i]\n # print(arr)\n return len(arr)\n\n","sub_path":"longest_increasing_subsequence.py","file_name":"longest_increasing_subsequence.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"49008095","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Author',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=50)),\n ('last_name', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, null=True, blank=True)),\n ('slug', models.SlugField(null=True, blank=True)),\n ('description', models.CharField(max_length=100, null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='News',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=150, null=None)),\n ('content', models.TextField(max_length=10000, blank=None)),\n ('date_published', models.DateField(auto_now=True)),\n ('author', models.ForeignKey(to='backend_app.Author')),\n ('categories', models.ForeignKey(to='backend_app.Category')),\n ],\n options={\n 'verbose_name_plural': 'News',\n },\n ),\n ]\n","sub_path":"backend/backend_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"296194274","text":"#!/usr/bin/env python3\n\nimport click\nimport json\nimport logging\nimport sys\nimport warnings\n\nfrom tabulate import tabulate\nfrom wait_for 
import TimedOutError\n\nimport bonfire.config as conf\nfrom bonfire.qontract import get_apps_for_env, sub_refs, get_secret_names_in_namespace\nfrom bonfire.openshift import (\n apply_config,\n get_all_namespaces,\n wait_for_all_resources,\n wait_for_db_resources,\n find_clowd_env_for_ns,\n wait_for_clowd_env_target_ns,\n wait_on_cji,\n wait_on_reservation,\n get_reservation,\n check_for_existing_reservation,\n oc,\n whoami,\n)\nfrom bonfire.utils import (\n FatalError,\n split_equals,\n find_what_depends_on,\n validate_time_string,\n check_pypi,\n get_version,\n)\nfrom bonfire.local import get_local_apps\nfrom bonfire.processor import (\n TemplateProcessor,\n process_clowd_env,\n process_iqe_cji,\n process_reservation,\n)\nfrom bonfire.namespaces import (\n Namespace,\n get_namespaces,\n reserve_namespace,\n release_namespace,\n add_base_resources,\n reconcile,\n)\nfrom bonfire.secrets import import_secrets_from_dir\n\nlog = logging.getLogger(__name__)\n\nAPP_SRE_SRC = \"appsre\"\nLOCAL_SRC = \"local\"\nNO_RESERVATION_SYS = \"this cluster does not use a namespace reservation system\"\n\n\ndef _error(msg):\n click.echo(f\"ERROR: {msg}\", err=True)\n sys.exit(1)\n\n\n@click.group(context_settings=dict(help_option_names=[\"-h\", \"--help\"]))\n@click.option(\"--debug\", \"-d\", help=\"Enable debug logging\", is_flag=True, default=False)\ndef main(debug):\n logging.getLogger(\"sh\").setLevel(logging.CRITICAL) # silence the 'sh' library logger\n logging.basicConfig(\n format=\"%(asctime)s [%(levelname)8s] [%(threadName)20s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=logging.DEBUG if debug else logging.INFO,\n )\n\n def custom_formatwarning(msg, *args, **kwargs):\n # ignore everything except the message\n return str(msg)\n\n warnings.formatwarning = custom_formatwarning\n warnings.simplefilter(\"default\")\n logging.captureWarnings(True)\n if conf.ENV_FILE:\n log.debug(\"using env file: %s\", conf.ENV_FILE)\n\n check_pypi()\n\n\n@main.group()\ndef namespace():\n \"\"\"Perform operations related to namespace reservation\"\"\"\n pass\n\n\n@main.group()\ndef config():\n \"\"\"Commands related to bonfire configuration\"\"\"\n pass\n\n\n@main.group()\ndef apps():\n \"\"\"Show information about deployable apps\"\"\"\n pass\n\n\n@main.group()\ndef reservation():\n \"\"\"ALPHA: Perform operations related to the NamespaceReservation CRD\"\"\"\n pass\n\n\ndef _warn_if_unsafe(namespace):\n ns = Namespace(name=namespace)\n if not ns.owned_by_me and not ns.available:\n if not click.confirm(\n \"Namespace currently not ready or reserved by someone else. Continue anyway?\"\n ):\n click.echo(\"Aborting\")\n sys.exit(0)\n\n\ndef _warn_before_delete():\n if not click.confirm(\n \"Deleting your reservation will also delete the associated namespace. Proceed?\"\n ):\n click.echo(\"Aborting\")\n sys.exit(0)\n\n\ndef _warn_of_existing(requester):\n if not click.confirm(\n f\"Existing reservation(s) detected for requester '{requester}'. 
\"\n \"Do you need to reserve an additional namespace?\"\n ):\n click.echo(\"Aborting\")\n sys.exit(0)\n\n\ndef _reserve_namespace(duration, retries, namespace):\n log.info(\n \"reserving ephemeral namespace%s...\",\n f\" '{namespace}'\" if namespace else \"\",\n )\n\n if namespace:\n _warn_if_unsafe(namespace)\n\n ns = reserve_namespace(duration, retries, namespace)\n if not ns:\n _error(\"unable to reserve namespace\")\n\n return ns\n\n\ndef _get_target_namespace(duration, retries, namespace=None):\n \"\"\"Determine the namespace to deploy to.\n\n Use ns reservation system if on a cluster that has reservable namespaces. Otherwise the user\n must specify a namespace with '--namespace' and we assume they have ownership of it.\n\n Returns tuple of:\n (bool indicating whether ns reservation system was used, namespace name)\n \"\"\"\n # check if we're on a cluster that has reservable namespaces\n reservable_namespaces = get_namespaces()\n if reservable_namespaces:\n ns = _reserve_namespace(duration, retries, namespace)\n return (True, ns.name)\n else:\n # we're not, user has to namespace to deploy to\n if not namespace:\n _error(NO_RESERVATION_SYS + \". Use -n/--namespace to specify target namespace\")\n\n # make sure ns exists on the cluster\n cluster_namespaces = get_all_namespaces()\n for cluster_ns in cluster_namespaces:\n if cluster_ns[\"metadata\"][\"name\"] == namespace:\n ns = namespace\n break\n else:\n _error(f\"namespace '{namespace}' not found on cluster\")\n\n return (False, ns)\n\n\ndef _wait_on_namespace_resources(namespace, timeout, db_only=False):\n if db_only:\n wait_for_db_resources(namespace, timeout)\n else:\n wait_for_all_resources(namespace, timeout)\n\n\ndef _prepare_namespace(namespace):\n base_secret_names = get_secret_names_in_namespace(conf.BASE_NAMESPACE_NAME)\n add_base_resources(namespace, base_secret_names)\n\n\n_ns_reserve_options = [\n click.option(\n \"--duration\",\n \"-d\",\n required=False,\n type=int,\n default=1,\n help=\"duration of reservation in hrs (default: 1)\",\n ),\n click.option(\n \"--retries\",\n \"-r\",\n required=False,\n type=int,\n default=0,\n help=\"how many times to retry namespace reserve before giving up (default: infinite)\",\n ),\n]\n\n_timeout_option = [\n click.option(\n \"--timeout\",\n \"-t\",\n required=True,\n type=int,\n default=300,\n help=\"timeout in sec (default = 300) to wait for resources to be ready\",\n )\n]\n\n\ndef _validate_set_template_ref(ctx, param, value):\n try:\n split_value = split_equals(value)\n if split_value:\n # check that values unpack properly\n for app_component, value in split_value.items():\n # TODO: remove once app name syntax fully deprecated\n split = app_component.split(\"/\")\n if len(split) == 2:\n warnings.warn(\n (\n \"--set-template-ref: /= syntax is deprecated, \"\n \"use =\"\n ),\n DeprecationWarning,\n )\n elif len(split) > 2:\n raise ValueError\n return split_value\n except ValueError:\n raise click.BadParameter(\"format must be '='\")\n\n\ndef _validate_set_parameter(ctx, param, value):\n try:\n split_value = split_equals(value)\n if split_value:\n # check that values unpack properly\n for param_path, value in split_value.items():\n # TODO: remove once app name syntax fully deprecated\n split = param_path.split(\"/\")\n if len(split) == 3:\n warnings.warn(\n (\n \"--set-parameter: //= syntax is \"\n \"deprecated, use /=\"\n ),\n DeprecationWarning,\n )\n elif len(split) < 2 or len(split) > 3:\n raise ValueError\n return split_value\n except ValueError:\n raise 
click.BadParameter(\"format must be '/='\")\n\n\ndef _validate_set_image_tag(ctx, param, value):\n try:\n return split_equals(value)\n except ValueError:\n raise click.BadParameter(\"format must be '='\")\n\n\ndef _validate_resource_arguments(ctx, param, value):\n opposite_option = {\n \"remove_resources\": \"no_remove_resources\",\n \"no_remove_resources\": \"remove_resources\",\n }\n if any([val.startswith(\"-\") for val in value]):\n raise click.BadParameter(\n \"--remove-resources/--no-remove-resources requires a component name or keyword 'all'\"\n )\n if \"all\" in value and \"all\" in ctx.params.get(opposite_option[param.name], {}):\n raise click.BadParameter(\n \"--remove-resources and --no-remove-resources can't be both set to 'all'\"\n )\n if param.name == \"remove_resources\" and not value:\n value = (\"all\",)\n return value\n\n\ndef _validate_reservation_duration(ctx, param, value):\n try:\n return validate_time_string(value)\n except ValueError:\n raise click.BadParameter(\"expecting h/m/s string. Ex: '1h30m'\")\n\n\n_app_source_options = [\n click.option(\n \"--source\",\n \"-s\",\n help=f\"Configuration source to use when fetching app templates (default: {LOCAL_SRC})\",\n type=click.Choice([LOCAL_SRC, APP_SRE_SRC], case_sensitive=False),\n default=LOCAL_SRC,\n ),\n click.option(\n \"--local-config-path\",\n \"-c\",\n help=(\n \"File to use for local config (default: first try ./config.yaml, then \"\n \"$XDG_CONFIG_HOME/bonfire/config.yaml)\"\n ),\n default=None,\n ),\n click.option(\n \"--target-env\",\n help=(\n f\"When using source={APP_SRE_SRC}, name of environment to fetch templates for\"\n f\" (default: {conf.EPHEMERAL_ENV_NAME})\"\n ),\n type=str,\n default=conf.EPHEMERAL_ENV_NAME,\n ),\n]\n\n\n_process_options = [\n click.argument(\n \"app_names\",\n required=True,\n nargs=-1,\n ),\n _app_source_options[0],\n _app_source_options[1],\n click.option(\n \"--set-image-tag\",\n \"-i\",\n help=(\"Override image tag for an image using format '='\"),\n multiple=True,\n callback=_validate_set_image_tag,\n ),\n click.option(\n \"--set-template-ref\",\n help=\"Override template ref for a component using format '='\",\n multiple=True,\n callback=_validate_set_template_ref,\n ),\n click.option(\n \"--set-parameter\",\n \"-p\",\n help=(\n \"Override parameter for a component using format \"\n \"'//=\"\n ),\n multiple=True,\n callback=_validate_set_parameter,\n ),\n click.option(\n \"--clowd-env\",\n \"-e\",\n help=(\n f\"Name of ClowdEnvironment (default: if --namespace provided, {conf.ENV_NAME_FORMAT})\"\n ),\n type=str,\n default=None,\n ),\n _app_source_options[2],\n click.option(\n \"--ref-env\",\n help=f\"Query {APP_SRE_SRC} for apps in this environment and substitute 'ref'/'IMAGE_TAG'\",\n type=str,\n default=None,\n ),\n click.option(\n \"--get-dependencies/--no-get-dependencies\",\n help=\"Get config for any listed 'dependencies' in ClowdApps (default: true)\",\n default=True,\n ),\n click.option(\n \"--remove-resources\",\n help=(\n \"Remove resource limits and requests on ClowdApp configs \"\n \"for specific components (default: all)\"\n ),\n type=str,\n multiple=True,\n callback=_validate_resource_arguments,\n ),\n click.option(\n \"--no-remove-resources\",\n help=(\n \"Don't remove resource limits and requests on ClowdApp configs \"\n \"for specific components (default: none)\"\n ),\n type=str,\n multiple=True,\n callback=_validate_resource_arguments,\n ),\n click.option(\n \"--single-replicas/--no-single-replicas\",\n help=\"Set replicas to '1' on all on ClowdApp 
configs (default: true)\",\n default=True,\n ),\n click.option(\n \"--component\",\n \"-C\",\n \"component_filter\",\n help=\"Specific component(s) that should be processed (default: all)\",\n type=str,\n multiple=True,\n ),\n]\n\n\n_clowdenv_process_options = [\n click.option(\n \"--namespace\",\n \"-n\",\n help=\"Target namespace of the ClowdEnvironment (default: none)\",\n type=str,\n ),\n click.option(\n \"--quay-user\",\n \"-u\",\n help=\"Quay username for pullSecret provider\",\n type=str,\n ),\n click.option(\n \"--clowd-env\",\n \"-e\",\n help=(f\"Name of ClowdEnvironment (default: if target ns provided, {conf.ENV_NAME_FORMAT})\"),\n type=str,\n default=None,\n ),\n click.option(\n \"--template-file\",\n \"-f\",\n help=(\n \"Path to ClowdEnvironment template file (default: use local cluster template packaged\"\n \" with bonfire)\"\n ),\n type=str,\n default=None,\n ),\n]\n\n\n_iqe_cji_process_options = [\n click.argument(\n \"clowd_app_name\",\n type=str,\n required=True,\n ),\n click.option(\n \"--debug-pod\",\n \"-d\",\n \"debug\",\n help=\"Set debug mode on IQE pod\",\n default=False,\n is_flag=True,\n ),\n click.option(\n \"--marker\",\n \"-m\",\n help=\"pytest marker expression\",\n type=str,\n default=\"\",\n ),\n click.option(\n \"--filter\",\n \"-k\",\n help=\"pytest filter expression\",\n type=str,\n default=\"\",\n ),\n click.option(\n \"--env\",\n \"-e\",\n help=\"dynaconf env name\",\n type=str,\n default=\"clowder_smoke\",\n ),\n click.option(\n \"--image-tag\",\n \"-i\",\n help=\"image tag to use for IQE pod\",\n type=str,\n default=\"\",\n ),\n click.option(\n \"--cji-name\",\n \"-c\",\n help=\"Name of ClowdJobInvocation (default: generate a random name)\",\n type=str,\n default=None,\n ),\n click.option(\n \"--template-file\",\n \"-f\",\n help=(\n \"Path to ClowdJobInvocation template file (default: use IQE CJI template packaged\"\n \" with bonfire)\"\n ),\n type=str,\n default=None,\n ),\n click.option(\n \"--requirements\",\n help=\"iqe --requirements expression\",\n type=str,\n default=\"\",\n ),\n click.option(\n \"--requirements-priority\",\n help=\"iqe --requirements-priority expression\",\n type=str,\n default=\"\",\n ),\n click.option(\n \"--test-importance\",\n help=\"iqe --test-importance expression\",\n type=str,\n default=\"\",\n ),\n]\n\n_reservation_process_options = [\n click.option(\n \"--name\",\n type=str,\n default=None,\n help=\"Identifier for the reservation\",\n ),\n click.option(\n \"--requester\",\n \"-r\",\n type=str,\n default=None,\n help=\"Name of the user requesting a reservation\",\n ),\n click.option(\n \"--duration\",\n \"-d\",\n type=str,\n default=\"1h\",\n help=\"Duration of the reservation\",\n callback=_validate_reservation_duration,\n ),\n]\n\n_reservation_lookup_options = [\n click.option(\n \"--name\",\n type=str,\n default=None,\n help=\"Identifier for the reservation\",\n ),\n click.option(\n \"--requester\",\n \"-r\",\n type=str,\n default=None,\n help=\"Name of the user requesting a reservation\",\n ),\n click.option(\n \"--namespace\",\n \"-n\",\n type=str,\n default=None,\n help=\"Namespace for the reservation\",\n ),\n]\n\n\ndef options(options_list):\n \"\"\"Click decorator used to set a list of click options on a command.\"\"\"\n\n def inner(func):\n for option in reversed(options_list):\n func = option(func)\n return func\n\n return inner\n\n\n@namespace.command(\"list\")\n@click.option(\n \"--available\",\n \"-a\",\n is_flag=True,\n default=False,\n help=\"show only un-reserved/ready 
namespaces\",\n)\n@click.option(\n \"--mine\",\n \"-m\",\n is_flag=True,\n default=False,\n help=\"show only namespaces reserved in your name\",\n)\n@click.option(\n \"--output\",\n \"-o\",\n default=\"cli\",\n help=\"which output format to return the data in\",\n type=click.Choice([\"cli\", \"json\"], case_sensitive=False),\n)\ndef _list_namespaces(available, mine, output):\n \"\"\"Get list of ephemeral namespaces\"\"\"\n namespaces = get_namespaces(available=available, mine=mine)\n if not available and not mine and not namespaces:\n _error(NO_RESERVATION_SYS)\n elif not namespaces:\n if output == \"json\":\n click.echo(\"{}\")\n else:\n click.echo(\"no namespaces found\")\n else:\n if output == \"json\":\n data = {}\n for ns in namespaces:\n data[ns.name] = {\n \"reserved\": ns.reserved,\n \"ready\": ns.ready,\n \"requester\": ns.requester_name,\n \"expires_in\": ns.expires_in,\n }\n click.echo(json.dumps(data, indent=2))\n else:\n data = {\n \"NAME\": [ns.name for ns in namespaces],\n \"RESERVED\": [str(ns.reserved).lower() for ns in namespaces],\n \"RESERVABLE\": [str(ns.ready).lower() for ns in namespaces],\n \"REQUESTER\": [ns.requester_name for ns in namespaces],\n \"EXPIRES IN\": [ns.expires_in for ns in namespaces],\n }\n tabulated = tabulate(data, headers=\"keys\")\n click.echo(tabulated)\n\n\n@namespace.command(\"reserve\")\n@options(_ns_reserve_options)\n@click.argument(\"namespace\", required=False, type=str)\ndef _cmd_namespace_reserve(duration, retries, namespace):\n \"\"\"Reserve an ephemeral namespace (specific or random)\"\"\"\n if not get_namespaces():\n _error(NO_RESERVATION_SYS)\n ns = _reserve_namespace(duration, retries, namespace)\n click.echo(ns.name)\n\n\n@namespace.command(\"release\")\n@click.argument(\"namespace\", required=True, type=str)\n@click.option(\n \"-f\",\n \"--force\",\n is_flag=True,\n default=False,\n help=\"Do not check if you own this namespace\",\n)\ndef _cmd_namespace_release(namespace, force):\n \"\"\"Remove reservation from an ephemeral namespace\"\"\"\n if not get_namespaces():\n _error(NO_RESERVATION_SYS)\n if not force:\n _warn_if_unsafe(namespace)\n release_namespace(namespace)\n\n\n@namespace.command(\"wait-on-resources\")\n@click.argument(\"namespace\", required=True, type=str)\n@click.option(\n \"--db-only\",\n is_flag=True,\n default=False,\n help=\"Only wait for DB resources owned by ClowdApps to be ready\",\n)\n@options(_timeout_option)\ndef _cmd_namespace_wait_on_resources(namespace, timeout, db_only):\n \"\"\"Wait for rolled out resources to be ready in namespace\"\"\"\n try:\n _wait_on_namespace_resources(namespace, timeout, db_only=db_only)\n except TimedOutError as err:\n log.error(\"Hit timeout error: %s\", err)\n _error(\"namespace wait timed out\")\n\n\n@namespace.command(\"prepare\", hidden=True)\n@click.argument(\"namespace\", required=True, type=str)\ndef _cmd_namespace_prepare(namespace):\n \"\"\"Copy base resources into specified namespace (for admin use only)\"\"\"\n _prepare_namespace(namespace)\n\n\n@namespace.command(\"reconcile\", hidden=True)\ndef _cmd_namespace_reconcile():\n \"\"\"Run reconciler for namespace reservations (for admin use only)\"\"\"\n reconcile()\n\n\ndef _get_apps_config(source, target_env, ref_env, local_config_path):\n config = conf.load_config(local_config_path)\n\n if source == APP_SRE_SRC:\n log.info(\"fetching apps config using source: %s, target env: %s\", source, target_env)\n if not target_env:\n _error(\"target env must be supplied for source '{APP_SRE_SRC}'\")\n apps_config = 
get_apps_for_env(target_env)\n\n if target_env == conf.EPHEMERAL_ENV_NAME and not ref_env:\n log.info(\"target env is 'ephemeral' with no ref env given, using 'master' for all apps\")\n for _, app_cfg in apps_config.items():\n for component in app_cfg.get(\"components\", []):\n component[\"ref\"] = \"master\"\n\n # override any apps that were defined in 'apps' section of local config file\n apps_config.update(get_local_apps(config, fetch_remote=False))\n\n elif source == LOCAL_SRC:\n log.info(\"fetching apps config using source: %s\", source)\n apps_config = get_local_apps(config, fetch_remote=True)\n\n if ref_env:\n log.info(\"subbing app template refs/image tags using environment: %s\", ref_env)\n apps_config = sub_refs(apps_config, ref_env)\n\n return apps_config\n\n\ndef _get_env_name(target_namespace, env_name):\n if not env_name:\n if not target_namespace:\n _error(\n \"unable to infer name of ClowdEnvironment if namespace not provided.\"\n \" Please run with one of: --clowd-env or --namespace\"\n )\n env_name = conf.ENV_NAME_FORMAT.format(namespace=target_namespace)\n return env_name\n\n\ndef _process(\n app_names,\n source,\n get_dependencies,\n set_image_tag,\n ref_env,\n target_env,\n set_template_ref,\n set_parameter,\n clowd_env,\n local_config_path,\n remove_resources,\n no_remove_resources,\n single_replicas,\n component_filter,\n):\n apps_config = _get_apps_config(source, target_env, ref_env, local_config_path)\n\n processor = TemplateProcessor(\n apps_config,\n app_names,\n get_dependencies,\n set_image_tag,\n set_template_ref,\n set_parameter,\n clowd_env,\n remove_resources,\n no_remove_resources,\n single_replicas,\n component_filter,\n )\n return processor.process()\n\n\n@main.command(\"process\")\n@options(_process_options)\n@click.option(\n \"--namespace\",\n \"-n\",\n help=\"Namespace you intend to deploy to (default: none)\",\n type=str,\n)\ndef _cmd_process(\n app_names,\n source,\n get_dependencies,\n set_image_tag,\n ref_env,\n target_env,\n set_template_ref,\n set_parameter,\n clowd_env,\n namespace,\n local_config_path,\n remove_resources,\n no_remove_resources,\n single_replicas,\n component_filter,\n):\n \"\"\"Fetch and process application templates\"\"\"\n clowd_env = _get_env_name(namespace, clowd_env)\n\n processed_templates = _process(\n app_names,\n source,\n get_dependencies,\n set_image_tag,\n ref_env,\n target_env,\n set_template_ref,\n set_parameter,\n clowd_env,\n local_config_path,\n remove_resources,\n no_remove_resources,\n single_replicas,\n component_filter,\n )\n print(json.dumps(processed_templates, indent=2))\n\n\n@main.command(\"deploy\")\n@options(_process_options)\n@click.option(\n \"--namespace\",\n \"-n\",\n help=\"Namespace to deploy to (if none given, bonfire will try to reserve one)\",\n default=None,\n)\n@click.option(\n \"--import-secrets\",\n is_flag=True,\n help=\"Import secrets from local directory at deploy time\",\n default=False,\n)\n@click.option(\n \"--secrets-dir\",\n type=str,\n help=\"Directory to use for secrets import (default: \" \"$XDG_CONFIG_HOME/bonfire/secrets/)\",\n default=conf.DEFAULT_SECRETS_DIR,\n)\n@click.option(\n \"--no-release-on-fail\",\n \"-f\",\n is_flag=True,\n help=\"Do not release namespace reservation if deployment fails\",\n)\n@options(_ns_reserve_options)\n@options(_timeout_option)\ndef _cmd_config_deploy(\n app_names,\n source,\n get_dependencies,\n set_image_tag,\n ref_env,\n target_env,\n set_template_ref,\n set_parameter,\n clowd_env,\n local_config_path,\n remove_resources,\n 
no_remove_resources,\n single_replicas,\n namespace,\n duration,\n retries,\n timeout,\n no_release_on_fail,\n component_filter,\n import_secrets,\n secrets_dir,\n):\n \"\"\"Process app templates and deploy them to a cluster\"\"\"\n requested_ns = namespace\n\n log.debug(\"checking if namespace has been reserved via ns operator...\")\n operator_reservation = get_reservation(namespace=requested_ns)\n\n if not operator_reservation:\n log.debug(\"no ns operator reservation found, using old ns reservation system\")\n used_ns_reservation_system, ns = _get_target_namespace(duration, retries, requested_ns)\n else:\n log.debug(\"found existing ns operator reservation\")\n used_ns_reservation_system, ns = False, requested_ns\n\n if import_secrets:\n import_secrets_from_dir(secrets_dir)\n\n if not clowd_env:\n # if no ClowdEnvironment name provided, see if a ClowdEnvironment is associated with this ns\n match = find_clowd_env_for_ns(ns)\n if not match:\n _error(\n f\"could not find a ClowdEnvironment tied to ns '{ns}'. Specify which one \"\n \"if you have already deployed one with '--clowd-env' or deploy one with \"\n \"'bonfire deploy-env'\"\n )\n clowd_env = match[\"metadata\"][\"name\"]\n log.debug(\"inferred clowd_env: '%s'\", clowd_env)\n\n def _err_handler(err):\n try:\n if not no_release_on_fail and not requested_ns and used_ns_reservation_system:\n # if we auto-reserved this ns, auto-release it on failure unless\n # --no-release-on-fail was requested\n log.info(\"releasing namespace '%s'\", ns)\n release_namespace(ns)\n finally:\n msg = f\"deploy failed: {str(err)}\"\n _error(msg)\n\n try:\n log.info(\"processing app templates...\")\n apps_config = _process(\n app_names,\n source,\n get_dependencies,\n set_image_tag,\n ref_env,\n target_env,\n set_template_ref,\n set_parameter,\n clowd_env,\n local_config_path,\n remove_resources,\n no_remove_resources,\n single_replicas,\n component_filter,\n )\n log.debug(\"app configs:\\n%s\", json.dumps(apps_config, indent=2))\n if not apps_config[\"items\"]:\n log.warning(\"no configurations found to apply!\")\n else:\n log.info(\"applying app configs...\")\n apply_config(ns, apps_config)\n log.info(\"waiting on resources for max of %dsec...\", timeout)\n _wait_on_namespace_resources(ns, timeout)\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n _err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n _err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n else:\n log.info(\"successfully deployed to namespace '%s'\", ns)\n click.echo(ns)\n\n\ndef _process_clowdenv(target_namespace, quay_user, env_name, template_file):\n env_name = _get_env_name(target_namespace, env_name)\n return process_clowd_env(target_namespace, quay_user, env_name, template_file)\n\n\n@main.command(\"process-env\")\n@options(_clowdenv_process_options)\ndef _cmd_process_clowdenv(namespace, quay_user, clowd_env, template_file):\n \"\"\"Process ClowdEnv template and print output\"\"\"\n clowd_env_config = _process_clowdenv(namespace, quay_user, clowd_env, template_file)\n print(json.dumps(clowd_env_config, indent=2))\n\n\n@main.command(\"deploy-env\")\n@options(_clowdenv_process_options)\n@click.option(\n \"--import-secrets\",\n is_flag=True,\n help=\"Import secrets from local directory at deploy time\",\n default=False,\n)\n@click.option(\n \"--secrets-dir\",\n 
type=str,\n help=(\"Import secrets from this directory (default: \" \"$XDG_CONFIG_HOME/bonfire/secrets/)\"),\n default=conf.DEFAULT_SECRETS_DIR,\n)\n@options(_timeout_option)\ndef _cmd_deploy_clowdenv(\n namespace, quay_user, clowd_env, template_file, timeout, import_secrets, secrets_dir\n):\n \"\"\"Process ClowdEnv template and deploy to a cluster\"\"\"\n _warn_if_unsafe(namespace)\n\n def _err_handler(err):\n msg = f\"deploy failed: {str(err)}\"\n _error(msg)\n\n try:\n if import_secrets:\n import_secrets_from_dir(secrets_dir)\n\n clowd_env_config = _process_clowdenv(namespace, quay_user, clowd_env, template_file)\n\n log.debug(\"ClowdEnvironment config:\\n%s\", clowd_env_config)\n\n apply_config(None, clowd_env_config)\n\n if not namespace:\n # wait for Clowder to tell us what target namespace it created\n namespace = wait_for_clowd_env_target_ns(clowd_env)\n\n log.info(\"waiting on resources for max of %dsec...\", timeout)\n _wait_on_namespace_resources(namespace, timeout)\n\n clowd_env_name = find_clowd_env_for_ns(namespace)[\"metadata\"][\"name\"]\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n _err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n _err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n else:\n log.info(\"ClowdEnvironment '%s' using ns '%s' is ready\", clowd_env_name, namespace)\n click.echo(namespace)\n\n\n@main.command(\"process-iqe-cji\")\n@options(_iqe_cji_process_options)\ndef _cmd_process_iqe_cji(\n clowd_app_name,\n debug,\n marker,\n filter,\n env,\n image_tag,\n cji_name,\n template_file,\n requirements,\n requirements_priority,\n test_importance,\n):\n \"\"\"Process IQE ClowdJobInvocation template and print output\"\"\"\n cji_config = process_iqe_cji(\n clowd_app_name,\n debug,\n marker,\n filter,\n env,\n image_tag,\n cji_name,\n template_file,\n requirements,\n requirements_priority,\n test_importance,\n )\n print(json.dumps(cji_config, indent=2))\n\n\n@main.command(\"deploy-iqe-cji\")\n@click.option(\"--namespace\", \"-n\", help=\"Namespace to deploy to\", type=str, required=True)\n@options(_iqe_cji_process_options)\n@options(_timeout_option)\ndef _cmd_deploy_iqe_cji(\n namespace,\n clowd_app_name,\n debug,\n marker,\n filter,\n env,\n image_tag,\n cji_name,\n template_file,\n timeout,\n requirements,\n requirements_priority,\n test_importance,\n):\n \"\"\"Process IQE CJI template, apply it, and wait for it to start running.\"\"\"\n _warn_if_unsafe(namespace)\n\n def _err_handler(err):\n msg = f\"deploy failed: {str(err)}\"\n _error(msg)\n\n try:\n cji_config = process_iqe_cji(\n clowd_app_name,\n debug,\n marker,\n filter,\n env,\n image_tag,\n cji_name,\n template_file,\n requirements,\n requirements_priority,\n test_importance,\n )\n\n log.debug(\"processed CJI config:\\n%s\", cji_config)\n\n try:\n cji_name = cji_config[\"items\"][0][\"metadata\"][\"name\"]\n except (KeyError, IndexError):\n raise Exception(\"error parsing name of CJI from processed template, check CJI template\")\n\n apply_config(namespace, cji_config)\n\n log.info(\"waiting on CJI '%s' for max of %dsec...\", cji_name, timeout)\n pod_name = wait_on_cji(namespace, cji_name, timeout)\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n _err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n 
_err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n else:\n log.info(\n \"pod '%s' related to CJI '%s' in ns '%s' is running\", pod_name, cji_name, namespace\n )\n click.echo(pod_name)\n\n\n@main.command(\"version\")\ndef _cmd_version():\n \"\"\"Print bonfire version\"\"\"\n click.echo(\"bonfire version \" + get_version())\n\n\n@config.command(\"write-default\")\n@click.argument(\"path\", required=False, type=str)\ndef _cmd_write_default_config(path):\n \"\"\"Write default configuration file to PATH (default: $XDG_CONFIG_HOME/bonfire/config.yaml)\"\"\"\n conf.write_default_config(path)\n\n\n@config.command(\"edit\")\n@click.argument(\"path\", required=False, type=str)\ndef _cmd_edit_default_config(path):\n \"\"\"Edit configuration with $EDITOR (default path: $XDG_CONFIG_HOME/bonfire/config.yaml)\"\"\"\n conf.edit_default_config(path)\n\n\n@options(_app_source_options)\n@click.option(\n \"--components/--no-components\",\n \"list_components\",\n default=False,\n help=\"List components contained within each app group\",\n)\n@apps.command(\"list\")\ndef _cmd_apps_list(\n source,\n local_config_path,\n target_env,\n list_components,\n):\n \"\"\"List names of all apps that are marked for deployment in given 'target_env'\"\"\"\n apps = _get_apps_config(source, target_env, None, local_config_path)\n\n print(\"\")\n sorted_keys = sorted(apps.keys())\n for app_name in sorted_keys:\n app_config = apps[app_name]\n print(app_name)\n if list_components:\n component_names = sorted([c[\"name\"] for c in app_config[\"components\"]])\n for component_name in component_names:\n print(f\" `-- {component_name}\")\n\n\n@options(_app_source_options)\n@click.argument(\n \"component\",\n type=str,\n)\n@apps.command(\"what-depends-on\")\ndef _cmd_apps_what_depends_on(\n source,\n local_config_path,\n target_env,\n component,\n):\n \"\"\"Show any apps that depend on COMPONENT for deployments in given 'target_env'\"\"\"\n apps = _get_apps_config(source, target_env, None, local_config_path)\n found = find_what_depends_on(apps, component)\n print(\"\\n\".join(found) or f\"no apps depending on {component} found\")\n\n\n@reservation.command(\"create\")\n@click.option(\n \"--bot\",\n \"-b\",\n is_flag=True,\n help=\"Use this flag to skip the duplicate reservation check (for automation)\",\n)\n@options(_reservation_process_options)\n@options(_timeout_option)\ndef _create_new_reservation(bot, name, requester, duration, timeout):\n def _err_handler(err):\n msg = f\"reservation failed: {str(err)}\"\n _error(msg)\n\n try:\n res = get_reservation(name)\n # Name should be unique on reservation creation.\n if res:\n raise FatalError(f\"Reservation with name {name} already exists\")\n\n res_config = process_reservation(name, requester, duration)\n\n log.debug(\"processed reservation:\\n%s\", res_config)\n\n if not bot:\n if check_for_existing_reservation(res_config[\"items\"][0][\"spec\"][\"requester\"]):\n _warn_of_existing(res_config[\"items\"][0][\"spec\"][\"requester\"])\n\n try:\n res_name = res_config[\"items\"][0][\"metadata\"][\"name\"]\n except (KeyError, IndexError):\n raise Exception(\n \"error parsing name of Reservation from processed template, \"\n \"check Reservation template\"\n )\n\n apply_config(None, list_resource=res_config)\n\n ns_name = wait_on_reservation(res_name, timeout)\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n 
_err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n _err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n else:\n log.info(\n \"namespace '%s' is reserved by '%s' for '%s'\",\n ns_name,\n res_config[\"items\"][0][\"spec\"][\"requester\"],\n duration,\n )\n click.echo(ns_name)\n\n\n@reservation.command(\"extend\")\n@click.option(\n \"--duration\",\n \"-d\",\n type=str,\n default=\"1h\",\n help=\"Amount of time to extend the reservation\",\n callback=_validate_reservation_duration,\n)\n@options(_reservation_lookup_options)\ndef _extend_reservation(name, namespace, requester, duration):\n def _err_handler(err):\n msg = f\"reservation extension failed: {str(err)}\"\n _error(msg)\n\n if not (name or namespace or requester):\n _err_handler(\n \"To extend a reservation provide one of name, \"\n \"namespace, or requester. See 'bonfire reservation extend -h'\"\n )\n\n try:\n res = get_reservation(name, namespace, requester)\n if res:\n res_config = process_reservation(\n res[\"metadata\"][\"name\"],\n res[\"spec\"][\"requester\"],\n duration,\n )\n\n log.debug(\"processed reservation:\\n%s\", res_config)\n\n apply_config(None, list_resource=res_config)\n else:\n raise FatalError(\"Reservation lookup failed\")\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n _err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n _err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n else:\n log.info(\"reservation '%s' extended by '%s'\", res[\"metadata\"][\"name\"], duration)\n\n\n@reservation.command(\"delete\")\n@options(_reservation_lookup_options)\ndef _delete_reservation(name, namespace, requester):\n def _err_handler(err):\n msg = f\"reservation deletion failed: {str(err)}\"\n _error(msg)\n\n if not (name or namespace or requester):\n _err_handler(\n \"To delete a reservation provide one of name, \"\n \"namespace, or requester. 
See 'bonfire reservation delete -h'\"\n )\n\n try:\n res = get_reservation(name, namespace, requester)\n if res:\n _warn_before_delete()\n res_name = res[\"metadata\"][\"name\"]\n log.info(\"deleting reservation '%s'\", res_name)\n oc(\"delete\", \"reservation\", res_name)\n log.info(\"reservation '%s' deleted\", res_name)\n else:\n raise FatalError(\"Reservation lookup failed\")\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n _err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n _err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n\n\n@reservation.command(\"list\")\n@click.option(\n \"--mine\",\n \"-m\",\n is_flag=True,\n help=\"Return reservations belonging to the result of oc whoami\",\n)\n@click.option(\n \"--requester\",\n \"-r\",\n type=str,\n default=None,\n help=\"Return reservations belonging to the provided requester\",\n)\ndef _list_reservations(mine, requester):\n def _err_handler(err):\n msg = f\"reservation listing failed: {str(err)}\"\n _error(msg)\n\n try:\n if mine:\n try:\n requester = whoami()\n except Exception:\n log.info(\n \"whoami returned an error - getting reservations for 'bonfire'\"\n ) # minikube\n requester = \"bonfire\"\n oc(\"get\", \"reservation\", \"--selector\", f\"requester={requester}\")\n else:\n if requester:\n oc(\"get\", \"reservation\", \"--selector\", f\"requester={requester}\")\n else:\n oc(\"get\", \"reservation\")\n except KeyboardInterrupt as err:\n log.error(\"aborted by keyboard interrupt!\")\n _err_handler(err)\n except TimedOutError as err:\n log.error(\"hit timeout error: %s\", err)\n _err_handler(err)\n except FatalError as err:\n log.error(\"hit fatal error: %s\", err)\n _err_handler(err)\n except Exception as err:\n log.exception(\"hit unexpected error!\")\n _err_handler(err)\n\n\ndef main_with_handler():\n try:\n main()\n except FatalError as err:\n _error(str(err))\n\n\nif __name__ == \"__main__\":\n main_with_handler()\n","sub_path":"bonfire/bonfire.py","file_name":"bonfire.py","file_ext":"py","file_size_in_byte":40441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"5453263","text":"import pymongo\n\nclient = pymongo.MongoClient()\ndb = client['test']\ndb = client.test\n\nrestaurants = db['restaurants']\nrestaurants = db.restaurants\n\nfor doc in restaurants.find():\n print(type(doc), doc)\n\n\ncondition = {\"borough\": \"Manhattan\"}\n\n# query embedded documents\ncondition = {\"address.zipcode\": \"10075\"}\n\n# query field in array\ncondition = {\"grades.grade\": \"B\"}\n\n# grades.score > 30\ncondition = {\"grades.score\": {\"$gt\": 30}}\n\n# grades.score < 30\ncondition = {\"grades.score\": {\"$lt\": 10}}\n\n# combine conditions: logic AND\ncondition = {\"cuisine\": \"Italian\", \"address.zipcode\": \"10075\"}\n\n# combine conditions: logic OR\ncondition = {\"$or\": [{\"cuisine\": \"Italian\"}, {\"address.zipcode\": \"10075\"}]}\n\nfor doc in restaurants.find(condition):\n print(type(doc), doc)\n\ncursor = restaurants.find(condition).sort([\n ('borough', pymongo.ASCENDING),\n ('address.zipcode', pymongo.DESCENDING),\n])\n\n\n# Insert One\nfrom datetime import datetime\ndoc = restaurants.insert_one(\n {\n \"address\": {\n \"street\": \"2 Avenue\",\n \"zipcode\": \"10075\",\n \"building\": \"1480\",\n \"coord\": [-73.9557413, 40.7720266]\n },\n 
\"borough\": \"Manhattan\",\n \"cuisine\": \"Italian\",\n \"grades\": [\n {\n \"date\": datetime.strptime(\"2014-10-01\", \"%Y-%m-%d\"),\n \"grade\": \"A\",\n \"score\": 11\n },\n {\n \"date\": datetime.strptime(\"2014-01-16\", \"%Y-%m-%d\"),\n \"grade\": \"B\",\n \"score\": 17\n }\n ],\n \"name\": \"Vella\",\n \"restaurant_id\": \"41704620\"\n }\n)\ndoc.inserted_id\n\n\n# Update One\nresult = restaurants.update_one(\n {\"name\": \"Juni\"},\n {\n \"$set\": {\n \"cuisine\": \"American (New)\"\n },\n \"$currentDate\": {\"lastModified\": True}\n }\n)\n\nresult.matched_count\nresult.modified_count\n\n\n# Replace a Document\n","sub_path":"python.tutorial/mongodb_study/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"606309964","text":"\n\nimport copy\nimport functools\nimport itertools\nimport operator\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport project.agents as agents\nimport project.policies as policies\nimport project.networks as networks\nimport project.utils as utils\nimport project.critics as critics\nimport project.buffers as buffers\nimport project.samplers as samplers\nimport project.normalizers as normalizers\nimport project.envs as envs\n\n\nclass DIAYN(agents.Agent):\n\n class Config(agents.Agent.Config):\n\n comment = None\n\n # -- General\n env = 'HalfCheetah-v2'\n expert_data = None\n expert_data_size = None\n num_skills = 50\n gamma = 0.99 # Discount factor\n num_epochs = 10_000\n num_samples_per_epoch = 1000\n num_train_steps_per_epoch = 1000\n batch_size = 128\n buffer_capacity = 10_000_000\n min_buffer_size = 1_000 # Min samples in replay buffer before training starts\n max_path_length_train = 1_000\n log_freq = 1\n save_freq = 100\n\n # -- Evaluation\n eval_freq = 100\n eval_size = 1_000\n eval_video_freq = 1\n eval_video_length = 100 # Max length for eval video\n eval_video_size = [200, 200]\n eval_video_fps = 20\n max_path_length_eval = 1_000\n\n LR = 3e-4 #1e-3\n WIDTH = 300\n\n # -- Policy\n policy_num_components = 4\n policy_hidden_num = 2\n policy_hidden_size = WIDTH\n policy_hidden_act = 'relu'\n policy_optimizer = 'adam'\n policy_lr = LR\n policy_grad_norm_clip = None #10.0\n\n # -- Discriminator\n clf_enc_enable = True\n clf_enc_initial_steps = 1000\n clf_enc_hidden_num = 0\n clf_enc_hidden_size = 64\n clf_enc_dim = 5\n clf_hidden_num = 2\n clf_hidden_size = WIDTH\n clf_hidden_act = 'relu'\n clf_optimizer = 'adam'\n clf_lr = LR\n clf_grad_norm_clip = None #20.0\n\n # -- Critic\n critic_hidden_num = 2\n critic_hidden_size = WIDTH\n critic_hidden_act = 'relu'\n target_update_tau = 0.01 # Strength of target network polyak averaging\n target_update_freq = 1 # How often to update the target networks\n critic_optimizer = 'adam'\n critic_lr = LR\n critic_grad_norm_clip = None #20.0\n\n # -- Temperature\n alpha_initial = 0.1\n target_entropy = None # Will be inferred from action space if None\n alpha_optimizer = 'adam'\n alpha_lr = LR\n train_alpha = False # Disable training of alpha if this is set\n\n\n\n def init(self):\n \"\"\"\n Initializes the DIAYN agent by setting up\n - Networks\n - A skill conditioned policy wrapping\n an MLP-based tanh gaussian policy\n - A classifier for predicting current skill from observation\n - 2 MLP-based Q-functions: s x a x z -> q\n - A tunably entropy weight (temperature/alpha)\n - Optimizers, one for each of the aforementioned\n - A simple ring-buffer for 
experience replay\n - Samplers for gathering training data and evaluating\n - Target entropy for the soft policy\n \"\"\"\n # Initialize environment to get input/output dimensions\n self.train_env = utils.make_env(self.cfg.env)\n self.eval_env = utils.make_env(self.cfg.env)\n ob_dim, = self.ob_dim, = self.train_env.observation_space.shape\n ac_dim, = self.ac_dim, = self.train_env.action_space.shape\n # Setup actor and critics\n self.policy = policies.SkillConditionedTanhGMMMLPPolicy(\n ob_dim=ob_dim,\n num_skills=self.cfg.num_skills,\n ac_dim=ac_dim,\n num_components=self.cfg.policy_num_components,\n hidden_num=self.cfg.policy_hidden_num,\n hidden_size=self.cfg.policy_hidden_size,\n hidden_act=self.cfg.policy_hidden_act,\n )\n if self.cfg.clf_enc_enable:\n enc_dim = self.cfg.clf_enc_dim\n self.encoder = networks.MLP(\n input_size=ob_dim,\n output_size=self.cfg.clf_enc_dim,\n hidden_size=self.cfg.clf_enc_hidden_size,\n hidden_num=self.cfg.clf_enc_hidden_num,\n hidden_act=self.cfg.clf_hidden_act\n )\n else:\n enc_dim = ob_dim\n self.encoder = nn.Identity()\n self.clf_gan = networks.MLP(\n input_size=enc_dim,\n output_size=2,\n hidden_num=self.cfg.clf_hidden_num,\n hidden_size=self.cfg.clf_hidden_size,\n hidden_act=self.cfg.clf_hidden_act,\n )\n self.clf_diayn = networks.MLP(\n input_size=enc_dim,\n output_size=self.cfg.num_skills,\n hidden_num=self.cfg.clf_hidden_num,\n hidden_size=self.cfg.clf_hidden_size,\n hidden_act=self.cfg.clf_hidden_act,\n )\n self.qf1 = critics.QAMLPCritic(\n ob_dim + self.cfg.num_skills, ac_dim,\n hidden_num=self.cfg.critic_hidden_num,\n hidden_size=self.cfg.critic_hidden_size,\n hidden_act=self.cfg.critic_hidden_act,\n )\n self.qf2 = critics.QAMLPCritic(\n ob_dim + self.cfg.num_skills, ac_dim,\n hidden_num=self.cfg.critic_hidden_num,\n hidden_size=self.cfg.critic_hidden_size,\n hidden_act=self.cfg.critic_hidden_act,\n )\n\n # Temperature parameter used to weight the entropy bonus\n self.log_alpha = nn.Parameter(\n torch.as_tensor(self.cfg.alpha_initial, dtype=torch.float32).log()\n )\n\n # Make copies of Q-functions for bootstrap targets\n self.qf1_target = copy.deepcopy(self.qf1)\n self.qf2_target = copy.deepcopy(self.qf2)\n\n # And send everything to the right device\n self.to(self.device)\n\n # Setup optimizers for all networks (and log_alpha)\n self.policy_optimizer = utils.get_optimizer(\n name=self.cfg.policy_optimizer,\n params=self.policy.parameters(),\n lr=self.cfg.policy_lr,\n )\n self.clf_diayn_optimizer = utils.get_optimizer(\n name=self.cfg.clf_optimizer,\n params=self.clf_diayn.parameters(),\n lr=self.cfg.clf_lr,\n )\n self.clf_gan_optimizer = utils.get_optimizer(\n name=self.cfg.clf_optimizer,\n params=itertools.chain(\n self.encoder.parameters(),\n self.clf_gan.parameters()\n ),\n lr=self.cfg.clf_lr,\n )\n self.qf1_optimizer = utils.get_optimizer(\n name=self.cfg.critic_optimizer,\n params=self.qf1.parameters(),\n lr=self.cfg.critic_lr,\n )\n self.qf2_optimizer = utils.get_optimizer(\n name=self.cfg.critic_optimizer,\n params=self.qf2.parameters(),\n lr=self.cfg.critic_lr\n )\n self.alpha_optimizer = utils.get_optimizer(\n name=self.cfg.alpha_optimizer,\n params=[self.log_alpha],\n lr=self.cfg.alpha_lr,\n )\n\n # Setup replay buffer\n self.buffer = buffers.RingBuffer(\n capacity=int(self.cfg.buffer_capacity),\n keys=[ 'ob', 'skill', 'ac', 'rew', 'next_ob', 'done'],\n dims=[ ob_dim, None, ac_dim, None, ob_dim, None],\n dtypes=[float, int, float, float, float, float]\n )\n\n # Setup samplers (used for data generating / evaluating rollouts)\n 
self.train_sampler = samplers.Sampler(\n env=self.train_env,\n policy=self.policy,\n max_steps=self.cfg.max_path_length_train\n )\n self.eval_sampler = samplers.Sampler(\n env=self.eval_env,\n policy=self.policy,\n max_steps=self.cfg.max_path_length_eval\n )\n\n # Set target entropy, derive from size of action space if non-obvious\n if self.cfg.target_entropy is None:\n self.target_entropy = -ac_dim\n self.logger.info(\n 'Using dynamic target entropy: %s', self.target_entropy\n )\n else:\n self.target_entropy = self.cfg.target_entropy\n self.logger.info(\n 'Using static target entropy: %s', self.target_entropy\n )\n\n @property\n def alpha(self):\n \"\"\"Un-logs the temperature parameter to get the entropy weight\"\"\"\n return self.log_alpha.exp().detach()\n\n def train(self):\n \"\"\"\n Trains the SAC agent with the following procedure:\n - Sample initial data for the replay buffer\n - Loop for the configured number of epochs:\n - Sample some more data\n - Loop for the configured number of train steps:\n - Train critics against Bellman bootstraps\n - Train actor against critic with entropy bonus\n - Train alpha based on current and target entropy\n - Polyak average the target networks of the critics\n - Evaluate the current policy\n \"\"\"\n # Fit encoder\n if self.cfg.clf_enc_enable:\n self.logger.info('Fitting encoder')\n self.pretrain_encoder()\n\n # Generate initial data for the replay buffer\n missing_data = self.cfg.min_buffer_size - len(self.buffer)\n if missing_data > 0:\n self.logger.info(f'Seeding buffer with {missing_data} samples')\n self.buffer << self.train_sampler.sample_steps(\n n=missing_data, random=False\n )\n\n self.logger.info('Begin training')\n for epoch in range(1, self.cfg.num_epochs + 1):\n\n # Create a logger for dumping diagnostics\n epoch_logs = self.logger.epoch_logs(epoch)\n\n # Sample more steps for this epoch and add to replay buffer\n self.buffer << self.train_sampler.sample_steps(\n n=self.cfg.num_samples_per_epoch,\n )\n epoch_logs.add_scalar_dict(self.get_data_info(), prefix='Data')\n\n # Train with data from replay buffer\n for step in range(self.cfg.num_train_steps_per_epoch):\n\n # Sample batch (and convert all to torch tensors)\n obs, skills, acs, _, next_obs, dones = self.buffer.sample(\n self.cfg.batch_size,\n tensor=True,\n device=self.device,\n as_dict=False,\n )\n\n # Train discriminator\n disc_info = self.update_disc(obs=obs, skills=skills)\n epoch_logs.add_scalar_dict(disc_info, prefix='Disc')\n\n # Train q functions\n critic_info = self.update_critics(\n obs=obs, skills=skills, acs=acs, next_obs=next_obs, dones=dones\n )\n epoch_logs.add_scalar_dict(critic_info, prefix='Critic')\n\n # Train actor and tune temperature\n actor_info = self.update_actor(obs=obs, skills=skills)\n epoch_logs.add_scalar_dict(actor_info, prefix='Actor')\n\n # Apply polyak averaging to target networks\n self.update_targets()\n\n # Eval, on occasions\n if epoch % self.cfg.eval_freq == 0:\n self.logger.info('Evaluating %s skills', self.cfg.num_skills)\n eval_info, eval_frames = self.evaluate(epoch, greedy=True)\n epoch_logs.add_scalar_dict(\n eval_info, prefix='Eval', agg=['min', 'max', 'mean']\n )\n if eval_frames:\n epoch_logs.add_videos(\n 'EvalRollout', eval_frames, fps=self.cfg.eval_video_fps\n )\n\n # Write logs\n if (\n epoch % self.cfg.log_freq == 0 or\n epoch % self.cfg.eval_freq == 0\n ):\n epoch_logs.dump(step=self.train_sampler.total_steps)\n\n # Save checkpoint\n if (\n epoch % self.cfg.save_freq == 0 and\n self.savedir is not None\n ):\n filename = 
f'agent-{str(epoch).zfill(5)}.pt'\n                self.save(name=filename)\n                self.logger.info('Agent saved to %s', filename)\n\n\n    def get_data_info(self, debug=False):\n        \"\"\"\n        Generates diagnostics about the gathered data\n        \"\"\"\n        info = {}\n        info['BufferSize'] = len(self.buffer)\n        info['TotalEnvSteps'] = self.train_sampler.total_steps\n        info['Last25TrainRets'] = self.train_sampler.returns[-25:]\n        # Optionally add summary statistics about everything in buffer\n        if debug:\n            info.update(self.buffer.get_info())\n\n        return info\n\n    def update_clf_gan(self, obs, expert_obs):\n        \"\"\"\n        Updates parameters for the GAN expert detector\n\n        Args:\n            obs (Tensor): A batch of observations, each O floats\n            expert_obs (Tensor): A batch of observations, each O floats\n\n        Returns:\n            A dictionary with diagnostics\n        \"\"\"\n        # Concatenate into one batch and generate targets\n        obs_all = torch.cat([obs, expert_obs])\n        is_expert = torch.cat([\n            torch.zeros(len(obs), dtype=torch.int64, device=obs.device),\n            torch.ones(len(expert_obs), dtype=torch.int64, device=obs.device),\n        ])\n\n        # Encode and compute loss\n        logits = self.clf_gan(self.encoder(obs_all))\n        clf_gan_loss = F.cross_entropy(logits, is_expert)\n\n        # Minimize it\n        clf_gan_grad_norm = utils.optimize(\n            loss=clf_gan_loss,\n            optimizer=self.clf_gan_optimizer,\n            norm_clip=self.cfg.clf_grad_norm_clip,\n        )\n\n        # Diagnostics\n        info = {}\n        info['ClfGANAccuracy'] = (logits.argmax(dim=-1) == is_expert).float().mean()\n        info['ClfGANLoss'] = clf_gan_loss.detach()\n        info['ClfGANGradNorm'] = clf_gan_grad_norm\n\n        return info\n\n    def update_disc(self, obs, skills):\n        \"\"\"\n        Updates parameters of the discriminator (classifier)\n\n        Args:\n            obs (Tensor): A batch of observations, each O floats\n            skills (Tensor): A batch of skills, each a single integer\n\n        Returns:\n            A dictionary with diagnostics\n        \"\"\"\n        # Encode observations\n        with torch.no_grad():\n            enc = self.encoder(obs)\n\n        # Compute classification loss for skills\n        logits = self.clf_diayn(enc)\n        clf_loss = F.cross_entropy(logits, skills)\n\n        # Minimize it\n        clf_grad_norm = utils.optimize(\n            loss=clf_loss,\n            optimizer=self.clf_diayn_optimizer,\n            norm_clip=self.cfg.clf_grad_norm_clip,\n        )\n\n        # Diagnostics\n        info = {}\n        info['ClfAccuracy'] = (logits.argmax(dim=-1) == skills).float().mean()\n        info['ClfLoss'] = clf_loss.detach()\n        info['ClfGradNorm'] = clf_grad_norm\n\n        return info\n\n    def update_critics(self, obs, skills, acs, next_obs, dones):\n        \"\"\"\n        Updates parameters of the critics (Q-functions)\n\n        Args:\n            obs (Tensor): A batch of observations, each O floats\n            skills (Tensor): A batch of skills, each a single integer\n            acs (Tensor): A batch of actions, each a float\n            next_obs (Tensor): A batch of next observations (see obs)\n            dones (Tensor): A batch of done flags, each a single float\n\n        Returns:\n            A dictionary with diagnostics\n        \"\"\"\n        # Generate DIAYN reward\n        with torch.no_grad():\n            # Encode observations\n            enc = self.encoder(obs)\n            # Compute cross-entropy loss for the discriminator\n            clf_xe = F.cross_entropy(self.clf_diayn(enc), skills, reduction='none')\n            # Then, use the cross-entropy to generate a synthetic reward\n            rews = -1 * clf_xe.detach()\n            # Subtract (uniform) log likelihood\n            p_skill = torch.tensor(1. 
/ self.cfg.num_skills, device=rews.device)\n            rews -= torch.log(p_skill)\n\n        # Convert skills to one-hot so they fit into the policy and critics\n        skills_one_hot = utils.to_one_hot(skills, self.cfg.num_skills)\n\n        # Make action-value predictions with both q-functions\n        q1_pred = self.qf1(obs, skills_one_hot, acs)\n        q2_pred = self.qf2(obs, skills_one_hot, acs)\n\n        # Bootstrap target from next observation\n        with torch.no_grad():\n\n            # Sample actions and their log probabilities at next step\n            pi = self.policy(next_obs, skills_one_hot)\n            next_acs, next_acs_logp = pi.sample_with_log_prob()\n\n            # Select the smallest estimate of action-value in the next step\n            target_q_values_raw = torch.min(\n                self.qf1_target(next_obs, skills_one_hot, next_acs),\n                self.qf2_target(next_obs, skills_one_hot, next_acs),\n            )\n\n            # And add the weighted entropy bonus (negative log)\n            target_q_values = target_q_values_raw - self.alpha * next_acs_logp\n\n            # Combine with rewards using the Bellman recursion\n            q_target = rews + (1. - dones) * self.cfg.gamma * target_q_values\n\n        # Use mean squared error as loss\n        qf1_loss = F.mse_loss(q1_pred, q_target)\n        qf2_loss = F.mse_loss(q2_pred, q_target)\n\n        # And minimize it\n        qf1_grad_norm = utils.optimize(\n            loss=qf1_loss,\n            optimizer=self.qf1_optimizer,\n            norm_clip=self.cfg.critic_grad_norm_clip,\n        )\n        qf2_grad_norm = utils.optimize(\n            loss=qf2_loss,\n            optimizer=self.qf2_optimizer,\n            norm_clip=self.cfg.critic_grad_norm_clip,\n        )\n\n        # Diagnostics\n        info = {} # For later\n        info['DIAYNReward'] = rews.mean()\n        info['QTarget'] = q_target.mean().detach()\n        info['QAbsDiff'] = (q1_pred - q2_pred).abs().mean().detach()\n        info['Qf1Loss'] = qf1_loss.detach()\n        info['Qf2Loss'] = qf2_loss.detach()\n        info['Qf1GradNorm'] = qf1_grad_norm\n        info['Qf2GradNorm'] = qf2_grad_norm\n\n        return info\n\n    def update_actor(self, obs, skills):\n        \"\"\"\n        Updates parameters for the policy (and possibly alpha)\n\n        Args:\n            obs (Tensor): A batch of observations, each O floats\n            skills (Tensor): A batch of skills, each a single integer\n\n        Returns:\n            A dictionary with detached diagnostic info\n        \"\"\"\n        # Convert skills to one-hot\n        skills_one_hot = utils.to_one_hot(skills, self.cfg.num_skills)\n\n        # Sample actions, along with log(p(a|s)), from current policy\n        pi = self.policy(obs, skills_one_hot)\n        acs, log_prob = pi.sample_with_log_prob(grad=True) # Enable r-sampling\n\n        # Estimate value for each action and take the lower between q1 and q2\n        q_values = torch.min(\n            self.qf1(obs, skills_one_hot, acs),\n            self.qf2(obs, skills_one_hot, acs)\n        )\n\n        # Climb the gradient of the (lower) q-function and add entropy bonus\n        # -> loss is negative q + negative entropy\n        policy_loss = (self.alpha * log_prob - q_values).mean()\n\n        # Minimize it\n        policy_grad_norm = utils.optimize(\n            loss=policy_loss,\n            optimizer=self.policy_optimizer,\n            norm_clip=self.cfg.policy_grad_norm_clip,\n        )\n\n        # Generate loss for the alpha value\n        target_entropy_plus_logp = log_prob.detach() + self.target_entropy\n        alpha_loss = (-self.log_alpha * target_entropy_plus_logp).mean()\n        # But only minimize it if configured\n        if self.cfg.train_alpha:\n            utils.optimize(alpha_loss, self.alpha_optimizer)\n\n        # Diagnostics\n        info = {}\n        info['Entropy'] = -log_prob.mean().detach()\n        info['PolicyLoss'] = policy_loss.detach()\n        info['PolicyGradNorm'] = policy_grad_norm\n        info['AlphaLoss'] = alpha_loss.detach()\n        info['AlphaValue'] = self.alpha\n\n        return info\n\n    def update_targets(self):\n        
\"\"\"\n Applies polyak averaging to the target network of both q-functions\n \"\"\"\n utils.polyak(\n net=self.qf1, target=self.qf1_target,\n tau=self.cfg.target_update_tau\n )\n utils.polyak(\n net=self.qf2, target=self.qf2_target,\n tau=self.cfg.target_update_tau\n )\n\n def evaluate(self, epoch=None, render=False, greedy=False):\n \"\"\"\n Evaluate the current policy across all skills\n\n Args:\n epoch (int): Used to determine whether to do rendering if set\n render (bool): Manually specifies whether to render\n greedy (bool): Whether to do mean action (false=sample action)\n\n Returns:\n A tuple containing\n - A dictionary of eval results {Return: [...], TrajLen: [...]}\n - A list of video frames for each skill (None if not rendering)\n \"\"\"\n # Determine whether we should render for this epoch\n if render:\n render = True\n elif epoch is None:\n render = False\n elif self.cfg.eval_video_freq <= 0:\n render = False\n else:\n eval_num = epoch // self.cfg.eval_freq\n render = eval_num % self.cfg.eval_video_freq == 0\n\n # Rollout all skills\n info = []\n frames = []\n for skill in range(self.cfg.num_skills):\n with self.policy.configure(greedy=greedy, skill_dist=skill):\n info_, frames_ = self.eval_sampler.evaluate(\n n=self.cfg.eval_size,\n render=render,\n render_max=self.cfg.eval_video_length,\n render_size=tuple(self.cfg.eval_video_size),\n )\n info.append(info_)\n frames.append(frames_)\n\n # Combine: list of dicts of lists -> dict of lists\n info = {\n k: functools.reduce(operator.add, (i[k] for i in info))\n for k in info[0]\n }\n frames = frames if render else None\n\n return info, frames\n\n def load_expert_data(self):\n \"\"\"\n Initializes a buffer with expert data from config\n \"\"\"\n # Get expert data\n if self.cfg.expert_data is None:\n raise ValueError('No expert data provided in config')\n elif isinstance(self.cfg.expert_data, str):\n self.logger.info('Loading expert data from: %s', self.cfg.expert_data)\n with open(self.cfg.expert_data, 'rb') as f:\n expert_data = pickle.load(f)\n else:\n self.logger.info('Using configured expert data as-is')\n expert_data = self.cfg.expert_data\n # Assert that it is a list of dicts\n if (\n not isinstance(expert_data, (list, tuple)) or\n not len(expert_data) or\n not all(isinstance(path, dict) for path in expert_data) or\n any({'ob', 'ac'} - set(path) for path in expert_data)\n ):\n raise ValueError(\n 'Expert data must be a non-empty list of dicts with'\n '\"ob\" and \"ac\" keys'\n )\n # Limit amount of expert data if configured\n if self.cfg.expert_data_size:\n self.logger.info(\n 'Filtering expert data to %s entries', self.cfg.expert_data_size\n )\n expert_data_filtered = []\n n = 0\n for path in expert_data:\n j = min(len(path['ob']), self.cfg.expert_data_size - n)\n path = {k: v[:j] for k, v in path.items()}\n expert_data_filtered.append(path)\n n += j\n if n >= self.cfg.expert_data_size:\n break\n else:\n raise ValueError('Not enough expert data!')\n expert_data = expert_data_filtered\n # Initialize buffer\n expert_buffer = buffers.RingBuffer(\n capacity=sum(len(path['ob']) for path in expert_data),\n keys=['ob'],\n dims=[self.ob_dim],\n )\n self.logger.info('Initialized expert buffer (%s)', expert_buffer)\n # And fill it\n for path in expert_data:\n expert_buffer << path\n self.logger.info('Filled expert buffer (%s)', expert_buffer)\n return expert_buffer\n\n\n def pretrain_encoder(self):\n\n self.logger.info('Loading expert data')\n expert_buffer = self.load_expert_data()\n\n self.logger.info('Sampling random data')\n 
rand_buffer = buffers.RingBuffer(\n capacity=len(expert_buffer),\n keys=['ob'],\n dims=[self.ob_dim],\n )\n rand_buffer << self.train_sampler.sample_steps(\n n=len(expert_buffer), random=True\n )\n self.logger.info('Random data: %s', rand_buffer)\n\n for i in range(self.cfg.clf_enc_initial_steps):\n expert_obs, = expert_buffer.sample(\n self.cfg.batch_size,\n tensor=True,\n device=self.device,\n as_dict=False\n )\n obs, = rand_buffer.sample(\n self.cfg.batch_size,\n tensor=True,\n device=self.device,\n as_dict=False,\n )\n clf_gan_info = self.update_clf_gan(obs=obs, expert_obs=expert_obs)\n if i % 100 == 0:\n self.logger.info('Discriminator: %s', clf_gan_info)\n\n\n\nif __name__ == '__main__':\n DIAYN.run_cli()\n","sub_path":"project/agents/diayn_enc.py","file_name":"diayn_enc.py","file_ext":"py","file_size_in_byte":25841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121356444","text":"#!/usr/bin/env python3\n\nfrom csv import DictReader, writer, QUOTE_NONNUMERIC\nfrom collections import Counter, namedtuple\n\nTypeCrashes = namedtuple('TypeCrashes', ['type', 'crashes'])\n\ndef getAircraftTypes():\n with open('cleansedCrashData.csv', 'r') as csvFile:\n reader = DictReader(csvFile)\n for row in reader:\n yield row['Type']\n \ndef writeSortedListToCSV(sortable, key, filename, reverse=False): \n sortedBy = sorted(sortable, key=key, reverse=reverse)\n with open(filename, 'w') as outCsv:\n csvWriter = writer(outCsv, quoting=QUOTE_NONNUMERIC)\n csvWriter.writerow(['type', 'crashes'])\n for row in sortedBy:\n csvWriter.writerow(row)\n \ndef main():\n \n crashesByType = Counter(getAircraftTypes())\n \n sortable = [TypeCrashes(key, crashesByType[key]) for key in crashesByType]\n \n writeSortedListToCSV(\n sortable, \n lambda x: x.crashes,\n 'TypesByNumberOfCrashes.csv',\n True)\n \n writeSortedListToCSV(\n sortable,\n lambda x: x.type,\n 'TypesAlphabetically.csv')\n\n \n \n\nif __name__ == '__main__':\n main()\n","sub_path":"crashData/CrashesByAircraftType.py","file_name":"CrashesByAircraftType.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80393526","text":"#coding:utf-8\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom automaticPort.forms import FormUsuario\nfrom automaticPort.models.modelUsuario import Usuario\nfrom automaticPort.forms import FormLogin\n\nclass Cadastro(View):\n template = \"visitante/cadastro/cadastro.html\"\n conteudo = {\n 'form_login': FormLogin(),\n 'controleMenu':'cadastroUsuario',\n }\n\n def get(self, request):\n form_usuario = FormUsuario()\n\n self.conteudo.update({\n 'form_usuario': form_usuario,\n 'messengerSucess': '',\n 'messengerError': '',\n })\n return render(request, self.template, self.conteudo)\n\n def post(self, request):\n self.conteudo.update({\n 'messengerSucess': '',\n 'messengerError': '',\n })\n\n form_usuario = FormUsuario(request.POST)\n if form_usuario.is_valid():\n form_usuario.save()\n self.conteudo.update({\n 'messengerSucess': 'Cadastro Realizado! 
Verifique seu email para ativá-lo.',\n })\n\n self.conteudo.update({\n 'form_usuario': form_usuario,\n })\n return render(request, self.template, self.conteudo)\n\n\n\nclass AtivaCadastro(View):\n template = 'visitante/index/index.html'\n conteudo = {\n 'form_login': FormLogin(),\n 'controleMenu': 'index',\n }\n\n def get(self, request, chaveAtivacao=None):\n self.conteudo.update({\n 'messengerSucess': '',\n 'messengerError': '',\n })\n if (chaveAtivacao):\n try:\n usuario = Usuario.objects.get(chaveAtivacaoUsuario=str(chaveAtivacao))\n except:\n usuario=None\n\n if (usuario):\n if not (usuario.is_active):\n usuario.is_active = True\n usuario.save()\n self.conteudo.update({\n 'messengerSucess': 'Cadastro ativado!',\n })\n else:\n self.conteudo.update({\n 'messengerError': 'Chave já foi utilizada!',\n })\n\n else:\n self.conteudo.update({\n 'messengerError': 'Chave não associada a nenhum Cadastro!',\n })\n else:\n self.conteudo.update({\n 'messengerError': 'Nenhuma chave informada!',\n })\n return render(request, self.template, self.conteudo)","sub_path":"automaticPort/views/cadastro.py","file_name":"cadastro.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"302400858","text":"import cv2\nimport numpy as np\nimport os\n\n\nnow_dir = os.path.dirname(os.path.abspath(__file__))\ntarget = os.path.join(now_dir, \"input.mp4\")\nresult = os.path.join(now_dir, \"result.avi\")\n\nanime_cascade_path = os.path.join(now_dir, 'lbpcascade_animeface.xml')\nanime_cascade = cv2.CascadeClassifier(anime_cascade_path)\n\nmovie = cv2.VideoCapture(target)\n\nfps = movie.get(cv2.CAP_PROP_FPS)\nheight = movie.get(cv2.CAP_PROP_FRAME_HEIGHT)\nwidth = movie.get(cv2.CAP_PROP_FRAME_WIDTH)\nall_frame = movie.get(cv2.CAP_PROP_FRAME_COUNT)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter(result, fourcc, fps, (int(width), int(height)))\n\nwhile movie.isOpened():\n ret, frame = movie.read()\n if ret:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = anime_cascade.detectMultiScale(gray)\n \n for x, y, w, h in faces:\n cv2.rectangle(frame, (x,y), (x+w,y+h), color=(64, 64, 64), thickness=3)\n cv2.imshow('frames', frame)\n out.write(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n else:\n break\nprint('finish')\nout.release()\nmovie.release()\ncv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"570573437","text":"import logging\nimport numpy\nimport os\nimport progressbar\nimport sys\nimport subprocess\nimport time\nimport videoparser\n\nfrom PIL import Image, ImageDraw\n\n\ndef usage():\n print(\"usage: main.py video_file output.png\")\n\n\nif len(sys.argv) != 3:\n usage()\n sys.exit(1)\n\n# Declare a blank image and prepare it for drawing\nout_image = Image.new(\"RGB\", (1920, 1080))\nout_image_draw = ImageDraw.Draw(out_image, \"RGB\")\nout_frame = 0\n\nvideo = videoparser.Video(sys.argv[1], frames_wanted=out_image.width,\n downscale=2 ** 4)\n\nout_path = sys.argv[2]\n\nwidgets = [\n progressbar.Percentage(), ' ',\n progressbar.Bar(), ' ',\n progressbar.AdaptiveTransferSpeed(unit=\"f\"), ' ',\n progressbar.ETA()\n]\n\nbar = progressbar.ProgressBar(max_value=out_image.width, widgets=widgets)\n\nwith bar:\n while True:\n frame = video.next_frame()\n if frame == None:\n break\n\n out_image_draw.line([(out_frame, 0), (out_frame, 
out_image.height)],\n                            fill=\"rgb\" + str(frame.average_color()))\n        out_frame += 1\n\n        bar.update(out_frame)\n\nout_image.save(out_path)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"4624173","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport frames.supplier as ui\n\nSQL = \"SELECT * FROM suppliers ORDER BY company ASC;\"\n\nclass UI(tk.Toplevel):\n    def __init__(self, parent,):\n        super().__init__(name=\"suppliers\")\n\n        self.parent = parent\n        self.protocol(\"WM_DELETE_WINDOW\", self.on_cancel)\n        self.attributes(\"-topmost\", True)\n        self.table = \"suppliers\"\n        self.field = \"supplier_id\"\n        self.obj = None\n        self.init_ui()\n        self.nametowidget(\".\").engine.center_me(self)\n\n    def init_ui(self):\n\n        w = self.nametowidget(\".\").engine.get_frame(self, 2)\n        self.lstItems = self.nametowidget(\".\").engine.get_listbox(w,)\n        self.lstItems.bind(\"<<ListboxSelect>>\", self.on_item_selected)\n        self.lstItems.bind(\"<Double-1>\", self.on_item_activated)\n        w.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, padx=5, pady=5)\n\n        w = self.nametowidget(\".\").engine.get_frame(self, 2)\n        self.nametowidget(\".\").engine.get_add_edit_cancel(self, w)\n        w.pack(fill=tk.BOTH, expand=1)\n\n    def on_open(self,):\n\n        msg = \"{0}\".format(self.winfo_name().title())\n        self.title(msg)\n        self.set_values()\n\n    def set_values(self):\n\n        self.lstItems.delete(0, tk.END)\n        index = 0\n        self.dict_items = {}\n\n        rs = self.nametowidget(\".\").engine.read(True, SQL, ())\n\n        if rs:\n            self.lstItems.delete(0, tk.END)\n\n            for i in rs:\n                s = \"{:}\".format(i[1])\n                self.lstItems.insert(tk.END, s)\n                if i[2] != 1:\n                    self.lstItems.itemconfig(index, {\"bg\":\"light gray\"})\n                self.dict_items[index] = i[0]\n                index += 1\n\n    def on_add(self, evt):\n\n        self.obj = ui.UI(self)\n        self.obj.on_open()\n\n    def on_edit(self, evt):\n        self.on_item_activated()\n\n    def on_item_selected(self, evt):\n\n        if self.lstItems.curselection():\n            index = self.lstItems.curselection()[0]\n            pk = self.dict_items.get(index)\n            self.selected_item = self.nametowidget(\".\").engine.get_selected(self.table,\n                                                                            self.field,\n                                                                            pk)\n    def on_item_activated(self, evt=None):\n\n        if self.lstItems.curselection():\n            index = self.lstItems.curselection()[0]\n            self.obj = ui.UI(self, index)\n            self.obj.on_open(self.selected_item,)\n\n        else:\n            messagebox.showwarning(self.nametowidget(\".\").title(),\n                                   self.nametowidget(\".\").engine.no_selected,\n                                   parent=self)\n\n    def on_cancel(self, evt=None):\n        if self.obj is not None:\n            self.obj.destroy()\n        self.destroy()\n","sub_path":"frames/suppliers.py","file_name":"suppliers.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"471108781","text":"import json\nimport os\nimport random\nimport sys\nimport time\nimport datetime\n\nimport requests\nfrom retrying import retry\n\ncur_path = os.path.split(os.path.realpath(__file__))[0]\nfile_path = os.path.abspath(os.path.join(cur_path, \"..\"))\nsys.path.insert(0, file_path)\n\nfrom announcement.spider_configs import (\n    SPIDER_MYSQL_HOST, SPIDER_MYSQL_PORT, SPIDER_MYSQL_USER, SPIDER_MYSQL_PASSWORD, SPIDER_MYSQL_DB,\n    R_SPIDER_MYSQL_HOST, R_SPIDER_MYSQL_PORT, R_SPIDER_MYSQL_USER, R_SPIDER_MYSQL_PASSWORD,\n    R_SPIDER_MYSQL_DB, )\nfrom announcement.sql_base import Connection\n\n\n_dt = datetime.datetime.combine(datetime.datetime.now(), datetime.time.min).strftime('%Y-%m-%d')\nfile_name 
= f'/Users/furuiyang/sntdataprocess/announcement/stock_single_{_dt}.json'\n\n_map = {\n '601399': '601268',\n '601360': '601313',\n '600010': 'B06475',\n '600029': '136054',\n '601975': '600087',\n '600267': '122427',\n '600322': '122421',\n '600352': '136206',\n '600369': '122404',\n '600748': '122362',\n '600770': '122088',\n '600801': '122188',\n '601607': '600849',\n '600963': '122257',\n '001872': '000022',\n '001914': '000043',\n '601377': '122304',\n '601688': '122388',\n '601008': '136092',\n '601238': '113009',\n '601038': '122253',\n '600011': '122008',\n '600030': '122385',\n '600035': '122378',\n '600068': '136427',\n '600098': '122157',\n '600157': '122267',\n '600185': '110030',\n '600210': '122043',\n '600226': '122254',\n '600236': '122192',\n '600256': '122102',\n '600360': '122134',\n '600376': '122377',\n '600383': '122488',\n '600518': '122354',\n '600575': '122235',\n '600635': '122112',\n '600648': '136666',\n '600657': '136294',\n '600660': '136566',\n '600743': '122370',\n '600755': '110033',\n '600765': '122104',\n '600804': '122132',\n '600823': '136303',\n '600859': '122190',\n '600869': '136317',\n '601788': '143155',\n}\n\n\nclass JuchaoCounter(object):\n def __init__(self):\n self.api = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'\n self.headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Host': 'www.cninfo.com.cn',\n 'Origin': 'http://www.cninfo.com.cn',\n 'Pragma': 'no-cache',\n 'Referer': 'http://www.cninfo.com.cn/new/commonUrl/pageOfSearch?url=disclosure/list/search&lastPage=index',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36',\n }\n self._spider_conn = Connection(\n host=SPIDER_MYSQL_HOST,\n port=SPIDER_MYSQL_PORT,\n user=SPIDER_MYSQL_USER,\n password=SPIDER_MYSQL_PASSWORD,\n database=SPIDER_MYSQL_DB,\n )\n\n self._r_spider_conn = Connection(\n host=R_SPIDER_MYSQL_HOST,\n port=R_SPIDER_MYSQL_PORT,\n user=R_SPIDER_MYSQL_USER,\n password=R_SPIDER_MYSQL_PASSWORD,\n database=R_SPIDER_MYSQL_DB,\n )\n\n @property\n def codes_map(self):\n codes_map = {}\n sql = '''select code, OrgId from juchao_codemap; '''\n res = self._spider_conn.query(sql)\n for r in res:\n codes_map[r.get('OrgId')] = r.get(\"code\")\n return codes_map\n\n def launch(self, org_id: str):\n codes_map = self.codes_map\n org_id_lst = sorted(list(codes_map.keys()))\n position = org_id_lst.index(org_id)\n print(\"position\", position)\n for org_id in org_id_lst[position:]:\n code = codes_map.get(org_id)\n stock_str = ','.join([code, org_id])\n print(stock_str)\n self.get_count(stock_str)\n\n @retry(stop_max_attempt_number=3)\n def get_count(self, stock_str: str):\n time.sleep(random.randint(1, 3)/10)\n post_data = {\n 'pageNum': 1,\n 'pageSize': 30,\n 'column': 'szse',\n 'tabName': 'fulltext',\n 'plate': '',\n 'stock': stock_str,\n 'searchkey': '',\n 'secid': '',\n 'category': '',\n 'trade': '',\n 'seDate': '',\n 'sortName': '',\n 'sortType': '',\n 'isHLtitle': True,\n }\n resp = requests.post(self.api, headers=self.headers, data=post_data, timeout=3)\n if resp.status_code == 200:\n text = resp.text\n py_datas = json.loads(text)\n total_ann = py_datas.get(\"totalAnnouncement\")\n total_rec = py_datas.get(\"totalRecordNum\")\n record = {\n \"SecuCode\": stock_str.split(',')[0],\n \"TotalAnn\": 
total_ann,\n \"TotalRec\": total_rec,\n }\n # print(record)\n # 记录单个 code 的结束\n with open(file_name, \"a\") as f:\n f.write(json.dumps(record)+'\\n')\n\n def check_count_bydate(self):\n \"\"\"\n 根据时间去计算每天的个数\n \"\"\"\n sql = '''select SecuCode, count(*) from juchao_ant2 group by AntTime ; '''\n\n def check_count(self):\n sql = '''select SecuCode, count(*) from juchao_ant2 group by SecuCode ; '''\n ret = self._r_spider_conn.query(sql)\n exist_map = {}\n for r in ret:\n exist_map[r.get('SecuCode')] = r.get(\"count(*)\")\n # print(exist_map)\n\n web_map = {}\n with open(file_name, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n r = json.loads(line)\n web_map[r.get(\"SecuCode\")] = r.get(\"TotalAnn\")\n\n no_lst = []\n big_delta_lst = []\n small_delta_lst = []\n\n for code in web_map:\n _count = 0\n if code in _map:\n _code = _map.get(code)\n _count = exist_map.get(_code)\n\n exist_num = exist_map.get(code)\n exist_num += _count\n\n web_num = web_map.get(code)\n if not exist_num:\n no_lst.append(code)\n elif exist_num != web_num:\n delta = web_num - exist_num\n if delta > 0:\n big_delta_lst.append((code, delta))\n # big_delta_lst.append(code)\n else:\n # small_delta_lst.append((code, delta))\n small_delta_lst.append(code)\n\n # print(no_lst)\n # print(len(no_lst))\n\n print(big_delta_lst)\n print(len(big_delta_lst))\n\n # print(small_delta_lst)\n # print(len(small_delta_lst))\n\n\nif __name__ == '__main__':\n # org_id = os.environ.get(\"ORG\", '9900000062')\n # JuchaoCounter().launch(org_id)\n\n JuchaoCounter().check_count()\n\n\n'''\nselect secucode, AntTime, AntTitle from juchao_ant2 where SecuCode = '603056' order by AntTime desc, AntId desc limit 60, 30; \nselect secucode, AntTime, AntTitle from juchao_ant2 where SecuCode = '600340' and categoryname = '中介报告' order by AntTime desc, AntId desc limit 0, 30; \n\n'''\n","sub_path":"scripts/count_checker.py","file_name":"count_checker.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"478047145","text":"from flask import render_template, abort, Blueprint\nfrom ara import models, utils\n\nhost = Blueprint('host', __name__)\n\n\n@host.route('/')\ndef host_summary():\n hosts = models.Host.query.order_by(models.Host.name)\n stats = utils.get_summary_stats(hosts, 'host_id')\n\n return render_template('host_summary.html',\n hosts=hosts,\n stats=stats)\n\n\n@host.route('/')\ndef show_host(host):\n try:\n host = models.Host.query.filter_by(name=host).one()\n except models.NoResultFound:\n abort(404)\n\n stats = utils.get_host_playbook_stats(host)\n\n return render_template('host.html', host=host, stats=stats)\n","sub_path":"ara/views/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"47043539","text":"import json\nimport os\n\nfrom jupyterhub.app import app_log\n\n\n_custom_config_cache = []\n_custom_config_last_update = 0\n_custom_config_file = os.environ.get(\"CUSTOM_CONFIG_PATH\")\n\n\ndef get_custom_config():\n global _custom_config_cache\n global _custom_config_last_update\n\n # Only update custom_config, if it has changed on disk\n try:\n last_change = os.path.getmtime(_custom_config_file)\n if last_change > _custom_config_last_update:\n app_log.debug(\"Load custom config file.\")\n with open(_custom_config_file, \"r\") as f:\n ret = json.load(f)\n _custom_config_last_update = last_change\n _custom_config_cache = 
ret\n\n from .logs.extra_handlers import create_extra_handlers\n\n app_log.debug(\"Update Logger\")\n create_extra_handlers()\n except:\n app_log.exception(\"Could not load custom config file\")\n else:\n return _custom_config_cache\n\n\n_reservations_cache = {}\n_reservations_last_update = 0\n_reservations_file = os.environ.get(\"RESERVATIONS_FILE\")\n\n\ndef get_reservations():\n global _reservations_cache\n global _reservations_last_update\n try:\n # Only update reservations, if it has changed on disk\n last_change = os.path.getmtime(_reservations_file)\n if last_change > _reservations_last_update:\n app_log.debug(\"Load reservation file\")\n with open(_reservations_file, \"r\") as f:\n ret = json.load(f)\n _reservations_last_update = last_change\n _reservations_cache = ret\n except:\n app_log.exception(\"Could not load reservation file\")\n finally:\n return _reservations_cache\n\n\n_incidents_cache = {}\n_incidents_last_update = 0\n_incidents_file = os.environ.get(\"INCIDENTS_FILE\")\n\n\ndef get_incidents():\n global _incidents_cache\n global _incidents_last_update\n try:\n last_change = os.path.getmtime(_incidents_file)\n if last_change > _incidents_last_update:\n app_log.debug(\"Load incidents file\")\n with open(_incidents_file, \"r\") as f:\n ret = json.load(f)\n _incidents_last_update = last_change\n _incidents_cache = ret\n except:\n app_log.exception(\"Could not load incidents file\")\n return _incidents_cache\n","sub_path":"custom/4.0.0/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7814794","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2017. 10. 25.\r\n@author: HyechurnJang\r\n'''\r\n\r\nimport os\r\nimport uuid\r\nimport types\r\nimport jinja2\r\nfrom pygics import Lock, ContentType, export, rest\r\n\r\ndef createVid(): return 'v-' + str(uuid.uuid4())\r\n\r\nclass Tag(dict):\r\n \r\n def __init__(self, tag, **attrs):\r\n dict.__init__(self, tag=tag, elems=[], attrs={})\r\n for key, val in attrs.items(): self['attrs'][key.lower()] = val\r\n \r\n def __len__(self, *args, **kwargs):\r\n return self['elems'].__len__()\r\n \r\n def __str__(self):\r\n ret = '<%s' % self['tag']\r\n for k, v in self['attrs'].items(): ret += ' %s=\"%s\"' % (k, v)\r\n ret += '>'\r\n for elem in self['elems']: ret += unicode(elem)\r\n ret += '' % self['tag']\r\n return ret\r\n \r\n #===========================================================================\r\n # Attributes (a.k.a : event & links)\r\n #===========================================================================\r\n def attr(self, **attrs):\r\n own_attrs = self['attrs']\r\n for key, val in attrs.items():\r\n key_low = key.lower()\r\n own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low in own_attrs else val\r\n return self\r\n \r\n def __lshift__(self, opts):\r\n if opts: return self.attr(**opts)\r\n return self\r\n \r\n def baseattr(self, **attrs):\r\n own_attrs = self['attrs']\r\n for key, val in attrs.items():\r\n key_low = key.lower()\r\n own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low in own_attrs else val\r\n return self\r\n #===========================================================================\r\n # Elements (a.k.a : children)\r\n #===========================================================================\r\n def html(self, *elems):\r\n for elem in elems: self['elems'].append(elem)\r\n return self\r\n \r\n def __rshift__(self, elems):\r\n if elems:\r\n 
if isinstance(elems, tuple) or isinstance(elems, list): return self.html(*elems)\r\n else: return self.html(*(elems,))\r\n return self\r\n\r\nclass Cache:\r\n \r\n _CACHE_DATA = {}\r\n \r\n @classmethod\r\n def getCache(cls, file_path):\r\n if file_path in Cache._CACHE_DATA:\r\n return Cache._CACHE_DATA[file_path]\r\n else:\r\n class CacheDescriptor(types.FileType):\r\n def __init__(self, file_path):\r\n with open(file_path, 'rb') as fd: self.data = fd.read()\r\n self.file_path = file_path\r\n @property\r\n def name(self): return self.file_path\r\n def read(self): return self.data\r\n def close(self): return None\r\n if not os.path.exists(file_path): raise Exception('could not find %s' % file_path)\r\n cache = CacheDescriptor(file_path)\r\n Cache._CACHE_DATA[file_path] = cache\r\n return cache\r\n\r\nclass Static:\r\n \r\n def __init__(self, url, static='static', cache=True):\r\n mod_path, mod_name = pmd()\r\n mod_name = mod_name.replace('.', '/')\r\n \r\n if not url: self.url = '/%s' % mod_name\r\n elif url[0] == '/': self.url = url\r\n else: self.url = '/%s/%s' % (mod_name, url)\r\n if not static: self.static = mod_path\r\n elif static[0] == '/': self.static = '%s%s' % (mod_path, static)\r\n else: self.static = '%s/%s' % (mod_path, static)\r\n self._static_cache = cache\r\n \r\n @export('GET', self.url)\r\n def send_static(req, *argv):\r\n path = '/'.join(argv)\r\n file_path = '%s/%s' % (self.static, path)\r\n if self._static_cache: return Cache.getCache(file_path)\r\n else:\r\n if not os.path.exists(file_path): raise Exception('could not find %s' % path)\r\n return open(file_path, 'rb')\r\n \r\n\r\nclass Page:\r\n \r\n def __init__(self,\r\n url=None,\r\n title='',\r\n favicon='/page/static/image/favicon.ico',\r\n static='static',\r\n cache=True):\r\n mod_path, mod_name = pmd()\r\n mod_name = mod_name.replace('.', '/')\r\n \r\n if not url: self.url = '/%s' % mod_name\r\n elif url[0] == '/': self.url = url\r\n else: self.url = '/%s/%s' % (mod_name, url)\r\n \r\n if static[0] == '/': static = static[1:]\r\n if not static: self.static_path = mod_path\r\n else: self.static_path = '%s/%s' % (mod_path, static)\r\n \r\n if not static: self.static_url = '%s/static' % self.url\r\n elif self.url != '/': self.static_url = '%s/%s' % (self.url, static)\r\n else: self.static_url = '/%s' % static\r\n \r\n self._page_init = '/page/empty'\r\n self._page_view = {}\r\n \r\n self._page_title = title\r\n self._page_favicon = favicon\r\n self._page_meta_list = []\r\n self._page_css_list = []\r\n self._page_js_list = []\r\n self._page_head = ''\r\n self._page_header = ''\r\n self._page_footer = ''\r\n \r\n self._page_cache = cache\r\n self._page_cache_data = {}\r\n \r\n self._page_updated = True\r\n self._page_lock = Lock()\r\n self._page_rendered = None\r\n with open(pwd() + '/template.html') as fd: self._page_template = jinja2.Template(fd.read())\r\n \r\n @export('GET', self.url, content_type=ContentType.TextHtml)\r\n def send_template(req): return self.__render__()\r\n \r\n @export('GET', self.static_url)\r\n def send_static(req, *argv):\r\n path = '/'.join(argv)\r\n file_path = '%s/%s' % (self.static_path, path)\r\n if self._page_cache: return Cache.getCache(file_path)\r\n else:\r\n if not os.path.exists(file_path): raise Exception('could not find %s' % path)\r\n return open(file_path, 'rb')\r\n \r\n def __render__(self):\r\n if self._page_updated:\r\n self._page_lock.on()\r\n self._page_rendered = self._page_template.render({\r\n 'init' : self._page_init,\r\n 'title' : self._page_title,\r\n 'favicon' : 
self._page_favicon,\r\n 'meta_list' : self._page_meta_list,\r\n 'css_list' : self._page_css_list,\r\n 'js_list' : self._page_js_list,\r\n 'head' : unicode(self._page_head),\r\n 'header' : unicode(self._page_header),\r\n 'footer' : unicode(self._page_footer)\r\n })\r\n self._page_rendered = self._page_rendered.encode('utf-8')\r\n self._page_updated = False\r\n self._page_lock.off()\r\n return self._page_rendered\r\n else:\r\n return self._page_rendered\r\n \r\n def meta(self, *meta_list):\r\n self._page_lock.on()\r\n for meta in meta_list:\r\n meta_str = ' '\r\n for key, val in meta.items(): meta_str += '%s=\"%s\"' % (key, val)\r\n self._page_meta_list.append(meta_str)\r\n self._page_updated = True\r\n self._page_lock.off()\r\n return self\r\n \r\n def css(self, *css_list):\r\n self._page_lock.on()\r\n for css in css_list: self._page_css_list.append(css)\r\n self._page_updated = True\r\n self._page_lock.off()\r\n return self\r\n \r\n def js(self, *js_list):\r\n self._page_lock.on()\r\n for js in js_list: self._page_js_list.append(js)\r\n self._page_updated = True\r\n self._page_lock.off()\r\n return self\r\n \r\n def head(self, html):\r\n self._page_lock.on()\r\n self._page_head = html\r\n self._page_updated = True\r\n self._page_lock.off()\r\n return self\r\n \r\n def header(self, html):\r\n self._page_lock.on()\r\n self._page_header = html\r\n self._page_updated = True\r\n self._page_lock.off()\r\n return self\r\n \r\n def footer(self, html):\r\n self._page_lock.on()\r\n self._page_footer = html\r\n self._page_updated = True\r\n self._page_lock.off()\r\n return self\r\n \r\n #===========================================================================\r\n # View Definition\r\n #===========================================================================\r\n def init(self, method='r', **opts):\r\n \r\n def wrapper(func):\r\n crud = method.lower()\r\n id = createVid()\r\n name = func.__name__\r\n url = '%s/%s' % (self.url if self.url != '/' else '', func.__name__)\r\n self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}\r\n \r\n if 'r' in crud or '*' in crud:\r\n @rest('GET', url, **opts)\r\n def get(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n if 'c' in crud or '*' in crud:\r\n @rest('POST', url, **opts)\r\n def post(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n if 'u' in crud or '*' in crud:\r\n @rest('PUT', url, **opts)\r\n def put(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n if 'd' in crud or '*' in crud:\r\n @rest('DELETE', url, **opts)\r\n def delete(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n self._page_lock.on()\r\n self._page_init = url\r\n self._page_updated = True\r\n self._page_lock.off()\r\n \r\n return wrapper\r\n \r\n def view(self, method='r', **opts):\r\n \r\n def wrapper(func):\r\n crud = method.lower()\r\n id = createVid()\r\n name = func.__name__\r\n url = '%s/%s' % (self.url if self.url != '/' else '', name)\r\n self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}\r\n \r\n if 'r' in crud or '*' in crud:\r\n @rest('GET', url, **opts)\r\n def get(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n if 'c' in crud or '*' in crud:\r\n @rest('POST', url, **opts)\r\n def post(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n if 'u' in crud or '*' in crud:\r\n @rest('PUT', url, **opts)\r\n def put(req, *argv, **kargs): return func(req, *argv, **kargs)\r\n \r\n if 'd' in crud or '*' in crud:\r\n @rest('DELETE', url, **opts)\r\n def delete(req, *argv, 
**kargs): return func(req, *argv, **kargs)\r\n        \r\n        return wrapper\r\n    \r\n    def getView(self, name, *path):\r\n        view = self._page_view[name]\r\n        return {'id' : view['id'], 'name' : name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']}\r\n    \r\n    #===========================================================================\r\n    # View Functions\r\n    #===========================================================================\r\n    def patch(self, name, *argv):\r\n        view = self._page_view[name]\r\n        id = view['id']\r\n        url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']\r\n        return Tag('script', Id=id, Page_Url=url).html(\r\n            '$(document).ready(function(){page_patch(\"%s\")});' % id\r\n        )\r\n    \r\n    def __call__(self, name, *argv):\r\n        return self.patch(name, *argv)\r\n    \r\n    def reload(self, *names):\r\n        reload = []\r\n        for name in names:\r\n            reload.append(self._page_view[name]['id'])\r\n        return {'reload' : reload}\r\n    \r\n    def __getitem__(self, names):\r\n        if isinstance(names, tuple) or isinstance(names, list): return self.reload(*names)\r\n        else: return self.reload(*(names,))\r\n    \r\n    #===========================================================================\r\n    # Interactive Functions\r\n    #===========================================================================\r\n    \r\n    class InteractiveTag(Tag):\r\n        \r\n        def __init__(self, view, *argv):\r\n            Tag.__init__(self, 'script')\r\n            self._view_id = view['id']\r\n            self._view_url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']\r\n            self._event_id = createVid()\r\n            self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view' : self._view_id}\r\n        \r\n        def event(self): return self._event_attr\r\n    \r\n    def get(self, name, *argv):\r\n        \r\n        class Get(Page.InteractiveTag):\r\n            \r\n            def __init__(self, view, *argv):\r\n                Page.InteractiveTag.__init__(self, view, *argv)\r\n                self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' % self._event_id)\r\n        \r\n        return Get(self._page_view[name], *argv)\r\n    \r\n    def post(self, name, *argv):\r\n        \r\n        class Post(Page.InteractiveTag):\r\n            \r\n            def __init__(self, view, *argv):\r\n                Page.InteractiveTag.__init__(self, view, *argv)\r\n                self._data_id = self._event_id + '-data'\r\n                self._data_attr = {'class' : self._data_id}\r\n                self._event_attr['page_data'] = self._data_id\r\n                self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});' % self._event_id)\r\n            \r\n            def data(self): return self._data_attr\r\n        \r\n        return Post(self._page_view[name], *argv)\r\n    \r\n    def put(self, name, *argv):\r\n        \r\n        class Put(Page.InteractiveTag):\r\n            \r\n            def __init__(self, view, *argv):\r\n                Page.InteractiveTag.__init__(self, view, *argv)\r\n                self._data_id = self._event_id + '-data'\r\n                self._data_attr = {'class' : self._data_id}\r\n                self._event_attr['page_data'] = self._data_id\r\n                self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id)\r\n            \r\n            def data(self): return self._data_attr\r\n        \r\n        return Put(self._page_view[name], *argv)\r\n    \r\n    def delete(self, name, *argv):\r\n        \r\n        class Delete(Page.InteractiveTag):\r\n            \r\n            def __init__(self, view, *argv):\r\n                Page.InteractiveTag.__init__(self, view, *argv)\r\n                self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id)\r\n        \r\n        return Delete(self._page_view[name], 
*argv)\r\n\r\n#===============================================================================\r\n# Page Statics\r\n#===============================================================================\r\nPage(url='/page', cache=True)\r\n\r\n@export('GET', '/page/empty', content_type=ContentType.AppJson)\r\ndef empty_page(req): return {'error' : 'Page Empty'}\r\n\r\n@export('GET', '/favicon.ico', content_type=ContentType.AppStream)\r\ndef default_favicon(req, *argv): return Cache.getCache(pwd() + '/static/image/favicon.ico')\r\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":15216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"386125152","text":"#!/usr/bin/env python\n\nimport json\nimport sys\nimport argparse\nfrom io import IOBase\n\n\ndef problem_from_json(input: IOBase):\n    problem = json.load(input)\n\n    print(len(problem['hole']))\n    for [x, y] in problem['hole']:\n        print(f'{x} {y}')\n\n    figure = problem['figure']\n    print(len(figure['edges']))\n    for [a, b] in figure['edges']:\n        print(f'{a} {b}')\n    print(len(figure['vertices']))\n    for [x, y] in figure['vertices']:\n        print(f'{x} {y}')\n\n    print(problem['epsilon'])\n\n\ndef solution_from_json(input: IOBase):\n    solution = json.load(input)\n\n    print(len(solution['vertices']))\n    for [x, y] in solution['vertices']:\n        print(f'{x} {y}')\n\n\ndef read_pairs(input: IOBase):\n    v = []\n    for _ in range(int(input.readline())):\n        v.append(list(map(int, input.readline().split())))\n    return v\n\n\ndef problem_to_json(input: IOBase):\n    obj = {}\n\n    obj['hole'] = read_pairs(input)\n\n    figure = {}\n    figure['edges'] = read_pairs(input)\n    figure['vertices'] = read_pairs(input)\n    obj['figure'] = figure\n\n    epsilon = int(input.readline())\n    obj['epsilon'] = epsilon\n\n    print(json.dumps(obj))\n\n\ndef solution_to_json(input: IOBase):\n    obj = {}\n    obj['vertices'] = read_pairs(input)\n\n    print(json.dumps(obj))\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description='''Problem/solution conversion tool.\n        By default, converts problem JSON into problem text.''')\n    parser.add_argument('-s', '--solution', action='store_true',\n                        help='Convert solution instead of problem')\n    parser.add_argument('-t', '--text', action='store_true',\n                        help='Convert text into JSON instead of the opposite')\n\n    args = parser.parse_args()\n\n    if args.solution:\n        if args.text:\n            solution_to_json(sys.stdin)\n        else:\n            solution_from_json(sys.stdin)\n    else:\n        if args.text:\n            problem_to_json(sys.stdin)\n        else:\n            problem_from_json(sys.stdin)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tools/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"141503884","text":"# SPDX-License-Identifier: Apache-2.0\n# SPDX-FileCopyrightText: © 2019- d3p Developers and their Assignees\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nMAJOR_VERSION = 0\nMINOR_VERSION = 2\nPATCH_VERSION 
= 0\nEXT_VERSION = \"\"\n\nEXT_VERSION_SUFFIX = f\"-{EXT_VERSION}\" if len(EXT_VERSION) > 0 else \"\"\n\nVERSION = f\"{MAJOR_VERSION}.{MINOR_VERSION}.{PATCH_VERSION}{EXT_VERSION_SUFFIX}\"\n","sub_path":"d3p/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"472474534","text":"\n'''\n- load dataset to be evaluated\n- load trained model\n- load function to compute CAM\n- Save images in a folder\n\n'''\n\n# define functions\n\n# import the necessary packages\nfrom sklearn.metrics import recall_score, precision_score, f1_score, confusion_matrix, roc_curve, roc_auc_score\nfrom tensorflow.keras.models import model_from_json, load_model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications.densenet import preprocess_input\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport cv2\n\n\n\ndef build_pretrained_model(model_file, isTrain=True):\n with open(model_file + '.json', 'r') as f:\n model = model_from_json(f.read())\n model.load_weights(model_file + '_weights.best.hdf5')\n if isTrain is False:\n model.trainable = False\n return model\n\n\n\nclass GradCAM:\n\tdef __init__(self, model, classIdx, layerName=None):\n\t\t# store the model, the class index used to measure the class\n\t\t# activation map, and the layer to be used when visualizing\n\t\t# the class activation map\n\t\tself.model = model\n\t\tself.classIdx = classIdx\n\t\tself.layerName = layerName\n\n\t\t# if the layer name is None, attempt to automatically find\n\t\t# the target output layer\n\t\tif self.layerName is None:\n\t\t\tself.layerName = self.find_target_layer()\n\n\tdef find_target_layer(self):\n\t\t# attempt to find the final convolutional layer in the network\n\t\t# by looping over the layers of the network in reverse order\n\t\tfor layer in reversed(self.model.layers):\n\t\t\t# check to see if the layer has a 4D output\n\t\t\tif len(layer.output_shape) == 4:\n\t\t\t\treturn layer.name\n\n\t\t# otherwise, we could not find a 4D layer so the GradCAM\n\t\t# algorithm cannot be applied\n\t\traise ValueError(\"Could not find 4D layer. 
Cannot apply GradCAM.\")\n\n\tdef compute_heatmap(self, image, eps=1e-8):\n\t\t# construct our gradient model by supplying (1) the inputs\n\t\t# to our pre-trained model, (2) the output of the (presumably)\n\t\t# final 4D layer in the network, and (3) the output of the\n\t\t# softmax activations from the model\n\t\tgradModel = Model(\n\t\t\tinputs=[self.model.inputs],\n\t\t\toutputs=[self.model.get_layer(self.layerName).output, \n\t\t\t\tself.model.output])\n\n\t\t# record operations for automatic differentiation\n\t\twith tf.GradientTape() as tape:\n\t\t\t# cast the image tensor to a float-32 data type, pass the\n\t\t\t# image through the gradient model, and grab the loss\n\t\t\t# associated with the specific class index\n\t\t\tinputs = tf.cast(image, tf.float32)\n\t\t\t(convOutputs, predictions) = gradModel(inputs)\n\t\t\tloss = predictions[:, self.classIdx]\n\n\t\t# use automatic differentiation to compute the gradients\n\t\tgrads = tape.gradient(loss, convOutputs)\n\n\t\t# compute the guided gradients\n\t\tcastConvOutputs = tf.cast(convOutputs > 0, \"float32\")\n\t\tcastGrads = tf.cast(grads > 0, \"float32\")\n\t\tguidedGrads = castConvOutputs * castGrads * grads\n\n\t\t# the convolution and guided gradients have a batch dimension\n\t\t# (which we don't need) so let's grab the volume itself and\n\t\t# discard the batch\n\t\tconvOutputs = convOutputs[0]\n\t\tguidedGrads = guidedGrads[0]\n\n\t\t# compute the average of the gradient values, and using them\n\t\t# as weights, compute the ponderation of the filters with\n\t\t# respect to the weights\n\t\tweights = tf.reduce_mean(guidedGrads, axis=(0, 1))\n\t\tcam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)\n\n\t\t# grab the spatial dimensions of the input image and resize\n\t\t# the output class activation map to match the input image\n\t\t# dimensions\n\t\t(w, h) = (image.shape[2], image.shape[1])\n\t\theatmap = cv2.resize(cam.numpy(), (w, h))\n\n\t\t# normalize the heatmap such that all values lie in the range\n\t\t# [0, 1], scale the resulting values to the range [0, 255],\n\t\t# and then convert to an unsigned 8-bit integer\n\t\tnumer = heatmap - np.min(heatmap)\n\t\tdenom = (heatmap.max() - heatmap.min()) + eps\n\t\theatmap = numer / denom\n\t\theatmap = (heatmap * 255).astype(\"uint8\")\n\n\t\t# return the resulting heatmap to the calling function\n\t\treturn heatmap\n\n\tdef overlay_heatmap(self, heatmap, image, alpha=0.5,\n\t\tcolormap=cv2.COLORMAP_VIRIDIS):\n\t\t# apply the supplied color map to the heatmap and then\n\t\t# overlay the heatmap on the input image\n\t\theatmap = cv2.applyColorMap(heatmap, colormap)\n\t\toutput = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)\n\n\t\t# return a 2-tuple of the color mapped heatmap and the output,\n\t\t# overlaid image\n\t\treturn (heatmap, output)\n \n#############################################################################\n\n\n\"\"\" LOAD DATASET \"\"\"\n\nroot_path = '/scratch/parceirosbr/bigoilict/share/GeoFacies/jose/.datasets/'\n\ntest_dataset = 'test_df_covid_softmax_offset'\ntest_df = pd.read_csv(test_dataset)\n\nmodel_file = 'densenet121_covid19_softmax_weights_offset'\n\n#############################################################################\n# define image size\nIMG_SIZE = (224, 224)\ncolumns = ['normal', 'pneumonia', 'covid']\n\nload_datagen = ImageDataGenerator()\nload_test_data = load_datagen.flow_from_dataframe(dataframe=test_df,\n x_col='Image Path',\n y_col=columns,\n drop_duplicates=False,\n class_mode=\"raw\",\n target_size=IMG_SIZE,\n 
color_mode = 'rgb',\n batch_size=len(test_df))\n # batch_size=64)\n\ntest_X, test_Y = next(load_test_data) # one big batch\ntest_X = preprocess_input(test_X)\n\nmodel = build_pretrained_model(model_file, isTrain=False)\n\npreds = model.predict(test_X)\n# compute global metrics\npreds = np.argmax(preds, axis=-1)\ny_test = np.argmax(test_Y, axis=-1)\n\n\nprint('precision', precision_score(y_test, preds, average=None))\nprint('recall', recall_score(y_test, preds, average=None))\nprint('f1_score', f1_score(y_test, preds, average=None))\n\nprint('CM', confusion_matrix(y_test, preds))\n\nimages_dir = 'images/'\n\nfor index in range(len(test_Y)):\n \n image = test_X[index][np.newaxis]\n org = image[0]\n org = np.uint8(255*(org-org.min())/(org.max()-org.min()))\n # org = np.tile(image[0]*255, [1,1,3]).astype('uint8') # to rgb\n preds = model.predict(image)\n i = np.argmax(preds[0])\n # print('reference,', test_Y[index], 'prediction', np.around(preds[0], decimals=2), preds[0].sum())\n # print('reference,', test_Y[index], 'prediction', np.around(preds[0], decimals=2), preds[0].sum())\n # print(columns)\n # print('image', index)\n \n image_output_filename = images_dir + columns[np.argmax(test_Y[index])] + '_' + str(index) + '.jpg'\n \n # initialize our gradient class activation map and build the heatmap\n cam = GradCAM(model, i)\n heatmap = cam.compute_heatmap(image)\n \n # resize the resulting heatmap to the original input image dimensions\n # and then overlay heatmap on top of the image\n heatmap = cv2.resize(heatmap, (org.shape[1], org.shape[0]))\n \n (heatmap, output) = cam.overlay_heatmap(heatmap, org, alpha=0.7)\n output = np.hstack([org, heatmap, output])\n output = cv2.resize(output, (org.shape[1]*3, org.shape[0]))\n cv2.imwrite(image_output_filename, output)\n\n\n","sub_path":"Scripts/COVID-19 Classifier/compute_cam_from_data_frame.py","file_name":"compute_cam_from_data_frame.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"311193190","text":"from json import JSONDecodeError\n\nfrom imgurpython import ImgurClient\nfrom imgurpython.helpers.error import ImgurClientError\n\nfrom screenshots_to_url.constants import CLIENT_ID, CLIENT_SECRET, IMGUR, ACCESS_TOKEN, REFRESH_TOKEN\nimport json\n\nfrom screenshots_to_url.system_utils import is_file\n\n\ndef get_access_keys_dict():\n if is_file(IMGUR):\n with open(IMGUR) as json_file:\n try:\n return json.load(json_file)\n except JSONDecodeError as e:\n print(e)\n return {}\n else:\n return {}\n\n\ndef write_access_keys(access_keys):\n with open(IMGUR, 'w') as outfile:\n json.dump(access_keys, outfile, indent=2)\n\n\ndef access_keys_are_configured(access_keys):\n return CLIENT_ID in access_keys and access_keys[CLIENT_ID] is not None \\\n and CLIENT_SECRET in access_keys and access_keys[CLIENT_SECRET] is not None\n\n\ndef tokens_are_configured(access_keys):\n return ACCESS_TOKEN in access_keys and access_keys[ACCESS_TOKEN] is not None \\\n and REFRESH_TOKEN in access_keys and access_keys[REFRESH_TOKEN] is not None\n\n\ndef extract_client_keys(access_keys_dict, read_new_keys):\n if not read_new_keys and access_keys_are_configured(access_keys_dict):\n client_id = access_keys_dict[CLIENT_ID]\n client_secret = access_keys_dict[CLIENT_SECRET]\n else:\n print('Please provide Access Keys below:')\n client_id = input('Enter client_id:')\n client_secret = input('Enter client_secret:')\n\n return client_id, client_secret\n\n\ndef extract_tokens(access_keys_dict, 
client, read_new_token):\n if not read_new_token and tokens_are_configured(access_keys_dict):\n access_token = access_keys_dict[ACCESS_TOKEN]\n refresh_token = access_keys_dict[REFRESH_TOKEN]\n else:\n authorization_url = client.get_auth_url('pin')\n print(\"Go to the following URL: {0}\".format(authorization_url))\n pin = input('Enter pin code:')\n\n credentials = client.authorize(pin, 'pin')\n access_token = credentials['access_token']\n refresh_token = credentials['refresh_token']\n\n return access_token, refresh_token\n\n\ndef get_imgur_client(read_new_client=False, read_new_token=False) -> ImgurClient:\n access_keys_dict = get_access_keys_dict()\n client_id, client_secret = extract_client_keys(access_keys_dict, read_new_client)\n\n try:\n client = ImgurClient(client_id=client_id, client_secret=client_secret)\n except ImgurClientError as e:\n print(e)\n print('Invalid credentials supplied. Let\\'s try again :)')\n return get_imgur_client(read_new_client=True, read_new_token=read_new_token)\n\n access_token, refresh_token = extract_tokens(access_keys_dict, client, read_new_token)\n\n print(\"Authentication successful! Here are the details:\")\n print(\" Access token: {0}\".format(access_token))\n print(\" Refresh token: {0}\".format(refresh_token))\n client.set_user_auth(access_token, refresh_token)\n\n write_access_keys({\n CLIENT_ID: client_id,\n CLIENT_SECRET: client_secret,\n ACCESS_TOKEN: access_token,\n REFRESH_TOKEN: refresh_token\n })\n\n return client\n","sub_path":"screenshots_to_url/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"366685252","text":"import pandas as pd\nfrom math import isnan\n\ndef getMedian(array):\n if (len(array) % 2):\n return(array[len(array) // 2])\n else:\n return((array[len(array) // 2] + array[len(array) // 2 - 1]) / 2)\n\ndef playWithAverages(data, cols):\n average, median, maximum, minimum = {}, {}, {}, {}\n for i in cols:\n a, validValues, values = 0, [], 0\n for j in data[i]:\n if (not isnan(j)):\n a += j\n values += 1\n validValues += [j]\n median[i] = getMedian(validValues)\n a /= values\n average[i] = a\n maximum[i] = max(validValues)\n minimum[i] = min(validValues)\n print(maximum)\n\n for i in cols:\n for j in range(len(data[i])):\n #if (isnan(data[i][j])):\n #data.set_value(j, i, average[i])\n #data[i][j] = median[i]\n data.set_value(j, i, (data[i][j] - minimum[i]) / (maximum[i] - minimum[i]))\n\ndef removeRows(data, cols):\n for c in cols:\n data = data[pd.isna(data[c]) != True]\n return data\n\ndata = pd.read_csv('diabetes_dataset.csv').astype('float')\ncols = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',\n 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']\n#\n# data = removeRows(data, cols)\n# print(data)\n# data.to_csv(\"diabetes_removeAllInvalidRows_dataset.csv\")\n#data = pd.read_csv('diabetes_app.csv').astype('float')\nplayWithAverages(data, cols)\ndata = removeRows(data, cols)\nprint(data)\ndata.to_csv(\"diabetes_averageNormByMinMaxRemovingMissingValues_dataset.csv\", index=False)\n","sub_path":"01_Preprocessing/fill.py","file_name":"fill.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"365301241","text":"from django.conf.urls import url, include\nfrom taxa import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework.renderers import 
TemplateHTMLRenderer, BrowsableAPIRenderer, JSONRenderer\n\n\nurlpatterns = [\n # Browsable API\n url(r'^api/$', views.api_root),\n url(r'^api/ancestors/(?P[0-9]+)/$', views.AncestorsView.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_ancestors'),\n url(r'^api/detail/(?P[0-9]+)/$', views.TaxonDetail.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer))),\n url(r'^api/common-names/(?P[0-9]+)/$', views.CommonNameList.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_common_names'),\n url(r'^api/common-name/(?P[0-9]+)/$', views.CommonNameDetail.as_view()),\n url(r'^api/lineage/(?P\\d*)/$', views.LineageView.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_lineage'),\n url(r'^api/children/(?P\\d*)/$',\n views.ChildrenView.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)),\n name='api_children'),\n url(r'^api/taxon-write/$', views.TaxonWrite.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_taxon_write'),\n url(r'^api/info-write/$', views.InfoWrite.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_info_write'),\n url(r'^api/cn-write/$', views.CommonNameWrite.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_cn_write'),\n url(r'^api/rank-list/$', views.RankList.as_view(renderer_classes=(BrowsableAPIRenderer, JSONRenderer)), name='api_rank_list'),\n\n url(r'^api/alphabetical-genera/(?P[A-Za-z]+)/(?P[A-Z])/$', views.AlphabeticalGeneraList.as_view(), name='api_genera_list_default'),\n\n url(r'^api/category-list/(?P[A-Z][A-Z])/$', views.CategoryList.as_view(), name='api_category_list_default'),\n\n url(r'^api/description-format-write/$', views.create_taxon_authority, name='api_descrip_write'),\n url(r'^api/get-taxa-group-list/$', views.get_taxa_group_list, name='api_get_taxa_group_list'),\n url(r'^api/get-distributions/$', views.get_distributions_from_polygon, name='api_get_taxa_in_polygon'),\n url(r'^get-images/(?P\\d*)/$', views.get_images_for_species, name='api_get_images'),\n\n # HTML template views\n url(r'^detail/(?P[0-9]+)/$', views.TaxonDetail.as_view(renderer_classes=(TemplateHTMLRenderer, JSONRenderer)), name='taxa_detail'),\n url(r'^list/$', views.TaxonListView.as_view(renderer_classes=(TemplateHTMLRenderer, JSONRenderer, BrowsableAPIRenderer)), name='search_autocomplete'),\n url(r'^lineage/(?P\\d*)/$', views.LineageView.as_view(renderer_classes=(TemplateHTMLRenderer,)), name='lineage_pk'),\n url(r'^distribution/(?P\\d*)/$', views.DistributionList.as_view(renderer_classes=(TemplateHTMLRenderer, JSONRenderer)), name='distribution_list_polygon'),\n url(r'^distribution/point/(?P\\d*)/$', views.PointDistributionList.as_view(renderer_classes=(TemplateHTMLRenderer, JSONRenderer, BrowsableAPIRenderer)), name='distribution_list'),\n]\nurlpatterns = format_suffix_patterns(urlpatterns)","sub_path":"taxa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"626966922","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport sys\nimport math\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pytest\nimport logging\n\nimport tvm\nfrom tvm import relay, transform\nfrom tvm import rpc\nfrom tvm import te\nfrom tvm.contrib import utils as util\nfrom tvm.relay.op.contrib import vsi_npu\nfrom tvm.contrib.download import download_testdata\nfrom tvm.testing import assert_allclose\n\n# TODO(Sven) : this is workaround for new version of TVM\nfrom tvm.contrib import graph_executor as graph_runtime\nfrom tvm.contrib import graph_executor as runtime\n\nfrom tflite_deeplab import *\nimport tflite\n\nlogging.basicConfig(level=logging.INFO)\n\nnp.set_printoptions(threshold=np.inf)\nRPC_HOST = os.environ[\"RPC_HOST\"]\nRPC_PORT = int(os.environ[\"RPC_PORT\"])\nCROSS_CC = os.environ[\"CROSS_CC\"]\nROOTFS = os.environ[\"ROOTFS\"]\nlib_name = os.environ[\"MOD_NAME\"]\nlib_path = os.environ[\"MOD_PATH\"]\n\nremote = rpc.connect(RPC_HOST, RPC_PORT)\n\ndef get_ref_result(shape, model_path,image_data,input_tensor_name,DTYPE):\n inputs = input_tensor_name\n tflite_model_buf = open(model_path, \"rb\").read()\n model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n\n mod, params = relay.frontend.from_tflite(\n model, shape_dict={inputs: shape}, dtype_dict={inputs: DTYPE}\n )\n target = \"llvm\"\n with tvm.transform.PassContext(opt_level=3,\n disabled_pass=[\"AlterOpLayout\"]):\n lib = relay.build(mod, target, params=params)\n ctx = tvm.cpu()\n cpu_mod = graph_runtime.GraphModule(lib[\"default\"](ctx))\n cpu_mod.set_input(inputs, tvm.nd.array(image_data))\n\n # if True:\n # logging.info(\"Evaluate graph runtime inference cost on CPU\")\n # ftimer = cpu_mod.module.time_evaluator(\"run\", ctx, number=1, repeat=1)\n # # Measure in millisecond.\n # prof_res = np.array(ftimer().results) * 1000\n # logging.info(\"CPU runtime inference time (std dev): {} ms ({} ms)\".format\n # (round(np.mean(prof_res), 2), round(np.std(prof_res), 2)))\n\n cpu_mod.run()\n ref_out = cpu_mod.get_output(0)\n return ref_out.asnumpy()\n\ndef compile_tflite_model(shape,model_path,input_data,input_tensor_name,DTYPE):\n inputs = input_tensor_name\n tflite_model_buf = open(model_path, \"rb\").read()\n model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n # Parse TFLite model and convert it to a Relay module\n mod, params = relay.frontend.from_tflite(\n model, shape_dict={inputs: shape}, dtype_dict={inputs: DTYPE}\n )\n logging.info(mod.astext())\n tmp_path = util.tempdir()\n lib_name = \"model.so\"\n lib_path = tmp_path.relpath(lib_name)\n\n kwargs = {}\n kwargs[\"cc\"] = CROSS_CC\n target = \"llvm -device=arm_cpu -mtriple=aarch64-linux-gnu\"\n kwargs[\"options\"] = [\"-L\"+ROOTFS+\"/lib \", \"-L\" + ROOTFS+\"/usr/lib \",\n \"-L\" + ROOTFS+\"/usr/lib/aarch64-poky-linux/9.2.0 \", \"--sysroot=\" + ROOTFS]\n with transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n mod = vsi_npu.partition_for_vsi_npu(mod, params)\n lib = relay.build(mod, target, params=params)\n lib.export_library(lib_path, fcompile=False, **kwargs)\n\n # remote = rpc.connect(RPC_HOST, RPC_PORT)\n # 
remote.upload(lib_path + lib_name)\n # lib = remote.load_module(lib_name)\n\n remote.upload(lib_path)\n lib = remote.load_module(lib_name)\n ctx = remote.cpu()\n\n rt_mod = graph_runtime.GraphModule(lib[\"default\"](ctx))\n\n # ctx = remote.cpu()\n # rt_mod = graph_runtime.GraphModule(lib[\"default\"](ctx))\n rt_mod.set_input(**input_data)\n rt_mod.run()\n rf_output = rt_mod.get_output(0)\n return rf_output.asnumpy()\n\ndef print_top5(input):\n k = 5\n n = input.flatten()\n n_arg0 = np.argpartition(n, -k)[-k:]\n n = n[n_arg0]\n n_arg1 = np.argsort(n)[::-1]\n n_arg0 = n_arg0[n_arg1]\n for i in range(k):\n logging.info(\"{} : {}\".format(n_arg0[i], n[n_arg1[i]]))\n\ndef get_model(model_list, model_name):\n for model in model_list:\n if (model['name'] == model_name):\n return model\n\nmodel_list = [\n {'name': 'mobilenet_v1_1.0_224_quant.tflite',\n 'shape': (1, 224, 224, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'mobilenet_v2_quant.tflite',\n 'shape': (1, 224, 224, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'mobilenet_v3_quant.tflite',\n 'shape': (1, 512, 512, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'inception_v1_224_quant.tflite',\n 'shape': (1, 224, 224, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'inception_v2_224_quant.tflite',\n 'shape': (1, 224, 224, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'inception_v3_299_quant.tflite',\n 'shape': (1, 299, 299, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'efficientnet-edgetpu-S_quant.tflite',\n 'shape': (1, 224, 224, 3),\n 'input_tensor_name': 'images',\n 'dtype': \"uint8\"},\n {'name': 'deeplab_v3_plus_quant.tflite',\n 'shape': (1, 513, 513, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'deeplabv3_mnv2_pascal_quant.tflite',\n 'shape': (1, 513, 513, 3),\n 'input_tensor_name': 'MobilenetV2/MobilenetV2/input',\n 'dtype': \"uint8\"},\n {'name': 'unet.M865SW-632.tflite',\n 'shape': (1, 120, 160, 1),\n 'input_tensor_name': 'input_1',\n 'dtype': \"float32\"},\n {'name': 'deeplab_v3_plus_quant.tflite',\n 'shape': (1, 513, 513, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'yolov3-tiny_uint8_acuity.tflite',\n 'shape': (1, 416, 416, 3),\n 'input_tensor_name': 'input_0:out0',\n 'dtype': \"uint8\"},\n {'name': 'yolov3_uint8_acuity.tflite',\n 'shape': (1, 608, 608, 3),\n 'input_tensor_name': 'input_0:out0',\n 'dtype': \"uint8\"},\n {'name': 'unet_quant.tflite',\n 'shape': (1, 384, 384, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'srgan_quant.tflite',\n 'shape': (1, 128, 128, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'pynet_quant.tflite',\n 'shape': (1, 96, 96, 3),\n 'input_tensor_name': 'input',\n 'dtype': \"uint8\"},\n {'name': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_quant.M865SW-669.tflite',\n 'shape': (1, 320, 320, 3),\n 'input_tensor_name': 'serving_default_input:0',\n 'dtype': \"uint8\"},\n]\n\ndef process(model_name):\n model = get_model(model_list, model_name)\n logging.info(model)\n shape = model['shape']\n input_tensor_name = model['input_tensor_name']\n DTYPE = model['dtype']\n # wait=input(\"press any key and continue...\")\n\n path = \"./\"\n img = Image.open(path + \"space_shuttle_224x224.jpg\")\n img = img.resize((shape[1], shape[2]))\n n1 = np.array(img)\n #n1 = n1[:, :, 0] # pick one channel\n #n1 = np.broadcast_to(n1, (4, 224, 224, 3)) # batch the image\n n1 = n1.reshape(shape)\n 
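# NOTE: the cast below always yields uint8 input; DTYPE is \"float32\" for the\n # unet entry in model_list above, so for such models n1.astype(DTYPE) would\n # preserve the declared input type (left here as a hedged suggestion only):\n # input_data = n1.astype(DTYPE)\n 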
input_data = n1.astype(np.uint8)\n\n # input_data = np.ones(shape, DTYPE)\n\n vsi_input_data = {\n input_tensor_name: tvm.nd.array(input_data),\n }\n ref_output = get_ref_result(shape, model_name, input_data, input_tensor_name, DTYPE)\n vsi_output = compile_tflite_model(shape, model_name, vsi_input_data, input_tensor_name, DTYPE)\n\n if DTYPE == \"uint8\":\n tolerance = 5\n else:\n tolerance = 1e-3\n\n logging.info(\"top5 of ref:\")\n print_top5(ref_output)\n\n logging.info(\"top5 of vsi:\")\n print_top5(vsi_output)\n\n result = abs(vsi_output.astype(\"float32\") - ref_output.astype(\"float32\"))\n np.savetxt(path + model_name +\"_ref_output.txt\", ref_output.flatten(), fmt='%.3f')\n np.savetxt(path + model_name + \"_vsi_output.txt\", vsi_output.flatten(), fmt='%.3f')\n np.savetxt(path + model_name + \"_diff.txt\", result.flatten(), fmt='%.3f')\n\n assert_allclose(vsi_output, ref_output, rtol=0, atol=tolerance)\n\n\ndef test_mobilenet_v1_224_quant():\n model = 'mobilenet_v1_1.0_224_quant.tflite'\n process(model)\n\ndef test_mobilenet_v2_quant():\n model = 'mobilenet_v2_quant.tflite'\n process(model)\n\ndef test_mobilenet_v3_quant():\n model = 'mobilenet_v3_quant.tflite'\n process(model)\n\ndef test_inception_v1_224_quant():\n model = 'inception_v1_224_quant.tflite'\n process(model)\n\ndef test_inception_v2_224_quant():\n model = 'inception_v2_224_quant.tflite'\n process(model)\n\ndef test_inception_v3_299_quant():\n model = 'inception_v3_299_quant.tflite'\n process(model)\n\ndef test_efficientnet_edgetpu_S_quant():\n model = 'efficientnet-edgetpu-S_quant.tflite'\n process(model)\n\ndef test_deeplab_v3_plus_quant():\n model = 'deeplab_v3_plus_quant.tflite'\n process(model)\n\ndef test_deeplabv3_mnv2_pascal_quant():\n model = 'deeplabv3_mnv2_pascal_quant.tflite'\n process(model)\n\ndef test_unet():\n model = 'unet.M865SW-632.tflite'\n process(model)\n\ndef test_deeplab_v3_plus_quant():\n model = 'deeplab_v3_plus_quant.tflite'\n process(model)\n\ndef test_yolov3_tiny_uint8_acuity():\n model = 'yolov3-tiny_uint8_acuity.tflite'\n process(model)\n\ndef test_unet_quant():\n model = 'unet_quant.tflite'\n process(model)\n\ndef test_srgan_quant():\n model = 'srgan_quant.tflite'\n process(model)\n\ndef test_pynet_quant():\n model = 'pynet_quant.tflite'\n process(model)\n\ndef test_ssd_mobilenet_v2_fpnlite_320x320_coco17_quant():\n model = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_quant.M865SW-669.tflite'\n process(model)\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n","sub_path":"tests/python/contrib/test_vsi_npu/test_vsi_tflite_model_all.py","file_name":"test_vsi_tflite_model_all.py","file_ext":"py","file_size_in_byte":10343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"303183525","text":"from rest_framework.permissions import BasePermission\n\n\nclass InstructorOnly(BasePermission):\n def has_permission(self, request, view):\n if request.method == \"GET\":\n return True\n\n if request.method == \"POST\" or request.method == \"PUT\":\n return request.user.is_superuser\n","sub_path":"courses/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"605699114","text":"# 索引和聚合\n\nfrom pymongo import MongoClient\n\nconn = MongoClient('localhost', 27017)\ndb = conn.stu\nmy_set = db.class4\n\n# index = my_set.ensure_index('name')\n# print(index)\n\n# 复合索引\n# index = my_set.ensure_index([('name', 1), 
('King', -1)])\n# print(index)\n\n# 唯一索引和稀疏索引\n# col = db.class0\n# index = col.ensure_index('name', unique=True)\n# print(index)\n# index = my_set.ensure_index('King_name', sparse=True)\n# print(index)\n\n# 删除索引\n# my_set.drop_index('name_1')\nmy_set.drop_indexes()\n\nfor x in my_set.list_indexes():\n print(x)\n\nconn.close()\n","sub_path":"mongodb/mongo_index.py","file_name":"mongo_index.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134613321","text":"def mergeSort(array): #this function is needed by countInversionsRecursevely()\n\n def merge(a1,a2):\n n = len(a1)+len(a2)\n i=0\n j=0\n result = []\n for k in range(n):\n if a1[i]<a2[j]:\n result.append(a1[i])\n i+=1\n else:\n result.append(a2[j])\n j+=1\n if i>=len(a1):\n for l in range(j,len(a2)):\n result.append(a2[l])\n break\n elif j>=len(a2):\n for l in range(i,len(a1)):\n result.append(a1[l])\n break\n return result\n \n\n if len(array)<2:\n return array\n\n if len(array)==2:\n return [min(array),max(array)]\n\n return merge(mergeSort(array[0:len(array)/2]),mergeSort(array[len(array)/2:]))\n\ndef bruteForce(array):\n count = 0\n for i in range(len(array)):\n for j in range(i,len(array)):\n if array[i] > array[j]:\n count +=1\n return count\n\ndef countInversionsRecursevely(array):\n\n def countMergeInversions(a1,a2):\n\n result = 0\n i,j=0,0\n for k in range(len(a1)+len(a2)+1):\n if a1[i]>a2[j]:\n result+=1\n j+=1\n else:\n i+=1\n result+=j\n if i>=len(a1):\n result-=j\n break\n elif j>=len(a2):\n result += (len(a1)-i-1)*len(a2)\n break\n return result\n\n if len(array) < 100:\n return bruteForce(array)\n \n x = countInversionsRecursevely(array[0:len(array)//2])\n y = countInversionsRecursevely(array[len(array)//2:])\n srtX = mergeSort(array[0:len(array)//2])\n srtY = mergeSort(array[len(array)//2:])\n z = countMergeInversions(srtX,srtY)\n\n return x+y+z\n \n\n'''\n#pre-defined arrays for easy test\na = []\nb = [1]\nc = [1,3,2]\nd = [1,5,65,3,9,4,2,5]\n\nimport time\nimport random\narray = []\nfor i in range(100000):\n array.append(random.randint(0,99))\n\n#start_time = time.time()\n#a = bruteForce(array)\n#print time.time() - start_time, \"seconds\"\n\nstart_time = time.time()\nb = countInversionsRecursevely(array)\nprint time.time() - start_time, \"seconds\"\n\n'''\n","sub_path":"Week01/counting_inversions.py","file_name":"counting_inversions.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"587823149","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 7 13:25:42 2018\n\n@author: tobi\n\"\"\"\nimport subprocess\nimport re\nimport time\nimport collections\nimport pandas as pd\nimport seaborn as sns\nfrom collections import Counter\nfrom string_match_scoring import *\nfrom negation_detection import *\nfrom nltk.stem import PorterStemmer\nimport pickle\nimport pprint\nimport os\nimport json\nimport os\nimport jsonlines\nfrom random import choices, seed\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nport = PorterStemmer()\n\n\ndz = ['[Pathologic Function]',\n '[Congenital Abnormality]',\n '[Disease or Syndrome]',\n '[Congenital Abnormality,Disease or Syndrome]',\n '[Pathologic Function]',\n '[Injury or Poisoning]',\n '[Anatomical Abnormality]'\n ]\n\ncancer = ['[Neoplastic Process]']\n\nfindings = ['[Finding]', #, '[Sign or Symptom]', #'[Functional Concept]'\n ]\n\nanat = ['[Body Part, Organ, or Organ Component]',\n '[Body Location or Region]',\n '[Body Space or Junction]'\n 
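# the bracketed strings in these lists are UMLS semantic-type labels exactly as\n # MetaMap prints them; parse_mappings() below substring-matches them against\n # each candidate mapping line\n 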
]\n\ndevice = ['[Medical Device]',\n '[Therapeutic or Preventive Procedure]']\n\nmodifier = ['[Quantitative Concept]',\n '[Qualitative Concept]',\n '[Temporal Concept]',\n '[Spatial Concept]',\n '[Functional Concept]'\n ]\n\ndiagnostics = ['[Diagnostic Procedure]'\n ]\n\nphrase_filter = ['clear.', 'normal.','normal', 'the', 'noted', 'at the', 'present','.',\n 'no abnormality','clear', 'detected.','this finding', 'findings', 'RIGHT',\n 'On the right','clinical indication','detected','history','finding','',\n 'heart size','cardiac size','little nos','vascular','right chest',\n 'impression','right rib cage','identified','sensitive','ipf','unchanged',\n 'worsened','margin','n','disease','related vertebral','related',\n 'depend','fat pad','negative','symptoms','communicated','resolution',\n 'result','chronic cough','case','anomalous','drainage','oriented',\n 'sudden onset pain','sneezing','height reduction','incarcerated',\n 'slightly worse','new finding','evaluation finding','ill','cough',\n 'orientated','pain','senile','sharp','suspicious','small lesion',\n 'either','condition','right lung field','detect','diagnosis',\n 'pleural','parenchyma','pulmonary','chest wall', #'enlarged',\n 'minimal change','thoracic spine','thoracic inlet','lesion','day 7',\n 'upper lobe','thickening','difficult','lungs maybe','issue',\n 'treatment','pulmonary abnormality','atypical','abnormality',\n 'signs','lungs','shadowing','indicated','presence','lung parenchyma',\n 'weight loss','smoker','weightloss','worsening','hilum',\n 'identified posterior chest','center','mediastinal','not well',\n 'resolved','change in size','change in size cardiac','fever',\n 'antibiotic therapy','extent of disease','line','mixed',\n 'interstitial pattern parenchyma','ex-smoker','inflammatory lingula',\n '50','interstitial','density','mixed interstitial','confusion',\n 'little nos lower','regional change','trauma','pulmonary mediastinal',\n 'mediastinum','displacement','level evidence','patient problem',\n 'single chest','fat pads','heart','limited chest','location',\n 'limited','prior surgery','superimposition','diversion',\n 'chest', 'convex to right', 'cardio- mediastinal', 'prominent',\n 'symmetrical','shadow','deviate','breathing exposure',\n ]\n\nsurg_filter = ['infiltration','retraction', 'enlarged','reduction','overlapping',\n 'gastric bubble']\n\nmissed_findings = ['osteoarthritic','cardiomediastinal','bronchovascular', 'peribronchial',\n 'fibrotic', 'consolidation', 'fracture', 'osteoarthritic', 'cardiomegaly', \n 'elevation', 'congestion', 'tortuosity', 'impingement', 'thickening',\n 'displacement', 'Lump','cystic','focal lesion', 'density', #'enlarged', 'enlargement',\n 'dilatation','curvilinear','flattening', 'unfolded', 'unfolding',\n 'opacification','volume loss','Patchy','hyperinflated',#'interstitial',\n 'inflammatory','Osteoporotic','mediastinal','infective','convexity',\n 'osteoporotic','coarsening','curvilinear','prominent','intrapulmonary',\n 'lesion'\n ]\n\nmm_blind = ['coarsening','hyperinflated','enlarged','peribronchial','unfolding',\n 'osteopenic','fullness','congested','congestion','prominent',#'shadow',\n 'deviating', 'scoliotic','curvature','convex', 'consolidation',\n 'density', 'opacities'\n ]\n\nmissed_dz = ['fracture', 'cardiomegaly','Pectus excavatum','ectatic','ectasia',\n 'scoliosis','hilar mass'\n ]\n\nwrong_term = ['pathology','Clinical', 'history','clear','normal','radiograph',\n 'X-ray','MAS','mas','follow-up','resolution','result','uncertain',\n 
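# generic report words that MetaMap still maps to concepts; check_terms()\n # returns '' for any extracted term whose words appear in this list\n 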
'finding','pain','inspiration','symptoms','radiotherapy','identified'\n ]\n\nsection_header = [\"Clinical Notes:\",\"Indication:\", \"Clinical Indication\",\n \"Clinical history:\",\"Indications:\", 'History:', \n 'Clinical History:','History\\n',\"Clinical Details:\",\n 'Clinical Information:'\n ]\n\nsummary = ['conclusion:','impression:','conclusion\\n','impression\\n'\n ]\n\nfinding_header = ['Findings:', 'Report:']\n\n# port.stem(i)\ndz = [i.lower() for i in dz]\nfindings = [i.lower() for i in findings]\ndevice = [i.lower() for i in device]\ncancer = [i.lower() for i in cancer]\nanat = [i.lower() for i in anat]\nmodifier = [i.lower() for i in modifier]\ndiagnostics = [i.lower() for i in diagnostics]\nmm_blind = [port.stem(i.lower()) for i in mm_blind]\n\nphrase_filter = [i.lower() for i in phrase_filter] #port.stem(i).lower()\nmissed_findings = [i.lower() for i in missed_findings]\nmissed_dz = [i.lower() for i in missed_dz]\nwrong_term = [i.lower() for i in wrong_term]\nsection_header = [i.lower() for i in section_header]\n\ncardiomegaly = ['cardiomegaly','enlarg heart','cardiac enlarg'] #0\nconsolidation = ['consolid'] #1\nopacity = ['opac'] #2\npneumonia = ['pneumoni','infect','lobe inflammat','air bronchogram'] #3\nedema = ['oedema'] #4\nnodule = ['carcinoma','mass','nodul','malignant','granuloma','tumor',\n 'space-occupying lesion'] #5\nemphysema = ['emphysema','copd','coad','chronic obstructive airway disease',\n 'hyperinflat','overinflat',\n ] #6\nfibrosis = ['fibros','scar'] #7\npleural_abnormality = ['effusion','pleura','empyema',] #8\npneumothorax = ['pneumothorax'] #9\nsurgery_foreign_object = ['prosthet','otomy','surgr'] #10\nhernia = ['hernia','air fluid level'] #11\natelectasis = ['atelecta'] #12\nbronchus_inflammation = ['bronchia'] #13\naorta_tortuosity_or_widen = ['tortuos','widen','ectati','aortic unfold','aortic ectas'\n ] #14\naorta_calcification = ['aort calcific','atheroma'] #15\nspine_fracture = ['spine fracture','osteopenia','height reduction','osteoporosis',\n 'crush fracture','degenerative','spondylos','wedging',\n 'osteophytes',#'loss height',\n ] #16\ncurvature_spine = ['kyphosis','scolios','curvature','scheuermann disease','lordosis'\n ] #17\nrib_abnormal = ['rib fracture','pectus','narrow chest','costochondrit',\n 'intercostal','clavicul fracture', 'stern fracture'\n ] #18\ncyst = ['cyst','air fluid level'] #19\ninfiltration = ['infiltrat','air bronchogram',] #20\ndiaphragm_abnormality = ['eventration','raised diaphragm','elevat hemidiaphragm',\n ] #21\ncollapse = ['collapse'] #22\nother_important = ['hematoma','air fluid level','tuberculosis','congestive cardiac failure','ccf',\n 'left ventricular failure','pulmonary congestion','lymphadenopathy',\n 'lymphadenopathy','diversion','compression','developmental variant',\n 'calcified lymph nodes','valve disease aorta',\n 'disease aorta','breast','adenopathy',\"schmorl's node\",\n 'ventricular failure','mastectomy'\n ] #23\nother = [] #24\n\n\ncardiomegaly = [i.lower() for i in cardiomegaly]\nconsolidation = [i.lower() for i in consolidation]\nopacity = [i.lower() for i in opacity]\npneumonia = [i.lower() for i in pneumonia]\nedema = [i.lower() for i in edema]\nnodule = [i.lower() for i in nodule]\nemphysema = [i.lower() for i in emphysema]\nfibrosis = [i.lower() for i in fibrosis]\npleural_abnormality = [i.lower() for i in pleural_abnormality] \npneumothorax = [i.lower() for i in pneumothorax]\nsurgery_foreign_object = [i.lower() for i in surgery_foreign_object]\nhernia = [i.lower() for i in 
hernia]\natelectasis = [i.lower() for i in atelectasis]\nbronchus_inflammation = [i.lower() for i in bronchus_inflammation]\naorta_tortuosity_or_widen = [i.lower() for i in aorta_tortuosity_or_widen]\naorta_calcification = [i.lower() for i in aorta_calcification]\nspine_fracture = [i.lower() for i in spine_fracture]\ncurvature_spine = [i.lower() for i in curvature_spine]\nrib_abnormal = [i.lower() for i in rib_abnormal]\ncyst = [i.lower() for i in cyst]\ninfiltration = [i.lower() for i in infiltration]\ndiaphragm_abnormality = [i.lower() for i in diaphragm_abnormality]\ncollapse = [i.lower() for i in collapse]\nother_important = [i.lower() for i in other_important]\nother = []\n\n\nreference_terms = [\n cardiomegaly,\n consolidation,\n opacity,\n pneumonia,\n edema,\n nodule,\n emphysema,\n fibrosis,\n pleural_abnormality, \n pneumothorax ,\n surgery_foreign_object,\n hernia,\n atelectasis,\n bronchus_inflammation,\n aorta_tortuosity_or_widen,\n aorta_calcification,\n spine_fracture,\n curvature_spine,\n rib_abnormal,\n cyst,\n infiltration,\n diaphragm_abnormality,\n collapse,\n other_important,\n other\n ]\n\nreference_grp_list = [\n 'cardiomegaly',\n 'consolidation',\n 'opacity',\n 'pneumonia',\n 'edema',\n 'nodule',\n 'emphysema',\n 'fibrosis',\n 'pleural_abnormality', \n 'pneumothorax',\n 'surgery_foreign_object',\n 'hernia',\n 'atelectasis',\n 'bronchus_inflammation',\n 'aorta_tortuosity_or_widen',\n 'aorta_calcification',\n 'spine_fracture',\n 'curvature_spine',\n 'rib_abnormal',\n 'cyst',\n 'infiltration',\n 'diaphragm_abnormality',\n 'collapse',\n 'other_important',\n 'other'\n ]\n\n\ndef create_refDict(reference_terms_dict, reference_grp_list,reference_terms):\n for i, ref_group_name in enumerate(reference_grp_list):\n for j, ref_term in enumerate(reference_terms[i]):\n for ref in ref_term.split():\n reference_terms_dict.update({ref:make_ngrams(ref)})\n return reference_terms_dict\n\n\n \ndef make_ngrams(ref):\n n_gramz = []\n while len(ref) < 4:\n ref += '_'\n for N in range(2,5):\n n_gramz.extend(char_ngram(ref, N))\n return set(n_gramz)\n\n\nreference_terms_dict = {}\nreference_terms_dict = create_refDict(reference_terms_dict, reference_grp_list,reference_terms)\n\nreference_terms_dict['cardiomegaly']\nreference_terms_dict.keys()\n\nreference_terms_dict_main = {\n 'cardiomegaly':cardiomegaly,\n 'consolidation':consolidation,\n 'opacity':opacity,\n 'pneumonia':pneumonia,\n 'edema':edema,\n 'nodule':nodule,\n 'emphysema':emphysema,\n 'fibrosis':fibrosis,\n 'pleural_abnormality':pleural_abnormality, \n 'pneumothorax':pneumothorax,\n 'surgery_foreign_object':surgery_foreign_object,\n 'hernia':hernia,\n 'atelectasis':atelectasis,\n 'bronchus_inflammation':bronchus_inflammation,\n 'aorta_tortuosity_or_widen':aorta_tortuosity_or_widen,\n 'aorta_calcification':aorta_calcification,\n 'spine_fracture':spine_fracture,\n 'curvature_spine': curvature_spine ,\n 'rib_abnormal':rib_abnormal,\n 'cyst':cyst,\n 'infiltration':infiltration,\n 'diaphragm_abnormality':diaphragm_abnormality,\n 'collapse':collapse,\n 'other_important':other_important,\n 'other':other\n }\n\n\n\n\ndef out_metamap(report01, metamap=metamap, options=options):\n echo = subprocess.Popen(['echo', report01.encode('utf-8')], \n stdout=subprocess.PIPE,\n )\n \n mtmap = subprocess.Popen([metamap, options],\n stdin=echo.stdout,\n stdout=subprocess.PIPE,\n )\n end_of_pipe = mtmap.stdout\n \n for line in end_of_pipe:\n line = line.decode('utf-8').strip()\n print(line)\n\n\ndef find_char(s, ch):\n return [i for i, ltr in 
enumerate(s) if ltr == ch]\n\ndef find_any_term(s, term_list):\n return [s.lower().find(i.lower()) for i in term_list if i.lower() in s.lower()][0]\n\ndef report_findings(report, sigs=section_header, mark=2):\n idx_header = find_any_term(report, sigs)\n idxs = find_char(report, '\\n')\n start_idx = [i for i in idxs if i > idx_header][mark]\n return report[start_idx:]\n\n\ndef parse_report(pID, report_n, metamap=metamap, options=options, fast_mode=False):\n mappings, output, chunks = [], [], []\n #start = time.time()\n if fast_mode:\n if any(header in report_n.lower() for header in summary):\n report_n = report_findings(report_n,sigs=summary,mark=0)\n else:\n report_n = report_findings(report_n)\n elif any(header in report_n.lower() for header in section_header):\n report_n = report_findings(report_n)\n #print(\"\\nDone Formatting report {} in {:.4f}s\".format(pID, time.time()-start))\n echo = subprocess.Popen(['echo', report_n.encode('utf-8')], \n stdout=subprocess.PIPE,\n )\n \n mtmap = subprocess.Popen([metamap, options],\n stdin=echo.stdout,\n stdout=subprocess.PIPE,\n )\n end_of_pipe = mtmap.stdout\n #print(\"\\nMM Extraction Done in {:.4f}s\".format(time.time()-start))\n for line in end_of_pipe: \n line = line.decode('utf-8').lower().strip()\n if line[:2].isdigit():\n mappings.append(\" \".join(line.split()[1:]))\n elif line[:2]=='me':\n output.append(mappings)\n mappings = []\n elif (line[:1] == '') or (line[:1] == '\\n'):\n output.append(mappings)\n chunks.append(output)\n mappings = []\n output = []\n else:\n continue\n output.append(mappings)\n chunks.append(output)\n #print(\"\\nDone Parsing report {} in {:.4f}s\".format(pID, time.time()-start))\n return chunks\n\n\ndef extract_term(mapping):\n return re.split('[\\(?\\[{1}]', mapping)[0].strip().lower()\n\ndef check_neg(term):\n if term[:2] =='n ':\n return True\n else:\n return False\n\n\ndef concat_terms(mappings, sigs=[dz+missed_dz], wrong = wrong_term):\n term_concat = ''\n for mapping in mappings:\n if any(sig in mapping for sig in sigs):\n term = check_terms(mapping)\n term_concat += term + ' '\n return term_concat.strip()\n\n\ndef check_terms(mapping, wrong = wrong_term):\n term = extract_term(mapping)\n # check if negated or wrong terms\n if check_neg(term):\n return ''\n if any(t_ in wrong for t_ in term.lower().split()):\n return ''\n if len(term) > 0:\n if term[0].isdigit():\n return ''\n elif not term[0].isalpha():\n term = re.sub(r'([^\\w])+', '', term)\n return term\n\n\n\ndef parse_mappings(mappings, diz_pos, obs, neo, dev, \\\n sigs=[modifier,dz+missed_dz,findings+missed_findings,anat,cancer,device]):\n vd, vo = 0,0\n for mapping in mappings:\n if any(sig.lower() in mapping.lower() for sig in sigs[1]): # dz, missed\n #diz_pos.append(check_terms(mapping))\n vd += 1\n elif any(sig.lower() in mapping.lower() for sig in sigs[2]): # finding/observation, missed\n #obs.append(check_terms(mapping))\n vo += 1\n elif any(sig.lower() in mapping.lower() for sig in sigs[4]): # cancer\n neo.append(check_terms(mapping))\n elif any(sig.lower() in mapping.lower() for sig in sigs[5]): # iatrogenic\n dev.append(check_terms(mapping))\n #\"\"\"\n if vd > 0:\n diz_pos.append(concat_terms(mappings, sigs[1]+sigs[3]))\n if vo > 0:\n obs.append(concat_terms(mappings,sigs[2]+sigs[3]))\n #\"\"\"\n return diz_pos, obs, neo, dev\n\n\n\ndef clean_findings(term_list, negated):\n if term_list != []:\n to_remove = []\n term_list = [i.lower() for i in term_list if i != ''] # port.stem(i.lower())\n term_list = 
list((set(term_list)-set(phrase_filter))-set(negated))\n for term in term_list:\n if match_to_list(term, negated):\n print(term, \" :: Removed\")\n to_remove.append(term)\n term_list = list(set(term_list)-set(to_remove))\n return term_list\n\n\n\ndef clean_surg_findings(term_list, negated):\n if term_list != []:\n surg_filters = set(surg_filter)\n surg_filters.update(set(phrase_filter))\n surg_filters.update(negated)\n term_list = [i.lower() for i in term_list] # port.stem(i.lower())\n term_list = list(set(term_list)-surg_filters)\n return term_list\n\n \ndef extract_metamap(pID, report_n,fast_mode=False):\n start = time.time()\n diz_pos, obs, neo, dev, status = [], [], [], [], ''\n chunks = parse_report(pID,report_n,fast_mode=fast_mode)\n if len(chunks) > 1: \n chunks = chunks[1:] # sync chunks and phrase indices\n for j, output in enumerate(chunks):\n for mappings in output:\n diz_pos, obs,neo, dev = parse_mappings(mappings, diz_pos, obs,neo, dev)\n negated, mm_agnostic = get_negated(report_n)\n print(\"Negated Terms: \", negated) # false positive reduction\n print(\"MM blind Terms: \", mm_agnostic) # false negative reduction\n findings = clean_findings(diz_pos+obs+mm_agnostic,negated) \n neo = clean_findings(neo,negated) \n dev = clean_surg_findings(dev,negated) \n if (len(findings) > 0) or (len(neo) > 0) or (len(dev) > 0):\n status = 'abnormal'\n else:\n status = 'normal'\n print(\"Concept Extraction Done for report {} in {:.4f}s\".format(pID, time.time()-start))\n print('status:',status,'\\nfindings:',findings,'\\nneoplasm:',neo,'\\nsurg:',dev)\n return [status,findings,neo,dev,[negated,mm_agnostic]]\n else:\n return [status,diz_pos+obs,neo,dev,[]]\n \n \n\ndef flag_uncertain(phrase):\n return phrase\n\n\ndef matchSnomedSynonyms(term, reference_term):\n return term\n\n\ndef bucket_terms(pID,term_list,result,auto_update=False):\n # tokenize, lemmatize, remove stop words\n # if any reference term in term_list, replace term\n # map unmatched terms\n start = time.time()\n result[pID] = [0]*(len(reference_terms)+1) # 0-22 positions + report status\n if term_list[0] == 'abnormal': # status\n result[pID][-1] = 1\n if term_list[2] != []: # neoplasm\n result[pID][5] = 1\n if term_list[3] != []: # surgery_foreign_object\n result[pID][10] = 1 \n term_list = term_list[1] # remaining findings\n to_remove = []\n # reduce search space with overlap join\n # http://www.aclweb.org/anthology/C10-1096\n for i,category in enumerate(reference_terms):\n if (i < len(reference_terms)-2) and (term_list!=[]):\n for term in term_list:\n # start new string matching algorithm\n if match_to_list(term, reference_terms[i]):\n #print(term, reference_terms[i])\n result[pID][i] = 1\n if auto_update:\n if term not in reference_terms[i]:\n reference_terms[i].append(term)\n to_remove.append(term)\n break # assuming only single category match\n # disambiguate air fluid level in cyst vs hernia\n if ('air fluid level' in to_remove) and (any('hernia' in t for t in term_list)):\n result[pID][19] = 0 # not a cyst\n result[pID][11] = 1 # more likely a hernia\n term_list = list(set(term_list)-set(to_remove))\n if term_list != []:\n result[pID][-2] = 1 # other\n # reference_terms[-1].extend(term_list)\n # write reference_terms to csv\n print(\"Term Matching done in {:.4f}s\".format(time.time()-start))\n return result, to_remove\n\n\n\ndef run_metamap(report_set, idxs, r_stats=False):\n print('Stats Mode :: {}'.format(r_stats))\n start = time.time()\n reports_results ={}\n final_result = {}\n report_stats = {}\n for i in idxs:\n 
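# per-report flow: extract_metamap() runs MetaMap plus negation detection on\n # the report text, then bucket_terms() projects the surviving phrases onto a\n # fixed label vector (one slot per reference group, last slot = status).\n # Minimal call, assuming the reports_gold DataFrame loaded in __main__ below:\n # report_stats = run_metamap(reports_gold, [0, 1, 2], r_stats=True)\n 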
print(report_set[\"report_text\"][i],'\\n')\n start2 = time.time()\n # rep_len = len(report_set[\"report_text\"][i])\n # if rep_len > n : # switch to fast mode\n # check distrubtion of result length with status- short reports can be assigned\n # normal in fast mode.\n # reports_results[i] = extract_metamap(i, report_set[\"report_text\"][i], fast_mode=True)\n reports_results[i] = extract_metamap(i, report_set[\"report_text\"][i])\n if reports_results[i][0] == 'abnormal':\n final_result, to_remove = bucket_terms(i,reports_results[i],final_result)\n else:\n final_result[i], to_remove = [0]*(len(reference_terms)+1), []\n t_end = time.time()-start2\n if r_stats:\n report_stats.update({i : {'len':len(report_set[\"report_text\"][i]),\n 'speed':t_end,\n 'text': report_set[\"report_text\"][i],\n 'diagnostics': {'negated':reports_results[i][4][0],\n 'mm_agnostic':reports_results[i][4][1],\n 'matched_terms':to_remove},\n 'labels': {'all_labels':final_result[i],\n 'cardiomegaly':final_result[i][0],\n 'consolidation':final_result[i][1],\n 'opacity':final_result[i][2],\n 'pneumonia':final_result[i][3],\n 'edema':final_result[i][4],\n 'nodule':final_result[i][5],\n 'emphysema':final_result[i][6],\n 'fibrosis':final_result[i][7],\n 'pleural_abnormality':final_result[i][8], \n 'pneumothorax':final_result[i][9],\n 'surgery_foreign_object':final_result[i][10],\n 'hernia':final_result[i][11],\n 'atelectasis':final_result[i][12],\n 'bronchus_inflammation':final_result[i][13],\n 'aorta_tortuosity_or_widen':final_result[i][14],\n 'aorta_calcification':final_result[i][15],\n 'spine_abnormality':final_result[i][16],\n 'curvature_spine':final_result[i][17],\n 'rib_abnormal':final_result[i][18],\n 'cyst':final_result[i][19],\n 'infiltration':final_result[i][20],\n 'diaphragm_abnormality':final_result[i][21],\n 'collapse':final_result[i][22],\n 'other_important':final_result[i][23],\n 'other':final_result[i][24],\n 'status':final_result[i][25]},\n 'status': reports_results[i][0],\n 'findings':reports_results[i][1],\n 'neoplasm':reports_results[i][2],\n 'surg_foreign':reports_results[i][3] \n }})\n print(\"\\n\\nDone Dumping {} reports in {:.4f}s\".format(len(idxs), time.time()-start))\n return report_stats\n\n\ndef evalReportLabels(indices):\n for i in indices:\n print(reports_gold[\"report_text\"][i])\n #print('\\nGold Standard Labels: \\n')\n #print(reports_gold.loc[i,'Cyst':\"valid\"])\n print('\\nMetamap Labels: \\n')\n print(reports_df.loc[i,'status':'surg_foreign'])\n print(final_df.loc[i,\"cardiomegaly\":\"status\"])\n print('\\n\\n')\n\n\n\n\n#rand_idx = choices(range(len(reports_gold)), k = 100)\n \n#report_stats = run_metamap(reports_gold, rand_idx[98:99], r_stats=False)\n \n#report_stats[5774]['labels']['nodule']\n#report_stats[5086]['labels']['hernia']\n \n#evalReportLabels(rand_idx[:10])\n\n#reports_resultsx = {}\n#final_resultx = {}\n#i = 6777 #4021 #9228 #4793 #864 501/510\n#print(reports_gold[\"report_text\"][i])\n#reports_resultsx[i] = extract_metamap(i, reports_gold[\"report_text\"][i])\n#final_resultx = bucket_terms(i,reports_resultsx[i],final_resultx)\n \n#out_metamap('Anterolateral osteophytes at multiple levels in the thoracic spine. 
')\n\nif __name__ == \"__main__\":\n print (\"Start\")\n print (\"Usage: python mm_process.py metamap_path input_file_path 100 mm_opts\")\n #filename = 'xrc_multilabel.csv'\n #path = os.getcwd() + '/Projects/data/' + filename\n \n arg_names = ['metamap_path', 'input_file_path', 'n', 'mm_opts']\n args = dict(zip(arg_names, sys.argv[1:]))\n\n arg_lst = collections.namedtuple('arg_lst', arg_names)\n args = arg_lst(*(args.get(arg, None) for arg in arg_names))\n\n if not args[0]:\n print('Metamap path missing')\n else:\n metamap = args[0].encode('utf-8')\n\n if not args[1]:\n print('File path missing')\n else:\n path = args[1]\n\n if not args[2]:\n n = 100\n else:\n n = int(args[2])\n\n if not args[3]:\n options = ''\n else:\n options = args[3]\n \n \n print(\"metamap_path: {}, input_file_path: {}, n: {}, mm_opts: {}\".format(metamap, path, n, options))\n \n #path = sys.argv[1]\n #metamap_path = sys.argv[2]\n #n = sys.argv[3]\n #metamap = '/Users/tobi/projects/public_mm/bin/metamap16'.encode('utf-8')\n home_dir = os.getcwd() + '/Projects'\n os.chdir(home_dir)\n\n \n options = ''\n \n port = PorterStemmer()\n\n reports_gold = pd.read_csv(path)\n rand_idx = choices(range(len(reports_gold)), k = n)\n report_stats = run_metamap(reports_gold, rand_idx, r_stats=True)\n print('All DONE!!')\n\n\n\n","sub_path":"metamap_process_16_conditions_benchmark.py","file_name":"metamap_process_16_conditions_benchmark.py","file_ext":"py","file_size_in_byte":27670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"190842948","text":"import logging\nfrom modules.util import Failed\n\nlogger = logging.getLogger(\"Plex Meta Manager\")\n\nbuilders = [\"stevenlu_popular\"]\nbase_url = \"https://s3.amazonaws.com/popular-movies/movies.json\"\n\nclass StevenLu:\n def __init__(self, config):\n self.config = config\n\n def get_stevenlu_ids(self, method):\n if method == \"stevenlu_popular\":\n logger.info(f\"Processing StevenLu Popular Movies\")\n return [(i[\"imdb_id\"], \"imdb\") for i in self.config.get_json(base_url)]\n else:\n raise Failed(f\"StevenLu Error: Method {method} not supported\")\n","sub_path":"modules/stevenlu.py","file_name":"stevenlu.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506780993","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 18 21:41:39 2018\n\n@author: xiaozhe\n\"\"\"\n\n\nfrom sklearn.datasets.base import Bunch\nfrom sklearn import feature_extraction\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport logging\nfrom functionsforclassifier import readfile, write_pkl, read_pkl\n\n\nwordbag_path = './word_bunch/train_jieba_wordbunch_set.pkl'\nstopwdpath = './停用词集/百度停用词列表.txt'\nstopwdlist = readfile(stopwdpath).splitlines()\n\nbunch = read_pkl(wordbag_path)\ntrain_tfidfspace = Bunch(target_name = bunch.target_name, label = bunch.label,\n filenames = bunch.filenames, tdm=[], vocabulary = {})\nvectorizer = TfidfVectorizer(stop_words=stopwdlist,\n sublinear_tf=True, max_df=0.5)\ntransformer = TfidfTransformer()\ntrain_tfidfspace.tdm = vectorizer.fit_transform(bunch.contents)\ntrain_tfidfspace.vocabulary = vectorizer.vocabulary_\nspace_path = './word_bunch/train_tfidf_space.pkl'\nwrite_pkl(space_path, train_tfidfspace)\n\n\nbunch_test_path = './word_bunch/test_jieba_wordbunch_set.pkl'\nbunch_test = read_pkl(bunch_test_path)\ntestspace = Bunch(target_name = 
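# the test-side space reuses the TRAIN vocabulary (vocabulary=train_tfidfspace.vocabulary\n # is passed to TfidfVectorizer below), so train and test tdm matrices share one column space\n 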
bunch_test.target_name,\n filenames = bunch_test.filenames,\n label = bunch_test.label,\n contents = bunch_test.contents, tdm=[], vocabulary={})\nvectorizer_test = TfidfVectorizer(stop_words=stopwdlist,\n sublinear_tf=True, max_df=0.5,\n vocabulary=train_tfidfspace.vocabulary)\ntestspace.tdm = vectorizer_test.fit_transform(bunch_test.contents)\ntestspace.vocabulary = train_tfidfspace.vocabulary\ntestspace_path = './word_bunch/test_tfidf_space.pkl'\nwrite_pkl(testspace_path, testspace)\n\nlogging.debug('Finished creating the IF-TDF space for train and test set')\n","sub_path":"tfidf_3.py","file_name":"tfidf_3.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"406045436","text":"from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom orders.models import Order\nfrom orders.services import OrderEmailChanger\n\n\nclass OrderChangeForm(forms.ModelForm):\n email = forms.CharField(help_text=_('If changed user, receives welcome letter one more time'))\n\n class Meta:\n model = Order\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n order = kwargs['instance']\n initial = kwargs.get('initial') or dict()\n\n if order is not None:\n initial.update(self.get_custom_initial_data(order))\n\n kwargs['initial'] = initial\n\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def get_custom_initial_data(order: Order) -> dict:\n return {\n 'email': order.user.email,\n }\n\n def save(self, commit=True) -> Order:\n order = super().save(commit=commit)\n\n self.call_services(order)\n\n return order\n\n def call_services(self, order):\n self._change_email_if_required(order)\n\n def _change_email_if_required(self, order: Order):\n if self.initial['email'] != self.cleaned_data['email']:\n email_changer = OrderEmailChanger(order=order, email=self.cleaned_data['email'])\n email_changer()\n\n\nclass OrderAddForm(forms.ModelForm):\n email = forms.CharField(required=False, widget=forms.HiddenInput)\n\n class Meta:\n model = Order\n fields = '__all__'\n","sub_path":"src/orders/admin/orders/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287344902","text":"import csv\nimport numpy as np\nimport pandas as pd\ntiny = np.finfo(np.float32).tiny\nimport time\n\n\n# Set starting time:\nstart_time = time.time();\n\n\n# Importing Human proteome and Pickle interaction files:\n# Converting Pickle binary interaction format to uniprot 'interacts with' format.\n# Adding (replacing) interactions column of the Pickle database.\n# Writing Human proteome data with Pickle interactions.\n\npickle_ints = pd.read_csv('Pickle_input', delimiter='\\t', usecols=['InteractorA', 'InteractorB'])\nHuman_proteome = pd.read_csv('uniprot_input', delimiter='\\t')\n\ngrp1 = pickle_ints.groupby('InteractorA')['InteractorB'].apply(list).apply(lambda x : '; '.join(str(elem) for elem in x)).reset_index(name='Interacts with').rename(columns={'InteractorA':'Entry'})\ngrp2 = pickle_ints.groupby('InteractorB')['InteractorA'].apply(list).apply(lambda x : '; '.join(str(elem) for elem in x)).reset_index(name='Interacts with').rename(columns={'InteractorB':'Entry'})\n\nints_in_uniprot_format = grp1.append(grp2).groupby('Entry')['Interacts with'].apply(list).apply(lambda x : '; '.join(str(elem) for elem in x)).reset_index(name='Interacts with')\nints_in_uniprot_format['Interacts with'] = 
ints_in_uniprot_format['Interacts with'].apply(lambda x : x.split('; ')).apply(lambda x : list(set(list(x)))).apply(lambda x : '; '.join(str(elem) for elem in x))\nHuman_proteome = Human_proteome.merge(ints_in_uniprot_format, how='left', on='Entry')\nHuman_proteome.to_csv('Human proteome with Pickle interactions.csv', index=False)\n\n\n# Importing .csv file and creating dictionaries and lists:\n# indx is a dictionary that will be used to find a given protein entry and return its index.\n# ints is the list of interaction lists (list of lists).\n# seqs is the list for sequence strings.\n\n\nindx = {} #entry indexes dictionary\nints = [] #list of interactions\nn_ints =[] #list of number of interactions\nseqs = [] #list of sequence strings\nEntry= [] #list of Entries\nprot_name = [] #list of protein names\norpha_uniprot = [] #list of orphanet numbers\ninvolvement = [] #list of involvement in diseases\nstatus = [] #list of Sprot/trembl status\ngene_name = [] #list of Gene names (primary)\n\nwith open('Human proteome with Pickle interactions.csv', newline = '') as csvfile:\n n = 0 #number of row\n for row in csv.DictReader(csvfile):\n indx[row['Entry']] = n\n #create list of lists for interactions, renaming \"Itself\", triming coforms and making empty lists as needed:\n ints.append([entry for entry in row['Interacts with'].split('; ')] if row['Interacts with'] is not '' else [])\n n_ints.append(row['Interacts with'].count('; ') + 1 if row['Interacts with'] is not '' else 0)\n seqs.append(row['Sequence'])\n Entry.append(row['Entry'])\n prot_name.append(row['Protein names'])\n orpha_uniprot.append(row['Cross-reference (Orphanet)'])\n involvement.append(row['Involvement in disease'])\n status.append(row['Status'])\n gene_name.append(row['Gene names (primary )'])\n n += 1\n\n\n# Here we count all of alphabet letters' repititions in sequence strings and form the rep_ent matrix, which its rows are representative of protein entries and its columns are representative of A-Z letters respectively.\n\n\nrep_ent = np.zeros((n, 22), dtype = np.float32) #matrix of number of repitition of amino acids in entry sequences initialized with zero\nfor i in range(n):\n rep_ent[(i, 0)] = seqs[i].count('A')\n\n rep_ent[(i, 1)] = seqs[i].count('C')\n rep_ent[(i, 2)] = seqs[i].count('D') + seqs[i].count('B') / 2\n rep_ent[(i, 3)] = seqs[i].count('E') + seqs[i].count('Z') / 2\n rep_ent[(i, 4)] = seqs[i].count('F')\n rep_ent[(i, 5)] = seqs[i].count('G')\n rep_ent[(i, 6)] = seqs[i].count('H')\n rep_ent[(i, 7)] = seqs[i].count('I') + seqs[i].count('J') / 2\n\n rep_ent[(i, 8)] = seqs[i].count('K')\n rep_ent[(i, 9)] = seqs[i].count('L') + seqs[i].count('J') / 2\n rep_ent[(i, 10)] = seqs[i].count('M')\n rep_ent[(i, 11)] = seqs[i].count('N') + seqs[i].count('B') / 2\n rep_ent[(i, 12)] = seqs[i].count('O')\n rep_ent[(i, 13)] = seqs[i].count('P')\n rep_ent[(i, 14)] = seqs[i].count('Q') + seqs[i].count('Z') / 2\n rep_ent[(i, 15)] = seqs[i].count('R')\n rep_ent[(i, 16)] = seqs[i].count('S')\n rep_ent[(i, 17)] = seqs[i].count('T')\n rep_ent[(i, 18)] = seqs[i].count('U')\n rep_ent[(i, 19)] = seqs[i].count('V')\n rep_ent[(i, 20)] = seqs[i].count('W')\n\n rep_ent[(i, 21)] = seqs[i].count('Y')\n\n\n# Calculating length of entry sequences, calculating probability of each letter per entry and then calculationg pit. 
The entry pit result is stored in pit_ent which is a (n * 1) matrix; it gives pit number for each entry.\n\n\nlen_ent = rep_ent @ np.ones((22, 1), dtype = np.float32) #length of entry sequences\nP = rep_ent / (len_ent) #matrix of probability of each amino acid in each entry\npit_ent = -np.sum(P * np.log2(P + tiny), axis = 1, keepdims = True) #matrix for pit of each entry (used tiny to avoid log0)\n\n\n# Calculating sum of pit * length for network members (except the hub). It is stored in pits_net which is a (n * 1) matrix; it gives pit * length number for each network entry.\n# And besides that we form the network of reactions and create a matrix for sum of alphabet repititions in a network (columns and rows are like rep_ent).\n# We do these two in one place to save time of dictionary look-ups.\n\n\ny_summation = np.zeros((n, 1), dtype = np.float32) #matrix of sum of pit*length of network members initialized with zero\nrep_net = np.copy(rep_ent) #matrix of number of repitition of amino acids in interacting networks initialized with rep_ent (to include network hubs)\ny_concat = np.zeros((n,22), dtype = np.float32)\nfor i in range(n):\n for entry in ints[i]:\n try:\n ind = indx[entry]\n y_summation[(i, 0)] += pit_ent[(ind, 0)] * len_ent[(ind, 0)]\n rep_net[(i), 0:22] += rep_ent[(ind), 0:22]\n y_concat[(i), 0:22] += rep_ent[(ind), 0:22]\n except KeyError:\n None\n\n\n\n# Calculating length of each network, calculating probability of each letter per network entry and then calculationg pit. The network pit result is stored in pit_net which is a (n * 1) matrix; it gives pit number for each network entry.\n\n\nlen_net = rep_net @ np.ones((22, 1), dtype = np.float32) #length of networks\nlen_y_concat = y_concat @ np.ones((22,1), dtype = np.float32) # length of ys\nP = rep_net / (len_net) #matrix of probability of each amino acid in each network\nP_y_concat = y_concat / (len_y_concat + tiny) #matrix of probability of each amino acid in ys\npit_net = -np.sum(P * np.log2(P + tiny), axis = 1, keepdims = True) #matrix for pit of each network (used tiny to avoid log0)\npit_y_concat = -np.sum(P_y_concat * np.log2(P_y_concat + tiny), axis = 1, keepdims = True) #matrix for pit of ys (used tiny to avoid log0)\n\n\n# Calcualting network efficiency:\n\n\nTotal_net = len_net * pit_net\nTotal_y_concat = pit_y_concat * (len_net - len_ent)\nB = pit_net * len_net - y_summation\n\nAge = pd.read_csv('age_input.csv', usecols=['Entry', 'modeAge'])\n\norphadata_unprocessed = pd.read_csv('orpha_input.csv', encoding = \"ISO-8859-1\",\n usecols=['OrphaNumber', 'ExpertLink', 'Name', 'Name6', 'Source', 'Name12', 'Name20', 'Name24',\n 'Name28']).rename(columns={'OrphaNumber': 'OrphaNumber (source: Orphanet)',\n 'Name': 'Disease name (source: Orphanet)', 'Name6': 'Disease type',\n 'Name12': 'Occurance type', 'Name20': 'Occurance value',\n 'Name24': 'Geo distrib', 'Name28': 'Validity'})\norphadata_unprocessed['OrphaNumber (source: Orphanet)'] = orphadata_unprocessed['OrphaNumber (source: Orphanet)'].astype(str)\n\norphadata = pd.read_csv('en_product9_prev.csv', encoding = \"ISO-8859-1\",\n usecols=['OrphaNumber', 'ExpertLink', 'Name', 'Name6', 'Source', 'Name12', 'Name20', 'Name24',\n 'Name28']).rename(columns={'OrphaNumber': 'OrphaNumber (source: Orphanet)',\n 'Name': 'Disease name (source: Orphanet)', 'Name6': 'Disease type',\n 'Name12': 'Occurance type', 'Name20': 'Occurance value',\n 'Name24': 'Geo distrib', 'Name28': 'Validity'})\norphadata = orphadata[orphadata['Geo distrib'] == 'Worldwide'] # filtering out all locally 
distributed diseases -- Only keeps 'Worldwide'\norphadata = orphadata[orphadata['Occurance value'] != 'Unknown'] # filtering out 'Unknown' occurences\norphadata = orphadata[orphadata['Occurance value'] != 'Not yet documented'] # filtering out missing occurence values\norphadata = orphadata[orphadata['Occurance type'] != 'Lifetime Prevalence'] # filtering out 'Lifetime Prevalence'\norphadata = orphadata[orphadata['Occurance type'] != 'Cases/families'] # filtering out 'Cases/families'\norphadata = orphadata.dropna(subset=['Occurance value'])\norphadata.drop_duplicates('OrphaNumber (source: Orphanet)', keep='first', inplace=True)\norphadata['OrphaNumber (source: Orphanet)'] = orphadata['OrphaNumber (source: Orphanet)'].astype(str)\n\nnetwork = pd.DataFrame({'Status': status,\n 'Entry': Entry,\n 'Gene name': gene_name,\n 'Protein name': prot_name,\n 'Protein length': len_ent[:, 0],\n 'Protein entropy (in pits)': pit_ent[:, 0],\n 'Number of interactions': n_ints,\n 'Network length': len_net[:, 0],\n 'Network entropy (in pits)': pit_net[:, 0],\n 'Total network entropy (in pits)': Total_net[:, 0],\n 'Scalar summation of interactors entropy (in pits)': y_summation[:, 0],\n 'Estimation of the joint entropy of interactors (in pits)': pit_y_concat[:, 0],\n 'Total joint entropy estimation (in pits)': Total_y_concat[:, 0],\n 'Mutual information estimation (in pits)': B[:, 0],\n 'Orpha no. (source: UniProt)': orpha_uniprot,\n 'Involvement in disease (source: UniProt)': involvement})\nnetwork = network.merge(Age, how='left', on='Entry').rename(columns={'modeAge': 'Gene age'})\nout1 = network.copy()\nout1.to_csv('Disease probability.csv', index = False)\n\nnetwork['Orpha no. (source: UniProt)'] = network['Orpha no. (source: UniProt)'].apply(lambda x : x.split(';')[:-1]).apply(list)\nnetwork = network.explode('Orpha no. (source: UniProt)') # requires pandas version 0.25 or later\nout2 = network.merge(orphadata_unprocessed, how='left', left_on = 'Orpha no. (source: UniProt)', right_on = 'OrphaNumber (source: Orphanet)')\nout2 = out2.reindex(columns=['Orpha no. (source: UniProt)',\n 'Involvement in disease (source: UniProt)', \n 'OrphaNumber (source: Orphanet)',\n 'Disease name (source: Orphanet)',\n 'Disease type', \n 'Occurance value',\n 'Occurance type',\n 'Geo distrib',\n 'Validity',\n 'Source',\n 'Status',\n 'Entry',\n 'Gene name',\n 'Protein name',\n 'Protein length',\n 'Protein entropy (in pits)',\n 'Number of interactions',\n 'Network length',\n 'Network entropy (in pits)',\n 'Total network entropy (in pits)',\n 'Scalar summation of interactors entropy (in pits)',\n 'Estimation of the joint entropy of interactors (in pits)',\n 'Total joint entropy estimation (in pits)', \n 'Mutual information estimation (in pits)',\n 'Gene age',\n 'ExpertLink'])\nout2['Orpha no. (source: UniProt)'] = out2['Orpha no. (source: UniProt)'].astype(float)\nout2 = out2.sort_values(by=['Orpha no. (source: UniProt)'])\nout2.to_csv('Disease analysis of all proteins.csv', index = False)\n\nout3 = network.merge(orphadata, how='left', left_on = 'Orpha no. 
(source: UniProt)', right_on = 'OrphaNumber (source: Orphanet)')\nout3 = out3.dropna(subset=['OrphaNumber (source: Orphanet)'])\nout3 = out3.reindex(columns=['OrphaNumber (source: Orphanet)',\n 'Disease name (source: Orphanet)',\n 'Involvement in disease (source: UniProt)',\n 'Disease type',\n 'Occurance value',\n 'Occurance type',\n 'Geo distrib',\n 'Validity',\n 'Source',\n 'Status',\n 'Entry',\n 'Gene name',\n 'Protein name',\n 'Protein length',\n 'Protein entropy (in pits)',\n 'Number of interactions',\n 'Network length',\n 'Network entropy (in pits)',\n 'Total network entropy (in pits)',\n 'Scalar summation of interactors entropy (in pits)',\n 'Estimation of the joint entropy of interactors (in pits)',\n 'Total joint entropy estimation (in pits)',\n 'Mutual information estimation (in pits)',\n 'Gene age',\n 'ExpertLink'])\nout3.to_csv('Disease occurances associations.csv', index=False)\n\nout4 = network.merge(orphadata, how='left', left_on = 'Orpha no. (source: UniProt)', right_on = 'OrphaNumber (source: Orphanet)')\nout4['Orpha no. (source: UniProt)'] = out4['Orpha no. (source: UniProt)'].fillna(0)\nout4['OrphaNumber (source: Orphanet)'] = out4['OrphaNumber (source: Orphanet)'].fillna(0)\nout4['Occurance value'] = out4.apply(lambda row : row['Occurance value'] if row['Orpha no. (source: UniProt)'] == row['OrphaNumber (source: Orphanet)'] else tiny, axis=1)\nout4['Occurance value'] = out4['Occurance value'].fillna(0)\nout4['Occurance value'] = out4['Occurance value'].replace({'<1 / 1 000 000' : 0.000001, '1-9 / 100 000': 0.00005, '1-9 / 1 000 000': 0.000005,\n '1-5 / 10 000': 0.0003, '>1 / 1000': 0.001, '6-9 / 10 000': 0.00075})\n\nTot_occur = out4[out4['Occurance value'] != 0].groupby('Entry')['Occurance value'].apply(sum).reset_index(name='Total occurance value')\nTot_occur['Total occurance value'] = Tot_occur['Total occurance value'].apply(lambda x : 'NA' if x < 0.000000001 else x)\nout4 = out1.merge(Tot_occur, how='left', on='Entry')\nout4['Total occurance value'] = out4['Total occurance value'].fillna(0)\nout4 = out4.reindex(columns=['Status',\n 'Entry',\n 'Gene name',\n 'Protein name',\n 'Protein length',\n 'Protein entropy (in pits)',\n 'Number of interactions',\n 'Network length',\n 'Network entropy (in pits)',\n 'Total network entropy (in pits)',\n 'Scalar summation of interactors entropy (in pits)',\n 'Estimation of the joint entropy of interactors (in pits)',\n 'Total joint entropy estimation (in pits)',\n 'Mutual information estimation (in pits)',\n 'Orpha no. (source: UniProt)',\n 'Total occurance value',\n 'Involvement in disease (source: UniProt)',\n 'Gene age'])\nout4.to_csv('Total occurances per disease.csv', index=False)\n\n\n# Calculating total time elapsed. 
It should be around half a minute!\n\n\nend_time = time.time()\nprint('Elapsed time:', end_time - start_time, 'seconds')\n\n","sub_path":"Network and Diseases without isoform insertion with PICKLE interactions.py","file_name":"Network and Diseases without isoform insertion with PICKLE interactions.py","file_ext":"py","file_size_in_byte":16732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"347854479","text":"#!/usr/bin/env python2\nimport sys, os, binascii, math, subprocess, errno, hashlib, itertools\n\n# default directories (relative to current dir unless you use absolute paths)\n# leave as \"\" for current working directory\n# using --xorpads= or --output= will override these\nxorpad_directory = \"\"\noutput_directory = \"\"\n\n#################\nversion = \"2.11\"\n\nhelptext = \"\"\"3dsconv.py ~ version %s\nhttps://github.com/ihaveamac/3dsconv\n\nusage: 3dsconv.py [options] game.3ds [game.3ds ...]\n --xorpads= - use xorpads in the specified directory\n default is %s\n --output= - save converted CIA files in the specified directory\n default is %s\n --overwrite - overwrite any existing converted CIA, if it exists\n --gen-ncchinfo - generate ncchinfo.bin for roms that don't have a valid xorpad\n --gen-ncch-all - use with --gen-ncchinfo to generate an ncchinfo.bin for all roms\n --noconvert - don't convert roms\n useful if you just want to generate ncchinfo.bin\n --force - run even if make_cia isn't found\n --nocleanup - don't remove temporary files once finished\n --verbose - print more information\n\n- make_cia should exist in your PATH\n- if a rom is encrypted, an ExHeader XORpad\n should exist in the working directory\n named \\\".Main.exheader.xorpad\\\"\n or in the directory specified by --xorpads=\n- encrypted and decrypted roms can be converted at the same time\"\"\"\n\nmu = 0x200 # media unit\nreadsize = 8*1024*1024 # used from padxorer\n\ncleanup = not \"--nocleanup\" in sys.argv\nverbose = \"--verbose\" in sys.argv\noverwrite = \"--overwrite\" in sys.argv\ngenncchinfo = \"--gen-ncchinfo\" in sys.argv\ngenncchall = \"--gen-ncch-all\" in sys.argv\nnoconvert = \"--noconvert\" in sys.argv\n\ndef print_v(msg):\n\tif verbose:\n\t\tprint(msg)\n\ndef testcommand(cmd):\n\tprint_v(\"- testing: %s\" % cmd)\n\ttry:\n\t\tproc = subprocess.Popen([cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tproc.stdout.close()\n\t\tproc.stderr.close()\n\t\tproc.wait()\n\t\treturn True\n\texcept OSError as e:\n\t\tif e.errno != 2:\n\t\t\traise\n\t\treturn False\ndef runcommand(cmdargs):\n\tprint_v(\"$ %s\" % \" \".join(cmdargs))\n\tproc = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tproc.wait()\n\tprocoutput = proc.communicate()[0]\n\tprint_v(procoutput)\n\tif proc.returncode != 0:\n\t\tprint(\"! 
%s had an error.\" % cmdargs[0])\n\t\t# prevent printing twice\n\t\tif not verbose:\n\t\t\tprint(\"- full command: %s\" % \" \".join(cmdargs))\n\t\t\tprint(\"- output:\")\n\t\t\tprint(procoutput)\n\n# used from http://stackoverflow.com/questions/10840533/most-pythonic-way-to-delete-a-file-which-may-not-exist\ndef silentremove(filename):\n\ttry:\n\t\tos.remove(filename)\n\texcept OSError as e: # this would be \"except OSError, e:\" before Python 2.6\n\t\tif e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n\t\t\traise # re-raise exception if a different error occured\ndef docleanup(tid):\n\tsilentremove(\"work/%s-game-orig.cxi\" % tid)\n\tsilentremove(\"work/%s-game-conv.cxi\" % tid)\n\tsilentremove(\"work/%s-manual.cfa\" % tid)\n\tsilentremove(\"work/%s-dlpchild.cfa\" % tid)\n\t# silentremove(\"work/%s-ncchheader.bin\" % tid)\n\t# silentremove(\"work/%s-exheader.bin\" % tid)\n\t# silentremove(\"work/%s-exefs.bin\" % tid)\n\t# silentremove(\"work/%s-romfs.bin\" % tid)\n\t# silentremove(\"work/%s-logo.bcma.lz\" % tid)\n\t# silentremove(\"work/%s-plain.bin\" % tid)\n\nif len(sys.argv) < 2:\n\tprint(helptext % (version, (\"current directory\" if xorpad_directory == \"\" else \"'%s'\" % xorpad_directory), (\"current directory\" if output_directory == \"\" else \"'%s'\" % output_directory)))\n\tsys.exit(1)\n\nif not \"--force\" in sys.argv:\n\tfail = False\n\tif not testcommand(\"make_cia\"):\n\t\tprint(\"! make_cia doesn't appear to be in your PATH.\")\n\t\tprint(\" you can get it from here:\")\n\t\tprint(\" https://github.com/ihaveamac/ctr_toolkit\")\n\t\tfail = True\n\tif fail:\n\t\tprint(\"- if you want to force the script to run,\")\n\t\tprint(\" add --force as one of the arguments.\")\n\t\tsys.exit(1)\n\ntry:\n\tos.makedirs(\"work\")\nexcept OSError:\n\tif not os.path.isdir(\"work\"):\n\t\traise\n\n# probably should've used argparse\nfor arg in sys.argv[1:]:\n\tif arg[:2] != \"--\":\n\t\tcontinue\n\tif arg[:10] == \"--xorpads=\":\n\t\txorpad_directory = arg[10:]\n\tif arg[:9] == \"--output=\":\n\t\toutput_directory = arg[9:]\n\nif output_directory!= \"\":\n\ttry:\n\t\tos.makedirs(output_directory)\n\texcept OSError:\n\t\tif not os.path.isdir(output_directory):\n\t\t\traise\n\nncchinfolist = []\n# this only does ExHeader stuff\n# so I think I can get away with hard-coding some things\ndef ncchinfoadd(rom):\n\tromf = open(rom, \"rb\")\n\tromf.seek(0x108)\n\ttid = romf.read(8)\n\tromf.seek(0x4000)\n\tkeyY = romf.read(16)\n\tncchinfolist.extend([tid[::-1] + \"\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\" + keyY + \"\\x01\\x00\\x00\\x00\" + \"\\x00\\x00\\x00\\x00\" + \"\\x00\\x00\\x00\\x00\" + \"\\x00\\x00\\x00\\x00\" + tid + (\"/%s.Main.exheader.xorpad\" % binascii.hexlify(tid[::-1]).upper()).ljust(112, \"\\x00\")])\n\tromf.close()\n\n# used from http://stackoverflow.com/questions/4566498/python-file-iterator-over-a-binary-file-with-newer-idiom\n#def read_in_chunks(infile, chunk_size=buffersize):\n#\tchunk = infile.read(chunk_size)\n#\twhile chunk:\n#\t\tyield chunk\n#\t\tchunk = infile.read(chunk_size)\n\n# used from http://www.gossamer-threads.com/lists/python/python/163938\ndef bytes2int(string):\n\ti = 0\n\tfor ch in string:\n\t\ti = 256*i + ord(ch)\n\treturn i\n\ntotalroms = 0\nprocessedroms = 0\n\nfor rom in sys.argv[1:]:\n\tif rom[:2] == \"--\":\n\t\tcontinue\n\ttotalroms += 1\n\tif not os.path.isfile(rom):\n\t\tprint(\"! 
%s doesn't exist.\" % rom)\n\t\tcontinue\n\tromname = os.path.basename(os.path.splitext(rom)[0])\n\tcianame = os.path.join(output_directory, romname+\".cia\")\n\tif not overwrite and os.path.isfile(cianame):\n\t\tprint(\"! %s already exists.\" % cianame)\n\t\tprint(\" to force conversion and overwriting this, use --overwrite\")\n\t\tcontinue\n\tif genncchinfo and genncchall:\n\t\tncchinfoadd(rom)\n\tromf = open(rom, \"rb\")\n\tromf.seek(0x100)\n\tncsdmagic = romf.read(4)\n\tromf.seek(0x108)\n\ttid = binascii.hexlify(romf.read(8)[::-1])\n\txorpad = os.path.join(xorpad_directory, \"%s.Main.exheader.xorpad\" % tid.upper())\n\tromf.seek(0x418F)\n\tdecrypted = int(binascii.hexlify(romf.read(1))) & 0x04\n\tif ncsdmagic != \"NCSD\":\n\t\tprint(\"! %s is probably not a rom.\" % rom)\n\t\tprint(\" NCSD magic not found.\")\n\t\tromf.close()\n\t\tcontinue\n\tif noconvert:\n\t\tprint(\"- not converting %s (%s) because --noconvert was used\" % (romname, \"decrypted\" if decrypted else \"encrypted\"))\n\t\tif cleanup:\n\t\t\tdocleanup(tid)\n\t\tromf.close()\n\t\tcontinue\n\tif not decrypted:\n\t\tif not os.path.isfile(xorpad):\n\t\t\tprint(\"! %s couldn't be found.\" % xorpad)\n\t\t\tif not genncchinfo:\n\t\t\t\tprint(\" use --gen-ncchinfo with this rom.\")\n\t\t\tncchinfoadd(rom)\n\t\t\tromf.close()\n\t\t\tcontinue\n\n\tdocleanup(tid)\n\n\tprint_v(\"- verifying ExHeader\")\n\tromf.seek(0x4200)\n\texh = romf.read(0x400)\n\txor = \"\"\n\tif not decrypted:\n\t\tprint_v(\"- decrypting ExHeader\")\n\t\txorfile = open(xorpad, \"rb\")\n\t\txor = xorfile.read(0x400)\n\t\txorfile.close()\n\t\txored = \"\"\n\t\tfor byte_f, byte_x in zip(exh, xor):\n\t\t\txored += chr(ord(byte_f) ^ ord(byte_x))\n\t\texh = xored\n\texh_hash = hashlib.sha256(exh).digest()\n\tromf.seek(0x4160)\n\tncch_exh_hash = romf.read(0x20)\n\tif exh_hash != ncch_exh_hash:\n\t\tif decrypted:\n\t\t\tprint(\"! this rom might be corrupt.\")\n\t\telse:\n\t\t\tprint(\"! 
%s is not the correct xorpad, or is corrupt.\" % xorpad)\n\t\t\tif not genncchinfo:\n\t\t\t\tprint(\" try using --gen-ncchinfo again or find the correct xorpad.\")\n\t\t\tncchinfoadd(rom)\n\t\tprint(\" ExHeader SHA-256 hash check failed.\")\n\t\tromf.close()\n\t\tif cleanup:\n\t\t\tdocleanup(tid)\n\t\tcontinue\n\n\tprint(\"- processing: %s (%s)\" % (romname, \"decrypted\" if decrypted else \"encrypted\"))\n\n\t#runcommand([\"3dstool\", \"-xvtf\", \"cxi\", \"work/%s-game-orig.cxi\" % tid, \"--exefs\", \"work/%s-exefs.bin\" % tid, \"--romfs\", \"work/%s-romfs.bin\" % tid, \"--plain\", \"work/%s-plain.bin\" % tid, \"--logo\", \"work/%s-logo.bcma.lz\" % tid])\n\n\tprint_v(\"- patching ExHeader\")\n\texh_list = list(exh)\n\tx = exh_list[0xD]\n\ty = ord(x)\n\tz = y | 2\n\tprint_v(\" offset 0xD of ExHeader:\")\n\tprint_v(\" original: %s\" % hex(y))\n\tprint_v(\" shifted: %s\" % hex(z))\n\tz = chr(z)\n\texh_list[0xD:0xE] = z\n\texh = \"\".join(exh_list)\n\t# there really has to be a better way to do this...\n\tsavesize = str(int(binascii.hexlify(exh[0x1C0:0x1C8][::-1]), 16) / 1024)\n\n\tnew_exh_hash = hashlib.sha256(exh).hexdigest()\n\tromf.seek(0x124)\n\tgamecxi_size = bytes2int(romf.read(0x4)[::-1]) * mu\n\tgamecxi = open(\"work/%s-game-conv.cxi\" % tid, \"wb\")\n\tleft = gamecxi_size\n\n\t# Game Executable CXI\n\tromf.seek(0x4000)\n\tprint_v(\"- extracting Game Executable CXI\")\n\tfor _ in itertools.repeat(0, int(math.floor((gamecxi_size / readsize)) + 1)):\n\t\ttoread = min(readsize, left)\n\t\tgamecxi.write(romf.read(toread))\n\t\tleft -= readsize\n\t\tif left <= 0:\n\t\t\tbreak\n\n\t# Manual CFA\n\tromf.seek(0x128)\n\tmanualcfa_offset = bytes2int(romf.read(0x4)[::-1]) * mu\n\tif manualcfa_offset != 0:\n\t\tromf.seek(0x12C)\n\t\tprint_v(\"- extracting Manual CFA\")\n\t\tmanualcfa_size = bytes2int(romf.read(0x4)[::-1]) * mu\n\t\tromf.seek(manualcfa_offset)\n\t\tmanualcfa = open(\"work/%s-manual.cfa\" % tid, \"wb\")\n\t\tleft = manualcfa_size\n\t\tfor _ in itertools.repeat(0, int(math.floor((manualcfa_size / readsize)) + 1)):\n\t\t\ttoread = min(readsize, left)\n\t\t\tmanualcfa.write(romf.read(toread))\n\t\t\tleft -= readsize\n\t\t\tif left <= 0:\n\t\t\t\tbreak\n\n\t# Download Play child container CFA\n\tromf.seek(0x130)\n\tdlpchildcfa_offset = bytes2int(romf.read(0x4)[::-1]) * mu\n\tif dlpchildcfa_offset != 0:\n\t\tromf.seek(0x134)\n\t\tprint_v(\"- extracting Download Play child container CFA\")\n\t\tdlpchildcfa_size = bytes2int(romf.read(0x4)[::-1]) * mu\n\t\tromf.seek(dlpchildcfa_offset)\n\t\tdlpchildcfa = open(\"work/%s-dlpchild.cfa\" % tid, \"wb\")\n\t\tleft = dlpchildcfa_size\n\t\tfor _ in itertools.repeat(0, int(math.floor((dlpchildcfa_size / readsize)) + 1)):\n\t\t\ttoread = min(readsize, left)\n\t\t\tdlpchildcfa.write(romf.read(toread))\n\t\t\tleft -= readsize\n\t\t\tif left <= 0:\n\t\t\t\tbreak\n\tromf.close()\n\n\tgamecxi.seek(0x160)\n\tgamecxi.write(hashlib.sha256(exh).digest())\n\t# re-encrypt exheader and write the byte at 0x20D\n\tgamecxi.seek(0x20D)\n\tif decrypted:\n\t\tgamecxi.write(exh[0xD:0xE])\n\telse:\n\t\tgamecxi.write(chr(ord(exh[0xD:0xE]) ^ ord(xor[0xD:0xE])))\n\tgamecxi.close()\n\n\tprint_v(\"- building CIA\")\n\tos.chdir(\"work\") # not doing this breaks make_cia's ability to properly include Manual/DLP Child for some reason\n\tcmds = [\"make_cia\", \"-o\", \"%s-game-conv.cia\" % tid, \"--savesize=%s\" % savesize, \"--content0=%s-game-conv.cxi\" % tid, \"--id_0=0\", \"--index_0=0\"]\n\tif manualcfa_offset != 0:\n\t\tcmds.extend([\"--content1=%s-manual.cfa\" % tid, 
\"--index_1=1\"])\n\tif dlpchildcfa_offset != 0:\n\t\tcmds.extend([\"--content2=%s-dlpchild.cfa\" % tid, \"--index_2=2\"])\n\truncommand(cmds)\n\tos.chdir(\"..\")\n\n\t# apparently if the file exists, it will throw an error on Windows\n\tsilentremove(cianame)\n\tos.rename(\"work/%s-game-conv.cia\" % tid, cianame)\n\tif cleanup:\n\t\tdocleanup(tid)\n\n\tprocessedroms += 1\n\nif totalroms == 0:\n\tprint(helptext % (version, (\"current directory\" if xorpad_directory == \"\" else \"'%s'\" % xorpad_directory), (\"current directory\" if output_directory == \"\" else \"'%s'\" % output_directory)))\n\tsys.exit(1)\nelse:\n\tprint(\"* done converting!\")\n\tprint(\" %i out of %i roms processed\" % (processedroms, totalroms))\n\tif genncchinfo and len(ncchinfolist) != 0:\n\t\tprint(\"- saving ncchinfo.bin\")\n\t\tncchinfo = open(\"ncchinfo.bin\", \"wb\")\n\t\tncchinfo.write(\"\\xFF\\xFF\\xFF\\xFF\\x04\\x00\\x00\\xF0\")\n\t\t# this is bad, I know\n\t\tncchinfo.write(binascii.unhexlify(format(len(ncchinfolist), 'x').rjust(8, '0'))[::-1])\n\t\tncchinfo.write(\"\\x00\\x00\\x00\\x00\")\n\t\tfor i in ncchinfolist:\n\t\t\tncchinfo.write(i)\n\t\tncchinfo.close()\n\t\tprint(\"- use Decrypt9 on a 3DS system to generate the xorpads.\")\n\t\tprint(\" place the file at the root or in a folder called \\\"Decrypt9\\\".\")\n\t\tprint(\" view the Decrypt9 README and download releases at\")\n\t\tprint(\" https://github.com/d0k3/Decrypt9WIP\")","sub_path":"3dsconv.py","file_name":"3dsconv.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"76925575","text":"import logging\nimport sys\n\nclass Command(object):\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\nclass Colors(object):\n violet = Command(\"Violet\", 0x00)\n royal_blue = Command(\"Royal Blue\", 0x10)\n baby_blue = Command(\"Baby Blue\", 0x20)\n aqua = Command(\"Aqua\", 0x30)\n mint = Command(\"Mint\", 0x40)\n seafoam_green = Command(\"Seafoam Green\", 0x50)\n green = Command(\"Green\", 0x60)\n lime_green = Command(\"Lime Green\", 0x70)\n yellow = Command(\"Yellow\", 0x80)\n yellow_orange = Command(\"Yellow Orange\", 0x90)\n orange = Command(\"Orange\", 0xA0)\n red = Command(\"Red\", 0xB0)\n pink = Command(\"Pink\", 0xC0)\n fusia = Command(\"Fusia\", 0xD0)\n lilac = Command(\"Lilac\", 0xE0)\n lavendar = Command(\"Lavendar\", 0xF0)\n\nclass PartyModes(object):\n white = Command(\"static white color\", 1)\n white_fade = Command(\"white color (gradual changes)\", 2)\n all_colors = Command(\"all colors (gradual changes)\", 3)\n rgb_fade = Command(\"red/green/blue (gradual changes)\", 4)\n seven_color_jump = Command(\"7 colors (jump changes)\", 5)\n three_color_jump = Command(\"3 colors (jump changes)\", 6)\n rg_jump = Command(\"red/green (jump changes)\", 7)\n rb_jump = Command(\"red/blue (jump changes)\", 8)\n bg_jump = Command(\"blue/green (jump changes)\", 9)\n white_blink = Command(\"white color (frequently blinks)\", 10)\n white_glitter = Command(\"white color (glitters)\", 11)\n red_blink = Command(\"red color (frequently blinks)\", 12)\n red_glitter = Command(\"red color (glitters)\", 13)\n green_blink = Command(\"green color (frequently blinks)\", 14)\n green_glitter = Command(\"green color (glitters)\", 15)\n blue_blink = Command(\"blue color (frequently blinks)\", 16)\n blue_glitter = Command(\"blue color (glitters)\", 17)\n yellow_blink = Command(\"yellow color (frequently blinks)\", 18)\n yellow_glitter = Command(\"yellow 
color (glitters)\", 19)\n circulation = Command(\"circulation mode\", 20)\n\n def __init__(self):\n PartyModes.register_names()\n\n @classmethod\n def register_names(cls):\n if not hasattr(cls, 'names'):\n cls.names = {}\n for attr in vars(cls):\n attr_obj = getattr(cls, attr)\n if isinstance(attr_obj, Command):\n cls.names[attr_obj.value] = attr_obj.name\n\n\nclass RGB:\n def effect(self, effect_name, args=[], effect_options={}):\n effect = sys.modules['wifileds.limitlessled.effects.%s' % effect_name]\n try:\n effect.run(self, *args, **effect_options)\n except AttributeError as e:\n logging.error('Effect \"%s\" failed due to missing lighting attribute: %s' % (effect_name, e))\n pass\n\n def __init__(self, parent):\n self.parent = parent\n self.long_pause = parent.long_pause\n self.short_pause = parent.short_pause\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.colors = Colors()\n self.party_modes = PartyModes()\n\n def all_on(self):\n self.parent.send_command(0x22)\n\n def all_off(self):\n self.parent.send_command(0x21)\n\n def brightness_up(self):\n self.parent.send_command(0x23)\n\n def brightness_down(self):\n self.parent.send_command(0x24)\n\n def mode_up(self):\n self.parent.send_command(0x27)\n self.logger.info(\"Set light to mode up\")\n\n def mode_down(self):\n self.parent.send_command(0x28)\n\n def speed_up(self):\n self.parent.send_command(0x25)\n\n def speed_down(self):\n self.parent.send_command(0x26)\n\n def set_color(self, color):\n self.parent.send_command(0x20, color.value)\n self.logger.info(\"Set light to color \" + color.name)\n\n def set_color_wheel(self, percentage):\n if percentage < 0 or percentage > 1:\n raise ValueError('Wheel percentage should be > 0 and < 1.')\n self.parent.send_command(0x20, chr(int(float(255) * percentage)))\n\n def max_brightness(self):\n for i in range(0, 9):\n self.brightness_up()\n\n def min_brightness(self):\n for i in range(0, 9):\n self.brightness_down()\n\n def max_speed(self):\n for i in range(0, 9):\n self.speed_up()\n\n def min_speed(self):\n for i in range(0, 9):\n self.speed_down()\n\n def white(self):\n for i in range(0, 20):\n self.mode_down()\n self.logger.info(\"Set light mode to \" + PartyModes.names[1] + \" (\" + str(1) + \")\")\n\n def set_mode(self, party_mode):\n self.white()\n for i in range(1, party_mode.value):\n self.mode_up()\n self.logger.info(\"Set light mode to \" + PartyModes.names[i+1] + \" (\" + str(i+1) + \")\")\n","sub_path":"wifileds/limitlessled/rgb.py","file_name":"rgb.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456223764","text":"from PyQt5 import Qt\nimport pyqtgraph as pg\nimport numpy as np\n\n\n\n\"\"\" Widget class that displays graph \"\"\"\n\n\nclass Graph(Qt.QWidget):\n\n def __init__(self):\n super().__init__()\n\n layout = Qt.QVBoxLayout(self) # create layout\n\n self.view = view = pg.PlotWidget() # make a plot area\n view.setBackground('w')\n \n # remove axis from plot\n view.hideAxis('left')\n view.hideAxis('bottom')\n \n layout.addWidget(self.view) # show up plot area\n\n self.line_pen = pg.mkPen(color='m', width=1.5)\n self.trand_line = view.plot(pen=self.line_pen) # create instance of a line\n\n # create instance of one of dots they will be reusable\n self.first_dot_cloud = view.plot(pen=None, symbol='o', symbolPen=None,\n symbolSize=5, symbolBrush='b')\n\n self.second_dot_cloud = view.plot(pen=None, symbol='t', symbolPen=None,\n symbolSize=5, symbolBrush='r')\n \n 
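    # Note on the pattern above: the PlotDataItem handles created in __init__
    # (trand_line, first_dot_cloud, second_dot_cloud) are built once and then
    # only updated in place via setData() in the methods below. In pyqtgraph,
    # updating an existing item is much cheaper than calling view.plot() on
    # every refresh, which would add a brand-new item to the scene each time.
    # Caveat: clear_plot() calls view.clear(), which removes those items from
    # the widget, so later setData() calls on the stored handles draw nothing
    # until the items are re-added. A minimal sketch of re-adding one handle
    # (hypothetical helper, not part of the original file):
    #
    #     def restore_trend_line(self):
    #         self.trand_line = self.view.plot(pen=self.line_pen)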
def clear_plot(self):\n \"\"\" Erase all from the plot, also deleting all displaying objects \"\"\"\n self.view.clear()\n\n def plot_first_dots(self,x=[0], y=[0]):\n \"\"\" Draw dot like graph with dots coord 'x' and 'y' \"\"\"\n self.first_dot_cloud.setData(x, y)\n \n def plot_second_dots(self,x=[0], y=[0]):\n \"\"\" Draw dot like graph with dots coord 'x' and 'y' \"\"\"\n self.second_dot_cloud.setData(x, y)\n\n def plot_first_from_list(self, input_list):\n \"\"\" Plot blue dots \"\"\"\n list_x, list_y = self.separate_coords(input_list)\n\n self.plot_first_dots(list_x, list_y)\n \n def plot_second_from_list(self, input_list):\n \"\"\" Plot red dots \"\"\"\n list_x, list_y = self.separate_coords(input_list)\n\n self.plot_second_dots(list_x, list_y)\n \n def separate_coords(self, input_list):\n \"\"\" Split coords list into two separate lists \"\"\"\n list_x = []\n list_y = []\n for row in input_list:\n list_x.append(float(row[0]))\n list_y.append(float(row[1]))\n\n return list_x, list_y\n\n def plot_line(self, x=[0], y=[0]):\n \"\"\"Draw line graph with dots coord 'x' and 'y'\"\"\"\n self.trand_line.setData(x, y)\n\nif __name__ == \"__main__\":\n app = Qt.QApplication([])\n w = Graph()\n w.show()\n app.exec()\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"255671885","text":"\na=int(input(\"Type in a number: \"))\n\ndef for_mt():\n for i in range(1,a+1):\n for j in range(1,i+1):\n print(\"{}*{}= {}\".format(i,j,i*j), end=\" \")\n print(\" \")\n\ndef while_mt():\n x,y=1,0\n while x<=a:\n while y= max_value:\n max_value = price[i]\n profit += max_value - price[i]\n print(\"#{} {}\".format(tc, profit))","sub_path":"SWEA/Recommand/백만장자 프로젝트.py","file_name":"백만장자 프로젝트.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503103993","text":"import math\r\n\r\n# The rocket class, has the calculations of everything..\r\n\r\n\r\nclass Rocket:\r\n def __init__(self, name, alpha, beta, v_start):\r\n \"\"\"\r\n This class represent a rocket and all of the info about this rocket.\r\n :param name: Type of the rocket.\r\n :param alpha: The alpha degree.\r\n :param beta: The beta degree.\r\n :param v_start: The speed at launch.\r\n \"\"\"\r\n self.name = name\r\n self.alpha = alpha\r\n self.beta = beta\r\n self.v_start = v_start\r\n self.t = self.fix_num(self.calc_time()) # Total time until landing.\r\n self.speed_x, self.speed_y = self.calc_speed()\r\n self.total_destination, self.x_des, self.y_des = self.calc_destination()\r\n\r\n def calc_time(self):\r\n \"\"\"\r\n Calculate the time until the rocket is reaching the ground.\r\n :return: The time.\r\n \"\"\"\r\n return (self.v_start*math.sin(math.radians(self.alpha)))/5\r\n\r\n def calc_speed(self):\r\n \"\"\"\r\n Calculate the different types of speeds.\r\n :return: The speeds.\r\n \"\"\"\r\n return self.fix_num(self.v_start*math.cos(math.radians(self.alpha))),\\\r\n self.fix_num(self.v_start*math.sin(math.radians(self.alpha)))\r\n\r\n def calc_destination(self):\r\n \"\"\"\r\n Calculate the different types of distance.\r\n :return: The destinations.\r\n \"\"\"\r\n d = self.speed_x*self.t / 1000\r\n return self.fix_num(d), self.fix_num(d*math.cos(math.radians(self.beta))),\\\r\n self.fix_num(d*math.sin(math.radians(self.beta)))\r\n\r\n def get_weight(self):\r\n \"\"\"\r\n By type of rocket, return her weight. 
If type doesnt exist return ''.\r\n :return: The weight.\r\n \"\"\"\r\n info = {\r\n \"Qassam\": \"90kg\",\r\n \"9M133\": \"27kg\",\r\n \"M75\": \"90kg\",\r\n \"M302\": \"524kg\"\r\n }\r\n try:\r\n return info[self.name]\r\n except:\r\n print('rocket cant be found')\r\n return ''\r\n\r\n def point_on_map(self, start_point_x, start_point_y, ratio=1.724):\r\n \"\"\"\r\n From all the information about the rocket, calculate where will it land.\r\n :param start_point_x: X coordinate of where the rocket launched.\r\n :param start_point_y: Y coordinate of where the rocket launched.\r\n :param ratio: Ratio between the map in the simulation and real life.\r\n :return: X coordinate and y coordinate of landing.\r\n \"\"\"\r\n end_point_x = start_point_x + self.x_des*ratio\r\n end_point_y = start_point_y - self.y_des*ratio\r\n return end_point_x, end_point_y\r\n\r\n def fix_num(self, num):\r\n \"\"\"\r\n Shorten a number with dot after it.\r\n :param num: The number.\r\n :return: Fixed number.\r\n \"\"\"\r\n num = str(num)\r\n cut = num.find('.') + 3 # the dot plus two other numbers after it.\r\n if cut == 2: # There is no dot.\r\n return int(num)\r\n num += '00' # If for some reason there arent enough\r\n string = ''\r\n for i in range(cut):\r\n string += num[i]\r\n if float(string) == int(float(string)):\r\n return int(string.split('.')[0])\r\n else:\r\n return float(string)\r\n\r\n def get_time(self):\r\n \"\"\"\r\n Calculate by the destination how much people have to go to the shelter.\r\n :return: The time in seconds.\r\n \"\"\"\r\n des = int(self.total_destination)\r\n if des < 13.5:\r\n return 15\r\n elif des < 22:\r\n return 30\r\n elif des < 32:\r\n return 45\r\n elif des < 42:\r\n return 60\r\n else:\r\n return 90\r\n","sub_path":"PythonProject/Rocket_Mode.py","file_name":"Rocket_Mode.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602254122","text":"# n<=300 暗示floyd\r\n# !有多少条边可以删除后不影响任意两点之间的最短距离和图的连通性(即不让任意两个结点之间的最短路变长.)\r\n# !即可以删掉被松弛的边\r\n\r\n# 首先如果某条边连接了a和b两个顶点如果存在另外一条a到b的路径距离小于等于这条边的边权,\r\n# 那么这条边肯是可以删掉的,因为可以用这条路径来代替.\r\n# !Floyd 最短路计数 (Floyd求两点之间的最短路数)\r\n\r\n\r\n# 注意:\r\n# !1. 不用dict 用数组存图/距离会快很多\r\n# !2. 
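# (English gloss of the two performance notes here: 1. storing the graph and
#  the distance matrix in plain nested lists instead of dicts is much faster;
#  2. hand-writing the triple Floyd loop with nested ranges instead of
#  itertools.product is much faster, especially under PyPy.)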
不用product 会快很多\r\n\r\nimport sys\r\nimport os\r\n\r\nsys.setrecursionlimit(int(1e9))\r\ninput = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\r\nMOD = int(1e9 + 7)\r\n\r\n\r\ndef main() -> None:\r\n n, m = map(int, input().split())\r\n\r\n edges = []\r\n dist = [[int(1e20)] * n for _ in range(n)]\r\n for i in range(n):\r\n dist[i][i] = 0\r\n\r\n count = [[0] * n for _ in range(n)] # !最短路计数\r\n for _ in range(m):\r\n u, v, w = map(int, input().split())\r\n u, v = u - 1, v - 1\r\n dist[u][v] = dist[v][u] = w\r\n count[u][v] = count[v][u] = 1\r\n edges.append((u, v, w))\r\n\r\n # pypy3不要用product 会慢很多\r\n for k in range(n):\r\n for i in range(n):\r\n for j in range(n):\r\n cand = dist[i][k] + dist[k][j]\r\n if dist[i][j] == cand:\r\n count[i][j] += count[i][k] * count[k][j]\r\n elif dist[i][j] > cand:\r\n dist[i][j] = cand\r\n count[i][j] = count[i][k] * count[k][j]\r\n\r\n res = 0\r\n for u, v, w in edges:\r\n # !被松弛的边或者没被松弛但最短路不止一条的边 可以删除\r\n if dist[u][v] < w or count[u][v] > 1:\r\n res += 1\r\n print(res)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if os.environ.get(\"USERNAME\", \" \") == \"caomeinaixi\":\r\n while True:\r\n main()\r\n else:\r\n main()\r\n","sub_path":"7_graph/带权图最短路和最小生成树/floyd多源/E - Edge Deletion-最短路计数.py","file_name":"E - Edge Deletion-最短路计数.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"609112805","text":"\"\"\"\n Capstone Project. Code written by PUT_YOUR_NAME_HERE.\n Fall term, 2018-2019.\n\"\"\"\n\nimport rosebotics as rb\nimport time\n\n\ndef main():\n \"\"\" Runs YOUR specific part of the project \"\"\"\n advanced_line_following()\n\n\ndef advanced_line_following():\n robot = rb.Snatch3rRobot()\n drivesystem = robot.drive_system\n colorsensor = robot.color_sensor\n\n drivesystem.start_moving(50,50)\n\n while True:\n if colorsensor.get_reflected_intensity() > 10:\n drivesystem.stop_moving()\n direction = test_water(robot,drivesystem,colorsensor)\n if direction == 0:\n print(\"There is no more line\")\n drivesystem.stop_moving()\n else:\n drivesystem.spin_in_place_degrees(direction)\n drivesystem.start_moving(50,50)\n\n\ndef test_water(robot,drivesystem,colorsensor):\n a = 0\n b = 0\n c = 0\n d = 0\n e = 0\n f = 0\n list = [a,b,c,d,e,f]\n for k in range(len(list)):\n dir = -30 * (k+1)\n if dir < -90:\n dir = -dir\n\n drivesystem.spin_in_place_degrees(dir)\n drivesystem.go_straight_inches(10,50)\n if (colorsensor.get_reflected_intensity() < 20):\n list[k] = 1\n else:\n list[k] = 0\n drivesystem.go_straight_inches(10, -50)\n drivesystem.spin_in_place_degrees(-dir)\n\n # decision\n direction = 0\n for k in range(len(list)):\n if list[k] == 1:\n if k < 3:\n direction = direction - 30\n else:\n direction = direction + 30\n\n if direction == 0:\n return direction\n\n if direction > 30 or direction < -30:\n if direction == 90:\n direction = 60\n elif direction == 60:\n direction = 45\n elif direction == 30:\n direction = 30\n elif direction == -90:\n direction = -60\n elif direction == -60:\n direction = -45\n elif direction == -30:\n direction = -30\n\n return direction\nmain()\n","sub_path":"src/m4.py","file_name":"m4.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"34195346","text":"\"\"\"Test ClowderRepo class\"\"\"\n\nimport os\nimport sys\nimport unittest\n\nfrom clowder.clowder_repo import ClowderRepo\n\n\nclass ClowderRepoTest(unittest.TestCase):\n 
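    # Note: current_file_path and cats_example_path below are class
    # attributes, evaluated once at class-definition time; the __main__ block
    # at the bottom of this file may override cats_example_path from sys.argv
    # before unittest.main() runs, which is why the paths are not recomputed
    # per test inside setUp().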
\"\"\"clowder_repo test subclass\"\"\"\n\n current_file_path = os.path.dirname(os.path.realpath(__file__))\n cats_example_path = os.path.abspath(os.path.join(current_file_path, '..', '..', 'examples', 'cats'))\n\n def setUp(self):\n\n self.clowder_repo = ClowderRepo()\n self.clowder_yaml_path = os.path.join(self.cats_example_path, 'clowder.yaml')\n\n def test_member_variables(self):\n \"\"\"Test the state of all project member variables initialized\"\"\"\n\n clowder_path = os.path.join(self.cats_example_path, '.clowder')\n self.assertEqual(self.clowder_repo.clowder_path, clowder_path)\n\n def test_link(self):\n \"\"\"Test link() method\"\"\"\n\n self.clowder_repo.link()\n self.assertEqual(os.readlink(self.clowder_yaml_path),\n os.path.join(self.cats_example_path, '.clowder', 'clowder.yaml'))\n\n def test_link_version(self):\n \"\"\"Test link() method\"\"\"\n\n self.clowder_repo.link('v0.1')\n version_path = os.path.join('.clowder', 'versions', 'v0.1', 'clowder.yaml')\n self.assertEqual(os.readlink(self.clowder_yaml_path), os.path.join(self.cats_example_path, version_path))\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n ClowderRepoTest.cats_example_path = sys.argv.pop()\n unittest.main()\n","sub_path":"test/unittests/test_clowder_repo.py","file_name":"test_clowder_repo.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"340236075","text":"#!/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# @Author: Charramma(Huang)\r\n# @E-mail: huang.zyn@qq.com\r\n# @Date: 2020/11/21 23:26\r\n# @File: palindrome-number.py\r\n\r\nclass Solution(object):\r\n def isPalindrome(self, x):\r\n x = str(x)\r\n if x == x[::-1]:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n s = Solution()\r\n result = s.isPalindrome(121)\r\n print(result)\r\n","sub_path":"palindrome-number.py","file_name":"palindrome-number.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"570173976","text":"from rest_framework import generics, status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.shortcuts import redirect\nfrom .models import Data\nfrom .serializers import DataSerializer\nimport gspread\n\n# Create your views here.\nclass DataList(generics.ListCreateAPIView):\n\n queryset = Data.objects.all()\n serializer_class = DataSerializer\n\nclass DataDetail(generics.RetrieveUpdateDestroyAPIView):\n\n queryset = Data.objects.all()\n serializer_class = DataSerializer\n\n@api_view(['GET'])\ndef DataLoad(request, key):\n\n gc = gspread.service_account(filename='credentials.json')\n try:\n sh = gc.open_by_key(key)\n worksheet = sh.sheet1\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n res = worksheet.get_all_records()\n for row in res:\n\n # Check if employee has not been added by checking employeeID \n if not Data.objects.filter(employeeId=row['Employee ID']):\n employee = Data(firstName=row['First Name'],lastName=row['Last Name'],employeeId=row['Employee ID'],city=row['City'])\n employee.save()\n\n data = Data.objects.all()\n serializer = DataSerializer(data, many=True)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n# 
13yyd8s008LlRn0tn6LC5moH1fcBELBkYw2THX6gjdHU","sub_path":"employee_data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"299192127","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom scrapy.shell import inspect_response\n\n\nclass SaarlandSpider(scrapy.Spider):\n name = \"saarland\"\n # allowed_domains = [\"www.saarland.de/4526.htm\"]\n start_urls = ['http://www.saarland.de/4526.htm']\n\n def parse(self, response):\n for link in response.css(\".relatedinfo a ::attr(href)\").extract():\n yield scrapy.Request(response.urljoin(link), callback=self.parse_list)\n\n def parse_list(self, response):\n for link in response.css(\".contentteaserlist_frame a ::attr(href)\").extract():\n yield scrapy.Request(response.urljoin(link), callback=self.parse_list)\n\n schools = response.css(\".boxpadding10\")\n for school in schools:\n # inspect_response(response, self)\n data = {}\n text_content = school.css(\"::text\").extract()\n\n data['name'] = school.css(\"h2 ::text\").extract_first()\n\n for index, line in enumerate(text_content):\n if re.match(\"^\\d+ \\w+\", line):\n data['zip'] = line.strip()\n data['street'] = text_content[index-1].strip()\n\n data['email'] = school.css(\".link_email a ::attr(href)\").extract_first()\n data['website'] = school.css(\".link_external a ::attr(href)\").extract_first()\n\n data['director'] = school.css(\"::text\").re_first(\"Schulleiter(?:[ /]?in)?:\\s(.*)\")\n data['telephone'] = school.css(\"::text\").re_first(\"Tel(?:[\\.:]+)?\\s([\\(\\) 0-9]+)\")\n data['fax'] = school.css(\"::text\").re_first(\"Fax(?:[\\.:]+)?\\s([\\(\\) 0-9]+)\")\n\n yield data","sub_path":"jedeschule/spiders/saarland.py","file_name":"saarland.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"449749304","text":"from io import StringIO\r\nfrom datetime import datetime, timedelta\r\nimport pandas as pd\r\nimport powerdash_info\r\nimport requests\r\nimport matplotlib.pyplot as plt\r\nimport time as ptime\r\nimport json \r\nimport urllib.request\r\nfrom wunderground_response import weatherResponse\r\n\r\ndef query_powerdash_recent(elapsed_min, board_name):\r\n\telapsed = (60*1000)*elapsed_min\r\n\tpayload = {'elapsed': elapsed, 'dgm': powerdash_info.powerdash_name_to_dgm[board_name],\r\n\t 'format': 'csv'}\r\n\tdata = \trequests.get(url=powerdash_info.powerdash_base_url + \"/recent\", params=payload)\r\n\tif data == \"\":\r\n\t\treturn None\r\n\treturn StringIO(data.text)\r\n\r\ndef get_prediction(weather_data):\r\n\tDATE = weather_data[0]\r\n\tDB = weather_data[1]\r\n\tWB = weather_data[2]\r\n\tHour = weather_data[3]\r\n\tMinute = weather_data[4]\r\n\tDay = weather_data[5]\r\n\tWeekend = weather_data[6]\r\n\tDateMonth = weather_data[7]\r\n\tDateDay = weather_data[8]\r\n\r\n\ttestDate = \"{:%Y-%m-%dT%H:%M:%S}\".format(datetime.now())\r\n\ttest_data = [ \"100000000\", testDate, str(int(DB)), str(int(WB)), str(Hour), str(Minute), \r\n\t\t\t\t\tstr(Day), str(Weekend), str(DateMonth), str(DateDay), \"0\" ]\r\n\t# test_data2 = ['3/20/2018 6:43:03 PM', '40', '32', '18', '1063', '2', '0', '3', '20', '0']\r\n\t# test_data3 = ['3/20/2018 8:43:03 PM', '36', '32', '20', '1063', '2', '0', '3', '20', '0']\r\n\r\n\tdata = {\r\n\t \"Inputs\": {\r\n\t \"input1\": {\r\n\t \"ColumnNames\": [\"Column 0\", \"time\", \"DB\", \"WB\", \"Hour\", \"Minute\", \"Day\", \"Weekend\", \r\n\t 
\t\t\t\t\"DateMonth\", \"DateDay\", \"Utility Subtotal\"],\r\n\t \"Values\": [ \r\n\t test_data \r\n\t # test_data2, \r\n\t # test_data3\r\n\t ]\r\n\t }, \r\n\t },\r\n\t}\r\n\r\n\tbody = str.encode(json.dumps(data))\r\n\turl = 'https://ussouthcentral.services.azureml.net/workspaces/791b6dabd0fd4b96ba1f7010bdd78f34/services/36b3b2c080774dd285f80a9e5cd944fe/execute?api-version=2.0&details=true'\r\n\tapi_key = 'ZEhISw58bTm2tnM4Nxy6vxwtQQjVDbpKaPy2CdZym+Z77Tn4etaQCzcHa5biDezXA7bC+wRGUSfkqh/YfRJX9Q==' # Replace this with the API key for the web service\r\n\theaders = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}\r\n\treq = urllib.request.Request(url, body, headers) \r\n\ttry:\r\n\t response = urllib.request.urlopen(req)\r\n\t # encoding = urllib.request.get_content_charset()\r\n\t # result = json.loads(response.read().decode(encoding))\r\n\t # encoding = urllib.request.headers.get_content_charset()\r\n\t # result = response.read().decode(encoding)\r\n\t result = response.read().decode('utf8').replace(\"'\", '\"')\r\n\t # print(result)\r\n\t result = json.loads(result)\r\n\t # result = json.dumps(result, indent=2)\r\n\t # print(result['Results'])\r\n\t # print(result['Results'])\r\n\t results = result['Results']\r\n\t # print( results['output1'] )\r\n\t for output in results:\r\n\t \t# print(results[output]['value']['Values'])\r\n\t \treturn results[output]['value']['Values'][0]\r\n\r\n\r\n\r\n\texcept urllib.error.HTTPError as error:\r\n\t print(\"The request failed with status code: \" + str(error.code))\r\n\t print(error.info())\r\n\r\nif __name__ == '__main__':\r\n\t# print(query_powerdash_recent(elapsed_min=2, board_name='overall utilities'))\r\n\twhile(True):\r\n\t\tweather_data = weatherResponse()\r\n\t\tprint(get_prediction(weather_data))\r\n\t\tptime.sleep(5)\r\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246414593","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 19 10:45:06 2018\n\n@author: zhulifu\n\"\"\"\n\nimport cv2\nimport numpy as np\nK = 1\n\ndef chuli1(img,img_out):\n img1 = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n _ ,img_out = cv2.threshold(img1,150,255,cv2.THRESH_BINARY)\n return img_out\n\ndef chuli2(img1,img1_out):\n global K\n green = (0,215,0)\n cv2.line(img1,(200,20),(200,400),green,8)\n \n (centerx,centery) = (img1.shape[1]//2,img1.shape[0]//2)\n white = (221,19,23)\n for r in range(0,175,25):\n cv2.circle(img1,(centerx,centery),r,white,4)\n img1_out = img1\n K = 20\n return img1_out\n \n\ncap = cv2.VideoCapture(0)\nwhile(1):\n ret,frame = cap.read()\n frame_out = np.array(frame) ###########关键定义成numpy数组######\n# frame_out_out = np.array(frame)\n img3 = chuli1(frame,frame_out)\n img4 = chuli2(frame_out,frame_out)\n KK = 10*K\n print(\"KK\")\n cv2.imshow(\"frame\",img4)\n if cv2.waitKey(30) & 0xff == 32:\n break\ncap.release()\ncv2.destroyAllWindows()\n\n\n\n\n\n","sub_path":"python_tuxianghanshuchuli.py","file_name":"python_tuxianghanshuchuli.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"341263023","text":"import re\nfrom datetime import datetime \n\n# Use regular expression to validate email.\ndef Email(email):\n if len(email) > 7:\n if re.match(\"^.+\\\\@(\\\\[?)[a-zA-Z0-9\\\\-\\\\.]+\\\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\\\]?)$\", email) != None:\n return 1\n return 0\n\ndef 
Date(year, month, day):\n try:\n datetime(int(year), int(month), int(day))\n return 1\n except:\n return 0\n \ndef Datetime(year, month, day, hour, minute, second):\n try:\n thisDatetime = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n return 1 if thisDatetime < datetime.today() else 0\n except:\n return 0\n\n# Validate credit card no. and csc no.\ndef CreditCard(num):\n count = 0\n while count < len(num):\n count+=1\n if len(num) == 16:\n return 1\n else:\n return 0\n\ndef CSC(num):\n checker=['1','2','3','4','5','6','7','8','9','0']\n if len(num)!=3:\n return 0\n else:\n if num[0] not in checker:\n return 0\n elif num[1] not in checker:\n return 0\n elif num[2] not in checker:\n return 0\n else:\n return 1\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"651621969","text":"# Created by PyCharm Pro Edition \r\n# User: Kaushik Talukdar \r\n# Date: 30-03-17 \r\n# Time: 11:42 PM\r\n\r\n# check for equality\r\n\r\ncars = [\"bmw\", \"koenigsegg\", \"audi\", \"ferrari\"]\r\n\r\nfor car in cars:\r\n if(car==\"koenigsegg\"):\r\n print(car.upper())\r\n else:\r\n print(car.title())\r\n\r\n# comparison is case sensitive\r\n# \"bmw\" & \"Bmw\" is not same","sub_path":"4. If Statements/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"280131922","text":"#!/usr/bin/env python\n\n# Copyright 2019 Banco Santander S.A.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# -*- coding: utf-8 -*-\n\nfrom tkinter import *\nfrom tkinter import ttk, _setit\nfrom PIL import ImageTk, Image\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nimport json\nimport uuid\nimport base64\nimport time\nimport requests\n\nfrom tkinter import messagebox as mbox\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nimport os\nfrom ecpy.curves import Curve, Point\nfrom ecpy.keys import ECPublicKey, ECPrivateKey\n\nimport sys\nsys.path.append('../')\nfrom dao.dao import getAll, setOne, setMultiple, popOne, getOne\nfrom utilities.GUI_Utilities import reloadOptionMenu, createIdsAndString\nfrom utilities.communicationToRPC import rpcCall, apiCall\n\nclass App():\n def __init__(self):\n global root\n global Credential_list, credentialSelection, credential_menu\n global Request_list, requestSelection, request_menu\n global Response_list, responseSelection, response_menu\n global Lock_key_list, lock_key_Selection, lock_key_menu\n global bankPrivateECKey, bankPublicECKey, compressedPublicECKey, cv\n\n root = Tk()\n root.geometry('330x540')\n\n root.configure(bg='red2')\n root.title('Issuer credential app')\n\n cv = Curve.get_curve(\"Ed25519\")\n g = cv.generator\n p = cv.field\n q = cv.order\n bankPrivateECKey = 8922796882388619604127911146068705796569681654940873967836428543013949233636\n bankPublicECKey = 
cv.mul_point(bankPrivateECKey, g)\n compressedPublicECKey = cv.encode_point(bankPublicECKey).hex()\n\n Credential_list = [\n \"\"\n ]\n\n Request_list = [\n \"\"\n ]\n\n Response_list = [\n \"\"\n ]\n\n Lock_key_list = [\n \"\"\n ]\n\n\n b0 = ttk.Button(\n root, text=\"Retrieve credential request\",\n command=self.requestRetrieve)\n b0.grid(row=1, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n requestSelection = StringVar(root)\n requestSelection.set(Request_list[0]) # default value\n\n request_menu = OptionMenu(root, requestSelection, *Request_list)\n request_menu.grid(row=2, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n b1 = ttk.Button(\n root, text=\"Generate credential\",\n command=self.generateCredential)\n b1.grid(row=3, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n credentialSelection = StringVar(root)\n credentialSelection.set(Credential_list[0]) # default value\n\n credential_menu = OptionMenu(\n root, credentialSelection, *Credential_list)\n credential_menu.grid(row=4, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n b2 = ttk.Button(\n root, text=\"Encrypt credential on SGX\",\n command=self.encryptOnSgx)\n b2.grid(row=5, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n responseSelection = StringVar(root)\n responseSelection.set(Response_list[0]) # default value\n\n response_menu = OptionMenu(\n root, responseSelection, *Response_list)\n response_menu.grid(row=6, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n b3 = ttk.Button(\n root, text=\" Send credential to customer \",\n command=self.sendCredential)\n b3.grid(row=7, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n b4 = ttk.Button(\n root, text=\"Retrieve unlock request\",\n command=self.retrieveLockKeys)\n b4.grid(row=8, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n lock_key_Selection = StringVar(root)\n lock_key_Selection.set(Lock_key_list[0]) # default value\n\n lock_key_menu = OptionMenu(\n root, lock_key_Selection, *Lock_key_list)\n lock_key_menu.grid(row=9, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n b4 = ttk.Button(\n root, text=\"Retrieve and send unlock key\",\n command=self.sendUnlockKey)\n b4.grid(row=10, sticky='ew', pady=(11, 7), padx=(25, 0))\n\n img_logo = ImageTk.PhotoImage(Image.open(\n \"./images/santander-logo-13.png\"))\n panel_logo_1 = Label(root, image=img_logo, borderwidth=0)\n panel_logo_1.grid(row=11, sticky=S, pady=(10, 0))\n\n\n plain_credential_list = getAll(\"credentials_issuer\", \"plain_credentials\")\n\n _, usable_ids = createIdsAndString(plain_credential_list, True, \"Type\", \"Name\", \" for \", subName=\"Credential\")\n reloadOptionMenu(credentialSelection, credential_menu, usable_ids)\n\n enc_credential_list = getAll(\"credentials_issuer\", \"encrypted_credentials\")\n\n _, usable_ids = createIdsAndString(enc_credential_list, True, \"Type\", \"Name\", \" for \", subName=\"Credential\", endingLabel=\"(encrypted)\")\n reloadOptionMenu(responseSelection, response_menu, usable_ids)\n\n list_waiting_requests = getAll(\"credentials_request\", \"request\")\n \n _, usable_ids = createIdsAndString(list_waiting_requests, False, \"type\", \"name\", \" for \")\n reloadOptionMenu(requestSelection, request_menu, usable_ids)\n\n list_waiting_lock_keys = getAll(\"lock_keys_issuer\", \"lock_keys\")\n \n _, usable_ids = createIdsAndString(list_waiting_lock_keys, False, \"key\", \"DID\", \" for \")\n reloadOptionMenu(lock_key_Selection, lock_key_menu, usable_ids)\n\n root.mainloop()\n\n def requestRetrieve(self):\n global Request_list, requestSelection, request_menu\n\n pendingRequests_json = rpcCall(\"pendingRequests\")\n\n 
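        # Flow of this handler: the rpcCall above pulls the pending-request
        # queue from the remote endpoint; setMultiple() below persists those
        # entries in the local "credentials_request" store, and the rest of
        # the method rebuilds the Tk OptionMenu from the merged list via
        # createIdsAndString() and reloadOptionMenu().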
setMultiple(\"credentials_request\", \"request\", pendingRequests_json[\"result\"][\"request\"]) \n\n list_waiting_requests = pendingRequests_json[\"result\"][\"request\"]\n complete_list_requests = getAll(\"credentials_request\", \"request\")\n\n aux_str, _ = createIdsAndString(list_waiting_requests, False, \"type\", \"name\", \" for \")\n if aux_str == \"\":\n aux_str = \"No requests pending\"\n\n else:\n _, usable_ids = createIdsAndString(complete_list_requests, False, \"type\", \"name\", \" for \")\n reloadOptionMenu(requestSelection, request_menu, usable_ids)\n\n mbox.showinfo(\"Result\", \"Pending requests retrieved\")\n\n def generateCredential(self):\n global plain_credential_list\n global Credential_list, credentialSelection, credential_menu\n global Request_list, requestSelection, request_menu\n\n requestPosition = requestSelection.get()\n position = int(requestPosition.split(':')[0])\n\n credential_request = json.dumps(popOne(\"credentials_request\", \"request\", position))\n list_waiting_req_memory = getAll(\"credentials_request\", \"request\")\n\n _, usable_ids_req = createIdsAndString(list_waiting_req_memory, False, \"type\", \"name\", \" for \")\n reloadOptionMenu(requestSelection, request_menu, usable_ids_req)\n\n res_json = apiCall(\"issue3\", credential_request)\n\n res_str = json.dumps(res_json)\n\n setOne(\"credentials_issuer\", \"plain_credentials\", res_str)\n plain_credential_list = getAll(\"credentials_issuer\", \"plain_credentials\")\n\n aux_str, usable_ids = createIdsAndString(plain_credential_list, True, \"Type\", \"Name\", \" for \", subName=\"Credential\")\n \n if aux_str == \"\":\n aux_str = \"No credentials loaded\"\n\n else:\n reloadOptionMenu(credentialSelection, credential_menu, usable_ids)\n aux_str = \"Credential generated\"\n\n RSA_key = res_json[\"Subject Public key\"]\n RSA_key_Shortened = RSA_key[:40] + \"....\" + RSA_key[-40:]\n res_json[\"Subject Public key\"] = RSA_key_Shortened\n\n mbox.showinfo(\"Result\", json.dumps(res_json, indent=4))\n\n def encryptOnSgx(self):\n global Response_list, responseSelection, response_menu\n global Credential_list, credentialSelection, credential_menu\n\n credentialPosition = credentialSelection.get()\n position = int(credentialPosition.split(':')[0])\n\n data = popOne(\"credentials_issuer\", \"plain_credentials\", position)\n data_json = json.loads(data)\n print(compressedPublicECKey)\n data_json[\"Issuer public key\"] = compressedPublicECKey\n data = json.dumps(data_json)\n plain_credential_list = getAll(\"credentials_issuer\" ,\"plain_credentials\")\n req_json = res_json = apiCall(\"submit3\", data)\n req_str = json.dumps(req_json)\n\n _, usable_ids_plain = createIdsAndString(plain_credential_list, True, \"Type\", \"Name\", \" for \", subName=\"Credential\")\n reloadOptionMenu(credentialSelection, credential_menu, usable_ids_plain)\n\n setOne(\"credentials_issuer\", \"encrypted_credentials\", req_str)\n\n enc_credential_list = getAll(\"credentials_issuer\" ,\"encrypted_credentials\")\n\n aux_str, usable_ids = createIdsAndString(enc_credential_list, True, \"Type\", \"Name\", \" for \", subName=\"Credential\", endingLabel=\"(encrypted)\")\n \n if aux_str == \"\":\n aux_str = \"No credentials loaded\"\n\n else:\n reloadOptionMenu(responseSelection, response_menu, usable_ids)\n aux_str = \"Credential encrypted on SGX\"\n\n RSA_key = req_json[\"Subject Public key\"]\n RSA_key_Shortened = RSA_key[:40] + \"....\" + RSA_key[-40:]\n req_json[\"Subject Public key\"] = RSA_key_Shortened\n\n lock_key = 
req_json[\"Credential\"][\"lock key\"][\"value\"]\n lock_key_shortened = lock_key[:8] + \"....\" + lock_key[-8:]\n req_json[\"Credential\"][\"lock key\"][\"value\"] = lock_key_shortened\n\n issuer_signature = req_json[\"IssuerSignature\"]\n issuer_signature_short = issuer_signature[:8] + \"....\" + issuer_signature[-8:]\n req_json[\"IssuerSignature\"] = issuer_signature_short\n\n mbox.showinfo(\"Result\", json.dumps(req_json, indent=4))\n\n def sendCredential(self):\n global Response_list, responseSelection, response_menu\n\n enc_credentialPosition = responseSelection.get()\n position = int(enc_credentialPosition.split(':')[0])\n\n enc_credential = popOne(\"credentials_issuer\", \"encrypted_credentials\", position)\n enc_cred_list = getAll(\"credentials_issuer\", \"encrypted_credentials\")\n\n _, usable_ids_enc = createIdsAndString(enc_cred_list, True, \"Type\", \"Name\", \" for \", subName=\"Credential\", endingLabel=\"(encrypted)\")\n reloadOptionMenu(responseSelection, response_menu, usable_ids_enc)\n\n print(enc_credential)\n pendingRequests_json = rpcCall(\"credential\", enc_credential)\n print(pendingRequests_json)\n\n mbox.showinfo(\"Result\", \"Credential sent to user\")\n\n def retrieveLockKeys(self):\n global Lock_key_list, lock_key_Selection, lock_key_menu\n\n pendingLockKeys_json = rpcCall(\"pendingLockKeys\")\n\n setMultiple(\"lock_keys_issuer\", \"lock_keys\", pendingLockKeys_json[\"result\"][\"lock_keys\"]) \n\n list_waiting_lock_keys = pendingLockKeys_json[\"result\"][\"lock_keys\"]\n complete_list_lock_keys = getAll(\"lock_keys_issuer\", \"lock_keys\")\n\n aux_str, _ = createIdsAndString(list_waiting_lock_keys, False, \"key\", \"DID\", \" for \")\n if aux_str == \"\":\n aux_str = \"No requests pending\"\n\n else:\n _, usable_ids = createIdsAndString(complete_list_lock_keys, False, \"key\", \"DID\", \" for \")\n reloadOptionMenu(lock_key_Selection, lock_key_menu, usable_ids)\n\n mbox.showinfo(\"Result\", \"Unlock keys requests retrieved\")\n\n def sendUnlockKey(self):\n global Lock_key_list, lock_key_Selection, lock_key_menu\n global bankPrivateECKey, bankPublicECKey, compressedPublicECKey, cv\n\n\n lock_keyPosition = lock_key_Selection.get()\n position = int(lock_keyPosition.split(':')[0])\n\n lock_key_json = popOne(\"lock_keys_issuer\", \"lock_keys\", position)\n lock_keys_list = getAll(\"lock_keys_issuer\", \"lock_keys\")\n\n lock_key_compressed = lock_key_json[\"key\"]\n lock_key_x, lock_key_y = self.uncompressKey(lock_key_compressed)\n\n eph_pub_key = Point(lock_key_x,lock_key_y,cv)\n\n unlock_key = cv.mul_point(bankPrivateECKey, eph_pub_key)\n comp_key = cv.encode_point(unlock_key).hex()\n print(\"Hola\")\n print(unlock_key)\n print(comp_key)\n\n pendingRequests_json = rpcCall(\"unlockKey\", {\"DID\": \"7524\", \"unlock_key\": comp_key, \"lock_key\": lock_key_compressed})\n\n _, usable_ids = createIdsAndString(lock_keys_list, False, \"key\", \"DID\", \" for \")\n reloadOptionMenu(lock_key_Selection, lock_key_menu, usable_ids)\n\n mbox.showinfo(\"Result\", \"Unlock key sent\")\n\n def uncompressKey(self,compressedKey):\n compKey_bytes = bytes.fromhex(compressedKey)\n compKey_sign = compKey_bytes[31] & 128\n\n compKey_barray = bytearray(compKey_bytes)\n\n compKey_barray[31] &= 127\n compKey_barray.reverse()\n\n comp_key_rev = bytes(compKey_barray)\n comp_key_int = int.from_bytes(comp_key_rev, \"big\")\n\n recoveredXCoord = cv.x_recover(comp_key_int, (compKey_sign>0))\n return recoveredXCoord, comp_key_int\n\ndef main():\n App()\n\n return 0\n\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"issuerApp/Issuer_GUI.py","file_name":"Issuer_GUI.py","file_ext":"py","file_size_in_byte":13587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"218066396","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2011 Piston Cloud Computing, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"SQLAlchemy models for balancer data.\"\"\"\n\nimport datetime\nimport uuid\n\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy import (Column, ForeignKey, Integer, String, Boolean,\n DateTime)\n\nfrom balancer.db.base import Base, DictBase, JsonBlob\n\n\ndef create_uuid():\n return uuid.uuid4().hex\n\n\nclass Device(DictBase, Base):\n \"\"\"Represents a load balancer appliance.\"\"\"\n\n __tablename__ = 'device'\n id = Column(String(32), primary_key=True, default=create_uuid)\n name = Column(String(255))\n type = Column(String(255))\n version = Column(String(255))\n ip = Column(String(255))\n port = Column(Integer)\n user = Column(String(255))\n password = Column(String(255))\n extra = Column(JsonBlob())\n\n\nclass LoadBalancer(DictBase, Base):\n \"\"\"Represents an instance of load balancer applience for a tenant.\"\"\"\n\n __tablename__ = 'loadbalancer'\n id = Column(String(32), primary_key=True, default=create_uuid)\n device_id = Column(String(32), ForeignKey('device.id'))\n name = Column(String(255))\n algorithm = Column(String(255))\n protocol = Column(String(255))\n status = Column(String(255))\n tenant_id = Column(String(255))\n created_at = Column(DateTime, default=datetime.datetime.utcnow,\n nullable=False)\n updated_at = Column(DateTime, default=datetime.datetime.utcnow,\n onupdate=datetime.datetime.utcnow,\n nullable=False)\n deployed = Column(String(40))\n extra = Column(JsonBlob())\n\n device = relationship(Device,\n backref=backref('loadbalancers', order_by=id),\n uselist=False)\n\n\nclass ServerFarm(DictBase, Base):\n \"\"\"Represents a server farm.\"\"\"\n\n __tablename__ = 'serverfarm'\n id = Column(String(32), primary_key=True, default=create_uuid)\n lb_id = Column(String(32), ForeignKey('loadbalancer.id'))\n name = Column(String(255))\n type = Column(String(255))\n status = Column(String(255))\n deployed = Column(String(40))\n extra = Column(JsonBlob())\n\n loadbalancer = relationship(LoadBalancer,\n backref=backref('serverfarms', order_by=id),\n uselist=False)\n\n\nclass VirtualServer(DictBase, Base):\n \"\"\"Represents a Virtual IP.\"\"\"\n\n __tablename__ = 'virtualserver'\n id = Column(String(32), primary_key=True, default=create_uuid)\n sf_id = Column(String(32), ForeignKey('serverfarm.id'))\n lb_id = Column(String(32), ForeignKey('loadbalancer.id'))\n name = Column(String(255))\n address = Column(String(255))\n mask = 
Column(String(255))\n port = Column(String(255))\n status = Column(String(255))\n deployed = Column(String(40))\n extra = Column(JsonBlob())\n\n serverfarm = relationship(ServerFarm,\n backref=backref('virtualservers', order_by=id),\n uselist=False)\n loadbalancer = relationship(LoadBalancer,\n backref=backref('loadbalancers', order_by=id),\n uselist=False)\n\n\nclass Server(DictBase, Base):\n \"\"\"Represents a real server.\"\"\"\n\n __tablename__ = 'server'\n id = Column(String(32), primary_key=True, default=create_uuid)\n sf_id = Column(String(32), ForeignKey('serverfarm.id'))\n name = Column(String(255))\n type = Column(String(255))\n address = Column(String(255))\n port = Column(String(255))\n weight = Column(Integer)\n status = Column(String(255))\n parent_id = Column(Integer)\n deployed = Column(String(40))\n vm_id = Column(Integer)\n extra = Column(JsonBlob())\n\n serverfarm = relationship(ServerFarm,\n backref=backref('servers', order_by=id),\n uselist=False)\n\n\nclass Probe(DictBase, Base):\n \"\"\"Represents a health monitoring.\"\"\"\n\n __tablename__ = 'probe'\n id = Column(String(32), primary_key=True, default=create_uuid)\n sf_id = Column(String(32), ForeignKey('serverfarm.id'))\n name = Column(String(255))\n type = Column(String(255))\n deployed = Column(String(40))\n extra = Column(JsonBlob())\n\n serverfarm = relationship(ServerFarm,\n backref=backref('probes', order_by=id),\n uselist=False)\n\n\nclass Sticky(DictBase, Base):\n \"\"\"Represents a persistent session.\"\"\"\n\n __tablename__ = 'sticky'\n id = Column(String(32), primary_key=True, default=create_uuid)\n sf_id = Column(String(32), ForeignKey('serverfarm.id'))\n name = Column(String(255))\n type = Column(String(255))\n deployed = Column(String(40))\n extra = Column(JsonBlob())\n\n serverfarm = relationship(ServerFarm,\n backref=backref('stickies', order_by=id),\n uselist=False)\n\n\nclass Predictor(DictBase, Base):\n \"\"\"Represents a algorithm of selecting server.\"\"\"\n\n __tablename__ = 'predictor'\n id = Column(String(32), primary_key=True, default=create_uuid)\n sf_id = Column(String(32), ForeignKey('serverfarm.id'))\n type = Column(String(255))\n deployed = Column(String(40))\n extra = Column(JsonBlob())\n\n serverfarm = relationship(ServerFarm,\n backref=backref('predictors', order_by=id),\n uselist=False)\n\n\ndef register_models(engine):\n \"\"\"Create tables for models.\"\"\"\n\n Base.metadata.create_all(engine)\n","sub_path":"balancer/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209352277","text":"import json, sys, os\n\ntry:\n if(os.path.isfile(\"path_constants.json\")):\n f = open(\"path_constants.json\", \"r\")\n elif(os.path.isfile(os.path.dirname(__file__) + \"/path_constants.json\")):\n f = open(os.path.dirname(__file__) + \"/path_constants.json\", \"r\")\n else:\n raise IOError(\"File with constants needs to be either in the working directory or in the same directory as the file currently being executed.\")\nexcept Exception as exc:\n raise IOError(\"Problem loading file containing path and filename constants: %s\" %exc)\nelse:\n try:\n constants = json.load(f)\n except Exception as exc:\n raise ImportError(\"Problem interpreting json file containing path and filename constants: %s\" % exc)\n finally:\n f.close()\n \n\npython_binary = constants[\"paths\"][\"python_binary\"] #The location of the python executable for MATLAB to communicate with python 
objects (specifically FS.py)\ninstallation = constants[\"paths\"][\"installation\"] #The roots directory used with all file operations, e.g. saving files for editing and conversion etc..\nmatlab_files = constants[\"paths\"][\"matlab_files\"] #Auxiliary MATLAB functions are saved here.\nspm = constants[\"paths\"][\"spm\"] #The location of the SPM12 files.\nfs_subjectsdir = constants[\"paths\"][\"fs_subjectsdir\"]\nfs_dir = constants[\"paths\"][\"fs_dir\"]\nnl_res_dir = constants[\"paths\"][\"nl_res_dir\"]\nnl_code_dir = constants[\"paths\"][\"nl_code_dir\"]\nspm8_dir = constants[\"paths\"][\"spm8_dir\"]\nnl_atlas_dir = constants[\"paths\"][\"nl_atlas_dir\"]\njobs = constants[\"paths\"][\"jobs\"]\nincoming = constants[\"paths\"][\"incoming\"]\ndicom_dir = constants[\"paths\"][\"dicom_dir\"]\nincoming_p2 = constants[\"paths\"][\"incoming_p2\"]\nstored_reports = constants[\"paths\"][\"stored_reports\"]\n","sub_path":"path_constants.py","file_name":"path_constants.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440426463","text":"import psycopg2\nimport redis\n\nimport localization\nimport settings\nimport text_handler\n\nfrom utils import get_tz_by_location\n\n\ndef check_id_in_db(user):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n query = f'SELECT id FROM public.users WHERE id = {user.id}'\n cur.execute(query)\n status = cur.fetchone()\n if not status:\n if not user.first_name:\n user.first_name = 'NULL'\n if not user.last_name:\n user.last_name = 'NULL'\n query = f'INSERT INTO public.users (id, first_name, last_name)' \\\n f'VALUES (\\'{user.id}\\', \\'{user.first_name}\\', ' \\\n f'\\'{user.last_name}\\')'\n cur.execute(query)\n conn.commit()\n\n\ndef check_location(user, lat, long, bot):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n lang = get_lang_from_redis(user)\n query = f'SELECT latitude, longitude FROM locations ' \\\n f'WHERE id = {user}'\n cur.execute(query)\n locations_in_db = cur.fetchone()\n response = ''\n if not locations_in_db:\n\n tz = get_tz_by_location([lat, long])\n if tz != '':\n query = f'INSERT INTO locations (id, latitude, longitude) ' \\\n f'VALUES (\\'{user}\\', \\'{lat}\\', \\'{long}\\')'\n cur.execute(query)\n conn.commit()\n check_tz(user, tz)\n response = localization.Utils.location_received(lang)\n bot.send_message(user, response)\n response = True\n else:\n response = localization.Utils.failed_check_tz(lang)\n bot.send_message(user, response)\n\n # если координаты в бд отличаются от присланных, обновляем бд\n elif lat != locations_in_db[0] or long != locations_in_db[1]:\n loc = [lat, long]\n tz = get_tz_by_location(loc)\n lang = get_lang_from_redis(user)\n if tz != '':\n check_tz(user, tz)\n query = f'UPDATE locations SET ' \\\n f'latitude = \\'{lat}\\', longitude = \\'{long}\\'' \\\n f'WHERE id = {user}'\n if lang == 'Russian':\n response = 'Координаты обновлены'\n elif lang == 'English':\n response = 'Location updated'\n bot.send_message(user, response)\n cur.execute(query)\n conn.commit()\n response = True\n else:\n response = localization.Utils.failed_check_tz(lang)\n bot.send_message(user, response)\n response = False\n return response\n\n\ndef get_location_by_id(user_id):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n query = f'SELECT latitude, longitude FROM locations ' \\\n f'WHERE id = {user_id}'\n cur.execute(query)\n location 
= cur.fetchone()\n response = location\n return response\n\n\ndef check_tz(user, tz):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n query = f'SELECT tz FROM public.tz WHERE id = {user}'\n cur.execute(query)\n time_zone = cur.fetchone()\n if not time_zone:\n query = f'INSERT INTO public.tz (id, tz) VALUES ({user}, \\'{tz}\\')'\n cur.execute(query)\n conn.commit()\n elif time_zone != tz:\n query = f'UPDATE public.tz SET tz = \\'{tz}\\' WHERE id = {user}'\n cur.execute(query)\n conn.commit()\n\n\ndef get_tz_by_id(user_id):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n query = f'SELECT public.tz.tz FROM public.tz WHERE id = {user_id}'\n cur.execute(query)\n tz = cur.fetchone()\n response = tz[0]\n return response\n\n\ndef set_lang(user, lang):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n query = f'SELECT lang FROM lang WHERE id = {user}'\n cur.execute(query)\n lang_in_db = cur.fetchone()\n if not lang_in_db:\n query = f'INSERT INTO lang VALUES ({user}, \\'{lang}\\')'\n cur.execute(query)\n conn.commit()\n elif lang_in_db[0] != lang:\n query = f'UPDATE lang SET lang = \\'{lang}\\' WHERE id = {user}'\n cur.execute(query)\n conn.commit()\n r = redis.StrictRedis(host=settings.r_host, port=settings.r_port)\n r.set(f'{user}', lang)\n r.expire(f'{user}', 31536000)\n\n\ndef get_lang_by_id(user):\n with psycopg2.connect(settings.db_parameters_string) as conn:\n cur = conn.cursor()\n query = f'SELECT lang FROM lang WHERE id = {user}'\n cur.execute(query)\n lang_in_bd = cur.fetchone()[0]\n if not lang_in_bd:\n response = False\n else:\n r = redis.StrictRedis(host=settings.r_host, port=settings.r_port)\n r.set(f'{user}', lang_in_bd[0])\n r.expire(f'{user}', 31536000)\n response = lang_in_bd\n return response\n\n\ndef get_lang_from_redis(user):\n r = redis.StrictRedis(host=settings.r_host, port=settings.r_port)\n lang_in_redis = r.get(user)\n if not lang_in_redis:\n lang_in_db = get_lang_by_id(user)\n if not lang_in_db:\n response = text_handler.change_lang()\n else:\n r.set(user, lang_in_db)\n r.expire(user, 31536000)\n lang = r.get(user).decode('unicode_escape')\n response = lang\n else:\n lang = r.get(user).decode('unicode_escape')\n response = lang\n return response\n","sub_path":"db_operations.py","file_name":"db_operations.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"374008590","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 7 19:59:24 2011\n\n@author: Sat Kumar Tomer\n@website: www.ambhas.com\n@email: satkumartomer@gmail.com\n\"\"\"\n\n# import the required library\nfrom osgeo import gdal\nfrom scipy.signal import medfilt2d, wiener\nfrom osgeo.gdalconst import *\nimport matplotlib.pyplot as plt\n\n# read the raster data \ndriver = gdal.GetDriverByName('GTiff')\nfile_name = \"/home/tomer/my_books/python_in_hydrology/datas/data_noisy.tif\"\ndataset = gdal.Open(file_name, GA_ReadOnly)\ngeotransform = dataset.GetGeoTransform()\ndata = dataset.GetRasterBand(1).ReadAsArray()\ndataset = None\n\n# median filter of 7X7 window\ndata_median = medfilt2d(data, kernel_size=3)\n\n# filter the image using wiener filter of window (7X7)\ndata_wiener = wiener(data, mysize=(3,3), noise=None)\n\n# plot the 
data\nplt.clf()\nplt.matshow(data_median)\nplt.colorbar()\nplt.savefig('/home/tomer/my_books/python_in_hydrology/images/median.png')\n\nplt.clf()\nplt.matshow(data_wiener)\nplt.colorbar()\nplt.savefig('/home/tomer/my_books/python_in_hydrology/images/wiener.png')\n\n# save the data into tif format\ndriver = gdal.GetDriverByName('GTiff')\nfile_name = \"/home/tomer/my_books/python_in_hydrology/datas/data_filtered.tif\"\ndataset = driver.Create(file_name, data_wiener.shape[1], data_wiener.shape[0], 1, gdal.GDT_Float32)\ndataset.SetGeoTransform(geotransform)\ndataset.GetRasterBand(1).WriteArray(data_wiener, 0, 0)\ndataset = None\n\n","sub_path":"codes/spatial_filter.py","file_name":"spatial_filter.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"550249988","text":"# -*- coding: utf-8 -*-\n\nimport copy\nfrom vec import *\n\nclass Rectangle:\n\tdef __init__(self, A, B):\n\t\tif isinstance(A, Vec2):\n\t\t\tself.A = copy.deepcopy(A)\n\t\telse:\n\t\t\tself.A = Vec2(A[0],A[1])\n\t\tif isinstance(B, Vec2):\n\t\t\tself.B = copy.deepcopy(B)\n\t\telse:\n\t\t\tself.B = Vec2(B[0],B[1])\n\t\t\n\t\tif self.A.x > self.B.x:\n\t\t\tself.A,self.B = self.B,self.A\n\t\n\tdef __contains__(self, v2):\n\t\treturn ((self.A.x < v2.x < self.B.x) and (min(self.A.y,self.B.y) < v2.y < max(self.A.y,self.B.y)))\n","sub_path":"IA/geometry/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"250695571","text":"import os\r\nimport time\r\nfrom imgurpython import ImgurClient as ImgurClient\r\nfrom imgurpython.helpers.error import ImgurClientError\r\nimport imgurDetails as myDetails #imgur application details\r\n\r\n#new frens? yay! check distribution of images over system\r\ndef checkDoggos():\r\n # find some doggos! locate image files\r\n new = next(os.walk(os.path.join(os.getcwd(), 'new')))[2]\r\n imported = next(os.walk(os.path.join(os.getcwd(), 'imported')))[2]\r\n archive = next(os.walk(os.path.join(os.getcwd(), 'archive')))[2]\r\n return new, imported, archive\r\n\r\n#take doggo for walk to new kennel! move image file from source to dest and facilitate renaming\r\ndef moveDoggo(source, dest, oldname, newname):\r\n os.rename(os.path.join(os.getcwd(), '{}\\\\{}'.format(source, oldname)), os.path.join(os.getcwd(), '{}\\\\{}'.format(dest, newname)))\r\n return None\r\n\r\n#welcome new doggos to family! parse images from 'new' to 'imported' with standard filename ('XXXX.jpg')\r\ndef importDoggos(numdoggos):\r\n print('Doin an import, pliss no interrupt!')\r\n movedDoggos = 0\r\n for doggo in newDoggos:\r\n try: # if vvv good boi\r\n moveDoggo('new', 'imported', doggo, '{}.jpg'.format(numdoggos))\r\n print('henlo new fren! (doggo #{})'.format(numdoggos))\r\n numdoggos += 1\r\n movedDoggos += 1\r\n except OSError: # uh-oh, heckin bad boi\r\n print('HECK! doggo \\'{}\\' is naughty!'.format(doggo))\r\n return print('{} new doggos added!'.format(movedDoggos))\r\n\r\n\r\n#time 4 internetz fame! 
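The spatial_filter.py record above applies scipy's `medfilt2d` and `wiener` filters to a raster read through GDAL. A minimal self-contained sketch of that filtering step, run on a synthetic noisy array so it needs no GeoTIFF input (the gradient image, noise level, and RMSE comparison here are invented for illustration):

```python
# Compare a 3x3 median filter and a Wiener filter on synthetic noisy data,
# mirroring the record above but without the machine-specific GDAL file I/O.
import numpy as np
from scipy.signal import medfilt2d, wiener

rng = np.random.default_rng(0)
clean = np.outer(np.linspace(0.0, 1.0, 64), np.linspace(0.0, 1.0, 64))
noisy = clean + rng.normal(scale=0.1, size=clean.shape)

data_median = medfilt2d(noisy, kernel_size=3)   # rank-based, robust to spikes
data_wiener = wiener(noisy, mysize=(3, 3))      # adaptive linear filter

for name, img in (("median", data_median), ("wiener", data_wiener)):
    rmse = np.sqrt(np.mean((img - clean) ** 2))
    print(f"{name}: RMSE vs clean = {rmse:.4f}")
```

The record itself writes the Wiener result back out to a GeoTIFF with `driver.Create`, copying the source geotransform so the filtered raster stays georeferenced.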
upload to imgur and store local backup in 'archive'\r\ndef uploadDoggos(startdoggos, enddoggos):\r\n print('Doin an upload, pliss no interrupt!')\r\n uploadedDoggos = 0\r\n movedDoggos = 0\r\n for doggo in range(startdoggos, enddoggos):\r\n time.sleep(15) #wait to avoid exceeding rate limit\r\n #upload image details (destination album, image name, post title, post description)\r\n config = {\r\n 'album': myDetails.albumID(),\r\n 'name': '{}'.format(doggo),\r\n 'title': '{}'.format(doggo),\r\n 'description': '\\n'.join(['This doggo was uploaded using plusDoggo, more info at https://github.com/NickelOz/plus-doggo',\r\n 'Uploader:'.ljust(12) + myDetails.me(),\r\n 'Uploaded on {}'.format(time.strftime('%b %d, %Y at %I:%M%p')),\r\n 'Rating: 10/10']) #they're good dogs Brent\r\n }\r\n print('uploading {}.jpg, upload well pupper'.format(doggo))\r\n try: #attempt to upload to imgur\r\n client.upload_from_path(os.path.join(os.getcwd(), 'imported\\\\{}.jpg'.format(doggo)), config=config, anon=False)\r\n uploadedDoggos += 1\r\n try: #tired pupper does a slep! archive image\r\n moveDoggo('imported', 'archive', '{}.jpg'.format(doggo), '{}.jpg'.format(doggo))\r\n movedDoggos += 1\r\n except OSError:\r\n print('HECK! doggo \\'{}\\' is naughty!'.format(doggo))\r\n except ImgurClientError:\r\n print(ImgurClientError.error_message)\r\n print(ImgurClientError.status_code)\r\n return print('{} doggos uploaded to imgur! {} doggos stored locally!\\n'.format(uploadedDoggos, movedDoggos) +\r\n 'Your updated album can be found at https://www.imgur.com/a/{}'.format(myDetails.albumID()))\r\n\r\n#who is a good boi? this app is! verify with imgur API\r\nclient = ImgurClient(myDetails.clientID(), myDetails.clientSecret(), myDetails.accessToken(), myDetails.refreshToken(), mashape_key=None)\r\n\r\n#do a trick! perform user commands\\\r\nprint('Welcome to plusDoggo! What would you like to do?')\r\ncommands = ['q', 'quit', 'r', 'run']\r\ncommand = input('[R]un /// [Q]uit\\n>>> ').lower()\r\nwhile command not in commands[0:2]:\r\n if command in commands[2:4]:\r\n newDoggos, importedDoggos, archiveDoggos = checkDoggos()\r\n importDoggos(len(archiveDoggos + importedDoggos) + 1)\r\n newDoggos, importedDoggos, archiveDoggos = checkDoggos()\r\n uploadDoggos(len(archiveDoggos) + 1, len(archiveDoggos + importedDoggos) + 1)\r\n else:\r\n print('Invalid command, please re-enter command')\r\n command = input('[R]un /// [Q]uit\\n>>> ').lower()\r\nprint('Thank you for using plusDoggo! Exiting now!')\r\ntime.sleep(2)\r\n","sub_path":"plusDoggo.py","file_name":"plusDoggo.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"277752261","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/kieffer/workspace/fabio/build/lib.macosx-10.6-intel-3.5/fabio/openimage.py\n# Compiled at: 2020-04-03 09:02:03\n# Size of source mod 2**32: 10925 bytes\n\"\"\"\n\nAuthors: Henning O. Sorensen & Erik Knudsen\n Center for Fundamental Research: Metal Structures in Four Dimensions\n Risoe National Laboratory\n Frederiksborgvej 399\n DK-4000 Roskilde\n email:henning.sorensen@risoe.dk\n\nmods for fabio by JPW\nmodification for HDF5 by Jérôme Kieffer\n\n\"\"\"\nfrom __future__ import with_statement, print_function, absolute_import\nimport os.path, logging\nlogger = logging.getLogger(__name__)\nfrom . 
import fabioutils\nfrom .fabioutils import FilenameObject, BytesIO\nfrom .fabioimage import FabioImage\nfrom . import fabioformats\nMAGIC_NUMBERS = [\n (b'FORMAT :100', 'bruker100'),\n (b'FORMAT : 86', 'bruker'),\n (b'MM\\x00*', 'tif'),\n (b'II*\\x00\\x08\\x00', 'marccd/tif'),\n (b'II*\\x00\\x82\\x00', 'pilatus'),\n (b'II*\\x00', 'tif'),\n (b'{\\nHEA', 'dtrek'),\n (b'\\r\\n{\\r\\nEDF', 'edf'),\n (b'\\n{\\r\\nEDF', 'edf'),\n (b'{\\r\\nEDF', 'edf'),\n (b'{\\n', 'edf'),\n (b'\\n{\\n', 'edf'),\n (b'{', 'edf'),\n (b'\\r{', 'edf'),\n (b'\\n{', 'edf'),\n (b'ADEPT', 'GE'),\n (b'OD', 'OXD'),\n (b'IM', 'HiPiC'),\n (b'-\\x04', 'mar345'),\n (b'\\xd2\\x04', 'mar345'),\n (b'\\x04-', 'mar345'),\n (b'\\x04\\xd2', 'mar345'),\n (b'M\\x00\\x00\\x00A\\x00\\x00\\x00S\\x00\\x00\\x00K\\x00\\x00\\x00', 'fit2dmask'),\n (b'\\x00\\x00\\x00\\x03', 'dm3'),\n (b'No', 'kcd'),\n (b'<', 'xsd'),\n (b'\\n\\xb8\\x03\\x00', 'pixi'),\n (b'\\x89HDF\\r\\n\\x1a\\n', 'eiger/hdf5'),\n (b'R-AXIS', 'raxis'),\n (b'\\x93NUMPY', 'numpy'),\n (b'\\\\$FFF_START', 'fit2d'),\n (b'\\xff\\xd8\\xff\\xdb', 'jpeg'),\n (b'\\xff\\xd8\\xff\\xe0', 'jpeg'),\n (b'\\xff\\xd8\\xff\\xe1', 'jpeg'),\n (b'\\x00\\x00\\x00\\x0cjP \\r\\n\\x87\\n', 'jpeg2k'),\n (b'ESPERANTO FORMAT', 'esperanto')]\n\ndef do_magic(byts, filename):\n \"\"\" Try to interpret the bytes starting the file as a magic number \"\"\"\n for magic, format_type in MAGIC_NUMBERS:\n if byts.startswith(magic):\n if '/' in format_type:\n if format_type == 'eiger/hdf5':\n if '::' in filename:\n return 'hdf5'\n else:\n return 'eiger'\n else:\n if format_type == 'marccd/tif':\n if 'mccd' in filename.split('.'):\n return 'marccd'\n return 'tif'\n else:\n return format_type\n\n raise Exception('Could not interpret magic string')\n\n\ndef openimage(filename, frame=None):\n \"\"\"Open an image.\n\n It returns a FabioImage-class instance which can be used as a context manager to close the file\n at the termination.\n\n .. 
code-block:: python\n\n with fabio.open(\"image.edf\") as i:\n print(i.nframes)\n print(i.data)\n\n :param Union[str,FilenameObject] filename: A filename or a filename\n iterator.\n :param Union[int,None] frame: A specific frame inside this file.\n :rtype: FabioImage\n \"\"\"\n if isinstance(filename, fabioutils.PathTypes):\n if not isinstance(filename, fabioutils.StringTypes):\n filename = str(filename)\n if isinstance(filename, FilenameObject):\n try:\n logger.debug('Attempting to open %s' % filename.tobytes())\n obj = _openimage(filename.tobytes())\n logger.debug('Attempting to read frame %s from %s with reader %s' % (frame, filename.tobytes(), obj.classname))\n obj = obj.read(filename.tobytes(), frame)\n except Exception as ex:\n logger.debug('Exception %s, trying name %s' % (ex, filename.stem))\n obj = _openimage(filename.stem)\n logger.debug('Reading frame %s from %s' % (filename.num, filename.stem))\n obj.read(filename.stem, frame=filename.num)\n\n else:\n logger.debug('Attempting to open %s' % filename)\n obj = _openimage(filename)\n logger.debug('Attempting to read frame %s from %s with reader %s' % (frame, filename, obj.classname))\n obj = obj.read(obj.filename, frame)\n return obj\n\n\ndef openheader(filename):\n \"\"\" return only the header\"\"\"\n if isinstance(filename, fabioutils.PathTypes):\n if not isinstance(filename, fabioutils.StringTypes):\n filename = str(filename)\n obj = _openimage(filename)\n obj.readheader(obj.filename)\n return obj\n\n\ndef _openimage(filename):\n \"\"\"\n determine which format for a filename\n and return appropriate class which can be used for opening the image\n\n :param filename: can be an url like:\n\n hdf5:///example.h5?entry/instrument/detector/data/data#slice=[:,:,5]\n\n \"\"\"\n if hasattr(filename, 'seek') and hasattr(filename, 'read'):\n if not isinstance(filename, BytesIO):\n filename.seek(0)\n actual_filename = BytesIO(filename.read())\n else:\n if os.path.exists(filename):\n actual_filename = filename\n else:\n if '::' in filename:\n actual_filename = filename.split('::')[0]\n else:\n actual_filename = filename\n try:\n imo = FabioImage()\n with imo._open(actual_filename) as (f):\n magic_bytes = f.read(18)\n except IOError:\n logger.debug('Backtrace', exc_info=True)\n raise\n else:\n imo = None\n filetype = None\n try:\n filetype = do_magic(magic_bytes, filename)\n except Exception:\n logger.debug('Backtrace', exc_info=True)\n try:\n file_obj = FilenameObject(filename=filename)\n if file_obj is None:\n raise Exception('Unable to deconstruct filename')\n if file_obj.format is not None and len(file_obj.format) != 1 and isinstance(file_obj.format, list):\n raise Exception('openimage failed on magic bytes & name guess')\n filetype = file_obj.format\n except Exception:\n logger.debug('Backtrace', exc_info=True)\n raise IOError('Fabio could not identify ' + filename)\n\n if filetype is None:\n raise IOError('Fabio could not identify ' + filename)\n klass_name = ''.join(filetype) + 'image'\n try:\n obj = fabioformats.factory(klass_name)\n except (RuntimeError, Exception):\n logger.debug('Backtrace', exc_info=True)\n raise IOError(\"Filename %s can't be read as format %s\" % (filename, klass_name))\n\n obj.filename = filename\n return obj\n\n\ndef open_series(filenames=None, first_filename=None, single_frame=None, fixed_frames=None, fixed_frame_number=None):\n \"\"\"\n Create an object to iterate frames through a file series.\n\n This function is a wrapper over :class:`~file_series.FileSeries` to facilitate\n simple uses of file series 
iterations.\n\n :param Union[Generator,Iterator,List] filenames: Ordered list of filenames\n to process as a file series. It also can be a generator, and\n iterator, or :class:`~fabio.file_series.filename_series` or\n :class:`~fabio.file_series.file_series` objects.\n :param str first_filename: If provided iterate filenames from this filename\n and try to consecutivelly open next files. If this argument is specified\n the `filenames` have to unspecified. Internally it uses\n :class:`~fabio.file_series.filename_series` to iterate the filenames.\n :param Union[Bool,None] single_frame: If True, all files are supposed to\n contain only one frame.\n :param Union[Bool,None] fixed_frames: If True, all files are supposed to\n contain the same amount of frames (this fixed amount will be reached\n from the first file of the serie).\n :param Union[Integer,None] fixed_frame_number: If set, all files are\n supposed to contain the same amount of frames (sepecified by this\n argument)\n :rtype: :class:`~file_series.FileSeries`\n \"\"\"\n from . import file_series\n if filenames is not None and first_filename is not None:\n raise ValueError(\"'filenames' and 'first_filename' are mutual exclusive\")\n if first_filename is not None:\n filenames = file_series.filename_series(filename=first_filename)\n return file_series.FileSeries(filenames=filenames, single_frame=single_frame, fixed_frames=fixed_frames, fixed_frame_number=fixed_frame_number)","sub_path":"pycfiles/fabio-0.10.0-cp35-cp35m-macosx_10_6_intel/openimage.cpython-35.py","file_name":"openimage.cpython-35.py","file_ext":"py","file_size_in_byte":8522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"24791389","text":"\"\"\"\nThis module comes with this framework and takes care of logging solutions.\n\"\"\"\n\nimport logging\nimport json\nimport logging.handlers\nimport logging.config\n\n# We want to use the power of this framework\nfrom modules import Initialize\n\n\ndef prefix_wrapper(func):\n \"\"\"\n # This decorator wrapper simply replaces every normal call to the logging methods and\n # prepends a prefix passed to it.\n \"\"\"\n def inner(self, msg, *args, **kwargs):\n if 'extra' in kwargs:\n if 'ident' in kwargs['extra']:\n msg = ' '.join((kwargs['extra'].get('ident'), ':', msg))\n\n if self.prefix and isinstance(self.prefix, str):\n func(self, self.prefix + ' : ' + msg, *args, **kwargs)\n else:\n func(self, msg, *args, **kwargs)\n return inner\n\n\nclass Logger(object):\n \"\"\"\n This is the main Logging class. To use this module, call the get_instance(prefix='') method\n Never instantiate this module directly. By doing this you'll get some hidden powers that\n will make your life quite easy.\n \"\"\"\n __instance = None\n\n def __init__(self, prefix='', logfilename=''):\n _core = Initialize().get_instance()\n if not _core:\n raise Exception('This project is not initialized. '\n 'Please Instantiate this project first before using any core module')\n\n app_name = _core.project_name\n config_file = _core.base_path+'/config/'+_core.environment+'/modules/logger/config.json'\n\n if app_name is None:\n raise Exception('You must give a name for this project to the logger module.')\n\n if config_file is None:\n raise Exception('No configuration available. 
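The `MAGIC_NUMBERS` table in the fabio `openimage` record above maps the leading bytes of a file to a format name, and `do_magic` returns the first match. The same idea in miniature; the signatures below are a small, well-known subset chosen for illustration, not fabio's full table:

```python
# Identify a file format by sniffing its first bytes; first match wins.
MAGIC = [
    (b"\x89PNG\r\n\x1a\n", "png"),
    (b"\x89HDF\r\n\x1a\n", "hdf5"),
    (b"\xff\xd8\xff", "jpeg"),
    (b"II*\x00", "tiff-le"),   # little-endian TIFF
    (b"MM\x00*", "tiff-be"),   # big-endian TIFF
]

def sniff(path, nbytes=18):
    with open(path, "rb") as fh:
        head = fh.read(nbytes)
    for magic, fmt in MAGIC:
        if head.startswith(magic):
            return fmt
    raise ValueError(f"could not identify {path!r}")
```

As in fabio's table, ordering matters: longer, more specific signatures (the MarCCD and Pilatus TIFF variants there) must come before the bare `II*\x00` prefix, because scanning stops at the first hit.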
Please pass in a config file path')\n\n try:\n with open(config_file, 'r') as config_f:\n _config = json.load(config_f)\n except:\n raise\n\n if app_name not in _config['loggers']:\n raise Exception('You need to have a logger config section for module: {0} '\n 'with a handler parameter'.format(app_name))\n\n if 'file' in _config['loggers'][app_name]['handlers']:\n if not logfilename:\n logfilename = _config['handlers']['file']['filename']\n _config['handlers']['file']['filename'] = _core.log_directory + '/' + logfilename\n\n logging.config.dictConfig(_config)\n self.prefix = prefix\n self.logger = logging.getLogger(app_name)\n\n @staticmethod\n def get_instance(prefix='', logfilename=''):\n \"\"\"\n This will either return the existing logger instance or create a new.\n Send in the prefix you want your logs to have to this method.\n :param prefix: \n :return: Logger Instance\n \"\"\"\n if Logger.__instance is None:\n Logger.__instance = Logger(prefix, logfilename)\n\n return Logger.__instance\n\n @prefix_wrapper\n def log(self, lvl, msg, *args, **kwargs):\n \"\"\"\n Just a wrapper to the base logger methods\n \"\"\"\n self.logger.log(lvl, msg, *args, **kwargs)\n\n @prefix_wrapper\n def debug(self, msg, *args, **kwargs):\n \"\"\"\n Just a wrapper to the base logger methods\n \"\"\"\n self.logger.debug(msg, *args, **kwargs)\n\n @prefix_wrapper\n def info(self, msg, *args, **kwargs):\n \"\"\"\n Just a wrapper to the base logger methods\n \"\"\"\n self.logger.info(msg, *args, **kwargs)\n\n @prefix_wrapper\n def warning(self, msg, *args, **kwargs):\n \"\"\"\n Just a wrapper to the base logger methods\n \"\"\"\n self.logger.warning(msg, *args, **kwargs)\n\n @prefix_wrapper\n def error(self, msg, *args, **kwargs):\n \"\"\"\n Just a wrapper to the base logger methods\n \"\"\"\n self.logger.error(msg, *args, **kwargs)\n\n @prefix_wrapper\n def critical(self, msg, *args, **kwargs):\n \"\"\"\n Just a wrapper to the base logger methods\n \"\"\"\n self.logger.critical(msg, *args, **kwargs)\n","sub_path":"modules/core/logger/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"500943936","text":"#!/usr/bin/env python3.6\n\"\"\"Extend PicklePersistence to also keep track of polls.\n\"\"\"\n\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import PicklePersistence\n\nclass PollPicklePersistence(PicklePersistence):\n\n \"\"\"An extended version of PicklePersistence to also keep track of polls.\n \n Attributes:\n chat_data (dict): Description\n conversations (dict): Description\n poll_data (dict): Description\n user_data (dict): Description\n \"\"\"\n \n def __init__(self, filename, store_user_data=True, store_chat_data=True, singe_file=True, on_flush=False):\n \"\"\"Summary\n \n Args:\n filename (str): The filename for storing the pickle files. When :attr:`single_file` is false this will be used as a prefix.\n store_user_data (bool, optional): Optional. Whether user_data should be saved by this persistence class.\n store_chat_data (bool, optional): Optional. Whether chat_data should be saved by this persistence class.\n singe_file (bool, optional): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. 
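The logger record above prepends a per-instance prefix by wrapping each log method in `prefix_wrapper`. The standard library gets the same effect with `logging.LoggerAdapter`; this is an alternative sketch, not the module's own mechanism:

```python
# Prefix every message through a LoggerAdapter instead of method decorators.
import logging

class PrefixAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        prefix = self.extra.get("prefix", "")
        return (f"{prefix} : {msg}" if prefix else msg), kwargs

logging.basicConfig(level=logging.INFO)
log = PrefixAdapter(logging.getLogger("app"), {"prefix": "worker-1"})
log.info("job started")   # logs: worker-1 : job started
```

One adapter per prefix also sidesteps the singleton state carried by `Logger.get_instance`, though it does none of the dictConfig wiring the record performs.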
Default is ``True``.\n on_flush (bool, optional): When ``True`` will only save to file when :meth:`flush` is called and keep data in memory until that happens.\n When ``False`` will store data on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n \"\"\"\n super().__init__(filename, store_user_data, store_chat_data, singe_file, on_flush)\n self.poll_data = None\n\n def load_singlefile(self):\n \"\"\"Modified to also load poll_data.\n \n Raises:\n TypeError: Couldn't unpickle data.\n \"\"\"\n try:\n filename = self.filename\n with open(self.filename, 'rb') as file_:\n all_ = pickle.load(file_)\n self.user_data = defaultdict(dict, all_['user_data'])\n self.chat_data = defaultdict(dict, all_['chat_data'])\n self.conversations = all_['conversations']\n self.poll_data = defaultdict(dict, all_['poll_data'])\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n self.poll_data = defaultdict(dict)\n except pickle.UnpicklingError:\n raise TypeError(f\"File {filename} does not contain valid pickle data\")\n except Exception:\n raise TypeError(f\"Something went wrong unpickling {filename}\")\n\n\n def dump_singlefile(self):\n \"\"\"Modified to also save poll_data.\n \"\"\"\n with open(self.filename, 'wb') as file_:\n all_ = {'conversations': self.conversations, 'user_data': self.user_data, 'chat_data': self.chat_data, 'poll_data': self.poll_data}\n pickle.dump(all_, file_)\n\n\n def get_poll_data(self):\n \"\"\"Returns the poll_data from the pickle file if it exsists or an empty defaultdict.\n \n Returns:\n TYPE: The restoreed polls.\n \"\"\"\n if self.poll_data:\n pass\n elif not self.single_file:\n filename = f'{self.filename}_poll_data'\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.poll_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.poll_data)\n\n\n def update_poll_data(self, poll_id, data):\n \"\"\"Will update the poll_data (if changed) and depending on :attr:`on_flush` save the pickle file.\n \n Args:\n poll_id (int): The poll the data might have been changed for.\n data (dict): The :attr:`Poll` [poll_id].\n \"\"\"\n if self.poll_data.get(poll_id) == data:\n return\n self.poll_data[poll_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = f'{self.filename}_poll_data'\n self.dump_file(filename, self.poll_data)\n else:\n self.dump_singlefile()\n\n\n def drop_poll_data(self, poll_id):\n \"\"\"Will remove the poll from poll_data and depending on :attr:`on_flush` save the pickle file.\n \n Args:\n poll_id (int): The poll to drop.\n \"\"\"\n if poll_id in self.poll_data:\n del self.poll_data[poll_id]\n if not self.on_flush:\n if not self.single_file:\n filename = f'{self.filename}_poll_data'\n self.dump_file(filename, self.poll_data)\n else:\n self.dump_singlefile()\n\n\n def flush(self):\n \"\"\"Modified to handle poll_data.\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.conversations or self.poll_data:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(f'{self.filename}_user_data', self.user_data)\n if self.chat_data:\n self.dump_file(f'{self.filename}_chat_data', self.chat_data)\n if self.conversations:\n self.dump_file(f'{self.filename}_conversations', self.conversations)\n if self.poll_data:\n self.dump_file(f'{self.filename}_poll_data', 
self.poll_data)\n","sub_path":"pollpicklepersistence.py","file_name":"pollpicklepersistence.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287195752","text":"import astropy.units as u\nimport pytest\n\nfrom plasmapy.particles import deuteron, electron, proton\nfrom plasmapy.particles._factory import _physical_particle_factory\nfrom plasmapy.particles.exceptions import InvalidParticleError\nfrom plasmapy.particles.particle_class import CustomParticle, Particle\nfrom plasmapy.particles.particle_collections import ParticleList\n\nmass = 1e-26 * u.kg\ncharge = 1e-29 * u.C\ncustom_particle = CustomParticle(mass=mass, charge=charge)\n\nHe4 = Particle(\"He-4\", Z=0)\nZ_mean = 1.5\n\ntest_cases = [\n ([[]], {}, ParticleList()),\n ([proton], {}, proton),\n ([\"p+\"], {}, proton),\n ([\"H\"], {\"Z\": 1, \"mass_numb\": 2}, deuteron),\n ([\"muon\"], {}, Particle(\"muon\")),\n pytest.param([charge, mass], {}, custom_particle),\n ([mass, charge], {\"Z\": None, \"mass_numb\": None}, custom_particle),\n ([], {\"symbol\": \"ξ\"}, CustomParticle(symbol=\"ξ\")),\n ([[proton, electron]], {}, ParticleList([proton, electron])),\n ([], {\"mass\": mass}, CustomParticle(mass=mass)),\n ([], {\"charge\": charge}, CustomParticle(charge=charge)),\n ([\"e-\"], {}, electron),\n ([1], {}, Particle(\"H\")),\n ([\"H\"], {\"Z\": 1, \"mass_numb\": 1}, Particle(\"H\", Z=1, mass_numb=1)),\n ([custom_particle], {}, custom_particle),\n ([ParticleList([\"p+\", \"e-\"])], {}, ParticleList([\"p+\", \"e-\"])),\n ([mass, charge], {}, custom_particle),\n ([charge, mass], {}, custom_particle),\n ([charge], {}, CustomParticle(charge=charge)),\n ([mass], {}, CustomParticle(mass=mass)),\n (\n [\"He-4\"],\n {\"Z\": Z_mean},\n CustomParticle(mass=He4.mass - Z_mean * electron.mass, Z=Z_mean),\n ),\n (\n [\"He\"],\n {\"Z\": Z_mean, \"mass_numb\": 4},\n CustomParticle(mass=He4.mass - Z_mean * electron.mass, Z=Z_mean),\n ),\n]\n\n\n@pytest.mark.parametrize((\"args\", \"kwargs\", \"expected\"), test_cases)\ndef test_physical_particle_factory(args, kwargs, expected):\n result = _physical_particle_factory(*args, **kwargs)\n assert result == expected\n assert type(result) == type(expected)\n\n\ntest_cases_for_exceptions = [\n ([], {}, TypeError),\n (\"not a valid Particle\", {}, InvalidParticleError),\n ([\"not valid for a ParticleList\"], {}, InvalidParticleError),\n ([\"He-4\"], {\"Z\": 2.001}, InvalidParticleError),\n ([\"He-4\"], {\"Z\": 1 + 1j}, InvalidParticleError),\n ([\"tau neutrino\"], {\"Z\": 1.3}, InvalidParticleError),\n]\n\n\n@pytest.mark.parametrize((\"args\", \"kwargs\", \"expected\"), test_cases_for_exceptions)\ndef test_particle_factory_exceptions(args, kwargs, expected):\n with pytest.raises(expected):\n _physical_particle_factory(*args, **kwargs)\n\n\ndef test_particle_factory_custom_particle_with_none_kwargs():\n \"\"\"\n Test that when `_physical_particle_factory` is provided with a\n `CustomParticle` along with ``Z=None`` and ``mass_numb=None`` as\n keyword arguments, then it will return the `CustomParticle`.\n \"\"\"\n expected = CustomParticle(mass=1.27 * u.kg, charge=1 * u.C)\n actual = _physical_particle_factory(expected, Z=None, mass_numb=None)\n assert expected is actual\n","sub_path":"plasmapy/particles/tests/test_factory.py","file_name":"test_factory.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
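The plasmapy test file that closes above drives a single test function over a table of `(args, kwargs, expected)` tuples with `pytest.mark.parametrize`, plus a second parametrized test for expected exceptions. The pattern in miniature, using only builtins so it runs anywhere pytest does:

```python
import pytest

cases = [
    ((2, 3), {}, 5),
    ((-1, 1), {}, 0),
]

@pytest.mark.parametrize(("args", "kwargs", "expected"), cases)
def test_sum(args, kwargs, expected):
    assert sum(args, **kwargs) == expected

error_cases = [
    (("a", 1), TypeError),   # summing a string element raises
]

@pytest.mark.parametrize(("args", "expected"), error_cases)
def test_sum_raises(args, expected):
    with pytest.raises(expected):
        sum(args)
```

Each tuple becomes its own test case in the report, which is why the plasmapy file covers many particle constructions and error paths with only a handful of test bodies.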
+{"seq_id":"639578413","text":"\"\"\" This VR hand dexterity benchmark allows the user to interact with many types of objects\nand interactive objects, and provides a good way to qualitatively measure the dexterity of a VR hand.\nYou can use the left and right controllers to start/stop/reset the timer,\nas well as show/hide its display. The \"overlay toggle\" action and its\ncorresponding button index mapping can be found in the vr_config.yaml file in the igibson folder.\n\"\"\"\nimport os\n\nimport pybullet as p\nimport pybullet_data\n\nimport igibson\nfrom igibson.objects.articulated_object import ArticulatedObject\nfrom igibson.objects.ycb_object import YCBObject\nfrom igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings\nfrom igibson.robots.behavior_robot import BehaviorRobot\nfrom igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\nfrom igibson.simulator import Simulator\n\n# Objects in the benchmark - corresponds to Rs kitchen environment, for range of items and\n# transferability to the real world\n# Note: the scene will automatically load in walls/ceilings/floors in addition to these objects\nbenchmark_names = [\n \"bottom_cabinet\",\n \"countertop\",\n \"dishwasher\",\n \"door\",\n \"fridge\",\n \"microwave\",\n \"oven\",\n \"sink\",\n \"top_cabinet\",\n \"trash_can\",\n]\n\n# Set to true to print Simulator step() statistics\nPRINT_STATS = True\n# Set to true to use gripper instead of VR hands\nUSE_GRIPPER = False\n\n# HDR files for PBR rendering\nhdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\")\nhdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\")\nlight_modulation_map_filename = os.path.join(\n igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\"\n)\nbackground_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\")\n\n\ndef main():\n # VR rendering settings\n vr_rendering_settings = MeshRendererSettings(\n optimized=True,\n fullscreen=False,\n env_texture_filename=hdr_texture,\n env_texture_filename2=hdr_texture2,\n env_texture_filename3=background_texture,\n light_modulation_map_filename=light_modulation_map_filename,\n enable_shadow=True,\n enable_pbr=True,\n msaa=True,\n light_dimming_factor=1.0,\n )\n s = Simulator(mode=\"vr\", rendering_settings=vr_rendering_settings)\n\n scene = InteractiveIndoorScene(\n \"Rs_int\", load_object_categories=benchmark_names, load_room_types=[\"kitchen\", \"lobby\"]\n )\n # scene.load_object_categories(benchmark_names)\n\n s.import_ig_scene(scene)\n p.setAdditionalSearchPath(pybullet_data.getDataPath())\n\n vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER)\n # Move VR agent to the middlvr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER)e of the kitchen\n s.set_vr_start_pos(start_pos=[0, 2.1, 0], vr_height_offset=-0.02)\n\n # Mass values to use for each object type - len(masses) objects will be created of each type\n masses = [1, 5, 10]\n\n # List of objects to load with name: filename, type, scale, base orientation, start position, spacing vector and spacing value\n obj_to_load = {\n \"mustard\": (\"006_mustard_bottle\", \"ycb\", 1, (0.0, 0.0, 0.0, 1.0), (0.0, 1.6, 1.18), (-1, 0, 0), 0.15),\n \"marker\": (\"040_large_marker\", \"ycb\", 1, (0.0, 0.0, 0.0, 1.0), (1.5, 2.6, 0.92), (0, -1, 0), 0.15),\n \"can\": (\"005_tomato_soup_can\", \"ycb\", 1, (0.0, 0.0, 0.0, 1.0), (1.7, 2.6, 0.95), (0, -1, 0), 0.15),\n \"drill\": 
(\"035_power_drill\", \"ycb\", 1, (0.0, 0.0, 0.0, 1.0), (1.5, 2.2, 1.15), (0, -1, 0), 0.2),\n \"small_jenga\": (\n \"jenga/jenga.urdf\",\n \"pb\",\n 1,\n (0.000000, 0.707107, 0.000000, 0.707107),\n (-0.9, 1.6, 1.18),\n (-1, 0, 0),\n 0.1,\n ),\n \"large_jenga\": (\n \"jenga/jenga.urdf\",\n \"pb\",\n 2,\n (0.000000, 0.707107, 0.000000, 0.707107),\n (-1.3, 1.6, 1.31),\n (-1, 0, 0),\n 0.15,\n ),\n \"small_duck\": (\n \"duck_vhacd.urdf\",\n \"pb\",\n 1,\n (0.000000, 0.000000, 0.707107, 0.707107),\n (-1.8, 1.95, 1.12),\n (1, 0, 0),\n 0.15,\n ),\n \"large_duck\": (\n \"duck_vhacd.urdf\",\n \"pb\",\n 2,\n (0.000000, 0.000000, 0.707107, 0.707107),\n (-1.95, 2.2, 1.2),\n (1, 0, 0),\n 0.2,\n ),\n \"small_sphere\": (\n \"sphere_small.urdf\",\n \"pb\",\n 1,\n (0.000000, 0.000000, 0.707107, 0.707107),\n (-0.5, 1.63, 1.15),\n (-1, 0, 0),\n 0.15,\n ),\n \"large_sphere\": (\n \"sphere_small.urdf\",\n \"pb\",\n 2,\n (0.000000, 0.000000, 0.707107, 0.707107),\n (-0.5, 1.47, 1.15),\n (-1, 0, 0),\n 0.15,\n ),\n }\n\n for name in obj_to_load:\n fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name]\n for i in range(len(masses)):\n if obj_type == \"ycb\":\n handle = YCBObject(fpath, scale=scale)\n elif obj_type == \"pb\":\n handle = ArticulatedObject(fpath, scale=scale)\n\n s.import_object(handle, use_pbr=False, use_pbr_mapping=False)\n # Calculate new position along spacing vector\n new_pos = (\n pos[0] + space_vec[0] * space_val * i,\n pos[1] + space_vec[1] * space_val * i,\n pos[2] + space_vec[2] * space_val * i,\n )\n handle.set_position(new_pos)\n handle.set_orientation(orn)\n p.changeDynamics(handle.body_id, -1, mass=masses[i])\n minBox, maxBox = p.getAABB(handle.body_id)\n dims = [maxBox[i] - minBox[i] for i in range(3)]\n print(\"Name {} and masses: {}\".format(name, masses))\n print(\"XYZ dimensions: {}\".format(dims))\n\n table_objects_to_load = {\n \"tray\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"tray\", \"tray_000\", \"tray_000.urdf\"),\n \"pos\": (1.100000, 0.200000, 0.650000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.15,\n \"mass\": 1.7,\n },\n \"plate_1\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"plate\", \"plate_000\", \"plate_000.urdf\"),\n \"pos\": (0.700000, -0.300000, 0.650000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.01,\n \"mass\": 1.5,\n },\n \"plate_2\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"plate\", \"plate_000\", \"plate_000.urdf\"),\n \"pos\": (1.100000, -0.300000, 0.650000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.01,\n \"mass\": 1.5,\n },\n \"plate_3\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"plate\", \"plate_000\", \"plate_000.urdf\"),\n \"pos\": (0.700000, -1.200000, 0.000000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.01,\n \"mass\": 1.5,\n },\n \"plate_4\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"plate\", \"plate_000\", \"plate_000.urdf\"),\n \"pos\": (1.100000, -1.200000, 0.000000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.01,\n \"mass\": 1.5,\n },\n \"chip_1\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"chip\", \"chip_000\", \"chip_000.urdf\"),\n \"pos\": (0.700000, -0.800000, 0.750000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.01,\n \"mass\": 0.22,\n },\n \"chip_2\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"chip\", 
\"chip_000\", \"chip_000.urdf\"),\n \"pos\": (1.100000, -0.800000, 0.750000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 0.01,\n \"mass\": 0.22,\n },\n \"cherry_1\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"cherry\", \"02_0\", \"02_0.urdf\"),\n \"pos\": (0.700000, -0.600000, 0.680000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 0.02,\n },\n \"cherry_2\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"cherry\", \"02_0\", \"02_0.urdf\"),\n \"pos\": (1.100000, -0.600000, 0.680000),\n \"orn\": (0.000000, 0.00000, 0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 0.02,\n },\n \"shelf\": {\n \"urdf\": os.path.join(\n igibson.ig_dataset_path,\n \"objects\",\n \"shelf\",\n \"de3b28f255111570bc6a557844fbbce9\",\n \"de3b28f255111570bc6a557844fbbce9.urdf\",\n ),\n \"pos\": (1.700000, -3.500000, 1.15000),\n \"orn\": (0.000000, 0.00000, -0.707107, 0.707107),\n \"scale\": 2.50,\n \"mass\": 11,\n },\n \"wine_bottle_1\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"wine_bottle\", \"23_1\", \"23_1.urdf\"),\n \"pos\": (1.700000, -3.500000, 1.90000),\n \"orn\": (0.000000, 0.00000, -0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 1.2,\n },\n \"wine_bottle_2\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"wine_bottle\", \"23_1\", \"23_1.urdf\"),\n \"pos\": (1.700000, -3.2500000, 1.90000),\n \"orn\": (0.000000, 0.00000, -0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 1.2,\n },\n \"wine_bottle_3\": {\n \"urdf\": os.path.join(igibson.ig_dataset_path, \"objects\", \"wine_bottle\", \"23_1\", \"23_1.urdf\"),\n \"pos\": (1.700000, -3.750000, 1.90000),\n \"orn\": (0.000000, 0.00000, -0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 1.2,\n },\n \"table_1\": {\n \"urdf\": \"table/table.urdf\",\n \"pos\": (1.000000, -0.200000, 0.01),\n \"orn\": (0.000000, 0.000000, 0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 0,\n },\n \"table_2\": {\n \"urdf\": \"table/table.urdf\",\n \"pos\": (-1.500000, -3.000000, 0.01),\n \"orn\": (0.000000, 0.000000, 0.707107, 0.707107),\n \"scale\": 1,\n \"mass\": 0,\n },\n }\n\n objs_loaded = []\n for it_name, item in table_objects_to_load.items():\n fpath = item[\"urdf\"]\n pos = item[\"pos\"]\n orn = item[\"orn\"]\n scale = item[\"scale\"]\n mass = item[\"mass\"]\n item_ob = ArticulatedObject(fpath, scale=scale)\n s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False)\n item_ob.set_position(pos)\n item_ob.set_orientation(orn)\n objs_loaded.append(item_ob)\n minBox, maxBox = p.getAABB(item_ob.body_id)\n dims = [maxBox[i] - minBox[i] for i in range(3)]\n p.changeDynamics(item_ob.body_id, -1, mass=mass)\n print(\"Name {} and mass: {}\".format(it_name, mass))\n print(\"XYZ dimensions: {}\".format(dims))\n\n # Time how long demo takes\n show_overlay = False\n if show_overlay:\n ag_text = s.add_vr_overlay_text(\n text_data=\"NO AG DATA\", font_size=40, font_style=\"Bold\", color=[0, 0, 0], pos=[0, 90], size=[50, 50]\n )\n\n # Main simulation loop\n while True:\n s.step(print_stats=PRINT_STATS)\n\n # Update scroll text\n scroll_dir = s.get_scroll_input()\n if scroll_dir > -1:\n ag_text.scroll_text(up=scroll_dir)\n\n # Update VR agent\n vr_agent.apply_action()\n\n if show_overlay:\n ag_candidate_data = vr_agent.parts[\"right_hand\"].candidate_data\n if ag_candidate_data:\n t = \"\"\n for bid, link, dist in ag_candidate_data:\n t += \"{}, {}, {}\\n\".format(bid, link, dist)\n ag_text.set_text(t)\n else:\n ag_text.set_text(\"NO AG DATA\")\n\n 
s.disconnect()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py","file_name":"vr_hand_dex_benchmark.py","file_ext":"py","file_size_in_byte":12315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"162461597","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n#tc:O(n)\n#SC:O(1)\n#Find the middle node. Reverse the elements from middle. Assign next node accordingly.\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if not head: return head\n\n low = fast = head\n while fast and fast.next:\n low = low.next\n fast = fast.next.next\n\n tmp = low.next\n low.next = None\n\n # reverse the right half\n tail = cur = tmp\n while cur and cur.next:\n cur = cur.next\n tail.next = cur.next\n cur.next = tmp\n tmp = cur\n cur = tail\n\n # insert in order\n mid = tmp\n while mid:\n tmp = mid.next\n mid.next = head.next\n head.next = mid\n head = head.next.next\n mid = tmp\n \n \n","sub_path":"reorderList.py","file_name":"reorderList.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"625228602","text":"import os\nimport sys\nsys.path.insert(0, '//storage1.of3d.com/centralizedTools/third_party/python/modules/win/PyQt4/2.7')\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\n\nimport ast\n\nclass SceneBreakdown(QtGui.QWidget) :\n\n def __init__(self) :\n super(SceneBreakdown, self).__init__()\n self.initUI()\n\n def createOneBasicFileVerData(self) :\n groupBox = QtGui.QGroupBox(\"Ddg_s0050_0030_lay_camera\")\n groupBox.setGeometry(100, 100, 200, 200)\n\n v_label_0 = QtGui.QLabel(\"Version: v018\")\n v_label_1 = QtGui.QLabel(\"Shot : ddg_s0050_0030\")\n v_label_2 = QtGui.QLabel(\"Type : Houdini Camera\")\n v_label_3 = QtGui.QLabel(\"Updated: 2016-12-19 14:09:36\")\n\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(v_label_0)\n vbox.addWidget(v_label_1)\n vbox.addWidget(v_label_2)\n vbox.addWidget(v_label_3)\n vbox.addStretch(1)\n groupBox.setLayout(vbox)\n\n return groupBox\n\n\n def initUI(self) :\n\n self.setWindowFlags(QtCore.Qt.X11BypassWindowManagerHint)\n self.setGeometry(600, 300, 468, 513)\n self.setMinimumSize(468, 513)\n self.setWindowTitle('Scene Breakdown')\n\n # Grid Layout\n grid = QtGui.QGridLayout()\n self.setLayout(grid)\n \n funList = QtGui.QListWidget()\n\n itemN = QtGui.QListWidgetItem() \n #Create widget\n widget = QtGui.QWidget()\n widgetText = QtGui.QLabel(\"I love PyQt!\")\n widgetButton = QtGui.QPushButton(\"Push Me\")\n widgetLayout = QtGui.QHBoxLayout()\n widgetLayout.addWidget(widgetText)\n widgetLayout.addWidget(widgetButton)\n widgetLayout.addStretch()\n \n widgetLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)\n widget.setLayout(widgetLayout) \n itemN.setSizeHint(widget.sizeHint()) \n \n #Add widget to QListWidget funList\n funList.addItem(itemN)\n funList.setItemWidget(itemN, widget)\n \n grid.addWidget(funList)\n\n self.show()\n\nif __name__ == '__main__':\n ex = SceneBreakdown()","sub_path":"python/pyqt/customListItem.py","file_name":"customListItem.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"29324507","text":"import numbers\n\nfrom itertools import chain\n\nfrom 
ufl.measure import Measure\nfrom ufl.core.expr import Expr\nfrom ufl.checks import is_true_ufl_scalar\nfrom ufl.constantvalue import as_ufl\nfrom ufl.domain import extract_domains\n\nclass CylindricalMeasure(Measure):\n def __init__(self, radius, *args, **kwargs):\n self.radius = radius\n super().__init__(*args, **kwargs)\n def __rmul__(self, integrand):\n \"\"\"Multiply a scalar expression with measure to construct a form with\n a single integral.\n\n This is to implement the notation\n\n form = integrand * self.radius * self\n\n Integration properties are taken from this Measure object.\n\n \"\"\"\n # Avoid circular imports\n from ufl.integral import Integral\n from ufl.form import Form\n\n # Allow python literals: 1*dx and 1.0*dx\n if isinstance(integrand, (int, float)):\n integrand = as_ufl(integrand)\n\n # Let other types implement multiplication with Measure if\n # they want to (to support the dolfin-adjoint TimeMeasure)\n if not isinstance(integrand, Expr):\n return NotImplemented\n\n # Allow only scalar integrands\n if not is_true_ufl_scalar(integrand):\n raise ValueError(\n \"Can only integrate scalar expressions. The integrand is a \"\n f\"tensor expression with value shape {integrand.ufl_shape} and \"\n f\"free indices with labels {integrand.ufl_free_indices}.\")\n\n # If we have a tuple of domain ids build the integrals one by\n # one and construct as a Form in one go.\n subdomain_id = self.subdomain_id()\n if isinstance(subdomain_id, tuple):\n return Form(list(chain(*((integrand * self.reconstruct(subdomain_id=d)).integrals()\n for d in subdomain_id))))\n\n # Check that we have an integer subdomain or a string\n # (\"everywhere\" or \"otherwise\", any more?)\n if not isinstance(subdomain_id, (str, numbers.Integral,)):\n raise ValueError(\"Expecting integer or string domain id.\")\n\n # If we don't have an integration domain, try to find one in\n # integrand\n domain = self.ufl_domain()\n if domain is None:\n domains = extract_domains(integrand)\n if len(domains) == 1:\n domain, = domains\n elif len(domains) == 0:\n raise ValueError(\"This integral is missing an integration domain.\")\n else:\n raise ValueError(\"Multiple domains found, making the choice of integration domain ambiguous.\")\n\n # Otherwise create and return a one-integral form\n integral = Integral(integrand=self.radius*integrand,\n integral_type=self.integral_type(),\n domain=domain,\n subdomain_id=subdomain_id,\n metadata=self.metadata(),\n subdomain_data=self.subdomain_data())\n return Form([integral])\n def reconstruct(self,\n integral_type=None,\n subdomain_id=None,\n domain=None,\n metadata=None,\n subdomain_data=None):\n if subdomain_id is None:\n subdomain_id = self.subdomain_id()\n if domain is None:\n domain = self.ufl_domain()\n if metadata is None:\n metadata = self.metadata()\n if subdomain_data is None:\n subdomain_data = self.subdomain_data()\n return CylindricalMeasure(self.radius, self.integral_type(),\n domain=domain, subdomain_id=subdomain_id,\n metadata=metadata, subdomain_data=subdomain_data)\n","sub_path":"echemfem/cylindricalmeasure.py","file_name":"cylindricalmeasure.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"415514154","text":"# coding: UTF-8\n\nimport os\nimport time\nfrom datetime import timedelta, datetime, timezone\n\nimport pandas as pd\n\nfrom src import logger, allowed_range, retry, delta, load_data, resample\nfrom src.binance_futures_stub import 
BinanceFuturesStub\n\nOHLC_DIRNAME = os.path.join(os.path.dirname(__file__), \"../ohlc/{}\")\nOHLC_FILENAME = os.path.join(os.path.dirname(__file__), \"../ohlc/{}/data.csv\")\n\nclass BinanceFuturesBackTest(BinanceFuturesStub):\n # Pair\n pair = 'BTCUSDT'\n # Market price\n market_price = 0\n # OHLCV\n df_ohlcv = None\n # Current time axis\n index = None\n # Current time\n time = None\n # Order count\n order_count = 0\n # Buy signal history\n buy_signals = []\n # Sell signal history\n sell_signals = []\n # EXIT history\n close_signals = []\n # Balance history\n balance_history = []\n # Start balance\n start_balance = 0\n # Plot data\n plot_data = {}\n # Resample data\n resample_data = {}\n\n def __init__(self, account, pair):\n \"\"\"\n constructor\n :account:\n :pair:\n :param periods:\n \"\"\"\n self.pair = pair\n BinanceFuturesStub.__init__(self, account, pair=self.pair, threading=False)\n self.enable_trade_log = False\n self.start_balance = self.get_balance()\n\n def get_market_price(self):\n \"\"\"\n get market price\n :return:\n \"\"\"\n return self.market_price\n\n def now_time(self):\n \"\"\"\n current time\n :return:\n \"\"\"\n return self.time\n\n def entry(self, id, long, qty, limit=0, stop=0, post_only=False, when=True):\n \"\"\"\n places an entry order, works equivalent to tradingview pine script implementation\n https://jp.tradingview.com/study-script-reference/#fun_strategy{dot}entry\n :param id: Order id\n :param long: Long or Short\n :param qty: Quantity\n :param limit: Limit price\n :param stop: Stop limit\n :param post_only: Post only \n :param when: Do you want to execute the order or not - True for live trading\n :return:\n \"\"\"\n BinanceFuturesStub.entry(self, id, long, qty, limit, stop, post_only, when)\n\n def order(self, id, long, qty, limit=0, stop=0, post_only=False, reduce_only=False, trailing_stop=0, activationPrice=0, when=False):\n \"\"\"\n places an entry order, works equivalent to tradingview pine script implementation\n https://jp.tradingview.com/study-script-reference/#fun_strategy{dot}entry\n :param id: Order id\n :param long: Long or Short\n :param qty: Quantity\n :param limit: Limit price\n :param stop: Stop limit\n :param post_only: Post only \n :param when: Do you want to execute the order or not - True for live trading\n :return:\n \"\"\"\n BinanceFuturesStub.order(self, id, long, qty, limit, stop, post_only, reduce_only, trailing_stop, activationPrice, when)\n\n def commit(self, id, long, qty, price, need_commission=True):\n \"\"\"\n Commit\n :param id: order\n :param long: long or short\n :param qty: quantity\n :param price: price\n :param need_commission: use commision or not?\n \"\"\"\n BinanceFuturesStub.commit(self, id, long, qty, price, need_commission)\n\n if long:\n self.buy_signals.append(self.index)\n else:\n self.sell_signals.append(self.index)\n\n def close_all(self):\n \"\"\"\n Close all positions\n \"\"\"\n if self.get_position_size() == 0:\n return \n BinanceFuturesStub.close_all(self)\n self.close_signals.append(self.index)\n\n def __crawler_run(self):\n \"\"\"\n Get the data and execute the strategy.\n \"\"\"\n start = time.time()\n\n for i in range(self.ohlcv_len):\n self.balance_history.append((self.get_balance() - self.start_balance)/100000000*self.get_market_price())\n\n for i in range(len(self.df_ohlcv) - self.ohlcv_len):\n self.data = self.df_ohlcv.iloc[i:i + self.ohlcv_len, :]\n timestamp = self.data.iloc[-1].name\n close = self.data['close'].values\n open = self.data['open'].values\n high = self.data['high'].values\n low 
= self.data['low'].values\n volume = self.data['volume'].values\n\n if self.get_position_size() > 0 and low[-1] > self.get_trail_price():\n self.set_trail_price(low[-1])\n if self.get_position_size() < 0 and high[-1] < self.get_trail_price():\n self.set_trail_price(high[-1])\n\n self.market_price = close[-1]\n # self.time = timestamp.tz_convert('Asia/Tokyo')\n self.index = timestamp\n self.strategy(open, close, high, low, volume)\n\n self.balance_history.append((self.get_balance() - self.start_balance) / 100000000 * self.get_market_price())\n self.eval_exit()\n\n self.close_all()\n logger.info(f\"Back test time : {time.time() - start}\")\n\n def on_update(self, bin_size, strategy):\n \"\"\"\n Register the strategy function.\n :param strategy:\n \"\"\"\n self.__load_ohlcv(bin_size)\n\n BinanceFuturesStub.on_update(self, bin_size, strategy)\n self.__crawler_run()\n\n def security(self, bin_size):\n \"\"\"\n Recalculate and obtain different time frame data\n \"\"\"\n if bin_size not in self.resample_data:\n self.resample_data[bin_size] = resample(self.df_ohlcv, bin_size)\n return self.resample_data[bin_size][:self.data.iloc[-1].name].iloc[-1 * self.ohlcv_len:, :]\n\n def download_data(self, file, bin_size, start_time, end_time):\n \"\"\"\n download or get the data\n \"\"\"\n if not os.path.exists(os.path.dirname(file)):\n os.makedirs(os.path.dirname(file))\n\n data = pd.DataFrame()\n left_time = None\n source = None\n is_last_fetch = False\n\n while True:\n if left_time is None:\n left_time = start_time\n right_time = left_time + delta(allowed_range[bin_size][0]) * 99\n else:\n left_time = source.iloc[-1].name + + delta(allowed_range[bin_size][0]) * allowed_range[bin_size][2]\n right_time = left_time + delta(allowed_range[bin_size][0]) * 99\n\n if right_time > end_time:\n right_time = end_time\n is_last_fetch = True\n\n source = self.fetch_ohlcv(bin_size=bin_size, start_time=left_time, end_time=right_time) \n data = pd.concat([data, source]) \n\n if is_last_fetch:\n data.to_csv(file)\n break\n\n time.sleep(2)\n\n def __load_ohlcv(self, bin_size):\n \"\"\"\n Read the data.\n :return:\n \"\"\"\n start_time = datetime.now(timezone.utc) - 1 * timedelta(days=121)\n end_time = datetime.now(timezone.utc)\n file = OHLC_FILENAME.format(bin_size)\n\n if os.path.exists(file):\n self.df_ohlcv = load_data(file)\n else:\n self.download_data(file, bin_size, start_time, end_time)\n self.df_ohlcv = load_data(file)\n\n def show_result(self):\n \"\"\"\n Display results\n \"\"\"\n logger.info(f\"============== Result ================\")\n logger.info(f\"TRADE COUNT : {self.order_count}\")\n logger.info(f\"BALANCE : {self.get_balance()}\")\n logger.info(f\"PROFIT RATE : {self.get_balance()/self.start_balance*100} %\")\n logger.info(f\"WIN RATE : {0 if self.order_count == 0 else self.win_count/self.order_count*100} %\")\n logger.info(f\"PROFIT FACTOR : {self.win_profit if self.lose_loss == 0 else self.win_profit/self.lose_loss}\")\n logger.info(f\"MAX DRAW DOWN : {self.max_draw_down * 100}\")\n logger.info(f\"======================================\")\n\n import matplotlib.pyplot as plt\n\n plt_num = len([k for k, v in self.plot_data.items() if not v['overlay']]) + 2\n i = 1\n\n plt.figure(figsize=(12,8))\n\n plt.subplot(plt_num,1,i)\n plt.plot(self.df_ohlcv.index, self.df_ohlcv[\"high\"])\n plt.plot(self.df_ohlcv.index, self.df_ohlcv[\"low\"])\n for k, v in self.plot_data.items():\n if v['overlay']:\n color = v['color']\n plt.plot(self.df_ohlcv.index, self.df_ohlcv[k], color)\n plt.ylabel(\"Price(USD)\")\n ymin = 
min(self.df_ohlcv[\"low\"]) - 200\n ymax = max(self.df_ohlcv[\"high\"]) + 200\n plt.vlines(self.buy_signals, ymin, ymax, \"blue\", linestyles='dashed', linewidth=1)\n plt.vlines(self.sell_signals, ymin, ymax, \"red\", linestyles='dashed', linewidth=1)\n plt.vlines(self.close_signals, ymin, ymax, \"green\", linestyles='dashed', linewidth=1)\n\n i = i + 1\n\n for k, v in self.plot_data.items():\n if not v['overlay']:\n plt.subplot(plt_num,1,i)\n color = v['color']\n plt.plot(self.df_ohlcv.index, self.df_ohlcv[k], color)\n plt.ylabel(f\"{k}\")\n i = i + 1\n\n plt.subplot(plt_num,1,i)\n plt.plot(self.df_ohlcv.index, self.balance_history)\n plt.hlines(y=0, xmin=self.df_ohlcv.index[0],\n xmax=self.df_ohlcv.index[-1], colors='k', linestyles='dashed')\n plt.ylabel(\"PL(USD)\")\n\n plt.show()\n\n def plot(self, name, value, color, overlay=True):\n \"\"\"\n Draw the graph\n \"\"\"\n self.df_ohlcv.at[self.index, name] = value\n if name not in self.plot_data:\n self.plot_data[name] = {'color': color, 'overlay': overlay}\n\n","sub_path":"src/binance_futures_backtest.py","file_name":"binance_futures_backtest.py","file_ext":"py","file_size_in_byte":9742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"305843864","text":"from keras.models import Model\nfrom keras.layers import *\nfrom keras.optimizers import RMSprop, Adam, SGD\nfrom keras.losses import binary_crossentropy\nfrom keras import backend as K\nfrom keras import layers\nimport numpy as np\nfrom bce_dice_loss import bce_dice_loss, dice_loss\nimport keras\n\ndef dw_conv(init, nb_filter, k, kl_reg = None):\n residual = Conv2D(nb_filter * k, (1, 1), strides=(2, 2), padding='same', use_bias=False)(init)\n residual = x = BatchNormalization()(residual)\n x = Conv2D(nb_filter * k, (3, 3), padding='same', use_bias=False)(init)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dropout(0.4)(x)\n x = Conv2D(nb_filter * k, (3, 3), padding='same', use_bias=False)(x)\n x = BatchNormalization()(x)\n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)\n\n x = layers.add([x, residual])\n\n return x\n\ndef up_conv(init, skip, nb_filter, k, kl_reg = None):\n x = Conv2DTranspose(nb_filter * k, (3, 3), padding='same', strides=(2, 2), kernel_regularizer=kl_reg)(init)\n x = BatchNormalization()(x)\n x = layers.add([x, skip])\n return x\n\ndef res_block(init, nb_filter, k=1):\n x = Activation('relu')(init)\n\n x = Conv2D(nb_filter * k, (3, 3), padding='same', use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dropout(0.4)(x)\n x = Conv2D(nb_filter * k, (3, 3), padding='same', use_bias=False)(x)\n x = BatchNormalization()(x)\n\n x = Squeeze_excitation_layer(x)\n\n x = layers.add([init, x])\n return x\n\n\ndef Squeeze_excitation_layer(input_x):\n ratio = 4\n out_dim = int(np.shape(input_x)[-1])\n squeeze = GlobalAveragePooling2D()(input_x)\n excitation = Dense(units=int(out_dim / ratio))(squeeze)\n excitation = Activation('relu')(excitation)\n excitation = Dense(units=out_dim)(excitation)\n excitation = Activation('sigmoid')(excitation)\n excitation = layers.Reshape([-1,1,out_dim])(excitation)\n scale = layers.multiply([input_x, excitation])\n\n return scale\n\n\ndef create_model(inputs, regularizers = None, k=1, lr=1e-3):\n if regularizers:\n KL_REG = keras.regularizers.l2(0.0001)\n else:\n KL_REG = None\n\n i = 0\n\n nb_filter = [16, 32, 64, 128, 256, 512, 256, 128, 64, 32, 16]\n\n #0\n x = Conv2D(nb_filter[i] *k, (3, 3), padding='same', 
use_bias=False)(inputs)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(nb_filter[i] *k, (3, 3), padding='same', use_bias=False)(x)\n x0 = BatchNormalization()(x)\n x = Activation('relu')(x0)\n i += 1\n\n #1\n x = dw_conv(x0, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x1 = res_block(x, k, nb_filter[i])\n i += 1\n\n #2\n x = dw_conv(x1, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x2 = res_block(x, k, nb_filter[i])\n i += 1\n\n #3\n x = dw_conv(x2, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x3 = res_block(x, k, nb_filter[i])\n i += 1\n\n #4\n x = dw_conv(x3, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x4 = res_block(x, k, nb_filter[i])\n i += 1\n\n #--------------- center ------------\n x = dw_conv(x4, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x = res_block(x, k, nb_filter[i])\n #--------------- center ------------\n i += 1\n\n #4\n x = up_conv(x, x4, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x = res_block(x, k, nb_filter[i])\n i += 1\n\n #3\n x = up_conv(x, x3, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x = res_block(x, k, nb_filter[i])\n i += 1\n\n #2\n x = up_conv(x, x2, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x = res_block(x, k, nb_filter[i])\n i += 1\n\n #1\n x = up_conv(x, x1, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x = res_block(x, k, nb_filter[i])\n i += 1\n\n #0\n x = up_conv(x, x0, nb_filter[i], k, kl_reg = KL_REG)\n x = res_block(x, k, nb_filter[i])\n x = res_block(x, k, nb_filter[i])\n x = Activation('relu')(x)\n\n classify = Conv2D(1, (1, 1), activation='sigmoid')(x)\n model = Model(inputs=inputs, outputs=classify)\n model.compile(optimizer=RMSprop(lr=lr), loss=bce_dice_loss, metrics=[dice_loss])\n\n return model\n","sub_path":"Unet_models/WRes_SE_Unet.py","file_name":"WRes_SE_Unet.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"487044144","text":"def run_length_encoding(data):\n possible_values = ['B', 'W']\n encoded = []\n count = 0\n prev = data[0]\n for x in range(len(data)) :\n if data[x] in possible_values and data[x] == prev :\n count += 1\n prev = data[x] \n if x == len(data)-1 :\n encoded.append(f'{count}{prev}') \n else :\n encoded.append(f'{count}{prev}')\n count = 1\n prev = data[x]\n\n return encoded\n\nif __name__ == '__main__':\n data = input()\n encoded = run_length_encoding(data)\n print(\"\".join(encoded))","sub_path":"Sem 4/Py_Lab/Ps_Basics_1/eight.py","file_name":"eight.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"592154855","text":"import keras\nimport math\nimport numpy as np\nimport os\nimport argparse\nfrom keras.datasets import cifar10\nfrom keras.utils import multi_gpu_model\nfrom save_paralle_model import *\nfrom BLS import *\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Conv2D, Dense, Input, add, Activation, AveragePooling2D, GlobalAveragePooling2D, Lambda, \\\n concatenate\nfrom keras.initializers import he_normal\nfrom keras.layers.merge import Concatenate\nfrom keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint\nfrom keras.models import Model, 
load_model\nfrom keras import optimizers, regularizers\nfrom keras import backend as K\n\nif ('tensorflow' == K.backend()):\n import tensorflow as tf\n from keras.backend.tensorflow_backend import set_session\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'\nparses = argparse.ArgumentParser()\nparses.add_argument('-m', '--mode', default='train', help='Select running mode: train, test or bls')\nargs = parses.parse_args()\n\n# ------- Parameters of DenseNet-------------\ngrowth_rate = 12\ndepth = 100\ncompression = 0.5\nimg_rows, img_cols = 32, 32\nimg_channels = 3\nnum_classes = 10\nbatch_size = 128 # 64 or 32 or other\nepochs = 300\niterations = 391\nweight_decay = 1e-4\nmodel_path = 'cifar-10-model/densenet_cifar10_model.h5'\nflag = args.mode\ninput_h, input_w = 32, 32\n\nmean = [125.307, 122.95, 113.865]\nstd = [62.9932, 62.0887, 66.7048]\n\n# --------Parameters of BLS----------------\nN1 = 10 # # of nodes belong to each window\nN2 = 1 # # of windows -------Feature mapping layer\nN3 = 500 # # of enhancement nodes -----Enhance layer\nL = 5 # # of incremental steps\nM = 50 # # of adding enhance nodes\ns = 0.8 # shrink coefficient\nc = 2 ** -30 # Regularization coefficient\nBLS_Epoch = 10\n\ntrain_acc = np.zeros([1, L + 1])\ntrain_time = np.zeros([1, L + 1])\ntest_acc = np.zeros([1, L + 1])\ntest_time = np.zeros([1, L + 1])\n\n\ndef scheduler(epoch):\n if epoch < 150:\n return 0.1\n if epoch < 225:\n return 0.01\n return 0.001\n\n\ndef densenet(img_input, classes_num):\n def conv(x, out_filters, k_size):\n return Conv2D(filters=out_filters,\n kernel_size=k_size,\n strides=(1, 1),\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(weight_decay),\n use_bias=False)(x)\n\n def dense_layer(x):\n return Dense(units=classes_num,\n activation='softmax',\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(weight_decay))(x)\n\n def bn_relu(x):\n x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)\n x = Activation('relu')(x)\n return x\n\n def bottleneck(x):\n channels = growth_rate * 4\n x = bn_relu(x)\n x = conv(x, channels, (1, 1))\n x = bn_relu(x)\n x = conv(x, growth_rate, (3, 3))\n return x\n\n def single(x):\n x = bn_relu(x)\n x = conv(x, growth_rate, (3, 3))\n return x\n\n def transition(x, inchannels):\n outchannels = int(inchannels * compression)\n x = bn_relu(x)\n x = conv(x, outchannels, (1, 1))\n x = AveragePooling2D((2, 2), strides=(2, 2))(x)\n return x, outchannels\n\n def dense_block(x, blocks, nchannels):\n concat = x\n for i in range(blocks):\n x = bottleneck(concat)\n concat = concatenate([x, concat], axis=-1)\n nchannels += growth_rate\n return concat, nchannels\n\n nblocks = (depth - 4) // 6\n nchannels = growth_rate * 2\n\n x = conv(img_input, nchannels, (3, 3))\n x, nchannels = dense_block(x, nblocks, nchannels)\n x, nchannels = transition(x, nchannels)\n x, nchannels = dense_block(x, nblocks, nchannels)\n x, nchannels = transition(x, nchannels)\n x, nchannels = dense_block(x, nblocks, nchannels)\n x = bn_relu(x)\n x = GlobalAveragePooling2D()(x)\n x = dense_layer(x)\n return x\n\n\ndef densenet_train(model, x_train, y_train, x_test, y_test):\n parallel_model = multi_gpu_model(model, gpus=2)\n sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)\n parallel_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\n # set callback\n tb_cb = TensorBoard(log_dir='./result/densenet/', 
histogram_freq=0)\n change_lr = LearningRateScheduler(scheduler)\n ckpt = ParallelModelCheckpoint(model, filepath=model_path, monitor='val_loss', save_best_only=True, save_weights_only=True,\n mode='auto', period=1)\n cbks = [change_lr, tb_cb, ckpt]\n\n # set data augmentation\n print('Using real-time data augmentation.')\n datagen = ImageDataGenerator(horizontal_flip=True, width_shift_range=0.125, height_shift_range=0.125,\n fill_mode='constant', cval=0.)\n\n datagen.fit(x_train)\n\n # start training\n parallel_model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size * 2), steps_per_epoch=iterations,\n epochs=epochs, callbacks=cbks, validation_data=(x_test, y_test))\n\n\ndef densenet_test(model, x_test, y_test):\n model.load_weights(model_path)\n model.summary()\n model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n score = model.evaluate(x_test, y_test)\n\n print('Test loss: ', score[0])\n print('Test accuracy: ', score[1])\n\n\ndef bls_dense_train(model, train_x, train_y):\n # 生成映射层\n model.load_weights(model_path)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n feature_layer = Model(inputs=model.input, outputs=model.get_layer('global_average_pooling2d_1').output)\n feature_layer.save('cifar-10-model/densenet_feature_layer_model.h5')\n\n\n time_start = time.time() # 计时开始\n\n OutputOfFeatureMappingLayer = []\n for i in range(N2):\n FeatureOfEachWindow = feature_layer.predict(train_x)\n scaler1 = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit(FeatureOfEachWindow)\n FeatureOfEachWindowAfterPreprocess = scaler1.transform(FeatureOfEachWindow)\n # 通过稀疏化计算映射层每个窗口内的最终权重\n outputOfEachWindow = FeatureOfEachWindowAfterPreprocess\n OutputOfFeatureMappingLayer.append(outputOfEachWindow)\n del FeatureOfEachWindow\n N1 = OutputOfFeatureMappingLayer[0].shape[1]\n OutputOfFeatureMappingLayer = np.array(OutputOfFeatureMappingLayer)\n OutputOfFeatureMappingLayer = np.reshape(OutputOfFeatureMappingLayer,\n newshape=[-1, OutputOfFeatureMappingLayer[0].shape[1] * N2])\n # 生成强化层\n # 以下为映射层输出加偏置(强化层输入)\n InputOfEnhanceLayerWithBias = np.hstack(\n [OutputOfFeatureMappingLayer, 0.1 * np.ones((OutputOfFeatureMappingLayer.shape[0], 1))])\n # 生成强化层权重\n if N1 * N2 >= N3:\n random.seed(67797325)\n weightOfEnhanceLayer = LA.orth(2 * random.randn(InputOfEnhanceLayerWithBias.shape[1], N3) - 1)\n else:\n random.seed(67797325)\n weightOfEnhanceLayer = LA.orth(2 * random.randn(InputOfEnhanceLayerWithBias.shape[1], N3).T - 1).T\n\n tempOfOutputOfEnhanceLayer = np.dot(InputOfEnhanceLayerWithBias, weightOfEnhanceLayer)\n parameterOfShrink = s / np.max(tempOfOutputOfEnhanceLayer)\n OutputOfEnhanceLayer = tansig(tempOfOutputOfEnhanceLayer * parameterOfShrink)\n\n # 生成最终输入\n InputOfOutputLayer = np.hstack([OutputOfFeatureMappingLayer, OutputOfEnhanceLayer])\n pinvOfInput = pinv(InputOfOutputLayer, c)\n OutputWeight = pinvOfInput.dot(train_y) # 全局违逆\n time_end = time.time() # 训练完成\n trainTime = time_end - time_start\n\n # 训练输出\n OutputOfTrain = np.dot(InputOfOutputLayer, OutputWeight)\n trainAcc = show_accuracy(OutputOfTrain, train_y)\n print('Training accurate is', trainAcc * 100, '%')\n print('Training time is ', trainTime, 's')\n train_acc[0][0] = trainAcc\n train_time[0][0] = trainTime\n\n return weightOfEnhanceLayer, parameterOfShrink, OutputWeight, InputOfEnhanceLayerWithBias, InputOfOutputLayer, pinvOfInput, N1\n\n\ndef bls_dense_test(x_test, y_test, weightOfEnhanceLayer, parameterOfShrink, OutputWeight):\n # 测试过程\n 
OutputOfFeatureMappingLayerTest = []\n feature_layer = load_model('cifar-10-model/densenet_feature_layer_model.h5')\n feature_layer.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n time_start = time.time() # 测试计时开始\n # 映射层\n for i in range(N2):\n outputOfEachWindowTest = feature_layer.predict(x_test)\n OutputOfFeatureMappingLayerTest.append(outputOfEachWindowTest)\n\n OutputOfFeatureMappingLayerTest = np.array(OutputOfFeatureMappingLayerTest)\n OutputOfFeatureMappingLayerTest = np.reshape(OutputOfFeatureMappingLayerTest,\n newshape=[-1,\n OutputOfFeatureMappingLayerTest[0].shape[1] * N2])\n # 强化层\n InputOfEnhanceLayerWithBiasTest = np.hstack(\n [OutputOfFeatureMappingLayerTest, 0.1 * np.ones((OutputOfFeatureMappingLayerTest.shape[0], 1))])\n tempOfOutputOfEnhanceLayerTest = np.dot(InputOfEnhanceLayerWithBiasTest, weightOfEnhanceLayer)\n # 强化层输出\n OutputOfEnhanceLayerTest = tansig(tempOfOutputOfEnhanceLayerTest * parameterOfShrink)\n # 最终层输入\n InputOfOutputLayerTest = np.hstack([OutputOfFeatureMappingLayerTest, OutputOfEnhanceLayerTest])\n # 最终测试输出\n OutputOfTest = np.dot(InputOfOutputLayerTest, OutputWeight)\n time_end = time.time() # 训练完成\n testTime = time_end - time_start\n testAcc = show_accuracy(OutputOfTest, y_test)\n print('Testing accurate is', testAcc * 100, '%')\n print('Testing time is ', testTime, 's')\n test_acc[0][0] = testAcc\n test_time[0][0] = testTime\n\n return InputOfEnhanceLayerWithBiasTest, InputOfOutputLayerTest\n\n\ndef bls_dense_enhance(train_y, test_y, InputOfEnhanceLayerWithBias, InputOfEnhanceLayerWithBiasTest, InputOfOutputLayer,\n pinvOfInput, InputOfOutputLayerTest, N1):\n parameterOfShrinkAdd = []\n for e in list(range(L)):\n time_start = time.time()\n if N1 * N2 >= M:\n random.seed(e)\n weightOfEnhanceLayerAdd = LA.orth(2 * random.randn(InputOfEnhanceLayerWithBias.shape[1], M) - 1)\n else:\n random.seed(e)\n weightOfEnhanceLayerAdd = LA.orth(2 * random.randn(InputOfEnhanceLayerWithBias.shape[1], M).T - 1).T\n\n # WeightOfEnhanceLayerAdd[e,:,:] = weightOfEnhanceLayerAdd\n # weightOfEnhanceLayerAdd = weightOfEnhanceLayer[:,N3+e*M:N3+(e+1)*M]\n tempOfOutputOfEnhanceLayerAdd = np.dot(InputOfEnhanceLayerWithBias, weightOfEnhanceLayerAdd)\n parameterOfShrinkAdd.append(s / np.max(tempOfOutputOfEnhanceLayerAdd))\n OutputOfEnhanceLayerAdd = tansig(tempOfOutputOfEnhanceLayerAdd * parameterOfShrinkAdd[e])\n tempOfLastLayerInput = np.hstack([InputOfOutputLayer, OutputOfEnhanceLayerAdd])\n\n D = pinvOfInput.dot(OutputOfEnhanceLayerAdd)\n C = OutputOfEnhanceLayerAdd - InputOfOutputLayer.dot(D)\n if C.all() == 0:\n w = D.shape[1]\n B = np.mat(np.eye(w) - np.dot(D.T, D)).I.dot(np.dot(D.T, pinvOfInput))\n else:\n B = pinv(C, c)\n pinvOfInput = np.vstack([(pinvOfInput - D.dot(B)), B])\n OutputWeightEnd = pinvOfInput.dot(train_y)\n InputOfOutputLayer = tempOfLastLayerInput\n Training_time = time.time() - time_start\n train_time[0][e + 1] = Training_time\n OutputOfTrain1 = InputOfOutputLayer.dot(OutputWeightEnd)\n TrainingAccuracy = show_accuracy(OutputOfTrain1, train_y)\n train_acc[0][e + 1] = TrainingAccuracy\n print('Incremental Training Accuracy is :', TrainingAccuracy * 100, ' %')\n\n # 增量增加节点的 测试过程\n time_start = time.time()\n OutputOfEnhanceLayerAddTest = tansig(\n InputOfEnhanceLayerWithBiasTest.dot(weightOfEnhanceLayerAdd) * parameterOfShrinkAdd[e]);\n InputOfOutputLayerTest = np.hstack([InputOfOutputLayerTest, OutputOfEnhanceLayerAddTest])\n\n OutputOfTest1 = InputOfOutputLayerTest.dot(OutputWeightEnd)\n TestingAcc = 
show_accuracy(OutputOfTest1, test_y)\n\n Test_time = time.time() - time_start\n test_time[0][e + 1] = Test_time\n test_acc[0][e + 1] = TestingAcc\n print('Incremental Testing Accuracy is : ', TestingAcc * 100, ' %')\n\n return test_acc, test_time, train_acc, train_time\n\n\ndef BLS_Desnet(model, model_path, x_train, train_y, x_test, test_y, s, c, N1, N2, N3):\n # u = 0\n model.load_weights(model_path)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n feature_layer = Model(inputs=model.input, outputs=model.get_layer('global_average_pooling2d_1').output)\n feature_layer.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n train_x = feature_layer.predict(x_train)\n\n L = 0\n FeatureOfInputDataWithBias = np.hstack([train_x, 0.1 * np.ones((train_x.shape[0], 1))])\n OutputOfFeatureMappingLayer = np.zeros([train_x.shape[0], N2 * N1])\n Beta1OfEachWindow = []\n\n train_acc_all = np.zeros([1, L + 1])\n test_acc = np.zeros([1, L + 1])\n train_time = np.zeros([1, L + 1])\n test_time = np.zeros([1, L + 1])\n time_start = time.time() # 计时开始\n for i in range(N2):\n random.seed(i)\n weightOfEachWindow = 2 * random.randn(train_x.shape[1] + 1, N1) - 1; # 生成每个窗口的权重系数,最后一行为偏差\n # WeightOfEachWindow([],[],i) = weightOfEachWindow; #存储每个窗口的权重系数\n FeatureOfEachWindow = np.dot(FeatureOfInputDataWithBias, weightOfEachWindow) # 生成每个窗口的特征\n FeatureOfEachWindowAfterPreprocess = FeatureOfEachWindow\n # 通过稀疏化计算映射层每个窗口内的最终权重\n betaOfEachWindow = sparse_bls(FeatureOfEachWindowAfterPreprocess, FeatureOfInputDataWithBias).T\n print(betaOfEachWindow.shape)\n # 存储每个窗口的系数化权重\n Beta1OfEachWindow.append(betaOfEachWindow)\n # 每个窗口的输出 T1\n outputOfEachWindow = np.dot(FeatureOfInputDataWithBias, betaOfEachWindow)\n # print('Feature nodes in window: max:',np.max(outputOfEachWindow),'min:',np.min(outputOfEachWindow))\n # distOfMaxAndMin.append(np.max(outputOfEachWindow, axis=0) - np.min(outputOfEachWindow, axis=0))\n # minOfEachWindow.append(np.min(outputOfEachWindow, axis=0))\n # outputOfEachWindow = (outputOfEachWindow - minOfEachWindow[i]) / distOfMaxAndMin[i]\n OutputOfFeatureMappingLayer[:, N1 * i:N1 * (i + 1)] = outputOfEachWindow\n del outputOfEachWindow\n del FeatureOfEachWindow\n del weightOfEachWindow\n\n # 生成强化层\n # 以下为映射层输出加偏置(强化层输入)\n InputOfEnhanceLayerWithBias = np.hstack([OutputOfFeatureMappingLayer, 0.1 * np.ones((OutputOfFeatureMappingLayer.shape[0], 1))])\n # 生成强化层权重\n if N1 * N2 >= N3:\n random.seed(67797325)\n # dim = N1*N2+1\n # temp_matric = stats.ortho_group(dim)\n # weightOfEnhanceLayer = temp_matric[:,0:N3]\n weightOfEnhanceLayer = LA.orth(2 * random.randn(N2 * N1 + 1, N3)) - 1\n else:\n random.seed(67797325)\n weightOfEnhanceLayer = LA.orth(2 * random.randn(N2 * N1 + 1, N3).T - 1).T\n\n tempOfOutputOfEnhanceLayer = np.dot(InputOfEnhanceLayerWithBias, weightOfEnhanceLayer)\n # print('Enhance nodes: max:',np.max(tempOfOutputOfEnhanceLayer),'min:',np.min(tempOfOutputOfEnhanceLayer))\n\n parameterOfShrink = s / np.max(tempOfOutputOfEnhanceLayer)\n\n OutputOfEnhanceLayer = tansig(tempOfOutputOfEnhanceLayer * parameterOfShrink)\n\n # 生成最终输入\n InputOfOutputLayer = np.hstack([OutputOfFeatureMappingLayer, OutputOfEnhanceLayer])\n pinvOfInput = pinv(InputOfOutputLayer, c)\n OutputWeight = np.dot(pinvOfInput, train_y) # 全局违逆\n time_end = time.time() # 训练完成\n trainTime = time_end - time_start\n\n # 训练输出\n OutputOfTrain = np.dot(InputOfOutputLayer, OutputWeight)\n trainAcc = show_accuracy(OutputOfTrain, train_y)\n print('Training accurate is', 
trainAcc * 100, '%')\n print('Training time is ', trainTime, 's')\n train_acc_all[0][0] = trainAcc\n train_time[0][0] = trainTime\n\n\n # 测试过程\n time_start = time.time() # 测试计时开始\n test_x = feature_layer.predict(x_test)\n FeatureOfInputDataWithBiasTest = np.hstack([test_x, 0.1 * np.ones((test_x.shape[0], 1))])\n OutputOfFeatureMappingLayerTest = np.zeros([test_x.shape[0], N2 * N1])\n\n # 映射层\n for i in range(N2):\n outputOfEachWindowTest = np.dot(FeatureOfInputDataWithBiasTest, Beta1OfEachWindow[i])\n OutputOfFeatureMappingLayerTest[:, N1 * i:N1 * (i + 1)] = outputOfEachWindowTest\n # 强化层\n InputOfEnhanceLayerWithBiasTest = np.hstack(\n [OutputOfFeatureMappingLayerTest, 0.1 * np.ones((OutputOfFeatureMappingLayerTest.shape[0], 1))])\n tempOfOutputOfEnhanceLayerTest = np.dot(InputOfEnhanceLayerWithBiasTest, weightOfEnhanceLayer)\n # 强化层输出\n OutputOfEnhanceLayerTest = tansig(tempOfOutputOfEnhanceLayerTest * parameterOfShrink)\n # 最终层输入\n InputOfOutputLayerTest = np.hstack([OutputOfFeatureMappingLayerTest, OutputOfEnhanceLayerTest])\n # 最终测试输出\n OutputOfTest = np.dot(InputOfOutputLayerTest, OutputWeight)\n time_end = time.time() # 训练完成\n testTime = time_end - time_start\n testAcc = show_accuracy(OutputOfTest, test_y)\n print('Testing accurate is', testAcc * 100, '%')\n print('Testing time is ', testTime, 's')\n\n test_acc[0][0] = testAcc\n test_time[0][0] = testTime\n\n return testAcc, testTime, trainAcc, trainTime\n\n\ndef run_main():\n # load data\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n # - mean / std\n for i in range(3):\n x_train[:, :, :, i] = (x_train[:, :, :, i] - mean[i]) / std[i]\n x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]\n\n # build network\n img_input = Input(shape=(img_rows, img_cols, img_channels))\n output = densenet(img_input, num_classes)\n _model = Model(img_input, output)\n _model.summary()\n if flag == 'train':\n history = densenet_train(_model, x_train, y_train, x_test, y_test)\n elif flag == 'test':\n test_start = time.time()\n densenet_test(_model, x_test, y_test)\n test_end = time.time()\n print('Testing time of CNN is: ', test_end - test_start)\n elif flag == 'bls':\n weightOfEnhanceLayer, parameterOfShrink, OutputWeight, InputOfEnhanceLayerWithBias, InputOfOutputLayer, pinvOfInput, N1 = bls_dense_train(\n _model, x_train, y_train)\n InputOfEnhanceLayerWithBiasTest, InputOfOutputLayerTest = bls_dense_test(x_test, y_test,\n weightOfEnhanceLayer, parameterOfShrink,\n OutputWeight)\n test_acc, test_time, train_acc, train_time = bls_dense_enhance(y_train, y_test,\n InputOfEnhanceLayerWithBias,\n InputOfEnhanceLayerWithBiasTest,\n InputOfOutputLayer, pinvOfInput,\n InputOfOutputLayerTest, N1)\n np.savetxt('result/dense_test_acc.txt', test_acc)\n np.savetxt('result/dense_test_time.txt', test_time)\n\n elif flag == 'dbnet':\n n3 = 100\n m = 100\n L = 150\n Test_Acc = []\n Test_Time = []\n enhance_num = []\n\n for i in range(L):\n print('The number of enhancement nodes is: ', n3)\n test_acc, test_time, train_acc, train_time = BLS_Desnet(_model, model_path, x_train, y_train, x_test, y_test, s, c, 10, 10, n3)\n Test_Acc.append(test_acc)\n Test_Time.append(test_time)\n enhance_num.append(n3)\n n3 = n3 + m\n np.savetxt('result/densenet/enhance_num.txt', enhance_num)\n np.savetxt('result/densenet/dense_test_acc.txt', Test_Acc)\n 
np.savetxt('result/densenet/dense_test_time.txt', Test_Time)\nif __name__ == '__main__':\n run_main()\n\n","sub_path":"CIFAR-10/densenet_cifar10.py","file_name":"densenet_cifar10.py","file_ext":"py","file_size_in_byte":21170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"336938309","text":"# coding=utf-8\n\"\"\"\n\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\n\nimport bleach\nfrom jinja2 import Template, Markup\nfrom flask import render_template_string\n\nfrom abilian.i18n import _, _l\nfrom abilian.core.util import BasePresenter\nfrom abilian.web.util import url_for\n\nfrom abilian.sbe.apps.communities.models import Community\nfrom abilian.sbe.apps.documents.models import Document\nfrom abilian.sbe.apps.documents.views.util import breadcrumbs_for as doc_breadcrumb\nfrom abilian.sbe.apps.forum.models import Post, Thread\nfrom abilian.sbe.apps.wiki.models import WikiPage\n\n\nlogger = logging.getLogger(__name__)\n\n# Poor man's pattern matching.\nMESSAGES = {\n ('post', Thread): _l(u'has started conversation \"{object}\"'),\n ('post', Post): _l(u'has participated in conversation \"{object}\"'),\n ('post', Document): _l(u'has published document \"{object}\"'),\n ('post', WikiPage): _l(u'has created wiki page \"{object}\"'),\n\n ('update', Document): _l(u'has updated document \"{object}\"'),\n ('update', WikiPage): _l(u'has updated wiki page \"{object}\"'),\n ('update', Community): _l(u'has updated community \"{object}\"'),\n\n ('join', Community): _l(u'has joined the community {object}.'),\n ('leave', Community): _l(u'has left the community {object}.'),\n}\n\nOBJ_TEMPLATE = Template(\n u'<a href=\"{{ object_url }}\">{{ object_name }}</a>'\n)\n\nPOST_BODY_TEMPLATE = u'''\n \"{{ body }}\"\n {%- if post.attachments %}\n
 {%- for attachment in post.attachments %}\n {{ attachment.name }} ({{ attachment.content_length|filesize }})\n {%- endfor %}\n {%- endif %}\n '''\n\nDOCUMENT_BODY_TEMPLATE = u'''\n {% for p in parents[:-1] %}\n {{ p.label }} /\n {% endfor %}\n\n {{ obj.name }}\n {%- if obj.antivirus_ok %}\n {{ _('Download') }} ({{ obj.content_length|filesize }})\n {%- endif %}
\n'''\n\n\nclass ActivityEntryPresenter(BasePresenter):\n @property\n def object_url(self):\n return url_for(self._model.object)\n\n def message(self, ignore_community=False):\n try:\n # another quick&dirty approach for now. FIXME later.\n entry = self._model\n object_class = entry.object_type.split('.')[-1]\n object_class_localized = _(object_class)\n\n ctx = {}\n ctx['verb'] = entry.verb\n\n ctx['object_name'] = entry.object.name\n ctx['object_url'] = url_for(entry.object)\n ctx['object_type'] = object_class_localized\n ctx['object'] = OBJ_TEMPLATE.render(**ctx)\n\n if entry.target:\n ctx['target_name'] = entry.target.name\n ctx['target_url'] = url_for(entry.target)\n ctx['target'] = OBJ_TEMPLATE.render(\n object_name=ctx['target_name'],\n object_url=ctx['target_url']\n )\n\n msg = MESSAGES.get((entry.verb, entry.object.__class__))\n if msg:\n msg = msg.format(**ctx)\n if entry.target and not ignore_community:\n msg += u\" \" + _(u'in the community {target}.').format(**ctx)\n else:\n msg += u\".\"\n\n elif entry.verb == 'post':\n msg = _(u'has posted an object of type {object_type} '\n u'called \"{object}\"').format(**ctx)\n\n if entry.target and not ignore_community:\n msg += u\" \" + _(u'in the community {target}.').format(**ctx)\n else:\n msg += u\".\"\n\n elif entry.verb == 'join':\n msg = _(u'has joined the community {object}.').format(**ctx)\n\n elif entry.verb == 'leave':\n msg = _(u'has left the community {object}.').format(**ctx)\n\n elif entry.verb == 'update':\n msg = _(u'has updated {object_type} {object}.').format(**ctx)\n\n else:\n msg = _(u'has done action \"{verb}\" on object \"{object}\".').format(**ctx)\n\n return Markup(msg)\n\n except:\n logger.exception('Exception while presenting activity message')\n raise\n\n @property\n def body(self):\n if isinstance(self.object, Thread):\n body = bleach.clean(self.object.posts[0].body_html, tags=[], strip=True)\n body = Markup(body).unescape()\n if len(body) > 400:\n body = body[0:400] + u\"…\"\n body = render_template_string(POST_BODY_TEMPLATE,\n object_url=self.object_url, body=body, post=self.object.posts[0])\n return Markup(body)\n elif isinstance(self.object, Post):\n body = bleach.clean(self.object.body_html, tags=[], strip=True)\n body = Markup(body).unescape()\n if len(body) > 400:\n body = body[0:400] + u\"…\"\n body = render_template_string(POST_BODY_TEMPLATE,\n object_url=self.object_url, body=body, post=self.object)\n return Markup(body)\n elif isinstance(self.object, Document):\n parents = doc_breadcrumb(self.object)\n body = render_template_string(\n DOCUMENT_BODY_TEMPLATE,\n obj=self.object,\n parents=parents\n )\n return Markup(body)\n else:\n return \"\"\n","sub_path":"abilian/sbe/apps/wall/presenters.py","file_name":"presenters.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601302174","text":"__author__ = 'vivek'\n\nimport os\nimport sys\n\nif __name__ =='__main__':\n num_runs = int(sys.argv[1])\n evfile = sys.argv[2]\n num_clone_files = sys.argv[3]\n md_output_file = sys.argv[4]\n nearest_neighbor_file = sys.argv[5]\n w_file = sys.argv[6]\n outgrofile_name = sys.argv[7]\n max_alive_neighbors = int(sys.argv[8])\n max_dead_neighbors = int(sys.argv[9])\n md_input_file = sys.argv[10]\n cycle = int(sys.argv[11])\n\n os.system('python select.py %s -s %s -o %s' %(num_runs,evfile,num_clone_files))\n #Update Boltzman weights\n\n os.system('python reweighting.py -c %s -n %s -s %s -w %s -o %s --max_alive_neighbors=%s 
--max_dead_neighbors=%s' % (md_output_file,nearest_neighbor_file,num_clone_files,w_file,outgrofile_name,max_alive_neighbors,max_dead_neighbors))\n\n #Rename outputfile as inputfile for next iteration\n os.system('mv %s %s_%s'%(outgrofile_name,cycle+1,md_input_file))\n","sub_path":"src/radical/ensemblemd/extasy/bin/Analyzer/LSDMap/post_analyze.py","file_name":"post_analyze.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"155608069","text":"import logging.handlers\nimport os\n\n# 项目根目录\nimport time\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nprint(BASE_DIR)\n\n\n# 初始化日志配置\ndef init_log_config():\n # 创建日志器\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # 创建处理器对象\n sh = logging.StreamHandler()\n fh = logging.handlers.TimedRotatingFileHandler(BASE_DIR + \"/log/iFlytek.log\", when=\"midnight\",\n interval=1, backupCount=15, encoding=\"UTF-8\")\n # 创建格式化器对象\n fmt = \"%(asctime)s %(levelname)s [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s\"\n formatter = logging.Formatter(fmt)\n\n # 把格式化器添加到处理器中\n sh.setFormatter(formatter)\n fh.setFormatter(formatter)\n\n # 把处理器添加到日志器中\n logger.addHandler(sh)\n logger.addHandler(fh)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"288305767","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/cluster_drug_discovery/visualization/plots.py\n# Compiled at: 2019-06-11 07:56:19\n# Size of source mod 2**32: 1271 bytes\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.cm as cm\nimport pandas as pd\n\ndef UMAP_plot(X, Y, title='UMAP projection', fontsize=24, output='UMAPproj.png'):\n reducer = umap.UMAP(n_neighbors=5, min_dist=0.2)\n embedding = reducer.fit_transform(X)\n fig, ax = plt.subplots()\n ax.scatter((embedding[:, 0]), (embedding[:, 1]), c=[sns.color_palette()[y] for y in np.array(Y)])\n fig.gca().set_aspect('equal', 'datalim')\n ax.set_title(title)\n fig.savefig(output)\n\n\ndef plot(X, Y, labels, title='plot', fontsize=24, output='proj.png', true_false=False):\n fig, ax = plt.subplots()\n if true_false:\n x_true = []\n y_true = []\n x_false = []\n y_false = []\n for x, y, l in zip(X, Y, labels):\n if l:\n x_true.append(x)\n y_true.append(y)\n else:\n x_false.append(x)\n y_false.append(y)\n\n ax.scatter(x_true, y_true, c='g')\n ax.scatter(x_false, y_false, c='r')\n else:\n colors = cm.nipy_spectral(labels.astype(float) / (max(labels) + 1))\n ax.scatter(X, Y, c=colors)\n ax.set_title(title)\n fig.savefig(output)","sub_path":"pycfiles/Cluster_Ensembles-1.16.tar/plots.cpython-37.py","file_name":"plots.cpython-37.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"625260561","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('atados_core', '0037_address_google_address'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='googleaddress',\n name='lat',\n field=models.FloatField(null=True, verbose_name=b'lat', blank=True),\n 
preserve_default=True,\n ),\n migrations.AlterField(\n model_name='googleaddress',\n name='lng',\n field=models.FloatField(null=True, verbose_name=b'lng', blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"atados_core/migrations/0038_auto_20160525_1534.py","file_name":"0038_auto_20160525_1534.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"49722848","text":"class Solution(object):\n def combinationSum(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n candidates.sort()\n Solution.ret=[]\n self.dfs(candidates,target,[])\n return Solution.ret\n def dfs(self,candidates,target,valuelist):\n if target==0:\n Solution.ret.append(valuelist)\n for i in range(len(candidates)):\n if target0:\n for socket in sockets:\n socket.send(recv_data)\ndef main():\n server_socket = socket(AF_INET, SOCK_STREAM)\n server_socket.bind(('172.20.10.2',8989))\n server_socket.listen()\n print(\"服务器上线\")\n while True:\n client_socket, client_info = server_socket.accept()\n sockets.append(client_socket)\n t = Thread(target=read_send_Msg, args=(client_socket,))\n t.start()\n wel=\"连接成功,欢迎登陆!\"\n client_socket.send(wel.encode('utf-8'))\nif __name__=='__main__':\n main()\n\n\n\n\n\n\n\n","sub_path":"TCP服务器.py","file_name":"TCP服务器.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14349971","text":"from restapi.decorators import api\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import *\n\n\n\n@api\ndef load_user_data(uid):\n\treturn {\n\t\t'a': 1\n\t}\n\n\n@api\ndef save_user_data(uid, data):\n\tpass\n\n\n@api\ndef add(a, b):\n\treturn a + b\n\n\n#进入游戏,是否加载剧情\n@api\ndef get_user_plot(**params):\n\tres = {}\n\tuser_name = params['user_name']\n\tcheckpoint_id = params['level']\n\tplot_list = [] #存剧情\n\tget_user_level = 0\n\tif user_name:\n\t\tif checkpoint_id:\n\t\t\tget_user = User.objects.filter(user_id=user_name,checkpoint__checkpoint_id=checkpoint_id).first()\n\t\t\tif get_user:\n\t\t\t\tget_user_level = int(get_user.status) if get_user else 0\n\t\t\t\tif get_user_level >= checkpoint_id and get_user_level != 0:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tif get_user_level == 0:\n\t\t\t\t\t\tget_user_level = 1#为0表示为新手,加载第一关剧情\n\t\t\t\t\tget_plot = Plot.objects.filter(checkpoint_id=get_user_level).order_by('serial_number')\n\t\t\t\t\tfor x in get_plot:\n\t\t\t\t\t\tjuqi = {}\n\t\t\t\t\t\tjuqi['name'] = x.plot_name\n\t\t\t\t\t\tplot_list.append(juqi)\n\t\t\telse:\n\t\t\t\tres['error'] = '请登录游戏!'\n\n\t\telse:\n\t\t\tget_user = User.objects.filter(user_id=user_name).first()\n\t\t\tif get_user:\n\t\t\t\tif get_user_level == 0:\n\t\t\t\t\tget_user_level = 1\n\t\t\t\tget_plot = Plot.objects.filter(checkpoint_id=get_user_level).order_by('serial_number')\n\t\t\t\tfor x in get_plot:\n\t\t\t\t\tjuqi = {}\n\t\t\t\t\tjuqi['name'] = x.plot_name\n\t\t\t\t\tplot_list.append(juqi)\n\t\t\telse:\n\t\t\t\tres['error'] = '请登录游戏!'\n\n\telse:\n\t\tres['error']= '请登录游戏!'\n\n\tUser.objects.filter(user_id=user_name).update(status=get_user_level)\n\tres['imgs'] = plot_list\n\treturn res\n\n#加载剧情\n@api\ndef get_plot(**params):\n\tres = {}\n\tcheckpoint_id = params['level']\n\tplot_list = [] #存剧情\n\tif checkpoint_id:\n\t\tget_plot = Plot.objects.filter(checkpoint_id=checkpoint_id).order_by('serial_number')\n\t\tfor x in get_plot:\n\t\t\tjuqi = 
{}\n\t\t\tjuqi['name'] = x.plot_name\n\t\t\tplot_list.append(juqi)\n\telse:\n\t\tres['error'] = '请选择关卡剧情!'\n\tres['imgs'] = plot_list\n\treturn res\n\n\n#获取当前等级\n@api\ndef get_user_level(**params):\n res = {}\n user_name = params['user_name']\n level = 1\n if user_name:\n user_id = User.objects.filter(user_id=user_name).first()\n if user_id:\n level = user_id.checkpoint.checkpoint_id if user_id.checkpoint else 1\n else:\n res['error'] = '请登录游戏!'\n else:\n res['error'] = '请登录游戏!'\n res['level'] = int(level) - 1\n\n return res\n\n\n#展示所有关卡\n@api\ndef get_level(**params):\n res = {}\n checkpoint_list = []\n checkpoint = Checkpoint.objects.all().order_by('checkpoint_id')\n for x in checkpoint:\n guanka = {}\n guanka['level_id'] = x.checkpoint_id\n guanka['level_name'] = x.checkpoint_name\n checkpoint_list.append(guanka)\n res['level_list'] = checkpoint_list\n return res\n \n\n#通关加一级\n@api\ndef get_user_clearance(**params):\n\tuser_name = params['user_name']\n\tlevel = 1\n\tif user_name:\n\t\tget_level = User.objects.filter(user_id=user_name).first()\n\t\tif get_level:\n\t\t\tlevel = get_level.checkpoint.checkpoint_id if get_level.checkpoint else 1\n\t\tlevel = level + 1\n\t\tUser.objects.filter(user_id=user_name).update(checkpoint__checkpoint_id=level)\n\telse:\n\t\tres['error'] = '请登录游戏!'\n\treturn res\n\n","sub_path":"server/sky/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"16910401","text":"import re\nimport gc\n\n\nclass FishFriend(object):\n type = 'fish'\n count = 0\n friend_count = 0\n fishguys = []\n\n def __init__(self, name, weight, age):\n self.name = name\n self.weight = weight\n self.age = age\n self.fishguys.append(self)\n FishFriend.count += 1\n\n if self.count < 2:\n print('There was a ' +self.type+ ' named ' +self.name)\n else:\n print('And a '+ self.type +' - '+self.name)\n\n def make_friends(self, friend):\n self.friend = friend\n FishFriend.friend_count += 1\n\n if FishFriend.friend_count <2:\n print(self.name+' has a friend - '+self.friend.name)\n else:\n if FishFriend.friend_count==2:\n print(self.name+' has a friend too - '+self.friend.name)\n else:\n if FishFriend.friend_count>2:\n print('And '+self.name + '\\'s friend - ' + self.friend.name)\n\n\nclass Crabfriend(FishFriend):\n type = 'crab'\n friendslist = ''\n\n def __init__(self, name, weight, age):\n FishFriend.__init__(self, name, weight, age)\n self.make_friends()\n\n\n def make_friends(self):\n\n for guy in FishFriend.fishguys:\n self.friendslist += str(guy.name)\n print('Crabs have a lot of friends - ' + self.friendslist)\n\n\n\nolly = FishFriend(name='Olly', weight=3, age=3)\njohny = FishFriend('Johny',1 , 3)\nhoward = Crabfriend('Howard', 10, 10)\n\nolly.make_friends(johny)\njohny.make_friends(olly)\n\n\n\n","sub_path":"class_practice.py","file_name":"class_practice.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"451290726","text":"from matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.figure import Figure\nimport numpy\n\nnodes, bcast, tar, chmod, bcasttot, launch = numpy.loadtxt('./collect.txt', unpack=True)\n\nf = Figure(figsize=(4, 4))\nax = f.add_subplot(111)\n\nax.plot(nodes * 24, launch, 's ', color='k', label='import scipy')\nax.plot(nodes * 24, bcasttot, 'o ', color='k', label='bcast')\nax.plot(nodes * 24, bcast, 'x ', color='k', mew=2, 
label='bcast/MPI_Bcast')\nax.plot(nodes * 24, tar, '+ ', color='k', mew=2, label='bcast/tar xzvf')\nax.plot(nodes * 24, launch + bcasttot, 'd ', color='k', label='total')\n\nax.set_xlabel('Number of Ranks')\nax.set_ylabel('Wall time [sec]')\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_ylim(3e-1, 2e3)\n#ax.grid()\nax.legend(loc='upper left', frameon=False, ncol=1, fontsize='small')\ncanvas = FigureCanvasAgg(f)\nf.tight_layout()\nf.savefig('cray-xc30-startup-time.png', dpi=72)\nf.savefig('cray-xc30-startup-time-hires.png', dpi=200)\nf.savefig('cray-xc30-startup-time-hires.pdf', dpi=200)\n","sub_path":"cray-xc30/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"643555402","text":"import sys\nfrom os import sys, path\n#sys.path.extend((r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages', r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\win32',r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages\\pywin32_system32\\pywintypes37.dll'))\n#sys.path.insert(0, 
r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages')\n#sys.path.insert(0,r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages\\pywin32_system32')\nsys.path.append(r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages\\pywinauto')\nsys.path.append(r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages')\nsys.path.append(r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages\\win32')\nsys.path.append(r'C:\\Users\\zaranyikab\\AppData\\Local\\Continuum\\anaconda3\\Lib\\site-packages\\win32\\lib')\nfrom pywinauto import Application\nimport subprocess\nimport pyperclip\nfrom pathlib import Path\nimport pywinauto\nimport time\n\n\"\"\"Accessing the logs on SAG\"\"\"\nparentDir = Path(__file__).parent.parent #get parent directory\njarFilePath = str(parentDir) + \"\\sikuliScripts\\jars\"\nmyApp = r\"C:\\SAG\\ServiceAdminGUI2.exe\"\napp = Application().start(myApp) # set the app\nwindow = app.top_window()\nlogFilePath = str(parentDir) + \"\\dataEngine\\dataSource\"\nwindowsformswindowappecf = app[u'WindowsForms10.Window.8.app.0.11ecf05']\nrtiSessionId = \"\"\nibmSessionId = \"35379539-cef7-4823-b3b3-e0faa699731a\"\ndef ibimb1trxlogs():\n subprocess.call(['java', '-jar', jarFilePath + r'\\ibimb1logs_sikuli.jar'])\ndef ibimb1trclogs():\n subprocess.call(['java', '-jar', jarFilePath + r'\\ibimb1logstrc_sikuli.jar'])\ndef routingproxytrxlogs():\n subprocess.call(['java', '-jar', jarFilePath + r'\\routingproxy_sikuli.jar'])\ndef routingproxytrclogs():\n subprocess.call(['java', '-jar', jarFilePath + r'\\routingproxytrc_sikuli.jar'])\n# def closeLogsDetailsPage():\n# subprocess.call(['java', '-jar', jarFilePath + r'\\closeLogsPage_sikuli.jar'])\n# def closeViewLogsPage():\n# subprocess.call(['java', '-jar', jarFilePath + r'\\cloViewLogsWindow_sikuli.jar'])\ndef filterBySessionId(sessionId):\n \"\"\"Type session id to the filter textbox\"\"\"\n windowsformswindowappecf1 = app[u'WindowsForms10.Window.8.app.0.11ecf05']\n windowsformswindowappecf1.type_keys(\"{TAB 5}\").type_keys(sessionId)\n \"\"\"Click the filter button\"\"\"\n windowsformsbuttonappecf = windowsformswindowappecf1.Filter\n windowsformsbuttonappecf.click()\ndef copyPasteLogsData(logType,manager):\n #time.sleep(3)\n \"\"\"Select All, Copy, paste into a variable. 
Then write to notepad\"\"\"\n # windowsformswindowappecf.type_keys('^a{BACKSPACE}')\n #windowsformswindowappecf.click()\n if not app.Error.exists(timeout=1):\n print(\"Proceed\")\n else:\n app.Error.OK.click()\n\n pywinauto.mouse.click(button='left', coords=(600, 600))\n windowsformswindowappecf.type_keys('^a^c')\n dataCopied = pyperclip.paste()\n print(dataCopied)\n\n \"\"\"Writing to txt file\"\"\"\n with open(logFilePath + \"/\" + logType + manager + \".txt\", \"w\") as text_file:\n print(dataCopied, file=text_file)\n\"\"\"This function filter a log manager by session id\"\"\"\ndef logsFilter(sessionId,logType,manager):\n filterBySessionId(sessionId)\n copyPasteLogsData(logType, manager)\n\"\"\"Function to access SAG and get the logs\"\"\"\ndef sag():\n window.click()\n window.type_keys(\"{TAB 1}\").type_keys(r\"username\")\n #time.sleep(1)\n window.type_keys(\"{TAB 1}\").type_keys(r\"password\")\n app.window(best_match='Connect').child_window(best_match='Connect').click()\n #time.sleep(1)\n \"\"\"Set Cursor on the managers list\"\"\"\n app.ISIS_Service_Admin_GUI_v16_1_0_1_QMCW101ZATCWI.activator_QMCW101ZATCWI.set_focus()\n windowsformswindowappecf = app[u'WindowsForms10.Window.8.app.0.11ecf05']\n windowsformswindowappecf.wait('ready')\n windowsformswindowappecf2 = windowsformswindowappecf[u'WindowsForms10.Window.8.app.0.11ecf0514']\n windowsformswindowappecf2.click().set_focus()\n \"\"\"Navigate to ibimb.ibim1 manager for trx logs\"\"\"\n ibimb1trxlogs()\n print('Filtering by session id, copying trc logs to txt file')\n \"\"\"Enter Session Id and Click Filter, Select All, Copy and paste to a temp location\"\"\"\n logsFilter(ibmSessionId,\"ibmbm1\",\"trx\")\n \"\"\"closing ibm trx window\"\"\"\n windowsformswindowappecf.close()\n #time.sleep(1)\n \"\"\"Processing trc Logs, Filtering by session id, copying trc logs to txt file\"\"\"\n ibimb1trclogs()\n \"\"\"Enter Session Id and Click Filter, Select All, Copy and paste to a temp location\"\"\"\n logsFilter(ibmSessionId,\"ibmbm1\",\"trc\")\n \"\"\"ibm trc window closed\"\"\"\n windowsformswindowappecf.close()\n windowsformswindowappecf2.close()\n \"\"\"Navigate to routing proxy manager for trx logs\"\"\"\n app.windowsformswindowappecf2.wheel_mouse_input(wheel_dist=-40) # Scroll down\n routingproxytrxlogs()\n \"\"\"Filtering by session id, copying trx logs to txt file\"\"\"\n logsFilter(rtiSessionId,\"RoutingProxy\",\"trx\")\n \"\"\"closing ibm trx window\"\"\"\n windowsformswindowappecf.close()\n\n\n \"\"\"Navigate to routing proxy manager for trc logs\"\"\"\n routingproxytrclogs()\n logsFilter(rtiSessionId,\"RoutingProxy\",\"trc\")\n\n time.sleep(1)\n \"\"\"Exiting SAG\"\"\"\n app.ISISServiceAdminGUIv16101.menu_select(\"File -> Exit\")\n app.Exit.Yes.click()\n\nsag()","sub_path":"commons/SAGRunner.py","file_name":"SAGRunner.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557781913","text":"# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.fluid as fluid\nimport paddle.fluid.layers.tensor as tensor\nimport paddle.fluid.layers.control_flow as cf\n\nfrom paddlerec.core.utils import envs\nfrom paddlerec.core.model import ModelBase\n\n\nclass BowEncoder(object):\n \"\"\" bow-encoder \"\"\"\n\n def __init__(self):\n self.param_name = \"\"\n\n def forward(self, emb):\n return fluid.layers.sequence_pool(input=emb, pool_type='sum')\n\n\nclass CNNEncoder(object):\n \"\"\" cnn-encoder\"\"\"\n\n def __init__(self,\n param_name=\"cnn\",\n win_size=3,\n ksize=128,\n act='tanh',\n pool_type='max'):\n self.param_name = param_name\n self.win_size = win_size\n self.ksize = ksize\n self.act = act\n self.pool_type = pool_type\n\n def forward(self, emb):\n return fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=self.ksize,\n filter_size=self.win_size,\n act=self.act,\n pool_type=self.pool_type,\n param_attr=self.param_name + \".param\",\n bias_attr=self.param_name + \".bias\")\n\n\nclass GrnnEncoder(object):\n \"\"\" grnn-encoder \"\"\"\n\n def __init__(self, param_name=\"grnn\", hidden_size=128):\n self.param_name = param_name\n self.hidden_size = hidden_size\n\n def forward(self, emb):\n fc0 = fluid.layers.fc(input=emb,\n size=self.hidden_size * 3,\n param_attr=self.param_name + \"_fc.w\",\n bias_attr=False)\n\n gru_h = fluid.layers.dynamic_gru(\n input=fc0,\n size=self.hidden_size,\n is_reverse=False,\n param_attr=self.param_name + \".param\",\n bias_attr=self.param_name + \".bias\")\n return fluid.layers.sequence_pool(input=gru_h, pool_type='max')\n\n\nclass SimpleEncoderFactory(object):\n def __init__(self):\n pass\n\n ''' create an encoder through create function '''\n\n def create(self, enc_type, enc_hid_size):\n if enc_type == \"bow\":\n bow_encode = BowEncoder()\n return bow_encode\n elif enc_type == \"cnn\":\n cnn_encode = CNNEncoder(ksize=enc_hid_size)\n return cnn_encode\n elif enc_type == \"gru\":\n rnn_encode = GrnnEncoder(hidden_size=enc_hid_size)\n return rnn_encode\n\n\nclass Model(ModelBase):\n def __init__(self, config):\n ModelBase.__init__(self, config)\n\n def _init_hyper_parameters(self):\n self.query_encoder = envs.get_global_env(\n \"hyper_parameters.query_encoder\")\n self.title_encoder = envs.get_global_env(\n \"hyper_parameters.title_encoder\")\n self.query_encode_dim = envs.get_global_env(\n \"hyper_parameters.query_encode_dim\")\n self.title_encode_dim = envs.get_global_env(\n \"hyper_parameters.title_encode_dim\")\n\n self.emb_size = envs.get_global_env(\n \"hyper_parameters.sparse_feature_dim\")\n self.emb_dim = envs.get_global_env(\"hyper_parameters.embedding_dim\")\n self.emb_shape = [self.emb_size, self.emb_dim]\n\n self.hidden_size = envs.get_global_env(\"hyper_parameters.hidden_size\")\n self.margin = envs.get_global_env(\"hyper_parameters.margin\")\n\n def net(self, input, is_infer=False):\n factory = SimpleEncoderFactory()\n self.q_slots = self._sparse_data_var[0:1]\n self.query_encoders = [\n 
factory.create(self.query_encoder, self.query_encode_dim)\n for _ in self.q_slots\n ]\n q_embs = [\n fluid.embedding(\n input=query, size=self.emb_shape, param_attr=\"emb\")\n for query in self.q_slots\n ]\n # encode each embedding field with encoder\n q_encodes = [\n self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)\n ]\n # concat multi view for query, pos_title, neg_title\n q_concat = fluid.layers.concat(q_encodes)\n # projection of hidden layer\n q_hid = fluid.layers.fc(q_concat,\n size=self.hidden_size,\n param_attr='q_fc.w',\n bias_attr='q_fc.b')\n\n self.pt_slots = self._sparse_data_var[1:2]\n self.title_encoders = [\n factory.create(self.title_encoder, self.title_encode_dim)\n ]\n pt_embs = [\n fluid.embedding(\n input=title, size=self.emb_shape, param_attr=\"emb\")\n for title in self.pt_slots\n ]\n pt_encodes = [\n self.title_encoders[i].forward(emb)\n for i, emb in enumerate(pt_embs)\n ]\n pt_concat = fluid.layers.concat(pt_encodes)\n pt_hid = fluid.layers.fc(pt_concat,\n size=self.hidden_size,\n param_attr='t_fc.w',\n bias_attr='t_fc.b')\n # cosine of hidden layers\n cos_pos = fluid.layers.cos_sim(q_hid, pt_hid)\n\n if is_infer:\n self._infer_results['query_pt_sim'] = cos_pos\n return\n\n self.nt_slots = self._sparse_data_var[2:3]\n nt_embs = [\n fluid.embedding(\n input=title, size=self.emb_shape, param_attr=\"emb\")\n for title in self.nt_slots\n ]\n nt_encodes = [\n self.title_encoders[i].forward(emb)\n for i, emb in enumerate(nt_embs)\n ]\n nt_concat = fluid.layers.concat(nt_encodes)\n nt_hid = fluid.layers.fc(nt_concat,\n size=self.hidden_size,\n param_attr='t_fc.w',\n bias_attr='t_fc.b')\n cos_neg = fluid.layers.cos_sim(q_hid, nt_hid)\n\n # pairwise hinge_loss\n loss_part1 = fluid.layers.elementwise_sub(\n tensor.fill_constant_batch_size_like(\n input=cos_pos,\n shape=[-1, 1],\n value=self.margin,\n dtype='float32'),\n cos_pos)\n\n loss_part2 = fluid.layers.elementwise_add(loss_part1, cos_neg)\n\n loss_part3 = fluid.layers.elementwise_max(\n tensor.fill_constant_batch_size_like(\n input=loss_part2, shape=[-1, 1], value=0.0, dtype='float32'),\n loss_part2)\n\n self._cost = fluid.layers.mean(loss_part3)\n self.acc = self.get_acc(cos_neg, cos_pos)\n self._metrics[\"loss\"] = self._cost\n self._metrics[\"acc\"] = self.acc\n\n def get_acc(self, x, y):\n less = tensor.cast(cf.less_than(x, y), dtype='float32')\n label_ones = fluid.layers.fill_constant_batch_size_like(\n input=x, dtype='float32', shape=[-1, 1], value=1.0)\n correct = fluid.layers.reduce_sum(less)\n total = fluid.layers.reduce_sum(label_ones)\n acc = fluid.layers.elementwise_div(correct, total)\n return acc\n","sub_path":"models/match/multiview-simnet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279802653","text":"import pandas as pd\nfrom gym.envs.connected_vehicles.assets.conversions import Conversions as CV\nimport sys\nfrom os.path import dirname, abspath, join\n\nsys.path.insert(0,dirname(dirname(dirname(abspath(__file__)))))\nbase_path = dirname(abspath(__file__))\n\ndef load_dc(dt):\n df = pd.read_csv(join(base_path,'driving_cycles.csv'))\n dt_old = 1.0\n return df[0:-1:int(dt/dt_old)].reset_index(drop = True)\n\ndef calc_mpg(v, a):\n \n b0 = 0.1569\n b1 = 0.02450\n b2 = -0.0007415\n b3 = 0.00005975\n \n c0 = 0.07224\n c1 = 0.09681\n c2 = 0.001075\n \n Fd = 0.1\n \n f_cruise = b0 + b1*v + b2*v**2 + b3*v**3\n f_accel = a*(c0 + c1*v + c2*v**2)\n \n if a < 0 or 
v == 0:\n zeta = 1\n else:\n zeta = 0 \n mf = (1 - zeta)*(f_cruise + f_accel) + zeta*Fd\n \n mf_gal_h = mf * CV.ML_TO_GAL * 3600\n v = v * CV.MPS_TO_MPH\n \n return v/mf_gal_h\n\nif __name__ == '__main__':\n \n mpg=calc_mpg(4.16, -2.11)","sub_path":"gym/envs/connected_vehicles/assets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596727389","text":"from inspect import cleandoc\n\nimport pandas as pd\nfrom petab.C import *\n\nfrom petabtests import PetabTestCase, analytical_a\n\nDESCRIPTION = cleandoc(\"\"\"\n## Objective\n\nThis case tests support for parametric overrides from condition table.\n\nThe model is to be simulated for two different experimental conditions\n(here: different initial concentrations). The observable is offsetted by\na parametric override in the condition table (i.e. the actual value has\nto be taken from the parameter table).\n\n## Model\n\nA simple conversion reaction `A <=> B` in a single compartment, following\nmass action kinetics.\n\"\"\")\n\n# problem --------------------------------------------------------------------\n\ncondition_df = pd.DataFrame(data={\n CONDITION_ID: ['c0', 'c1'],\n 'offset_A': ['offset_A_c0', 'offset_A_c1'],\n}).set_index([CONDITION_ID])\n\nmeasurement_df = pd.DataFrame(data={\n OBSERVABLE_ID: ['obs_a', 'obs_a'],\n SIMULATION_CONDITION_ID: ['c0', 'c1'],\n TIME: [10, 10],\n MEASUREMENT: [2.1, 3.2]\n})\n\nobservable_df = pd.DataFrame(data={\n OBSERVABLE_ID: ['obs_a'],\n OBSERVABLE_FORMULA: ['A + offset_A'],\n NOISE_FORMULA: [1]\n}).set_index([OBSERVABLE_ID])\n\nparameter_df = pd.DataFrame(data={\n PARAMETER_ID: ['a0', 'b0', 'k1', 'k2', 'offset_A_c0', 'offset_A_c1'],\n PARAMETER_SCALE: [LIN] * 6,\n LOWER_BOUND: [0] * 6,\n UPPER_BOUND: [10] * 6,\n NOMINAL_VALUE: [1, 0, 0.8, 0.6, 2, 3],\n ESTIMATE: [1] * 6,\n}).set_index(PARAMETER_ID)\n\n# solutions ------------------------------------------------------------------\n\nsimulation_df = measurement_df.copy(deep=True).rename(\n columns={MEASUREMENT: SIMULATION})\nsimulation_df[SIMULATION] = [analytical_a(10, 1, 0, 0.8, 0.6) + offset\n for offset in [2, 3]]\n\ncase = PetabTestCase(\n id=5,\n brief=\"Simulation. 
Condition-specific parameters only defined in \"\n \"parameter table.\",\n description=DESCRIPTION,\n model='conversion_modified_pysb.py',\n condition_dfs=[condition_df],\n observable_dfs=[observable_df],\n measurement_dfs=[measurement_df],\n simulation_dfs=[simulation_df],\n parameter_df=parameter_df,\n)\n","sub_path":"petabtests/cases/v2.0.0/pysb/0005/0005.py","file_name":"0005.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"255856240","text":"#This file holds all constant variables that needs to be reused accross files\nDARK = '#40514e'\nPRIMARY = '#2f89fc'\nSECONDAY = '#30e3ca'\nBACKGROUND = '#f5f5f5'\nWINDOW_WIDTH = 800\nWINDOW_HEIGHT = 600\nMIN_PADDING = 4\nMID_PADDING = 8\nMAX_PADDING = 16\nAPP_TITLE = \"PYTHON UI\"\nTOOLS_OPTION_ONE = \"Conventional MCS\"\nTOOLS_OPTION_TWO = \"ML Algorithm\"\nFILES_OPTION_ONE = \"Open CSV\"\nFILES_OPTION_TWO = \"Exit\"\n\nINDICES_OPTION_ONE = \"HL-1\"\nINDICES_OPTION_TWO = \"HL-2\"\nINDICES_SUB_OPTION_ONE = \"Option-1\"\nINDICES_SUB_OPTION_TWO = \"Option-2\"\n","sub_path":"Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"289007992","text":"'''\n2021 KaKao BLIND RECRUITMENT\n광고 삽입 LEVEL3\n출처: dev-note-97.tistory.com/156\n'''\n\n'''\n이 문제 참.. 하.. 진짜 생각할수록 화나네요..\n일단 전 집에 와서 다시 풀어보니까 22점 나왔네요..\n대부분 시간 초과가 되었습니다.\n\n풀이법을 살펴보니 가장 중요한 것은\n시간(문자열) → 초단위(정수형) → 시간(문자열)\n이렇게 변환하는 것이 아닌(이건 기본입니다..)\n전체 방영시간을 초 단위로 쪼개어 리스트로 변환한 뒤 누적 방영 시간을 구하고\n시작과 끝을 빼서 가장 큰 방영시간을 찾는 아이디어입니다.\n(기억하실지 모르겠지만, 4월 16일 라이브 방송에서 정사각형 방(SWEA) 문제를 풀었거든요. 이때 사용한 아이디어와 비슷합니다.)\n\n그리고 이를 memoization합니다. 왜 그렇게 하는지는 해당코드를 보면서 살펴봐야 이해하실겁니다.\n'''\n\n'''\n1. 초 변환\n'''\ndef to_seconds(time): # 1번\n h, m, s = map(int, time.split(':'))\n return h * 3600 + m * 60 + s\n\n'''\n2. 시간변환\n'''\ndef to_time(time): # 2번\n h = str(time//3600).zfill(2) # 초를 시간으로 변환\n time = time % 3600 # 이건 분과 초를 합친 총합 초입니다.\n m = str(time//60).zfill(2) # 분 변환\n s = str(time%60).zfill(2) # 초 변환\n return ':'.join([h, m, s]) # 다시 합치기\n\n'''\n2. 코드를 보았을 때 처음보는 메서드가 있을거에요. 아마?\nzfill 메서드는 zerofill의 약어로 앞에 0을 채워주는 메서드입니다.\n이와 비슷한 거로 rjust라는 메서드가 있어요.\n\n예를 들어서\n\"2\".zfill(3)은 \"002\"로 출력되고\n\"123\".zfill(5)는 \"00123\"으로 출력됩니다.\n\n그니까 괄호안에 수(n)은 n번째 자리까지 비어있는 숫자는 0으로 채워준다는 의미입니다.\n\n그러면 만약에 \"50000\".zfill(5)는 \"50000\"으로 출력됩니다.\n왜냐하면 이미 5자리 수이기 때문이죠.\n'''\n\n\ndef solution(play_time, adv_time, logs):\n answer = ''\n '''\n 3. 모두 초로 변환합니다.\n '''\n play_time = to_seconds(play_time) # 3번\n adv_time = to_seconds(adv_time)\n\n '''\n 4. 시청자 수를 기록할 리스트를 따로 만듭니다. \n 이걸로 초단위로 시청자수를 셀거에요.\n 참고로 play-time을 계산했기 때문에 총 플레이 시간만큼\n 메모 배열이 만들어진거에요.\n 사실 이건 생각못했어요... 공간을 너무 먹을거 같은데? 라는 느낌이 오실거에요.\n 근데 일단 쭉 가봅시다.\n '''\n memo = [0 for _ in range(play_time+1)] # 4번\n\n '''\n 5. 로그의 시작 시간과 끝나는 시간에 각각 1과 -1을 더해줍니다.\n 그 이유는 playtime을 전부다 돌거든요.. 그 때 처음부터 누적시키려면 처음은 1로 두고 마지막은 -1로 둬서\n 시청자수를 구분 해줘야합니다.\n '''\n for l in logs: # 5번\n s, e = map(str, l.split('-'))\n s = to_seconds(s)\n e = to_seconds(e)\n\n memo[s] += 1\n memo[e] -= 1\n \n '''\n 6, 7. 아래와 같이 한번 순회를 돌면 5번에 의해 기록되었던 로그의 시작 시간과 끝 시간에 의해\n 특정 초에 시청한 사람의 수가 기록됩니다. 
그리고 순회를 두 번 하면 그 시청자 수가 누적됩니다.\n 예를 들어서 play_time의 초가 15라 하겠습니다.\n memo는 4번 코드에 의해 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 이렇게 됩니다.\n logs는 5번 코드에 의해 A: [3, 7], B: [5, 11], C: [9, 15]로 되어있다고 하면 memo는 다음과 같이 바뀝니다.\n (알파벳은 사람이라 생각하시면 됩니다.)\n memo = [0, 0, 0, 1, 0, 1, 0, -1 ,0 ,1 ,0 ,- 1, 0, 0, 0, -1] 이를 6번 코드를 실행하면 다음과 같이 바뀝니다.\n memo = [0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0] 그리고 이를 한번 더 7번으로 가면\n memo = [0, 0, 0, 1, 2, 4, 6, 7, 8, 10, 12, 13, 14, 15, 16, 16] 이렇게 바뀝니다.\n 도대체 이걸로 뭐 어떻게 할건데? 라는 생각이 드실텐데\n 이걸 이제 어떻게 할지에 대한 자세한 계산은 10번 각주에서 확인 부탁드립니다.\n\n **이해를 돕기위한 첨언**\n 대략적으로 말씀드리면 6번 코드로 인해 해당 시점에서 시청하는 시청자의 수를 알 수 있습니다.\n 그리고 7번 코드로 인해 해당 시점에서 광고를 시청했던 시청자수를 누적하여 계산하므로\n 해당 시점의 총 누적 방영 시간(초)이 됩니다.\n 즉, 그 시점에서 시청한 시청자에 대한 방영 시간이 아니라\n 그 시점까지 시청자에게 방영한 총 시간입니다.\n 따라서 특정 시점(H1)과 특정 시점(H2)간의 차를 통해 원하는 구간에서 시청자에게\n 광고를 방영한 시간을 알 수 있습니다.\n '''\n\n for i in range(1, play_time+1): # 6번\n memo[i] = memo[i] + memo[i-1]\n \n for i in range(1, play_time+1): # 7번\n memo[i] = memo[i] + memo[i-1]\n \n\n '''\n 이제 광고 시간과 비교해서 문제에서 요구한 해당 시각을 계산할겁니다. 그전에\n 최초 시작 가능한 광고시간과 끝나는 광고시간을 계산해주어야 합니다.\n '''\n\n '''\n 8. 처음의 끝나는 시간은은 adv_time초에서 -1한 인덱스에서 끝날거에요.\n '''\n max_play = memo[adv_time-1] # 8번\n '''\n 9. start는 당연히 0 인덱스겠죠\n '''\n start = 0 # 9번\n\n '''\n 10, 11. 다시 순회를 돕니다. 순회는 광고 시간(초)과 총 영상 플레이 시간(초) 사이만 할겁니다.\n 11번 코드부터 데이터 상에서 어떻게 나타나는지 시각적으로 확인하려면 광고시간이 필요하겠네요.\n 광고시간은 5초라 하겠습니다. 예시에서 영상 플레이 시간은 총 15초로 지정했으니까 다음과 같이 나오겠네요.\n\n -1 번째 순회\n play: 4 = memo[5]: 4 - memo[5-5]: 0\n 여기서 잠깐! 진짜로 계산해보면 광고가 0초에서 5초까지 방영되면 그 사이에 본 사람은 2명(A, B)이고\n 실제 광고가 이 사람들에게 방영한 시간은 A: 3초, B: 1초로 총 4초입니다.\n \n 이후 if 문에서 실제 방영된 시간(play)이 처음 지정한 시간(max_play)보다 크므로\n max_play를 업데이트 합니다.\n 그리고 start를 해당 i에서 광고시간을 제외하고 1을 더합니다.\n 따라서 max_play: 4, start:1로 업데이트 되고 start는 실제 광고 시작 시간(인덱스 아님)입니다.\n\n -2 번째 순회\n play: 6 = memo[6]: 6 - memo[6-5]: 0\n -1 번째 순회와 같이 계산되고 max_play와 start가 6과 2로 업데이트 됩니다.\n\n -3 번째 순회\n play: 7 = memo[7]: 7 - memo[7-5]: 0\n 업데이트 됩니다.\n max_play: 7 start: 2\n\n -4 번째 순회\n play: 7 = memo[8]: 8 - memo[8-5]: 1\n 업데이트 되지 않습니다.\n max_play: 7 start: 2\n\n -5 번째 순회\n play: 8 = memo[9]: 10 - memo[9-5]: 2\n 업데이트 됩니다.\n max_play: 8 start: 5\n\n -6 번째 순회\n play: 8 = memo[10]: 12 - memo[10-5]: 4\n 업데이트 되지 않습니다.\n max_play: 8 start: 5\n\n -7 번째 순회\n play: 7 = memo[11]: 13 - memo[11-5]: 6\n 업데이트 되지 않습니다.\n max_play: 8 start: 5\n\n -8 번째 순회\n play: 7 = memo[12]: 14 - memo[12-5]: 7\n 업데이트 되지 않습니다.\n max_play: 8 start: 5\n\n -9 번째 순회\n play: 5 = memo[13]: 15 - memo[13-5]: 10\n 업데이트 되지 않습니다.\n max_play: 8 start: 5\n\n -10 번째 순회\n play: 6 = memo[14]: 16 - memo[14-5]: 10\n 업데이트 되지 않습니다.\n max_play: 8 start: 5\n '''\n\n for i in range(adv_time, play_time): # 10번\n play = memo[i] - memo[i-adv_time] # 11번\n\n if play > max_play:\n max_play = play\n start = i - adv_time + 1\n\n '''\n 11번 코드 이해하셨나요?\n 이와 같은 방식으로 start는 5초에 방영하여\n 총 8초 동안 시청자에게 최대로 방영된다는 것을\n 알 수 있습니다.\n\n 5초에 반영하면 A:[3, 7]에게 3초(5, 6, 7)를 B[5, 11]에게 5초(5, 6, 7, 8, 9)를 반영하는 것임.\n '''\n \n answer = to_time(start) # 12번 초시간 변동\n return answer\n\n\n'''\n읽어주셔서 감사드리고,,,\n\n이 풀이를 보고 생각든건.. 
아마 시작 시간을 기점으로 풀이를 시도하는건 잘못된 풀이이지 않나 생각드네요.\n왜냐면 그 풀이는 초단위로 확인이 불가능하여 모든 테스트 케이스를 고려하지 못한 풀이인거 같습니다.\n'''\n\nif __name__ == '__main__':\n print(solution(\"02:03:55\", \"00:14:15\", [\"01:20:15-01:45:14\", \"00:40:31-01:00:00\", \"00:25:50-00:48:29\", \"01:30:59-01:53:29\", \"01:37:44-02:02:30\"]))\n print(solution(\"99:59:59\", \"25:00:00\", [\"69:59:59-89:59:59\", \"01:00:00-21:00:00\", \"79:59:59-99:59:59\", \"11:00:00-31:00:00\"]))\n print(solution(\"50:00:00\", \"50:00:00\", [\"15:36:51-38:21:49\", \"10:14:18-15:36:51\", \"38:21:49-42:51:45\"]))","sub_path":"junkyu/광고삽입정석해설.py","file_name":"광고삽입정석해설.py","file_ext":"py","file_size_in_byte":9762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"562077406","text":"alp_en = \" abcdefghijklmnopqrstuvwxyz \"\nalp_ru = \" абвгдеёжзийклмнопрстуфхцчшщъыьэюя \"\n\ndef do_atbash(text):\n alp = \"\"\n r_text = list(text.lower())\n if r_text[0] in alp_en:\n alp = alp_en\n elif r_text[0] in alp_ru:\n alp = alp_ru\n\n b = reversed(alp)\n dict1 = {}\n res = \"\"\n\n for i, j in zip(alp, b):\n dict1.update(dict(zip(i, j)))\n\n for ch in r_text:\n if ch.isupper():\n res += dict1.get(ch).upper()\n else:\n res += dict1.get(ch)\n\n return res\n\nprint(do_atbash(\"HELLO\"))","sub_path":"functions/atbash.py","file_name":"atbash.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"172080045","text":"from numpy import zeros\r\nfrom sklearn import linear_model\r\nimport pandas as pd\r\nimport fitting_scoring\r\nimport process_input\r\nfrom sklearn import svm\r\nfrom sklearn import neural_network\r\nimport random\r\nimport numpy\r\nimport os\r\nimport csv\r\nfrom numpy import *\r\nimport csv\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math\r\nfrom pandas import DataFrame\r\n\r\n# ------------------------------------------------------------------------------------------------\r\nclass DrugDiscovery:\r\n input_process = process_input.InputProcessor()\r\n\r\n descriptors_file = \"Practice_Descriptors.csv\"\r\n targets_file = \"Practice_Targets.csv\"\r\n\r\n# ------------------------------------------------------------------------------------------------\r\n def function_step1(self):\r\n self.descriptors = self.input_process.open_descriptor_matrix(self.descriptors_file)\r\n print('Original matrix dimensions : ', self.descriptors.shape)\r\n self.targets = self.input_process.open_target_values(self.targets_file)\r\n return self.descriptors, self.targets\r\n\r\n# ------------------------------------------------------------------------------------------------\r\n def function_step2(self):\r\n self.descriptors, self.targets = self.input_process.removeInvalidData(self.descriptors, self.targets)\r\n print()\r\n print(self.targets)\r\n print()\r\n print('--------------------step 1 ends-------------------------')\r\n self.descriptors, self.active_descriptors = self.input_process.removeNearConstantColumns(self.descriptors)\r\n\r\n print('After removing invalid datas descriptor are as follows: ')\r\n print(self.descriptors)\r\n print()\r\n print('Now descriptor dimensions are ', self.descriptors.shape)\r\n # Rescale the descriptor data\r\n self.descriptors = self.input_process.rescale_data(self.descriptors)\r\n print('------------------------Rescaled matrix is below--------------------')\r\n print('Rescaled value of Xes is:')\r\n print(self.descriptors)\r\n\r\n print('Rescaled matrix dimenstions are:', 
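The long walkthrough above is easiest to verify by running the double prefix-sum on the toy numbers from its own comments (15-second video, viewers at [3,7], [5,11], [9,15], 5-second ad). A stripped-down sketch of the same logic:

def best_ad_start(play_time, adv_time, logs):
    memo = [0] * (play_time + 1)
    for s, e in logs:                      # difference array: +1 at each start, -1 at each end
        memo[s] += 1
        memo[e] -= 1
    for i in range(1, play_time + 1):      # 1st pass: concurrent viewers per second
        memo[i] += memo[i - 1]
    for i in range(1, play_time + 1):      # 2nd pass: cumulative person-seconds watched
        memo[i] += memo[i - 1]
    best, start = memo[adv_time - 1], 0    # window starting at second 0
    for i in range(adv_time, play_time):   # slide an adv_time-wide window
        play = memo[i] - memo[i - adv_time]
        if play > best:
            best, start = play, i - adv_time + 1
    return start, best

print(best_ad_start(15, 5, [(3, 7), (5, 11), (9, 15)]))  # (5, 8), matching the walkthrough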
self.descriptors.shape)\r\n print('------------------------------------------------------')\r\n return self.descriptors, self.targets, self.active_descriptors\r\n\r\n# ------------------------------------------------------------------------------------------------\r\n def function_step3(self):\r\n self.descriptors, self.targets = self.input_process.sort_descriptor_matrix(self.descriptors, self.targets)\r\n return self.descriptors, self.targets\r\n\r\n# ------------------------------------------------------------------------------------------------\r\n def function_step4(self):\r\n self.X_Train, self.X_Valid, self.X_Test, self.Y_Train, self.Y_Valid, self.Y_Test = self.input_process.simple_split(self.descriptors, self.targets)\r\n self.data = {'TrainX': self.X_Train, 'TrainY': self.Y_Train, 'ValidateX': self.X_Valid,\r\n 'ValidateY': self.Y_Valid,\r\n 'TestX': self.X_Test, 'TestY': self.Y_Test, 'UsedDesc': self.active_descriptors}\r\n print(str(self.descriptors.shape[1]) + \" valid descriptors and \" + str(self.targets.__len__()) + \" molecules available.\")\r\n return self.X_Train, self.X_Valid, self.X_Test, self.Y_Train, self.Y_Valid, self.Y_Test, self.data\r\n\r\n# ------------------------------------------------------------------------------------------------\r\n def function_step5(self):\r\n self.binary_model = zeros((50, self.X_Train.shape[1]))\r\n\r\n L = (0.015 * 593)\r\n # ------------------------------------------------------------------------------------------------\r\n\r\n def ValidRow(binary_model):\r\n for i in range(50):\r\n cc = 0\r\n for j in range(593):\r\n r = random.randint(0, 593)\r\n if r < L:\r\n binary_model[i][j] = 1\r\n cc += 1\r\n if cc < 5 and cc > 25:\r\n i -= 1\r\n else:\r\n continue\r\n return binary_model\r\n\r\n self.binary_model = ValidRow(self.binary_model)\r\n\r\n return self.binary_model\r\n\r\n# ------------------------------------------------------------------------------------------------\r\n def function_step6(self):\r\n regressor = linear_model.LinearRegression()\r\n\r\n instructions = {'dim_limit': 4, 'algorithm': 'None', 'MLM_type': 'MLR'}\r\n\r\n fitting_object = fitting_scoring.FittingScoringClass()\r\n directory = os.path.join(os.getcwd(), 'Outputs')\r\n output_filename = 'MLR_Output_Project3.csv'\r\n file_path = os.path.join(directory, output_filename)\r\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\r\n fileOut = open(file_path, 'w', newline='') # create stream object for output file\r\n fileW = csv.writer(fileOut)\r\n fileW.writerow(\r\n ['Descriptor ID', 'Fitness', 'Algorithm', 'Dimen', 'R2_Train', 'R2_Valid', 'R2_Test', 'RMSE', 'MAE',\r\n 'pred_Acc'\r\n ])\r\n print('This is MLR!')\r\n\r\n for gen in range(10000):\r\n print('Current generation is: ', gen)\r\n regressor.fit(self.X_Train, self.Y_Train)\r\n\r\n self.trackDesc, self.trackFitness, self.trackModel, \\\r\n self.trackDimen, self.trackR2train, self.trackR2valid, \\\r\n self.trackR2test, self.testRMSE, self.testMAE, \\\r\n self.testAccPred = fitting_object.evaluate_population(model=regressor, instructions=instructions, data=self.data,\r\n population=self.binary_model, exportfile=fileW)\r\n\r\n print('========================================================================')\r\n\r\n nik = []\r\n traill = []\r\n for i in range(50):\r\n traill.append(i)\r\n\r\n for key in self.trackDesc.keys():\r\n nik.append(self.trackFitness[key])\r\n\r\n df = pd.DataFrame(nik)\r\n df.columns = ['fitness']\r\n\r\n df1 = pd.DataFrame(traill)\r\n df1.columns = ['order']\r\n\r\n df['order'] = 
df1\r\n\r\n df2 = df.sort_values('fitness')\r\n\r\n order = []\r\n\r\n order = df2['order'].values.tolist()\r\n\r\n binary_model2 = self.binary_model.copy()\r\n\r\n for i in range(len(order)):\r\n # for j in range(593):\r\n a = order[i]\r\n binary_model2[i] = self.binary_model[a]\r\n\r\n pop = binary_model2\r\n\r\n Oldpop = pop\r\n\r\n for i in range(1, 50):\r\n V = numpy.arange(593)\r\n\r\n a = random.randint(1, 49)\r\n b = random.randint(1, 49)\r\n c = random.randint(1, 49)\r\n\r\n F = 0.7\r\n\r\n for j in range(0, 593):\r\n V[j] = math.floor(abs(Oldpop[a, j] + (F * (Oldpop[b, j] - Oldpop[c, j]))))\r\n\r\n CV = 0.7\r\n CV = random.randint(0, 1)\r\n\r\n for k in range(0, 593):\r\n Random = random.uniform(0, 1)\r\n if (Random < CV):\r\n pop[i, k] = V[k]\r\n else:\r\n continue\r\n\r\n def ValidRow(binary_model):\r\n L = (0.015 * 593)\r\n while True:\r\n cc = 0\r\n for j in range(593):\r\n r = random.randint(0, 593)\r\n if r < L:\r\n binary_model[j] = 1\r\n cc += 1\r\n if cc < 5 or cc > 25:\r\n continue\r\n else:\r\n break\r\n return binary_model\r\n\r\n for i in range(0, 50):\r\n check = 0\r\n for j in range(0, 593):\r\n if pop[i, j] == 1:\r\n check += 1\r\n\r\n if check < 5 or check > 25:\r\n pop[i] = ValidRow(pop[i])\r\n\r\n return regressor, instructions, self.trackDesc, self.trackFitness, self.trackModel, self.trackDimen, self.trackR2train, self.trackR2valid, self.trackR2test, self.testRMSE, self.testMAE, self.testAccPred\r\n\r\n\r\n def function_step7(self):\r\n print('\\n\\nFitness\\t\\tAccuracy\\t\\t\\tR_SquareTrain\\tR_SquareValid\\tR_SquareTest\\tRMSE')\r\n print('========================================================================')\r\n for key in trackDesc.keys():\r\n print(str(trackFitness[key]) + '\\t\\t' + str(testAccPred[key]) + '\\t\\t\\t' + str(trackR2train[key]) \\\r\n + '\\t\\t\\t\\t' + str(trackR2valid[key]) + '\\t\\t' + str(trackR2test[key]) + '\\t\\t' + str(testRMSE[key]))\r\n# ------------------------------------------------------------------------------------------------\r\n\r\nAlzheimer1 = DrugDiscovery()\r\ndescriptors1, targets1 = Alzheimer1.function_step1()\r\nprint()\r\nprint('Original descriptors are as follow:')\r\nprint()\r\nprint(descriptors1)\r\nprint()\r\nprint('Targets are as below:')\r\nprint()\r\nprint(targets1)\r\nprint()\r\n\r\nprint('______Function1 done_________')\r\ndescriptors, targets, active_descriptors = Alzheimer1.function_step2()\r\nprint()\r\nprint('------------------------step 2 ends-------------------------')\r\n\r\ndescriptors, targets = Alzheimer1.function_step3()\r\nprint('After sorting descriptor matrix is : ')\r\nprint(descriptors)\r\nprint()\r\nprint('after sorting targets are:')\r\nprint(targets)\r\nprint('------------------------step 3 ends-------------------------')\r\nprint()\r\nX_Train, X_Valid, X_Test, Y_Train, Y_Valid, Y_Test, data = Alzheimer1.function_step4()\r\nprint()\r\nprint('------------------------step 4 ends-------------------------')\r\n\r\nbinary_model = Alzheimer1.function_step5()\r\nprint('------------------------step 5 ends-------------------------')\r\n\r\nregressor, instructions, trackDesc, trackFitness, trackModel, trackDimen, trackR2train, trackR2valid, trackR2test, testRMSE, testMAE, testAccPred = Alzheimer1.function_step6()\r\n\r\nprint('------------------------step 6 ends-------------------------')\r\nAlzheimer1.function_step7()\r\nprint('------------------------step 7 ends-------------------------')\r\n","sub_path":"Differential Evolution (DE) 
Algorithm/main_Multiple_Linear_Regression.py","file_name":"main_Multiple_Linear_Regression.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283575240","text":"# Python3 program to convert a \n# decimal number to binary number \n\ndef decToBinary(n): \n\t\n\tbinaryNum = [0] * n; \n\ti = 0; \n\twhile (n > 0): \n\n\t\tbinaryNum[i] = n % 2; \n\t\tn = int(n / 2); \n\t\ti += 1; \n\tfor j in range(i - 1, -1, -1): \n\t\tprint(binaryNum[j], end = \"\"); \n\nn = 17; \ndecToBinary(n)\n","sub_path":"deci_to_bin.py","file_name":"deci_to_bin.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69210055","text":"from input_taker import take_float, take_string\nfrom price_calculator import PriceCalculator\n\n\nclass Main:\n\n calc = PriceCalculator()\n\n def current_vat_info(self):\n vat = self.calc.get_vat()\n print(\"Hi there! Current VAT is \" + str(vat))\n\n def take_vat_if_necessary(self):\n print(\"Do you want to change VAT? y/n\")\n decision = take_string()\n\n if decision == \"y\":\n print(\"Please type VAT (from 0.0 to 1.0)\")\n new_vat = take_float()\n self.calc.change_vat(new_vat)\n print(\"Got ya! New VAT is \" + str(new_vat))\n\n def calculate_gross_price(self):\n print(\"Type net price: \")\n net_price = take_float()\n gross_price = self.calc.calculate_gross_price(net_price)\n print(\"Gross price is: \" + str(gross_price))\n\n\nmain = Main()\nmain.current_vat_info()\nmain.take_vat_if_necessary()\nmain.calculate_gross_price()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"155350909","text":"import pysgpp\nimport math\nimport sys\nimport numpy as np\nimport matplotlib \nimport matplotlib.pyplot as plt\nimport weightfunction\n\n\ndef printLine():\n print(\"--------------------------------------------------------------------------------------\")\n \n\ndim = 2\nradius = 0.1 \ndegree = 2\ngrid = pysgpp.Grid.createWEBsplineGrid(dim, degree)\n\ngridStorage = grid.getStorage()\nprint(\"dimensionality: {}\".format(gridStorage.getDimension()))\n\nlevel = 3\ngrid.getGenerator().regular(level)\nprint(\"number of grid points: {}\".format(gridStorage.getSize()))\n\nalpha = pysgpp.DataVector(gridStorage.getSize(),0.0)\nbeta = pysgpp.DataVector(gridStorage.getSize(),0.0)\nprint(\"length of alpha vector: {}\".format(len(alpha)))\nprint(\"length of beta vector: {}\".format(len(beta)))\n\nprintLine()\nfor i in range(gridStorage.getSize()):\n gp = gridStorage.getPoint(i)\n alpha[i] = gp.getStandardCoordinate(0)\n beta[i] = gp.getStandardCoordinate(1)\n\n#print(\"alpha: {}\".format(alpha))\n#print(\"beta: {}\".format(beta))\n\nx = np.zeros((len(alpha),dim))\neval_circle= np.zeros(len(alpha))\nfor i in range(len(alpha)):\n x[i] = [alpha[i],beta[i]] \n eval_circle[i]=weightfunction.circle(radius, x[i])\nprint(x)\nprint(x[:,0])\nprint(eval_circle)\n#colors = np.random.rand(len(alpha))\n#area = (30 * np.random.rand(N))**2 # 0 to 15 point radii\n#plt.scatter(x, y, c=colors, alpha=0.5)\n#plt.show()\n\nprintLine()\np=0\nn=0\nfor i in range(len(eval_circle)):\n if eval_circle[i] > 0:\n p=p+1\n print(\"pos\")\n else:\n n=n+1\n print(\"neg\")\nprint(p) \nprint(n)\n 
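The evolution loop in the script above hand-rolls differential evolution's rand/1/bin operators: mutant V[j] = floor(|pop[a,j] + F·(pop[b,j] − pop[c,j])|), then binomial crossover at rate CV. Note, though, that the script assigns CV = 0.7 and immediately overwrites it with random.randint(0, 1), which makes each row's crossover effectively all-or-nothing and looks unintended. For reference, a compact sketch of the standard real-valued operators (F and CR here are illustrative, not values from the file):

import numpy as np

rng = np.random.default_rng(0)

def de_rand_1_bin(pop, F=0.7, CR=0.7):
    n, d = pop.shape
    trials = pop.copy()
    for i in range(n):
        a, b, c = rng.choice(n, size=3, replace=False)  # classically also distinct from i
        mutant = pop[a] + F * (pop[b] - pop[c])
        mask = rng.random(d) < CR            # per-gene binomial crossover
        mask[rng.integers(d)] = True         # keep at least one mutant gene
        trials[i] = np.where(mask, mutant, pop[i])
    return trials                            # fitness-based selection omitted here

print(de_rand_1_bin(rng.random((6, 4))))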
\n\n\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523358360","text":"from .base_page import BasePage\nfrom selenium.webdriver.common.by import By\n\n\nclass ClassAttributePage(BasePage):\n \n PRIMARY_BUTTON = (\n By.XPATH,\n \"//button[contains(concat(' ', normalize-space(@class), ' '), ' btn-primary ')]\",\n )\n SUCCESS_BUTTON = (\n By.XPATH,\n \"//button[contains(concat(' ', normalize-space(@class), ' '), ' btn-success ')]\", \n )\n WARNING_BUTTON = (\n By.XPATH,\n \"//button[contains(concat(' ', normalize-space(@class), ' '), ' btn-warning ')]\", \n )\n ALERT_TEXT = \"Primary button pressed\"\n\n\n def click_to_primary_button(self):\n element = self.driver.find_element(*self.PRIMARY_BUTTON)\n element.click()\n\n def click_to_success_button(self):\n element = self.driver.find_element(*self.SUCCESS_BUTTON)\n element.click()\n\n def click_to_warning_button(self):\n element = self.driver.find_element(*self.WARNING_BUTTON)\n element.click()\n \n def get_alert_text(self):\n alert = self.driver.switch_to_alert()\n return alert.text\n \n def accept_alert(self):\n self.driver.switch_to_alert().accept()","sub_path":"page_objects/class_atrr_page.py","file_name":"class_atrr_page.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"59850914","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndatosFB = [ np.loadtxt(\"exp_fb_\"+str(i)+\".csv\", delimiter=',') for i in range(1,7) ]\ndatosBT = [ np.loadtxt(\"exp_bt_\"+str(i)+\".csv\", delimiter=',') for i in range(1,7) ]\n# datosBTCrec = [ np.loadtxt(\"exp_BTO_\"+str(i)+\".csv\", delimiter=',') for i in range(1,7) ]\ndatosPD = [ np.loadtxt(\"exp_pd_\"+str(i)+\".csv\", delimiter=',') for i in range(1,7) ]\n\n# plot a line, implicitly creating a subplot(111)\n# plt.plot([1,2,3])\nfig, ax = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True)#, figsize=(6,6))\n\n# linear\n# ax[0,0].subplot(231)\nax[0,0].plot(datosFB[0][:,0], datosFB[0][:,2], 'g', linewidth=2)\nax[0,0].plot(datosBT[0][:,0], datosBT[0][:,2], color='orange', linewidth=2)\nax[0,0].plot(datosPD[0][:,0], datosPD[0][:,2], 'm', linewidth=2)\nax[0,0].set_yscale('log')\nax[0,0].set_ylim(ymin=0)\nax[0,0].set_title('(a)')\n\n\n# log\n# plt.subplot(232)\nax[0,1].plot(datosFB[1][:,0], datosFB[1][:,2], 'g', linewidth=2)\nax[0,1].plot(datosBT[1][:,0], datosBT[1][:,2], color='orange', linewidth=2)\nax[0,1].plot(datosPD[1][:,0], datosPD[1][:,2], 'm', linewidth=2)\nax[0,1].set_yscale('log')\nax[0,1].set_ylim(ymin=0)\nax[0,1].set_title('(b)')\n\n\n# symmetric log\nax[0,2].plot(datosFB[2][:,0], datosFB[2][:,2], 'g', linewidth=2)\nax[0,2].plot(datosBT[2][:,0], datosBT[2][:,2], color='orange',linewidth=2)\nax[0,2].plot(datosPD[2][:,0], datosPD[2][:,2], 'm', linewidth=2)\nax[0,2].set_yscale('log')\nax[0,2].set_ylim(ymin=0)\nax[0,2].set_title('(c)')\n\n\n# symmetric log\nax[1,0].plot(datosFB[3][:,0], datosFB[3][:,2], 'g', linewidth=2)\nax[1,0].plot(datosBT[3][:,0], datosBT[3][:,2], color='orange', linewidth=2)\nax[1,0].plot(datosPD[3][:,0], datosPD[3][:,2], 'm', linewidth=2)\nax[1,0].set_yscale('log')\nax[1,0].set_ylim(ymin=0)\nax[1,0].set_title('(d)')\n\n\n# symmetric log\nax[1,1].plot(datosFB[4][:,0], datosFB[4][:,2], 'g', linewidth=2)\nax[1,1].plot(datosBT[4][:,0], datosBT[4][:,2], color='orange', linewidth=2)\nax[1,1].plot(datosPD[4][:,0], 
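A small API note on the page object above: driver.switch_to_alert() was deprecated in Selenium 3 and has been removed from current Selenium 4 releases; the supported spelling is the switch_to.alert property. Drop-in replacements for the two alert helpers:

def get_alert_text(self):
    # switch_to is a property; .alert targets the currently active alert
    return self.driver.switch_to.alert.text

def accept_alert(self):
    self.driver.switch_to.alert.accept()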
datosPD[4][:,2], 'm', linewidth=2)\nax[1,1].set_yscale('log')\nax[1,1].set_ylim(ymin=0)\nax[1,1].set_title('(e)')\n\n\n# symmetric log\nax[1,2].plot(datosFB[5][:,0], datosFB[5][:,2], 'g', linewidth=2)\nax[1,2].plot(datosBT[5][:,0], datosBT[5][:,2], color='orange', linewidth=2)\nax[1,2].plot(datosPD[5][:,0], datosPD[5][:,2], 'm', linewidth=2)\nax[1,2].set_yscale('log')\nax[1,2].set_ylim(ymin=0)\nax[1,2].set_title('(f)')\n\nfig.text(0.5, 0, 'Cantidad de elementos', ha='center', fontsize=12)\nfig.text(0, 0.5, 'Tiempo de ejecucion (ms)', va='center', rotation='vertical', fontsize=12)\n# plt.legend( (a1, a2, a3), ('Fuerza bruta', 'Backtracking', 'Programación dinámica'), \n\t # loc='upper center', bbox_to_anchor=(0.5, 0), bbox_transform=plt.gcf().transFigure )\n# plt.legend()\nplt.tight_layout()\nplt.show()","sub_path":"experimentos/grafico.py","file_name":"grafico.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"130720805","text":"from ftw.upgrade import UpgradeStep\nfrom opengever.inbox.subscribers import configure_inboxcontainer_portlets\nfrom opengever.inbox.subscribers import configure_inbox_portlets\n\nclass EnsurePortletConfigurationForInboxesAndInboxcontainers(UpgradeStep):\n \"\"\"Ensure portlet configuration for inboxes and inboxcontainers.\n \"\"\"\n\n def __call__(self):\n for obj in self.objects(\n {'portal_type': ['opengever.inbox.container']},\n u'Ensure portlet configuration for inboxcontainers'):\n\n configure_inboxcontainer_portlets(obj, event=None)\n\n for obj in self.objects(\n {'portal_type': ['opengever.inbox.inbox']},\n u'Ensure portlet configuration for inboxes'):\n\n configure_inbox_portlets(obj, event=None)\n","sub_path":"opengever/inbox/upgrades/20170502101718_ensure_portlet_configuration_for_inboxes_and_inboxcontainers/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"577178829","text":"# -*- coding: utf-8 -*-\r\n# Module for Popbill Cashbill API. It include base functionality of the\r\n# RESTful web service request and parse json result. It uses Linkhub module\r\n# to accomplish authentication APIs.\r\n#\r\n# http://www.popbill.com\r\n# Author : Jeong Yohan (code@linkhub.co.kr)\r\n# Written : 2015-03-24\r\n# Updated : 2017-03-02\r\n# Thanks for your interest.\r\n\r\nfrom .base import PopbillBase,PopbillException\r\n\r\nclass CashbillService(PopbillBase):\r\n \"\"\" 팝빌 현금영수증 API Service Implementation. 
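The six panels in grafico.py differ only in the dataset index and the title, so the whole grid can be drawn by iterating over ax.flat with the arrays already loaded above (datosFB, datosBT, datosPD); newer Matplotlib also prefers bottom= over the deprecated ymin= keyword:

for i, (axis, title) in enumerate(zip(ax.flat, ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)'])):
    axis.plot(datosFB[i][:, 0], datosFB[i][:, 2], 'g', linewidth=2)
    axis.plot(datosBT[i][:, 0], datosBT[i][:, 2], color='orange', linewidth=2)
    axis.plot(datosPD[i][:, 0], datosPD[i][:, 2], 'm', linewidth=2)
    axis.set_yscale('log')
    axis.set_ylim(bottom=0)   # bottom= replaces the deprecated ymin=
    axis.set_title(title)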
\"\"\"\r\n\r\n def __init__(self,LinkID,SecretKey):\r\n \"\"\"생성자\r\n args\r\n LinkID : 링크허브에서 발급받은 링크아이디(LinkID)\r\n SecretKeye 링크허브에서 발급받은 비밀키(SecretKey)\r\n \"\"\"\r\n\r\n super(self.__class__,self).__init__(LinkID,SecretKey)\r\n self._addScope(\"140\")\r\n\r\n def getChargeInfo(self, CorpNum, UserID = None):\r\n \"\"\" 과금정보 확인\r\n args\r\n CorpNum : 회원 사업자번호\r\n UserID : 팝빌 회원아이디\r\n return\r\n 과금정보 객체\r\n raise\r\n PopbillException\r\n \"\"\"\r\n return self._httpget('/Cashbill/ChargeInfo', CorpNum, UserID)\r\n\r\n def getURL(self, CorpNum, UserID, ToGo):\r\n \"\"\" 팝빌 현금영수증 관련 URL\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n UserID : 팝빌 회원아이디\r\n ToGo : 현금영수증 관련 기능 지정 문자.(TBOX : 임시문서함, PBOX : 매출문서함)\r\n return\r\n 30초 보안 토큰을 포함한 url\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n result = self._httpget('/Cashbill?TG=' + ToGo, CorpNum, UserID)\r\n\r\n return result.url\r\n\r\n def getUnitCost(self, CorpNum):\r\n \"\"\" 현금영수증 발행단가 확인.\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n return\r\n 발행단가 by float\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n result = self._httpget('/Cashbill?cfg=UNITCOST', CorpNum)\r\n\r\n return float(result.unitCost)\r\n\r\n def checkMgtKeyInUse(self, CorpNum, MgtKey):\r\n \"\"\" 파트너 문서관리번호 사용여부 확인.\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호(최대 24자리, 숫자, 영문,'-','_'로 구성)\r\n return\r\n 사용 여부 by True/False\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n try:\r\n result = self._httpget('/Cashbill/' + MgtKey, CorpNum)\r\n\r\n return result.itemKey != None and result.itemKey != \"\"\r\n\r\n except PopbillException as PE:\r\n if PE.code == -14000003:\r\n return False\r\n raise PE\r\n\r\n def registIssue(self, CorpNum, cashbill, Memo, UserID = None):\r\n \"\"\" 현금영수증 즉시발행\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n cashbill : 등록할 현금영수증 object. made with Cashbill(...)\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if cashbill == None:\r\n raise PopbillException(-99999999,\"현금영수증 정보가 입력되지 않았습니다.\")\r\n\r\n postData = \"\"\r\n req = {}\r\n\r\n if Memo != None or Memo != '':\r\n cashbill.memo = Memo\r\n\r\n postData = self._stringtify(cashbill)\r\n\r\n return self._httppost('/Cashbill', postData, CorpNum, UserID, \"ISSUE\")\r\n\r\n\r\n def register(self, CorpNum, cashbill, UserID = None):\r\n \"\"\" 현금영수증 등록\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n cashbill : 등록할 현금영수증 object. made with Cashbill(...)\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if cashbill == None:\r\n raise PopbillException(-99999999,\"현금영수증 정보가 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify(cashbill)\r\n\r\n return self._httppost('/Cashbill',postData,CorpNum,UserID)\r\n\r\n\r\n def update(self, CorpNum, MgtKey, cashbill, UserID = None):\r\n \"\"\" 수정\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 원본 현금영수증 문서관리번호\r\n cashbill : 수정할 현금영수증 object. made with Cashbill(...)\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. 
consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n if cashbill == None:\r\n raise PopbillException(-99999999,\"현금영수증 정보가 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify(cashbill)\r\n\r\n return self._httppost('/Cashbill/'+ MgtKey, postData, CorpNum, UserID, \"PATCH\")\r\n\r\n\r\n def issue(self, CorpNum, MgtKey, Memo = None, UserID = None):\r\n \"\"\" 발행\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 원본 현금영수증 문서관리번호\r\n Memo : 발행 메모\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n postData = \"\"\r\n req = {}\r\n\r\n if Memo != None or Memo != '':\r\n req[\"memo\"] = Memo\r\n\r\n postData = self._stringtify(req)\r\n\r\n return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, \"ISSUE\")\r\n\r\n def cancelIssue(self, CorpNum, MgtKey, Memo = None, UserID = None):\r\n \"\"\" 발행취소\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 원본 현금영수증 문서관리번호\r\n Memo : 발행취소 메모\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n postData = \"\"\r\n req = {}\r\n\r\n if Memo != None or Memo != '':\r\n req[\"memo\"] = Memo\r\n\r\n postData = self._stringtify(req)\r\n\r\n return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, \"CANCELISSUE\")\r\n\r\n def delete(self, CorpNum, MgtKey, UserID = None):\r\n \"\"\" 삭제\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 원본 현금영수증 문서관리번호\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. 
consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n return self._httppost('/Cashbill/' + MgtKey,'', CorpNum, UserID, \"DELETE\")\r\n\r\n def search(self,CorpNum,DType,SDate,EDate,State,TradeType,TradeUsage,TaxationType,Page,PerPage,Order,UserID=None,QString=None) :\r\n \"\"\" 목록 조회\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n DType : 일자유형, R-등록일자, T-거래일자, I-발행일자 중 택 1\r\n SDate : 시작일자, 표시형식(yyyyMMdd)\r\n EDate : 종료일자, 표시형식(yyyyMMdd)\r\n State : 상태코드 배열, 2,3번째 자리에 와일드카드(*) 사용가능\r\n TradeType : 현금영수증 형태 배열, N-일반현금영수증, C-취소현금영수증\r\n TradeUsage : 거래용도 배열, P-소득공제용, C-지출증빙용\r\n TaxationType : 과세형태 배열, T-과세, N-비과세\r\n Page : 페이지번호\r\n PerPage : 페이지당 목록개수\r\n Order : 정렬방향, D-내림차순, A-오름차순\r\n UserID : 팝빌 회원아이디\r\n QString : 현금영수증 식별번호, 미기재시 전체조회\r\n \"\"\"\r\n\r\n uri = '/Cashbill/Search'\r\n uri += '?DType=' + DType\r\n uri += '&SDate=' + SDate\r\n uri += '&EDate=' + EDate\r\n uri += '&State=' + ','.join(State)\r\n uri += '&TradeUsage=' + ','.join(TradeUsage)\r\n uri += '&TradeType=' + ','.join(TradeType)\r\n uri += '&TaxationType=' + ','.join(TaxationType)\r\n uri += '&Page=' + str(Page)\r\n uri += '&PerPage=' + str(PerPage)\r\n uri += '&Order=' + Order\r\n\r\n if QString is not None :\r\n uri += '&QString=' + QString\r\n\r\n return self._httpget(uri, CorpNum,UserID)\r\n\r\n def getInfo(self, CorpNum, MgtKey):\r\n \"\"\" 상태/요약 정보 조회\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n return\r\n 문서 상태/요약 정보 object\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n return self._httpget('/Cashbill/' + MgtKey, CorpNum)\r\n\r\n def getInfos(self,CorpNum,MgtKeyList):\r\n \"\"\" 상태정보 다량 확인, 최대 1000건\r\n args\r\n CorpNum : 회원 사업자 번호\r\n MgtKeyList : 문서관리번호 목록\r\n return\r\n 상태정보 목록 as List\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKeyList == None or len(MgtKeyList) < 1:\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify(MgtKeyList)\r\n\r\n return self._httppost('/Cashbill/States',postData,CorpNum)\r\n\r\n def getDetailInfo(self, CorpNum, MgtKey):\r\n \"\"\" 상세정보 조회\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n return\r\n 문서 상세정보 object\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n return self._httpget('/Cashbill/' + MgtKey + '?Detail', CorpNum)\r\n\r\n def sendEmail(self, CorpNum, MgtKey, ReceiverEmail, UserID = None):\r\n \"\"\" 알림메일 재전송\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n ReceiverEmail : 수신자 이메일 주소\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n if ReceiverEmail == None or ReceiverEmail == \"\" :\r\n raise PopbillException(-99999999,\"수신자 메일주소가 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify({\"receiver\" : ReceiverEmail})\r\n\r\n return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, \"EMAIL\")\r\n\r\n def sendSMS(self, CorpNum, MgtKey, Sender, Receiver, Contents, UserID = None):\r\n \"\"\" 알림문자 전송\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n Sender : 발신번호\r\n Receiver : 수신번호\r\n Contents : 문자 메시지 내용. 최대 90Byte. 초과시 길이가 조정되어 전송됨\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. 
consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n if Receiver == None or Receiver == \"\" :\r\n raise PopbillException(-99999999,\"문자 수신번호가 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify({\r\n \"sender\" : Sender,\r\n \"receiver\" : Receiver,\r\n \"contents\" : Contents\r\n })\r\n\r\n return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, \"SMS\")\r\n\r\n def sendFAX(self, CorpNum, MgtKey, Sender, Receiver, UserID = None):\r\n \"\"\" 현금영수증 팩스전송\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n Sender : 발신번호\r\n Receiver : 수신번호\r\n UserID : 팝빌회원 아이디\r\n return\r\n 처리결과. consist of code and message\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n if Receiver == None or Receiver == \"\" :\r\n raise PopbillException(-99999999,\"팩스 수신번호가 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify({\r\n \"sender\" : Sender,\r\n \"receiver\" : Receiver\r\n })\r\n\r\n return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, \"FAX\")\r\n\r\n\r\n def getLogs(self, CorpNum, MgtKey):\r\n \"\"\" 문서이력 조회\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n return\r\n 문서 이력 목록 as List\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n return self._httpget('/Cashbill/' + MgtKey + '/Logs', CorpNum)\r\n\r\n def getPopUpURL(self, CorpNum, MgtKey, UserID = None):\r\n \"\"\" 현금영수증 1장의 팝빌 화면을 볼수있는 팝업 URL 확인\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n UserID : 팝빌회원 아이디\r\n return\r\n 팝빌 URL as str\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n result = self._httpget('/Cashbill/' + MgtKey + '?TG=POPUP', CorpNum, UserID)\r\n\r\n return result.url\r\n\r\n def getPrintURL(self, CorpNum, MgtKey, UserID = None):\r\n \"\"\" 공급자용 인쇄 URL 확인\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n UserID : 팝빌회원 아이디\r\n return\r\n 팝빌 URL as str\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n result = self._httpget('/Cashbill/' + MgtKey + '?TG=PRINT', CorpNum, UserID)\r\n\r\n return result.url\r\n\r\n\r\n def getEPrintURL(self, CorpNum, MgtKey, UserID = None):\r\n \"\"\" 공급받는자용 인쇄 URL 확인\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n UserID : 팝빌회원 아이디\r\n return\r\n 팝빌 URL as str\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n result = self._httpget('/Cashbill/' + MgtKey + '?TG=EPRINT', CorpNum, UserID)\r\n\r\n return result.url\r\n\r\n\r\n def getMailURL(self, CorpNum, MgtKey, UserID = None):\r\n \"\"\" 공급받는자용 메일 링크 URL 확인\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKey : 문서관리번호\r\n UserID : 팝빌회원 아이디\r\n return\r\n 팝빌 URL as str\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKey == None or MgtKey == \"\" :\r\n raise PopbillException(-99999999,\"관리번호가 입력되지 않았습니다.\")\r\n\r\n result = self._httpget('/Cashbill/' + MgtKey + '?TG=MAIL', CorpNum, UserID)\r\n\r\n return result.url\r\n\r\n\r\n def getMassPrintURL(self, CorpNum, MgtKeyList, UserID = None):\r\n \"\"\" 다량 인쇄 URL 확인\r\n args\r\n CorpNum : 팝빌회원 사업자번호\r\n MgtKeyList : 
문서관리번호 배열\r\n UserID : 팝빌회원 아이디\r\n return\r\n 팝빌 URL as str\r\n raise\r\n PopbillException\r\n \"\"\"\r\n if MgtKeyList == None:\r\n raise PopbillException(-99999999,\"관리번호 배열이 입력되지 않았습니다.\")\r\n\r\n postData = self._stringtify(MgtKeyList)\r\n\r\n result = self._httppost('/Cashbill/Prints', postData, CorpNum, UserID)\r\n\r\n return result.url\r\n\r\nclass Cashbill(object):\r\n def __init__(self,**kwargs):\r\n self.__dict__ = kwargs\r\n","sub_path":"popbill/cashbillService.py","file_name":"cashbillService.py","file_ext":"py","file_size_in_byte":18733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591015054","text":"import ujson\nimport websockets\nfrom ..base import _wrap\nfrom ...base import StreamNone, StreamEnd\n\n\ndef WebSocket(foo, foo_kwargs=None, *args, **kwargs):\n return AsyncWebSocket(foo, foo_kwargs, *args, **kwargs)\n\n\ndef AsyncWebSocket(foo, foo_kwargs=None, url='', json=False, wrap=False, field=None, response=False):\n foo_kwargs = foo_kwargs or {}\n foo = _wrap(foo, foo_kwargs)\n\n async def _send(foo, url, json=False, wrap=False, field=None, response=False):\n async with websockets.connect(url) as websocket:\n async for data in foo():\n if isinstance(data, StreamNone):\n continue\n elif not data or isinstance(data, StreamEnd):\n break\n\n if wrap:\n data = [data]\n if json:\n data = ujson.dumps(data)\n\n await websocket.send(data)\n\n if response:\n msg = await websocket.recv()\n\n else:\n msg = '{}'\n\n if json:\n msg = json.loads(msg)\n\n if field:\n msg = msg[field]\n\n if wrap:\n msg = [msg]\n\n yield msg\n\n return _wrap(_send, dict(foo=foo, url=url, json=json, wrap=wrap, field=field, response=response), name='WebSocket')\n","sub_path":"tributary/reactive/output/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412029477","text":"# -*- coding: utf-8 -*-\n# @Time : 10/8/18 11:30 AM\n# @Author : Jax.Li\n# @FileName: unit.py\n# @Software: PyCharm\n# @Blog :https://blog.jaxli.com\n\nimport urllib\nimport json\nfrom redis import Redis, RedisError\nfrom datetime import datetime, timedelta\nfrom UNIT.aifunctions import AIFunctions\nfrom UNIT.jwhelper import redis\nimport base64\n\n# client_id 为从UNIT的【发布上线】模块进入百度云创建应用后获取的API Key\nclient_id = 'haiIiyEAja8xcGGXWyTIC6Zt'\n# client_secret 为从UNIT的【发布上线】模块进入百度云创建应用后获取的Secret Key\nclient_secret = 'o2NPtIze6pPgGgjweLVaRHOBSvMc6hCV'\nhost = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + client_id + '&client_secret=' + client_secret\n# 上面的XXXXXXX 要替换成自己的API Key,YYYYYY要换成自己的Secret Key\n\n# 下面的log_id在真实应用中要自己生成,可是递增的数字\nlog_id = '7758523'\n# 下面的user_id在真实应用中要是自己业务中的真实用户id、设备号、ip地址等,方便在日志分析中分析定位问题\nuser_id = '855654'\n# 下面要替换成自己的bot_id\nbot_id = '12699'\n\naccess_token = \"\"\nbot_session = '\"\"'\n\n\ndef create_unit_token():\n request = urllib.request.Request(host)\n request.add_header('Content-Type', 'application/json; charset=UTF-8')\n response = urllib.request.urlopen(request)\n global access_token\n access_token = json.load(response)[\"access_token\"]\n # 将unit token写入redis\n redis.set(\"unit_access_token\", access_token)\n dt = timedelta(days=10)\n redis.expire(\"unit_access_token\", dt)\n print(\"UNIT token not exists, write to redis \", access_token)\n\n\ndef request_unit(query, jw_token):\n global access_token\n if redis.exists(\"unit_access_token\"):\n access_token = str(redis.get(\"unit_access_token\"))\n 
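Taken together, CashbillService follows the SDK's usual shape: construct it with the LinkID/SecretKey pair, then call per-document methods keyed by CorpNum and MgtKey. A hedged usage sketch — the credentials are placeholders, and the Cashbill field names must come from Popbill's cashbill spec, since Cashbill(**kwargs) above accepts arbitrary keywords:

from popbill import CashbillService, Cashbill, PopbillException  # assumes package-level re-exports

service = CashbillService("YOUR_LINK_ID", "YOUR_SECRET_KEY")      # placeholder credentials

try:
    bill = Cashbill(mgtKey="20170302-01")   # real submissions need the spec'd fields as well
    result = service.registIssue("1234567890", bill, "issued via API")
    print(result.code, result.message)
    info = service.getInfo("1234567890", "20170302-01")
except PopbillException as pe:
    print(pe.code, pe.message)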
print(\"UNIT token exists, read from redis \", access_token)\n else:\n create_unit_token()\n\n global bot_session\n url = 'https://aip.baidubce.com/rpc/2.0/unit/bot/chat?access_token=' + access_token\n post_data = '{\\\"bot_session\\\":' +bot_session +',\\\"log_id\\\":\\\"'+log_id+'\\\",\\\"request\\\":{\\\"bernard_level\\\":1,\\\"client_session\\\":\\\"{\\\\\\\"client_results\\\\\\\":\\\\\\\"\\\\\\\", \\\\\\\"candidate_options\\\\\\\":[]}\\\",\\\"query\\\":\\\"' + query + '\\\",\\\"query_info\\\":{\\\"asr_candidates\\\":[],\\\"source\\\":\\\"KEYBOARD\\\",\\\"type\\\":\\\"TEXT\\\"},\\\"updates\\\":\\\"\\\",\\\"user_id\\\":\\\"'+user_id+'\\\"},\\\"bot_id\\\":'+bot_id+',\\\"version\\\":\\\"2.0\\\"}'\n request = urllib.request.Request(url, post_data.encode('UTF-8'))\n\n request.add_header('Content-Type', 'application/json;charset=UTF-8')\n response = urllib.request.urlopen(request)\n content = response.read()\n if content:\n print(content)\n data = json.loads(content)\n print('用户问: ' + query)\n print('BOT答复: ' + data['result']['response']['action_list'][0]['say'])\n intent = data['result']['response']['schema']['intent']\n print('意图: ' + intent)\n bot_session = json.dumps(data[\"result\"][\"bot_session\"])\n slots = data['result']['response']['schema']['slots']\n # print '词槽: ' + slot[0]['name'] + \" = \" +slot[0]['original_word']\n for slot in slots:\n print('词槽: ' + slot['name'] + \" = \" + slot['original_word'])\n\n # 使用AI来处理一次bot返回的数据\n ai = AIFunctions(jw_token)\n if len(slots) > 0 and hasattr(ai, intent.lower()):\n return getattr(ai, intent.lower())(slots)\n\n if intent == \"\" and len(slots) > 1:\n answer = data['result']['response']['action_list'][0]['say']\n for slot in slots:\n answer += \"\\n\" + slot['original_word']\n return answer\n\n return data['result']['response']['action_list'][0]['say']\n\n\ndef download_department_list():\n if not redis.exists(\"department_by_name\"):\n return\n names = redis.hkeys(\"department_by_name\")\n with open(\"departments.txt\", mode=\"wt\", encoding=\"UTF-8\") as f:\n for name in names:\n depart_name = base64.b64decode(name.decode(\"UTF-8\")).decode(\"UTF-8\")\n f.write(depart_name)\n f.write(\"\\n\")\n print(depart_name)\n\n\n# download_department_list()\n","sub_path":"UNIT/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"16293962","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 4 20:49:23 2021\r\n\r\n@author: BWSI AUV Challenge Instructional Staff\r\n\"\"\"\r\nfrom BWSI_AUV import AUV\r\nfrom BWSI_BuoyField import BuoyField\r\nfrom AUV_Controller import AUVController\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nCOURSE_NUMBER=0\r\nVISIBILITY=99\r\n\r\ndef select_buoy_field(course_num):\r\n ### ################################\r\n #Set up the buoy field\r\n nGates = 100\r\n \r\n if course_num==0:\r\n nGates = 50\r\n buoy_field_config = {'nGates': nGates,\r\n 'gate_spacing': 100,\r\n 'gate_width': 10,\r\n 'max_offset': 50,\r\n 'style': 'linear',\r\n 'heading': 52.3}\r\n\r\n elif course_num==1:\r\n buoy_field_config = {'nGates': nGates,\r\n 'gate_spacing': 100,\r\n 'gate_width': 10,\r\n 'max_offset': 20,\r\n 'style': 'sine',\r\n 'sine_amplitude': 50,\r\n 'sine_period':500,\r\n 'heading': 50}\r\n elif course_num==2: \r\n buoy_field_config = {'nGates': nGates,\r\n 'gate_spacing': 100,\r\n 'gate_width': 10,\r\n 'max_offset': 20,\r\n 'style': 'sine',\r\n 'sine_amplitude': 50,\r\n 
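unit.py's token handling is the classic cache-aside pattern: read the token from Redis, and only call the OAuth endpoint on a miss, storing the result with a TTL. One subtlety worth fixing there: redis-py returns bytes, so str(redis.get(...)) produces the literal string "b'...'" rather than the token; decoding is what's wanted. A generic sketch:

from datetime import timedelta
from redis import Redis

redis = Redis()

def get_cached_token(fetch_token, key="unit_access_token", ttl=timedelta(days=10)):
    cached = redis.get(key)
    if cached is not None:
        return cached.decode("utf-8")  # decode bytes; str(cached) would yield "b'...'"
    token = fetch_token()              # cache miss: call the OAuth endpoint
    redis.set(key, token, ex=ttl)      # value and expiry in a single call
    return token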
'sine_period':500,\r\n 'heading': 177}\r\n elif course_num==3:\r\n buoy_field_config = {'nGates': nGates,\r\n 'gate_spacing': 100,\r\n 'gate_width': 10,\r\n 'max_offset': 20,\r\n 'style': 'square',\r\n 'heading': 50}\r\n elif course_num==4:\r\n buoy_field_config = {'nGates': nGates,\r\n 'gate_spacing': 100,\r\n 'gate_width': 10,\r\n 'max_offset': 20,\r\n 'style': 'square',\r\n 'heading': 0}\r\n else:\r\n buoy_field_config = None\r\n\r\n return buoy_field_config\r\n\r\ndef main():\r\n doPlots = True\r\n \r\n # create a buoy field\r\n datum = (42.4, -171.3)\r\n \r\n # set up the buoy field\r\n buoyField = BuoyField(datum,\r\n position_style='P')\r\n \r\n \r\n buoy_field_config = select_buoy_field(COURSE_NUMBER)\r\n buoyField.configure(buoy_field_config)\r\n buoyField.show_field()\r\n\r\n myAUV = AUV(latlon=(42.4, -171.3), heading=buoy_field_config['heading']+22, datum=datum, visibility=VISIBILITY)\r\n \r\n #buoyField.scan_field(num_buoys=3)\r\n ##################################\r\n \r\n start_pos = myAUV.get_position()\r\n min_dist = buoyField.minimum_distance(start_pos)\r\n battery_init = 10*min_dist\r\n myAUV.set_battery(battery_init)\r\n \r\n #################################\r\n ## mission startup\r\n running_time = 0\r\n dt = 1\r\n cmd = \"ENGINE HALF AHEAD\"\r\n reply = myAUV.engine_command(cmd)\r\n print(f\"{cmd} : {reply}\")\r\n \r\n # keep the track history\r\n auv_track = list()\r\n auv_track.append(myAUV.get_position())\r\n \r\n if doPlots:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n \r\n done = False\r\n \r\n auv_state = myAUV.get_state()\r\n \r\n # ***YOUR CODE HERE****\r\n auv_controller = AUVController(auv_state)\r\n \r\n num_commands = 0\r\n frame_skip = 2\r\n frame = 0\r\n current_time = 0\r\n while not done:\r\n current_time += dt\r\n myAUV.update_state(dt)\r\n battery_remain = myAUV.get_battery()\r\n current_position = myAUV.get_position()\r\n \r\n auv_track.append(current_position)\r\n buoyField.check_buoy_gates(auv_track[-2], auv_track[-1])\r\n gsense, rsense = myAUV.read_laser(buoyField)\r\n gnext, rnext = buoyField.next_gate()\r\n if gnext is None:\r\n result_str = f\"Congratulations you have arrived at your destination, clearing {buoy_field_config['nGates']} gates in {running_time} seconds with {battery_remain/battery_init*100:.2f}% battery remaining. 
You issued {num_commands} commands.\"\r\n print(result_str)\r\n break\r\n \r\n # current heading of vehicle\r\n auv_state = myAUV.get_state()\r\n \r\n # ***YOUR CODE HERE*** \r\n command = auv_controller.decide(auv_state, gsense, rsense, sensor_type='RANGE_ANGLE')\r\n \r\n if command is not None:\r\n reply = myAUV.helm_command(command)\r\n print(f\"{reply}\")\r\n num_commands += 1\r\n \r\n # ***YOUR CODE HERE*** \r\n tgt_hdg = auv_controller.get_desired_heading()\r\n #print(f\"{gnext}, {rnext}, {current_position}\")\r\n print(f\"auv_heading is {auv_state['heading']}, target heading is {tgt_hdg}\")\r\n \r\n if doPlots and not (frame % frame_skip):\r\n plt.plot(gnext[0], gnext[1], 'go')\r\n plt.plot(rnext[0], rnext[1], 'ro')\r\n trk = np.array(auv_track)\r\n plt.plot(trk[-300:,0], trk[-300:,1], 'k') \r\n \r\n ax.set_xlim(current_position[0]-100, current_position[0]+100)\r\n ax.set_ylim(current_position[1]-100, current_position[1]+100)\r\n ax.set_aspect('equal')\r\n \r\n plt.pause(0.01)\r\n plt.draw() \r\n \r\n frame += 1\r\n \r\n running_time = running_time + dt\r\n \r\n # are we done?\r\n if buoyField.missed_gate(auv_track[-2], current_position, current_time):\r\n result_str = f\"End of mission (MISSED GATE): you successfully cleared {buoyField.clearedBuoys()} gates of {buoy_field_config['nGates']} in {running_time} seconds. You issued {num_commands} commands.\"\r\n print(result_str)\r\n done = True\r\n if battery_remain <= 0:\r\n result_str = f\"End of mission (OUT OF BATTERY): you successfully cleared {buoyField.clearedBuoys()} gates of {buoy_field_config['nGates']} in {running_time} seconds. You issued {num_commands} commands.\"\r\n print(result_str)\r\n done = True\r\n \r\nif __name__ == \"__main__\":\r\n main() \r\n \r\n","sub_path":"lab10/lab10_navigating_with_a_laser.py","file_name":"lab10_navigating_with_a_laser.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"61551055","text":"import argparse\nimport pickle\nimport os\n\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\n\nfrom config import constants\nfrom core.article_log import ArticleLog\nfrom models.mlp import MLPSentimentModel\nfrom utils.disk import read_csv_dict\nfrom utils.text import clean_text\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='train')\n parser.add_argument('-m', '--embed_dim', help='Embedding dimension.', required=True, type=int)\n parser.add_argument('-f', '--max_features', help='Max features.', required=True, type=int)\n parser.add_argument('-d', '--dropout', help='Dropout.', required=True, type=float)\n parser.add_argument('-l', '--lstm', help='LSTM Units.', required=True, type=int)\n parser.add_argument('-b', '--batch_size', help='Batch size.', required=True, type=int)\n parser.add_argument('-e', '--epochs', help='Epoch count.', required=True, type=int)\n args = parser.parse_args()\n x_train, y_train = [], []\n pos = neg = neutral = 0\n\n gt_labels = read_csv_dict(path=os.path.join(constants.LOG_DIR, 'gt_labels.csv'))\n\n for gt in gt_labels:\n article = ArticleLog.get_article(coll='articles', uuid=gt['uuid'])\n title = clean_text(article['title'], remove_stopwords=True, stem_word=False)\n sentiment = gt['title_sentiment']\n if sentiment == constants.Sentiment.POSITIVE.name:\n x_train.append(title)\n y_train.append([1, 0])\n pos += 1\n elif sentiment == constants.Sentiment.NEGATIVE.name:\n x_train.append(title)\n 
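main() above leaves AUVController as the student's exercise; per the calls in the loop it only needs decide(), which may return a helm command, and get_desired_heading(). A deliberately minimal proportional sketch — the (range, angle) reading layout and the helm-command grammar are guesses, since neither is defined in this file:

class AUVController:
    def __init__(self, auv_state):
        self.desired_heading = auv_state['heading']

    def decide(self, auv_state, green, red, sensor_type='RANGE_ANGLE'):
        if green is None or red is None:
            return None                          # gate not visible: hold course
        mid_rel = (green[1] + red[1]) / 2.0      # assumed relative bearing of the gate centre
        self.desired_heading = (auv_state['heading'] + mid_rel) % 360
        err = (self.desired_heading - auv_state['heading'] + 540) % 360 - 180
        if abs(err) < 2.0:
            return None
        side = 'STARBOARD' if err > 0 else 'PORT'
        return f'TURN {side} {min(abs(err), 25):.0f} DEGREES'  # command grammar is assumed

    def get_desired_heading(self):
        return self.desired_heading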
y_train.append([0, 1])\n neg += 1\n\n max_len = 25\n print(\"Positive: {}, Negative: {}, Neutral: {}\".format(pos, neg, neutral))\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(x_train)\n with open(os.path.join(constants.MODEL_DIR, 'tokenizer.pickle'), 'wb') as hndl:\n pickle.dump(tokenizer, hndl, protocol=pickle.HIGHEST_PROTOCOL)\n\n x_train = tokenizer.texts_to_sequences(x_train)\n x_train = sequence.pad_sequences(x_train, maxlen=max_len)\n y_train = np.array(y_train).astype(np.uint8)\n\n model = MLPSentimentModel(log_dir=constants.MODEL_DIR, max_len=max_len)\n model.build(embed_dim=args.embed_dim, max_features=args.max_features)\n model.train(x_train=x_train, y_train=y_train, batch_size=args.batch_size, epochs=args.epochs)\n","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"250929173","text":"#!/usr/bin/python3\n\n\ndef hangman():\n word = input(\"enter a word: \")\n max_questions = int(input(\"enter the max number of questions: \"))\n number_guesses = 1\n word_as_list = []\n progress = \"\"\n corr = []\n for letter in word:\n word_as_list.append(letter)\n progress = \"_\"\n while number_guesses <= max_questions:\n c = 0\n my_guess = input(\"guess a letter: \")\n my_progress = make_a_guess(my_guess, word_as_list)\n number_guesses += 1\n if my_progress:\n pro = progress.replace(progress[c], my_guess, 1)\n print(pro)\n corr.append(pro)\n print(corr)\n\n\ndef make_a_guess(guess, word_list):\n if guess in word_list:\n return True\n return False\n\n\n\n\nhangman()","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"124355574","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom type4py import logger, AVAILABLE_TYPES_NUMBER, MAX_PARAM_TYPE_DEPTH\nfrom libsa4py.merge import merge_jsons_to_dict, create_dataframe_fns, create_dataframe_vars\nfrom libsa4py.cst_transformers import ParametricTypeDepthReducer\nfrom libsa4py.cst_lenient_parser import lenient_parse_module\nfrom libsa4py.utils import list_files\nfrom typing import Tuple\nfrom ast import literal_eval\nfrom collections import Counter\nfrom os.path import exists, join\nfrom tqdm import tqdm\nimport regex\nimport os\nimport pickle\nimport pandas as pd\nimport numpy as np\n\nlogger.name = __name__\ntqdm.pandas()\n\n# Precompile often used regex\nfirst_cap_regex = regex.compile('(.)([A-Z][a-z]+)')\nall_cap_regex = regex.compile('([a-z0-9])([A-Z])')\nsub_regex = r'typing\\.|typing_extensions\\.|t\\.|builtins\\.|collections\\.'\n\n\ndef make_types_consistent(df_all: pd.DataFrame, df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Removes typing module from type annotations\n \"\"\"\n\n def remove_quote_types(t: str):\n s = regex.search(r'^\\'(.+)\\'$', t)\n if bool(s):\n return s.group(1)\n else:\n #print(t)\n return t\n \n df_all['return_type'] = df_all['return_type'].progress_apply(lambda x: regex.sub(sub_regex, \"\", str(x)) if x else x)\n df_all['arg_types'] = df_all['arg_types'].progress_apply(lambda x: str([regex.sub(sub_regex, \"\", t) \\\n if t else t for t in literal_eval(x)]))\n df_all['return_type'] = df_all['return_type'].progress_apply(remove_quote_types)\n df_all['arg_types'] = df_all['arg_types'].progress_apply(lambda x: str([remove_quote_types(t) if t else 
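train.py's text pipeline is standard Keras preprocessing: fit_on_texts builds the word index (most frequent word = 1), texts_to_sequences maps titles to index lists, and pad_sequences zero-pads on the left by default so the batch is rectangular. On a tiny input:

from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence

titles = ['stocks rally on earnings', 'stocks fall on weak earnings']
tok = Tokenizer()
tok.fit_on_texts(titles)                         # builds tok.word_index
seqs = tok.texts_to_sequences(titles)            # [[1, 4, 2, 3], [1, 5, 2, 6, 3]]
padded = sequence.pad_sequences(seqs, maxlen=6)  # left-padded with zeros
print(padded.shape)                              # (2, 6)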
t for t in literal_eval(x)]))\n\n df_vars['var_type'] = df_vars['var_type'].progress_apply(lambda x: regex.sub(sub_regex, \"\", str(x)))\n df_vars['var_type'] = df_vars['var_type'].progress_apply(remove_quote_types)\n \n return df_all, df_vars\n\ndef resolve_type_aliasing(df_param: pd.DataFrame, df_ret: pd.DataFrame,\n df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Resolves type aliasing and mappings. e.g. `[]` -> `list`\n \"\"\"\n import libcst as cst\n # Problematic patterns: (?<=.*)Tuple\\[Any, *?.*?\\](?<=.*)\n # TODO: Handle a case like Dict[str, any] -> Dict[str, Any]\n type_aliases = {'^{}$|^Dict$|^Dict\\[\\]$|(?<=.*)Dict\\[Any, *?Any\\](?=.*)|^Dict\\[unknown, *Any\\]$': 'dict',\n '^Set$|(?<=.*)Set\\[\\](?<=.*)|^Set\\[Any\\]$': 'set',\n '^Tuple$|(?<=.*)Tuple\\[\\](?<=.*)|^Tuple\\[Any\\]$|(?<=.*)Tuple\\[Any, *?\\.\\.\\.\\](?=.*)|^Tuple\\[unknown, *?unknown\\]$|^Tuple\\[unknown, *?Any\\]$|(?<=.*)tuple\\[\\](?<=.*)': 'tuple',\n '^Tuple\\[(.+), *?\\.\\.\\.\\]$': r'Tuple[\\1]',\n '\\\\bText\\\\b': 'str',\n '^\\[\\]$|(?<=.*)List\\[\\](?<=.*)|^List\\[Any\\]$|^List$': 'list',\n '^\\[{}\\]$': 'List[dict]',\n '(?<=.*)Literal\\[\\'.*?\\'\\](?=.*)': 'Literal',\n '(?<=.*)Literal\\[\\d+\\](?=.*)': 'Literal', # Maybe int?!\n '^Callable\\[\\.\\.\\., *?Any\\]$|^Callable\\[\\[Any\\], *?Any\\]$|^Callable[[Named(x, Any)], Any]$': 'Callable',\n '^Iterator[Any]$': 'Iterator',\n '^OrderedDict[Any, *?Any]$': 'OrderedDict',\n '^Counter[Any]$': 'Counter',\n '(?<=.*)Match[Any](?<=.*)': 'Match'}\n\n def resolve_type_alias(t: str):\n org_t = t\n for t_alias in type_aliases:\n if regex.search(regex.compile(t_alias), t):\n t = regex.sub(regex.compile(t_alias), type_aliases[t_alias], t)\n return t\n\n df_param['arg_type'] = df_param['arg_type'].progress_apply(resolve_type_alias)\n df_ret['return_type'] = df_ret['return_type'].progress_apply(resolve_type_alias)\n df_vars['var_type'] = df_vars['var_type'].progress_apply(resolve_type_alias)\n\n return df_param, df_ret, df_vars\n\ndef preprocess_parametric_types(df_param: pd.DataFrame, df_ret: pd.DataFrame,\n df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Reduces the depth of parametric types\n \"\"\"\n from libcst import parse_module, ParserSyntaxError\n global s\n s = 0\n def reduce_depth_param_type(t: str) -> str:\n global s\n if regex.match(r'.+\\[.+\\]', t):\n try:\n t = parse_module(t)\n t = t.visit(ParametricTypeDepthReducer(max_annot_depth=MAX_PARAM_TYPE_DEPTH))\n return t.code\n except ParserSyntaxError:\n try:\n t = lenient_parse_module(t)\n t = t.visit(ParametricTypeDepthReducer(max_annot_depth=MAX_PARAM_TYPE_DEPTH))\n s += 1\n return t.code\n except ParserSyntaxError:\n return None\n else:\n return t\n\n df_param['arg_type'] = df_param['arg_type'].progress_apply(reduce_depth_param_type)\n df_ret['return_type'] = df_ret['return_type'].progress_apply(reduce_depth_param_type)\n df_vars['var_type'] = df_vars['var_type'].progress_apply(reduce_depth_param_type)\n logger.info(f\"Sucssesfull lenient parsing {s}\")\n\n return df_param, df_ret, df_vars\n\ndef filter_functions(df: pd.DataFrame, funcs=['str', 'unicode', 'repr', 'len', 'doc', 'sizeof']) -> pd.DataFrame:\n \"\"\"\n Filters functions which are not useful.\n :param df: dataframe to use\n :return: filtered dataframe\n \"\"\"\n\n df_len = len(df)\n logger.info(f\"Functions before dropping on __*__ methods {len(df):,}\")\n df = df[~df['name'].isin(funcs)]\n logger.info(f\"Functions after dropping on __*__ methods {len(df):,}\")\n 
logger.info(f\"Filtered out {df_len - len(df):,} functions.\")\n\n return df\n\ndef filter_variables(df_vars: pd.DataFrame, types=['Any', 'None', 'object', 'type', 'Type[Any]',\n 'Type[cls]', 'Type[type]', 'Type', 'TypeVar', 'Optional[Any]']):\n \"\"\"\n Filters out variables with specified types such as Any or None\n \"\"\"\n\n df_var_len = len(df_vars)\n logger.info(f\"Variables before dropping on {','.join(types)}: {len(df_vars):,}\")\n df_vars = df_vars[~df_vars['var_type'].isin(types)]\n logger.info(f\"Variables after dropping on {','.join(types)}: {len(df_vars):,}\")\n logger.info(f\"Filtered out {df_var_len - len(df_vars):,} variables.\")\n\n return df_vars\n\ndef filter_var_wo_type(df_vars: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Filters out variables without a type\n \"\"\"\n df_var_len = len(df_vars)\n logger.info(f\"Variables before dropping: {len(df_vars):,}\")\n df_vars = df_vars[df_vars['var_type'].notnull()]\n logger.info(f\"Variables after dropping dropping: {len(df_vars):,}\")\n logger.info(f\"Filtered out {df_var_len - len(df_vars):,} variables w/o a type.\")\n\n return df_vars\n\ndef gen_argument_df(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates a new dataframe containing all argument data.\n :param df: dataframe for which to extract argument\n :return: argument dataframe\n \"\"\"\n arguments = []\n for i, row in tqdm(df.iterrows(), total=len(df.index), desc=\"Processing arguments\"):\n for p_i, arg_name in enumerate(literal_eval(row['arg_names'])):\n\n # Ignore self arg\n if arg_name == 'self':\n continue\n\n arg_type = literal_eval(row['arg_types'])[p_i].strip('\\\"')\n\n # Ignore Any or None types\n # TODO: Ignore also object type\n # TODO: Ignore Optional[Any]\n if arg_type == '' or arg_type in {'Any', 'None', 'object'}:\n continue\n\n arg_descr = literal_eval(row['arg_descrs'])[p_i]\n arg_occur = [a.replace('self', '').strip() if 'self' in a.split() else a for a in literal_eval(row['args_occur'])[p_i]]\n other_args = \" \".join([a for a in literal_eval(row['arg_names']) if a != 'self'])\n arguments.append([row['file'], row['name'], row['func_descr'], arg_name, arg_type, arg_descr, other_args, arg_occur])\n\n return pd.DataFrame(arguments, columns=['file', 'func_name', 'func_descr', 'arg_name', 'arg_type', 'arg_comment', 'other_args',\n 'arg_occur'])\n\ndef filter_return_dp(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Filters return datapoints based on a set of criteria.\n \"\"\"\n\n logger.info(f\"Functions before dropping on return type {len(df):,}\")\n df = df.dropna(subset=['return_type'])\n logger.info(f\"Functions after dropping on return type {len(df):,}\")\n\n logger.info(f\"Functions before dropping nan, None, Any return type {len(df):,}\")\n to_drop = np.invert((df['return_type'] == 'nan') | (df['return_type'] == 'None') | (df['return_type'] == 'Any'))\n df = df[to_drop]\n logger.info(f\"Functions after dropping nan return type {len(df):,}\")\n\n logger.info(f\"Functions before dropping on empty return expression {len(df):,}\")\n df = df[df['return_expr'].apply(lambda x: len(literal_eval(x))) > 0]\n logger.info(f\"Functions after dropping on empty return expression {len(df):,}\")\n\n return df\n\ndef format_df(df: pd.DataFrame) -> pd.DataFrame:\n df['arg_names'] = df['arg_names'].apply(lambda x: literal_eval(x))\n df['arg_types'] = df['arg_types'].apply(lambda x: literal_eval(x))\n df['arg_descrs'] = df['arg_descrs'].apply(lambda x: literal_eval(x))\n df['return_expr'] = df['return_expr'].apply(lambda x: literal_eval(x))\n\n return df\n\ndef 
encode_all_types(df_ret: pd.DataFrame, df_params: pd.DataFrame, df_vars: pd.DataFrame,\n output_dir: str):\n all_types = np.concatenate((df_ret['return_type'].values, df_params['arg_type'].values,\n df_vars['var_type'].values), axis=0)\n le_all = LabelEncoder()\n le_all.fit(all_types)\n df_ret['return_type_enc_all'] = le_all.transform(df_ret['return_type'].values)\n df_params['arg_type_enc_all'] = le_all.transform(df_params['arg_type'].values)\n df_vars['var_type_enc_all'] = le_all.transform(df_vars['var_type'].values)\n\n unq_types, count_unq_types = np.unique(all_types, return_counts=True)\n pd.DataFrame(\n list(zip(le_all.transform(unq_types), [unq_types[i] for i in np.argsort(count_unq_types)[::-1]],\n [count_unq_types[i] for i in np.argsort(count_unq_types)[::-1]])),\n columns=['enc', 'type', 'count']\n ).to_csv(os.path.join(output_dir, \"_most_frequent_all_types.csv\"), index=False)\n\n logger.info(f\"Total no. of extracted types: {len(all_types):,}\")\n logger.info(f\"Total no. of unique types: {len(unq_types):,}\")\n\n return df_ret, df_params, le_all\n\ndef gen_most_frequent_avl_types(avl_types_dir, output_dir, top_n: int = 1024) -> pd.DataFrame:\n \"\"\"\n It generates top n most frequent available types\n :param top_n:\n :return:\n \"\"\"\n\n aval_types_files = [os.path.join(avl_types_dir, f) for f in os.listdir(avl_types_dir) if os.path.isfile(os.path.join(avl_types_dir, f))]\n\n # All available types across all Python projects\n all_aval_types = []\n\n for f in aval_types_files:\n with open(f, 'r') as f_aval_type:\n all_aval_types = all_aval_types + f_aval_type.read().splitlines()\n\n counter = Counter(all_aval_types)\n\n df = pd.DataFrame.from_records(counter.most_common(top_n), columns=['Types', 'Count'])\n df.to_csv(os.path.join(output_dir, \"top_%d_types.csv\" % top_n), index=False)\n\n return df\n\ndef encode_aval_types(df_param: pd.DataFrame, df_ret: pd.DataFrame, df_var: pd.DataFrame,\n df_aval_types: pd.DataFrame):\n \"\"\"\n It encodes the type of parameters and return according to visible type hints\n \"\"\"\n\n types = df_aval_types['Types'].tolist()\n\n def trans_aval_type(x):\n for i, t in enumerate(types):\n if x in t:\n return i\n return len(types) - 1\n\n # If the arg type doesn't exist in top_n available types, we insert n + 1 into the vector as it represents the other type.\n df_param['param_aval_enc'] = df_param['arg_type'].progress_apply(trans_aval_type)\n df_ret['ret_aval_enc'] = df_ret['return_type'].progress_apply(trans_aval_type)\n df_var['var_aval_enc'] = df_var['var_type'].progress_apply(trans_aval_type)\n\n return df_param, df_ret\n\ndef preprocess_ext_fns(output_dir: str, limit: int = None):\n \"\"\"\n Applies preprocessing steps to the extracted functions\n \"\"\"\n\n logger.info(\"Merging JSON projects\")\n merged_jsons = merge_jsons_to_dict(list_files(os.path.join(output_dir, 'processed_projects'), \".json\"), limit)\n logger.info(\"Creating functions' Dataframe\")\n create_dataframe_fns(output_dir, merged_jsons)\n logger.info(\"Creating variables' Dataframe\")\n create_dataframe_vars(output_dir, merged_jsons)\n logger.info(\"Loading vars & fns Dataframe\")\n processed_proj_fns = pd.read_csv(os.path.join(output_dir, \"all_fns.csv\"), low_memory=False)\n processed_proj_vars = pd.read_csv(os.path.join(output_dir, \"all_vars.csv\"), low_memory=False)\n\n # Split the processed files into train, validation and test sets\n if all(processed_proj_fns['set'].isin(['train', 'valid', 'test'])) and \\\n all(processed_proj_vars['set'].isin(['train', 'valid', 
'test'])):\n logger.info(\"Found the sets split in the input dataset\")\n train_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'train']\n valid_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'valid']\n test_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'test']\n\n train_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'train']\n valid_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'valid']\n test_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'test']\n\n else:\n logger.info(\"Splitting sets randomly\")\n train_files, test_files = train_test_split(pd.DataFrame(processed_proj_fns['file'].unique(), columns=['file']),\n test_size=0.2)\n train_files, valid_files = train_test_split(pd.DataFrame(processed_proj_fns[processed_proj_fns['file'].isin(train_files.to_numpy().flatten())]['file'].unique(),\n columns=['file']), test_size=0.1)\n\n df_train = processed_proj_fns[processed_proj_fns['file'].isin(train_files.to_numpy().flatten())]\n logger.info(f\"No. of functions in train set: {df_train.shape[0]:,}\")\n df_valid = processed_proj_fns[processed_proj_fns['file'].isin(valid_files.to_numpy().flatten())]\n logger.info(f\"No. of functions in validation set: {df_valid.shape[0]:,}\")\n df_test = processed_proj_fns[processed_proj_fns['file'].isin(test_files.to_numpy().flatten())]\n logger.info(f\"No. of functions in test set: {df_test.shape[0]:,}\")\n\n df_var_train = processed_proj_vars[processed_proj_vars['file'].isin(train_files_vars.to_numpy().flatten())]\n logger.info(f\"No. of variables in train set: {df_var_train.shape[0]:,}\")\n df_var_valid = processed_proj_vars[processed_proj_vars['file'].isin(valid_files_vars.to_numpy().flatten())]\n logger.info(f\"No. of variables in validation set: {df_var_valid.shape[0]:,}\")\n df_var_test = processed_proj_vars[processed_proj_vars['file'].isin(test_files_vars.to_numpy().flatten())]\n logger.info(f\"No. 
of variables in test set: {df_var_test.shape[0]:,}\")\n\n assert list(set(df_train['file'].tolist()).intersection(set(df_test['file'].tolist()))) == []\n assert list(set(df_train['file'].tolist()).intersection(set(df_valid['file'].tolist()))) == []\n assert list(set(df_test['file'].tolist()).intersection(set(df_valid['file'].tolist()))) == []\n\n # Exclude variables without a type\n processed_proj_vars = filter_var_wo_type(processed_proj_vars)\n\n logger.info(f\"Making type annotations consistent\")\n # Makes type annotations consistent by removing `typing.`, `t.`, and `builtins` from a type.\n processed_proj_fns, processed_proj_vars = make_types_consistent(processed_proj_fns, processed_proj_vars)\n\n assert any([bool(regex.match(sub_regex, str(t))) for t in processed_proj_fns['return_type']]) == False\n assert any([bool(regex.match(sub_regex, t)) for t in processed_proj_fns['arg_types']]) == False\n assert any([bool(regex.match(sub_regex, t)) for t in processed_proj_vars['var_type']]) == False\n\n # Filters variables with type Any or None\n processed_proj_vars = filter_variables(processed_proj_vars)\n\n # Filters trivial functions such as `__str__` and `__len__` \n processed_proj_fns = filter_functions(processed_proj_fns)\n \n # Extracts type hints for functions' arguments\n processed_proj_fns_params = gen_argument_df(processed_proj_fns)\n\n # Filters out functions: (1) without a return type (2) with the return type of Any or None (3) without a return expression\n processed_proj_fns = filter_return_dp(processed_proj_fns)\n processed_proj_fns = format_df(processed_proj_fns)\n\n logger.info(f\"Resolving type aliases\")\n # Resolves type aliasing and mappings. e.g. `[]` -> `list`\n processed_proj_fns_params, processed_proj_fns, processed_proj_vars = resolve_type_aliasing(processed_proj_fns_params,\n processed_proj_fns,\n processed_proj_vars)\n\n assert any([bool(regex.match(r'^{}$|\\bText\\b|^\\[{}\\]$|^\\[\\]$', t)) for t in processed_proj_fns['return_type']]) == False\n assert any([bool(regex.match(r'^{}$|\\bText\\b|^\\[\\]$', t)) for t in processed_proj_fns_params['arg_type']]) == False\n\n logger.info(f\"Preprocessing parametric types\")\n processed_proj_fns_params, processed_proj_fns, processed_proj_vars = preprocess_parametric_types(processed_proj_fns_params,\n processed_proj_fns,\n processed_proj_vars)\n # Exclude variables without a type\n processed_proj_vars = filter_var_wo_type(processed_proj_vars)\n\n processed_proj_fns, processed_proj_fns_params, le_all = encode_all_types(processed_proj_fns, processed_proj_fns_params,\n processed_proj_vars, output_dir)\n\n # Exclude self from arg names and return expressions\n processed_proj_fns['arg_names_str'] = processed_proj_fns['arg_names'].apply(lambda l: \" \".join([v for v in l if v != 'self']))\n processed_proj_fns['return_expr_str'] = processed_proj_fns['return_expr'].apply(lambda l: \" \".join([regex.sub(r\"self\\.?\", '', v) for v in l]))\n\n # Drop all columns useless for the ML model\n processed_proj_fns = processed_proj_fns.drop(columns=['author', 'repo', 'has_type', 'arg_names', 'arg_types', 'arg_descrs', 'args_occur',\n 'return_expr'])\n\n # Visible type hints\n if exists(join(output_dir, 'MT4Py_VTHs.csv')):\n logger.info(\"Using visible type hints\")\n processed_proj_fns_params, processed_proj_fns = encode_aval_types(processed_proj_fns_params, processed_proj_fns,\n processed_proj_vars,\n pd.read_csv(join(output_dir, 'MT4Py_VTHs.csv')).head(AVAILABLE_TYPES_NUMBER))\n else:\n logger.info(\"Using naive available type hints\")\n 
df_types = gen_most_frequent_avl_types(os.path.join(output_dir, \"extracted_visible_types\"), output_dir, AVAILABLE_TYPES_NUMBER)\n processed_proj_fns_params, processed_proj_fns = encode_aval_types(processed_proj_fns_params, processed_proj_fns,\n processed_proj_vars, df_types)\n\n # Split parameters and returns type dataset by file into a train and test sets\n df_params_train = processed_proj_fns_params[processed_proj_fns_params['file'].isin(train_files.to_numpy().flatten())]\n df_params_valid = processed_proj_fns_params[processed_proj_fns_params['file'].isin(valid_files.to_numpy().flatten())]\n df_params_test = processed_proj_fns_params[processed_proj_fns_params['file'].isin(test_files.to_numpy().flatten())]\n\n df_ret_train = processed_proj_fns[processed_proj_fns['file'].isin(train_files.to_numpy().flatten())]\n df_ret_valid = processed_proj_fns[processed_proj_fns['file'].isin(valid_files.to_numpy().flatten())]\n df_ret_test = processed_proj_fns[processed_proj_fns['file'].isin(test_files.to_numpy().flatten())]\n\n df_var_train = processed_proj_vars[processed_proj_vars['file'].isin(train_files_vars.to_numpy().flatten())]\n df_var_valid = processed_proj_vars[processed_proj_vars['file'].isin(valid_files_vars.to_numpy().flatten())]\n df_var_test = processed_proj_vars[processed_proj_vars['file'].isin(test_files_vars.to_numpy().flatten())]\n\n\n assert list(set(df_params_train['file'].tolist()).intersection(set(df_params_test['file'].tolist()))) == []\n assert list(set(df_params_train['file'].tolist()).intersection(set(df_params_valid['file'].tolist()))) == []\n assert list(set(df_params_test['file'].tolist()).intersection(set(df_params_valid['file'].tolist()))) == []\n\n assert list(set(df_ret_train['file'].tolist()).intersection(set(df_ret_test['file'].tolist()))) == []\n assert list(set(df_ret_train['file'].tolist()).intersection(set(df_ret_valid['file'].tolist()))) == []\n assert list(set(df_ret_test['file'].tolist()).intersection(set(df_ret_valid['file'].tolist()))) == []\n\n assert list(set(df_var_train['file'].tolist()).intersection(set(df_var_test['file'].tolist()))) == []\n assert list(set(df_var_train['file'].tolist()).intersection(set(df_var_valid['file'].tolist()))) == []\n assert list(set(df_var_test['file'].tolist()).intersection(set(df_var_valid['file'].tolist()))) == []\n\n # Store the dataframes and the label encoders\n logger.info(\"Saving preprocessed functions on the disk...\")\n with open(os.path.join(output_dir, \"label_encoder_all.pkl\"), 'wb') as file:\n pickle.dump(le_all, file)\n \n df_params_train.to_csv(os.path.join(output_dir, \"_ml_param_train.csv\"), index=False)\n df_params_valid.to_csv(os.path.join(output_dir, \"_ml_param_valid.csv\"), index=False)\n df_params_test.to_csv(os.path.join(output_dir, \"_ml_param_test.csv\"), index=False)\n\n df_ret_train.to_csv(os.path.join(output_dir, \"_ml_ret_train.csv\"), index=False)\n df_ret_valid.to_csv(os.path.join(output_dir, \"_ml_ret_valid.csv\"), index=False)\n df_ret_test.to_csv(os.path.join(output_dir, \"_ml_ret_test.csv\"), index=False)\n\n df_var_train.to_csv(os.path.join(output_dir, \"_ml_var_train.csv\"), index=False)\n df_var_valid.to_csv(os.path.join(output_dir, \"_ml_var_valid.csv\"), index=False)\n df_var_test.to_csv(os.path.join(output_dir, \"_ml_var_test.csv\"), index=False)","sub_path":"type4py/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":23210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"66975785","text":"# -*- 
coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 08 19:44:14 2017\r\n\r\n@author: Peter\r\n\"\"\"\r\n#NOAA data make sure you download incidents from https://incidentnews.noaa.gov/raw/index\r\n\r\nimport csv\r\nfilename = 'incidents.csv'\r\nlats, lons = [],[]\r\nvolumes = []\r\npollutant = []\r\n\r\nwith open(filename) as f:\r\n reader = csv.reader(f)\r\n \r\n #ignore headers\r\n next(reader)\r\n \r\n for row in reader:\r\n try:\r\n lats.append(float(row[4]))\r\n except ValueError:\r\n row[4]=' '\r\n try:\r\n lons.append(float(row[5]))\r\n except ValueError:\r\n row[5]=' '\r\n try: \r\n volumes.append(float(row[15]))\r\n except ValueError:\r\n row[15]=' ' \r\n #pollutant.append(row[6])\r\n #would like to color code different types of chemicals with dots\r\n\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nV = np.sum(volumes)\r\n\r\nm = Basemap(projection='hammer',lon_0=-100)\r\nm.bluemarble(scale=0.5)\r\nmin_marker_size = 2.5\r\n# Project each spill location once and plot it as a red dot\r\nfor lon, lat in zip(lons, lats):\r\n x, y = m(lon, lat)\r\n m.plot(x, y, 'ro')\r\nplt.title('NOAA locations of Oil and Hazardous Chemical Spills since 1957')\r\nplt.show()\r\nprint( 'minimum', V, 'gallons of primarily oil')","sub_path":"NOAA oil spill map.py","file_name":"NOAA oil spill map.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"57175640","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\"\"\"\nEncapsulates some shared utility classes\n\"\"\"\n\nimport csv\nimport logging\nimport os\nimport time\n\nfrom baseView.baseView import BaseView\nfrom common.desired_caps import appium_desired\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\n\n\nclass Common(BaseView):\n\n cancelBtn = (By.ID, 'android:id/button2')\n skipBtn = (By.ID, 'com.tal.kaoyan:id/tv_skip')\n wemedia_cancle = (By.ID, 'com.tal.kaoyan:id/view_wemedia_cancel')\n\n # Check for the splash-screen skip button\n def check_cancelBtn(self):\n logging.info(\"======check cancelBtn======\")\n try:\n element = self.driver.find_element(*self.cancelBtn)\n except NoSuchElementException:\n logging.info(\"======no cancelBtn======\")\n else:\n logging.info(\"======close cancelBtn======\")\n element.click()\n\n # Check for the upgrade popup\n def check_skipBtn(self):\n logging.info(\"======check skipBtn======\")\n try:\n element = self.driver.find_element(*self.skipBtn)\n except NoSuchElementException:\n logging.info(\"======no skipBtn======\")\n else:\n logging.info(\"======close skipBtn======\")\n element.click()\n\n # Check for the ad popup\n def check_market_ad(self):\n logging.info(\"======check market_ad======\")\n try:\n element = self.driver.find_element(*self.wemedia_cancle)\n except NoSuchElementException:\n logging.info(\"======no market_ad======\")\n else:\n logging.info(\"======close market_ad======\")\n element.click()\n\n # Get the screen size\n def get_size(self):\n x = self.driver.get_window_size()['width']\n y = self.driver.get_window_size()['height']\n return x, y\n\n # Swipe left\n def swipeLeft(self):\n logging.info('======swipe_left======')\n lt = self.get_size()\n x1 = int(lt[0] * 0.9)\n y1 = int(lt[1] * 0.5)\n x2 = int(lt[0] * 0.1)\n self.swipe(x1, y1, x2, y1, 2000)\n\n # Get the current time\n def getTime(self):\n self.now = time.strftime(\"%Y-%m-%d %H-%M-%S\")\n return self.now\n\n # Take a screenshot\n def getScreenShot(self, module):\n time = self.getTime()\n image_file = os.path.dirname(os.path.dirname(__file__)) + '/screenshots/%s_%s.png' % (module, time)\n\n logging.info('get %s screenshot' % module)\n 
self.driver.get_screenshot_as_file(image_file)\n\n # Get data from a csv file\n def get_csv_data(self, csv_file, line):\n logging.info(\"======get_csv_data=======\")\n with open(csv_file, 'r', encoding='utf-8-sig') as file:\n reader = csv.reader(file)\n for index, row in enumerate(reader, 1):\n if index == line:\n return row\n\n\nif __name__ == '__main__':\n driver = appium_desired()\n com = Common(driver)\n com.check_cancelBtn()\n # com.check_skipBtn()\n com.swipeLeft()\n com.getScreenShot('startAPP')\n","sub_path":"AppiumTest/common/common_fun.py","file_name":"common_fun.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"122345670","text":"from psyneulink import *\n\nm1 = TransferMechanism(input_states=[\"InputState A\", \"InputState B\"])\nm2 = TransferMechanism()\nc = Composition()\nc.add_node(m1, required_roles=NodeRole.INPUT)\nc.add_node(m2, required_roles=NodeRole.INPUT)\nc._analyze_graph()\nlvoc = OptimizationControlMechanism(agent_rep=RegressionCFA,\n features=[m1.input_states[0], m1.input_states[1], m2.input_state],\n objective_mechanism=ObjectiveMechanism(\n monitor=[m1, m2]),\n function=GridSearch(max_iterations=1),\n control_signals=[(SLOPE, m1), (SLOPE, m2)])\nc.add_node(lvoc)\ninput_dict = {m1: [[1], [1]], m2: [1]}\n\nc.show_graph(model_based_optimizer_color=True)\n\n# c.run(inputs=input_dict)\n\n","sub_path":"Scripts/Examples/Composition/LVOC Composition.py","file_name":"LVOC Composition.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"345002164","text":"from typing import List\n\n# Declare all the note names in a list that we can rotate through\nNOTE_NAMES: List[str] = [\"C\", \"C#/Db\", \"D\", \"D#/Eb\", \"E\", \"F\", \"F#/Gb\", \"G\", \"G#/Ab\", \"A\", \"A#/Bb\", \"B\"]\n\n# Do, Ra, Mi, Fah, So, Lah, Ti, Do\nMAJOR_SCALE_OFFSET: List[int] = [0, 2, 4, 5, 7, 9, 11, 12]\n\n\ndef capture_user_note() -> str:\n \"\"\"\n Capture the note that user wishes to see the scale for, it will loop until a valid selection is made.\n :return: Name of the note as a string, it will be one of the notes in NOTE_NAMES\n \"\"\"\n user_note: str = NOTE_NAMES[0]\n while True:\n print(\"The Known Musical Notes are\")\n for n in NOTE_NAMES:\n print(\"\\t{}\".format(n), end = \" \")\n print(\"\")\n user_note = input(\"Please type in your selection: \")\n if user_note in NOTE_NAMES:\n print(\"Thanks\")\n break\n else:\n print(\"Invalid selection, have another go\")\n return user_note\n\n\ndef get_major_scale_notes(starting_note: str) -> List[str]:\n notes: List[str] = []\n # Find the starting note, defaults to C if the given note is invalid\n starting_index: int = 0\n try:\n starting_index: int = NOTE_NAMES.index(starting_note)\n except ValueError:\n print(\"Invalid note given '{}'\".format(starting_note))\n\n for relative_offset in MAJOR_SCALE_OFFSET:\n offset_index: int = (starting_index + relative_offset) % len(NOTE_NAMES)\n notes.append(NOTE_NAMES[offset_index])\n\n return notes\n\n\nmy_note: str = capture_user_note()\nmajor_scale = get_major_scale_notes(my_note)\nprint(\"{} Major is {}\".format(my_note, major_scale))\n\ninvalid_major_scale = get_major_scale_notes(\"fish\")\nprint(\"Fish Major? 
{}\".format(invalid_major_scale))\n","sub_path":"CORE_Programming/practicalProgramming1/subroutines2_major_scales.py","file_name":"subroutines2_major_scales.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535392839","text":"import sys\nimport re\n\npat_sub = [(r\".*(Hello|Hi)\\s([A-z]+).*\", r\"Hi stranger\"),\n (r\".*How are you\\s*(doing)?.*\", r\"i am fine, and you?\"),\n (r\".*I ('m|am) (ok|doing well).*\", r\"i am glad to hear you are \\2\")]\n\n\ndef get_eliza_response(statement):\n response = None\n for p_s in pat_sub:\n pat, sub = p_s\n p = re.compile(pat)\n response = p.sub(sub, statement)\n if response != statement:\n return response\n else:\n response = None\n\n if response is None:\n return \"Sorry i do not understand you,\" \\\n \"please re-formulate the statement\"\n\n\ndef main(argv):\n if len(argv)<=1:\n raise Exception(\"Need 2 or more arguments\")\n statement = argv[1]\n response = get_eliza_response(statement)\n print(response)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n","sub_path":"dans_book/chapter2/eliza.py","file_name":"eliza.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"482990819","text":"\"\"\"\nModule for the ConversationTabs\n\nA ConversationTab is a direct chat between two JIDs, outside of a room.\n\nThere are two different instances of a ConversationTab:\n- A DynamicConversationTab that used to implement XEP-0296 (best\n practices for resource locking), which now stays on the bare JID at\n any time. This is the default.\n- A StaticConversationTab that will stay focused on one resource all\n the time.\n\n\"\"\"\nimport curses\nimport logging\nfrom datetime import datetime\nfrom typing import Dict, Callable\n\nfrom slixmpp import JID, InvalidJID, Message as SMessage\n\nfrom poezio.tabs.basetabs import OneToOneTab, Tab\n\nfrom poezio import common\nfrom poezio import tabs\nfrom poezio import windows\nfrom poezio import xhtml\nfrom poezio.config import config, get_image_cache\nfrom poezio.core.structs import Command\nfrom poezio.decorators import refresh_wrapper\nfrom poezio.roster import roster\nfrom poezio.theming import get_theme, dump_tuple\nfrom poezio.decorators import command_args_parser\nfrom poezio.ui.types import InfoMessage, Message\nfrom poezio.text_buffer import CorrectionError\n\nlog = logging.getLogger(__name__)\n\n\nclass ConversationTab(OneToOneTab):\n \"\"\"\n The tab containing a normal conversation (not from a MUC)\n Must not be instantiated, use Static or Dynamic version only.\n \"\"\"\n plugin_commands: Dict[str, Command] = {}\n plugin_keys: Dict[str, Callable] = {}\n additional_information: Dict[str, Callable[[str], str]] = {}\n message_type = 'chat'\n\n def __init__(self, core, jid, initial=None):\n OneToOneTab.__init__(self, core, jid, initial=initial)\n self.nick = None\n self.nick_sent = False\n self.state = 'normal'\n self.upper_bar = windows.ConversationStatusMessageWin()\n self.input = windows.MessageInput()\n # keys\n self.key_func['^I'] = self.completion\n # commands\n self.register_command(\n 'version',\n self.command_version,\n desc=\n 'Get the software version of the current interlocutor (usually its XMPP client and Operating System).',\n shortdesc='Get the software version of the user.')\n self.register_command(\n 'info',\n self.command_info,\n shortdesc='Get the status of the contact.')\n self.register_command(\n 
'last_activity',\n self.command_last_activity,\n usage='[jid]',\n desc='Get the last activity of the given or the current contact.',\n shortdesc='Get the activity.',\n completion=self.core.completion.last_activity)\n self.register_command(\n 'invite',\n self.core.command.impromptu,\n desc='Invite people into an impromptu room.',\n shortdesc='Invite other users to the discussion',\n completion=self.core.completion.impromptu)\n self.update_commands()\n self.update_keys()\n\n @property\n def general_jid(self):\n return self.jid.bare\n\n def get_info_header(self):\n raise NotImplementedError\n\n @staticmethod\n @refresh_wrapper.always\n def add_information_element(plugin_name, callback):\n \"\"\"\n Lets a plugin add its own information to the ConversationInfoWin\n \"\"\"\n ConversationTab.additional_information[plugin_name] = callback\n\n @staticmethod\n @refresh_wrapper.always\n def remove_information_element(plugin_name):\n del ConversationTab.additional_information[plugin_name]\n\n def completion(self):\n self.complete_commands(self.input)\n\n def handle_message(self, message: SMessage, display: bool = True):\n \"\"\"Handle a received message.\n\n The message can come from us (carbon copy).\n \"\"\"\n use_xhtml = config.get_by_tabname(\n 'enable_xhtml_im',\n message['from'].bare\n )\n tmp_dir = get_image_cache()\n # normal message, we are the recipient\n if message['to'].bare == self.core.xmpp.boundjid.bare:\n conv_jid = message['from']\n jid = conv_jid\n color = get_theme().COLOR_REMOTE_USER\n self.last_remote_message = datetime.now()\n remote_nick = self.get_nick()\n # we wrote the message (happens with carbons)\n elif message['from'].bare == self.core.xmpp.boundjid.bare:\n conv_jid = message['to']\n jid = self.core.xmpp.boundjid\n color = get_theme().COLOR_OWN_NICK\n remote_nick = self.core.own_nick\n # we are not part of that message, drop it\n else:\n return\n\n self.core.events.trigger('conversation_msg', message, self)\n\n if not message['body']:\n return\n body = xhtml.get_body_from_message_stanza(\n message, use_xhtml=use_xhtml, extract_images_to=tmp_dir)\n delayed, date = common.find_delayed_tag(message)\n\n replaced = False\n if message.get_plugin('replace', check=True):\n replaced_id = message['replace']['id']\n if replaced_id and config.get_by_tabname('group_corrections',\n conv_jid.bare):\n try:\n replaced = self.modify_message(\n body,\n replaced_id,\n message['id'],\n time=date,\n jid=jid,\n nickname=remote_nick)\n except CorrectionError:\n log.debug('Unable to correct the message: %s', message)\n if not replaced:\n msg = Message(\n txt=body,\n time=date,\n nickname=remote_nick,\n nick_color=color,\n history=delayed,\n identifier=message['id'],\n jid=jid,\n )\n if display:\n self.add_message(msg)\n else:\n self.log_message(msg)\n\n @refresh_wrapper.always\n @command_args_parser.raw\n def command_say(self, line: str, attention: bool = False, correct: bool = False):\n msg: SMessage = self.core.xmpp.make_message(\n mto=self.get_dest_jid(),\n mfrom=self.core.xmpp.boundjid\n )\n msg['type'] = 'chat'\n msg['body'] = line\n if not self.nick_sent:\n msg['nick'] = self.core.own_nick\n self.nick_sent = True\n # trigger the event BEFORE looking for colors.\n # and before displaying the message in the window\n # This lets a plugin insert \\x19xxx} colors, that will\n # be converted in xhtml.\n self.core.events.trigger('conversation_say', msg, self)\n if not msg['body']:\n return\n replaced = False\n if correct or msg['replace']['id']:\n msg['replace']['id'] = 
self.last_sent_message['id'] # type: ignore\n else:\n del msg['replace']\n if msg['body'].find('\\x19') != -1:\n msg.enable('html')\n msg['html']['body'] = xhtml.poezio_colors_to_html(msg['body'])\n msg['body'] = xhtml.clean_text(msg['body'])\n if config.get_by_tabname('send_chat_states', self.general_jid):\n if self.inactive:\n self.send_chat_state('inactive', always_send=True)\n else:\n msg['chat_state'] = 'active'\n if attention:\n msg['attention'] = True\n self.core.events.trigger('conversation_say_after', msg, self)\n if not msg['body']:\n return\n self.set_last_sent_message(msg, correct=correct)\n self.core.handler.on_normal_message(msg)\n # Our receipts slixmpp hack\n msg._add_receipt = True # type: ignore\n msg.send()\n self.cancel_paused_delay()\n\n @command_args_parser.quoted(0, 1)\n def command_last_activity(self, args):\n \"\"\"\n /last_activity [jid]\n \"\"\"\n if args and args[0]:\n return self.core.command.last_activity(args[0])\n\n def callback(iq):\n if iq['type'] != 'result':\n if iq['error']['type'] == 'auth':\n self.core.information(\n 'You are not allowed to see the activity of this contact.',\n 'Error')\n else:\n self.core.information('Error retrieving the activity',\n 'Error')\n return\n seconds = iq['last_activity']['seconds']\n status = iq['last_activity']['status']\n from_ = iq['from']\n msg = '\\x19%s}The last activity of %s was %s ago%s'\n user = ''\n try:\n user = JID(from_).user\n except InvalidJID:\n pass\n\n if not user:\n msg = '\\x19%s}The uptime of %s is %s.' % (\n dump_tuple(get_theme().COLOR_INFORMATION_TEXT), from_,\n common.parse_secs_to_str(seconds))\n else:\n msg = '\\x19%s}The last activity of %s was %s ago%s' % (\n dump_tuple(get_theme().COLOR_INFORMATION_TEXT),\n from_,\n common.parse_secs_to_str(seconds),\n (' and their last status was %s' % status)\n if status else '',\n )\n self.add_message(InfoMessage(msg))\n self.core.refresh_window()\n\n self.core.xmpp.plugin['xep_0012'].get_last_activity(\n self.get_dest_jid(), callback=callback)\n\n @refresh_wrapper.conditional\n @command_args_parser.ignored\n def command_info(self):\n contact = roster[self.get_dest_jid()]\n try:\n jid = JID(self.get_dest_jid())\n except InvalidJID:\n jid = JID('')\n if contact:\n if jid.resource:\n resource = contact[jid.full]\n else:\n resource = contact.get_highest_priority_resource()\n else:\n resource = None\n if resource:\n status = (\n 'Status: %s' % resource.status) if resource.status else ''\n self.add_message(\n InfoMessage(\n \"Show: %(show)s, %(status)s\" % {\n 'show': resource.presence or 'available',\n 'status': status,\n }\n ),\n )\n return True\n self.add_message(\n InfoMessage(\"No information available\"),\n )\n return True\n\n @command_args_parser.quoted(0, 1)\n async def command_version(self, args):\n \"\"\"\n /version [jid]\n \"\"\"\n if args:\n return await self.core.command.version(args[0])\n jid = self.jid\n if not jid.resource:\n if jid in roster:\n resource = roster[jid].get_highest_priority_resource()\n jid = resource.jid if resource else jid\n iq = await self.core.xmpp.plugin['xep_0092'].get_version(jid)\n self.core.handler.on_version_result(iq)\n\n def resize(self):\n self.need_resize = False\n if self.size.tab_degrade_y:\n display_bar = False\n info_win_height = 0\n tab_win_height = 0\n bar_height = 0\n else:\n display_bar = True\n info_win_height = self.core.information_win_size\n tab_win_height = Tab.tab_win_height()\n bar_height = 1\n\n self.text_win.resize(\n self.height - 2 - bar_height - info_win_height - tab_win_height,\n self.width, 
bar_height, 0, self._text_buffer,\n force=self.ui_config_changed\n )\n self.ui_config_changed = False\n if display_bar:\n self.upper_bar.resize(1, self.width, 0, 0)\n self.get_info_header().resize(\n 1, self.width, self.height - 2 - info_win_height - tab_win_height,\n 0)\n self.input.resize(1, self.width, self.height - 1, 0)\n\n def refresh(self):\n if self.need_resize:\n self.resize()\n log.debug(' TAB Refresh: %s', self.__class__.__name__)\n display_bar = display_info_win = not self.size.tab_degrade_y\n\n self.text_win.refresh()\n\n if display_bar:\n self.upper_bar.refresh(self.get_dest_jid(),\n roster[self.get_dest_jid()])\n self.get_info_header().refresh(\n self.get_dest_jid(), roster[self.get_dest_jid()], self.text_win,\n self.chatstate, ConversationTab.additional_information)\n\n if display_info_win:\n self.info_win.refresh()\n self.refresh_tab_win()\n self.input.refresh()\n\n def refresh_info_header(self):\n self.get_info_header().refresh(\n self.get_dest_jid(), roster[self.get_dest_jid()], self.text_win,\n self.chatstate, ConversationTab.additional_information)\n self.input.refresh()\n\n def get_nick(self):\n contact = roster[self.jid.bare]\n if contact:\n return contact.name or self.jid.user\n else:\n if self.nick:\n return self.nick\n return self.jid.user or self.jid.domain\n\n def on_input(self, key, raw):\n if not raw and key in self.key_func:\n self.key_func[key]()\n return False\n self.input.do_command(key, raw=raw)\n empty_after = self.input.get_text() == '' or (\n self.input.get_text().startswith('/')\n and not self.input.get_text().startswith('//'))\n self.send_composing_chat_state(empty_after)\n return False\n\n def on_lose_focus(self):\n contact = roster[self.get_dest_jid()]\n try:\n jid = JID(self.get_dest_jid())\n except InvalidJID:\n jid = JID('')\n if contact:\n if jid.resource:\n resource = contact[jid.full]\n else:\n resource = contact.get_highest_priority_resource()\n else:\n resource = None\n if self.input.text:\n self.state = 'nonempty'\n else:\n self.state = 'normal'\n self.text_win.remove_line_separator()\n self.text_win.add_line_separator(self._text_buffer)\n if config.get_by_tabname('send_chat_states', self.general_jid):\n if resource:\n self.send_chat_state('inactive')\n self.check_scrolled()\n\n def on_gain_focus(self):\n contact = roster[self.get_dest_jid()]\n try:\n jid = JID(self.get_dest_jid())\n except InvalidJID:\n jid = JID('')\n if contact:\n if jid.resource:\n resource = contact[jid.full]\n else:\n resource = contact.get_highest_priority_resource()\n else:\n resource = None\n\n self.state = 'current'\n curses.curs_set(1)\n if (config.get_by_tabname('send_chat_states', self.general_jid)\n and (not self.input.get_text()\n or not self.input.get_text().startswith('//'))):\n if resource:\n self.send_chat_state('active')\n\n def on_info_win_size_changed(self):\n if self.core.information_win_size >= self.height - 3:\n return\n self.text_win.resize(\n self.height - 3 - self.core.information_win_size -\n Tab.tab_win_height(), self.width, 1, 0)\n self.get_info_header().resize(\n 1, self.width, self.height - 2 - self.core.information_win_size -\n Tab.tab_win_height(), 0)\n\n def get_text_window(self):\n return self.text_win\n\n def on_close(self):\n Tab.on_close(self)\n if config.get_by_tabname('send_chat_states', self.general_jid):\n self.send_chat_state('gone')\n\n def matching_names(self):\n res = []\n jid = self.jid\n res.append((2, jid.bare))\n res.append((1, jid.user))\n contact = roster[self.name]\n if contact and contact.name:\n res.append((0, 
contact.name))\n return res\n\n\nclass DynamicConversationTab(ConversationTab):\n \"\"\"\n A conversation tab associated with one bare JID. It used to\n support resource locking (as described in XEP-0296), but that was a\n bad idea so it has been removed.\n Only one DynamicConversationTab can be opened for a given jid.\n \"\"\"\n plugin_commands: Dict[str, Command] = {}\n plugin_keys: Dict[str, Callable] = {}\n\n def __init__(self, core, jid, initial=None):\n self.locked_resource = None\n ConversationTab.__init__(self, core, jid, initial=initial)\n self.jid.resource = None\n self.info_header = windows.DynamicConversationInfoWin()\n self.register_command(\n 'unlock', self.unlock_command, shortdesc='Deprecated, do nothing.')\n self.resize()\n self.update_commands()\n self.update_keys()\n\n def get_info_header(self):\n return self.info_header\n\n def lock(self, resource):\n pass\n\n def unlock_command(self, arg=None):\n pass\n\n def unlock(self, from_=None):\n pass\n\n def get_dest_jid(self):\n \"\"\"\n Returns the bare jid.\n \"\"\"\n return self.jid.bare\n\n def refresh(self):\n \"\"\"\n Different from the parent class only for the info_header object.\n \"\"\"\n if self.need_resize:\n self.resize()\n log.debug(' TAB Refresh: %s', self.__class__.__name__)\n display_bar = display_info_win = not self.size.tab_degrade_y\n\n self.text_win.refresh()\n if display_bar:\n self.upper_bar.refresh(self.jid.bare, roster[self.jid.bare])\n displayed_jid = self.jid.bare\n self.get_info_header().refresh(displayed_jid, roster[self.jid.bare],\n self.text_win, self.chatstate,\n ConversationTab.additional_information)\n if display_info_win:\n self.info_win.refresh()\n\n self.refresh_tab_win()\n self.input.refresh()\n\n def refresh_info_header(self):\n \"\"\"\n Different from the parent class only for the info_header object.\n \"\"\"\n displayed_jid = self.jid.bare\n self.get_info_header().refresh(displayed_jid, roster[self.jid.bare],\n self.text_win, self.chatstate,\n ConversationTab.additional_information)\n self.input.refresh()\n\n\nclass StaticConversationTab(ConversationTab):\n \"\"\"\n A conversation tab associated with one Full JID. 
It cannot be locked to\n a different resource or unlocked.\n \"\"\"\n plugin_commands: Dict[str, Command] = {}\n plugin_keys: Dict[str, Callable] = {}\n\n def __init__(self, core, jid, initial=None):\n ConversationTab.__init__(self, core, jid, initial=initial)\n assert jid.resource\n self.info_header = windows.ConversationInfoWin()\n self.resize()\n self.update_commands()\n self.update_keys()\n\n def init_logs(self, initial=None) -> None:\n # Disable local logs because…\n pass\n\n def get_info_header(self):\n return self.info_header\n","sub_path":"poezio/tabs/conversationtab.py","file_name":"conversationtab.py","file_ext":"py","file_size_in_byte":19247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"197208563","text":"'''\nThe top level interface used to translate configuration data back to the\ncorrect cloud modules\n'''\n# Import python libs\nimport os\nimport copy\nimport multiprocessing\n\n# Import saltcloud libs\nimport saltcloud.utils\nimport saltcloud.loader\nimport salt.client\n\n# Import third party libs\nimport yaml\n\nclass Cloud(object):\n '''\n An object for the creation of new images\n '''\n def __init__(self, opts):\n self.opts = opts\n self.clouds = saltcloud.loader.clouds(self.opts)\n\n def provider(self, vm_):\n '''\n Return the top level module that will be used for the given vm data\n set\n '''\n if 'provider' in vm_:\n if '{0}.create'.format(vm_['provider']) in self.clouds:\n return vm_['provider']\n if 'provider' in self.opts:\n if '{0}.create'.format(self.opts['provider']) in self.clouds:\n return self.opts['provider']\n\n def map_providers(self):\n '''\n Return a mapping of what named vms are running on what vm providers\n based on what providers are defined in the configs and vms\n '''\n provs = set()\n pmap = {}\n for vm_ in self.opts['vm']:\n provs.add(self.provider(vm_))\n for prov in provs:\n fun = '{0}.list_nodes'.format(prov)\n if not fun in self.clouds:\n print('Public cloud provider {0} is not available'.format(\n self.provider(vm_))\n )\n continue\n pmap[prov] = self.clouds[fun]()\n return pmap\n\n def create_all(self):\n '''\n Create/Verify the vms in the vm data\n '''\n for vm_ in self.opts['vm']:\n self.create(vm_)\n\n def create(self, vm_):\n '''\n Create a single vm\n '''\n fun = '{0}.create'.format(self.provider(vm_))\n if not fun in self.clouds:\n print('Public cloud provider {0} is not available'.format(\n self.provider(vm_))\n )\n priv, pub = saltcloud.utils.gen_keys(\n saltcloud.utils.get_option('keysize', self.opts, vm_)\n )\n saltcloud.utils.accept_key(self.opts['pki_dir'], pub, vm_['name'])\n vm_['pub_key'] = pub\n vm_['priv_key'] = priv\n self.clouds['{0}.create'.format(self.provider(vm_))](vm_)\n\n def run_profile(self):\n '''\n Parse over the options passed on the command line and determine how to\n handle them\n '''\n pmap = self.map_providers()\n for name in self.opts['names']:\n for vm_ in self.opts['vm']:\n if vm_['profile'] == self.opts['profile']:\n # It all checks out, make the vm\n if name in pmap[self.provider(vm_)]:\n # The specified vm already exists, don't make it anew\n continue\n vm_['name'] = name\n if self.opts['parallel']:\n multiprocessing.Process(\n target=lambda: self.create(vm_),\n ).start()\n else:\n self.create(vm_)\n\n\nclass Map(Cloud):\n '''\n Create a vm stateful map execution object\n '''\n def __init__(self, opts):\n Cloud.__init__(self, opts)\n self.map = self.read()\n\n def read(self):\n '''\n Read in the specified map file and return the map 
structure\n '''\n if not self.opts['map']:\n return {}\n if not os.path.isfile(self.opts['map']):\n return {}\n try:\n with open(self.opts['map'], 'rb') as fp_:\n map_ = yaml.load(fp_.read())\n except Exception:\n return {}\n if 'include' in map_:\n map_ = salt.config.include_config(map_, self.opts['map'])\n return map_\n\n def run_map(self):\n '''\n Execute the contents of the vm map\n '''\n for profile in self.map:\n for name in self.map[profile]:\n for vm_ in self.opts['vm']:\n if vm_['profile'] == profile:\n tvm = copy.deepcopy(vm_)\n tvm['name'] = name\n if self.opts['parallel']:\n multiprocessing.Process(\n target=lambda: self.create(tvm)\n ).start()\n else:\n self.create(tvm)\n","sub_path":"saltcloud/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"185462015","text":"# Discord bot, with Python.\n\n# Modules.\nimport discord\nfrom discord.ext import commands\n\nbot = commands.Bot(command_prefix=\"bot\", description=\"The new jajobot\")\n\n@bot.command()\nasync def ping(ctx):\n await ctx.send('pong')\n\n@bot.event\nasync def on_ready():\n print(\"bot on duty\")\n\nbot.run('token here') # Token removed, you know why ;)\n\n# another push jjijij\n# Commands.\n# $pip install pipenv\n# $pipenv install discord.","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"302331081","text":"import quandl\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Pricing Data of Zooplus from the Frankfurt Stock Exchange Dataset using Quandl\nEquity = quandl.get(\"FSE/ZO1_X\", authtoken=\"authtoken\",paginate=True)\n\n# Making short term and long term windows\nshortWindow = 40\nlongWindow = 100\n\nsignals = pd.DataFrame(index=Equity.index)\nsignals['signal'] = 0.0\n\n# Short term moving average of short window\nsignals['short_window_moving_average'] = Equity['Close'].rolling(window=shortWindow,min_periods=1,center=False).mean()\n\n# Long term moving average of long window\nsignals['long_window_moving_average'] = Equity['Close'].rolling(window=longWindow,min_periods=1,center=False).mean()\n\n# Create signals\nsignals['signal'][shortWindow:] = np.where(signals['short_window_moving_average'][shortWindow:] > signals['long_window_moving_average'][shortWindow:],1.0,0.0)\n\n# Generate trading orders\nsignals['positions'] = signals['signal'].diff()\n\n# Initialize the plot figure\nfig = plt.figure(figsize=(20,15))\nax1 = fig.add_subplot(111,ylabel=\"Price in $\")\n\n#Plot the closing price\nEquity['Close'].plot(ax=ax1,color=\"black\",lw=2.)\n\n# Plot the moving averages for short and long windows\nsignals[['short_window_moving_average','long_window_moving_average']].plot(ax=ax1,lw=2.)\n\n# Plot the buy signals\nax1.plot(signals.loc[signals.positions == 1.0].index,signals.short_window_moving_average[signals.positions == 1.0],'^',markersize=20,color='g')\n\n# Plot the sell signals\nax1.plot(signals.loc[signals.positions == -1.0].index,signals.short_window_moving_average[signals.positions == -1.0],'v',markersize=20,color='r')\n\n# Show the plot\nplt.show()\n\n# Set the initial capital \ninitial_capital= float(100000)\n\n# Create a DataFrame 'positions'\npositions = pd.DataFrame(index=signals.index).fillna(0.0) \n\n# Buy a 1000 shares\npositions['Position'] = 1000*signals['signal'] \n\n# Initialize the portfolio 
with value owned\nportfolio = positions.multiply(Equity['Close'], axis=0) \n\n# Store the difference in shares owned\npos_diff = positions.diff() \n\n# Add 'holdings' to portfolio \nportfolio['holdings'] = (positions.multiply(Equity['Close'], axis=0)).sum(axis=1) \n\n# Add 'cash' to portfolio \nportfolio['cash'] = initial_capital - (pos_diff.multiply(Equity['Close'], axis=0)).sum(axis=1).cumsum() \n\n# Add 'total' to portfolio \nportfolio['total'] = portfolio['cash'] + portfolio['holdings'] \n\n# Add 'returns' to portfolio \nportfolio['returns'] = portfolio['total'].pct_change() \ndel portfolio['Position'] \n\n# Initialize the plot figure\nfig2 = plt.figure(figsize=(20,15))\nax1 = fig2.add_subplot(111,ylabel=\"Portfolio value in $\")\n\n#Plot the closing price\nportfolio['total'].plot(ax=ax1,color=\"black\",lw=2.)\n\n# PLot the moving averages for short and long windows\nsignals[['short_window_moving_average','long_window_moving_average']].plot(ax=ax1,lw=2.)\n\n# Plot the buy signals\nax1.plot(portfolio.loc[signals.positions == 1.0].index,portfolio.total[signals.positions == 1.0],'^',markersize=20,color='g')\n\n# Plot the sell signals\nax1.plot(portfolio.loc[signals.positions == -1.0].index,portfolio.total[signals.positions == -1.0],'v',markersize=20,color='r')\n\n# Show the plot\nplt.show()\n\n# Total Portfolio value and absolute return\nprint(\"The total portfolio value is \\n{}\".format(portfolio['total'].tail(1)))\nprint(\"The absolute return is \\n{}\".format(((portfolio['total'].tail(1)/float(100000))-float(1))*100))\n","sub_path":"Moving Average Crossover/smastrategy.py","file_name":"smastrategy.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574746274","text":"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create masked LM/next sentence masked_lm TF examples for BERT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport json\nimport collections\nimport random\nimport tokenization\nimport tensorflow as tf\nimport spacy\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"input_file\", None,\n \"Input raw text file (or comma-separated list of files).\")\n\nflags.DEFINE_string(\n \"output_file\", None,\n \"Output TF example file (or comma-separated list of files).\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. 
Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\"max_seq_length\", 128, \"Maximum sequence length.\")\n\nflags.DEFINE_integer(\"max_predictions_per_seq\", 20,\n \"Maximum number of masked LM predictions per sequence.\")\n\nflags.DEFINE_integer(\"random_seed\", 12345, \"Random seed for data generation.\")\n\nflags.DEFINE_integer(\n \"dupe_factor\", 10,\n \"Number of times to duplicate the input data (with different masks).\")\n\nflags.DEFINE_float(\"masked_lm_prob\", 0.15, \"Masked LM probability.\")\n\nflags.DEFINE_float(\n \"short_seq_prob\", 0.1,\n \"Probability of creating sequences which are shorter than the \"\n \"maximum length.\")\n\nnlp = spacy.load('en_core_web_lg')\n\n\nclass TrainingInstance(object):\n \"\"\"A single training instance (sentence pair).\"\"\"\n\n def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,\n is_random_next, direction):\n self.tokens = tokens\n self.segment_ids = segment_ids\n self.is_random_next = is_random_next\n self.masked_lm_positions = masked_lm_positions\n self.masked_lm_labels = masked_lm_labels\n\n self.direction = direction\n\n def __str__(self):\n s = \"\"\n s += \"tokens: %s\\n\" % (\" \".join(\n [tokenization.printable_text(x) for x in self.tokens]))\n s += \"segment_ids: %s\\n\" % (\" \".join([str(x) for x in self.segment_ids]))\n s += \"is_random_next: %s\\n\" % self.is_random_next\n s += \"direction: %s\\n\" % self.direction\n s += \"masked_lm_positions: %s\\n\" % (\" \".join(\n [str(x) for x in self.masked_lm_positions]))\n s += \"masked_lm_labels: %s\\n\" % (\" \".join(\n [tokenization.printable_text(x) for x in self.masked_lm_labels]))\n s += \"\\n\"\n return s\n\n def __repr__(self):\n return self.__str__()\n\n\ndef write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n while len(segment_ids) < max_seq_length:\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n while len(masked_lm_ids) < max_predictions_per_seq:\n masked_lm_ids.append(0)\n while len(masked_lm_weights) < max_predictions_per_seq:\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n sentence_direction = instance.direction\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n 
features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n features[\"sentence_direction\"] = create_int_feature([sentence_direction])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 2:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", total_written)\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n doc = nlp(' '.join(doc_tokens))\n tmp = []\n for sent in doc.sents:\n if sent.text:\n if len(sent.text.split(\" \")) > 4:\n tmp.append(sent.text)\n examples.append(tmp)\n # if len(examples) > 1:\n # break\n\n return examples\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\ndef create_float_feature(values):\n feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return feature\n\n\ndef create_training_instances_squad(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n # all_documents = []\n # Input file format:\n # json file from SQUAD2\n all_documents = read_squad_examples(input_files[0], is_training=True)\n # json file from SQUAD1\n if (len(input_files) > 1):\n all_documents += read_squad_examples(input_files[1], is_training=True)\n if (len(input_files) > 2):\n with tf.gfile.GFile(input_files[2], \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n\n doc = nlp(line)\n tmp = []\n for sent in doc.sents:\n if sent.text:\n if len(sent.text.split(\" \")) > 4:\n tmp.append(sent.text)\n all_documents.append(tmp)\n\n # Remove empty documents and at least 8 words\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n 
create_instances_from_document_squad(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng, tokenizer))\n\n rng.shuffle(instances)\n return instances\n\n\ndef create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document_squad(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng, tokenizer))\n\n rng.shuffle(instances)\n return instances\n\n\ndef create_instances_from_document_squad(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng, tokenizer):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n # Account for [CLS], 2 [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(10, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n 
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n # case = -1\n first_segment_length = 0\n # times1 = 0\n # times2 = 0\n while i < len(document):\n segment = document[i].split(\" \")\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n # forward =1\n # unrelated=2\n # backward=0\n direction = 1\n # reduced .3 to .2 as we have direction\n if len(document) == 1 or len(current_chunk) == 1 or rng.random() < 0.2:\n is_random_next = True\n direction = 2\n target_b_length = target_seq_length - len(tokens_a)\n # times1 += 1\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n\n random_document_index = rng.randint(0, len(all_documents) - 1)\n while random_document_index == document_index:\n random_document_index = rng.randint(0, len(all_documents) - 1)\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b += (random_document[j].split(\" \"))\n if len(tokens_b) >= target_b_length:\n break\n\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n direction = 1\n for j in range(a_end, len(current_chunk)):\n tokens_b += (current_chunk[j])\n # times2 += 1\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 2\n\n assert len(tokens_b) >= 2\n # only go backward if forward\n if direction == 1:\n # 40% case do backward\n if rng.random() < 0.4:\n tmp_token = tokens_a\n tokens_a = tokens_b\n tokens_b = tmp_token\n direction = 0\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n if token:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n if token:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n assert len(segment_ids) == len(tokens)\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng, tokenizer)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n direction=direction,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n\n i += 1\n # print(\"*******times******\")\n # print(times1, times2)\n return instances\n\n\nMaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\",\n [\"index\", \"label\"])\n\n\ndef create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng, tokenizer):\n 
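# A note on the masking scheme implemented below: unlike stock BERT's
# 80/10/10 split, tokens that spaCy tags as named entities or nouns
# (collected into cand_indexes_final1) are replaced by [MASK] 50% of the
# time, while ordinary tokens are masked only 20% of the time; a selected
# candidate that is not masked keeps its original lower-cased form with
# probability 0.7 and is swapped for a random vocabulary word with
# probability 0.3. `nlp` is assumed to be a module-level spaCy pipeline
# defined earlier in this file (it is also used by read_squad_examples).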
\"\"\"Creates the predictions for the masked LM objective.\"\"\"\n output_tokens = []\n cand_indexes_tmp = []\n text = \"\"\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n output_tokens.append(token)\n continue\n cand_indexes_tmp.append(i)\n output_tokens.append(token.lower())\n text += token + \" \"\n\n num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))\n\n ## NER part\n cand_indexes_final = []\n cand_indexes_final1 = []\n cand_indexes_final2 = []\n ner = nlp(text)\n word_type = dict()\n ents = []\n nouns = []\n for token in ner:\n word_type[token.text.lower()] = token\n if token.pos_ == \"NOUN\" and len(token) > 2:\n nouns.append(token.text.lower())\n for entities in ner.ents:\n for word in tokenizer.tokenize(entities.text):\n if word in word_type:\n if not word_type[word].is_punct and word_type[word].pos_ != \"CCONJ\" and \\\n word_type[word].pos_ != \"DET\" and len(word) > 2:\n ents.append(word)\n\n # if ents.__len__() < max_predictions_per_seq // 3:\n # for token in ner:\n # word = token.lower()\n # if rng.random() > 0.5 and word not in ents :\n # ents.append(word)\n\n for i in cand_indexes_tmp:\n if tokens[i].lower() in ents:\n cand_indexes_final1.append(i)\n\n if i - 1 not in cand_indexes_final1 and tokens[i - 1] not in ['[CLS]', '[SEP]'] and rng.random() > .5:\n cand_indexes_final1.append(i - 1)\n if i + 1 not in cand_indexes_final1 and tokens[i - 1] not in ['[CLS]', '[SEP]'] and rng.random() > .5:\n cand_indexes_final1.append(i + 1)\n elif ents.__len__() < max_predictions_per_seq // 3 and tokens[i].lower() in nouns:\n if rng.random() > 0.75 and i not in cand_indexes_final1:\n cand_indexes_final1.append(i)\n\n if i not in cand_indexes_final1:\n cand_indexes_final2.append(i)\n rng.shuffle(cand_indexes_final2)\n rng.shuffle(cand_indexes_final1)\n cand_indexes_final = cand_indexes_final1 + cand_indexes_final2\n\n masked_lms = []\n covered_indexes = []\n for index in cand_indexes_final:\n if len(masked_lms) >= num_to_predict:\n break\n if index in covered_indexes:\n continue\n covered_indexes.append(index)\n\n masked_token = None\n\n probab = 0.8\n if index in cand_indexes_final1:\n probab = 0.5\n # 50/30% of the time, keep original\n if rng.random() > probab:\n masked_token = \"[MASK]\"\n else:\n # 70% of the time, keep original\n if rng.random() > 0.3:\n masked_token = tokens[index].lower()\n # 30% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)].lower()\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index].lower()))\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label.lower())\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\n\ndef main(_):\n 
tf.logging.set_verbosity(tf.logging.INFO)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n # CaseSensitivetokenizer = tokenization.FullTokenizer(\n # vocab_file=FLAGS.vocab_file, do_lower_case=False)\n # CaseSensitivetokenizer.tokenize()\n input_files = []\n for input_pattern in FLAGS.input_file.split(\",\"):\n input_files.extend(tf.gfile.Glob(input_pattern))\n\n tf.logging.info(\"*** Reading from input files ***\")\n for input_file in input_files:\n tf.logging.info(\" %s\", input_file)\n\n rng = random.Random(FLAGS.random_seed)\n instances = create_training_instances_squad(\n input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,\n FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,\n rng)\n\n output_files = FLAGS.output_file.split(\",\")\n tf.logging.info(\"*** Writing to output files ***\")\n for output_file in output_files:\n tf.logging.info(\" %s\", output_file)\n\n write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,\n FLAGS.max_predictions_per_seq, output_files)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"output_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n tf.app.run()\n","sub_path":"create_pretraining_data.py","file_name":"create_pretraining_data.py","file_ext":"py","file_size_in_byte":23574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580449308","text":"\n\n__author__ = 'wasi'\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport sys\n\nclass HelloEvent(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n\n layout = QVBoxLayout()\n\n self.label = QLabel(\"Label\")\n line_edit = QLineEdit(\"type something\")\n line_edit.selectAll()\n button = QPushButton(\"Close\")\n\n layout.addWidget(self.label)\n layout.addWidget(line_edit)\n layout.addWidget(button)\n\n self.setLayout(layout)\n\n button.clicked.connect(self.close)\n\n # this will work\n # line_edit.textChanged.connect(self.label.setText)\n # demonstration of user defined function to do the same\n line_edit.textChanged.connect(self.line_edit_event)\n\n def line_edit_event(self,text):\n self.label.setText(text)\n\n\ndef main():\n app = QApplication(sys.argv)\n form = HelloEvent()\n form.show()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()","sub_path":"Hello-Event/Hello-Event.py","file_name":"Hello-Event.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"615548881","text":"from PIL import Image, ImageDraw\nimport sys\nimport json\n\njson_path = sys.argv[1]\nx_size = 600\ny_size = 800\n\n# background\nimg = Image.new('RGBA', (x_size, y_size), 'white')\n\n# foregroup\ntmp = Image.new('RGBA', img.size, 'white')\ndraw = ImageDraw.Draw(tmp)\nwith open(json_path) as json_file:\n # draw rectangles as given by points\n data = json.load(json_file)\n for entry in data:\n line = []\n for point in entry[\"points\"]:\n line.append((int(point[\"x\"]*x_size), int(point[\"y\"]*y_size)))\n draw.polygon(line, fill=(128, 128, 128, 230))\n\n# merge images\nimg = Image.alpha_composite(img, tmp)\nimg.save(json_path + \".png\", \"PNG\")","sub_path":"json_to_image.py","file_name":"json_to_image.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
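The json_to_image.py record above expects its command-line argument to be a JSON file holding a list of objects, each with a "points" list of coordinates normalized to [0, 1]. A minimal sketch of how to produce a compatible input (the file name boxes.json is illustrative, not from the source):

    import json

    # Two triangles in normalized coordinates; json_to_image.py scales x by 600
    # and y by 800, draws each polygon in translucent gray, and saves <name>.png.
    sample = [
        {"points": [{"x": 0.10, "y": 0.10}, {"x": 0.50, "y": 0.10}, {"x": 0.30, "y": 0.40}]},
        {"points": [{"x": 0.20, "y": 0.60}, {"x": 0.80, "y": 0.70}, {"x": 0.40, "y": 0.90}]},
    ]
    with open("boxes.json", "w") as f:
        json.dump(sample, f)
    # Then: python json_to_image.py boxes.json   ->  writes boxes.json.png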
+{"seq_id":"578942300","text":"#!/usr/bin/env python3\n\"\"\"Nov 19 2016 Mail room Exercise with exceptions and comprehension in print function.\"\"\"\nimport operator\ndonor_dict = {'Alfred Hitchcock': [10, 20, 30],\n 'Martin Scorsese': [20, 30, 40],\n 'Steven Spielberg': [40, 20, 30, 10],\n 'Francis Coppola': [10, 10, 10],\n 'Ridley Scott': [10, 20, 30, 10]}\n\ndonor_summary_dict = {}\n\n\ndef print_report():\n \"\"\"Print Report.\"\"\"\n print(\"This will print a report.\")\n # donor_list_new = []\n report_header = (\"{0:<30s} {1:<30s} {2:<30s} {3:<30s}\".\n format('Donor Name',\n 'Total Amount', 'No.of Donations',\n 'Avg.Donation'))\n\n # report_header = report_header.replace(',', '')\n print(report_header)\n for donorname in donor_dict.keys():\n donation_amounts = donor_dict[donorname]\n donation_sum = sum(donation_amounts)\n donation_avg = donation_sum / len(donation_amounts)\n donor_summary_dict[donorname] = [donation_sum, len(donation_amounts),\n donation_avg]\n sorted_donors = sorted(donor_summary_dict.items(),\n key=operator.itemgetter(1), reverse=True)\n for donor in (sorted_donors):\n print('{0:<30s} {1:<30.2f} {2:<30d} {3:<30.2f}'.\n format(donor[0], donor[1][0], donor[1][1], donor[1][2]))\n\n\ndef send_thanks():\n \"\"\"Send thanks to donors.\"\"\"\n print(\"This will write a thank you note\")\n print(\"Enter the word 'list' to see list of donors or \\\n enter donor name followed by amount.\")\n user_input = input(\"Enter the word 'list' or donor's full name:\")\n if (user_input == 'list'):\n print(\"List of Donors:\", donor_dict)\n user_input = input(\"Enter full name of donor:\")\n if user_input in donor_dict.keys():\n try:\n d_amount = int(input(\"Please enter the donation amount:\"))\n except ValueError:\n d_amount = int(input(\"Please enter only numbers for donation amount:\"))\n donor_dict.setdefault(user_input, []).append(d_amount)\n print('''Dear {name}, Thank you very much for your generous \\\n donation of ${amount}'''\n .format(name=user_input, amount=d_amount))\n else:\n try:\n d_amount = int(input(\"Please enter the donation amount:\"))\n except ValueError:\n d_amount = int(input(\"Please enter only numbers for donation amount:\"))\n donor_dict.setdefault(user_input, []).append(d_amount)\n print(\"Adding new donor.\")\n print('''Dear {name}, Thank you very much for your generous donation of ${amount}'''\n .format(name=user_input, amount=d_amount))\n\n\ndef write_letter():\n \"\"\"Writing a individual thank you letter to disk.\"\"\"\n for donor in donor_dict.keys():\n with open('/Users/Mandava/Documents/python/Class2_20161004/IntroPython2016/students/smandava/session4/thankyouletter_{}.txt'.format(donor), 'w') as thankyou_file:\n # thankyou_file = open('/Users/Mandava/Documents/python/Class2_20161004/IntroPython2016/students/smandava/session4/thankyouletter_{}.txt'.format(donor), 'w')\n thankyou_note = ('''Dear {donor}, \\n \\n Thank you very much for your generous donation of ${amount}.\n \\n \\n Regards, \\n Seattle Charity Org \\n'''\n .format(donor=donor, amount=donor_dict[donor]))\n thankyou_file.write(thankyou_note)\n print(\"Created Thank you letter!\")\n # thankyou_file.close()\n\n\n# here is where triple quoted strings can be helpful\nmsg = \"\"\"\nWhat would you like to do?\nTo send a thank you: type \"s\"\nTo print a report: type \"p\"\nTo write a thank you letter to disk: type \"w\"\nTo exit: type \"x\"\n\"\"\"\n\n\ndef main():\n \"\"\"Run the main interactive loop.\"\"\"\n response = ''\n select = {'p': print_report, 's': send_thanks, 'w': 
write_letter,\n 'x': 'break'}\n # keep asking until the user responds with an 'x'\n while True:\n print(msg)\n response = input(\"==> \").strip() # strip() in case there are any spaces\n if response == 'p':\n select['p']()\n elif response == 's':\n select['s']()\n elif response == 'w':\n select['w']()\n elif response == 'x':\n break\n else:\n print('please type \"s\", \"p\", or \"x\"')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students/smandava/session4/sm_mailroom_dict.py","file_name":"sm_mailroom_dict.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"131011940","text":"from errors import NotFound\n\n\nclass BlogEntry(object):\n\n def __init__(self, **kwargs):\n self.body = kwargs.get('body', '')\n self.title = kwargs.get('title', '')\n self.id = kwargs.get('id', None)\n\n @property\n def url(self):\n return \"/entry/{}\".format(self.id)\n\n @property\n def api_url(self):\n return \"/api/{}\".format(self.id)\n\n def save(self, db):\n if not self.title:\n raise ValueError(\"Title is required to save\")\n\n if not self.body:\n raise ValueError(\"Body is required to save\")\n\n if not self.id:\n self._insert_sql(db)\n else:\n self._update_sql(db)\n\n def _insert_sql(self, db):\n cursor = db.cursor()\n cursor.execute('insert into entries (title, body) values (?, ?)',\n [self.title, self.body])\n self.id = cursor.lastrowid\n db.commit()\n return self.id\n\n def _update_sql(self, db):\n db.execute('UPDATE entries SET title=?, body=? WHERE id=?',\n [self.title, self.body, self.id])\n db.commit()\n\n @classmethod\n def get(cls, db, blog_id):\n cur = db.execute(\n 'select id, title, body from entries where id = ?',\n [blog_id]\n )\n row = cur.fetchone()\n if not row:\n raise NotFound\n blog_entry = cls(id=row[0], title=row[1], body=row[2])\n return blog_entry\n\n @classmethod\n def get_all(cls, db):\n cur = db.execute('select id, title, body from entries order by id desc')\n rows = cur.fetchall()\n entries = [\n cls(id=row[0], title=row[1], body=row[2])\n for row in rows\n ]\n return entries\n\n def to_dict(self):\n blog_dict = {\n 'id': self.id,\n 'title': self.title,\n 'body': self.body,\n 'url': self.url,\n 'api_url': self.api_url,\n }\n return blog_dict\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"<BlogEntry {}: {}>\".format(self.id, self.title)\n","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"520551972","text":"# raresymbols.py\n# Code to solve problem #2 of the python challenge, which can be found at pythonchallenge.com\n# Sean Brickley (September 29th, 2016)\n\n# The file 'ordered_counter.py' is necessary for this program to work properly.\n# It should be in the same repository on my GitHub profile.\nfrom ordered_counter import OrderedCounter\n\n# Read the text file (which contains the relevant section of the source code) into a string\nwith open('raresymbols.txt') as file_object:\n full_text = file_object.read()\n\n# Use an OrderedCounter to get a count of each symbol that occurs in the file.\ncounts = OrderedCounter(full_text)\n# Make an empty list to store the rare symbols in.\nrare_symbols = []\n\n# Loop through each of the items in the dictionary. 
\n# If the current symbol's count == 1, append that to the list.\nfor symbol, count in counts.items():\n if count == 1:\n rare_symbols.append(symbol)\n \nnew_url = ''.join(rare_symbols)\nprint(\"Type this into the url: \" + new_url + \".html\")\n\n\n\n\n\n","sub_path":"raresymbols.py","file_name":"raresymbols.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"411910784","text":"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom thriftpy.transport.socket import TSocket, TServerSocket\nfrom thriftpy.transport import TTransportException\n\n\ndef test_close():\n server = TServerSocket(host=\"localhost\", port=12345)\n client = TSocket(host=\"localhost\", port=12345)\n\n server.listen()\n client.open()\n\n c = server.accept()\n\n client.close()\n\n with pytest.raises(TTransportException) as e:\n c.read(1024)\n assert \"TSocket read 0 bytes\" in e.value.message\n\n c.write(b\"world\")\n c.close()\n\n assert c.handle is None\n","sub_path":"tests/test_socket.py","file_name":"test_socket.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"182375993","text":"\"\"\"\nDjango settings for Qbase project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '_4*2bdpg(0f=v&0w4u63l4$*$$i1aigz4yg5ayn86x0kx#51&^'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django.contrib.sites',\n # 'allauth',\n # 'allauth.account',\n # 'allauth.socialaccount',\n # 'allauth.socialaccount.providers.facebook',\n # 'allauth.socialaccount.providers.twitter',\n #'theme',\n 'questionapp',\n 'multiprofile',\n 'bootstrap3',\n \"taggit\",\n # \"badger\",\n)\n\nSITE_ID = 1\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.media\",\n 'django.core.context_processors.request',\n 'django.core.context_processors.static',\n 'django.contrib.messages.context_processors.messages',\n\n # \"allauth.account.context_processors.account\",\n # \"allauth.socialaccount.context_processors.socialaccount\",\n\n \"questionapp.context_processors.get_quser\",\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n # 'badger.middleware.RecentBadgeAwardsMiddleware',\n\n)\n\n\nAUTHENTICATION_BACKENDS = 
(\n\n # Needed to login by username in Django admin, regardless of `allauth`\n \"multiprofile.auth_backends.MultiModelBackend\",\n\n\n # # `allauth` specific authentication methods, such as login by e-mail\n # \"allauth.account.auth_backends.AuthenticationBackend\",\n\n)\n\n\nROOT_URLCONF = 'config.urls'\n\nWSGI_APPLICATION = 'Qbase.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'qbase', # Or path to database file if using sqlite3.\n 'USER': 'qbase', # Not used with sqlite3.\n 'PASSWORD': '8773322', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\nTIME_ZONE = 'Europe/Dublin'\nLANGUAGE_CODE = 'en-gb'\nUSE_I18N = False\nUSE_L10N = False\nSITE_ID = 1\n\n\n\n# Absolute path to the directory that holds media.\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'shared_static')\nSTATIC_URL = '/shared_static/'\n\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'questionapp/static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n os.path.join(BASE_DIR, 'questionapp/templates'),\n)\n\n# auth and allauth settings\nLOGIN_REDIRECT_URL = '/'\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {\n 'facebook': {\n 'SCOPE': ['email', 'publish_stream'],\n 'METHOD': 'js_sdk' # instead of 'oauth2'\n }\n}\n\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'\n\n\nBADGER_TEMPLATE_BASE = 'badger'\n\n\nPROFILE_MODELS = ['questionapp.models.QUser',]\n\n# minimum points before action can be taken\nMIN_POINTS = {\n \"VOTE_UP\" : 15,\n \"FLAG_OFFENSIVE\" : 15,\n \"POST_IMAGES\" : 15,\n \"LEAVE_COMMENTS\" : 50,\n \"VOTE_DOWN\" : 25,\n \"CLOSE_OWN_QUESTIONS\": 15,\n \"CLOSE_OTHER_QUESTIONS\" : 50,\n \"RETAG_OTHER_QUESTIONS\" : 25,\n \"EDIT_COMMUNITY_WIKI_POSTS\" : 25,\n \"EDIT_OTHER_POSTS\" : 25,\n \"DELETE_COMMENTS\" : 25,\n \"VIEW_OFFENSIVE_FLAGS\" : 25,\n \"LOCK_POSTS\" : 25,\n }\n\nAWARD_POINTS = {\n \"ACCEPT_ANSWER\" : 2,\n \"MY_ANSWER_ACCEPTED\" : 15,\n \"ASK_QUESTION\" : 15,\n \"VOTE_ANSWER_UP\" : 3,\n \"VOTE_ANSWER_DOWN\" : -1,\n \"MY_ANSWER_VOTE_ANSWER_UP\" : 10,\n \"MY_ANSWER_VOTE_ANSWER_DOWN\" : -2,\n \"VOTE_QUESTION_UP\" : 3,\n \"VOTE_QUESTION_DOWN\" : -1,\n \"MY_QUESTION_VOTE_QUESTION_UP\" : 5,\n \"MY_QUESTION_VOTE_QUESTION_DOWN\" : -2,\n \"VOTE_COMMENT_UP\" : 1,\n \"VOTE_COMMENT_DOWN\" : -1,\n \"MY_COMMENT_VOTE_COMMENT_UP\" : 1,\n \"MY_COMMENT_VOTE_COMMENT_DOWN\" : -1,\n\n}\ntry:\n from settings_local import *\nexcept ImportError:\n pass","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"116047415","text":"\"\"\"\nCopyright (c) 2004-Present Pivotal Software, Inc.\n\nThis program and the accompanying materials are made available under\nthe terms of the under the Apache License, Version 2.0 (the 
\"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport tinctest\nimport os\n\nfrom gppylib.commands.base import Command, REMOTE\nfrom mpp.lib.config import GPDBConfig\nfrom mpp.lib.PSQL import PSQL\nfrom time import sleep\nfrom tinctest import TINCTestCase\nfrom mpp.gpdb.tests.utilities.recoverseg.gprecoverseg_tests.fault.genFault import Fault\nfrom mpp.lib.config import GPDBConfig\n\nfrom mpp.lib.gprecoverseg import GpRecoverseg\nfrom utilities.gppersistentrebuild import PTTestCase\n\n# Environmental variable to be set priror to the gprecoverseg run.\nENV_VAR=\"GP_MPP_12038_INJECT_DELAY\" \n\nclass FaultInjectorTestCase (TINCTestCase):\n \"\"\"\n \n @description Injects the specific faults in Primary or Mirror\n @created 2009-01-27 14:00:00\n @modified 2013-09-12 17:10:15\n @tags storage schema_topology \n @product_version gpdb:4.2.x,gpdb:main\n \"\"\"\n\n def test_kill_primary(self):\n \"\"\"\n [feature]: Kills primary gp0 segment\n \n \"\"\"\n \n newfault = Fault()\n hosts = newfault.get_segment_host(preferred_role='p',content=0) \n if not newfault.kill_primary_gp0(hosts):\n self.fail(\"Could not the kill the primary process, cannot proceed further!\")\n rtrycnt = 0\n while( not newfault.is_changetracking()):\n tinctest.logger.info(\"Waiting [%s] for DB to go into CT mode\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n\n def test_kill_mirror(self):\n \"\"\"\n [feature]: Kills mirror gp0 segment \n \n \"\"\"\n\n newfault = Fault()\n hosts = newfault.get_segment_host(preferred_role='m',content=0)\n if not newfault.kill_mirror_gp0(hosts):\n self.fail(\"Could not the kill the mirror process, cannot proceed further!\")\n rtrycnt = 0\n while( not newfault.is_changetracking()):\n tinctest.logger.info(\"Waiting [%s] for DB to go in CT mode\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n\n def test_kill_primary_group(self):\n \"\"\"\n [feature]: Kill a group of primary segments\n \n \"\"\"\n\n newfault = Fault()\n seglist = newfault.get_seginfo_for_primaries()\n seglist = seglist[:(len(seglist) + 1 ) / 2]\n for seg in seglist:\n tinctest.logger.info('Killing segment %s' % seg.getSegmentDataDirectory())\n newfault.kill_primary(seg.getSegmentHostName(), seg.getSegmentDataDirectory(), seg.getSegmentPort())\n rtrycnt = 0\n while (not newfault.is_changetracking()):\n tinctest.logger.info('Waiting [%s] for DB to go in CT mode' % rtrycnt)\n rtrycnt += 1\n\n def test_drop_pg_dirs_on_primary(self):\n \"\"\"\n [feature]: Drops primary gp0 folder \n \n \"\"\"\n\n newfault = Fault()\n (host, fileLoc) = newfault.get_segment_host_fileLoc()\n newfault.drop_pg_dirs_on_primary(host, fileLoc)\n rtrycnt = 0\n max_rtrycnt = 300\n while( not newfault.is_changetracking()):\n tinctest.logger.info(\"Waiting [%s] for DB to go into CT mode\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n \n def test_use_gpfaultinjector_to_mark_segment_down(self):\n \"\"\"\n [feature]: Use gpfaultinjector to mark a segment down in the configuration, but the \n process is still running on the segment.\n \n \"\"\"\n\n newfault = Fault()\n seginfo = newfault.get_seginfo(preferred_role='m', content=1)\n 
newfault.inject_using_gpfaultinjector(fault_name='filerep_consumer', fault_mode='async', fault_type='fault', segdbid=seginfo.getSegmentDbId())\n rtrycnt = 0\n while (not newfault.is_changetracking()):\n tinctest.logger.info(\"Waiting [%s] for DB to go into CT mode\" % rtrycnt)\n rtrycnt += 1\n\n def test_create_symlink_for_seg(self):\n \"\"\"\n [feature]: Creates a symlink to the data directory for a given segment\n \n \"\"\"\n \n newfault = Fault()\n seginfo = newfault.get_seginfo(preferred_role='m', content=1)\n newfault.create_remote_symlink(seginfo.getSegmentHostName(), seginfo.getSegmentDataDirectory())\n tinctest.logger.info('Creating symlink for seg %s on host %s' % (seginfo.getSegmentDataDirectory(), seginfo.getSegmentHostName()))\n\n def test_remove_symlink_for_seg(self):\n \"\"\"\n [feature]: Remove symlink for datadirectory and restore the orignal directory\n for a given segment.\n \n \"\"\"\n\n newfault = Fault()\n seginfo = newfault.get_seginfo(preferred_role='m', content=1)\n newfault.remove_remote_symlink(seginfo.getSegmentHostName(), seginfo.getSegmentDataDirectory())\n tinctest.logger.info('Removed symlinks for seg %s on host %s' % (seginfo.getSegmentDataDirectory(), seginfo.getSegmentHostName()))\n\n def test_corrupt_persistent_tables(self):\n \"\"\"\n [feature]: corrupts PT tables for segment that has been marked down \n \n \"\"\"\n \n newfault = Fault()\n seginfo = newfault.get_seginfo(preferred_role='p', content=1)\n pt = PTTestCase('corrupt_persistent_table')\n pt.corrupt_persistent_table(seginfo.getSegmentHostName(), seginfo.getSegmentPort())\n tinctest.logger.info('Finished corruption of PT tables')\n\n def test_rebuild_persistent_tables(self):\n \"\"\"\n [feature]: rebuilds PT tables for segment that has been marked down \n \n \"\"\"\n cmd = Command(name='Running gppersistentrebuild tool', cmdStr = 'echo \"y\\ny\\n\" | $GPHOME/sbin/gppersistentrebuild -c 1')\n cmd.run(validateAfter=True)\n tinctest.logger.info('Finished rebuild of PT tables')\n\n def test_shared_mem_is_cleaned(self):\n \"\"\"\n [feature]: Check if the shared memory is cleaned\n \n \"\"\"\n newfault = Fault()\n seginfo = newfault.get_seginfo(preferred_role='p',content=0) \n cmd = Command('check for shared memory', cmdStr=\"ipcs -a\", ctxt=REMOTE, remoteHost=seginfo.getSegmentHostName())\n cmd.run(validateAfter=True)\n result = cmd.get_results().stdout.split('\\n')\n for r in result:\n if r and r.split()[-1] == '0':\n raise Exception('Shared memory not cleaned up for %s' % r)\n\n def test_wait_till_segments_in_change_tracking(self):\n \"\"\"\n [feature]: Wait until segments for into change tracking\n \n \"\"\"\n newfault = Fault()\n rtrycnt = 0\n while( not newfault.is_changetracking()):\n tinctest.logger.info(\"Waiting [%s] for DB to go in CT mode\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n\nclass GprecoversegClass(TINCTestCase):\n \"\"\"\n \n @description Performs different types of Recovery process.\n @created 2009-01-27 14:00:00\n @modified 2013-09-12 17:10:15\n @tags storage schema_topology \n @product_version gpdb:4.2.x,gpdb:main\n \"\"\"\n\n def test_recovery_with_new_loc(self):\n \"\"\"\n [feature]: Performs recovery by creating a configuration file with new segment locations \n \n \"\"\"\n\n newfault = Fault()\n config = GPDBConfig()\n hosts = newfault.get_segment_host()\n newfault.create_new_loc_config(hosts, orig_filename='recovery.conf', new_filename='recovery_new.conf')\n if not newfault.run_recovery_with_config(filename='recovery_new.conf'):\n self.fail(\"*** Incremental recovery with config 
file recovery_new.conf failed\")\n rtrycnt = 0\n while (not config.is_not_insync_segments()):\n tinctest.logger.info(\"Waiting [%s] for DB to recover\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n \n def test_do_incremental_recovery(self):\n \"\"\"\n [feature]: Performs Incremental Recovery \n \n \"\"\"\n\n config = GPDBConfig()\n recoverseg = GpRecoverseg()\n tinctest.logger.info('Running Incremental gprecoverseg...')\n recoverseg.run()\n rtrycnt = 0\n while (not config.is_not_insync_segments()):\n tinctest.logger.info(\"Waiting [%s] for DB to recover\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n \n def test_do_full_recovery(self):\n \"\"\"\n [feature]: Performs Full Recovery\n \n \"\"\"\n\n config = GPDBConfig()\n recoverseg = GpRecoverseg()\n tinctest.logger.info('Running Full gprecoverseg...')\n recoverseg.run(option = '-F')\n rtrycnt = 0\n while (not config.is_not_insync_segments()):\n tinctest.logger.info(\"Waiting [%s] for DB to recover\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n \n def test_invalid_state_recoverseg(self):\n \"\"\"\n [feature]: Sets the ENV_VAR and runs the incremental recoverseg\n \n \"\"\"\n ''' '''\n # setting the ENV_VAR\n os.environ[ENV_VAR] = '1'\n recoverseg = GpRecoverseg()\n config = GPDBConfig()\n tinctest.logger.info('Running Incremental gprecoverseg...')\n recoverseg.run()\n rtrycnt = 0\n while (not config.is_not_insync_segments()):\n tinctest.logger.info(\"Waiting [%s] for DB to recover\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n\n def test_incremental_recovery_skip_persistent_tables_check(self):\n \"\"\"\n [feature]: Run incremental recoverseg with persistent tables check option \n \n \"\"\"\n\n config = GPDBConfig()\n recoverseg = GpRecoverseg()\n tinctest.logger.info('Running gprecoverseg...')\n recoverseg.run()\n self.assertNotIn('Performing persistent table check', recoverseg.stdout)\n rtrycnt = 0\n while (not config.is_not_insync_segments()):\n tinctest.logger.info(\"Waiting [%s] for DB to recover\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n\n def test_full_recovery_skip_persistent_tables_check(self):\n \"\"\"\n [feature]: Run recoverseg with persistent tables check option \n \n \"\"\"\n\n config = GPDBConfig()\n recoverseg = GpRecoverseg()\n tinctest.logger.info('Running gprecoverseg...')\n recoverseg.run(option='-F')\n self.assertNotIn('Performing persistent table check', recoverseg.stdout)\n rtrycnt = 0\n while (not config.is_not_insync_segments()):\n tinctest.logger.info(\"Waiting [%s] for DB to recover\" %rtrycnt)\n rtrycnt = rtrycnt + 1\n\n def test_incremental_recovery_with_persistent_tables_corruption(self):\n \"\"\"\n [feature]: Run incremental recoverseg with persistent tables corruption \n \n \"\"\"\n\n recoverseg = GpRecoverseg()\n tinctest.logger.info('Running gprecoverseg...')\n try:\n recoverseg.run(option='--persistent-check', validate=False)\n except Exception as e:\n tinctest.logger.info('Encountered exception while running incremental recovery with corrupt persistent table')\n self.assertIn('Performing persistent table check', recoverseg.stdout)\n\n def test_full_recovery_with_persistent_tables_corruption(self):\n \"\"\"\n [feature]: Run recoverseg with persistent tables corruption \n \n \"\"\"\n\n recoverseg = GpRecoverseg()\n tinctest.logger.info('Running gprecoverseg...')\n try:\n recoverseg.run(option='-F --persistent-check', validate=False)\n except Exception as e:\n tinctest.logger.info('Encountered exception while running full recovery with corrupt persistent table')\n self.assertIn('Performing persistent table check', recoverseg.stdout)\n\nclass 
GPDBdbOps(TINCTestCase):\n \"\"\"\n \n @description GPDB admin operations\n @created 2009-01-27 14:00:00\n @modified 2013-09-12 17:10:15\n @tags storage schema_topology \n @product_version gpdb:4.2.x,gpdb:main\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(GPDBdbOps,cls).setUpClass()\n tinctest.logger.info('GPDB Operations')\n\n def gprestartdb(self):\n ''' Restarts the Database '''\n newfault = Fault()\n newfault.stop_db()\n newfault.start_db()\n sleep(30)\n \n def check_if_not_in_preferred_role(self):\n ''' Checks if the segments are in preferred role or not '''\n newfault = Fault()\n result = newfault.check_if_not_in_preferred_role()\n if result == True:\n self.fail(\"Segments are not in preferred roles!!!\")\n \n \nclass SegmentConfigurations(TINCTestCase):\n \"\"\"\n \n @description Checks the segment's configuration for any invalid states\n @created 2009-01-27 14:00:00\n @modified 2013-09-12 17:10:15\n @tags storage schema_topology \n @product_version gpdb:4.2.x,gpdb:main\n \"\"\"\n \n @classmethod\n def setUpClass(cls):\n super(SegmentConfigurations,cls).setUpClass()\n tinctest.logger.info('Running all the invalid state tests...')\n # Sleep introduced so that the gprecoverseg starts before the invalid state tests\n tinctest.logger.info('Sleep introduced for 15 secs...')\n sleep(15)\n\n def test_if_primary_down(self):\n \"\"\"\n [feature]: Check for invalid state - Primary is marked down\n \n \"\"\"\n sql_stmt = \"SELECT 'down_segment' FROM gp_segment_configuration \" \\\n \"WHERE role = 'p' \" \\\n \"AND status = 'd'\"\n out = PSQL.run_sql_command(sql_stmt)\n if len(out) == 0:\n error_msg = 'Could not connect to the sever!!'\n tinctest.logger.error(error_msg)\n self.fail(error_msg)\n out = out.count('down_segment') - 1\n if out == 0:\n tinctest.logger.info('Primary is marked down => 0 rows')\n else:\n error_msg = \"%s down segments found\" %out\n tinctest.logger.info(error_msg)\n self.fail(error_msg)\n \n def test_if_mirror_down_and_primary_in_CT(self):\n \"\"\"\n [feature]: Check for invalid state - Mirror is down but primary is not in change tracking \n \n \"\"\"\n sql_stmt = \"SELECT p.content, p.dbid AS p_dbid, m.dbid AS m_dbid, \" \\\n \"p.role AS p_role, m.role AS m_role, \" \\\n \"p.preferred_role AS p_pref_role, m.preferred_role AS m_pref_role, \" \\\n \"p.address AS p_address, m.address AS m_address, \" \\\n \"p.status AS p_status, m.status AS m_status, \" \\\n \"p.mode AS p_mode, m.mode AS m_mode \" \\\n \"FROM gp_segment_configuration p, gp_segment_configuration m \" \\\n \"WHERE ( (p.content = m.content) AND (p.dbid <> m.dbid) ) \" \\\n \"AND p.status = 'u' and m.status = 'd' \" \\\n \"AND p.mode <> 'c'\" \n out = PSQL.run_sql_command(sql_stmt)\n if len(out) == 0:\n error_msg = 'Could not connect to the sever!!'\n tinctest.logger.error(error_msg)\n self.fail(error_msg)\n \n out = out.split('\\n')[3].find('0 rows')\n if out > 0:\n tinctest.logger.info('Mirror is down but primary is not in change tracking => 0 rows')\n else:\n error_msg = \"%s down segments found\" %out\n tinctest.logger.info(error_msg)\n self.fail(error_msg)\n \n def test_if_primary_in_CT_but_mirror_not_down(self):\n \"\"\"\n [feature]: Check for invalid state - Primary is in change tracking but mirror is not down\n \n \"\"\"\n sql_stmt = \"SELECT p.content, p.dbid AS p_dbid, m.dbid AS m_dbid, \" \\\n \"p.role AS p_role, m.role AS m_role, \" \\\n \"p.preferred_role AS p_pref_role, m.preferred_role AS m_pref_role, \" \\\n \"p.address AS p_address, m.address AS m_address, \" \\\n \"p.status AS 
p_status, m.status AS m_status, \" \\\n \"p.mode AS p_mode, m.mode AS m_mode \" \\\n \"FROM gp_segment_configuration p, gp_segment_configuration m \" \\\n \"WHERE ( (p.content = m.content) AND (p.dbid <> m.dbid) ) \" \\\n \"AND p.status = 'u' and p.mode = 'c' \" \\\n \"AND m.status <> 'd'\" \n out = PSQL.run_sql_command(sql_stmt)\n if len(out) == 0:\n error_msg = 'Could not connect to the sever!!'\n tinctest.logger.error(error_msg)\n self.fail(error_msg)\n out = out.split('\\n')[3].find('0 rows')\n if out > 0:\n tinctest.logger.info('Primary is in change tracking but mirror is not down => 0 rows')\n else:\n error_msg = \"%s down segments found\" %out\n tinctest.logger.info(error_msg)\n self.fail(error_msg)\n \n def test_if_primary_up_resync_and_mirror_down_not_in_resync(self):\n \"\"\"\n [feature]: Check for invalid state - Primary is Up/In resync, Mirror is not in resync or is marked down\n \n \"\"\"\n sql_stmt = \"SELECT p.content, p.dbid AS p_dbid, m.dbid AS m_dbid, \" \\\n \"p.role AS p_role, m.role AS m_role, \" \\\n \"p.preferred_role AS p_pref_role, m.preferred_role AS m_pref_role, \" \\\n \"p.address AS p_address, m.address AS m_address, \" \\\n \"p.status AS p_status, m.status AS m_status, \" \\\n \"p.mode AS p_mode, m.mode AS m_mode \" \\\n \"FROM gp_segment_configuration p, gp_segment_configuration m \" \\\n \"WHERE ( (p.content = m.content) AND (p.dbid <> m.dbid) ) \" \\\n \"AND p.status = 'u' and p.mode = 'r' \" \\\n \"AND ( (m.mode <> 'r') OR (m.status = 'd') )\" \n out = PSQL.run_sql_command(sql_stmt)\n if len(out) == 0:\n error_msg = 'Could not connect to the sever!!'\n tinctest.logger.error(error_msg)\n self.fail(error_msg)\n out = out.split('\\n')[3].find('0 rows')\n if out > 0:\n tinctest.logger.info('Primary is Up/In resync, Mirror is not in resync or is marked down => 0 rows')\n else:\n error_msg = \"%s down segments found\" %out\n tinctest.logger.info(error_msg)\n self.fail(error_msg)\n","sub_path":"src/test/tinc/tincrepo/mpp/gpdb/tests/utilities/recoverseg/gprecoverseg_tests/fault/fault.py","file_name":"fault.py","file_ext":"py","file_size_in_byte":18715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"593130002","text":"\"\"\"\nStudent: Imke Lansky\nStudent number: 10631194\nFile: Data.py\nDate: 23-05-2016\n\nDescription:\nThis file implements the Data class which reads a given\ndatafile and performs transformations on it to make it useful\nfor further research.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom json import load\nfrom re import findall\nfrom numpy import mean, array, vstack, ndarray\n\n\nclass Data(object):\n \"\"\"\n Implements the Data Class which collects the data from\n the data set and stores it in a dictionary. It also calculates\n averages data points and plots the data.\n \"\"\"\n\n raw_data = {}\n info_dict = {}\n\n def __init__(self, filepath):\n \"\"\"\n Read the data file, extract the points and create a set\n that can be worked with. Also order the dictionary to the configuration\n order given in the data set.\n\n PARAMETERS\n ==============\n filepath: str\n The path to the datafile that will be used for the simulation.\n \"\"\"\n\n self.raw_data = load(open(filepath))\n self.extract_data()\n self.average_datapoints()\n self.order_dictionary()\n\n def extract_data(self):\n \"\"\"\n Extracts the data from the given data set. 
Stores all\n the needed information for the simulation in a dictionary.\n \"\"\"\n\n def extr_num(string):\n \"\"\"\n Perform a regular expression on the given string\n to extract the numbers and store them in an array.\n\n PARAMETERS\n ==============\n string: str\n The string we want to extract the numbers from.\n\n RETURNS\n ==============\n array: arr\n Array with the numbers extracted from the string.\n \"\"\"\n\n return [int(num) for num in findall(r'\\b\\d+\\b', string)]\n\n for m_key, subdict in self.raw_data.items():\n for s_key, val in subdict.items():\n\n # configOrder is a list, we don't want that.\n if not isinstance(val, list):\n\n # Create sub dictionaries for each stage.\n if s_key not in self.info_dict:\n self.info_dict[s_key] = {}\n\n for info in val.keys():\n # Coordinates.\n if isinstance(self.raw_data[m_key][s_key][info], list):\n\n # Create new entry if the key does not exist.\n if info not in self.info_dict[s_key]:\n self.info_dict[s_key][info] = \\\n array(self.raw_data[m_key][s_key][info])\n else:\n self.info_dict[s_key][info] = \\\n vstack((self.info_dict[s_key][info],\n self.raw_data[m_key][s_key][info]))\n\n # Other information. We only store the information\n # from the last data set. [We do lose some info here]\n else:\n self.info_dict[s_key][info] = \\\n extr_num(self.raw_data[m_key][s_key][info])\n\n def average_datapoints(self):\n \"\"\"\n Calculates the average data points for the multidimensional\n arrays stored in the information dictionary.\n \"\"\"\n\n for m_key, subdict in self.info_dict.items():\n for s_key, val in subdict.items():\n if isinstance(val, ndarray):\n if val.ndim >= 2:\n self.info_dict[m_key][s_key] = \\\n mean(self.info_dict[m_key][s_key], axis=0)\n\n def order_dictionary(self):\n \"\"\"\n Define the correct order in which the stages take place and sort\n the dictionary according to this order.\n \"\"\"\n\n config_order = ['blastula1', 'gastrula1', 'gastrula1_1', 'gastrula2',\n 'gastrula3', 'gastrula4', 'gastrula5']\n\n self.info_dict = OrderedDict((key, self.info_dict[key])\n for key in config_order)\n","sub_path":"NematostellaMorphGen-master/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"374431579","text":"INI_CONFIG_PATH=\"/Users/luxiaming/Documents/work_program/config.ini\"\nD_TYPE_LIST=['redis','codis','mysql','mongo','pika']\n\ndef get_db(type, city):\n\n not_bj = {\n \"borough\": \"spider\",\n \"plathouse\": \"spider\",\n \"new-plathouse\": \"spider\",\n \"sell-old\": \"spider\",\n \"sell-new\" : \"spider\"\n }\n bj = {\n \"brokerhouse\": \"brokerhouse\",\n \"complex\": \"newhouse\",\n \"building\": \"building\",\n \"brokerbd\": \"brokerswarehouse\",\n \"broker\": \"brokers\",\n \"rent\": \"rent\",\n \"analysis\": \"thor\",\n \"new-analysis\": \"thor\",\n \"sell-analysis\": \"spider\",\n \"sell-api\": \"spider\",\n \"rent-t\": \"rent\",\n \"new-rent\": \"rent\",\n \"zhuge-dm\": \"data\"\n }\n mag = {\n \"dm\": \"zhuge_dm\",\n \"ccmag\": \"comphousemag\",\n \"comphousemag\": \"comphousemag\",\n \"brokers\": \"brokers\",\n \"hidden_etl\": \"count\",\n \"hidden_new\": \"count\",\n \"rules_template\": \"spider_rules\",\n \"avgprice\": \"statistics\",\n \"localhost\": \"test\",\n \"appraisal\": \"spider\",\n \"operation\": \"operation\",\n \"newhouse_dock\": \"newhouse_dock\",\n \"zhuge_user\": \"zhuge_user\"\n }\n special = {\n \"design\":\"deco\"\n }\n if type in bj:\n db = bj.get(type) + \"_\" + city\n 
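# Worked examples of the naming scheme: get_db('rent', 'sh') returns
# 'rent_sh' via this branch; for the not_bj group the city suffix is
# dropped only for Beijing, so get_db('borough', 'bj') -> 'spider' but
# get_db('borough', 'sz') -> 'spider_sz'; the mag group ignores the city
# argument entirely.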
return db\n elif type in not_bj:\n db = not_bj.get(type) if city == \"bj\" else not_bj.get(type) + \"_\" + city\n return db\n elif type in mag:\n return mag.get(type)\n elif type in special:\n return special.get(type) if city == \"\" else special.get(type) + \"_\" + city\n\n\n\n\n\nsell_old = ('bj', 'bd', 'cc', 'cd', 'cq', 'cs', 'cz', 'dg', 'dl', 'fz', 'guilin', 'gy', 'gz', 'heb', 'hf', 'hhht', 'hn',\n 'huizhou', 'hz', 'jh', 'jm', 'jn', 'km', 'ks', 'lf', 'ly', 'lz', 'nb', 'nc', 'nj', 'nn', 'nt', 'qd', 'qhd',\n 'qz', 'sh', 'sjz', 'su', 'sx', 'sy', 'sz', 'taiyuan', 'taizhou', 'tj', 'ts', 'weihai', 'wf', 'wh', 'wx',\n 'xa', 'xaxq', 'xm', 'xz', 'yc', 'yinchuan', 'yt', 'yz', 'zh', 'zs', 'zz')\n\nes_new = ('changle', 'dangtu', 'haimen', 'haicheng', 'jingjiang', 'laizhou', 'lianjiang', 'leiyang', 'sdpy', 'qianan',\n 'shangyu', 'sg', 'tianmen', 'wuzhishan', 'xinji', 'yizheng', 'fjax', 'alaer', 'anning', 'anqiu', 'baoying',\n 'gxby', 'boluo', 'hbbz', 'changshu', 'cqchangshou', 'changxing', 'cixi', 'changyi', 'chunan', 'ahcf', 'sdcl',\n 'chongzhou', 'cn', 'deqing', 'dongtai', 'donggang', 'dangyang', 'dengfeng', 'dehui', 'dingzhou', 'dayi',\n 'dongfang', 'dujiangyan', 'dengzhou', 'donghai', 'enping', 'emeishan', 'fq', 'fengdu', 'fuling', 'feicheng',\n 'fengcheng', 'feixi', 'feidong', 'jsfx', 'fuan', 'jxfc', 'gaomi', 'gongyi', 'gaoyou', 'guangrao',\n 'gaobeidian', 'huian', 'hechuan', 'haining', 'haian', 'huidong', 'huxian', 'hengxian', 'heshan', 'haiyang',\n 'hailin', 'huadian', 'jiangyin', 'jiangdu', 'cqjiangjin', 'jiyuan', 'jintan', 'jiande', 'jinxian', 'jr',\n 'scjt', 'sdjy', 'jianyang', 'kaiping', 'kuitun', 'longhai', 'longkou', 'sxly', 'linhai', 'liyang', 'lujiang',\n 'laiyang', 'lantian', 'liaozhong', 'liuyang', 'luanxian', 'luannan', 'hblt', 'gdlm', 'linqing', 'liling',\n 'lhk', 'mengjin', 'cqnanchuan', 'ninghai', 'nanan', 'nongan', 'ningxiang', 'pingtan', 'pizhou', 'pulandian',\n 'pinghu', 'peixian', 'pengzhou', 'puning', 'qijiang', 'qidong', 'qingzhou', 'qionglai', 'qianxi', 'qingxu',\n 'qixia', 'qingzhen', 'ruzhou', 'rugao', 'ruijin', 'rudong', 'ruyang', 'renqiu', 'shunde', 'shennongjia',\n 'shuyang', 'shihezi', 'shishi', 'jssn', 'sdsh', 'shangzhi', 'hbsz', 'hbsh', 'cqtongliang', 'taixing',\n 'tengzhou', 'tongxiang', 'zjtl', 'taishan', 'wanzhou', 'wujiaqu', 'wenling', 'wafangdian', 'hbwj', 'wuchang',\n 'wenan', 'wanning', 'wuan', 'wg', 'xinzheng', 'zjxs', 'xingyang', 'xinjian', 'xinmin', 'yongchuan', 'yixing',\n 'yiwu', 'yanling', 'yangchun', 'hnyz', 'yuyao', 'yichuan', 'yanshi', 'yidu', 'yongdeng', 'yuzhong',\n 'yongchun', 'hnyy', 'yutian', 'jlys', 'hbys', 'yongcheng', 'ya', 'hbyc', 'zhangjiagang', 'zhuozhou',\n 'zhangqiu', 'zhucheng', 'zy', 'zhuji', 'zoucheng', 'zouping', 'zunhua', 'zhaodong', 'zhongmou', 'lnzh',\n 'zhouzhi', 'zhongxiang', 'hbzy', 'anlu', 'beipiao', 'beiliu', 'chibi', 'cenxi', 'dafeng', 'danyang', 'dengta',\n 'dunhuang', 'dongyang', 'fuding', 'guangshui', 'geermu', 'guanghan', 'guiping', 'gaoan', 'huanghua', 'hejian',\n 'hancheng', 'hanchuan', 'jinjiang', 'jiangshan', 'kaiyuan', 'linzhou', 'lingbao', 'lengshuijiang', 'lianyuan',\n 'lufeng', 'leping', 'laoling', 'meihekou', 'mengzhou', 'ningguo', 'penglai', 'qinyang', 'rushan', 'renhuai',\n 'shengzhou', 'songzi', 'shahe', 'tianchang', 'wuyishan', 'wuxue', 'yangzhong', 'yucheng', 'yuanjiang',\n 'zixing', 'zhangshu', 'zjfy', 'linan', 'jimo', 'jiaonan', 'jiashan', 'baisha', 'danzhou', 'chengmai',\n 'dingan', 'qiongzhong', 'tunchang', 'wenchang', 'lingshui', 'baoting', 'jinan', 'jianhu', 'gaoling', 'hbjz',\n 'ynyl', 'yongtai', 
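# (The tuple continues below.) es_new enumerates the city codes that
# getEsConfigName() further down routes to the 'new_es' Elasticsearch
# config; sell_old above plays the same role for the '-old'/'-new' split
# made by getConfigType().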
'hbzx', 'anda', 'aj', 'binxian', 'chaohu', 'changge', 'changli', 'jncq', 'dh', 'dingxing',\n 'faku', 'funing', 'guan', 'guzhen', 'njgc', 'gongzhuling', 'huoqiu', 'hailaer', 'huaiyuan', 'whhn', 'huairen',\n 'haiyan', 'jiangyan', 'jizhou', 'hbjs', 'jinhu', 'kangping', 'kaiyang', 'kuerle', 'linqu', 'luoning',\n 'lankao', 'luoyuan', 'luanchuan', 'minqing', 'hbps', 'qianjiang', 'qj', 'qionghai', 'hbql', 'quangang',\n 'ruian', 'taicang', 'tongcheng', 'wujiang', 'wuhe', 'xinyi', 'xianghe', 'xiantao', 'xinghua', 'xintai',\n 'xinmi', 'hnxa', 'xiuwen', 'xinle', 'youxian', 'xiangxiang', 'xinjin', 'xilinhaote', 'yanjiao', 'yueqing',\n 'yongkang', 'yuhuan', 'yongning', 'ksys', 'zhaozhou', 'zhaoyuan', 'zhijiang', 'benxi', 'sanmenxia', 'dxal',\n 'diqing', 'rikaze', 'gannan', 'ali', 'guantao', 'hexian', 'kenli', 'minggang', 'yongxin', 'zhengding',\n 'zhangbei')\n\n\n\n\ndef getConfigType(city, type):\n # sell_old = []\n # result = CityConfig.CityConfigService().get_cityinfo(city=\"bj\", where='{\"sell_db\": 1}')\n # for i in result:\n # sell_old.append(i.get(\"logogram\"))\n\n if \"sell\" in type:\n if city in sell_old:\n return \"sell-old\"\n else:\n return \"sell-new\"\n if \"broker\" in type:\n if city in sell_old:\n return \"broker-old\"\n else:\n return \"broker-new\"\n elif \"plathouse\" in type:\n if city in sell_old:\n return \"plathouse-old\"\n else:\n return \"plathouse-new\"\n return type\n\n\n# 获取es配置名称\ndef getEsConfigName(city,service_type):\n if \"sell\" == service_type:\n if city in es_new:\n return \"new_es\"\n else:\n return \"sell\"\n return service_type\n\n\nif __name__ == '__main__':\n pass\n # a = getConfigName(\"jx\", \"sell\")\n # print(a)\n","sub_path":"configs/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":6978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"434183028","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport sys\nimport numpy as np\nimport pandas as pd \n\nfrom surprise import Reader\nfrom surprise import Dataset\nfrom surprise import KNNBaseline\nfrom surprise import SVD\nfrom surprise import CoClustering\nfrom surprise.model_selection import train_test_split\n\nfrom helpers import load_data, calculate_rmse\n\ndef load_data_forSP(path):\n # Import data\n path_dataset = path\n \n print('Loading the data...')\n ratings = load_data(path_dataset)\n \n coo = ratings.tocoo(copy = False)\n ratings_sp_df = pd.DataFrame({'item': coo.row, 'user': coo.col, 'rating': coo.data})[['item', 'user', 'rating']].reset_index(drop=True)\n \n # A reader is still needed but only the rating_scale param is requiered.\n reader = Reader(rating_scale=(1, 5))\n \n # The columns must correspond to user id, item id and ratings (in that order).\n data = Dataset.load_from_df(ratings_sp_df[['user', 'item', 'rating']], reader)\n \n return data\n\n\ndef svd(data, kwargs):\n # Set algorithm\n n_factors = kwargs.get('k_features')\n n_epochs = kwargs.get('maxiter')\n lr_pu = kwargs.get('lr_pu')\n lr_qi = kwargs.get('lr_qi')\n reg_bu = kwargs.get('reg_bu')\n reg_qi = kwargs.get('reg_qi')\n \n \n algo = SVD(n_factors[0], n_epochs, \n lr_pu[0] , lr_qi[0] , \n reg_bu[0] , reg_qi[0] , \n random_state = kwargs['random_seed'] )\n \n # Train the algorithm on the data, and predict ratings for the testset\n algo.fit(data)\n \n # Predict the full matrix\n prediction = np.zeros([10000,1000])\n for row in range(10000):\n for col in range(1000):\n prediction[row,col] 
= algo.predict(str(row+1),str(col+1)).est\n \n return prediction\n \n\ndef KNN(data, kwargs):\n # Set algorithm\n k_neigbor = kwargs.get('n_neigbor')\n min_neighb = kwargs.get('min_neigbor')\n similarity = kwargs.get('similarity')\n \n options = {'name': similarity}\n algo = KNNBaseline(k = k_neigbor, \n min_k = min_neighb, \n sim_options = options)\n \n # Train the algorithm on the data, and predict ratings for the testset\n algo.fit(data)\n \n prediction = np.zeros([10000,1000])\n for row in range(10000):\n for col in range(1000):\n prediction[row,col] = algo.predict(str(row+1),str(col+1)).est\n \n return prediction\n \n \ndef cluster(data, kwargs): \n # Set algorithm\n cluster_u = kwargs.get('user_cluster')\n cluster_i = kwargs.get('item_cluster')\n n_epochs = kwargs.get('maxiter')\n \n # Set algorithm\n algo = CoClustering(n_cltr_u = cluster_u[0], n_cltr_i = cluster_i[0],\n n_epochs = n_epochs , random_state = kwargs['random_seed'] )\n \n # Train the algorithm on the data, and predict ratings for the testset\n algo.fit(data)\n \n prediction = np.zeros([10000,1000])\n for row in range(10000):\n for col in range(1000):\n prediction[row,col] = algo.predict(str(row+1),str(col+1)).est\n \n return prediction\n\n\ndef get_splib_predictions(name, training_data, testing_data, kwargs):\n '''\n name : name of the algorihm to be used. Now the KNN, Cosluster and SVD are supported.\n \n training_data : training data. If \"load data\" is set and the \"path\" is given in the kwargs, this data will be ignored\n \n testing_data : testing data use for calculating rmse of prediction.\n \n random_seed : random seed for SVD to train the recommendation system, default is \n \n kwargs : kwargs for surpriseLib, different algorithms need different args.\n SVD: { 'k_features', 'maxiter', 'lr_pu', 'lr_qi', 'reg_bu', 'reg_qi', 'random_seed' }\n KNN: { 'n_neigbor' , 'min_neigbor', 'similarity'}\n cluster: {'user_cluster', 'item_cluster', 'maxiter', 'random_seed'}\n '''\n # 1. if \"load data\" is in the kwargs and is True, and the path is also given, the program will load data from the path\n # 2. else if data is given, use the given data\n # 3. 
else there is no available data, so an error will be reported\n # The data is treated as a training set \n if (kwargs.get('loaddata') is not None) and (kwargs.get('path') is not None):\n if kwargs['loaddata']:\n trainset = load_data_forSP(kwargs['path'])\n elif training_data is not None:\n coo = training_data.tocoo(copy = False)\n ratings_sp_df = pd.DataFrame({'item': coo.row, 'user': coo.col, 'rating': coo.data})[['item', 'user', 'rating']].reset_index(drop=True) \n # A reader is still needed but only the rating_scale param is required.\n reader = Reader(rating_scale=(1, 5))\n # The columns must correspond to user id, item id and ratings (in that order).\n data = Dataset.load_from_df(ratings_sp_df[['user', 'item', 'rating']], reader) \n trainset, testset = train_test_split(data, test_size=0.01, random_state = 0)\n \n else:\n sys.exit('No input data for surprise!')\n \n # train the model\n if name.lower() == 'svd':\n prediction = svd(trainset, kwargs)\n \n elif name.lower() == 'knn':\n prediction = KNN(trainset, kwargs)\n \n elif name.lower() == 'cluster':\n prediction = cluster(trainset, kwargs) \n \n else:\n sys.exit('The algorithm ' + name + ' is not supported yet.')\n \n \n testrmse = calculate_rmse( prediction[testing_data.nonzero()], testing_data[testing_data.nonzero()].toarray()[0] )\n trainrmse = calculate_rmse( prediction[training_data.nonzero()], training_data[training_data.nonzero()].toarray()[0] )\n \n return prediction, trainrmse, testrmse\n ","sub_path":"project 2 Recommendation system for Netflix/SurpriseLib.py","file_name":"SurpriseLib.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"518912128","text":"from collections import Counter\n\n\ndef detect_anagrams0(word, candidates):\n word_lower, len_word, anagrams = word.lower(), len(word), []\n for candidate in candidates:\n if len_word != len(candidate):\n continue\n word_ = '%s%s' % (word_lower, word.upper())\n anagram = ''.join([w for w in candidate if w in word_])\n anagram_lower, anagram_len = anagram.lower(), len(anagram)\n\n if anagram_len == len_word and anagram_lower != word_lower:\n anagram_count = Counter(anagram_lower)\n word_count = Counter(word_lower)\n if all(anagram_count[a] == word_count[a] for a in anagram_lower):\n anagrams.append(anagram)\n\n return anagrams\n\n\ndef detect_anagrams(word, candidates):\n word_ = word.lower()\n return [x for x in candidates\n if x.lower() != word_ and sorted(x.lower()) == sorted(word_)]\n","sub_path":"python/anagram/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"82517050","text":"import inspect\nimport DebugFS\nimport globalLists\nimport os\n\ndef functionInfo(func,level):\n\tfuncInfo=''\n\tf=inspect.getargspec(func)\n\tfuncInfo += '\\t'*level+\"Argument List : \"+str(f[0])+\"\\n\"\n\tfuncInfo += '\\t'*level+\"Variable Argument List : \"+str(f[1])+\"\\n\"\n\tfuncInfo += '\\t'*level+\"Keyword Argument List : \"+str(f[2])+\"\\n\"\n\tfuncInfo += '\\t'*level+\"Default Values List : \"+str(f[3])+\"\\n\"\n\treturn funcInfo\n\n\ndef formatFrameMetaData(Glo):\n\tframeInfo=''\n\tframeInfo+='Package Name : '+str(Glo['__package__'])+\"\\n\"\n\tframeInfo+='File Name : '+str(Glo['__file__'])+\"\\n\"\n\tframeInfo+='Module DocString : '+str(Glo['__doc__'])+\"\\n\"\n\tframeInfo+='Module NameSpace : 
'+str(Glo['__name__'])+'\\n'\n\treturn frameInfo\n\ndef inspectPreviousFrames():\n\tframe=inspect.currentframe()\n\tframesinfo=inspect.getouterframes(frame)\n\tframesInfo=[]\n\tfor frames in framesinfo:\n\t\tframeInfo=''\n\t\tframe=frames[0]\n\t\tGlo=frame.f_globals\n\t\tif Glo['__name__']!=__name__:\n\n\t\t\tframeInfo+=formatFrameMetaData(Glo)\n\n\t\t\tglobalLists.initialize()\n\n\t\t\tfor f in Glo:\n\t\t\t\tif f not in ['__builtins__','__package__','__file__','__doc__','__name__']:\n\t\t\t\t\tif inspect.isfunction(Glo[f]):\n\t\t\t\t\t\tfuncInfo=''\n\t\t\t\t\t\tfuncInfo+=str(f)+\" : \"+str(Glo[f])+\"\\n\"+functionInfo(Glo[f],level=1)+\"\\n\"\n\t\t\t\t\t\tglobalLists.functions.append(funcInfo)\n\t\t\t\t\telif inspect.isclass(Glo[f]):\n\t\t\t\t\t\tclassInfo=''\n\t\t\t\t\t\tclassInfo+=str(f)+\" : \"+str(Glo[f])+\"\\n\"\n\t\t\t\t\t\tglobalLists.classes.append(classInfo)\n\t\t\t\t\telif inspect.ismodule(Glo[f]):\n\t\t\t\t\t\tmoduleInfo=''\n\t\t\t\t\t\tmoduleInfo+=str(f)+\" : \"+str(Glo[f])+\"\\n\"\n\t\t\t\t\t\tglobalLists.modules.append(moduleInfo)\n\t\t\t\t\telif inspect.isbuiltin(Glo[f]):\n\t\t\t\t\t\tbuiltinInfo=''\n\t\t\t\t\t\tbuiltinInfo+=str(f)+\" : \"+str(Glo[f])+\"\\n\"\n\t\t\t\t\t\tglobalLists.builtins.append(builtinInfo)\n\t\t\t\t\telse:\n\t\t\t\t\t\tglobalLists.variables.append(str(f)+\" : \"+str(Glo[f])+\"\\n\")\n\n\t\t\tglobalLists.sortLists()\n\n\t\t\tframeInfo+=globalLists.formatListData()\n\t\t\tframesInfo.append(frameInfo)\n\n\tfinalFrameInfo=''\n\tfor framesInfor in framesInfo:\n\t\tfinalFrameInfo+=framesInfor\n\treturn finalFrameInfo\n\n\nclass DInit:\n\tdef __init__(self,argv):\n\t\tself.mountpoint=os.path.abspath(argv)\n\t\tself.period=10\n\n\tdef initialize(self):\n\t\tos.system('python DebugFS.py \"'+self.mountpoint+'\"')\n\n\tdef writeCurrentState(self):\n\t\tself.filename='currentState.frInfo'\n\t\tif self.mountpoint[-1]!='/':\n\t\t\tself.absfilename=self.mountpoint+'/'+self.filename\n\t\telse:\n\t\t\tself.absfilename=self.mountpoint+self.filename\n\t\tself.currentStateFrameInfo=inspectPreviousFrames()\n\t\tfhandle=open(self.absfilename,\"w\")\n\t\tfhandle.write(self.currentStateFrameInfo)\n\t\tfhandle.close()\n\n\n\tdef closeVFS():\n\t\tos.system(\"fusermount -u \"+self.mountpoint)\n","sub_path":"DebugInit.py","file_name":"DebugInit.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257546413","text":"import sys\nimport time\n\n\ndef waiting_bar(dots):\n # Check if dots is integer\n if not isint(dots):\n dots = 5\n\n for i in range(0, dots):\n print(\".\", end=\"\")\n time.sleep(0.8)\n sys.stdout.flush()\n print(\"\")\n\n\ndef isint(value):\n if isinstance(value, int):\n return True\n\n\ndef print_choices():\n # Please don't forget to update the range in menu selector\n # in case adding another option here.\n # Row to be updated looks like this: while choice in (0, 3):\n print(\"\\nHere are the available reports: \")\n time.sleep(0.1)\n print(\"1 - Most popular three articles of all time\")\n time.sleep(0.1)\n print(\"2 - Most popular three article authors of all time\")\n time.sleep(0.1)\n print(\"3 - Days which more than 1% of request lead to an error\")\n time.sleep(0.1)\n print(\"0 - Exit\\n\")\n\n\ndef leave(conn):\n conn.close()\n sys.exit()\n","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
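Stepping back to get_splib_predictions in SurpriseLib.py above: the docstring pins down a per-algorithm kwargs contract, but the module shows no call site. A hypothetical calling sketch, assuming scipy sparse rating matrices shaped like the hard-coded 10000x1000 prediction grid and keeping the module's own key spellings ('n_neigbor', 'min_neigbor', and so on):

import scipy.sparse as sp

# Synthetic 10000x1000 user-item matrices (illustrative values only); the shape
# matches the prediction grid hard-coded in the svd/KNN/cluster helpers.
train = sp.random(10000, 1000, density=0.01, format='csr') * 5
test = sp.random(10000, 1000, density=0.001, format='csr') * 5

svd_kwargs = {'k_features': 20, 'maxiter': 30, 'lr_pu': 0.005, 'lr_qi': 0.005,
              'reg_bu': 0.02, 'reg_qi': 0.02, 'random_seed': 42}
prediction, train_rmse, test_rmse = get_splib_predictions('svd', train, test, svd_kwargs)

knn_kwargs = {'n_neigbor': 40, 'min_neigbor': 1, 'similarity': 'pearson_baseline'}
prediction, train_rmse, test_rmse = get_splib_predictions('knn', train, test, knn_kwargs)

# The cluster branch indexes cluster_u[0] / cluster_i[0], so these come as lists.
cl_kwargs = {'user_cluster': [3], 'item_cluster': [3], 'maxiter': 20, 'random_seed': 42}
prediction, train_rmse, test_rmse = get_splib_predictions('cluster', train, test, cl_kwargs)

Each branch fills the grid one algo.predict call at a time, so a full run issues ten million predictions per algorithm; batching with surprise's algo.test over a prebuilt id list would likely be much faster.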
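One small fix-up for helper_functions.py above: isint returns True or falls through to an implicit None, and since bool is a subclass of int, isinstance(True, int) also holds, so waiting_bar(True) would slip past the guard. A tightened sketch:

def isint(value):
    # bool subclasses int in Python, so rule it out explicitly
    return isinstance(value, int) and not isinstance(value, bool)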
+{"seq_id":"66341669","text":"import pandas\n\ndef merge_datasets(DCC_list, DCC_dir, CE2_list, read_coverage_list, out_dir):\n\n# Where:\n# DCC_list = a .txt file of just the file names of each DCC circRNA file\n# DCC_dir = The full path to the directory where those files are located\n# CE2_list = .txt file with the full path to CIRCexplorer2 output files. Each line must match the samples in the DCC_list\n# read_coverage = .txt file with each line matching the order of samples in the DCC and CE2 files\n# out_dir = The directory where you want the results stored\n\n DCC_filenames = open(DCC_list).read().splitlines()\n CE2_filepath = open(CE2_list).read().splitlines()\n read_coverage = open(read_coverage_list).read().splitlines()\n\n\n for i, j, k in zip(DCC_filenames, CE2_filepath, read_coverage):\n print(i)\n x = pandas.read_csv(DCC_dir+i, sep='\\t', header=None)\n y = pandas.read_csv(j, sep='\\t', header=None)\n\n # Adding 1 to the circRNA start column of CIRCexplorer2 to compensate for different coordinate systems\n y[1] += 1\n\n # Making the circRNA_ID column\n y[0] = y.apply(lambda x:'%s:%s' % (x[0],x[1]),axis=1)\n y[0] = y.apply(lambda x:'%s-%s' % (x[0],x[2]),axis=1)\n\n # Merging The dataframe\n merged_df = pandas.merge(x, y, on=0)\n\n merged_df['1_x'] = pandas.to_numeric(merged_df['1_x'])\n\n merged_df[12] = pandas.to_numeric(merged_df[12])\n\n merged_df[18] = ((merged_df['1_x'] + merged_df[12])/2)\n\n merged_df['CPM'] = ((merged_df[18]/int(k))*10**6)\n\n merged_df = merged_df[merged_df.CPM >= 0.1]\n\n print(merged_df[merged_df.duplicated(subset=0)])\n\n merged_df.to_csv(out_dir+i, sep='\\t', index=False)\n\n\n","sub_path":"merge_datasets.py","file_name":"merge_datasets.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"131627204","text":"import os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \r\nimport numpy as np\r\nimport pandas as pd\r\nimport librosa\r\nimport csv\r\n\r\nfrom commonFunctions import musicFeatureExtraction\r\nfrom commonFunctions import displayVAgraph\r\n\r\n# function to parse CSV file of valence and arousal\r\ndef parseDEAM(pathEmotions):\r\n emotions = pd.read_csv(pathEmotions, index_col=0, sep=',')\r\n return emotions\r\n\r\nsubfolder = 'Dataset/'\r\n\r\n\r\nemotions = parseDEAM(subfolder+'emotions/filtered_annotations_vse.csv') # filtered_annotations_vse valence_arousal_vse_normalized\r\nprint(emotions['valence_mean'])\r\n\r\nfeaturesdf = pd.read_pickle(subfolder+'pickle/exported_features_valence_arousal_normalized.pkl')\r\n\r\n\r\nmypath = subfolder+'metadata/'\r\nwith open(mypath+'allGeneres.csv', newline='', encoding='utf-8') as csvfile:\r\n fileReader = pd.read_csv(csvfile, sep=',')\r\n ids = fileReader.values[:,0]\r\n allGeneres = fileReader.values[:,1]\r\n\r\nmyset = set(allGeneres)\r\nprint(myset)\r\n\r\nnumberGenere = []\r\nfor genere in allGeneres:\r\n numberGenere.append()\r\n\r\nfeaturesdf_new = featuresdf.drop(columns=['valence', 'arousal'])\r\n\r\nfeaturesdf_new['genere'] = allGeneres\r\nfeaturesdf_new['valence'] = emotions['valence_mean'].tolist()\r\nfeaturesdf_new['arousal'] = emotions['arousal_mean'].tolist()\r\nprint(featuresdf_new)\r\n\r\nfeaturesdf_new.to_pickle(subfolder+'pickle/features_genere_valence_arousal.pkl')","sub_path":"combining_features.py","file_name":"combining_features.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"179143898","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2013-2015 Open Business Solutions, SRL.\n# Write by Ernesto Mendez (tecnologia@obsdr.com)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import osv, fields\nimport base64\nfrom openerp.tools.translate import _\nimport time\nfrom openerp import tools\nimport pdb\nfrom openerp.exceptions import Warning\nimport os\n\n#_logger = logging.getLogger(__name__)\n\nclass bpd_payroll_report(osv.Model):\n _name = 'bdp.payslip.run.report'\n _description = 'Extraccion Archivo TXT Nomina Banco BPD'\n\n def _line_count(self, cr, uid, ids, context=None):\n bpd_payslip_run_obj = self.pool.get('bdp.payslip.run.report')\n payslip_run_report = bpd_payslip_run_obj.browse(cr, uid, ids, context=context)[0]\n return len(payslip_run_report.payslip_run_line_report_ids)\n\n def _get_updated_fields(self, cr, uid, ids, context=None):\n vals = {}\n vals['company_id'] = 1\n vals['line_count'] = self._line_count(cr, uid, ids, context=context)\n return vals\n\n _columns = {\n #'name': fields.char('Nombre'),\n 'company_id': fields.many2one('res.company', u'Compañia', required=True),\n 'payslip_run_id': fields.many2one('hr.payslip.run', u'Lote de Nomina', required=True,),\n 'line_count': fields.integer(u\"Total de registros\", readonly=True),\n 'report': fields.binary(u\"Reporte\", readonly=True),\n #'subsidiary_id': fields.many2one('res.company.subsidiary', 'Subsidiaria', required=True, readonly=True),\n 'report_name': fields.char(u\"Nombre de Reporte\", 40, readonly=True),\n 'payslip_run_line_report_ids': fields.one2many('bpd.payslip.run.line.report', 'payslip_run_report_id', u'Lineas'),\n 'state': fields.selection((('draft','Pendiente'),('sent','Enviado'),('cancel','Cancel')), 'State', readonly=True),\n\n 'type' : fields.selection((('01', 'Nomina Automatica'), ('02', 'Pago Suplidores'), ('03', 'Cobros Automaticos'), \n ('04', 'Pago Prestamo'), ('05', 'Pago Tarjeta'), ('06', 'Transferencia a Cta.')), default='01', string='Tipo de Servicio'),\n 'effective_date': fields.date('Fecha Efectiva', help='Fecha Futura cuando se aplicaran los pagos', required=True),\n 'numero_afiliacion': fields.char('Numero de Afiliacion', help='Numero de Afiliacion a CARDNAT'),\n 'email': fields.char('Email', 40,help='Correo Electronico para recibir archivo y reporte del fin del proceso'),\n 'num_cuenta': fields.integer('Numero de Cuenta', digits=1, help='Cuenta que la empresa usara para DB/CR [1/9]'),\n }\n\n _defaults = {\n 'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,\n 'state': 'draft',\n }\n\n def create(self, cr, uid, values, context=None):\n\n res = super(bpd_payroll_report, self).create(cr, uid, values, context=context)\n\n # Loads all purchases\n 
self.create_payslip_run_line(cr, uid, res, values['payslip_run_id'], context=context)\n\n # Update readonly fields\n vals = self._get_updated_fields(cr, uid, [res], context=None)\n self.write(cr, uid, [res], vals)\n return res\n\n def re_create_payslip_run_line(self, cr, uid, ids, context=None):\n lines_obj = self.pool.get('bpd.payslip.run.line.report')\n report = self.browse(cr, uid, ids[0])\n line_ids = [line.id for line in report.payslip_run_line_report_ids]\n lines_obj.unlink(cr, uid, line_ids)\n\n result = self.create_payslip_run_line(cr, uid, report.id, report.payslip_run_id.id, context=context)\n\n vals = self._get_updated_fields(cr, uid, ids, context=None)\n self.write(cr, uid, ids, vals)\n\n return result\n\n def create_payslip_run_line(self, cr, uid, payslip_run_report_id, payslip_run_id, context=None):\n \n #payslip_run_id = self.browse(cr, uid, payslip_run_id)\n hr_payslip_obj = self.pool.get('hr.payslip')\n hr_payslip_run_obj = self.pool.get('hr.payslip.run')\n hr_payslip_line_obj = self.pool.get('hr.payslip.line')\n bpd_payslip_run_line_obj = self.pool.get('bpd.payslip.run.line.report')\n hr_salary_rule_category_obj = self.pool.get('hr.salary.rule.category')\n\n #draft_payslip_ids = hr_payslip_obj.search(cr, uid, [(\"state\", \"not in\", [\"draft\", \"verify\", \"cancel\"]),(\"payslip_run_id\", \"=\", payslip_run_id)])\n draft_payslip_ids = hr_payslip_obj.search(cr, uid, [(\"state\", \"in\", [\"draft\", \"verify\", \"cancel\"]),(\"payslip_run_id\", \"=\", payslip_run_id)])\n #if draft_payslip_ids:\n # raise osv.except_osv(_(u'Nominas en Borrador o en Espera de Verificacion!'), _(u\"Asegúrese que todas las nominas de este lote esten validadas.\"))\n\n payslip_ids = hr_payslip_obj.search(cr, uid, [(\"state\", \"in\", [\"done\",'draft']),(\"payslip_run_id\", \"=\", payslip_run_id)])\n #category_id = hr_salary_rule_category_obj.search\n\n sequence = 1\n line = 1\n warnings = 0\n names = []\n \n for payslip_id in payslip_ids:\n payslip = hr_payslip_obj.browse(cr, uid, payslip_id)\n if payslip.employee_id.bank_account_id:\n continue\n if not payslip.employee_id.bank_account_id:\n name = str(payslip.employee_id.name).decode(\"utf-8\") + ' ' + str(payslip.employee_id.first_lastname).decode(\"utf-8\") + ' ' + str(payslip.employee_id.second_lastname).decode(\"utf-8\")\n \n names.append(name)\n #raise Warning(_('Error !'), _('Uno o algunos de los empleados en esta nomina no posee cuenta bancaria asignada. Por favor revise!'), names) \n #else:\n # pass\n\n #if names:\n # raise Warning(_('Error !'), _('Uno o algunos de los empleados en esta nomina no posee cuenta bancaria asignada. Por favor revise!'), names)\n\n for payslip_id in payslip_ids:\n\n payslip = hr_payslip_obj.browse(cr, uid, payslip_id) \n #if not payslip.employee_id.bank_account_id:\n # name = str(payslip.employee_id.names) + ' ' + str(payslip.employee_id.first_lastname) + ' ' + str(payslip.employee_id.second_lastname)\n # raise Warning(_('Error !'), _('Uno o algunos de los empleados en esta nomina no posee cuenta bancaria asignada. 
Por favor revise!'), name)\n\n slip_ids = hr_payslip_line_obj.search(cr, uid, [(\"slip_id\", \"=\", payslip.id),(\"category_id.code\",\"=\",\"NET\")])\n\n LEYENDA = 'Pago'\n MONTO = 0.00\n num_cuenta = str(payslip.employee_id.bank_account_id.acc_number)\n acc_number0 = str(num_cuenta.replace('-', ''))\n acc_number1 = str(acc_number0)[:7] + '-' + str(acc_number0)[7:]\n acc_number2 = str(acc_number1)[:11] + '-' + str(acc_number0)[10:]\n\n for slip_line in hr_payslip_line_obj.browse(cr, uid, slip_ids):\n MONTO += slip_line.amount\n\n values = {\n\n #u'NUM_CUENTA':payslip.employee_id.bank_account_id.acc_number,\n u'NUM_CUENTA': acc_number2,\n u'NOMBRE': payslip.employee_id.name,\n u'SECUENCIA': sequence,\n u'MONTO': abs(MONTO),\n u'LEYENDA': LEYENDA,\n u'line': line,\n u'payslip_run_report_id': payslip_run_report_id\n }\n\n sequence += 1\n line += 1\n\n bpd_payslip_run_line_obj.create(cr, uid, values, context=context)\n #self.action_generate_txt(cr, uid, payslip_run_report_id, context=context)\n return True\n\n def action_generate_txt(self, cr, uid, ids, context=None):\n obj = self.pool.get('bdp.payslip.run.report').browse(cr, uid, ids)\n employee_obj = self.pool.get('hr.employee') \n COMP = obj.company_id\n IDCOMP = COMP.vat.replace('-','').zfill(15)\n \n import datetime\n \n hoy = str(datetime.datetime.now())\n YY = hoy[:4]\n MM = hoy[5:7]\n DD = hoy[8:10]\n HH = hoy[11:13]\n MI = hoy[14:16]\n \n FULL = YY+MM+DD\n \n SEQ = self.pool.get('ir.sequence').get(cr, uid, 'bpd')\n TS = obj.type\n \n name_report = 'PE{N}{TS}{MM}{DD}{SEQ}E'.format(N='01224',TS=TS, MM=MM, DD=DD, SEQ=SEQ)\n \n \n path = '/opt/odoo/{name}.txt'.format(name=name_report)\n f = open(path,'w')\n \n #f.write('{}\\r\\n'.format(HEADER))\n \n #Report header\n header_obj = self.pool.get('bdp.payslip.run.report')\n header = header_obj.browse(cr, uid, ids, context=context)\n\n document_date_start = str(header.payslip_run_id.date_start)\n document_date_end = str(header.payslip_run_id.date_end)\n document_header = document_date_start + '_' + document_date_end\n\n newline = os.linesep\n \n res_obj = self.pool.get('res.partner.bank')\n \n cod_bancos = {\n 'Banco Popular Dominicano': '10101070',\n 'Banco Popular ': '10101070',\n 'Banco de Reservas': '10101010',\n 'Banco del Progreso': '10101110',\n 'Banco BHD': '10101230',\n 'Banco BHD Leon': '10101230',\n 'Banco Leon': '10101370',\n 'Banco Santa Cruz': '10101340',\n 'Citibank': '10101060',\n 'Scotiabank': '10101030',\n 'BDI': '10101360',\n 'Banco Lope de Haro': '10101390',\n 'Banco Vimeca': '10101380',\n 'Banco Promerica': '44405900',\n 'Asociacion Popular de Ahorros y Prestamos': '47940900',\n 'Banco Caribe': '10101350', \n }\n \n \n # Report Detail Lines\n CC = 0 # Cantidad Credito \n MC = 0 # Monto Credito\n lines = '' # Almacena todas las lineas para hacer un solo WRITE al archivo\n for line in header.payslip_run_line_report_ids:\n emp_id = employee_obj.search(cr, uid, [('name','=like', line.NOMBRE)])\n emp = employee_obj.browse(cr, uid, emp_id)\n \n ced = emp.identification_id\n pasap = emp.passport_id\n \n TD = {\n 'ID': ced if ced else pasap if pasap else '0'.zfill(15),\n 'TIPO': 'CE' if ced else 'PS' if pasap else 'OT'\n }\n \n bnk = emp.bank_account_id.bank_name\n digi_bnk = emp.bank_account_id.bank_check_digit\n CODBNK = cod_bancos[bnk] if bnk in cod_bancos else cod_bancos['Banco Popular Dominicano']\n cod_ope = emp.bank_account_id.cod_operation\n tipo_cuenta = emp.bank_account_id.tipo_cuenta\n moneda = emp.bank_account_id.moneda\n \n \n CC += 1\n MC += line.MONTO \n \n 
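            # Fixed-width detail fields: the account is stripped of dashes and\n            # zero-padded to 20, the amount is rendered with two decimals, the dot\n            # removed (cents) and padded to 13, and the sequence padded to 7, all\n            # feeding the 'N...' record template below.\n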
CUENTAD = str(line.NUM_CUENTA).replace('-','').zfill(20)\n name = str(line.NOMBRE).upper()\n SECTRANS = str(line.SECUENCIA).zfill(7)\n MONTO = str('%.2f' % line.MONTO).replace('.','').zfill(13)\n legend = str(line.LEYENDA)\n \n line = \"N{IDCOMP}{SEC}{SECTRANS}{CUENTAD}{TipoCuenta}{MONEDA}{CODBNK}{DIGIBNK}{CODOP}{MONTO}{TIPOID}{ID}{NOMBRE}\".format(\n IDCOMP=IDCOMP, \n SEC=SEQ, \n SECTRANS=SECTRANS,\n CUENTAD=CUENTAD, \n TipoCuenta=tipo_cuenta,\n MONEDA=moneda, \n CODBNK=CODBNK, \n DIGIBNK=digi_bnk, \n CODOP=cod_ope,\n MONTO=MONTO, \n TIPOID=TD['TIPO'],\n ID=TD['ID'].replace('-', '').zfill(15),\n NOMBRE=emp.name,\n )\n \n lines += line + '\\r\\n'\n #f.write('{}\\r\\n'.format(line))\n HEADER = 'H{RNC}{NOMCOMP}{SEC}{TIPOSERV}{FECEFECT}{CD}{MD}{CC}{MC}{NAFILIA}{FEC}{HOR}{EMAIL}{CUENTA}'.format(\n RNC=IDCOMP, NOMCOMP=COMP.name, SEC=SEQ, TIPOSERV=obj.type, FECEFECT=obj.effective_date.replace('-',''),\n CD='0'.zfill(11), MD='0'.zfill(13), CC=str(CC).zfill(11), MC=str(MC).replace('.','').zfill(13), \n NAFILIA=obj.numero_afiliacion or '0'.zfill(15), FEC=FULL, HOR=HH+MI, EMAIL=obj.email, CUENTA=obj.num_cuenta,\n )\n \n f.write('{}\\r\\n'.format(HEADER))\n f.write(lines)\n \n f.close()\n\n f = open(path,'rb')\n report = base64.b64encode(f.read())\n f.close()\n report_name = name_report + '.txt'\n self.write(cr, uid, ids, {'report': report, 'report_name': report_name})\n return True\n\nclass bpd_payroll_report_line(osv.Model):\n _name = 'bpd.payslip.run.line.report'\n _order = 'line'\n\n _columns = {\n u'line': fields.integer(u'Linea'),\n u'NUM_CUENTA': fields.char(u'Numero de Cuenta', 13, required=False),\n u'NOMBRE': fields.char(u'Nombre Beneficiario', required=False),\n u'SECUENCIA': fields.integer(u'Secuencia', required=False),\n u'MONTO': fields.float(u'Monto a Pagar'),\n u'LEYENDA': fields.char(u'Leyenda'),\n u'payslip_run_report_id': fields.many2one('bdp.payslip.run.report')\n\n }\n","sub_path":"extras/l10n_do_bpd_txt_file/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"650682814","text":"import copy\nimport html\nimport json\nfrom os.path import join\nimport platform\n\nfrom bardolph.lib.i_lib import Settings\nfrom bardolph.lib.injection import inject, injected\nfrom bardolph.lib.job_control import JobControl\n\nfrom bardolph.controller.script_job import ScriptJob\nfrom bardolph.controller.snapshot import ScriptSnapshot, TextSnapshot\n\n\nclass ScriptControl:\n def __init__(self, file_name, run_background=False, title='', path='',\n background='', color='', icon=''):\n self.file_name = html.escape(file_name)\n self.run_background = run_background\n self.path = html.escape(path)\n self.title = html.escape(title)\n self.background = html.escape(background)\n self.color = html.escape(color)\n self.icon = icon\n self.running = None\n\nclass WebApp:\n \"\"\"\n The URL path for a script is also the name of the job for job_control.\n \"\"\"\n\n def __init__(self):\n self._scripts = {}\n self._jobs = JobControl()\n self._load_manifest()\n\n @inject(Settings)\n def _load_manifest(self, settings=injected):\n # If manifest_name is explicitly None, don't attempt to load a file.\n basename = settings.get_value('manifest_file_name', 'manifest.json')\n if basename is None:\n return\n fname = join('web', basename)\n config_list = json.load(open(fname))\n self._scripts = {}\n for script_config in config_list:\n file_name = script_config['file_name']\n run_background = 
script_config.get('run_background', False)\n title = self.get_script_title(script_config)\n path = self.get_script_path(script_config)\n background = script_config['background']\n color = script_config['color']\n icon = script_config.get('icon', 'litBulb')\n new_script = ScriptControl(file_name, run_background, title, path,\n background, color, icon)\n self._scripts[path] = new_script\n\n @inject(Settings)\n def queue_script(self, script_control, settings=injected):\n fname = join(\n settings.get_value(\"script_path\", \".\"), script_control.file_name)\n job = ScriptJob.from_file(fname)\n if script_control.run_background:\n self._jobs.spawn_job(job, script_control.path)\n else:\n self._jobs.add_job(job, script_control.path)\n return True\n\n def queue_file(self, file_name, run_background=False):\n # Use the file name for the title.\n self.queue_script(\n ScriptControl(file_name, file_name, run_background))\n\n def get_script_control(self, path) -> ScriptControl:\n script_control = self._scripts.get(path, None)\n if script_control is not None:\n script_control = copy.copy(script_control)\n script_control.running = self._jobs.is_running(script_control.path)\n return script_control\n\n def get_script_list(self) -> [ScriptControl]:\n result = []\n for script in self._scripts.values():\n script = copy.copy(script)\n script.running = self._jobs.is_running(script.path)\n result.append(script)\n return result\n\n def get_status(self):\n status = {\n 'background_jobs': self._jobs.get_background(),\n 'current_job': self._jobs.get_current(),\n 'queued_jobs': self._jobs.get_queued(),\n 'lights': TextSnapshot().generate().text,\n 'py_version': platform.python_version()\n }\n return status\n\n @inject(Settings)\n def get_path_root(self, settings=injected):\n return settings.get_value('path_root', '/')\n\n def get_script_title(self, script_config):\n title = script_config.get('title', '')\n if len(title) == 0:\n name = self.get_script_path(script_config)\n spaced = name.replace('_', ' ').replace('-', ' ')\n title = spaced.title()\n return title\n\n def get_script_path(self, script_config):\n path = script_config.get('path', '')\n if len(path) == 0:\n path = script_config['file_name']\n if path[-3:] == \".ls\":\n path = path[:-3]\n return path\n\n def stop_script(self, path) -> bool:\n return self._jobs.stop_job(path)\n\n def stop_current(self) -> bool:\n return self._jobs.stop_current()\n\n def stop_all(self) -> bool:\n self._jobs.clear_queue()\n result1 = self._jobs.stop_current()\n result2 = self._jobs.stop_background()\n return result1 and result2\n\n @inject(Settings)\n def snapshot(self, settings=injected):\n output_name = join(\n settings.get_value('script_path', '.'), '__snapshot__.ls')\n out_file = open(output_name, 'w')\n out_file.write(ScriptSnapshot().generate().text)\n out_file.close()\n","sub_path":"web/web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392138606","text":"import rlkit.misc.hyperparameter as hyp\nfrom multiworld.envs.mujoco.cameras import init_sawyer_camera_v1, \\\n init_sawyer_camera_v2, init_sawyer_camera_v3, init_sawyer_camera_v4\nfrom multiworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place import \\\n SawyerPickAndPlaceEnv\nfrom multiworld.envs.pygame.point2d import Point2DEnv\nfrom rlkit.envs.mujoco.sawyer_push_and_reach_env import (\n SawyerPushAndReachXYEasyEnv\n)\nfrom rlkit.images.camera import (\n sawyer_init_camera_zoomed_in_fixed,\n 
sawyer_init_camera_zoomed_in,\n)\nfrom multiworld.envs.mujoco.sawyer_xyz.sawyer_reach import (\n SawyerReachXYEnv, SawyerReachXYZEnv\n)\nfrom multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env import (\n SawyerPushAndReachXYEnv\n)\nfrom rlkit.launchers.launcher_util import run_experiment\nfrom rlkit.torch.grill.launcher import grill_her_td3_full_experiment\nfrom rlkit.torch.vae.sawyer2d_push_variable_data import generate_vae_dataset\n\nif __name__ == \"__main__\":\n variant = dict(\n # env_class=SawyerReachXYEnv,\n env_class=SawyerPushAndReachXYEnv,\n # env_class=SawyerPickAndPlaceEnv,\n # env_class=Point2DEnv,\n env_kwargs=dict(\n hide_goal_markers=True,\n # puck_low=(-0.05, 0.6),\n # puck_high=(0.05, 0.7),\n puck_low=(-0.2, 0.5),\n puck_high=(0.2, 0.7),\n hand_low=(-0.2, 0.5, 0.),\n hand_high=(0.2, 0.7, 0.5),\n mocap_low=(-0.1, 0.5, 0.),\n mocap_high=(0.1, 0.7, 0.5),\n goal_low=(-0.05, 0.55, 0.02, -0.2, 0.5),\n goal_high=(0.05, 0.65, 0.02, 0.2, 0.7),\n ),\n init_camera=init_sawyer_camera_v4,\n grill_variant=dict(\n algo_kwargs=dict(\n num_epochs=250,\n # num_steps_per_epoch=100,\n # num_steps_per_eval=100,\n # num_epochs=500,\n num_steps_per_epoch=1000,\n num_steps_per_eval=1000,\n tau=1e-2,\n batch_size=128,\n max_path_length=100,\n discount=0.99,\n num_updates_per_env_step=4,\n ),\n replay_kwargs=dict(\n max_size=int(1e6),\n fraction_goals_are_rollout_goals=0.2,\n fraction_resampled_goals_are_env_goals=0.5,\n ),\n vae_wrapped_env_kwargs=dict(\n num_goals_presampled=100,\n ),\n algorithm='GRILL-HER-TD3',\n normalize=False,\n render=False,\n exploration_noise=0.2,\n exploration_type='ou',\n training_mode='train',\n testing_mode='test',\n reward_params=dict(\n type='latent_distance',\n ),\n observation_key='latent_observation',\n desired_goal_key='latent_desired_goal',\n # vae_path='06-25-pusher-state-puck-reward-cached-goals-hard-2/06-25-pusher-state-puck-reward-cached-goals-hard-2-id0-s48265/vae.pkl',\n # vae_path=\"05-23-vae-sawyer-variable-fixed-2/05-23-vae-sawyer-variable-fixed-2_2018_05_23_16_19_33_0000--s-293-nImg-1000--cam-sawyer_init_camera_zoomed_in_fixed/params.pkl\",\n vae_path=\"06-28-train-vae-beta-5-push-and-reach-cam4-p15-range/06-28-train-vae-beta-5-push-and-reach-cam4-p15-range_2018_06_28_11_48_04_0000--s-80805/params.pkl\",\n ),\n train_vae_variant=dict(\n representation_size=16,\n beta=1.0,\n num_epochs=1000,\n generate_vae_dataset_kwargs=dict(\n N=1000,\n oracle_dataset=True,\n num_channels=3,\n # show=True,\n # use_cached=False,\n ),\n algo_kwargs=dict(\n do_scatterplot=False,\n lr=1e-3,\n ),\n vae_kwargs=dict(\n input_channels=3,\n ),\n beta_schedule_kwargs=dict(\n x_values=[0, 100, 200, 500],\n y_values=[0, 0, 1, 1],\n ),\n save_period=5,\n ),\n version='old-gripper',\n )\n\n search_space = {\n 'hand-goal-space': ['easy', 'hard'],\n 'mocap-x-range': ['0.1', '0.2'],\n # 'grill_variant.training_mode': ['test'],\n # 'grill_variant.observation_key': ['latent_observation'],\n # 'grill_variant.desired_goal_key': ['state_desired_goal'],\n # 'grill_variant.observation_key': ['state_observation'],\n # 'grill_variant.desired_goal_key': ['latent_desired_goal'],\n # 'grill_variant.vae_paths': [\n # {\"16\": \"/home/vitchyr/git/rlkit/data/doodads3/06-12-dev/06-12\"\n # \"-dev_2018_06_12_18_57_14_0000--s-28051/vae.pkl\",\n # }\n # ],\n # 'grill_variant.vae_path': [\n # \"/home/vitchyr/git/rlkit/data/doodads3/06-14-dev/06-14-dev_2018_06_14_15_21_20_0000--s-69980/vae.pkl\",\n # ]\n }\n sweeper = hyp.DeterministicHyperparameterSweeper(\n search_space, 
default_parameters=variant,\n )\n\n mode = 'local'\n exp_prefix = 'dev'\n\n # mode = 'ec2'\n # exp_prefix = 'dev'\n # exp_prefix = 'mw-full-grill-her-is-it-the-floor'\n # exp_prefix = 'mw-full-grill-tdm-is-it-action-scale'\n for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):\n if variant['hand-goal-space'] == 'easy':\n variant['env_kwargs']['goal_low'] = (-0.05, 0.55, 0.02, -0.2, 0.5)\n variant['env_kwargs']['goal_high'] = (0.05, 0.65, 0.02, 0.2, 0.7)\n else:\n variant['env_kwargs']['goal_low'] = (-0.2, 0.5, 0.02, -0.2, 0.5)\n variant['env_kwargs']['goal_high'] = (0.2, 0.7, 0.02, 0.2, 0.7)\n if variant['mocap-x-range'] == '0.1':\n variant['env_kwargs']['mocap_low'] = (-0.1, 0.5, 0.)\n variant['env_kwargs']['mocap_high'] = (0.1, 0.7, 0.5)\n else:\n variant['env_kwargs']['mocap_low'] = (-0.2, 0.5, 0.)\n variant['env_kwargs']['mocap_high'] = (0.2, 0.7, 0.5)\n run_experiment(\n grill_her_td3_full_experiment,\n exp_prefix=exp_prefix,\n mode=mode,\n variant=variant,\n use_gpu=True,\n # snapshot_gap=50,\n snapshot_mode='last',\n exp_id=exp_id,\n num_exps_per_instance=3,\n )\n","sub_path":"experiments/vitchyr/nips2018/full_her_grill.py","file_name":"full_her_grill.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"318362662","text":"# MRD Helper functions\nimport ismrmrd\nimport re\nimport base64\n\ndef update_img_header_from_raw(imgHead, rawHead):\n \"\"\"Populate ImageHeader fields from AcquisitionHeader\"\"\"\n\n if rawHead is None:\n return imgHead\n\n # # These fields are not translated from the raw header, but filled in\n # # during image creation by from_array\n # imgHead.data_type = \n # imgHead.matrix_size = \n # imgHead.channels = \n\n # # This is mandatory, but must be filled in from the XML header, \n # # not from the acquisition header\n # imgHead.field_of_view = \n\n imgHead.version = rawHead.version\n imgHead.flags = rawHead.flags\n imgHead.measurement_uid = rawHead.measurement_uid\n\n imgHead.position = rawHead.position\n imgHead.read_dir = rawHead.read_dir\n imgHead.phase_dir = rawHead.phase_dir\n imgHead.slice_dir = rawHead.slice_dir\n imgHead.patient_table_position = rawHead.patient_table_position\n\n imgHead.average = rawHead.idx.average\n imgHead.slice = rawHead.idx.slice\n imgHead.contrast = rawHead.idx.contrast\n imgHead.phase = rawHead.idx.phase\n imgHead.repetition = rawHead.idx.repetition\n imgHead.set = rawHead.idx.set\n\n imgHead.acquisition_time_stamp = rawHead.acquisition_time_stamp\n imgHead.physiology_time_stamp = rawHead.physiology_time_stamp\n\n # Defaults, to be updated by the user\n imgHead.image_type = ismrmrd.IMTYPE_MAGNITUDE\n imgHead.image_index = 1\n imgHead.image_series_index = 0\n\n imgHead.user_float = rawHead.user_float\n imgHead.user_int = rawHead.user_int\n\n return imgHead\n\ndef get_userParameterLong_value(metadata, name):\n \"\"\"Get a value from MRD Header userParameterLong (returns None if key not found)\"\"\"\n for param in metadata.userParameters.userParameterLong:\n if param.name == name:\n return int(param.value)\n return None\n\ndef get_userParameterDouble_value(metadata, name):\n \"\"\"Get a value from MRD Header userParameterDouble (returns None if key not found)\"\"\"\n for param in metadata.userParameters.userParameterDouble:\n if param.name == name:\n return float(param.value)\n return None\n\ndef get_userParameterString_value(metadata, name):\n \"\"\"Get a value from MRD Header userParameterDouble (returns None if key 
not found)\"\"\"\n for param in metadata.userParameters.userParameterDouble:\n if param.name == name:\n return float(param.value)\n return None\n\ndef get_userParameterBase64_value(metadata, name):\n \"\"\"Get a value from MRD Header userParameterBase64 (returns None if key not found)\"\"\"\n for param in metadata.userParameters.userParameterBase64:\n if param.name == name:\n return base64.b64decode(param.value).decode('utf-8')\n return None\n\ndef get_meta_value(meta, key):\n \"\"\"Get a value from MRD Meta Attributes (returns None if key not found)\"\"\"\n if key in meta.keys():\n return meta[key]\n else:\n return None\n\ndef extract_minihead_bool_param(miniHead, name):\n \"\"\"Extract a bool parameter from the serialized text of the ICE MiniHeader\"\"\"\n val = extract_minihead_param(miniHead, name, 'ParamBool')\n\n if val is None:\n return False\n elif val.strip('\" ').lower() == 'true'.lower():\n return True\n else:\n return False\n\ndef extract_minihead_long_param(miniHead, name):\n \"\"\"Extract a long parameter from the serialized text of the ICE MiniHeader\"\"\"\n val = extract_minihead_param(miniHead, name, 'ParamLong')\n\n if val is None:\n return int(0)\n else:\n return int(val)\n\ndef extract_minihead_double_param(miniHead, name):\n \"\"\"Extract a double parameter from the serialized text of the ICE MiniHeader\"\"\"\n val = extract_minihead_param(miniHead, name, 'ParamDouble')\n\n if val is None:\n return float(0)\n else:\n return float(val)\n\ndef extract_minihead_string_param(miniHead, name):\n \"\"\"Extract a string parameter from the serialized text of the ICE MiniHeader\"\"\"\n val = extract_minihead_param(miniHead, name, 'ParamString')\n\n return val.strip(' \"')\n\ndef extract_minihead_param(miniHead, name, strType):\n \"\"\"Extract a string parameter from the serialized text of the ICE MiniHeader\"\"\"\n expr = r'(?<=<' + strType + r'.\"' + name + r'\">)\\s*[^}]*\\s*'\n res = re.search(expr, miniHead)\n\n if res is None:\n return None\n\n # Strip off beginning '{' and whitespace, then split on newlines\n values = res.group(0).strip('{\\n ').split('\\n')\n\n # Lines beginning with <> are properties -- ignore them\n values = [val for val in values if bool(re.search(r'^\\s*<\\w+>', val)) is False]\n\n if len(values) != 1:\n return None\n else:\n return values[0]\n\ndef create_roi(x, y, rgb = (1, 0, 0), thickness = 1, style = 0, visibility = 1):\n \"\"\"\n Create an MRD-formatted ROI\n Parameters:\n - x (1D ndarray) : x coordinates in units of pixels, with (0,0) at the top left\n - y (1D ndarray) : y coordinates in units of pixels, matching the length of x\n - rgb (3 item tuple) : Colour as an (red, green, blue) tuple normalized to 1\n - thickness (float) : Line thickness\n - style (int) : Line style (0 = solid, 1 = dashed)\n - visibility (int) : Line visibility (0 = false, 1 = true)\n Returns:\n - roi (string list) : MRD-formatted ROI, intended to be stored as a MetaAttribute\n with field name starting with \"ROI_\"\n \"\"\"\n xy = [(x[i], y[i]) for i in range(0, len(x))] # List of (x,y) tuples\n\n roi = []\n roi.append('%f' % rgb[0])\n roi.append('%f' % rgb[1])\n roi.append('%f' % rgb[2])\n roi.append('%f' % thickness)\n roi.append('%f' % style)\n roi.append('%f' % visibility)\n\n for i in range(0, len(xy)):\n roi.append('%f' % xy[i][0])\n roi.append('%f' % xy[i][1])\n\n return roi","sub_path":"mrdhelper.py","file_name":"mrdhelper.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"650174643","text":"import random\nimport Databas as DB\nimport timeit\nfrom time import sleep\n#import Ra as RC\n# import os.path\n\nclass Map:\n def __init__(self,xsize=6,ysize=4):\n\n self.xsize=xsize\n self.ysize=ysize\n self.area=xsize*ysize\n self.MapGraph={}\n\n self.map = [[0] * self.xsize for i in range(self.ysize)]\n\n for i in range(0,self.ysize):\n for j in range(0,self.xsize):\n #print(\"i=\"+str(i)+\"j=\"+str(j))\n self.map[i][j]=\"i=\"+str(i)+\"j=\"+str(j)\n #print()\n\n def AddCurrentVal(self,val):\n self.CurrentVal=val\n\n def AddToMap(self,ReversedBinary):\n #this function requires that the integer value must be reversed so that it is more convienent to enter into the map varaible.\n for i in range(0, self.ysize):\n for j in range(0, self.xsize):\n singleChar = ReversedBinary[-1:]\n ReversedBinary = ReversedBinary[:-1]\n self.map[i][j] = singleChar\n #self.ToGraph()\n #print(self.MapGraph)\n return self.map\n\n def bfs_paths(self,graph, start, goal):\n queue = [(start, [start])]\n while queue:\n (vertex, path) = queue.pop(0)\n for next in graph[vertex] - set(path):\n if next == goal:\n yield path + [next]\n else:\n queue.append((next, path + [next]))\n\n def Clear(self):\n RBin=self.GetCurrentVal()\n self.AddToMap(RBin)\n def CreateDB(self):\n start = timeit.timeit()\n print(\"Creating Database\")\n DB.create_table()\n max=self.MaxBinVal()\n\n for j in range(0,max):\n # thread=Thread(target=self.CreateDBEntry(),args=(j*section,((j*section)+section)))\n # thread.start()\n self.CreateDBEntry(j,max)\n\n\n\n print(\"DataBase Complete\")\n end = timeit.timeit()\n print(\"Total Time taken:\")\n print(end - start)\n def CreateDBEntry(self,i,max):\n # if(os.path.isfile(\"Maps.db\")):\n # print(\"File is already create are you sure you want to do this (Y/N)\")\n # user=input().capitalize()\n # if(user==\"N\"):\n # print(\"Creating Database is cancelled.\")\n # return\n\n sNum = self.DecToFormatVal(i)\n self.AddCurrentVal(sNum)\n self.AddToMap(sNum)\n self.ToGraph()\n PathLength=0\n #print(\"path to the start and goal of program\")\n mapstart=timeit.timeit()\n #print(\"Working on map \"+str(i))\n for q in range(0,3):\n if self.map[q][0]==1:\n continue\n else:\n if PathLength > 6:\n break;\n for j in range(0,5):\n if PathLength > 6:\n break;\n goal=str(q)+str(5)\n start=str(q)+\"0\"\n listPath=list(self.bfs_paths(self.MapGraph, start, goal))\n PathofCurrentListIndex=self.findLongestPath(listPath)\n PathofCurrentList=listPath[PathofCurrentListIndex]\n startPosition=int(PathofCurrentList[0])*6\n\n\n\n\n #print(PathofCurrentList)\n if(len(PathofCurrentList)>PathLength):\n PathLength=len(PathofCurrentList)\n NsNum=sNum[:startPosition]+\"2\"+sNum[startPosition+1:]\n\n #print(PathLength)\n\n mapend = timeit.timeit()\n maptime = mapend - mapstart\n #print( \"Time to complete map \"+str(i)+\" : \"+str(maptime))\n if (PathLength > 6):\n #print(i, \" has PASSED with a length of \", PathLength)\n DB.data_entry(i, NsNum)\n else:\n print(i, \" has FAILED\")\n if q+2<4:\n q=q+1\n # print(\"Added \"+str(i)+\"to the data base\")\n\n #print(\"DataBase Complete\")\n\n\n\n def DecToFormatVal(self, val):\n # formating will tend to the needs of the size so for 3x3 there will be 9 digits and 5x5 25 digits.\n\n form = \"{0:0%db}\" % (self.area)\n sNum = form.format(val)\n # print(sNum)\n # reverse the value so it is easier to enter into the array\n sNum = sNum[::-1]\n\n # print(sNum)\n return sNum\n\n def Display(self):\n for i in range(0,self.ysize):\n for j in range(0,self.xsize):\n 
print(self.map[i][j], end=\" \")\n print()\n\n def GetCurrentVal(self):\n return self.CurrentVal\n\n def findLongestPath(self, ListPath):\n longestPath = 0\n for i in range(0, len(ListPath)):\n if (len(ListPath[i]) > longestPath):\n longestPath = len(ListPath[i])\n return longestPath\n\n def MaxBinVal(self):\n #this will add 1's to every possible position of the string giving us the maximum combinations\n sVal=str(1)*self.area\n return int(sVal, 2)\n\n def ToGraph(self):\n for i in range(0,self.ysize):\n for j in range(0,self.xsize):\n edges = []\n Current=str(i)+str(j)\n\n\n #north Edge\n if not (i-1<0):\n if self.map[i-1][j]==\"0\":\n singleEdge=str(i-1)+str(j)\n edges.insert(-1,singleEdge)\n #East Edge\n if not (j+1>=self.xsize):\n if self.map[i][j+1]==\"0\":\n singleEdge=str(i)+str(j+1)\n edges.insert(-1,singleEdge)\n #South Edge\n if not (i+1>=self.ysize):\n if self.map[i+1][j]==\"0\":\n singleEdge=str(i+1)+str(j)\n edges.insert(-1,singleEdge)\n #West Edge\n if not (j-1<0):\n if self.map[i][j-1]==\"0\":\n singleEdge=str(i)+str(j-1)\n edges.insert(-1,singleEdge)\n self.MapGraph[Current]=set(edges)\n\n def RandMap(self,size=3):\n #if(os.path.isfile(\"Maps.db\")):\n randMap = random.randint(0, DB.rowCount())\n\n sNum = self.DecToFormatVal(randMap)\n\n print(\"Selected val\", randMap,\"out of the possible \", DB.rowCount(),\"with the code of \",sNum)\n self.AddToMap(sNum)\n self.Display()\n return\n\n #print(\"Database is not created please enter 1 to create one\")\n #return\n\n def test(self):\n self.MaxBinVal()\n\n def getRox(self):\n print(DB.rowCount())\n\n def NumberToEnglishConverter(self,map):\n NewMap=[]\n for i in range(0,self.ysize):\n for j in range(0, self.xsize):\n if map[i][j]==\"0\":\n NewMap[i][j]=\"Path\"\n elif map[i][j]==\"1\":\n NewMap[i][j]==\"Wall\"\n elif map[i][j]==\"2\":\n NewMap[i][j]==\"Path\"\n elif map[i][j]==\"3\":\n NewMap[i][j]==\"Challenge\"\n elif map[i][j]==\"4\":\n NewMap[i][j]==\"Other\"\n else:\n print (\"Error\")\n print(NewMap)","sub_path":"MapObject.py","file_name":"MapObject.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"355813338","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 3 16:19:00 2020\r\n\r\n@author: btknzn\r\n\"\"\"\r\n#You Have 22 selections, Just blink in 10 seconds and select the food you want to eat\r\n#Our system counts your blink number from your first blick to 10 seconds later\r\nimport numpy as np\r\nimport cv2\r\nimport dlib\r\nfrom math import hypot\r\nimport time\r\n\r\ndef System():\r\n situation = True\r\n starttime=0\r\n eyeControlvalue=0\r\n lastvalueofeyecontrol=0\r\n counter=0\r\n while(True):\r\n time.sleep(0.01)\r\n ret, frame = cap.read() \r\n # Our operations on the frame come here\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n # Display the resulting frame\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n faces = detector(gray)\r\n lastvalueofeyecontrol = eyeControlvalue\r\n \r\n eyeControlvalue = eyeControl(gray, faces,frame) \r\n #print(str(counter)+' blink number')\r\n #print('Do you want '+systemTupel[counter]+'?')\r\n if eyeControlvalue == 1:\r\n if situation:\r\n situation = False\r\n starttime=time.time()\r\n \r\n \r\n if situation:\r\n cv2.putText(frame,'Sistemi Baslatmak icin Goz kirin 10 saniye içinde göz kirparak isteğinizi belirtin ',(0,150),font,0.1,(0,0,255),1)\r\n #print('Sistemi başlatmak için Bir kere Göz kırpın') \r\n\r\n \r\n if 
(eyeControlvalue==1) and (lastvalueofeyecontrol==0):\r\n counter= counter +1\r\n print(\"Göz kırpma sayısı \"+ str(counter))\r\n \r\n cv2.putText(frame,\"Goz Kirpma Sayisi: \"+ str(counter),(0,200),font,0.5,(0,0,255),1) \r\n if ((starttime+10)starttime+10):\r\n situation = True\r\n cv2.imshow('frame',frame)\r\n \r\n \r\n \r\n cap.release() \r\n \r\n\r\n\r\ndef eyeControl(gray, faces,frame):\r\n for face in faces:\r\n features = predictor(gray,face)\r\n \r\n R_Hori_left_x = features.part(36).x\r\n R_Hori_left_y = features.part(36).y\r\n R_Hori_left = (R_Hori_left_x,R_Hori_left_y)\r\n R_Hori_right_x = features.part(39).x\r\n R_Hori_right_y = features.part(39).y\r\n R_Hori_right = (R_Hori_right_x,R_Hori_right_y)\r\n cv2.line(frame,R_Hori_left,R_Hori_right,(0,255,0),1)\r\n R_upper_mid_x = int((features.part(37).x + features.part(38).x)/2)\r\n R_upper_mid_y = int((features.part(37).y + features.part(38).y)/2)\r\n R_upper_mid = (R_upper_mid_x,R_upper_mid_y)\r\n R_bottom_mid_x = int((features.part(41).x + features.part(40).x)/2)\r\n R_bottom_mid_y = int((features.part(41).y + features.part(40).y)/2)\r\n R_bottom_mid = (R_bottom_mid_x,R_bottom_mid_y)\r\n cv2.line(frame,R_upper_mid,R_bottom_mid,(0,255,0),1)\r\n # find the lenght for both horizontal and vertical line\r\n R_hori_lenght = hypot(R_Hori_left_x - R_Hori_right_x, R_Hori_left_y - R_Hori_right_y)\r\n R_ver_lenght = hypot(R_upper_mid[0] - R_bottom_mid[0], R_upper_mid[1] - R_bottom_mid[1])\r\n features = predictor(gray,face)\r\n L_Hori_left_x = features.part(42).x\r\n L_Hori_left_y = features.part(42).y\r\n L_Hori_left = (L_Hori_left_x,L_Hori_left_y)\r\n L_Hori_right_x = features.part(45).x\r\n L_Hori_right_y = features.part(45).y\r\n L_Hori_right = (L_Hori_right_x,L_Hori_right_y)\r\n cv2.line(frame,L_Hori_left,L_Hori_right,(0,255,0),1)\r\n L_upper_mid_x = int((features.part(43).x + features.part(44).x)/2)\r\n L_upper_mid_y = int((features.part(43).y + features.part(44).y)/2)\r\n L_upper_mid = (L_upper_mid_x,L_upper_mid_y)\r\n L_bottom_mid_x = int((features.part(47).x + features.part(46).x)/2)\r\n L_bottom_mid_y = int((features.part(47).y + features.part(46).y)/2)\r\n L_bottom_mid = (L_bottom_mid_x,L_bottom_mid_y)\r\n cv2.line(frame,L_upper_mid,L_bottom_mid,(0,255,0),1)\r\n L_hori_lenght = hypot(L_Hori_left_x-L_Hori_right_x,L_Hori_left_y-L_Hori_right_y)\r\n L_ver_lenght = hypot(L_upper_mid[0]-L_bottom_mid[0],L_upper_mid[1]-L_bottom_mid[1])\r\n L_ratio = L_hori_lenght/(L_ver_lenght+0.000000001)\r\n R_ratio = R_hori_lenght/(R_ver_lenght+0.000000001)\r\n ratio = (L_ratio + R_ratio)/2\r\n if ratio > 5:\r\n return 1\r\n return 0\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\ndetector = dlib.get_frontal_face_detector()\r\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\r\ncap.set(3,320)\r\ncap.set(4,256)\r\nsystemTupel = (\"\",\"Elma\",\"Armut\",\"Çilek\",\"muz\",\"ananas\",\"nar\",\"üzüm\",\"şeftali\",\"kayısı\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\")\r\nfont = cv2.FONT_HERSHEY_COMPLEX\r\nSystem()","sub_path":"projemaın/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"443553659","text":"# coding: UTF-8\nimport os\nimport random\n\nimport math\nimport re\nimport time\n\nimport numpy\nfrom hyperopt import hp\n\nfrom src import (\n highest,\n lowest,\n avg_price,\n typ_price,\n sma,\n crossover,\n crossunder,\n over,\n under,\n last,\n rci,\n rsi,\n double_ema,\n ema,\n triple_ema,\n 
wma,\n ssma,\n hull,\n logger,\n notify,\n atr,\n willr,\n bbands,\n supertrend,\n heikinashi,\n)\nfrom src.bitmex import BitMex\nfrom src.binance_futures import BinanceFutures\nfrom src.bitmex_stub import BitMexStub\nfrom src.binance_futures_stub import BinanceFuturesStub\nfrom src.bot import Bot\nfrom dotenv import load_dotenv\n\n# Channel breakout strategy\nfrom src.gmail_sub import GmailSub\n\nload_dotenv()\n\n\ndef get_calc_lot(lot, decimal_num: int, leverage: float, actual_leverage: float):\n calc_lot = lot / leverage\n calc_lot *= actual_leverage\n calc_lot -= calc_lot % (10 ** -decimal_num)\n calc_lot = round(calc_lot, decimal_num)\n return calc_lot\n\n\ndef calc_entry_price(price, long, price_decimals=2):\n if long:\n return round(price - (0.1 / 100 * price), price_decimals)\n else:\n return round(price + (0.1 / 100 * price), price_decimals)\n\n\nclass Doten(Bot):\n def __init__(self):\n Bot.__init__(self, \"2h\")\n\n def options(self):\n return {\n \"length\": hp.randint(\"length\", 1, 30, 1),\n }\n\n def strategy(self, open, close, high, low, volume):\n lot = self.exchange.get_lot()\n length = self.input(\"length\", int, 9)\n up = last(highest(high, length))\n dn = last(lowest(low, length))\n self.exchange.plot(\"up\", up, \"b\")\n self.exchange.plot(\"dn\", dn, \"r\")\n self.exchange.entry(\"Long\", True, round(lot / 2), stop=up)\n self.exchange.entry(\"Short\", False, round(lot / 2), stop=dn)\n\n\n# SMA CrossOver\nclass SMA(Bot):\n def __init__(self):\n Bot.__init__(self, \"2h\")\n\n def options(self):\n return {\n \"fast_len\": hp.quniform(\"fast_len\", 1, 30, 1),\n \"slow_len\": hp.quniform(\"slow_len\", 1, 30, 1),\n }\n\n def strategy(self, open, close, high, low, volume):\n lot = self.exchange.get_lot()\n fast_len = self.input(\"fast_len\", int, 9)\n slow_len = self.input(\"slow_len\", int, 16)\n fast_sma = sma(close, fast_len)\n slow_sma = sma(close, slow_len)\n golden_cross = crossover(fast_sma, slow_sma)\n dead_cross = crossunder(fast_sma, slow_sma)\n if golden_cross:\n self.exchange.entry(\"Long\", True, lot)\n if dead_cross:\n self.exchange.entry(\"Short\", False, lot)\n\n\n# Rci\nclass Rci(Bot):\n def __init__(self):\n Bot.__init__(self, \"5m\")\n\n def options(self):\n return {\n \"rcv_short_len\": hp.quniform(\"rcv_short_len\", 1, 10, 1),\n \"rcv_medium_len\": hp.quniform(\"rcv_medium_len\", 5, 15, 1),\n \"rcv_long_len\": hp.quniform(\"rcv_long_len\", 10, 20, 1),\n }\n\n def strategy(self, open, close, high, low, volume):\n lot = self.exchange.get_lot()\n\n itv_s = self.input(\"rcv_short_len\", int, 5)\n itv_m = self.input(\"rcv_medium_len\", int, 9)\n itv_l = self.input(\"rcv_long_len\", int, 15)\n\n rci_s = rci(close, itv_s)\n rci_m = rci(close, itv_m)\n rci_l = rci(close, itv_l)\n\n long = ((-80 > rci_s[-1] > rci_s[-2]) or (-82 > rci_m[-1] > rci_m[-2])) and (rci_l[-1] < -10 and rci_l[-2] > rci_l[-2])\n short = ((80 < rci_s[-1] < rci_s[-2]) or (rci_m[-1] < -82 and rci_m[-1] < rci_m[-2])) and (10 < rci_l[-1] < rci_l[-2])\n close_all = 80 < rci_m[-1] < rci_m[-2] or -80 > rci_m[-1] > rci_m[-2]\n\n if long:\n self.exchange.entry(\"Long\", True, lot)\n elif short:\n self.exchange.entry(\"Short\", False, lot)\n elif close_all:\n self.exchange.close_all()\n\n\n# OCC\nclass OCC(Bot):\n variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull]\n eval_time = None\n\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n def ohlcv_len(self):\n return 15 * 30\n\n def options(self):\n return {\n \"variant_type\": hp.quniform(\"variant_type\", 0, len(self.variants) - 1, 
1),\n \"basis_len\": hp.quniform(\"basis_len\", 1, 30, 1),\n \"resolution\": hp.quniform(\"resolution\", 1, 10, 1),\n \"sma_len\": hp.quniform(\"sma_len\", 1, 15, 1),\n \"div_threshold\": hp.quniform(\"div_threshold\", 1, 6, 0.1),\n }\n\n def strategy(self, open, close, high, low, volume):\n lot = self.exchange.get_lot()\n\n variant_type = self.input(defval=5, title=\"variant_type\", type=int)\n basis_len = self.input(defval=19, title=\"basis_len\", type=int)\n resolution = self.input(defval=2, title=\"resolution\", type=int)\n sma_len = self.input(defval=9, title=\"sma_len\", type=int)\n div_threshold = self.input(defval=3.0, title=\"div_threshold\", type=float)\n\n source = self.exchange.security(str(resolution) + \"m\")\n\n if self.eval_time is not None and self.eval_time == source.iloc[-1].name:\n return\n\n series_open = source[\"open\"].values\n series_close = source[\"close\"].values\n\n variant = self.variants[variant_type]\n\n val_open = variant(series_open, basis_len)\n val_close = variant(series_close, basis_len)\n\n if val_open[-1] > val_close[-1]:\n high_val = val_open[-1]\n low_val = val_close[-1]\n else:\n high_val = val_close[-1]\n low_val = val_open[-1]\n\n sma_val = sma(close, sma_len)\n logger.info(\"lagging log\")\n self.exchange.plot(\"val_open\", val_open[-1], \"b\")\n self.exchange.plot(\"val_close\", val_close[-1], \"r\")\n\n self.exchange.entry(\"Long\", True, lot, stop=math.floor(low_val), when=(sma_val[-1] < low_val))\n self.exchange.entry(\"Short\", False, lot, stop=math.ceil(high_val), when=(sma_val[-1] > high_val))\n\n open_close_div = sma(numpy.abs(val_open - val_close), sma_len)\n\n if open_close_div[-1] > div_threshold and open_close_div[-2] > div_threshold < open_close_div[-2]:\n self.exchange.close_all()\n\n self.eval_time = source.iloc[-1].name\n\n\n# TradingView\n\n\nclass TV(Bot):\n subscriber = None\n\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n user_id = os.environ.get(\"GMAIL_ADDRESS\")\n if user_id is None:\n raise Exception(\"Please set GMAIL_ADDRESS into env to use Trading View Strategy.\")\n self.subscriber = GmailSub(user_id)\n self.subscriber.set_from_address(\"noreply@tradingview.com\")\n\n def __on_message(self, messages):\n for message in messages:\n if \"payload\" not in message:\n continue\n if \"headers\" not in message[\"payload\"]:\n continue\n subject_list = [header[\"value\"] for header in message[\"payload\"][\"headers\"] if header[\"name\"] == \"Subject\"]\n if len(subject_list) == 0:\n continue\n subject = subject_list[0]\n if subject.startswith(\"TradingViewアラート:\"):\n action = subject.replace(\"TradingViewアラート:\", \"\")\n self.__action(action)\n\n def __action(self, action):\n lot = self.exchange.get_lot()\n if re.search(\"buy\", action, re.IGNORECASE):\n self.exchange.entry(\"Long\", True, lot)\n elif re.search(\"sell\", action, re.IGNORECASE):\n self.exchange.entry(\"Short\", True, lot)\n elif re.search(\"exit\", action, re.IGNORECASE):\n self.exchange.close_all()\n\n def run(self):\n if self.hyperopt:\n raise Exception(\"Trading View Strategy dose not support hyperopt Mode.\")\n elif self.back_test:\n raise Exception(\"Trading View Strategy dose not support backtest Mode.\")\n elif self.stub_test:\n # if you want to use binance futures\n # self.exchange = BinanceFuturesStub(account=self.account, pair=self.pair)\n self.exchange = BitMexStub(account=self.account, pair=self.pair)\n logger.info(f\"Bot Mode : Stub\")\n else:\n # if you want to use binance\n # self.exchange = BinanceFutures(account=self.account, 
pair=self.pair, demo=self.test_net)\n self.exchange = BitMex(account=self.account, pair=self.pair, demo=self.test_net)\n logger.info(f\"Bot Mode : Trade\")\n\n logger.info(f\"Starting Bot\")\n logger.info(f\"Strategy : {type(self).__name__}\")\n logger.info(f\"Balance : {self.exchange.get_balance()}\")\n\n notify(f\"Starting Bot\\n\" f\"Strategy : {type(self).__name__}\\n\" f\"Balance : {self.exchange.get_balance()/100000000} XBT\")\n\n self.subscriber.on_message(self.__on_message)\n\n def stop(self):\n self.subscriber.stop()\n\n\n# candle tester\n\n\nclass CandleTester(Bot):\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n # this is for parameter optimization in hyperopt mode\n def options(self):\n return {}\n\n def strategy(self, open, close, high, low, volume):\n logger.info(f\"open: {open[-1]}\")\n logger.info(f\"high: {high[-1]}\")\n logger.info(f\"low: {low[-1]}\")\n logger.info(f\"close: {close[-1]}\")\n logger.info(f\"volume: {volume[-1]}\")\n\n\n# sample strategy\n\n\nclass Sample(Bot):\n # set variables\n long_entry_signal_history = []\n short_entry_signal_history = []\n\n def __init__(self):\n # set time frame here\n Bot.__init__(self, \"1m\")\n\n def options(self):\n return {}\n\n def round_decimals_down(self, number: float, decimals: int = 2):\n \"\"\"\n Returns a value rounded down to a specific number of decimal places.\n \"\"\"\n if not isinstance(decimals, int):\n raise TypeError(\"decimal places must be an integer\")\n elif decimals < 0:\n raise ValueError(\"decimal places has to be 0 or more\")\n elif decimals == 0:\n return math.floor(number)\n\n factor = 10 ** decimals\n return math.floor(number * factor) / factor\n\n def strategy(self, open, close, high, low, volume):\n\n # get lot or set your own value which will be used to size orders\n # careful default lot is about 20x your account size !!!\n lot = self.exchange.get_lot()\n pos_size = self.exchange.get_position_size()\n\n # indicator lengths\n fast_len = self.input(\"fast_len\", int, 6)\n slow_len = self.input(\"slow_len\", int, 18)\n\n # setting indicators, they usually take source and length as arguments\n sma1 = sma(close, fast_len)\n sma2 = sma(close, slow_len)\n\n # entry conditions\n long_entry_condition = crossover(sma1, sma2)\n short_entry_condition = crossunder(sma1, sma2)\n\n # setting a simple stop loss and profit target in % using built-in simple profit take and stop loss implementation\n # which is placing the sl and tp automatically after entering a position\n self.exchange.sltp(\n profit_long=1.25,\n profit_short=1.25,\n stop_long=1,\n stop_short=1.1,\n round_decimals=0,\n )\n\n # example of calculation of stop loss price 0.8% round on 2 decimals hardcoded inside this class\n # sl_long = round(close[-1] - close[-1]*0.8/100, 2)\n # sl_short = round(close[-1] - close[-1]*0.8/100, 2)\n\n # order execution logic\n if pos_size == 0:\n if long_entry_condition:\n # entry - True means long for every other order other than entry use self.exchange.order() function\n self.exchange.entry(\"Long\", True, self.round_decimals_down(lot / 20, 3))\n # stop loss hardcoded inside this class\n # self.exchange.order(\"SLLong\", False, lot/20, stop=sl_long, reduce_only=True, when=False)\n\n if short_entry_condition:\n # entry - False means short for every other order other than entry use self.exchange.order() function\n self.exchange.entry(\"Short\", False, self.round_decimals_down(lot / 20, 3))\n # stop loss hardcoded inside this class\n # self.exchange.order(\"SLShort\", True, lot/20, stop=sl_short, 
reduce_only=True, when=False)\n\n # storing history for entry signals, you can store any variable this way to keep historical values\n self.long_entry_signal_history.append(long_entry_condition)\n self.short_entry_signal_history.append(short_entry_condition)\n\n # OHLCV and indicator data, you can access history using list index\n # log indicator values\n logger.info(f\"sma1: {sma1[-1]}\")\n logger.info(f\"second last sma2: {sma2[-2]}\")\n # log last candle OHLCV values\n logger.info(f\"open: {open[-1]}\")\n logger.info(f\"high: {high[-1]}\")\n logger.info(f\"low: {low[-1]}\")\n logger.info(f\"close: {close[-1]}\")\n logger.info(f\"volume: {volume[-1]}\")\n # second last candle OHLCV values\n logger.info(f\"second last open: {open[-2]}\")\n logger.info(f\"second last high: {high[-2]}\")\n logger.info(f\"second last low: {low[-2]}\")\n logger.info(f\"second last close: {close[-2]}\")\n logger.info(f\"second last volume: {volume[-2]}\")\n # logger.info(f\"position: {pos_size}\")\n # log history entry signals\n # logger.info(f\"long_entry_hist: {self.long_entry_signal_history}\")\n # logger.info(f\"short_entry_hist: {self.short_entry_signal_history}\")\n\n\n# SMA CrossOver\n\n\nclass SMA2(Bot):\n decimal_num = int(os.environ.get(\"BOT_DECIMAL_NUM\", 3))\n price_decimal_num = int(os.environ.get(\"BOT_PRICE_DECIMAL_NUM\", 2))\n rr_ratio = 2\n risk = 0.5\n\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n def options(self):\n return {\n \"fast_len\": hp.quniform(\"fast_len\", 1, 20, 1),\n \"slow_len\": hp.quniform(\"slow_len\", 1, 60, 1),\n \"trend_len\": hp.quniform(\"trend_len\", 1, 99, 1),\n }\n\n def strategy(self, open, close, high, low, volume):\n\n lot = self.exchange.get_lot()\n lot = get_calc_lot(lot=lot, decimal_num=self.decimal_num, leverage=20.0, actual_leverage=3.0)\n\n fast_len = self.input(\"fast_len\", int, int(os.environ.get(\"BOT_FAST_LEN\", 5)))\n slow_len = self.input(\"slow_len\", int, int(os.environ.get(\"BOT_SLOW_LEN\", 18)))\n trend_len = self.input(\"trend_len\", int, 90)\n\n logger.info(f\"fast_len: {fast_len}\")\n logger.info(f\"slow_len: {slow_len}\")\n logger.info(f\"trend_len: {trend_len}\")\n\n fast_sma = sma(close, fast_len)\n slow_sma = sma(close, slow_len)\n trend_sma = sma(close, trend_len)\n\n uptrend = True if trend_sma[-1] > trend_sma[-3] or trend_sma[-1] > trend_sma[-10] else False\n downtrend = True if trend_sma[-1] < trend_sma[-3] or trend_sma[-1] < trend_sma[-10] else False\n\n golden_cross = crossover(fast_sma, slow_sma)\n dead_cross = crossunder(fast_sma, slow_sma)\n # inc_trend = fast_sma[-1] > slow_sma[-1]\n # dec_trend = fast_sma[-1] < slow_sma[-1]\n\n reward = self.risk * self.rr_ratio\n self.exchange.sltp(\n profit_long=reward,\n profit_short=reward,\n stop_long=self.risk,\n stop_short=self.risk,\n round_decimals=self.price_decimal_num,\n )\n\n # if float(self.exchange.get_position()['notional']) == 0.0:\n if self.exchange.get_position_size() == 0.0:\n\n self.exchange.cancel_all()\n\n if golden_cross:\n print(\"inc_trend detected\")\n while True:\n # check if in long position\n if float(self.exchange.get_position()[\"notional\"]) > 0.0 or downtrend:\n print(\"long position opened\")\n break\n print(\"trying to open long position...\")\n self.exchange.entry(\"Long\", True, lot)\n\n if dead_cross:\n print(\"dec_trend detected\")\n while True:\n # check if in short position\n if float(self.exchange.get_position()[\"notional\"]) < 0.0 or uptrend:\n print(\"short position opened\")\n break\n print(\"trying to open short position...\")\n 
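                # Busy-wait retry: keeps re-sending the entry until the position\n                # registers or the trend flips; there is no sleep here, so this\n                # polls the exchange as fast as the loop can run.\n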
self.exchange.entry(\"Short\", False, lot)\n\n # OHLCV and indicator data, you can access history using list index\n # log indicator values\n print()\n logger.info(f\"fast_sma: {fast_sma[-1]}\")\n logger.info(f\"slow_sma: {slow_sma[-1]}\")\n logger.info(f\"trend_sma: {trend_sma[-1]}\")\n logger.info(f\"uptrend: {uptrend}\")\n logger.info(f\"downtrend: {downtrend}\")\n logger.info(f\"golden_cross: {golden_cross}\")\n logger.info(f\"dead_cross: {dead_cross}\")\n # log last candle OHLCV values\n logger.info(f\"open: {open[-1]}\")\n logger.info(f\"high: {high[-1]}\")\n logger.info(f\"low: {low[-1]}\")\n logger.info(f\"close: {close[-1]}\")\n logger.info(f\"volume: {volume[-1]}\")\n # second last candle OHLCV values\n\n\nclass YYY(Bot):\n decimal_num = int(os.environ.get(\"BOT_DECIMAL_NUM\", 3))\n price_decimal_num = int(os.environ.get(\"BOT_PRICE_DECIMAL_NUM\", 2))\n\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n def ohlcv_len(self):\n return int(os.environ.get(\"BOT_TREND_LEN\", 1200)) + 10\n\n def strategy(self, open, close, high, low, volume):\n lot = self.exchange.get_lot()\n lot = int(round(lot / 6, self.decimal_num))\n\n price = self.exchange.get_market_price()\n pos_size = self.exchange.get_position_size()\n\n fast_len = self.input(\"fast_len\", int, int(os.environ.get(\"BOT_FAST_LEN\", 5)))\n slow_len = self.input(\"slow_len\", int, int(os.environ.get(\"BOT_SLOW_LEN\", 18)))\n trend_len = self.input(\"trend_len\", int, int(os.environ.get(\"BOT_TREND_LEN\", 1200)))\n\n fast_sma = sma(close, fast_len)\n slow_sma = sma(close, slow_len)\n trend_sma = sma(close, trend_len)\n\n uptrend = True if trend_sma[-1] > trend_sma[-3] or trend_sma[-1] > trend_sma[-10] else False\n downtrend = True if trend_sma[-1] < trend_sma[-3] or trend_sma[-1] < trend_sma[-10] else False\n\n golden_cross = crossover(fast_sma, slow_sma)\n dead_cross = crossunder(fast_sma, slow_sma)\n\n nc = \"golden\" if round(fast_sma[-1] - slow_sma[-1], self.price_decimal_num) < 0 else \"dead\"\n ct = \"sideways\" if downtrend and uptrend else (\"down\" if downtrend else \"up\")\n\n np = \"short\" if nc == \"golden\" and (pos_size > 0 or (pos_size >= 0 and downtrend)) else \"long\"\n nt = \"golden\" if (nc == \"golden\" and np == \"short\") else (\"dead\" if nc == \"dead\" and np == \"long\" else not nc)\n\n logger.info(f\"--------------------------------------\")\n logger.info(f\"trend: {ct}\")\n logger.info(f'next trade @ {nt} cross > {np} {lot} @ {calc_entry_price(price, False, self.price_decimal_num) if np == \"short\" else calc_entry_price(price, True, self.price_decimal_num)}')\n if trend_sma[-1] != trend_sma[-1] or trend_sma[-3] != trend_sma[-3] or trend_sma[-10] != trend_sma[-10]:\n logger.info(f\"--------------------------------------\")\n logger.info(f\"Bot status: NEEDS RESTART\")\n # logger.info(f'--------------------------------------')\n # logger.info(f'{abs(pos_size)}')\n\n if not eval(os.environ.get(\"BOT_TEST\", \"False\")):\n if dead_cross and uptrend:\n # self.exchange.cancel_orders_by_side('BUY')\n self.exchange.order(\n \"Long\",\n True,\n lot,\n limit=calc_entry_price(price, True, self.price_decimal_num),\n when=True,\n post_only=True,\n )\n logger.info(\"in dead_cross and uptrend for long\")\n\n if float(self.exchange.get_position()[\"notional\"]) > 0.0:\n self.exchange.order(\n \"Long\",\n False,\n lot,\n limit=calc_entry_price(price, False, self.price_decimal_num),\n when=golden_cross,\n post_only=True,\n )\n\n if golden_cross and downtrend:\n # self.exchange.cancel_orders_by_side('SELL')\n 
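# --- Illustrative sketch (not part of the original strategy.py) ---
# The `trend_sma[-1] != trend_sma[-1]` test above exploits the IEEE-754
# rule that NaN never compares equal to itself; it fires when the SMA
# window is longer than the candle history available. An explicit isnan()
# check expresses the same "needs restart" condition more readably:
import math

def needs_restart(samples):
    # True when any sampled SMA point is NaN (not enough data yet).
    return any(math.isnan(v) for v in samples)

assert needs_restart([float("nan"), 1.0])
assert not needs_restart([0.5, 1.0])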
self.exchange.entry(\n \"Short\",\n False,\n lot,\n limit=calc_entry_price(price, False, self.price_decimal_num),\n when=True,\n post_only=True,\n )\n logger.info(\"in golden_cross and downtrend for short\")\n\n if float(self.exchange.get_position()[\"notional\"]) < 0.0:\n self.exchange.order(\n \"Short\",\n True,\n lot,\n limit=calc_entry_price(price, True, self.price_decimal_num),\n stop=(calc_entry_price(price, True, self.price_decimal_num)),\n when=dead_cross,\n post_only=True,\n )\n\n\nclass Heikinashi(Bot):\n variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]\n eval_time = None\n\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n def options(self):\n return {\n \"fast_len\": hp.quniform(\"fast_len\", 1, 60, 1),\n \"slow_len\": hp.quniform(\"slow_len\", 1, 240, 1),\n }\n\n def strategy(self, open, close, high, low, volume):\n\n lot = self.exchange.get_lot()\n lot = int(round(lot / 6, self.decimal_num))\n\n resolution = self.input(defval=1, title=\"resolution\", type=int)\n variant_type = self.input(defval=5, title=\"variant_type\", type=int)\n basis_len = self.input(defval=19, title=\"basis_len\", type=int)\n\n fast_len = self.input(\"fast_len\", int, 1)\n slow_len = self.input(\"slow_len\", int, 30)\n trend_len = self.input(\"slow_len\", int, 60)\n longtrend_len = self.input(\"slow_len\", int, 120)\n\n source = self.exchange.security(str(resolution) + \"m\")\n\n hadf = heikinashi(source)\n hadf_fast = heikinashi(hadf)\n\n ha_open_values = hadf_fast[\"HA_open\"].values\n ha_close_values = hadf_fast[\"HA_close\"].values\n variant = self.variants[variant_type]\n\n ha_open_fast = variant(ha_open_values, fast_len)\n ha_close_fast = variant(ha_close_values, fast_len)\n haopen_fast = ha_open_fast[-1]\n haclose_fast = ha_close_fast[-1]\n haup_fast = haclose_fast > haopen_fast\n hadown_fast = haclose_fast <= haopen_fast\n # logger.info('haup_fast:%s\\n' % haup_fast)\n\n ha_open_slow = variant(ha_open_values, slow_len)\n ha_close_slow = variant(ha_close_values, slow_len)\n haopen_slow = ha_open_slow[-1]\n haclose_slow = ha_close_slow[-1]\n haup_slow = haclose_slow > haopen_slow\n hadown_slow = haclose_slow <= haopen_slow\n # logger.info('haup_slow:%s\\n' % haup_slow)\n\n ha_open_trend = variant(ha_open_values, trend_len)\n ha_close_trend = variant(ha_close_values, trend_len)\n haopen_trend = ha_open_trend[-1]\n haclose_trend = ha_close_trend[-1]\n haup_trend = haclose_trend > haopen_trend\n hadown_trend = haclose_trend <= haopen_trend\n # logger.info('haup_trend:%s\\n' % haup_trend)\n\n ha_open_longtrend = variant(ha_open_values, longtrend_len)\n ha_close_longtrend = variant(ha_close_values, longtrend_len)\n haopen_longtrend = ha_open_longtrend[-1]\n haclose_longtrend = ha_close_longtrend[-1]\n haup_longtrend = haclose_longtrend > haopen_longtrend\n hadown_longtrend = haclose_longtrend <= haopen_longtrend\n logger.info(\"ha_close_longtrend:%s\\n\" % ha_close_longtrend)\n logger.info(\"ha_open_longtrend:%s\\n\" % ha_open_longtrend)\n\n if not eval(os.environ.get(\"BOT_TEST\", \"False\")):\n \"long\"\n self.exchange.entry(\"Long\", True, lot, when=crossover(ha_close_longtrend, ha_open_longtrend))\n \" short \"\n self.exchange.entry(\n \"Short\",\n False,\n lot,\n when=crossunder(ha_close_longtrend, ha_open_longtrend),\n )\n\n\nclass Will_Rci(Bot):\n\n inlong = False\n inshort = False\n\n decimal_num = int(os.environ.get(\"BOT_DECIMAL_NUM\", 3))\n price_decimal_num = int(os.environ.get(\"BOT_PRICE_DECIMAL_NUM\", 2))\n lot_percent = 100 / 
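# --- Illustrative sketch (not part of the original strategy.py) ---
# The Heikinashi strategy above smooths candles with heikinashi(), and the
# standard Heikin-Ashi recurrence it presumably implements is:
#   HA_close[i] = (open[i] + high[i] + low[i] + close[i]) / 4
#   HA_open[i]  = (HA_open[i-1] + HA_close[i-1]) / 2, seeded from bar 0.
# A minimal version over parallel lists:

def heikin_ashi_sketch(opens, highs, lows, closes):
    ha_open = [(opens[0] + closes[0]) / 2.0]
    ha_close = [(opens[0] + highs[0] + lows[0] + closes[0]) / 4.0]
    for i in range(1, len(opens)):
        ha_close.append((opens[i] + highs[i] + lows[i] + closes[i]) / 4.0)
        ha_open.append((ha_open[i - 1] + ha_close[i - 1]) / 2.0)
    return ha_open, ha_close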
int(os.environ.get(\"BOT_LOT_PERCENT\", 10))\n take_profit_percent = 100 / int(os.environ.get(\"BOT_TAKE_PROFIT_PERCENT\", 50))\n\n def __init__(self):\n Bot.__init__(self, \"1m\")\n\n def ohlcv_len(self):\n return 6790\n\n def options(self):\n return {\n \"rcv_short_len\": hp.quniform(\"rcv_short_len\", 1, 21, 1),\n \"rcv_medium_len\": hp.quniform(\"rcv_medium_len\", 21, 34, 1),\n \"rcv_long_len\": hp.quniform(\"rcv_long_len\", 34, 55, 1),\n }\n\n def strategy(self, open, close, high, low, volume):\n # logger.info('strategy start ctime : %s' % time.ctime())\n # start = time.time() # 시작 시간 저장\n lot = self.exchange.get_lot()\n lot = round(lot / self.lot_percent, self.decimal_num)\n\n pos_size = self.exchange.get_position_size()\n pos_margin = (abs(pos_size) * self.exchange.get_position_entry_price()) / 20\n tp_order = self.exchange.get_open_order(\"TP\")\n\n itv_s = self.input(\"rcv_short_len\", int, 21)\n itv_m = self.input(\"rcv_medium_len\", int, 34)\n itv_l = self.input(\"rcv_long_len\", int, 55)\n\n rci_s = rci(close, itv_s)\n rci_m = rci(close, itv_m)\n rci_l = rci(close, itv_l)\n\n ra = rci_s[-1] / 2 - 50\n rb = rci_m[-1] / 2 - 50\n rc = rci_l[-1] / 2 - 50\n\n # willr for five willilams\n a = willr(high, low, close, period=55)\n b = willr(high, low, close, period=144)\n c = willr(high, low, close, period=610)\n x = willr(high, low, close, period=4181)\n y = willr(high, low, close, period=6785)\n\n buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False\n buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False\n buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False\n buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False\n buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False\n buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False\n buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False\n buycon8 = True if c[-1] < -97 else False\n buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False\n\n sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False\n sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False\n sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False\n sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False\n sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False\n sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False\n sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False\n sellcon8 = True if c[-1] > -3 else False\n sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False\n\n buyRCIfillerCon = True if rc < -80 else False\n sellRCIfillerCon = True if rc > -20 else False\n\n buyWillfilterCon = buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9\n sellWillFilrerCon = sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9\n\n # set 
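# --- Illustrative sketch (not part of the original strategy.py) ---
# The willr() calls above compute Williams %R, which ranges from 0
# (overbought) to -100 (oversold); thresholds such as -97 and -3 in the
# buy/sell conditions therefore sit at the extremes. Standard formula:
#   %R = (highest_high - close) / (highest_high - lowest_low) * -100

def williams_r_sketch(high, low, close, period):
    hh = max(high[-period:])
    ll = min(low[-period:])
    if hh == ll:
        return 0.0  # degenerate flat window
    return (hh - close[-1]) / (hh - ll) * -100.0

# Worked example: a close at the bottom of the range gives -100.
assert williams_r_sketch([10, 12], [8, 9], [9, 8], 2) == -100.0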
condition\n buyCons = buyWillfilterCon and buyRCIfillerCon\n sellCons = sellWillFilrerCon and sellRCIfillerCon\n\n buyCon = True if buyCons else False\n sellCon = True if sellCons else False\n\n # buyCloseCon = sellRCIfillerCon\n buyCloseCon = sellWillFilrerCon\n\n # sellCloseCon = buyRCIfillerCon\n sellCloseCon = buyWillfilterCon\n\n if not eval(os.environ.get(\"BOT_TEST\", \"False\")):\n # self.exchange.exit(profit=(float(pos_margin / self.take_profit_percent)))\n if tp_order is None and pos_size != 0:\n if pos_size < 0:\n self.exchange.order(\"TP\", True, abs(pos_size), take_profit=round(self.exchange.get_position_entry_price() * (1 - (1 / self.take_profit_percent) / 20), self.price_decimal_num), reduce_only=True)\n if pos_size > 0:\n self.exchange.order(\"TP\", False, abs(pos_size), take_profit=round(self.exchange.get_position_entry_price() * ((1 / self.take_profit_percent) / 20 + 1), self.price_decimal_num), reduce_only=True)\n\n if (buyCloseCon and pos_size > 0) or (sellCloseCon and pos_size < 0):\n self.exchange.close_all()\n self.exchange.cancel_all()\n\n if buyCon and pos_size <= 0:\n self.exchange.order(\"Long\", True, lot)\n if sellCon and pos_size >= 0:\n self.exchange.order(\"Short\", False, lot)\n\n logger.info(f\"--------------------------------------\")\n\n logger.info(f\"a: {round(a[-1], 2)}\")\n logger.info(f\"b: {round(b[-1], 2)}\")\n logger.info(f\"c: {round(c[-1], 2)}\")\n logger.info(f\"x: {round(x[-1], 2)}\")\n logger.info(f\"y: {round(y[-1], 2)}\")\n logger.info(f\"rc: {round(rc, 2)}\")\n logger.info(f\"lot: {round(lot, self.decimal_num)} ({round(lot + abs(pos_size), self.decimal_num)})\")\n\n logger.info(f\"--------------------------------------\")\n\n logger.info(f\"WILLR Buy conditions: {sum([buycon1, buycon2, buycon3, buycon4, buycon5, buycon6, buycon7, buycon8, buycon9])}/9\")\n logger.info(f\"WILLR Sell conditions: {sum([sellcon1, sellcon2, sellcon3, sellcon4, sellcon5, sellcon6, sellcon7, sellcon8, sellcon9])}/9\")\n\n logger.info(f\"RCI Buy conditions: {buyRCIfillerCon}\")\n logger.info(f\"RCI Sell conditions: {sellRCIfillerCon}\")\n logger.info(f\"In {'LONG' if pos_size > 0 else ('SHORT' if pos_size < 0 else 'no')} position\")\n # if pos_size != 0:\n # logger.info(f'{float(self.exchange.get_position()[\"unRealizedProfit\"])} / {(pos_margin / self.take_profit_percent)}')\n\n # logger.info('all strategy processing time : %s' % str(time.time() - start))\n","sub_path":"src/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":31199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"331986426","text":"from __future__ import print_function\n\nimport csv as csv\nimport sys\nimport os as os\n\n\nclass Paper:\n def __init__(self, data):\n self.id = int(data[0])\n self.year = int(data[1])\n self.title = str(data[2]).strip()\n self.event_type = str(data[3]).strip()\n self.pdf_name = (data[4]).strip()\n self.abstract = (data[5]).strip()\n self.text = (data[6]).strip()\n\n def __str___(self):\n return str(\"blah\")\n\n\ndef ensure_dir( path ):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\"Usage: python process.py [csv file] [files_to_process]\")\n exit(1)\n\n filename = sys.argv[1]\n fin = open(filename, \"r\")\n paper_dir = \"./papers/\"\n abs_dir = \"./abstracts/\"\n\n ensure_dir(paper_dir)\n ensure_dir(abs_dir)\n\n to_do = float(\"inf\")\n\n if len(sys.argv) == 3:\n try:\n to_do = int(sys.argv[2])\n except ValueError:\n print(\"Invalid 
number passed: \", sys.argv[2])\n to_do = float(\"inf\")\n\n\n if not fin or fin.closed:\n print(\"Unable to open file: \", filename)\n exit(1)\n\n reader = csv.reader(fin)\n\n idx = 0 \n\n for row in reader:\n if reader.line_num - 1 == 0:\n keys = row\n idx += 1\n continue\n\n\n d = Paper(row)\n new_title = d.title.replace(\" \", \"_\").replace(\"/\", \"\").replace(\"\\\\\", \"\") + \".txt\"\n\n fout = open(paper_dir + new_title, \"w+\")\n \n fout.write(d.text)\n fout.close()\n\n fout = open(abs_dir + new_title, \"w+\")\n fout.write(d.abstract);\n fout.close()\n\n if not fout.closed:\n print(\"Error closing file: \", filename)\n exit(1)\n if idx >= to_do:\n break\n idx += 1\n\n\n\n\n\n\nmain()\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"36164953","text":"import collections\nimport copy\nimport functools\nimport logging\n\nimport pkg_resources\nfrom jsonschema import Draft4Validator, ValidationError, draft4_format_checker\nfrom jsonschema.validators import extend\nfrom werkzeug.datastructures import FileStorage\n\nfrom ..exceptions import (BadRequestProblem, ExtraParameterProblem,\n UnsupportedMediaTypeProblem)\nfrom ..http_facts import FORM_CONTENT_TYPES\nfrom ..json_schema import Draft4RequestValidator, Draft4ResponseValidator\nfrom ..utils import all_json, boolean, is_json_mimetype, is_null, is_nullable\n\n_jsonschema_3_or_newer = pkg_resources.parse_version(\n pkg_resources.get_distribution(\"jsonschema\").version) >= \\\n pkg_resources.parse_version(\"3.0.0\")\n\nlogger = logging.getLogger('connexion.decorators.validation')\n\nTYPE_MAP = {\n 'integer': int,\n 'number': float,\n 'boolean': boolean,\n 'object': dict\n}\n\n\nclass TypeValidationError(Exception):\n def __init__(self, schema_type, parameter_type, parameter_name):\n \"\"\"\n Exception raise when type validation fails\n\n :type schema_type: str\n :type parameter_type: str\n :type parameter_name: str\n :return:\n \"\"\"\n self.schema_type = schema_type\n self.parameter_type = parameter_type\n self.parameter_name = parameter_name\n\n def __str__(self):\n msg = \"Wrong type, expected '{schema_type}' for {parameter_type} parameter '{parameter_name}'\"\n return msg.format(**vars(self))\n\n\ndef coerce_type(param, value, parameter_type, parameter_name=None):\n\n def make_type(value, type_literal):\n type_func = TYPE_MAP.get(type_literal)\n return type_func(value)\n\n param_schema = param.get(\"schema\", param)\n if is_nullable(param_schema) and is_null(value):\n return None\n\n param_type = param_schema.get('type')\n parameter_name = parameter_name if parameter_name else param.get('name')\n if param_type == \"array\":\n converted_params = []\n if parameter_type == \"header\":\n value = value.split(',')\n for v in value:\n try:\n converted = make_type(v, param_schema[\"items\"][\"type\"])\n except (ValueError, TypeError):\n converted = v\n converted_params.append(converted)\n return converted_params\n elif param_type == 'object':\n if param_schema.get('properties'):\n def cast_leaves(d, schema):\n if type(d) is not dict:\n try:\n return make_type(d, schema['type'])\n except (ValueError, TypeError):\n return d\n for k, v in d.items():\n if k in schema['properties']:\n d[k] = cast_leaves(v, schema['properties'][k])\n return d\n\n return cast_leaves(value, param_schema)\n return value\n else:\n try:\n return make_type(value, param_type)\n except ValueError:\n raise 
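# --- Illustrative sketch (not part of the original validation.py) ---
# coerce_type() above maps OpenAPI primitive types onto Python casts via
# TYPE_MAP before jsonschema validation runs, so the query string "5"
# paired with {"type": "integer"} becomes int 5. A stripped-down version
# of that idea:

SKETCH_TYPE_MAP = {"integer": int, "number": float}

def coerce_sketch(value, schema):
    cast = SKETCH_TYPE_MAP.get(schema.get("type"))
    if cast is None:
        return value
    try:
        return cast(value)
    except (TypeError, ValueError):
        return value  # leave it to the validator to report the mismatch

assert coerce_sketch("5", {"type": "integer"}) == 5
assert coerce_sketch("abc", {"type": "integer"}) == "abc"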
TypeValidationError(param_type, parameter_type, parameter_name)\n except TypeError:\n return value\n\n\ndef validate_parameter_list(request_params, spec_params):\n request_params = set(request_params)\n spec_params = set(spec_params)\n\n return request_params.difference(spec_params)\n\n\nclass RequestBodyValidator(object):\n\n def __init__(self, schema, consumes, api, is_null_value_valid=False, validator=None,\n strict_validation=False):\n \"\"\"\n :param schema: The schema of the request body\n :param consumes: The list of content types the operation consumes\n :param is_null_value_valid: Flag to indicate if null is accepted as valid value.\n :param validator: Validator class that should be used to validate passed data\n against API schema. Default is jsonschema.Draft4Validator.\n :type validator: jsonschema.IValidator\n :param strict_validation: Flag indicating if parameters not in spec are allowed\n \"\"\"\n self.consumes = consumes\n self.schema = schema\n self.has_default = schema.get('default', False)\n self.is_null_value_valid = is_null_value_valid\n validatorClass = validator or Draft4RequestValidator\n self.validator = validatorClass(schema, format_checker=draft4_format_checker)\n self.api = api\n self.strict_validation = strict_validation\n\n def validate_formdata_parameter_list(self, request):\n request_params = request.form.keys()\n spec_params = self.schema.get('properties', {}).keys()\n return validate_parameter_list(request_params, spec_params)\n\n def __call__(self, function):\n \"\"\"\n :type function: types.FunctionType\n :rtype: types.FunctionType\n \"\"\"\n\n @functools.wraps(function)\n def wrapper(request):\n if all_json(self.consumes):\n data = request.json\n\n empty_body = not(request.body or request.form or request.files)\n if data is None and not empty_body and not self.is_null_value_valid:\n try:\n ctype_is_json = is_json_mimetype(request.headers.get(\"Content-Type\", \"\"))\n except ValueError:\n ctype_is_json = False\n\n if ctype_is_json:\n # Content-Type is json but actual body was not parsed\n raise BadRequestProblem(detail=\"Request body is not valid JSON\")\n else:\n # the body has contents that were not parsed as JSON\n raise UnsupportedMediaTypeProblem(\n \"Invalid Content-type ({content_type}), expected JSON data\".format(\n content_type=request.headers.get(\"Content-Type\", \"\")\n ))\n\n logger.debug(\"%s validating schema...\", request.url)\n if data is not None or not self.has_default:\n self.validate_schema(data, request.url)\n elif self.consumes[0] in FORM_CONTENT_TYPES:\n data = dict(request.form.items()) or (request.body if len(request.body) > 0 else {})\n data.update(dict.fromkeys(request.files, '')) # validator expects string..\n logger.debug('%s validating schema...', request.url)\n\n if self.strict_validation:\n formdata_errors = self.validate_formdata_parameter_list(request)\n if formdata_errors:\n raise ExtraParameterProblem(formdata_errors, [])\n\n if data:\n props = self.schema.get(\"properties\", {})\n errs = []\n for k, param_defn in props.items():\n if k in data:\n try:\n data[k] = coerce_type(param_defn, data[k], 'requestBody', k)\n except TypeValidationError as e:\n errs += [str(e)]\n print(errs)\n if errs:\n raise BadRequestProblem(detail=errs)\n\n self.validate_schema(data, request.url)\n\n response = function(request)\n return response\n\n return wrapper\n\n def validate_schema(self, data, url):\n # type: (dict, AnyStr) -> Union[ConnexionResponse, None]\n if self.is_null_value_valid and is_null(data):\n return None\n\n try:\n 
self.validator.validate(data)\n except ValidationError as exception:\n error_path = '.'.join(str(item) for item in exception.path)\n error_path_msg = \" - '{path}'\".format(path=error_path) \\\n if error_path else \"\"\n logger.error(\n \"{url} validation error: {error}{error_path_msg}\".format(\n url=url, error=exception.message,\n error_path_msg=error_path_msg),\n extra={'validator': 'body'})\n raise BadRequestProblem(detail=\"{message}{error_path_msg}\".format(\n message=exception.message,\n error_path_msg=error_path_msg))\n\n return None\n\n\nclass ResponseBodyValidator(object):\n def __init__(self, schema, validator=None):\n \"\"\"\n :param schema: The schema of the response body\n :param validator: Validator class that should be used to validate passed data\n against API schema. Default is jsonschema.Draft4Validator.\n :type validator: jsonschema.IValidator\n \"\"\"\n ValidatorClass = validator or Draft4ResponseValidator\n self.validator = ValidatorClass(schema, format_checker=draft4_format_checker)\n\n def validate_schema(self, data, url):\n # type: (dict, AnyStr) -> Union[ConnexionResponse, None]\n try:\n self.validator.validate(data)\n except ValidationError as exception:\n logger.error(\"{url} validation error: {error}\".format(url=url,\n error=exception),\n extra={'validator': 'response'})\n raise exception\n\n return None\n\n\nclass ParameterValidator(object):\n def __init__(self, parameters, api, strict_validation=False):\n \"\"\"\n :param parameters: List of request parameter dictionaries\n :param api: api that the validator is attached to\n :param strict_validation: Flag indicating if parameters not in spec are allowed\n \"\"\"\n self.parameters = collections.defaultdict(list)\n for p in parameters:\n self.parameters[p['in']].append(p)\n\n self.api = api\n self.strict_validation = strict_validation\n\n @staticmethod\n def validate_parameter(parameter_type, value, param, param_name=None):\n if value is not None:\n if is_nullable(param) and is_null(value):\n return\n\n try:\n converted_value = coerce_type(param, value, parameter_type, param_name)\n except TypeValidationError as e:\n return str(e)\n\n param = copy.deepcopy(param)\n param = param.get('schema', param)\n if 'required' in param:\n del param['required']\n try:\n if parameter_type == 'formdata' and param.get('type') == 'file':\n if _jsonschema_3_or_newer:\n extend(\n Draft4Validator,\n type_checker=Draft4Validator.TYPE_CHECKER.redefine(\n \"file\",\n lambda checker, instance: isinstance(instance, FileStorage)\n )\n )(param, format_checker=draft4_format_checker).validate(converted_value)\n else:\n Draft4Validator(\n param,\n format_checker=draft4_format_checker,\n types={'file': FileStorage}).validate(converted_value)\n else:\n Draft4Validator(\n param, format_checker=draft4_format_checker).validate(converted_value)\n except ValidationError as exception:\n debug_msg = 'Error while converting value {converted_value} from param ' \\\n '{type_converted_value} of type real type {param_type} to the declared type {param}'\n fmt_params = dict(\n converted_value=str(converted_value),\n type_converted_value=type(converted_value),\n param_type=param.get('type'),\n param=param\n )\n logger.info(debug_msg.format(**fmt_params))\n return str(exception)\n\n elif param.get('required'):\n return \"Missing {parameter_type} parameter '{param[name]}'\".format(**locals())\n\n def validate_query_parameter_list(self, request):\n request_params = request.query.keys()\n spec_params = [x['name'] for x in self.parameters.get('query', [])]\n 
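# --- Illustrative sketch (not part of the original validation.py) ---
# All of the validators above delegate to jsonschema's Draft4Validator
# with draft4_format_checker and translate ValidationError into an HTTP
# problem. The core call pattern, stripped of connexion's error handling:
from jsonschema import Draft4Validator, ValidationError, draft4_format_checker

schema = {"type": "object", "required": ["name"],
          "properties": {"name": {"type": "string"}}}
validator = Draft4Validator(schema, format_checker=draft4_format_checker)
try:
    validator.validate({"name": 1})  # wrong type on purpose
except ValidationError as exc:
    # exc.path locates the failing property, exactly what the error
    # messages above are built from.
    print(list(exc.path), exc.message)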
return validate_parameter_list(request_params, spec_params)\n\n def validate_formdata_parameter_list(self, request):\n request_params = request.form.keys()\n try:\n spec_params = [x['name'] for x in self.parameters['formData']]\n except KeyError:\n # OAS 3\n return set()\n return validate_parameter_list(request_params, spec_params)\n\n def validate_query_parameter(self, param, request):\n \"\"\"\n Validate a single query parameter (request.args in Flask)\n\n :type param: dict\n :rtype: str\n \"\"\"\n val = request.query.get(param['name'])\n return self.validate_parameter('query', val, param)\n\n def validate_path_parameter(self, param, request):\n val = request.path_params.get(param['name'].replace('-', '_'))\n return self.validate_parameter('path', val, param)\n\n def validate_header_parameter(self, param, request):\n val = request.headers.get(param['name'])\n return self.validate_parameter('header', val, param)\n\n def validate_cookie_parameter(self, param, request):\n val = request.cookies.get(param['name'])\n return self.validate_parameter('cookie', val, param)\n\n def validate_formdata_parameter(self, param_name, param, request):\n if param.get('type') == 'file' or param.get('format') == 'binary':\n val = request.files.get(param_name)\n else:\n val = request.form.get(param_name)\n\n return self.validate_parameter('formdata', val, param)\n\n def __call__(self, function):\n \"\"\"\n :type function: types.FunctionType\n :rtype: types.FunctionType\n \"\"\"\n\n @functools.wraps(function)\n def wrapper(request):\n logger.debug(\"%s validating parameters...\", request.url)\n\n if self.strict_validation:\n query_errors = self.validate_query_parameter_list(request)\n formdata_errors = self.validate_formdata_parameter_list(request)\n\n if formdata_errors or query_errors:\n raise ExtraParameterProblem(formdata_errors, query_errors)\n\n for param in self.parameters.get('query', []):\n error = self.validate_query_parameter(param, request)\n if error:\n raise BadRequestProblem(detail=error)\n\n for param in self.parameters.get('path', []):\n error = self.validate_path_parameter(param, request)\n if error:\n raise BadRequestProblem(detail=error)\n\n for param in self.parameters.get('header', []):\n error = self.validate_header_parameter(param, request)\n if error:\n raise BadRequestProblem(detail=error)\n\n for param in self.parameters.get('cookie', []):\n error = self.validate_cookie_parameter(param, request)\n if error:\n raise BadRequestProblem(detail=error)\n\n for param in self.parameters.get('formData', []):\n error = self.validate_formdata_parameter(param[\"name\"], param, request)\n if error:\n raise BadRequestProblem(detail=error)\n\n return function(request)\n\n return wrapper\n","sub_path":"connexion/decorators/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":15637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"220302994","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.core import serializers\nfrom django.utils import timezone\nfrom django.db.models import Q\nfrom django.conf import settings\n\nimport re\nimport json\nimport datetime\nimport pytz\n\nfrom .models import *\nfrom .models import Machine\n\n\n# Create your views here.\n\n\ndef GetStringGetMethod(request):\n mid = ''\n b = ''\n c = ''\n d = ''\n e = ''\n ts = ''\n count = ''\n weight = ''\n ps = ''\n site_time = ''\n shift 
= ''\n\n mid = request.GET.get('a')\n b = request.GET.get('b')\n c = request.GET.get('c')\n d = request.GET.get('d')\n e = request.GET.get('e')\n m = request.GET.get('m')\n if re.search(r'\\w+\\$\\w+\\$\\w+\\$\\w+$', m):\n flag = 'ts'\n for i in m:\n if i == '$':\n if flag == 'ts':\n flag = 'count'\n elif flag == 'count':\n flag = 'weight'\n elif flag == 'weight':\n flag = 'ps'\n else:\n if flag == 'ts':\n ts += i\n elif flag == 'count':\n count += i\n elif flag == 'weight':\n weight += i\n else:\n ps += i\n try:\n if ts.isdigit():\n tsdatetime = datetime.datetime.fromtimestamp(\n int(ts), tz=pytz.timezone(settings.TIME_ZONE))\n site_time = timezone.localtime(timezone.now())\n try:\n MachineStatus.objects.create(machine_id=Machine.objects.get(machine_id=mid), active_date_time=site_time)\n except:\n MachineStatus.objects.filter(machine_id=Machine.objects.get(machine_id=mid)).update(active_date_time=site_time)\n if(ts != \"999\"):\n site_time = tsdatetime\n\n machine_shift = Machine.objects.get(machine_id = mid)\n shift1_time = (machine_shift.shift1_start_time,machine_shift.shift1_end_time)\n shift2_time = (machine_shift.shift2_start_time,machine_shift.shift2_end_time)\n shift3_time = (machine_shift.shift3_start_time,machine_shift.shift3_end_time)\n\n shiftstart1 = site_time.replace(\n hour=shift1_time(0).hour, minute=shift1_time(0).minute, second=shift1_time(0).second, microsecond=0)\n shiftend1 = site_time.replace(\n hour=shift1_time(1).hour, minute=shift1_time(1).minute, second=shift1_time(1).second, microsecond=0)\n shiftstart2 = site_time.replace(\n hour=shift2_time(0).hour, minute=shift2_time(0).minute, second=shift2_time(0).second, microsecond=0)\n shiftend2 = site_time.replace(\n hour=shift2_time(1).hour, minute=shift2_time(1).minute, second=shift2_time(1).second, microsecond=0)\n\n if shiftstart1 <= site_time and site_time <= shiftend1:\n shift = \"1\"\n elif shiftstart2 <= site_time and site_time <= shiftend2:\n shift = \"2\"\n print(\"shift3_time\",shift3_time)\n if shift3_time is not None:\n shiftstart3 = site_time.replace(\n hour=shift3_time(0).hour, minute=shift3_time(0).minute, second=shift3_time(0).second, microsecond=0)\n shiftend3 = site_time.replace(\n hour=shift3_time(1).hour, minute=shift3_time(1).minute, second=shift3_time(1).second, microsecond=0)\n if shiftstart3 <= site_time and site_time <= shiftend3:\n shift = \"3\"\n\n \"\"\"if shiftstart1 <= site_time and site_time <= shiftend1:\n shift = \"1\"\n elif shiftstart2 <= site_time and site_time <= shiftend2:\n shift = \"2\"\n elif shiftstart3 <= site_time and site_time <= shiftend3:\n shift = \"3\"\"\"\n \n \"\"\"shiftstart1 = site_time.replace(\n hour=7, minute=0, second=0, microsecond=0)\n shiftend1 = site_time.replace(\n hour=14, minute=59, second=59, microsecond=0)\n shiftstart2 = site_time.replace(\n hour=15, minute=0, second=0, microsecond=0)\n shiftend2 = site_time.replace(\n hour=22, minute=59, second=59, microsecond=0)\n if shiftstart1 <= site_time and site_time <= shiftend1:\n shift = \"1\"\n elif shiftstart2 <= site_time and site_time <= shiftend2:\n shift = \"2\"\n else:\n shift = \"3\"\"\"\n except :\n pass\n\n if ts != '999':\n try:\n if len(c) == 2:\n outer_diameter_code = c[0]\n pipe_length_code = c[1]\n elif len(c) == 4:\n outer_diameter_code = c[0] + c[1]\n pipe_length_code = c[2] + c[3]\n elif len(c) == 3:\n if int(c[0] + c[1]) > 19:\n outer_diameter_code = c[0]\n pipe_length_code = c[1] + c[2]\n else:\n outer_diameter_code = c[0] + c[1]\n pipe_length_code = c[2]\n else:\n outer_diameter_code = 
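# --- Illustrative sketch (not part of the original views_old.py) ---
# The shift lookup above builds (start, end) tuples per shift and then
# reads their elements; note that tuples are indexed with brackets
# (shift1_time[0]), not called like shift1_time(0). A compact version of
# the same inclusive window test with datetime.time values:
import datetime

def shift_for(now_t, windows):
    # windows: list of (label, start_time, end_time), bounds inclusive.
    for label, start, end in windows:
        if start <= now_t <= end:
            return label
    return ""

w = [("1", datetime.time(7, 0), datetime.time(14, 59, 59)),
     ("2", datetime.time(15, 0), datetime.time(22, 59, 59))]
assert shift_for(datetime.time(8, 30), w) == "1"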
None\n pipe_length_code = None\n except:\n outer_diameter_code = None\n pipe_length_code = None\n try:\n bms = BasicMetarialStandard.objects.filter(code=b[0])[0]\n basic_metarial = bms.toDic().get(\"basic_metarial\")\n except:\n bms = None\n basic_metarial = None\n try:\n stc = StandardTypeClassification.objects.filter(\n basic_metarial=bms, code=b[1])[0]\n standard_type_classification = stc.toDic().get(\"standard_type_classification\")\n except:\n stc = None\n standard_type_classification = None\n try:\n if(len(b) == 4):\n pts_code = b[2] + b[3]\n else:\n pts_code = b[2]\n pts = PressureTypeSpecification.objects.filter(\n basic_metarial=bms, standard_type_classification=stc, code=pts_code)[0]\n pressure_type_specification = pts.toDic().get(\"pressure_type_specification\")\n except:\n pts = None\n pressure_type_specification = None\n try:\n pod = PipeOuterDiameter.objects.filter(\n standard_type_classification=stc, code=outer_diameter_code)[0]\n pod_dic = pod.toDic()\n outer_diameter_unit=str(pod_dic.get(\"unit\"))\n outer_diameter = float(pod_dic.get(\"outer_diameter\"))\n except:\n pod = None\n outer_diameter = None\n outer_diameter_unit = None\n try:\n pl = PipeLength.objects.filter(\n standard_type_classification=stc, code=pipe_length_code)[0]\n pl_dic = pl.toDic()\n length = float(pl_dic.get(\"length\"))\n length_unit = str(pl_dic.get(\"unit\"))\n except:\n print(e)\n pl = None\n length = None\n length_unit = None\n try:\n if int(d) - int(weight) < 0:\n weightloss = int(d) - int(weight)\n weightgain = 0\n else:\n weightgain = int(d) - int(weight)\n weightloss = 0\n if ps == '0':\n pass_status = 'Underweight'\n elif ps == '1':\n pass_status = 'Overweight'\n elif ps == '2':\n pass_status = 'Passed'\n PipeDataProcessed.objects.create(machine_id=mid, basic_metarial=basic_metarial, standard_type_classification=standard_type_classification, pressure_type_specification=pressure_type_specification, outer_diameter=outer_diameter, outer_diameter_unit=outer_diameter_unit, length = length, length_unit = length_unit, timestamp = int(ts), count = int(count), weight = int(weight), maxweight = int(d), minweight = int(e), weightgain = weightgain, weightloss = weightloss, pass_status = pass_status, site_time = site_time, shift = shift)\n except Exception as excep:\n print(excep)\n try:\n site_time = site_time.isoformat()\n except:\n pass\n try:\n PipeData.objects.create(mid=mid, b=b, c=c, d=d, e=e, ts=ts, count=count,\n weight=weight, ps=ps, site_time=site_time, shift=shift)\n except:\n print(\"PipeData.objects.create ERROR\")\n return HttpResponse(status=200)\n\n\n\n\n\n\ndef get_synced_data(request):\n mid = request.GET.get('a', None)\n b = request.GET.get('b', None)\n c = request.GET.get('c', None)\n d = request.GET.get('d', None)\n e = request.GET.get('e', None)\n ts = request.GET.get('ts', None)\n count = request.GET.get('count', None)\n weight = request.GET.get('weight', None)\n ps = request.GET.get('ps', None)\n site_time = request.GET.get('site_time', None)\n shift = request.GET.get('shift', None)\n if mid != None and b != None and c != None and d != None and e != None and ts != None and count != None and weight != None and ps != None and site_time != None and shift != None:\n try:\n MachineStatus.objects.create(machine_id=Machine.objects.get(machine_id=mid), active_date_time=timezone.now())\n except:\n MachineStatus.objects.filter(machine_id=Machine.objects.get(machine_id=mid)).update(active_date_time=timezone.now())\n if len(c) == 2:\n outer_diameter_code = c[0]\n pipe_length_code = c[1]\n 
elif len(c) == 4:\n outer_diameter_code = c[0] + c[1]\n pipe_length_code = c[2] + c[3]\n elif len(c) == 3:\n if int(c[0] + c[1]) > 19:\n outer_diameter_code = c[0]\n pipe_length_code = c[1] + c[2]\n else:\n outer_diameter_code = c[0] + c[1]\n pipe_length_code = c[2]\n else:\n outer_diameter_code = None\n pipe_length_code = None\n try:\n bms = BasicMetarialStandard.objects.get(code=b[0])\n basic_metarial = bms.basic_metarial\n except:\n bms = None\n basic_metarial = None\n try:\n stc = StandardTypeClassification.objects.get(\n basic_metarial=bms, code=b[1])\n standard_type_classification = stc.standard_type_classification\n except:\n stc = None\n standard_type_classification = None\n try:\n if(len(b) == 4):\n pts_code = b[2] + b[3]\n else:\n pts_code = b[2]\n pts = PressureTypeSpecification.objects.get(\n basic_metarial=bms, standard_type_classification=stc, code=pts_code)\n pressure_type_specification = pts.pressure_type_specification\n except:\n pts = None\n pressure_type_specification = None\n try:\n pod = PipeOuterDiameter.objects.get(\n standard_type_classification=stc, code=outer_diameter_code)\n outer_diameter_unit=pod.unit.unit\n outer_diameter = float(pod.outer_diameter)\n except:\n pod = None\n outer_diameter = None\n outer_diameter_unit = None\n try:\n pl = PipeLength.objects.get(\n standard_type_classification=stc, code=pipe_length_code)\n length = float(pl.length)\n length_unit = str(pl.unit.unit)\n except:\n pl = None\n length = None\n length_unit = None\n if int(d) - int(weight) < 0:\n weightloss = int(d) - int(weight)\n weightgain = 0\n else:\n weightgain = int(d) - int(weight)\n weightloss = 0\n if ps == '0':\n pass_status = 'Underweight'\n elif ps == '1':\n pass_status = 'Overweight'\n elif ps == '2':\n pass_status = 'Passed'\n try:\n PipeDataProcessed.objects.create(\n machine_id=mid, \n basic_metarial=basic_metarial, \n standard_type_classification=standard_type_classification, \n pressure_type_specification=pressure_type_specification, \n outer_diameter=outer_diameter, \n outer_diameter_unit=outer_diameter_unit, \n length=length, \n length_unit=length_unit, \n timestamp=int(ts), \n count=int(count), \n weight=int(weight), \n maxweight=int(d), \n minweight=int(e), \n weightgain=weightgain, \n weightloss=weightloss, \n pass_status=pass_status, \n site_time=timezone.make_aware(datetime.datetime.strptime(site_time[:19], \"%Y-%m-%dT%H:%M:%S\")),\n shift=shift\n )\n except Exception as exception:\n print(str(exception))\n try:\n PipeData.objects.create(mid=mid, b=b, c=c, d=d, e=e, ts=ts, count=count,\n weight=weight, ps=ps, site_time=site_time, shift=shift)\n except Exception as exception:\n print(str(exception))\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=400)\n \n\n \n\n\n\n\n\n\ndef index(request):\n data = {\"data\": []}\n for i in PipeData.objects.all()[: : -1][:100]:\n data[\"data\"].append(i.toDic())\n return JsonResponse(data, json_dumps_params={'indent': 4})\n\n\n@login_required(login_url='/')\ndef fetch(request):\n data = {\"data\": []}\n for i in PipeDataProcessed.objects.all()[: 100]:\n data[\"data\"].append(i.toDic())\n return JsonResponse(data, json_dumps_params={'indent': 4})\n\n\n\n\n\n\n\n","sub_path":"api/views_old.py","file_name":"views_old.py","file_ext":"py","file_size_in_byte":14016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"275060965","text":"import sys\nsys.path.append('../')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport QuantumTomography as qt\nfrom 
qiskit import QuantumCircuit\nfrom qiskit.providers.aer.noise import NoiseModel, thermal_relaxation_error\nfrom qiskit.visualization import plot_gate_map\nimport matplotlib\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import ticker\nimport seaborn as sns\nimport networkx as nx\ncolor_map = sns.cubehelix_palette(reverse=True, as_cmap=True)\nfrom joblib import Parallel, delayed\n\n\ndef get_noise( job ):\n readout_error = [ job.properties().readout_error(j) for j in range(7) ]\n T1 = [ job.properties().t1(j) for j in range(7) ]\n return readout_error, T1\n\ndef plot_error_map( backend, single_gate_errors, double_gate_errors ):\n\n single_gate_errors = 100*single_gate_errors\n single_norm = matplotlib.colors.Normalize( vmin=min(single_gate_errors), vmax=max(single_gate_errors))\n q_colors = [color_map(single_norm(err)) for err in single_gate_errors]\n \n double_gate_errors = 100*double_gate_errors\n double_norm = matplotlib.colors.Normalize( vmin=min(double_gate_errors), vmax=max(double_gate_errors))\n l_colors = [color_map(double_norm(err)) for err in double_gate_errors]\n \n figsize=(12, 9)\n fig = plt.figure(figsize=figsize)\n gridspec.GridSpec(nrows=2, ncols=3)\n\n grid_spec = gridspec.GridSpec(\n 12, 12, height_ratios=[1] * 11 + [0.5], width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]\n )\n\n left_ax = plt.subplot(grid_spec[2:10, :1])\n main_ax = plt.subplot(grid_spec[:11, 1:11])\n right_ax = plt.subplot(grid_spec[2:10, 11:])\n bleft_ax = plt.subplot(grid_spec[-1, :5])\n bright_ax = plt.subplot(grid_spec[-1, 7:])\n\n plot_gate_map(backend, qubit_color=q_colors, line_color=l_colors, line_width=5,\n plot_directed=False,\n ax=main_ax )\n\n main_ax.axis(\"off\")\n main_ax.set_aspect(1)\n\n single_cb = matplotlib.colorbar.ColorbarBase(\n bleft_ax, cmap=color_map, norm=single_norm, orientation=\"horizontal\"\n )\n tick_locator = ticker.MaxNLocator(nbins=5)\n single_cb.locator = tick_locator\n single_cb.update_ticks()\n single_cb.update_ticks()\n bleft_ax.set_title(f\"H error rate\")\n\n cx_cb = matplotlib.colorbar.ColorbarBase(\n bright_ax, cmap=color_map, norm=double_norm, orientation=\"horizontal\"\n )\n tick_locator = ticker.MaxNLocator(nbins=5)\n cx_cb.locator = tick_locator\n cx_cb.update_ticks()\n bright_ax.set_title(f\"CNOT error rate\")\n \n return fig\n\ndef get_backend_conectivity(backend):\n\t\"\"\"\n\tGet the connected qubit of a backend. 
Has to be a quantum computer.\n\n\tParameters\n\t----------\n\tbackend: qiskit.backend\n\n\tReturn\n\t------\n\tconnexions: (list)\n\t\tList with the connected qubits\n\t\"\"\"\n\tdefaults = backend.defaults()\n\tconnexions = [indx for indx in defaults.instruction_schedule_map.qubits_with_instruction('cx')]\n\treturn connexions\n\n\ndef marginal_counts_dictionary( counts , idx ):\n \n if len(idx) == 0 :\n marginal_counts = counts\n else:\n marginal_counts = {}\n for key in counts:\n sub_key = ''\n for k in idx:\n sub_key += key[k]\n if sub_key in marginal_counts:\n marginal_counts[sub_key] += counts[key]\n else:\n marginal_counts[sub_key] = counts[key]\n \n return marginal_counts\n\n\ndef dict2array(counts, n_qubits ):\n\n p = np.zeros( 2**n_qubits )\n\n for idx in counts :\n p[ int(idx[::-1],2) ] = counts[idx]\n \n return p.reshape( n_qubits*[2] )\n\n\ndef resampling_counts( counts, resampling=0 ):\n \n if resampling > 0 :\n keys = counts.keys()\n probs = np.array(list(counts.values()))\n probs = np.random.multinomial( resampling, probs/np.sum(probs) )\n counts = dict(zip(keys, probs) )\n \n return counts\n\n\ndef tomographic_gate_set(n=1):\n \"\"\"\n Create circuits for perform Pauli tomography of a single qubit.\n \"\"\"\n \n circ_0 = QuantumCircuit(n)\n\n circ_x = QuantumCircuit(n)\n circ_x.x(range(n))\n\n circ_h = QuantumCircuit(n)\n circ_h.h(range(n))\n\n circ_k = QuantumCircuit(n)\n circ_k.u( np.pi/2, np.pi/2, -np.pi/2, range(n))\n\n circ_gates = [ circ_0, circ_x, circ_h, circ_k]\n\n return circ_gates\n\n\nclass tomographic_gate_set_tomography:\n \n def __init__( self, n ):\n self._n = n\n \n def circuits( self ):\n n = self._n\n circ_gates = tomographic_gate_set(n)\n circ_gst = [] \n\n for circ_j in circ_gates :\n for circ_i in circ_gates :\n for circ_k in circ_gates :\n qc = QuantumCircuit(n)\n qc.compose( circ_i, range(n), inplace=True )\n qc.compose( circ_j, range(n), inplace=True )\n qc.compose( circ_k, range(n), inplace=True )\n qc.measure_all()\n circ_gst.append( qc )\n \n self._circ_gst = circ_gst\n\n return circ_gst \n \n def fit( self, results, circ_gst=None, resampling=0 ):\n \n if circ_gst is None :\n circ_gst = self._circ_gst\n \n self._counts = []\n for qc in circ_gst:\n counts = resampling_counts( results.get_counts(qc), resampling=resampling )\n self._counts.append( counts )\n \n rho = np.array([1,0,0,0])\n Detector = np.array([ [1,0], [0,0], [0,0], [0,1] ])\n I = np.kron( np.eye(2), np.eye(2) )\n X = np.kron( qt.PauliMatrices(1), qt.PauliMatrices(1) )\n H = np.kron( qt.PauliMatrices(1) + qt.PauliMatrices(3), qt.PauliMatrices(1) + qt.PauliMatrices(3) )/2\n K = np.kron( qt.PauliMatrices(0) + 1j*qt.PauliMatrices(1), qt.PauliMatrices(0) - 1j*qt.PauliMatrices(1) )/2\n Gates = np.array([I,X,H,K])\n\n rho_hat_all = []\n Detetor_hat_all = []\n Gates_hat_all = []\n for m in range(self._n) :\n probs = []\n for counts in self._counts:\n probs_temp = dict2array( marginal_counts_dictionary( counts, [m] ), 1 ) \n probs.append( probs_temp/np.sum(probs_temp) )\n del probs_temp\n probs = np.array( probs ).reshape(4,4,4,2)\n rho_hat, Detetor_hat, Gates_hat = qt.MaximumLikelihoodGateSetTomography( probs, rho, \n Detector, Gates, 'detector')\n rho_hat_all.append( rho_hat )\n Detetor_hat_all.append( Detetor_hat )\n Gates_hat_all.append( Gates_hat )\n \n return [rho_hat_all, Detetor_hat_all, Gates_hat_all]\n \n \nclass measurement_process_tomography: \n \n def __init__( self, n=1, p=None ):\n \n self._n = n\n self._p = p\n \n \n def circuits(self):\n \n n = self._n\n p = self._p\n \n 
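# --- Illustrative sketch (not part of the original main-checkpoint.py) ---
# marginal_counts_dictionary() above marginalises a Qiskit-style counts
# dict down to the bit positions listed in `idx`, summing over the rest.
# A worked example on a 2-bit counts dict, keeping only position 0:

def marginal_sketch(counts, idx):
    out = {}
    for key, n in counts.items():
        sub = "".join(key[k] for k in idx)
        out[sub] = out.get(sub, 0) + n
    return out

assert marginal_sketch({"00": 3, "01": 5, "10": 2}, [0]) == {"0": 8, "1": 2}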
circ_0, circ_x, circ_h, circ_k = tomographic_gate_set(p)\n \n if n == 1 :\n \n circs_mpt = []\n for circ_hk in [circ_0, circ_h, circ_k ]:\n for circ_0x in [ circ_0, circ_x ]:\n for circ_xyz in [circ_0, circ_h, circ_k ]:\n qc = QuantumCircuit(p,2*p)\n qc.compose( circ_0x, range(p), inplace=True )\n qc.compose( circ_hk, range(p), inplace=True )\n qc.barrier()\n qc.measure( range(p), range(p) )\n qc.compose( circ_xyz, range(p), inplace=True )\n qc.barrier()\n qc.measure( range(p), range(p,2*p) )\n circs_mpt.append( qc )\n \n else :\n circs_state_s = []\n for circ_hk in [circ_0, circ_h, circ_k ]:\n for circ_0x in [ circ_0, circ_x ]:\n qc = QuantumCircuit(p)\n qc.compose( circ_0x, range(p), inplace=True )\n qc.compose( circ_hk, range(p), inplace=True )\n circs_state_s.append( qc )\n \n circs_measure_s = [circ_0, circ_h, circ_k ] \n \n circ_state = []\n for j in range(n):\n list_qubits = range(j,n*p,n)\n if j == 0 :\n for circ in circs_state_s:\n qc0 = QuantumCircuit( n*p )\n qc0.compose(circ.copy(), qubits=list_qubits, inplace=True)\n circ_state.append(qc0)\n else:\n circ_loop = circ_state.copy()\n circ_state = []\n for qc1 in circ_loop:\n for circ in circs_state_s:\n qc2 = qc1.compose(circ.copy(), qubits=list_qubits)\n circ_state.append(qc2)\n \n circ_measure = []\n for j in range(n):\n list_qubits = range(j,n*p,n)\n if j == 0 :\n for circ in circs_measure_s:\n qc0 = QuantumCircuit( n*p )\n qc0.compose(circ.copy(), qubits=list_qubits, inplace=True)\n circ_measure.append(qc0)\n else:\n circ_loop = circ_measure.copy()\n circ_measure = []\n for qc1 in circ_loop:\n for circ in circs_measure_s:\n qc2 = qc1.compose(circ.copy(), qubits=list_qubits)\n circ_measure.append(qc2) \n \n circs_mpt = []\n for i in range(6**n):\n for j in range(3**2): \n qc = QuantumCircuit( n*p, 2*n*p )\n qc.compose( circ_state[i], qubits=range(n*p), inplace=True )\n qc.measure( range(n*p), range(n*p) )\n qc.compose( circ_measure[j], qubits=range(n*p), inplace=True )\n qc.measure( range(n*p), range(n*p,2*n*p) )\n circs_mpt.append( qc )\n \n self._circuits = circs_mpt\n \n return circs_mpt\n \n \n def fit( self, results, circuits=None, gate_set = None, resampling = 0, out = 0 ): \n \n if circuits is None :\n circuits = self._circuits\n elif self._p is None : \n self._p = int( circuits[0].num_qubits / self._n )\n \n if self._n == 1:\n self._counts = []\n for qc in circuits:\n self._counts.append( resampling_counts( results.get_counts(qc), resampling=resampling ) )\n \n if gate_set is None :\n self._gateset = False \n self._states = np.array( [ [[1,0],[0,0]],\n [[0,0],[0,1]],\n [[1/2,1/2],[1/2,1/2]],\n [[1/2,-1/2],[-1/2,1/2]],\n [[1/2,-1j/2],[1j/2,1/2]],\n [[1/2,1j/2],[-1j/2,1/2]],\n ]).reshape( 6,4 ).T\n \n self._measurements = self._states / 3 \n else :\n self._gateset = True\n self._states, self._measurements = gate_set\n \n\n Υ_hat_all = []\n for m in range(self._p) :\n ran = [ m, self._p+m ] \n probs = []\n for counts in self._counts:\n probs_temp = dict2array( marginal_counts_dictionary( counts, ran ), 2 ) \n probs.append( probs_temp/np.sum(probs_temp) )\n del probs_temp\n probs = np.array(probs).reshape([6,3,2,2]).transpose(0,1,3,2).reshape(6,6,2).transpose(1,0,2)\n if self._gateset is False :\n Υ_hat = qt.MaximumLikelihoodCompleteDetectorTomography( self._states, \n self._measurements, \n probs , Func = 0, \n vectorized=True, out=out )\n elif self._gateset is True : \n Υ_hat = qt.MaximumLikelihoodCompleteDetectorTomography( self._states[m], \n self._measurements[m], \n probs, Func = 0, \n vectorized=True, out=out )\n 
Υ_hat_all.append( Υ_hat)\n else: \n self._counts = []\n for qc in circuits:\n self._counts.append( resampling_counts( results.get_counts(qc), resampling=resampling ) ) \n \n if gate_set is None :\n self._gateset = False\n states_s = np.array( [ [[1,0],[0,0]],\n [[0,0],[0,1]],\n [[1/2,1/2],[1/2,1/2]],\n [[1/2,-1/2],[-1/2,1/2]],\n [[1/2,-1j/2],[1j/2,1/2]],\n [[1/2,1j/2],[-1j/2,1/2]],\n ])\n \n measures_s = states_s.reshape(3,2,2,2)\n \n states = []\n for s1 in states_s:\n for s2 in states_s:\n state_temp = np.kron( s1, s2 )\n states.append( state_temp.flatten() )\n self._states = np.array(states).T\n \n measures = []\n for r1 in range(3):\n for r2 in range(3):\n for s1 in range(2):\n for s2 in range(2):\n measures_temp = np.kron( measures_s[r1,s1], measures_s[r2,s2] )\n measures.append( measures_temp.flatten() ) \n self._measures = np.array(measures).T/9 \n \n \n else :\n self._gateset = True\n states_s, measures_s = gate_set \n measures_s = np.array(measures_s).reshape(self._n*self._p,4,3,2).transpose(0,2,3,1)\n \n self._states = []\n self._measures = []\n \n for m in range(self._p):\n \n states = []\n for s1 in range(6):\n for s2 in range(6):\n state_temp = qt.Outer2Kron( np.kron( states_s[m*self._n][:,s1], \n states_s[m*self._n+1][:,s2] ), [2,2] )\n states.append( state_temp.flatten() )\n self._states.append( np.array(states).T )\n \n \n measures = []\n for r1 in range(3):\n for r2 in range(3):\n for s1 in range(2):\n for s2 in range(2):\n measures_temp = qt.Outer2Kron( np.kron( measures_s[m*self._n,r1,s1], \n measures_s[m*self._n+1,r2,s2] ), [2,2] )\n measures.append( measures_temp.flatten() ) \n self._measures.append( np.array(measures).T ) \n \n Υ_hat_all = []\n for m in range(self._p) :\n ran = [ m, self._p+m ] \n probs = []\n for counts in self._counts:\n probs_temp = dict2array( marginal_counts_dictionary( counts, ran ), 4 ) \n probs.append( probs_temp/np.sum(probs_temp) )\n del probs_temp\n probs_loop = np.array(probs).reshape(36,9,4,4).transpose(0,1,3,2).reshape(6**self._n,\n 6**self._n,\n 2**self._n).transpose(1,0,2)\n if self._gateset is False :\n Υ_hat = qt.MaximumLikelihoodCompleteDetectorTomography( self._states, \n self._measures, \n probs_loop, Func = 0, \n vectorized=True, out=out )\n elif self._gateset is True : \n Υ_hat = qt.MaximumLikelihoodCompleteDetectorTomography( self._states[m], \n self._measures[m], \n probs_loop, Func = 0, \n vectorized=True, out=out )\n Υ_hat_all.append( Υ_hat ) \n \n if len(Υ_hat_all) == 1 :\n return Υ_hat_all[0]\n else:\n return Υ_hat_all\n \n\n\n############# Device Tomography ################\n\nclass device_process_measurement_tomography :\n \n def __init__( self, backend, max_qobj=900 ) :\n \n self._backend = backend\n self._num_qubits = len( backend.properties().qubits )\n self._max_qobj = max_qobj\n \n coupling_map = get_backend_conectivity( self._backend )\n \n G = nx.Graph()\n G.add_node( range(self._num_qubits) )\n G.add_edges_from(coupling_map)\n G = nx.generators.line.line_graph(G)\n G_coloring = nx.coloring.greedy_color(G)\n degree = max( G_coloring.values() ) + 1\n parall_qubits = degree*[None]\n for x in G_coloring:\n if parall_qubits[G_coloring[x]] is None:\n parall_qubits[G_coloring[x]] = []\n parall_qubits[G_coloring[x]].append(x)\n \n \n circs_all = [ tomographic_gate_set_tomography( self._num_qubits ).circuits(), \n measurement_process_tomography( 1, self._num_qubits ).circuits() ]\n \n for pairs in parall_qubits :\n \n p = len(pairs)\n qubits = pairs\n qubits = [item for t in qubits for item in t]\n circ_double = 
measurement_process_tomography( 2, p ).circuits()\n circs = []\n for circ_loop in circ_double:\n circ = QuantumCircuit( self._num_qubits, 4*p )\n circ.compose(circ_loop, qubits=qubits, inplace=True)\n circs.append( circ )\n circs_all.append( circs )\n \n circs_pkg = []\n circ_temp = []\n pkg_idx = []\n \n idx = 0\n for circs in circs_all:\n if len(circ_temp) + len(circs) <= self._max_qobj:\n circ_temp += circs.copy()\n pkg_idx.append( idx ) \n else :\n circs_pkg.append( circ_temp )\n circ_temp = circs.copy()\n idx += 1\n circs_pkg.append( circ_temp )\n pkg_idx.append( idx ) \n \n self._circs_all = circs_all\n self._circs_pkg = circs_pkg\n self._pkg_idx = pkg_idx\n self._parall_qubits = parall_qubits\n \n\n def circuits( self ):\n \"\"\"\n Circuits to perform the process measurement tomography of each pair of connected qubits on a device\n \n In:\n backend\n out:\n circs_pkg : efficient storage of the circuits for execution.\n \n \"\"\"\n \n return self._circs_pkg \n \n \n def fit( self, results, out=1, resampling=0, paralell=True ):\n \n gateset = tomographic_gate_set_tomography( self._num_qubits ).fit( results[self._pkg_idx[0]] , \n self._circs_all[0], \n resampling = resampling )\n \n states_gst= []\n measures_gst = []\n for m in range(self._num_qubits):\n rho = gateset[0][m]\n Pi = gateset[1][m]\n Y = gateset[2][m]\n states_gst_temp = []\n measures_gst_temp = []\n for v in [ np.eye(4), Y[2], Y[3] ]:\n for u in [ np.eye(4), Y[1] ]:\n states_gst_temp.append( v@u@rho )\n measures_gst_temp.append( v.T.conj()@Pi ) \n \n states_gst.append( np.array(states_gst_temp).T )\n measures_gst.append( np.array(measures_gst_temp).transpose(1,0,2).reshape(4,-1)/3 )\n \n states_gst = np.array( states_gst )\n measures_gst = np.array( measures_gst )\n \n choi_single = measurement_process_tomography(1,self._num_qubits).fit( results[self._pkg_idx[1]], \n self._circs_all[1], \n resampling=resampling,\n out = out)\n \n if paralell is False:\n choi_double = []\n for k in range(2,len(self._circs_all)) :\n choi_double.append( measurement_process_tomography(2).fit( results[self._pkg_idx[k]], \n self._circs_all[k], \n resampling=resampling, \n out = out ) )\n elif paralell is True:\n fun_par = lambda k : measurement_process_tomography(2).fit( results[self._pkg_idx[k]], \n self._circs_all[k], \n resampling=resampling,\n out = out )\n choi_double = Parallel(n_jobs=-1)( delayed( fun_par )(k) for k in range(2,len(self._circs_all)) ) \n \n return choi_single, choi_double, gateset \n \n\n \n\n\n\n############# Noise model ################\n\ndef decoherence_noise( T1=5e3, T2=200e3 ):\n\n # T1 and T2 values for qubits 0-3\n T1s = np.random.normal( T1, np.sqrt(T1), 7) # Sampled from normal distribution mean 50 microsec\n T2s = np.random.normal( T2, np.sqrt(T2), 7) # Sampled from normal distribution mean 50 microsec\n \n # Truncate random T1s <= 0\n T1s[T1s<0]=0\n \n # Truncate random T2s <= T1s\n T2s = np.array([min(T2s[j], 2 * T1s[j]) for j in range(7)])\n\n # Instruction times (in nanoseconds)\n time_u1 = 0 # virtual gate\n time_u2 = 50 # (single X90 pulse)\n time_u3 = 100 # (two X90 pulses)\n time_cx = 300\n time_reset = 1000 # 1 microsecond\n time_measure = 1000 # 1 microsecond\n\n # QuantumError objects\n errors_reset = [thermal_relaxation_error(t1, t2, time_reset)\n for t1, t2 in zip(T1s, T2s)]\n errors_measure = [thermal_relaxation_error(t1, t2, time_measure)\n for t1, t2 in zip(T1s, T2s)]\n errors_u1 = [thermal_relaxation_error(t1, t2, time_u1)\n for t1, t2 in zip(T1s, T2s)]\n errors_u2 = 
[thermal_relaxation_error(t1, t2, time_u2)\n for t1, t2 in zip(T1s, T2s)]\n errors_u3 = [thermal_relaxation_error(t1, t2, time_u3)\n for t1, t2 in zip(T1s, T2s)]\n errors_cx = [[thermal_relaxation_error(t1a, t2a, time_cx).expand(\n thermal_relaxation_error(t1b, t2b, time_cx))\n for t1a, t2a in zip(T1s, T2s)]\n for t1b, t2b in zip(T1s, T2s)]\n\n # Add errors to noise model\n noise_thermal = NoiseModel()\n for j in range(7):\n noise_thermal.add_quantum_error(errors_reset[j], \"reset\", [j])\n noise_thermal.add_quantum_error(errors_measure[j], \"measure\", [j])\n noise_thermal.add_quantum_error(errors_u1[j], \"u1\", [j])\n noise_thermal.add_quantum_error(errors_u2[j], \"u2\", [j])\n noise_thermal.add_quantum_error(errors_u3[j], \"u3\", [j])\n for k in range(4):\n noise_thermal.add_quantum_error(errors_cx[j][k], \"cx\", [j, k])\n \n return noise_thermal \n\n\n\n\n\n############ Quantities ###################\n\ndef readout_fidelity( Pi ):\n d, N = Pi.shape\n d = int(np.sqrt(d))\n f = 0.\n for n in range(N):\n f += Pi[:,n].reshape(d,d)[n,n]/N\n return np.real( f ) \n\ndef qnd_fidelity( choi ):\n N = len(choi)\n d = int(np.sqrt(choi[0].shape[0]))\n f = 0\n for n in range(N):\n f += choi[n][(1+d)*n,(1+d)*n]/N\n return np.real( f ) \n \ndef destructiveness( chois ):\n choi = np.sum( chois, axis=0 )\n d = int(np.sqrt(choi.shape[0]))\n if d == 2:\n O = np.array([1,0,0,-1])/np.sqrt(2)\n D = np.linalg.norm( O - choi.T.conj()@O )/np.sqrt(8)\n else:\n P = np.eye(d)\n Bs = np.zeros((d**2,d),dtype=complex)\n for k in range(d):\n pp = np.kron( P[:,k], P[:,k] )\n Bs[:,k] = pp - choi.T.conj()@pp \n B = Bs.T.conj()@Bs\n vals, vecs = np.linalg.eigh(B)\n D = 0.5 * np.sqrt( np.max(vals)/2 )\n return D \n \ndef Quantities( Pi, choi ):\n \n f = readout_fidelity( Pi )\n q = qnd_fidelity( choi )\n d = 1 - destructiveness( choi )\n \n return f, q, d\n \ndef Kron_Choi( Choi_1, Choi_2 ):\n Y0 = [] \n for i in range( len(Choi_1) ):\n for j in range(len(Choi_2)):\n Y_loop = np.kron( Choi_1[i], Choi_2[j]) \n Y_loop = Y_loop.reshape(8*[2]).transpose(0,2,1,3,4,6,5,7).reshape(16,16) \n Y0.append( Y_loop )\n return Y0\n\ndef Cross_QNDness( Choi_single_1, Choi_single_2, Choi_double ):\n Y0 = [ qt.Process2Choi( A )/2 for A in Kron_Choi( Choi_single_1, Choi_single_2 )]\n Y1 = [ qt.Process2Choi( A )/2 for A in Choi_double]\n f = 0\n for i in range(4):\n f += qt.Fidelity( Y0[i], Y1[i] )/2\n return f\n\ndef Cross_Fidelity( Pi_single_1, Pi_single_2, Pi_double ):\n Pi0 = [ np.kron(A,B)/2 for A in Pi_single_1.reshape(2,2,2).transpose(1,2,0) for B in Pi_single_2.reshape(2,2,2).transpose(1,2,0) ]\n Pi1 = Pi_double.reshape(4,4,4).transpose(1,2,0)/2\n f = 0\n for i in range(4):\n f += qt.Fidelity( Pi0[i], Pi1[i] )/2\n return f\n\ndef Cross_Quantities( Pi1, Choi1, Pi2, Choi2, Pi12, Choi12 ):\n \n f = Cross_Fidelity( Pi1, Pi2, Pi12 )\n q = Cross_QNDness( Choi1, Choi2, Choi12 )\n \n return f, q\n\n####################### Plots ############################\n\ndef sph2cart(r, theta, phi):\n '''spherical to Cartesian transformation.'''\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n return x, y, z\n\ndef sphview(ax):\n '''returns the camera position for 3D axes in spherical coordinates'''\n r = np.square(np.max([ax.get_xlim(), ax.get_ylim()], 1)).sum()\n theta, phi = np.radians((90-ax.elev, ax.azim))\n return r, theta, phi\n\ndef getDistances(view, xpos, ypos, dz):\n distances = []\n for i in range(len(xpos)):\n distance = (xpos[i] - view[0])**2 + (ypos[i] - view[1])**2 + (dz[i] - 
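# --- Illustrative sketch (not part of the original main-checkpoint.py) ---
# readout_fidelity() above averages the diagonal elements Pi_n[n, n] of
# the vectorised POVM columns: F = (1/N) * sum_n <n|Pi_n|n>. For an ideal
# single-qubit measurement (Pi_0 = |0><0|, Pi_1 = |1><1|) this yields 1,
# as the small self-check below confirms.
import numpy as np

Pi_ideal = np.array([[1, 0],   # column n holds Pi_n flattened (d*d rows)
                     [0, 0],
                     [0, 0],
                     [0, 1]])

def readout_fidelity_sketch(Pi):
    d2, N = Pi.shape
    d = int(np.sqrt(d2))
    return float(np.real(sum(Pi[:, n].reshape(d, d)[n, n] for n in range(N)) / N))

assert readout_fidelity_sketch(Pi_ideal) == 1.0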
view[2])**2\n distances.append(np.sqrt(distance))\n return distances\n\ndef Bar3D( A , ax = None, xpos=None, ypos=None, zpos=None, dx=None, dy=None, M = 0, **args ):\n \n d = A.shape[0]\n camera = np.array([13.856, -24. ,0])\n \n if xpos is None :\n xpos = np.arange(d) \n if ypos is None :\n ypos = np.arange(d)\n xpos, ypos = np.meshgrid( xpos, ypos )\n xpos = xpos.flatten()\n ypos = ypos.flatten()\n \n if zpos is None :\n zpos = np.zeros_like(xpos)\n else :\n zpos = zpos.flatten()\n \n if dx is None :\n dx = 0.5 * np.ones_like(xpos)\n else :\n dx = dx * np.ones_like(ypos)\n \n if dy is None :\n dy = 0.5 * np.ones_like(ypos)\n else :\n dy = dy * np.ones_like(ypos)\n \n dz = A.flatten()\n z_order = getDistances(camera, xpos, ypos, zpos)\n \n if ax == None :\n fig = plt.figure() \n ax = fig.add_subplot( 1,1,1, projection='3d') \n maxx = np.max(z_order) + M\n \n# plt.rc('font', size=15) \n for i in range(xpos.shape[0]):\n pl = ax.bar3d(xpos[i], ypos[i], zpos[i], \n dx[i], dy[i], dz[i], \n zsort='max', **args )\n pl._sort_zpos = maxx - z_order[i]\n# ax.set_xticks( [0.25,1.25,2.25,3.25] )\n# ax.set_xticklabels((r'$|gg\\rangle$',r'$|ge\\rangle$',\n# r'$|eg\\rangle$',r'$|ee\\rangle$'))\n# ax.set_yticks( [0.25,1.25,2.25,3.25] )\n# ax.set_yticklabels((r'$\\langle gg|$',r'$\\langle ge|$',\n# r'$\\langle eg|$',r'$\\langle ee|$'))\n# ax.set_title( label, loc='left', fontsize=20, x = 0.1, y=.85)\n ax.set_zlim([0,1])\n return ax \n\n\ndef Abs_Bars3D(Y):\n fig = plt.figure(figsize=(len(Y)*4,5)) \n for y in range(len(Y)):\n ax = fig.add_subplot( 1, len(Y), y+1, projection='3d')\n Bar3D( np.abs( Y[y] ).T, ax=ax ) \n return fig\n\n\n\n \n \n \n \n \n \n \n \n \n \n","sub_path":"codes/.ipynb_checkpoints/main-checkpoint.py","file_name":"main-checkpoint.py","file_ext":"py","file_size_in_byte":29568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"47835597","text":"#! 
/usr/bin/python3\n\nimport subprocess\nimport os\nimport re\nimport getpass\n\n\ndef get_dev_list() -> list:\n dev_list = []\n file_list = os.listdir(path=\"/dev/\")\n for file in file_list:\n if re.match('sd[a-z]', file) and (3 == len(file)):\n dev_list.append('/dev/' + file)\n dev_list.sort()\n return dev_list\n\n\ndef get_partition_list() -> list:\n partition_list = []\n file_list = os.listdir(path=\"/dev/\")\n for file in file_list:\n if re.match('sd[a-z][0-9]', file) and (4 == len(file)):\n partition_list.append('/dev/' + file)\n partition_list.sort()\n return partition_list\n\n\ndef get_disk_dict(devl, parl) -> dict:\n disk_dict = {}\n for dev in devl:\n dev_partitions = []\n for par in parl:\n if re.match(dev, par):\n dev_partitions.append(par)\n disk_dict[dev] = dev_partitions\n return disk_dict\n\n\ndef list2str(the_list=[]) -> str:\n ret = ''\n for elem in the_list:\n ret += str(elem) + ' '\n return ret\n\n\ndef list2nstr(l=[]) -> str:\n ret = ''\n for elem in l:\n ret += str(elem) + '\\n'\n return ret\n\n\ndef get_mounted_dict() -> dict:\n mounted_dict = {}\n file = open('/proc/mounts', 'rt')\n if file is None:\n return mounted_dict\n all_text = file.read()\n lines = re.split('\\n', all_text)\n for line in lines:\n fields = re.split(' ', line)\n # first filter /dev/sd[a-z][0-9]\n if (not re.match('/dev/sd[a-z][0-9]', fields[0])) and (not (9 == fields[0])):\n continue\n # second filter /media/${USER}/\n splitted_path = re.split('/', fields[1])\n if '' == splitted_path[0]:\n del splitted_path[0]\n # print(splitted_path)\n if 'media' != splitted_path[0]:\n continue\n user = getpass.getuser()\n if user != splitted_path[1]:\n continue\n mounted_dict[fields[0]] = fields[1]\n\n return mounted_dict\n\n\nmd = get_mounted_dict()\nfor point in md:\n print(point[:8])\n print('\\t' + point + ' ' + md[point])\n\n\n","sub_path":"linux/bin/find_usb_flash.py","file_name":"find_usb_flash.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317488435","text":"# coding: utf-8\nimport matplotlib.pyplot as plt\nimport random\nplt.xlabel('Eixo X')\nplt.ylabel('Eixo Y')\nplt.title(u'Exemplo básico')\n#random.shuffle(lista_dados)\n#l = map(lambda x: x * x, lista_dados)\nl = [x ** 2 for x in range(100)]\nl.reverse()\nplt.plot(l)\nplt.show()\n","sub_path":"MachineLearning/python/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"613646509","text":"from tkinter import filedialog\nimport tkinter as tk\nimport time\nimport pickle\nfrom PIL import Image,ImageTk\nfrom stream import videostream,_canvas_size,colorize\nimport json\nimport os\nimport cv2\nimport numpy as np\nEVEN=0\nODD=1\n#farmbot_position: X:503, Y:413\nclass Sample:\n def __init__(self):\n self.rgbtop=None\n self.rgbside=None\n self.depth=None\n self.depthrgb=None\n self.timestamp=None\n self.parity=EVEN\n self.quality=\"a\"\n\nclass app(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self._frame=None\n self.stream = videostream()\n self.show_frame(Frame1)\n def show_frame(self,framename):\n frame=framename(self)\n if self._frame is not None:\n self._frame.destroy()\n self._frame=frame\n self._frame.pack_propagate(0)\n self._frame.pack()\n def exit(self):\n print(\"exiting...\")\n self.stream.stop_streaming()\n exit(0)\n\nclass Frame1(tk.Frame):\n def __init__(self,parent):\n tk.Frame.__init__(self,parent)\n 
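# Landing screen: a title label plus two navigation buttons that hand control to the sampler (Frame2) or the database checker (Frame3) via parent.show_frame().\n        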
parent.title(\"Sampling software\")\n label=tk.Label(self,text=\"Seedling Database\",font=\"Times 24 bold\")\n label.pack()\n button1=tk.Button(self,text=\"Seedling sampler\",command=lambda :parent.show_frame(Frame2))\n button1.pack()\n button2 = tk.Button(self, text=\"Database checker\", command=lambda: parent.show_frame(Frame3))\n button2.pack()\n self.config(width=500,height=500)\n\nclass Frame2(tk.Frame):\n def __init__(self,parent):\n tk.Frame.__init__(self,parent)\n parent.title(\"Seedling sampler\")\n self.parent = parent\n self.seedlingnum=0\n self.samplenum=0\n self.quality=\"a\"\n self.parity=EVEN\n self.selectionq=tk.IntVar()\n self.selectionp = tk.IntVar()\n self.config(width=800, height=800)\n self.folder_database=None\n self.doc=None\n self.dol=None\n #files of interest\n self.RGBtopimage=None\n self.RGBsideimage=None\n self.Depthimage=None\n self.Depthrgbimage=None\n self.Depthcolorized=None\n #flags\n self.rgb_captured=False\n self.depth_captured=False\n self.stop=False\n\n #Frames construction\n self.headerframe=tk.Frame(self,height=200,width=600)\n self.headerframe.grid(row=0,column=0)\n self.bodyframe=tk.Frame(self)\n self.bodyframe.grid(row=1,column=0)\n self.RGBframe=tk.Frame(self.bodyframe)\n self.RGBframe.grid(row=0,column=0)\n self.RGB_photoframe=tk.Frame(self.RGBframe,bg=\"gray\",height=_canvas_size[1]+20,width=2*_canvas_size[0]+50)\n self.RGB_photoframe.grid_propagate(0)\n self.RGB_photoframe.grid(row=1,column=1)\n self.RGB_buttonframe=tk.Frame(self.RGBframe)\n self.RGB_buttonframe.grid(row=1,column=0)\n self.depthframe=tk.Frame(self.bodyframe)\n self.depthframe.grid(row=1,column=0)\n self.depth_photoframe=tk.Frame(self.depthframe,bg=\"gray\",height=_canvas_size[1]+20,width=2*_canvas_size[0]+50)\n self.depth_photoframe.grid_propagate(0)\n self.depth_photoframe.grid(row=1,column=1)\n self.depth_buttonframe=tk.Frame(self.depthframe)\n self.depth_buttonframe.grid(row=1,column=0)\n self.quality_frame=tk.Frame(self.bodyframe)\n self.quality_frame.grid(row=0,column=1)\n self.special_buttons=tk.Frame(self.bodyframe)\n self.special_buttons.grid(row=1,column=1)\n self.logsframe=tk.Frame(self.bodyframe,bd=10,width=600,height=200)\n self.logsframe.grid_propagate(0)\n self.logsframe.grid(row=2,column=0)\n self.info_frame=tk.Frame(self.bodyframe)\n self.info_frame.grid(row=2,column=1)\n\n #Labels construction\n self.Seedling_label=tk.Label(self.headerframe,text=\"Processed Seedlings: {}\".format(self.seedlingnum),font=\"Helvetica 24 bold\")\n self.Seedling_label.grid(row=0,column=1)\n self.Samples_label=tk.Label(self.headerframe,text=\"Samples generated: {}\".format(self.samplenum),font=\"Helvetica 24 bold\")\n self.Samples_label.grid(row=0,column=2,padx=50)\n self.RGB_title=tk.Label(self.RGBframe,text=\"RGB Cameras\",font=\"Times 14 underline\")\n self.RGB_title.grid(row=0,column=0)\n self.depth_title=tk.Label(self.depthframe,text=\"Depth camera\",font=\"Times 14 underline\")\n self.depth_title.grid(row=0,column=0)\n self.quality_label=tk.Label(self.quality_frame,text=\"Quality\", font=\"Helvetica 14 italic\")\n self.quality_label.grid(row=0,column=1)\n self.logs=tk.Label(self.logsframe,font=\"Times 14 bold\")\n self.logs.grid(row=0,column=0)\n\n #buttons construction\n self.rgb_position_button=tk.Button(self.RGB_buttonframe,text=\"Position\",font=\"Times 12\")\n self.rgb_position_button.grid(row=0,column=0)\n self.rgb_capture_button=tk.Button(self.RGB_buttonframe,text=\"Capture\",font=\"Times 12\",command=self.rgb_capture)\n self.rgb_capture_button.grid(row=1,column=0)\n 
self.rgb_clear_button=tk.Button(self.RGB_buttonframe,text=\"Clear\",font=\"Times 12\",command=self.rgb_clear)\n self.rgb_clear_button.grid(row=2,column=0)\n self.depth_position_button=tk.Button(self.depth_buttonframe,text=\"Position\",font=\"Times 12\")\n self.depth_position_button.grid(row=0,column=0)\n self.depth_capture_button=tk.Button(self.depth_buttonframe,text=\"Capture\",font=\"Times 12\",command=self.depth_capture)\n self.depth_capture_button.grid(row=1,column=0)\n self.depth_clear_button=tk.Button(self.depth_buttonframe,text=\"Clear\",font=\"Times 12\",command=self.depth_clear)\n self.depth_clear_button.grid(row=2,column=0)\n self.delete_last_button=tk.Button(self.special_buttons,text=\"Delete Last\",font=\"Times 12\")\n self.delete_last_button.grid(row=0,column=0)\n self.generate_button=tk.Button(self.special_buttons,text=\"Generate sample\",font=\"Times 12\",command=self.generate_sample)\n self.generate_button.grid(row=1,column=0)\n self.select_button=tk.Button(self.special_buttons,text=\"Select folder\",font=\"Times 12\",command=self.select)\n self.select_button.grid(row=2,column=0)\n self.exit_button=tk.Button(self.special_buttons,text=\"EXIT\",fg=\"white\",bg=\"red\",font=\"Times 12 bold\",command=self.exit2)\n self.exit_button.grid(row=3,column=0)\n\n #radio-buttons contruction\n self.quality_a_rb=tk.Radiobutton(self.quality_frame,text=\"A\",variable=self.selectionq,value=0,command=self.selq)\n self.quality_b_rb = tk.Radiobutton(self.quality_frame, text=\"B\", variable=self.selectionq, value=1,command=self.selq)\n self.quality_c_rb = tk.Radiobutton(self.quality_frame, text=\"C\", variable=self.selectionq, value=2,command=self.selq)\n self.quality_a_rb.grid(row=1,column=0)\n self.quality_b_rb.grid(row=1, column=1)\n self.quality_c_rb.grid(row=1, column=2)\n\n self.canvas_top=tk.Label(self.RGB_photoframe,width=_canvas_size[0],height=_canvas_size[1])\n self.canvas_side = tk.Label(self.RGB_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_depth = tk.Label(self.depth_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_depthrgb = tk.Label(self.depth_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_top.grid(row=0,column=0,padx=10,pady=10)\n self.canvas_side.grid(row=0, column=1,padx=10,pady=10)\n self.canvas_depth.grid(row=3, column=0,padx=10,pady=10)\n self.canvas_depthrgb.grid(row=3,column=2,padx=10,pady=10)\n parent.stream.start_streaming()\n self.refresh_canvas()\n def refresh_canvas(self):\n if self.rgb_captured is False:\n self.RGBtopimage=self.parent.stream.rgbtopframe\n self.RGBsideimage=self.parent.stream.rgbsideframe\n if self.depth_captured is False:\n self.Depthcolorized=self.parent.stream.colorized\n self.Depthrgbimage=self.parent.stream.depthrgb\n self.Depthimage=self.parent.stream.depthimage\n image = cv2.cvtColor(self.RGBtopimage, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image,_canvas_size,cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_top.configure(image=img)\n self.canvas_top.image = img\n image = cv2.cvtColor(self.RGBsideimage, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_side.configure(image=img)\n self.canvas_side.image = img\n image = cv2.cvtColor(self.Depthrgbimage, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_depthrgb.configure(image=img)\n 
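# Keep a reference on the widget: Tkinter holds only a weak reference to PhotoImage objects, so without this assignment the image would be garbage-collected and the canvas would go blank.\n        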
self.canvas_depthrgb.image = img\n image = cv2.cvtColor(self.Depthcolorized, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_depth.configure(image=img)\n self.canvas_depth.image = img\n self.Seedling_label.configure(text=\"Processed Seedlings: {}\".format(self.seedlingnum))\n self.Samples_label.configure(text=\"Samples generated: {}\".format(self.samplenum))\n if self.stop is False:\n self.after(150, self.refresh_canvas)\n def rgb_capture(self):\n self.rgb_captured=True\n self.RGB_photoframe.configure(bg=\"green\")\n def depth_capture(self):\n self.depth_captured=True\n self.depth_photoframe.configure(bg=\"green\")\n def rgb_clear(self):\n self.rgb_captured = False\n self.RGB_photoframe.configure(bg=\"gray\")\n def depth_clear(self):\n self.depth_captured = False\n self.depth_photoframe.configure(bg=\"gray\")\n def generate_sample(self):\n if self.rgb_captured and self.depth_captured:\n if self.parity is EVEN:\n parity=\"even\"\n elif self.parity is ODD:\n parity=\"odd\"\n if self.quality is 0:\n quality=\"a\"\n elif self.quality is 1:\n quality=\"b\"\n elif self.quality is 2:\n quality=\"c\"\n name=\"Sample_num\"+\"{}\".format(self.samplenum)+\"Q\"+quality+\"P\"+parity\n cv2.imwrite(self.folder_database+\"/RGBtop\"+name+\".jpg\",self.RGBtopimage)\n cv2.imwrite(self.folder_database+\"/RGBside\"+name+\".jpg\",self.RGBsideimage)\n np.save(self.folder_database+\"/Depth\"+name+\".npy\",self.Depthimage)\n cv2.imwrite(self.folder_database+\"/Depthrgb\"+name+\".jpg\",self.Depthrgbimage)\n sample=Sample()\n sample.rgbtop=\"RGBtop\"+name+\".jpg\"\n sample.rgbside=\"/RGBside\"+name+\".jpg\"\n sample.depth=\"/Depth\"+name+\".npy\"\n sample.depthrgb=\"/Depthrgb\"+name+\".jpg\"\n sample.quality=quality\n sample.parity=parity\n file=open(self.folder_database+\"/Sample{}.pkl\".format(self.samplenum),\"wb\")\n pickle.dump(sample,file)\n secs=time.time()\n self.dol=time.ctime(secs)\n self.logs.configure(text=\"Sample {} generated succesfully\".format(self.samplenum))\n self.samplenum+=1\n self.seedlingnum+=3\n self.create_json()\n else:\n self.logs.configure(text=\"Please capture from both cameras\")\n def selq(self):\n self.quality=self.selectionq.get()\n def select(self):\n self.folder_database=filedialog.askdirectory(parent=self,title=\"Select database folder\",initialdir=\"~/\")\n self.logs.configure(text=\"Dabase folder is \"+self.folder_database)\n for root, dir, files in os.walk(self.folder_database):\n if \"db.json\" in files:\n file=open(self.folder_database+\"/db.json\",\"r\")\n database=json.load(file)\n self.seedlingnum=database[\"seedling_counter\"]\n self.samplenum=database[\"sample_counter\"]\n self.doc=database[\"doc\"]\n self.dol=database[\"dol\"]\n self.logs.configure(text=\"Database imported succesfully, last sample at {}\".format(self.dol))\n else:\n sec=time.time()\n self.doc=time.ctime(sec)\n self.create_json()\n def create_json(self):\n database={\"seedling_counter\":self.seedlingnum,\"sample_counter\":self.samplenum,\"doc\":self.doc,\"dol\":self.dol}\n with open(self.folder_database+\"/db.json\",\"w\") as file:\n json.dump(database,file)\n def exit2(self):\n self.parent.stream.stop_streaming()\n self.stop=True\n time.sleep(0.5)\n exit()\n\nclass Frame3(tk.Frame):\n def __init__(self,parent):\n tk.Frame.__init__(self,parent)\n parent.title(\"Database checker\")\n self.samplenum=0\n #frames construction\n self.totalsamples=0\n self.totalseedlings=0\n self.config(width=800, 
height=800)\n self.headerframe=tk.Frame(self)\n self.headerframe.grid(row=0,column=0)\n self.bodyframe=tk.Frame(self)\n self.bodyframe.grid(row=1,column=0)\n self.RGBframe = tk.Frame(self.bodyframe)\n self.RGBframe.grid(row=0, column=0)\n self.RGB_photoframe = tk.Frame(self.RGBframe, bg=\"gray\", height=_canvas_size[1] + 20,width=2 * _canvas_size[0] + 50)\n self.RGB_photoframe.grid_propagate(0)\n self.RGB_photoframe.grid(row=1, column=1)\n self.depthframe = tk.Frame(self.bodyframe)\n self.depthframe.grid(row=1, column=0)\n self.depth_photoframe = tk.Frame(self.depthframe, bg=\"gray\", height=_canvas_size[1] + 20,width=2 * _canvas_size[0] + 50)\n self.depth_photoframe.grid_propagate(0)\n self.depth_photoframe.grid(row=1, column=1)\n self.special_buttons=tk.Frame(self.bodyframe)\n self.special_buttons.grid(row=1,column=1)\n self.logsframe=tk.Frame(self.bodyframe,bd=10,width=600,height=200)\n self.logsframe.grid_propagate(0)\n self.logsframe.grid(row=2,column=0)\n self.info_frame=tk.Frame(self.bodyframe)\n self.info_frame.grid(row=2,column=1)\n\n #labels construction\n self.title=tk.Label(self.headerframe,text=\"Sample {}\".format(self.samplenum),font=\"Helvetica 25\")\n self.title.grid(row=0,column=1)\n self.RGB_title = tk.Label(self.RGBframe, text=\"RGB Cameras\", font=\"Times 14 underline\")\n self.RGB_title.grid(row=0, column=0)\n self.depth_title = tk.Label(self.depthframe, text=\"Depth camera\", font=\"Times 14 underline\")\n self.depth_title.grid(row=0, column=0)\n self.logs=tk.Label(self.logsframe)\n self.logs.grid(row=0,column=0)\n self.info_label=tk.Label(self.info_frame)\n self.info_label.grid(row=0,column=0)\n\n #canvas\n self.canvas_top = tk.Label(self.RGB_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_side = tk.Label(self.RGB_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_depth = tk.Label(self.depth_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_depthrgb = tk.Label(self.depth_photoframe, width=_canvas_size[0], height=_canvas_size[1])\n self.canvas_top.grid(row=0, column=0, padx=10, pady=10)\n self.canvas_side.grid(row=0, column=1, padx=10, pady=10)\n self.canvas_depth.grid(row=3, column=0, padx=10, pady=10)\n self.canvas_depthrgb.grid(row=3, column=2, padx=10, pady=10)\n\n #buttons\n self.select_button = tk.Button(self.special_buttons, text=\"Select folder\", font=\"Times 12\", command=self.select)\n self.select_button.grid(row=0, column=0)\n self.next_button=tk.Button(self.special_buttons,text=\"Next\", font=\"Times 12\",command=self.next)\n self.next_button.grid(row=1,column=0)\n self.back_button=tk.Button(self.special_buttons,text=\"Back\",font=\"Times 12\",command=self.back)\n self.back_button.grid(row=2,column=0)\n self.exit_button=tk.Button(self.special_buttons,text=\"EXIT\",fg=\"white\",bg=\"red\",font=\"Times 12 bold\",command=self.exit3)\n self.exit_button.grid(row=3,column=0)\n def select(self):\n self.folder_database = filedialog.askdirectory(parent=self, title=\"Select database folder\", initialdir=\"~/\")\n for root, dir, files in os.walk(self.folder_database):\n if \"db.json\" in files:\n file = open(self.folder_database + \"/db.json\", \"r\")\n database = json.load(file)\n self.totalseedlings = database[\"seedling_counter\"]\n self.totalsamples = database[\"sample_counter\"]\n self.doc = database[\"doc\"]\n self.dol = database[\"dol\"]\n self.logs.configure(text=\"Database imported succesfully, last sample at {}\".format(self.dol))\n else:\n self.logs.configure(text=\"No database 
found\")\n def back(self):\n if self.samplenum > 0:\n self.samplenum-=1\n self.title.configure(text=\"Sample {}\".format(self.samplenum))\n with open(self.folder_database+\"/Sample{}.pkl\".format(self.samplenum), \"rb\") as file:\n Sample = pickle.load(file)\n self.RGBtopimage = cv2.imread(self.folder_database+\"/\"+Sample.rgbtop, 1)\n self.RGBsideimage = cv2.imread(self.folder_database+\"/\"+Sample.rgbside, 1)\n self.Depthrgbimage = cv2.imread(self.folder_database+\"/\"+Sample.depthrgb, 1)\n depth = np.load(self.folder_database+\"/\"+Sample.depth)\n self.Depthcolorized = colorize(depth)\n self.refresh_canvas()\n\n def next(self):\n self.title.configure(text=\"Sample {}\".format(self.samplenum))\n with open(self.folder_database + \"/Sample{}.pkl\".format(self.samplenum), \"rb\") as file:\n Sample = pickle.load(file)\n self.RGBtopimage = cv2.imread(self.folder_database + \"/\" + Sample.rgbtop, 1)\n self.RGBsideimage = cv2.imread(self.folder_database + \"/\" + Sample.rgbside, 1)\n self.Depthrgbimage = cv2.imread(self.folder_database + \"/\" + Sample.depthrgb, 1)\n depth = np.load(self.folder_database + \"/\" + Sample.depth)\n self.Depthcolorized = colorize(depth)\n self.refresh_canvas()\n self.samplenum+=1\n if self.samplenum > self.totalsamples -1 :\n self.samplenum-=1\n def refresh_canvas(self):\n image = cv2.cvtColor(self.RGBtopimage, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_top.configure(image=img)\n self.canvas_top.image = img\n image = cv2.cvtColor(self.RGBsideimage, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_side.configure(image=img)\n self.canvas_side.image = img\n image = cv2.cvtColor(self.Depthrgbimage, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_depthrgb.configure(image=img)\n self.canvas_depthrgb.image = img\n image = cv2.cvtColor(self.Depthcolorized, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, _canvas_size, cv2.INTER_LINEAR)\n img = ImageTk.PhotoImage(image=Image.fromarray(image))\n self.canvas_depth.configure(image=img)\n self.canvas_depth.image = img\n def exit3(self):\n exit()\n\n\nif __name__==\"__main__\":\n app1=app()\n app1.mainloop()\n","sub_path":"Sampling_app.py","file_name":"Sampling_app.py","file_ext":"py","file_size_in_byte":19702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"61434947","text":"# Author: DINDIN Meryll\n# Date: 27 July 2019\n# Project: AsTeR\n\ntry: from service_NLP.imports import *\nexcept: from imports import *\n\nclass Senti_IBM:\n\n def __init__(self, credentials='configs/key_ibm.json'):\n\n with open(credentials, 'r') as raw: crd = json.load(raw)\n arg = {'iam_apikey': crd['key'], 'url': crd['url']}\n self.api = NaturalLanguageUnderstandingV1(version='2018-11-16', **arg)\n self.voc = list(pd.read_parquet('models/vocabulary.pq').index)\n \n sen = SentimentOptions(targets=self.voc)\n key = KeywordsOptions(sentiment=True, limit=10)\n self.fea = Features(sentiment=sen, keywords=key)\n\n def request(self, message):\n\n req = self.api.analyze(text=message, features=self.fea).get_result()\n emo = req['sentiment']['document']['score']\n wrd = [[e['text'], e['relevance']] for e in req['keywords']]\n \n return emo, wrd\n \nclass KeyWd_RAI:\n \n def __init__(self, 
credentials='configs/key_rapidai.json'):\n \n with open(credentials) as raw: key = json.load(raw)['key']\n self.h_1 = {\"X-RapidAPI-Key\": key}\n self.u_1 = \"https://textanalysis-keyword-extraction-v1.p.rapidapi.com/keyword-extractor-text\"\n self.h_2 = {\"X-RapidAPI-Key\": key, \"Content-Type\": \"application/json\"}\n self.u_2 = \"https://microsoft-azure-text-analytics-v1.p.rapidapi.com/keyPhrases\"\n \n def request(self, message):\n \n r_1 = requests.post(self.u_1, headers=self.h_1, data={\"text\": message, \"wordnum\": 10})\n try: r_1 = json.loads(r_1.content)['keywords']\n except: r_1 = []\n inp = {'documents': [{'language': 'en', 'id': 'string', 'text': message}]}\n r_2 = requests.post(self.u_2, headers=self.h_2, data=(str(inp)))\n try: r_2 = json.loads(r_2.content)['documents'][0]['keyPhrases']\n except: r_2 = []\n \n return r_1 + r_2\n \nclass GetClass:\n \n def __init__(self, directory='models'):\n \n self.r_f = joblib.load('/'.join([directory, 'ml_model.jb']))\n self.vec = joblib.load('/'.join([directory, 'vectorizer.jb']))\n \n def request(self, message):\n \n return self.r_f.predict(self.vec.transform([message]).astype('float'))[0]\n \nclass AnalyzeTranscript:\n \n def __init__(self, directory='.'):\n \n self.ibm = Senti_IBM(credentials='/'.join([directory, 'configs/key_ibm.json']))\n self.rai = KeyWd_RAI(credentials='/'.join([directory, 'configs/key_rapidai.json']))\n self.cls = GetClass(directory='/'.join([directory, 'models']))\n self.voc = pd.read_parquet('/'.join([directory, 'models/vocabulary.pq']))\n self.stp = set(joblib.load('/'.join([directory, 'models/stopwords.jb'])))\n \n @staticmethod\n def relevance_map(request):\n \n res = dict()\n for words, relevance in request:\n for word in words.split():\n res[word] = relevance\n\n return res\n\n def preprocess(self, message):\n \n # remove punctation\n res = message.translate(str.maketrans('', '', string.punctuation))\n # tokenize into words (all lower case)\n res = res.lower()\n # cast type\n res = res.encode('ascii', 'ignore').decode('ascii')\n\n lst = res.split() \n lst = [w for w in lst if not w in self.stp]\n res = ' '.join(lst)\n\n return res\n \n def importance_from_vocabulary(self, word):\n\n try: return float(self.voc.loc[word].importance)\n except: return 0.0\n\n @staticmethod\n def importance_from_relevance(word, mapper):\n\n try: return mapper[word]\n except: return 0.0\n \n def run(self, message):\n \n new = self.preprocess(message)\n e,l = self.ibm.request(new)\n rai = self.rai.request(new)\n cls = self.cls.request(new)\n kys = list(np.unique(rai + [e[0] for e in l]))\n \n rev = self.relevance_map(l)\n i_v = np.asarray([self.importance_from_vocabulary(w) for w in new.split()])\n i_r = np.asarray([self.importance_from_relevance(w, rev) for w in new.split()])\n sco = np.sum(i_v*i_r)\n \n return {'emotion': e, 'score': sco, 'keysections': kys, 'class': cls}\n","sub_path":"service_NLP/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"133769097","text":"# -*- coding: utf-8 -*-\n\"\"\"\nEste código esta basado en el curso de DeepLearning del profesor Andrew Ng\n\n\"\"\"\n\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom testCases_v_fb_empty import *\nfrom dnn_utils_v_fb_empty import sigmoid, sigmoid_backward, relu, relu_backward\n\n\n#%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 
'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n#%load_ext autoreload\n#%autoreload 2\n\nnp.random.seed(1)\n\ndef inicializar_parametros(n_x, n_h, n_y):\n \"\"\"\n Esta función da valores aleatorios a una red neuronal perceptron multicapa con una arquitectura de\n 3 capas (1 capa de entrada con n_x neuronas, 1 capa oculta con n_h neuronas, 1 capa de salida con n_y neuronas)\n \n Argumentos:\n n_x -- Número de neuronas de la capa de entrada\n n_h -- Número de neuronas de la capa de oculta\n n_y -- Número de neuronas de la capa de salida\n \n Returna:\n \n parametros -- Diccionario de Python que contiene a:\n W1 -- Matriz de pesos de tamaño (n_x, n_h)\n b2 -- Vector bias de tamaño (n_h, 1)\n W2 -- Matriz de pesos de tamaño(n_h, n_y)\n b3 -- Vector bias de tamaño (n_y, 1)\n \"\"\"\n \n np.random.seed(1)\n \n W1 = np.random.randn(n_x, n_h)*0.01\n b2 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_h, n_y)*0.01\n b3 = np.zeros((n_y, 1))\n \n assert(W1.shape == (n_x, n_h))\n assert(b2.shape == (n_h, 1))\n assert(W2.shape == (n_h, n_y))\n assert(b3.shape == (n_y, 1))\n \n parameters = {\"w1\": W1,\n \"b2\": b2,\n \"w2\": W2,\n \"b3\": b3}\n \n return parameters \n\n#parameters = inicializar_parametros(3,2,1)\n#print(\"w1 = \" + str(parameters[\"w1\"]))\n#print(\"b2 = \" + str(parameters[\"b2\"]))\n#print(\"w2 = \" + str(parameters[\"w2\"]))\n#print(\"b3 = \" + str(parameters[\"b3\"]))\n\n\n# =================================== Resultado de inicializar_Parametros =========\n# \n# W1 = [[ 0.01624345 -0.00611756]\n# [-0.00528172 -0.01072969]\n# [ 0.00865408 -0.02301539]]\n# b2 = [[ 0.]\n# [ 0.]]\n# W2 = [[ 0.01744812]\n# [-0.00761207]]\n# b3 = [[ 0.]]\n# =============================================================================\n\n#\ndef inicializar_parametros_profundos(layer_dims):\n \"\"\"\n Argumenots:\n layer_dims -- Array de Python que contiene las dimensiones de cada capa de la red (arquitectura de la red)\n \n Returna:\n parameters -- Diccionario de Python que contiene a: \"W1\", \"b2\", ..., \"WL-1\", \"bL\":\n Wl -- Matriz de pesos de tamaño (layer_dims[l-1], layer_dims[l])\n b(l+1) -- Vector bias de tamaño (layer_dims[l], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n \n parameters['W' + str(l)] = np.random.randn(layer_dims[l-1],layer_dims[l] )*0.01\n parameters['b'+ str(l+1)] = np.zeros((layer_dims[l], 1))\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l-1],layer_dims[l]))\n assert(parameters['b' + str(l+1)].shape == (layer_dims[l], 1))\n\n \n return parameters\n\n\n#parameters = inicializar_parametros_profundos([5,4,3])\n#print(\"W1 = \" + str(parameters[\"W1\"]))\n#print(\"b2 = \" + str(parameters[\"b2\"]))\n#print(\"W2 = \" + str(parameters[\"W2\"]))\n#print(\"b3 = \" + str(parameters[\"b3\"]))\n\n\n# ================================= Resultados de inicializar_parametros_profundos=======\n# W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493]\n# [-0.00277388 -0.00354759 -0.00082741 -0.00627001]\n# [-0.00043818 -0.00477218 -0.01313865 0.00884622]\n# [ 0.00881318 0.01709573 0.00050034 -0.00404677]\n# [-0.0054536 -0.01546477 0.00982367 -0.01101068]]\n# b2 = [[ 0.]\n# [ 0.]\n# [ 0.]\n# [ 0.]]\n# W2 = [[-0.01185047 -0.0020565 0.01486148]\n# [ 0.00236716 -0.01023785 -0.00712993]\n# [ 0.00625245 -0.00160513 -0.00768836]\n# [-0.00230031 0.00745056 0.01976111]]\n# b3 = [[ 0.]\n# [ 0.]\n# [ 0.]]\n# =============================================================================\n\n\ndef 
linear_forward(A, W, b):\n \"\"\"\n Implementa la parte lineal de la propagación hacia adelante\n\n Arguments:\n A -- activationes de la capa anterior (o de los datos de entrada): (tamaño de la capa anterior, número de ejemplos)\n W -- Matriz de pesos: Matriz de tamaño (Tamaño de la capa anterior , Tamaño de la capa actual)\n b -- Vector bias, Vector de tamaño (Tamaño de la capa actual, 1)\n\n Returns:\n Z -- el nivel de activación Z de la capa actual.\n cache -- Diccionario de Python que contiene a: \"A\", \"W\" and \"b\" ; son almacenados para hallar el backward de cada capa\n \"\"\"\n \n ### Haga su código acá ###(≈ 1 line of code) \n \n Z = np.dot(W.T, A) + b\n \n ### FIN ###\n \n assert(Z.shape == (W.T.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache\n\n#A, W, b = linear_forward_test_case()\n#Z, linear_cache = linear_forward(A, W, b)\n#print(\"Z = \" + str(Z))\n\n# ============================Results of linear_forward========================\n# Z = [[ 3.26295337 -1.23429987]]\n# =============================================================================\n\n\n#\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implementa la propagación hacia adelante para la capa LINEAR->ACTIVATION (activaciòn de la capa)\n\n Arguments:\n A_prev -- activaciones de la capa anterior (o de los datos de entrada): (tamaño de la capa anterior, número de ejemplos)\n W -- Matriz de pesos: Matriz de tamaño (Tamaño de la capa anterior , Tamaño de la capa actual)\n b -- Vector bias, Vector de tamaño (Tamaño de la capa actual, 1)\n activation -- La función de activaciòn que será usada en esta capa, string: \"sigmoid\" o \"relu\"\n\n Returns:\n A -- La salida de la función de activación\n cache --Diccionario de Python que contiene a: \"linear_cache\" y \"activation_cache\";\n son almacenados para hallar el backward de cada capa\n \"\"\"\n \n if activation == \"sigmoid\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n \n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n \n \n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n \n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n \n \n assert (A.shape == (W.shape[1], A.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache\n\n#A_prev, W, b = linear_activation_forward_test_case()\n#A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"sigmoid\")\n#print(\"With sigmoid: A = \" + str(A))\n#\n#A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"relu\")\n#print(\"With ReLU: A = \" + str(A))\n\n\n# ====================Results of linear_activation_forward ====================\n# With sigmoid: A = [[ 0.96890023 0.11013289]]\n# With ReLU: A = [[ 3.43896131 0. 
]]\n# =============================================================================\n\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implementa toda la propagación hacia adelante de la red para una arquitectura\n [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID\n \n Arguments:\n X -- datos de entrada, Array de tamaño (tamaño de entrada, número de ejemplos)\n parameters -- Salida de la función of inicializar_parametros_profundos()\n \n Returns:\n AL -- Valores de la función de activación de las neuronas de la capa de salida\n caches -- Lista de caches:\n cada cache de linear_activation_forward() (hay L-1 caches, indexados de 0 a L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 #W+b # number of layers - 1 en la red\n \n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A \n \n A, cache = linear_activation_forward(A_prev, parameters['W'+str(l)], parameters['b'+str(l+1)], 'relu')\n caches.append(cache)\n \n # Implement LINEAR -> SIGMOID. Add \"cache\" to the \"caches\" list.\n \n AL, cache = linear_activation_forward(A, parameters['W'+str(L)], parameters['b'+str(L+1)], 'sigmoid')\n caches.append(cache)\n\n assert(AL.shape == (1,X.shape[1]))\n \n return AL, caches\n\n#X, parameters = L_model_forward_test_case_2hidden()\n#AL, caches = L_model_forward(X, parameters)\n#print(\"AL = \" + str(AL))\n#print(\"Length of caches list = \" + str(len(caches)))\n\n# ===============Results of L_model_forward =========================\n# AL = [[ 0.03921668 0.70498921 0.19734387 0.04728177]]\n# Length of caches list = 3\n# =============================================================================\n\ndef compute_cost(AL, YS, costFuntion):\n \"\"\"\n Implementa la función de costo: cross-entropy cost(logistic Regression) o Median Squared Error\n\n Arguments:\n AL -- vector de probabilidades que corresponde a la predicción, tamaño (1, número de ejemplos)\n YS -- Vector de etiquetas deseadas (ejemplo: conteniendo 0 y 1), tamaño (1, número de ejemplos)\n\n Returna:\n cost -- cross-entropy cost o Median Squared Error\n \"\"\"\n \n m = YS.shape[1]\n\n # Halla la perdida desde AL and YS.\n \n ### Haga su código acá ### (≈ 4 lines of code) usar np.multiply, np.sum, np.log\n \n if costFuntion == \"LG\":\n \n cost = -np.sum(np.multiply(np.log(AL),YS) + np.multiply(np.log(1-AL),(1-YS)))/m\n \n elif costFuntion == \"MSE\":\n \n cost = (np.sum((AL-YS)*(AL-YS))/2)/m\n \n \n ### FIN ###\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. 
this turns [[17]] into 17).\n assert(cost.shape == ())\n \n return cost\n\n#YS, AL = compute_cost_test_case()\n#\n#print(\"cost LG= \" + str(compute_cost(AL, Y, \"LG\")))\n#print(\"cost MSE= \" + str(compute_cost(AL, Y, \"MSE\")))\n\n\n# ================Results of compute_cost==================================\n# cost LG= 0.414931599615\n# cost MSE= 0.0683333333333\n# =============================================================================\n\n# GRADED FUNCTION: linear_backward\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implementa la parte de propagación hacia atrás para una sola capa l\n\n Argumentos:\n dZ -- Gradiente del costo con respecto a la salida lineal de la capa actual l\n cache -- tupla: (A_prev, W, b) que llega desde la propagación hacia adelante en la capa actual\n\n Returna:\n dA_prev -- Gradiente del costo con respecto a la activación (de la capa anterior l-1), el tamaño es el mismo de A_prev\n dW -- Gradiente del costo con respecto a W (capa actual l), el tamaño es el mismo de W\n db -- Gradiente del costo con respecto a b (capa actual l), el tamaño es el mismo de b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n \n dW = np.dot(A_prev, dZ.T)/m\n db = np.sum(dZ,axis=1,keepdims=True)/m \n dA_prev = np.dot(W, dZ)\n \n ### FIN ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db\n\n#dZ, linear_cache = linear_backward_test_case()\n#dA_prev, dW, db = linear_backward(dZ, linear_cache)\n#print (\"dA_prev = \"+ str(dA_prev))\n#print (\"dW = \" + str(dW))\n#print (\"db = \" + str(db))\n\n# ==================Results of linear_backward ===============================\n# dA_prev = [[ 0.51822968 -0.19517421]\n# [-0.40506361 0.15255393]\n# [ 2.37496825 -0.89445391]]\n# dW = [[-0.10076895]\n# [ 1.40685096]\n# [ 1.64992505]]\n# db = [[ 0.50629448]]\n# =============================================================================\n#\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implementa la propagación hacia atrás backward-propagation para la capa LINEAR->ACTIVATION.\n \n Arguments:\n dA -- gradient de la activación de la capa l \n cache -- Tupla: (linear_cache, activation_cache)\n activation -- El tipo de función de activación a usar en esta capa, string: \"sigmoid\" or \"relu\"\n \n Returna:\n dA_prev -- Gradiente del costo con respecto a la activación (de la capa anterior l-1), el tamaño es el mismo de A_prev\n dW -- Gradiente del costo con respecto a W (capa actual l), el tamaño es el mismo de W\n db -- Gradiente del costo con respecto a b (capa actual l), el tamaño es el mismo de b\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n \n ### Haga su código acá ### (≈ 2 lines of code)\n \n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n ### FIN ###\n \n elif activation == \"sigmoid\":\n \n ### Haga su código acá ### (≈ 2 lines of code)\n \n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n ### FIN ###\n \n return dA_prev, dW, db\n\n#dAL, linear_activation_cache = linear_activation_backward_test_case()\n#\n#dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = \"sigmoid\")\n#print (\"sigmoid:\")\n#print (\"dA_prev = \"+ str(dA_prev))\n#print (\"dW = \" + str(dW))\n#print (\"db = \" + str(db) + \"\\n\")\n#\n#dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = 
\"relu\")\n#print (\"relu:\")\n#print (\"dA_prev = \"+ str(dA_prev))\n#print (\"dW = \" + str(dW))\n#print (\"db = \" + str(db))\n\n\n# ==============Results of linear_backward====================================\n# sigmoid:\n# dA_prev = [[ 0.11017994 0.01105339]\n# [ 0.09466817 0.00949723]\n# [-0.05743092 -0.00576154]]\n# dW = [[ 0.10266786]\n# [ 0.09778551]\n# [-0.01968084]]\n# db = [[-0.05729622]]\n# \n# relu:\n# dA_prev = [[ 0.44090989 -0. ]\n# [ 0.37883606 -0. ]\n# [-0.2298228 0. ]]\n# dW = [[ 0.44513824]\n# [ 0.37371418]\n# [-0.10478989]]\n# db = [[-0.20837892]]\n# =============================================================================\n#\n#\ndef L_model_backward(AL, YS, caches, costFuntion):\n \"\"\"\n Implementa la propagación hacia atrás backward-propagation para toda la red [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID\n \n Arguments:\n AL -- vector de probabilidades que corresponde a la predicción, tamaño (1, número de ejemplos)\n YS -- Vector de etiquetas deseadas (ejemplo: conteniendo 0 y 1), tamaño (1, número de ejemplos)\n \n caches -- lista of caches que contiene a:\n cada cache de linear_activation_forward() con \"relu\" (caches[l], for l in range(L-1) i.e l = 0...L-2)\n la cache de linear_activation_forward() con \"sigmoid\" (caches[L-1])\n \n Returns:\n grads -- Un diccionario con los gradientes\n grads[\"dA\" + str(l+1)] = ... \n grads[\"dW\" + str(l+1)] = ...\n grads[\"db\" + str(l+2)] = ... \n \"\"\"\n grads = {}\n L = len(caches)+1 # número de capas\n m = AL.shape[1]\n YS = YS.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation\n \n if costFuntion == \"LG\":\n \n dAL = - (np.divide(YS, AL) - np.divide(1 - YS, 1 - AL))\n \n elif costFuntion == \"MSE\":\n \n dAL = - (YS - AL)\n \n \n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"dAL, current_cache\". Outputs: \"grads[\"dAL-1\"], grads[\"dWL-1\"], grads[\"dbL\"] \n current_cache = caches[L-2]\n grads[\"dA\" + str(L-1)], grads[\"dW\" + str(L-1)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, 'sigmoid')\n \n # Loop from l=L-3 to l=0\n \n for l in reversed(range(L-2)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], current_cache\". Outputs: \"grads[\"dA\" + str(l+1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 2)] \n \n ### Haga su código acá ### (approx. 5 lines)\n \n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads['dA' + str(l + 2)], current_cache, 'relu')\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 2)] = db_temp \n \n ### FIN ###\n\n return grads\n\n#AL, Y_assess, caches = L_model_backward_test_case()\n#grads = L_model_backward(AL, Y_assess, caches,\"LG\")\n#print(\"grads LG\")\n#print (\"dW1 = \"+ str(grads[\"dW1\"]))\n#print (\"db2 = \"+ str(grads[\"db2\"]))\n#print (\"dA2 = \"+ str(grads[\"dA2\"])) \n#\n#grads = L_model_backward(AL, Y_assess, caches,\"MSE\")\n#print(\"grads MSE\")\n#print (\"dW1 = \"+ str(grads[\"dW1\"]))\n#print (\"db2 = \"+ str(grads[\"db2\"]))\n#print (\"dA2 = \"+ str(grads[\"dA2\"])) \n\n\n# ============Results of L_model_backward======================================\n# grads LG\n# dW1 = [[ 0.41010002 0. 0.05283652]\n# [ 0.07807203 0. 0.01005865]\n# [ 0.13798444 0. 0.01777766]\n# [ 0.10502167 0. 0.0135308 ]]\n# db2 = [[-0.22007063]\n# [ 0. 
]\n# [-0.02835349]]\n# dA2 = [[ 0.12913162 -0.44014127]\n# [-0.14175655 0.48317296]\n# [ 0.01663708 -0.05670698]]\n# grads MSE\n# dW1 = [[ 0.10087189 0. 0.01299615]\n# [ 0.0192033 0. 0.00247412]\n# [ 0.03393989 0. 0.00437275]\n# [ 0.02583208 0. 0.00332816]]\n# db2 = [[-0.05413055]\n# [ 0. ]\n# [-0.00697408]]\n# dA2 = [[-0.18214833 -0.10826111]\n# [ 0.19995659 0.11884557]\n# [-0.02346765 -0.01394816]]\n# =============================================================================\n\n#\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Actualizar los parametros usando la regla del descenso del gradiente\n \n Argumentos:\n parameters -- Diccionario en Python conteniendo los parameters \n grads -- Diccionario en Python conteniendo los gradientes, salida de L_model_backward\n \n Returns:\n parameters -- Diccionario en Python conteniendo los parameters actualizados \n parameters[\"W\" + str(l+1)] = ... \n parameters[\"b\" + str(l+2)] = ...\n \"\"\"\n \n L = len(parameters) // 2 + 1 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L-1):\n parameters[\"W\" + str(l+1)] -= learning_rate*grads['dW' + str(l+1)]\n parameters[\"b\" + str(l+2)] -= learning_rate*grads['db' + str(l+2)]\n \n return parameters\n\n#parameters, grads = update_parameters_test_case()\n#parameters = update_parameters(parameters, grads, 0.1)\n#\n#print (\"W1 = \"+ str(parameters[\"W1\"]))\n#print (\"b2 = \"+ str(parameters[\"b2\"]))\n#print (\"W2 = \"+ str(parameters[\"W2\"]))\n#print (\"b3 = \"+ str(parameters[\"b3\"]))\n\n# =================Results of update_parameters================================\n# W1 = [[-0.59562069 -1.76569676 -1.0535704 ]\n# [-0.09991781 -0.80627147 -0.86128581]\n# [-2.14584584 0.51115557 0.68284052]\n# [ 1.82662008 -1.18258802 2.20374577]]\n# b2 = [[-0.04659241]\n# [-1.28888275]\n# [ 0.53405496]]\n# W2 = [[-0.55569196]\n# [ 0.0354055 ]\n# [ 1.32964895]]\n# b3 = [[-0.84610769]]\n# =============================================================================\n\n\n","sub_path":"Forward-Backward-ANN-Empty/forward_backward_DNN_empty.py","file_name":"forward_backward_DNN_empty.py","file_ext":"py","file_size_in_byte":19293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"368617544","text":"from django.shortcuts import render, render_to_response\nfrom django.template import Context, loader, RequestContext\nfrom django.http import HttpResponse\n\nlists = [\n\n]\n\n\ndef log(request):\n template = loader.get_template('patient_log/log.html')\n context = Context({\n 'title': '患者ログツール'\n })\n return HttpResponse(template.render(context))\n\n# Create your views here.\n","sub_path":"patient_log/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"529821708","text":"# -*- coding:utf-8 -*-\n# @Author : 'longguangbin'\n# @Contact : lgb453476610@163.com\n# @Date : 2018/12/4\n\"\"\" \nUsage Of 'test_hyper_ts_2' : \n\"\"\"\n\nfrom statsmodels.tsa.arima_model import _arma_predict_out_of_sample\n\n\ndef MA_predict(data, p, w=None, step=1):\n # params = [0.5] * order[0]\n # steps = 3\n # residuals = [0]\n # p = order[0]\n # q = order[1]\n # k_exog = 0\n # k_trend = 0\n # y = a\n # _arma_predict_out_of_sample(params, steps, residuals, p, q, k_trend, k_exog, endog=y, exog=None, start=len(y))\n w = w[::-1] or [1.0 / p] * p\n residuals = [0]\n q = 0\n k_exog = 0\n k_trend 
= 0\n res = _arma_predict_out_of_sample(w, step, residuals, p, q, k_trend, k_exog, endog=data)\n return res\n\n\ndata = range(10)\np = 2\nw = [0.3, 0.7]\nMA_predict(data, p, w=w, step=3)\n","sub_path":"src/hyper_opt/HyperOPt/test_hyper_ts_2.py","file_name":"test_hyper_ts_2.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"327695261","text":"\"\"\"\nthis file contains all the necessary functions for setting and resetting\nflags and data packets in [pi_packets.pkl] and [pi_flags.pkl]\n\"\"\"\n\nimport cPickle as pickle\nimport binascii\n\n\n\n# ====================================================================\n#This function creates a dummy file with necessary text\n# in the current directory\ndef crtDummyFile(fileName, paste_str):\n fo = open(fileName, 'w') #creates or overwrites a file to contain text\n fo.write(paste_str) # writes wifi information to file\n fo.close() # closes the file\n# ====================================================================\n\n\n\n# ====================================================================\n# this function sets the starting flags if RPi_on = False\ndef setStartFlags():\n # load the file for the flags: [pi_start_flags.pkl]\n try:\n rpi_start_th = pickle.load(open('pi_start_flags.pkl', 'rb'))\n rpi_flags = pickle.load(open('pi_flags.pkl', 'rb'))\n \n rpi_start_th['RPi_on'] = True\n rpi_flags['RPi_restart'] = True\n rpi_start_th['RPi_restart_noti'] = True\n rpi_start_th['BT_connecting'] = False\n rpi_flags['BT_connected'] = False\n rpi_start_th['WIFI_connecting'] = False\n rpi_flags['WIFI_connected'] = False\n\n try:\n pickle.dump(rpi_flags, open('pi_flags.pkl', 'wb'))\n pickle.dump(rpi_start_th, open('pi_start_flags.pkl', 'wb'))\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n\n \n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n\n# end of function setStartFlags()\n# =======================================================\n\n\n\n\n# =======================================================\n# this function sets the starting flags if RPi_on = False\ndef setUSB1Flag():\n # load the file for the flags: [pi_flags.pkl]\n try:\n rpi_flags = pickle.load(open('pi_flags.pkl', 'rb'))\n # set the flag\n rpi_flags['USB_1_connected'] = True\n # dump the updated flag\n try:\n pickle.dump(rpi_flags, open('pi_flags.pkl', 'wb'))\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n \n# end of function setStartFlags()\n# =======================================================\n\n\n\n# =======================================================\n# this function resets the starting flags if RPi_on = False\ndef resetUSB1Flag():\n # load the file for the flags: [pi_flags.pkl]\n try:\n rpi_flags = pickle.load(open('pi_flags.pkl', 'rb'))\n #reset the flag\n rpi_flags['USB_1_connected'] = False\n # dump the updated flag\n try:\n pickle.dump(rpi_flags, open('pi_flags.pkl', 'wb'))\n except EOFError:\n pass\n #print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n pass\n #print('\\n---------IO---------\\n')\n #continue\n except EOFError:\n 
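# The pickle file may be absent or mid-write by another process; report it and fall through so the flag simply keeps its previous value.\n        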
print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n \n# end of function resetStartFlags()\n# =======================================================\n\n\n\n# =======================================================\n# this function is for resetting all the data packets except USB-1\ndef resetDataPack():\n # load the file for the flags: [pi_flags.pkl]\n try:\n pack_rec = pickle.load(open('pi_packets.pkl', 'rb'))\n pack_rec['HW_RCV_PACKET_1'] = False\n pack_rec['HW_RCV_PACKET_2'] = False\n pack_rec['HW_RCV_PACKET_3'] = False\n \n pack_rec['SW_RCV_PACKET_1'] = False\n pack_rec['SW_RCV_PACKET_2'] = False\n pack_rec['SW_RCV_PACKET_3'] = False\n\n # dump the updated flag\n try:\n pickle.dump(pack_rec, open('pi_packets.pkl', 'wb'))\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue \n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue \n# end of function resetDataPack()\n# =======================================================\n\n\n\n# =======================================================\n# this function sets USB1 (LCU) data packets to HW_RCV_PACKET\ndef setUSB1DataPackHW(mypack):\n # set HW_RCV_PACKET_4 = mypack\n # load the file for the flags: [pi_flags.pkl]\n try:\n pack_rec = pickle.load(open('pi_packets.pkl', 'rb'))\n print('Test: loaded - pi_packets.pkl - from setUSB1DataPackHW')\n pack_rec['HW_RCV_PACKET_4'] = mypack\n # dump the updated flag\n try:\n pickle.dump(pack_rec, open('pi_packets.pkl', 'wb'))\n except EOFError:\n print('\\n---------EOF from setUSB1DataPackHW dump---------\\n')\n #continue\n except IOError:\n print('\\n---------IO from setUSB1DataPackHW dump---------\\n')\n #continue\n except EOFError:\n print('\\n---------EOF from setUSB1DataPackHW load---------\\n')\n #continue\n except IOError:\n print('\\n---------IO from setUSB1DataPackHW load---------\\n')\n #continue\n# end of function setUSB1DataPackHW(mypack)\n# =======================================================\n\n\n\n# =======================================================\n# this function sets USB1 (LCU) data packets to HW_RCV_PACKET\ndef setUSB1DataPackSW():\n # check if the HW_RCV_PACKET is empty or not\n # load the file for the flags: [pi_flags.pkl]\n try:\n pack_rec = pickle.load(open('pi_packets.pkl', 'rb'))\n if(pack_rec['HW_RCV_PACKET_4'] != False):\n # transfer [HW_RCV_PACKET_4] to [SW_RCV_PACKET_4]\n pack_rec['SW_RCV_PACKET_4'] = pack_rec['HW_RCV_PACKET_4']\n try:\n rpi_flags = pickle.load(open('pi_flags.pkl', 'rb')) # loading flag file\n rpi_flags['SW_RCV_FLAG_4'] = True # set SW_RCV_FLAG_4 = True\n try:\n pickle.dump(rpi_flags, open('pi_flags.pkl', 'wb'))\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n pack_rec['HW_RCV_PACKET_4'] = False # reset HW_RCV_PACKET_4 = True \n # dump the updated flag\n try:\n pickle.dump(pack_rec, open('pi_packets.pkl', 'wb'))\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n# end of 
function setUSB1DataPackSW()\n# ====================================================================\n\n\n\n# ====================================================================\n# this function takes a list of decimal numbers and coverts it to hex string \ndef dectohex(decim):\n mystring = \"\"\n for i in range(0,len(decim)):\n if(decim[i] <= 15):\n mystring += \"0\"\n mystring += hex(decim[i])[2:]\n mystring += \" \"\n else:\n mystring += hex(decim[i])[2:]\n mystring += \" \"\n #print mystring\n #hexedVal = bytearray.fromhex(mystring)\n return mystring\n# ====================================================================\n\n\n\n# ====================================================================\n# this function takes a packet of bytes and coverts it to hex string \ndef packtohex(mypack):\n mydecim = []\n for myd in mypack:\n d = ord(myd)\n mydecim.append(d) \n hexedVal = dectohex(mydecim) \n return hexedVal\n# ====================================================================\n\n\n# ====================================================================\n# this function converts 2 byte packets to int to add 1 and then converts to byte packet again\ndef add1to2byteHexPack(C):\n print('459685')\n hexstr = packtohex(C[0])[0:2] + packtohex(C[1])[0:2] # convert to 4 byte hex string\n print(hexstr) # test purpose\n intval = int(hexstr, 16) + 1\n print(intval) # test purpose\n intstr = hex(intval)[2:]\n print(intstr) # test purpose\n if(len(intstr) == 3):\n intstr = '0' + intstr\n retpack = binascii.unhexlify(intstr)\n return retpack\n# ====================================================================\n\n\n# ====================================================================\ndef add1tobyteHexPack(C):\n hexstr = packtohex(C[0])[0:2] + packtohex(C[1])[0:2] # convert to 4 byte hex string\n intval = int(hexstr, 16) + 1\n intstr = hex(intval)[2:]\n retpack = binascii.unhexlify(intstr)\n return retpack\n# ====================================================================\n\n\n# ==================================================================== \n# this function prints the value of the dictionary element\ndef printPacketData(mystr):\n # load the file for the flags: [pi_flags.pkl]\n try:\n pack_rec = pickle.load(open('pi_packets.pkl', 'rb'))\n except EOFError:\n print('\\n---------EOF---------\\n')\n #continue\n except IOError:\n print('\\n---------IO---------\\n')\n #continue\n \n print('packet' + mystr + ':\\n')\n if(pack_rec[mystr] == True or pack_rec[mystr] == False):\n print(pack_rec[mystr])\n else:\n print(packtohex(pack_rec[mystr]))\n print('\\n')\n# ====================================================================\n\n\n# ====================================================================\n# this function prints a packet in well formatted hex\ndef printFormatHex(mypack):\n print(packtohex(mypack))\n# ====================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n","sub_path":"set_get_stuffs.py","file_name":"set_get_stuffs.py","file_ext":"py","file_size_in_byte":10785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205208388","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import griddata\n\n# load the raw data (which is in a matrix form)\ndata_raw = np.loadtxt('2SiO2.Al2O3_raw', comments='#')\n\n# set the values for the lower and upper bounds corresponding to the experimental measurement ranges:\n# columns (i.e. 
bound on the left, bound on the right of the raw data)\nx_lower_bound = 1000.0\nx_upper_bound = 8300.0\nx_count = data_raw.shape[1]\n\n# rows (i.e. bound at the top, bound at the bottom of the raw data)\ny_lower_bound = 140.0\ny_upper_bound = 1000.0\ny_count = data_raw.shape[0]\n\nx = np.linspace(x_lower_bound, x_upper_bound, x_count)\ny = np.linspace(y_lower_bound, y_upper_bound, y_count)\nxx, yy = np.meshgrid(x, y)\npoints = np.dstack([xx,yy]).reshape(-1,2) # \"2\" here implies make two columns, \"-1\" here implies automatic number of rows\n\n# do the transformation of coordinates\n# eg. x_transformed = x + 0.3*y\n# eg. y_transformed = y\npoints_transformed = np.empty_like(points) # allocate the output array before filling it\npoints_transformed[:,0] = points[:,0] + 0.3*points[:,1]\npoints_transformed[:,1] = points[:,1]\n\nvalues = data_raw.flatten()\nnp.savetxt('longform_data.txt', np.column_stack((points, values)), fmt='%.6f')\n","sub_path":"unwrap_to_xyvalue_form.py","file_name":"unwrap_to_xyvalue_form.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"213569230","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import debug as tf_debug\n\n\nxs = np.linspace(-0.5,0.49,100)\nx = tf.placeholder(tf.float32,shape=[None],name='x')\ny = tf.placeholder(tf.float32,shape=[None],name='y')\n\nk = tf.Variable([0.0],name=\"k\")\ny_hat = tf.multiply(k,x,name=\"y_hat\")\nsse = tf.reduce_sum(tf.square(y-y_hat),name=\"sse\")\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate=0.02).minimize(sse)\nsess = tf.Session()\n\nsess.run(tf.global_variables_initializer())\nsess = tf_debug.LocalCLIDebugWrapperSession(sess)\nfor _ in range(10):\n    sess.run(train_op,feed_dict={x:xs,y:42*xs})\n","sub_path":"kaggle/titanic/tfdbg_try.py","file_name":"tfdbg_try.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"461992589","text":"#! /usr/bin/env python\n# -*- coding: iso-8859-15 -*-\n##############################################################################\n# Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. Clermont II\n# Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI\n#\n# Distributed under the Boost Software License, Version 1.0\n# See accompanying file LICENSE.txt or copy at\n# http://www.boost.org/LICENSE_1_0.txt\n##############################################################################\n\n\n\"\"\"update of def syntax : transitional\n\"\"\"\n__author__ = \"Lapreste Jean-thierry (lapreste@univ-bpclermont.fr)\"\n__version__ = \"$Revision: 1.0 $\"\n__date__ = \"$Date: 2010 $\"\n__copyright__ = \"\"\" Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. 
Clermont II\n Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI\"\"\"\n__license__ = \"Boost Software License, Version 1.0\"\n\nimport os\nimport sys\nsys.path.insert(0,os.path.join(os.path.dirname(os.path.realpath(__file__)),'..',\"utils\"))\nsys.path.insert(0,os.path.join(os.path.dirname(os.path.realpath(__file__)),'..',\"nt2_basics\"))\nimport datetime\nimport shutil\nimport re\nfrom datetime import datetime\nfrom files_utils import write, exist, read\nfrom nt2_base_infos import Nt2_base_infos\nfrom pprint import PrettyPrinter\nfrom nt2_tb_props import Nt2_tb_props\nfrom nt2_fct_props import Nt2_fct_props\nfrom nt2_fct_internals import Nt2_fct_internals\nfrom modify_base import Modify_base\nsys.path.pop(0)\nsys.path.pop(0)\n\n \n\nclass Nt2_update_def(Nt2_fct_props) :\n def __init__(self, tb_name, fct_name) :\n Nt2_fct_props.__init__(self, tb_name, fct_name)\n self.__oldtxt = self.get_fct_def_text()\n\n def duplicate_def(self) :\n pin = self.get_fct_def_path()\n pout = pin+'.old'\n Modify_base.duplicate(pin,pout)\n\n def restore_def(self) :\n pout = self.get_fct_def_path()\n pin = pout+'.old'\n Modify_base.restore(pin,pout)\n\n def deplace(self, pattern, txt) :\n for i,l in enumerate(txt) :\n if re.match(pattern,l) :\n txt[i],txt[i+1]=txt[i+1],txt[i][2:]\n return txt\n return txt \n\n def replace(self, prefix,pattern, sub, txt) :\n for i,l in enumerate(txt) :\n if re.search(prefix+pattern,l.lower()) :\n print(l)\n txt[i] = re.sub('(?i)'+pattern,sub,l)\n print(txt[i])\n return txt\n \n def modify_def(self, tryonly='tryonly') :\n \"\"\" text is always modified from old one\"\"\"\n style = self.get_tb_style()\n fct_name = self.get_fct_name()\n print(\"modifying def for %s with style %s\"% (fct_name,style))\n new = self.get_fct_def_path()\n old = new+'.old'\n if not exist(old) : self.duplicate_def()\n oldtxt = read(old)\n# PrettyPrinter().pprint(oldtxt)\n s = '\\n'.join(oldtxt)\n s = re.sub('\\t',' ',s)\n if style == 'sys' :\n s = re.sub('functors','tag',s)\n else :\n pattern = 'namespace %s' % self.get_tb_name()\n s = re.sub(pattern,pattern+' { namespace tag',s)\n pattern = '%s::'%self.get_tb_name()\n s = re.sub(pattern,pattern+'tag::',s)\n s = re.sub(\"(}\\s*})\",\"}\\n } }\",s)\n# print(\"===========================\")\n newtxt = s.split('\\n')\n if self.get_tb_style()=='usr' :\n newtxt = self.deplace(\"\\s*NT2_FUNCTION_IMPLEMENTATION.*\",newtxt)\n newtxt = self.deplace(\"\\s*NT2_CRLIBM_FUNCTION_IMPLEMENTATION.*\",newtxt)\n newtxt = self.replace('(?i)(:| )',fct_name,fct_name.lower(),newtxt)\n newtxt += ['// modified by %s the %s' % (self.get_author(), datetime.now().strftime(\"%d/%m/%Y\"))]\n if tryonly != 'tryonly' :\n write(new,newtxt,False)\n else :\n print(\"---------------------------\")\n PrettyPrinter().pprint(oldtxt)\n print(\"===========================\")\n PrettyPrinter().pprint(newtxt)\n print(\"===========================\")\n \nif __name__ == \"__main__\" :\n nud = Nt2_update_def(\"gsl_specfun\",\"gsl_sf_bessel_i0_scaled\")\n print(\"ok\")\n## nud.duplicate_def()\n## nud.restore_def()\n nud.modify_def()\n","sub_path":"scriptpython/py_py/nt2_updating/nt2_update_def.py","file_name":"nt2_update_def.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"619366885","text":"from bs4 import BeautifulSoup\nfrom konlpy.tag import Twitter\nfrom collections import Counter\nimport multiprocessing as mp\nimport math\nimport time\nimport datetime as dt\nimport urllib\n\ndef 
find(start_from,go_to,cnt,good_cnt,reply,q):\n    #minor gallery archive version\n    base_form=\"https://mgall.app/list/sunshine?page={}\"\n    front_link=\"https://mgall.app\"\n    words=\"\"\n\n    for i in range(start_from,go_to): #page index for the post list (40 posts per page)\n        url=base_form.format(i)\n        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n        html = urllib.request.urlopen(req)\n        soup = BeautifulSoup(html,\"lxml\")\n        link = soup.find_all(\"td\",{\"class\":\"title\"})\n        #fetch the posts on this page\n\n        #inspect each post one by one\n        for m in link:\n            #is it a featured post?\n            cnt.value+=1\n            if \"icon_pic_b\" in m.attrs['class'] or \"icon_txt_b\" in m.attrs['class']:\n                good_cnt.value+=1\n            if m.find(\"em\",{\"class\":\"comment_count\"}):\n                reply.value+=int(m.em.text)\n            #print(m.a.text)\n            if(cnt.value%100==0):\n                print(i,\"page\",cnt.value,\"th post\",good_cnt.value,\"featured posts\",\"comments written so far\",reply.value)\n            words+=m.a.text+\" \"\n            #print(front_link+m.a.attrs['href']+'\\n')#link\n            \n            #check the post body\n\n            cont_url = front_link+m.a.attrs['href']\n            cont_req = urllib.request.Request(cont_url, headers={'User-Agent': 'Mozilla/5.0'})\n            cont_html = urllib.request.urlopen(cont_req)\n            cont_soup = BeautifulSoup(cont_html,\"lxml\")\n            \n            cont_soup=cont_soup.find(\"div\",{\"style\":\"overflow:hidden;\"})\n            #print(cont_soup.text+'\\n\\n')\n            words+=cont_soup.text+\" \"\n    #finally hand the collected words back\n    q.put(words)\n    \nif __name__ == \"__main__\":\n    #record the start time\n    stt = time.time()\n\n    #this queue carries the collected words back to the main process\n    q = mp.Queue()\n    #the rest are miscellaneous shared counters\n    good_cnt=mp.Value('i', 0)\n    reply=mp.Value('i', 0)\n    cnt=mp.Value('i', 0)\n\n    #we will spawn proc_num worker processes\n    procs=[]\n    range_index=[]\n    index_start_num=187\n    index_end_num=1506\n    index_num=index_end_num-index_start_num+1\n    proc_num=24\n\n    #split the page index range between the workers\n    for i in range(proc_num):\n        range_index.append(( math.ceil(i*(index_num/proc_num))+index_start_num, math.ceil((i+1)*(index_num/proc_num))+index_start_num ))\n        print(( math.ceil(i*(index_num/proc_num))+index_start_num, math.ceil((i+1)*(index_num/proc_num))+index_start_num ))\n\n    #assign each worker its range\n    for st,end in range_index:\n        proc = mp.Process(target=find,args=(st,end,cnt,good_cnt,reply,q))\n        procs.append(proc)\n        proc.start()\n\n    #wait until all the work is done\n    while q.qsize() != proc_num:\n        time.sleep(1)\n        print(\"crawling in progress... finished processes:\",q.qsize())\n\n    #print the elapsed time\n    print(\"crawling done. time taken:\", time.time() - stt)\n\n    #when done, merge everything into words\n    words=\"\"\n    while q.empty()!=True:\n        words+=q.get()\n    spliter = Twitter()\n    # konlpy Twitter object\n    nouns = spliter.nouns(words)\n    # the nouns function extracts only the nouns from the text\n    count = Counter(nouns)\n    # build a Counter over the extracted nouns\n    return_list = []  # variable to hold the noun frequencies\n    \n    #save the entire crawled corpus as well? 
normally keep this block commented out\n    all_file = open(\"all_output.txt\",'w',-1,\"utf-8\")\n    all_file.write(words)\n    all_file.close()\n\n    open_output_file = open(\"output.txt\",'w',-1,\"utf-8\")\n    open_output_file.write('{}\\n'.format(dt.datetime.today()))\n    open_output_file.write('posts written:{} featured posts:{} comments written:{}\\n'.format(cnt.value,good_cnt.value,reply.value))\n    for n, c in count.most_common(3000):\n        temp = {'tag': n, 'count': c}\n        return_list.append(temp)\n        open_output_file.write('{}\\t{}\\n'.format(n, c))\n    # the most_common method takes an integer and returns that many entries,\n    # ordered from the most frequent noun downward\n    # each noun and its usage count is stored in return_list.\n    open_output_file.close()\n    print(dt.datetime.today())\n","sub_path":"MGCrawler_title_cont_ver2.py","file_name":"MGCrawler_title_cont_ver2.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"420607419","text":"#!/usr/bin/env python\nimport sys\n\nsnps=[]\nwith open(sys.argv[1]) as fh:\n    for line in fh:\n        cols = line.split()\n        snps.append((cols[1], cols[2], cols[3], cols[4]))\n\nwith open(sys.argv[2]) as fh:\n    for line in fh:\n        cols = line.split()\n        rs=\"NULL\"\n        for snp in snps:\n            if cols[4] == snp[1] and cols[5] == snp[2] and cols[6] == snp[3]:\n                rs = snp[0]\n                break\n        print(line.strip() + \"\\t\" + rs)\n","sub_path":"modules/annovar/merge_snp_dropped_2_exonic_variant_function.abortion.genes.py","file_name":"merge_snp_dropped_2_exonic_variant_function.abortion.genes.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"156079926","text":"import ipywidgets as widgets\nfrom nwbwidgets.controllers import float_range_controller, move_int_slider_down, move_int_slider_up, \\\n    move_range_slider_down, move_range_slider_up\nimport unittest\n\n\ndef test_float_range_controller():\n    assert isinstance(float_range_controller(tmin=1, tmax=26), widgets.Widget)\n\n    \nclass MoveSliderTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.slider = widgets.IntSlider(\n            value=7,\n            min=2,\n            max=10,\n            step=1,\n            description='Test:',\n            disabled=False,\n            continuous_update=False,\n            orientation='horizontal',\n            readout=True,\n            readout_format='d')\n    \n    def test_move_int_slider_down(self):\n        move_int_slider_down(self.slider)\n        assert(self.slider.value == 6)\n    \n    def test_move_int_slider_up(self):\n        move_int_slider_up(self.slider)\n        assert(self.slider.value == 8)\n\n\nclass RangeSliderTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.slider = widgets.IntRangeSlider(\n            value=[5, 7],\n            min=0,\n            max=10,\n            step=1,\n            description='Test:',\n            disabled=False,\n            continuous_update=False,\n            orientation='horizontal',\n            readout=True,\n            readout_format='d')\n    \n    def test_move_range_slider_down_bigger(self):\n        self.slider.value = (4, 6)\n        move_range_slider_down(self.slider)\n        assert(self.slider.value == (2, 4))\n\n    def test_move_range_slider_down_smaller(self):\n        self.slider.value = (2, 6)\n        move_range_slider_down(self.slider)\n        assert(self.slider.value == (0, 4))\n    \n    def test_move_range_slider_up_smaller(self):\n        self.slider.value = (5, 7)\n        move_range_slider_up(self.slider)\n        assert(self.slider.value == (7, 9))\n\n    def test_move_range_slider_up_bigger(self):\n        self.slider.value = (5, 8)\n        move_range_slider_up(self.slider)\n        assert(self.slider.value == (7, 
10))\n","sub_path":"nwbwidgets/test/test_controllers.py","file_name":"test_controllers.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"256885227","text":"# Copyright (C) 2018, Anthony Oteri\n# All rights reserved.\n\nimport contextlib\nimport logging\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nimport sqlite3\n\nlog = logging.getLogger(__name__)\nSession = scoped_session(sessionmaker(expire_on_commit=False))\n\nDB_CONNECT_ARGS = {\"detect_types\": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES}\n\n\nclass _Base(object):\n    def __eq__(self, other):\n        for k in self.__table__.columns.keys():\n            try:\n                if getattr(self, k) != getattr(other, k):\n                    return False\n            except AttributeError:\n                return False\n        return True\n\n    def __str__(self):\n        fields = (\n            '%s=\"%s\"' % (k, v)\n            for k, v in self.__dict__.items()\n            if not k.startswith(\"_\")\n        )\n        return \"<%s[%s]>\" % (self.__class__.__name__, \", \".join(fields))\n\n    def __repr__(self):\n        fields = (\n            '%s=\"%r\"' % (k, v)\n            for k, v in self.__dict__.items()\n            if not k.startswith(\"_\")\n        )\n        return \"<%s[%s]>\" % (self.__class__.__name__, \", \".join(fields))\n\n\nBase = declarative_base(cls=_Base)\n\n\n@contextlib.contextmanager\ndef transaction():\n    \"\"\"Access the session.\"\"\"\n    session = Session()\n    try:\n        yield session\n        session.commit()\n    except Exception:\n        session.rollback()\n        raise\n    finally:\n        Session.remove()\n\n\ndef connect(db_url=\"sqlite:///timetrack.db\", echo=False):\n    \"\"\"\n    Create a persistent connection to the given database.\n\n    :param db_url: The URL to the database.\n        (Default value = 'sqlite:///timetrack.db')\n    :param echo: Log all interactions with the database.\n        (Default value = False)\n    \"\"\"\n    log.info(\"Connecting to database %s\", db_url)\n\n    engine = create_engine(\n        db_url, connect_args=DB_CONNECT_ARGS, native_datetime=True, echo=echo\n    )\n    Base.metadata.create_all(engine)\n    Session.configure(bind=engine)\n
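\n\n# Illustrative usage (a sketch, not part of the original module):\n#\n#     connect(\"sqlite:///timetrack.db\")\n#     with transaction() as session:\n#         session.add(entry)  # `entry` stands for any mapped model instance;\n#                             # the block commits on exit and rolls back on error\n","sub_path":"tt/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"55903554","text":"# Redis is a very quick in memory store. The benefits of using redis are that\n# things will generally be speedy, and it can be (mostly) persistent by dumping\n# the data to disk (see: https://redis.io/topics/persistence). The drawbacks\n# to using redis are that you have a higher chance of encountering data loss (in\n# this case, 'forgetting' that a token was revoked), when events like\n# power outages occur.\n#\n# When does it make sense to use redis for a blacklist? If you are blacklisting\n# every token on logout, and doing nothing besides that (such as keeping\n# track of what tokens are blacklisted, providing options to un-revoke\n# blacklisted tokens, or view tokens that are currently active for a user),\n# then redis is a great choice. In the worst case, a few tokens might slip\n# between the cracks in the case of a power outage or other such event, but\n# 99.99% of the time tokens will be properly blacklisted.\n#\n# Redis also has the benefit of supporting an expires time when storing data.\n# Utilizing this, you will not need to manually prune down the stored tokens\n# to keep it from blowing up over time. 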
This code includes how to do this.\n#\n# If you intend to use some other features in your blacklist (tracking\n# what tokens are currently active, option to revoke or unrevoke specific\n# tokens, etc), data integrity is probably more important to you than\n# raw performance. In this case a database solution (such as postgres) is\n# probably a better fit for your blacklist. Check out the \"database_blacklist\"\n# example for how that might work.\nimport redis\nfrom datetime import timedelta\nfrom flask import Flask, request, jsonify\nfrom flask_jwt_extended import (\n    JWTManager, create_access_token, create_refresh_token, get_jti,\n    jwt_refresh_token_required, get_jwt_identity, jwt_required, get_raw_jwt\n)\n\napp = Flask(__name__)\napp.secret_key = 'ChangeMe!'\n\n# Setup the flask-jwt-extended extension. See:\nACCESS_EXPIRES = timedelta(minutes=15)\nREFRESH_EXPIRES = timedelta(days=30)\napp.config['JWT_ACCESS_TOKEN_EXPIRES'] = ACCESS_EXPIRES\napp.config['JWT_REFRESH_TOKEN_EXPIRES'] = REFRESH_EXPIRES\napp.config['JWT_BLACKLIST_ENABLED'] = True\napp.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']\njwt = JWTManager(app)\n\n# Setup our redis connection for storing the blacklisted tokens\nrevoked_store = redis.StrictRedis(host='localhost', port=6379, db=0,\n                                  decode_responses=True)\n\n\n# Create our function to check if a token has been blacklisted. In this simple\n# case, we will just store the token's jti (unique identifier) in redis\n# whenever we create a new token (with the revoked status being 'false'). This\n# function will return the revoked status of a token. If a token doesn't\n# exist in this store, we don't know where it came from (as we are adding newly\n# created tokens to our store with a revoked status of 'false'). In this case\n# we will consider the token to be revoked, for safety purposes.\n@jwt.token_in_blacklist_loader\ndef check_if_token_is_revoked(decrypted_token):\n    jti = decrypted_token['jti']\n    entry = revoked_store.get(jti)\n    if entry is None:\n        return True\n    return entry == 'true'\n\n\n@app.route('/auth/login', methods=['POST'])\ndef login():\n    username = request.json.get('username', None)\n    password = request.json.get('password', None)\n    if username != 'test' or password != 'test':\n        return jsonify({\"msg\": \"Bad username or password\"}), 401\n\n    # Create our JWTs\n    access_token = create_access_token(identity=username)\n    refresh_token = create_refresh_token(identity=username)\n\n    # Store the tokens in redis with a status of not currently revoked. We\n    # can use the `get_jti()` method to get the unique identifier string for\n    # each token. We can also set an expires time on these tokens in redis,\n    # so they will get automatically removed after they expire. 
We will set\n # everything to be automatically removed shortly after the token expires\n access_jti = get_jti(encoded_token=access_token)\n refresh_jti = get_jti(encoded_token=refresh_token)\n revoked_store.set(access_jti, 'false', ACCESS_EXPIRES * 1.2)\n revoked_store.set(refresh_jti, 'false', REFRESH_EXPIRES * 1.2)\n\n ret = {\n 'access_token': access_token,\n 'refresh_token': refresh_token\n }\n return jsonify(ret), 201\n\n\n# A blacklisted refresh tokens will not be able to access this endpoint\n@app.route('/auth/refresh', methods=['POST'])\n@jwt_refresh_token_required\ndef refresh():\n # Do the same thing that we did in the login endpoint here\n current_user = get_jwt_identity()\n access_token = create_access_token(identity=current_user)\n access_jti = get_jti(encoded_token=access_token)\n revoked_store.set(access_jti, 'false', ACCESS_EXPIRES * 1.2)\n ret = {'access_token': access_token}\n return jsonify(ret), 201\n\n\n# Endpoint for revoking the current users access token\n@app.route('/auth/access_revoke', methods=['DELETE'])\n@jwt_required\ndef logout():\n jti = get_raw_jwt()['jti']\n revoked_store.set(jti, 'true', ACCESS_EXPIRES * 1.2)\n return jsonify({\"msg\": \"Access token revoked\"}), 200\n\n\n# Endpoint for revoking the current users refresh token\n@app.route('/auth/refresh_revoke', methods=['DELETE'])\n@jwt_refresh_token_required\ndef logout2():\n jti = get_raw_jwt()['jti']\n revoked_store.set(jti, 'true', REFRESH_EXPIRES * 1.2)\n return jsonify({\"msg\": \"Refresh token revoked\"}), 200\n\n\n# A blacklisted access token will not be able to access this any more\n@app.route('/protected', methods=['GET'])\n@jwt_required\ndef protected():\n return jsonify({'hello': 'world'})\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"examples/redis_blacklist.py","file_name":"redis_blacklist.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"489787600","text":"'''Require: pip install torch transformers datasets flaml[blendsearch,ray]\r\n'''\r\nimport time\r\nimport numpy as np\r\nimport os\r\n\r\ntry:\r\n import ray\r\n from datasets import (\r\n load_dataset,\r\n load_metric,\r\n )\r\n from transformers import (\r\n AutoModelForSequenceClassification,\r\n AutoTokenizer,\r\n Trainer,\r\n TrainingArguments,\r\n )\r\n import flaml\r\n MODEL_CHECKPOINT = \"microsoft/deberta-base\"\r\n task_to_keys = {\r\n \"cola\": (\"sentence\", None),\r\n \"mnli\": (\"premise\", \"hypothesis\"),\r\n \"mrpc\": (\"sentence1\", \"sentence2\"),\r\n \"qnli\": (\"question\", \"sentence\"),\r\n \"qqp\": (\"question1\", \"question2\"),\r\n \"rte\": (\"sentence1\", \"sentence2\"),\r\n \"sst2\": (\"sentence\", None),\r\n \"stsb\": (\"sentence1\", \"sentence2\"),\r\n \"wnli\": (\"sentence1\", \"sentence2\"),\r\n }\r\n max_seq_length = 128\r\n overwrite_cache = False\r\n pad_to_max_length = True\r\n padding = \"max_length\"\r\n\r\n TASK = \"qnli\"\r\n # HP_METRIC, MODE = \"loss\", \"min\"\r\n HP_METRIC, MODE = \"accuracy\", \"max\"\r\n\r\n sentence1_key, sentence2_key = task_to_keys[TASK]\r\n # Define tokenize method\r\n tokenizer = AutoTokenizer.from_pretrained(MODEL_CHECKPOINT, use_fast=True)\r\n\r\n def tokenize(examples):\r\n args = (\r\n (examples[sentence1_key],) if sentence2_key is None else (\r\n examples[sentence1_key], examples[sentence2_key])\r\n )\r\n return tokenizer(*args, padding=padding, max_length=max_seq_length,\r\n truncation=True)\r\n\r\nexcept ImportError:\r\n print(\"pip install torch 
transformers datasets flaml[blendsearch,ray]\")\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\nos.makedirs('logs', exist_ok=True)\r\nlogger.addHandler(logging.FileHandler('logs/tune_deberta.log'))\r\nlogger.setLevel(logging.INFO)\r\n\r\n\r\ndef train_deberta(config: dict):\r\n\r\n # Load dataset and apply tokenizer\r\n data_raw = load_dataset(\"glue\", TASK)\r\n data_encoded = data_raw.map(tokenize, batched=True)\r\n train_dataset, eval_dataset = data_encoded[\"train\"], data_encoded[\"validation\"]\r\n\r\n NUM_LABELS = len(train_dataset.features[\"label\"].names)\r\n\r\n metric = load_metric(\"glue\", TASK)\r\n\r\n def compute_metrics(eval_pred):\r\n predictions, labels = eval_pred\r\n predictions = np.argmax(predictions, axis=1)\r\n return metric.compute(predictions=predictions, references=labels)\r\n\r\n model = AutoModelForSequenceClassification.from_pretrained(\r\n MODEL_CHECKPOINT, num_labels=NUM_LABELS\r\n )\r\n\r\n training_args = TrainingArguments(\r\n output_dir='.',\r\n do_eval=False,\r\n disable_tqdm=True,\r\n logging_steps=20000,\r\n save_total_limit=0,\r\n fp16=True,\r\n **config,\r\n )\r\n\r\n trainer = Trainer(\r\n model,\r\n training_args,\r\n train_dataset=train_dataset,\r\n eval_dataset=eval_dataset,\r\n tokenizer=tokenizer,\r\n compute_metrics=compute_metrics,\r\n )\r\n\r\n # train model\r\n trainer.train()\r\n\r\n # evaluate model\r\n eval_output = trainer.evaluate()\r\n\r\n flaml.tune.report(\r\n loss=eval_output[\"eval_loss\"],\r\n accuracy=eval_output[\"eval_accuracy\"],\r\n )\r\n\r\n try:\r\n from azureml.core import Run\r\n run = Run.get_context()\r\n run.log('accuracy', eval_output[\"eval_accuracy\"])\r\n run.log('loss', eval_output[\"eval_loss\"])\r\n run.log('config', config)\r\n except ImportError:\r\n pass\r\n\r\n\r\ndef _test_deberta(method='BlendSearch'):\r\n\r\n max_num_epoch = 100\r\n num_samples = -1\r\n time_budget_s = 3600\r\n\r\n search_space = {\r\n # You can mix constants with search space objects.\r\n \"num_train_epochs\": flaml.tune.loguniform(1, max_num_epoch),\r\n \"learning_rate\": flaml.tune.loguniform(3e-5, 1.5e-4),\r\n \"weight_decay\": flaml.tune.uniform(0, 0.3),\r\n \"per_device_train_batch_size\": flaml.tune.choice([16, 32, 64, 128]),\r\n \"seed\": flaml.tune.choice([12, 22, 33, 42]),\r\n }\r\n\r\n start_time = time.time()\r\n ray.init(num_cpus=4, num_gpus=4)\r\n if 'ASHA' == method:\r\n algo = None\r\n elif 'BOHB' == method:\r\n from ray.tune.schedulers import HyperBandForBOHB\r\n from ray.tune.suggest.bohb import tuneBOHB\r\n algo = tuneBOHB(max_concurrent=4)\r\n scheduler = HyperBandForBOHB(max_t=max_num_epoch)\r\n elif 'Optuna' == method:\r\n from ray.tune.suggest.optuna import OptunaSearch\r\n algo = OptunaSearch()\r\n elif 'CFO' == method:\r\n from flaml import CFO\r\n algo = CFO(low_cost_partial_config={\r\n \"num_train_epochs\": 1,\r\n \"per_device_train_batch_size\": 128,\r\n })\r\n elif 'BlendSearch' == method:\r\n from flaml import BlendSearch\r\n algo = BlendSearch(low_cost_partial_config={\r\n \"num_train_epochs\": 1,\r\n \"per_device_train_batch_size\": 128,\r\n })\r\n elif 'Dragonfly' == method:\r\n from ray.tune.suggest.dragonfly import DragonflySearch\r\n algo = DragonflySearch()\r\n elif 'SkOpt' == method:\r\n from ray.tune.suggest.skopt import SkOptSearch\r\n algo = SkOptSearch()\r\n elif 'Nevergrad' == method:\r\n from ray.tune.suggest.nevergrad import NevergradSearch\r\n import nevergrad as ng\r\n algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)\r\n elif 'ZOOpt' == method:\r\n from 
ray.tune.suggest.zoopt import ZOOptSearch\r\n        algo = ZOOptSearch(budget=num_samples)\r\n    elif 'Ax' == method:\r\n        from ray.tune.suggest.ax import AxSearch\r\n        algo = AxSearch(max_concurrent=3)\r\n    elif 'HyperOpt' == method:\r\n        from ray.tune.suggest.hyperopt import HyperOptSearch\r\n        algo = HyperOptSearch()\r\n    if method != 'BOHB':\r\n        from ray.tune.schedulers import ASHAScheduler\r\n        scheduler = ASHAScheduler(\r\n            max_t=max_num_epoch,\r\n            grace_period=1)\r\n    analysis = ray.tune.run(\r\n        train_deberta,\r\n        metric=HP_METRIC,\r\n        mode=MODE,\r\n        resources_per_trial={\"gpu\": 4, \"cpu\": 4},\r\n        config=search_space, local_dir='logs/',\r\n        num_samples=num_samples, time_budget_s=time_budget_s,\r\n        keep_checkpoints_num=1, checkpoint_score_attr=HP_METRIC,\r\n        scheduler=scheduler, search_alg=algo)\r\n\r\n    ray.shutdown()\r\n\r\n    best_trial = analysis.get_best_trial(HP_METRIC, MODE, \"all\")\r\n    metric = best_trial.metric_analysis[HP_METRIC][MODE]\r\n\r\n    logger.info(f\"method={method}\")\r\n    logger.info(f\"n_trials={len(analysis.trials)}\")\r\n    logger.info(f\"time={time.time()-start_time}\")\r\n    logger.info(f\"Best model eval {HP_METRIC}: {metric:.4f}\")\r\n    logger.info(f\"Best model parameters: {best_trial.config}\")\r\n\r\n\r\ndef _test_deberta_cfo():\r\n    _test_deberta('CFO')\r\n\r\n\r\ndef _test_deberta_dragonfly():\r\n    _test_deberta('Dragonfly')\r\n\r\n\r\ndef _test_deberta_skopt():\r\n    _test_deberta('SkOpt')\r\n\r\n\r\ndef _test_deberta_nevergrad():\r\n    _test_deberta('Nevergrad')\r\n\r\n\r\ndef _test_deberta_zoopt():\r\n    _test_deberta('ZOOpt')\r\n\r\n\r\ndef _test_deberta_ax():\r\n    _test_deberta('Ax')\r\n\r\n\r\ndef __test_deberta_hyperopt():\r\n    _test_deberta('HyperOpt')\r\n\r\n\r\ndef _test_deberta_optuna():\r\n    _test_deberta('Optuna')\r\n\r\n\r\ndef _test_deberta_asha():\r\n    _test_deberta('ASHA')\r\n\r\n\r\ndef _test_deberta_bohb():\r\n    _test_deberta('BOHB')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    _test_deberta()\r\n","sub_path":"test/hf/test_deberta.py","file_name":"test_deberta.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"569272451","text":"# -*- coding: utf-8 -*-\n'''\nTask 19.1a\n\nRework the send_show_command function from task 19.1 so that the exception\nraised on a device authentication failure is handled.\n\nWhen the error occurs, the exception message should be printed.\n\nTo test this, change the password on the device or in the devices.yaml file.\n'''\n\nimport yaml\nimport netmiko\nfrom netmiko import ConnectHandler\n\ncommand = 'sh ip int br'\n\ndef send_show_command(dev_to_send, commands_to_send):\n    for each in dev_to_send:\n        try:\n            with ConnectHandler(**each) as ssh:\n                ssh.enable()\n                \n                result = ssh.send_command(commands_to_send)\n                print(result)\n                \n        except netmiko.ssh_exception.NetMikoAuthenticationException as error:\n            #incorrect credentials: print the exception message\n            print(error)\n            continue\n\nwith open('devices.yaml') as dev:\n    devs_param = yaml.safe_load(dev)\n    p = devs_param['routers']\n\nsend_show_command(p, command)\n","sub_path":"exercises/19_ssh_telnet/task_19_1a.py","file_name":"task_19_1a.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"9812417","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = 
cms.Process(\"R2D\")\n\nprocess.load(\"EventFilter.RPCRawToDigi.RPCSQLiteCabling_cfi\")\n\n##process.load(\"EventFilter.RPCRawToDigi.rpcUnpacker_cfi\")\n\n#include \"MagneticField/Engine/data/volumeBasedMagneticField.cfi\"\n#include \"Geometry/MuonCommonData/data/muonIdealGeometryXML.cfi\"\n#include \"Geometry/RPCGeometry/data/rpcGeometry.cfi\"\n##process.load(\"Geometry.MuonCommonData.data.muonIdealGeometryXML_cfi\")\n##process.load(\"Geometry.RPCGeometry.data.rpcGeometry_cfi\")\n##process.load(\"DQMServices.Core.DQM_cfg\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.muonRPCDigis = cms.EDFilter(\"RPCUnpackingModule\",\n InputLabel = cms.untracked.InputTag(\"source\"),\n doSynchro = cms.bool(False)\n )\n\nprocess.ltcUnpack = cms.EDProducer(\"LTCRawToDigi\")\n\nprocess.source = cms.Source ('NewEventStreamFileReader',\n fileNames = cms.untracked.vstring\n ( 'file:/opt/CMS/data/TTUdata/76401/PrivMuon.00076401.2933.A.storageManager.00.0001.dat' ) )\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n debugModules = cms.untracked.vstring('muonRPCDigis'),\n destinations = cms.untracked.vstring('cout'),\n cout = cms.untracked.PSet( threshold = cms.untracked.string('INFO'))\n )\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string('file:digis.root'),\n outputCommands = cms.untracked.vstring('keep *_muonRPCDigis_*_*',\n 'keep L1GtTechnicalTriggerRecord_*_*_*',\n 'keep *_ltcUnpack_*_*' )\n )\n\nprocess.p = cms.Path(process.ltcUnpack*process.muonRPCDigis)\n\nprocess.ep = cms.EndPath(process.out)\n\n","sub_path":"minidaqDQM/r2d-andres.py","file_name":"r2d-andres.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"368192069","text":"with open('data/interim/vocabulary.tsv', 'r') as dictfile:\n vocabulary = set()\n for line in dictfile:\n wd, freq = line.strip().split('\\t')\n if int(freq) > 50 and len(wd) > 1:\n vocabulary.add(wd.strip())\nprint(len(vocabulary))\nwith open('data/interim/full_text_wiki.txt', 'r') as infile:\n with open('data/processed/filtered_wiki.txt', 'w') as outfile:\n for line in infile:\n line = line.strip().split()\n lineset = set(line)\n wordintersect = vocabulary.intersection(lineset)\n line = [wd for wd in line if wd in wordintersect]\n line = ' '.join(line) + '\\n'\n outfile.write(line)\n","sub_path":"src/data_tasks/filter_words.py","file_name":"filter_words.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"11384329","text":"\n# import libraries\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nimport helpers\nimport problem_unittests as tests\n\ncsv_file = 'data/file_information.csv'\nplagiarism_df = pd.read_csv(csv_file)\n\n# print out the first few rows of data info\nplagiarism_df.head()\n\n\n# Dict mapping category names to category value and class\ncat_dict = {'non': (0, 0),\n 'heavy': (1, 1),\n 'light': (2, 1),\n 'cut': (3, 1),\n 'orig': (-1, -1)}\n\ndef convert_to_numerical(row):\n \"\"\"\n Convert the Category column from string to numerical values.\n Create a new column labeling plagiarism\n \n Arguments\n :param row: one row of a dataframe\n \"\"\"\n cat_value, cat_class = cat_dict[row['Category']]\n row['Category'] = cat_value\n row['Class'] = cat_class\n return row\n\n# Read in a csv 
file and return a transformed dataframe\ndef numerical_dataframe(csv_file='data/file_information.csv'):\n \"\"\"\n Reads in a csv file which is assumed to have `File`, `Category` and `Task` columns.\n This function does two things:\n 1) converts `Category` column values to numerical values\n 2) Adds a new, numerical `Class` label column.\n The `Class` column will label plagiarized answers as 1 and non-plagiarized as 0.\n Source texts have a special label, -1.\n \n Arguments:\n :param csv_file: The directory for the file_information.csv file\n \n Return:\n :return: A dataframe with numerical categories and a new `Class` label column\n \"\"\"\n df = pd.read_csv(csv_file)\n df = df.apply(convert_to_numerical, axis=1)\n return df\n\n# informal testing, print out the results of a called function\n# create new `transformed_df`\ntransformed_df = numerical_dataframe(csv_file ='data/file_information.csv')\n\n# check work\n# check that all categories of plagiarism have a class label = 1\n#print(transformed_df.head(10))\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# create a text column \ntext_df = helpers.create_text_column(transformed_df)\ntext_df.head()\n\n\n# after running the cell above\n# check out the processed text for a single file, by row index\nrow_idx = 0 # feel free to change this index\n\nsample_text = text_df.iloc[0]['Text']\n\n#print('Sample processed text:\\n\\n', sample_text)\n\nrandom_seed = 1 # can change; set for reproducibility\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# create new df with Datatype (train, test, orig) column\n# pass in `text_df` from above to create a complete dataframe, with all the information you need\ncomplete_df = helpers.train_test_dataframe(text_df, random_seed=random_seed)\n\n# check results\n#print(complete_df.tail(10))\n\n\n# Calculate the ngram containment for one answer file/source file pair in a df\ndef calculate_containment(df, n, answer_filename):\n \"\"\"\n Calculates the containment between a given answer text and its associated source text.\n This function creates a count of ngrams (of a size, n) for each text file in our data.\n Then calculates the containment by finding the ngram count for a given answer text, \n and its associated source text, and calculating the normalized intersection of those counts.\n \n Arguments\n :param df: A dataframe with columns,\n 'File', 'Task', 'Category', 'Class', 'Text', and 'Datatype'\n :param n: An integer that defines the ngram size\n :param answer_filename: A filename for an answer text in the df, ex. 
'g0pB_taskd.txt'\n \n Return\n :return: A single containment value that represents the similarity\n between an answer text and its source text.\n \"\"\"\n \n answer_df = df.query('File == @answer_filename')\n a_text = answer_df.iloc[0].at['Text']\n source_filename = 'orig_task'+answer_filename[9]+'.txt'\n source_df = df.query('File == @source_filename')\n s_text = source_df.iloc[0].at['Text']\n\n counts = CountVectorizer(analyzer='word', ngram_range=(n,n))\n ngrams = counts.fit_transform([a_text, s_text])\n ngram_array = ngrams.toarray()\n\n intersection = [min(a, s) for a, s in zip(*ngram_array)]\n c_value = sum(intersection) / sum(ngram_array[0])\n\n return c_value\n\n# select a value for n\nn = 3\n\n# indices for first few files\ntest_indices = range(5)\n\n# iterate through files and calculate containment\ncategory_vals = []\ncontainment_vals = []\nfor i in test_indices:\n # get level of plagiarism for a given file index\n category_vals.append(complete_df.loc[i, 'Category'])\n # calculate containment for given file and n\n filename = complete_df.loc[i, 'File']\n c = calculate_containment(complete_df, n, filename)\n containment_vals.append(c)\n\n# print out result, does it make sense?\nprint('Original category values: \\n', category_vals)\nprint()\nprint(str(n)+'-gram containment values: \\n', containment_vals)\n\n# Compute the normalized LCS given an answer text and a source text\ndef lcs_norm_word(answer_text, source_text):\n \"\"\"\n Computes the longest common subsequence of words in two texts;\n returns a normalized value.\n \n Arguments:\n :param answer_text: The pre-processed text for an answer text\n :param source_text: The pre-processed text for an answer's associated source text\n \n Return:\n :return: A normalized LCS value\n \"\"\"\n\n answer_words = answer_text.split()\n answer_len = len(answer_words)\n source_words = source_text.split()\n source_len = len(source_words)\n\n lcs_matrix = np.zeros(shape=(source_len+1,answer_len+1), dtype='int')\n\n for i, s in enumerate(source_words, 1):\n for j, a in enumerate(answer_words, 1):\n if a == s:\n lcs_matrix[i,j] = lcs_matrix[i-1,j-1] + 1\n else:\n lcs_matrix[i,j] = max(lcs_matrix[i,j-1], lcs_matrix[i-1,j])\n\n return lcs_matrix[source_len][answer_len] / answer_len\n\n# Run the test scenario from above\n# does your function return the expected value?\n\nA = \"i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents\"\nS = \"pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents\"\n\n# calculate LCS\nlcs = lcs_norm_word(A, S)\n#print('LCS = ', lcs)\n\n\n# expected value test\nassert lcs==20/27., \"Incorrect LCS value, expected about 0.7408, got \"+str(lcs)\n\n#print('Test passed!')\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# Function returns a list of containment features, calculated for a given n \n# Should return a list of length 100 for all files in a complete_df\ndef create_containment_features(df, n, column_name=None):\n \n containment_values = []\n \n if(column_name==None):\n column_name = 'c_'+str(n) # c_1, c_2, .. 
c_n\n \n # iterates through dataframe rows\n for i in df.index:\n file = df.loc[i, 'File']\n # Computes features using calculate_containment function\n if df.loc[i,'Category'] > -1:\n c = calculate_containment(df, n, file)\n containment_values.append(c)\n # Sets value to -1 for original tasks \n else:\n containment_values.append(-1)\n \n print(str(n)+'-gram containment features created!')\n return containment_values\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# Function creates lcs feature and add it to the dataframe\ndef create_lcs_features(df, column_name='lcs_word'):\n \n lcs_values = []\n \n # iterate through files in dataframe\n for i in df.index:\n # Computes LCS_norm words feature using function above for answer tasks\n if df.loc[i,'Category'] > -1:\n # get texts to compare\n answer_text = df.loc[i, 'Text'] \n task = df.loc[i, 'Task']\n # we know that source texts have Class = -1\n orig_rows = df[(df['Class'] == -1)]\n orig_row = orig_rows[(orig_rows['Task'] == task)]\n source_text = orig_row['Text'].values[0]\n\n # calculate lcs\n lcs = lcs_norm_word(answer_text, source_text)\n lcs_values.append(lcs)\n # Sets to -1 for original tasks \n else:\n lcs_values.append(-1)\n\n print('LCS features created!')\n return lcs_values\n\n\n# Define an ngram range\nngram_range = range(1,11)\n\n\n# The following code may take a minute to run, depending on your ngram_range\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nfeatures_list = []\n\n# Create features in a features_df\nall_features = np.zeros((len(ngram_range)+1, len(complete_df)))\n\n# Calculate features for containment for ngrams in range\ni=0\nfor n in ngram_range:\n column_name = 'c_'+str(n)\n features_list.append(column_name)\n # create containment features\n all_features[i]=np.squeeze(create_containment_features(complete_df, n))\n i+=1\n\n# Calculate features for LCS_Norm Words \nfeatures_list.append('lcs_word')\nall_features[i]= np.squeeze(create_lcs_features(complete_df))\n\n# create a features dataframe\nfeatures_df = pd.DataFrame(np.transpose(all_features), columns=features_list)\n\n# Print all features/columns\n#print()\n#print('Features: ', features_list)\n#print()\n\n\n# print some results \n#print(features_df.head(10))\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# Create correlation matrix for just Features to determine different models to test\ncorr_matrix = features_df.corr().abs().round(2)\n\n# display shows all of a dataframe\nprint((corr_matrix))\n\nfor i in range(corr_matrix.shape[0]):\n corr_matrix.iloc[i, i] = np.nan\n\nprint('\\nCorrelation Matrix description:')\nprint(corr_matrix.describe())\n\nprint('\\nCorrelation Matrix boxplot:')\ncorr_matrix.plot.box()\nplt.show()\n\n\n# Takes in dataframes and a list of selected features (column names) \n# and returns (train_x, train_y), (test_x, test_y)\ndef train_test_data(complete_df, features_df, selected_features):\n \"\"\"\n Gets selected training and test features from given dataframes, and \n returns tuples for training and test features and their corresponding class labels.\n \n Arguments:\n :param complete_df: A dataframe with all of our processed text data, datatypes, and labels\n :param features_df: A dataframe of all computed, similarity features\n :param selected_features: An array of selected features that correspond to certain columns in `features_df`\n \n Return:\n :return: training and test features and labels: (train_x, train_y), (test_x, test_y)\n \"\"\"\n\n 
train_test_df = complete_df.join(features_df[selected_features])\n train_df = train_test_df.query('Datatype == \"train\"')\n test_df = train_test_df.query('Datatype == \"test\"')\n \n # get the training features\n train_x = train_df[selected_features]\n # And training class labels (0 or 1)\n train_y = train_df['Class']\n\n # get the test features and labels\n test_x = test_df[selected_features]\n test_y = test_df['Class']\n\n return (train_x.values, train_y.values), (test_x.values, test_y.values)\n\n# Select your list of features, this should be column names from features_df\n# ex. ['c_1', 'lcs_word']\nselected_features = ['lcs_word', 'c_10', 'c_1']\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\n(train_x, train_y), (test_x, test_y) = train_test_data(complete_df, features_df, selected_features)\n\n# check that division of samples seems correct\n# these should add up to 95 (100 - 5 original files)\nprint('Training size: ', len(train_x))\nprint('Test size: ', len(test_x))\nprint()\nprint('Training df sample: \\n', train_x[:10])\n\ndef make_csv(x, y, filename, data_dir):\n \"\"\"\n Merges features and labels and converts them into one csv file with labels in the first column.\n \n Arguments:\n :param x: Data features\n :param y: Data labels\n :param file_name: Name of csv file, ex. 'train.csv'\n :param data_dir: The directory where files will be saved\n \"\"\"\n\n # make data dir, if it does not exist\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n file_path = str(data_dir)+'/'+str(filename)\n\n df = pd.DataFrame([(label, *cols) for (label, cols) in zip(y, x)])\n df.dropna(axis=0)\n\n df.to_csv(file_path, header=False, index=False)\n\n # nothing is returned, but a print statement indicates that the function has run\n print('Path created: '+file_path)\n\n\n# can change directory, if you want\ndata_dir = 'plagiarism_data'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\nmake_csv(train_x, train_y, filename='train.csv', data_dir=data_dir)\nmake_csv(test_x, test_y, filename='test.csv', data_dir=data_dir)","sub_path":"react/ML_React_App_Template/service/codes/semantic/udacity/source_pytorch/plagiarism_feature_engineering.py","file_name":"plagiarism_feature_engineering.py","file_ext":"py","file_size_in_byte":12705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"500690151","text":"import torch\nimport torch.autograd\nimport torch.optim as optim\nimport torch.nn as nn\nfrom networks import Actor, Critic\nfrom DDPG.utils import *\nfrom torch.autograd.variable import Variable\n\n\nclass Agent:\n def __init__(self, env, hidden_size=256, actor_lr=1e-4, critic_lr=1e-3, gamma=0.99, tau=1e-3, max_memory=int(1e6)):\n obs = env.reset()\n self.num_states = obs['desired_goal'].shape[0] + obs['observation'].shape[0]\n self.num_actions = env.action_space.shape[0]\n self.gamma = gamma\n self.tau = tau\n self.action_max = env.action_space.high[0]\n\n self.actor = Actor(self.num_states, hidden_size, self.num_actions)\n self.critic = Critic(self.num_states + self.num_actions, hidden_size, 1)\n\n self.target_actor = Actor(self.num_states, hidden_size, self.num_actions)\n self.target_critic = Critic(self.num_states + self.num_actions, hidden_size, 1)\n\n for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):\n target_param.data.copy_(param.data)\n\n for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):\n 
target_param.data.copy_(param.data)\n\n        self.experience_replay = ExperienceReplay(max_memory)\n        self.critic_loss_func = nn.MSELoss()\n        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)\n        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr)\n\n    def get_action(self, state):\n        state = Variable(torch.from_numpy(state).float().unsqueeze(0))\n        action = self.actor.forward(state)\n        action = action.detach().numpy()[0]\n        return action\n\n    def update(self, size):\n        states, actions, rewards, next_states, _ = self.experience_replay.sample(size)\n        states = torch.FloatTensor(states)\n        actions = torch.FloatTensor(actions)\n        rewards = torch.FloatTensor(rewards)\n        next_states = torch.FloatTensor(next_states)\n\n        with torch.no_grad():\n            next_actions = self.target_actor.forward(next_states)\n            q_next = self.target_critic.forward(next_states, next_actions).detach()\n            # reshape rewards to a column vector so it broadcasts against q_next\n            # for any batch size, not just a hardcoded one\n            target_q = rewards.reshape((-1, 1)) + self.gamma * q_next\n            target_q = target_q.detach()\n            c = 1 / (1 - self.gamma)\n            target_q = torch.clamp(target_q, -c, 0)\n\n        real_q = self.critic.forward(states, actions)\n        dif = (target_q - real_q)\n        critic_loss = dif.pow(2).mean()\n        real_actions = self.actor.forward(states)\n        actor_loss = -self.critic.forward(states, real_actions).mean()\n        actor_loss += (real_actions/self.action_max).pow(2).mean()\n        self.actor_optimizer.zero_grad()\n        actor_loss.backward()\n        self.actor_optimizer.step()\n\n        self.critic_optimizer.zero_grad()\n        critic_loss.backward()\n        self.critic_optimizer.step()\n\n\n\n        # update target networks\n        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):\n            target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n\n        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):\n            target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n","sub_path":"DDPG/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"565136439","text":"#!/usr/bin/python\n#\n# Brainfuck Interpreter\n# Copyright 2011 Sebastian Kaspari\n# Copyright 2013 yangfl\n# This program is licensed under the terms of the\n# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE.\n\nimport random, re\nimport willie.module\n\nBUFFER_SIZE = 5000\nMAX_STEPS = 10000\n\n@willie.module.commands('bf')\n@willie.module.example('.bf +[.+]')\ndef bf(bot, trigger):\n    \"\"\".bf <program> -- executes brainfuck program \"\"\"\n    if not trigger.group(2):\n        return\n\n    code = re.sub('[^][<>+-.,]', '', trigger.group(2)) # filter illegal characters, can be omitted\n\n    temp_bracestack, bracemap = [], {}\n    for position, command in enumerate(code):\n        if command == \"[\": temp_bracestack.append(position)\n        if command == \"]\":\n            start = temp_bracestack.pop()\n            bracemap[start] = position\n            bracemap[position] = start\n    if len(temp_bracestack):\n        bot.reply('unbalanced brackets')\n        return\n\n    output = ''\n    code_len = len(code)\n    steps = 0\n    cells, codeptr, cellptr = [0], 0, 0\n\n    while codeptr < code_len and cellptr < BUFFER_SIZE and steps < MAX_STEPS:\n        command = code[codeptr]\n        if command == \">\":\n            cellptr += 1\n            if cellptr == len(cells):\n                cells.append(0)\n        elif command == \"<\":\n            cellptr = 0 if cellptr <= 0 else cellptr - 1\n        elif command == \"+\":\n            cells[cellptr] = cells[cellptr] + 1 if cells[cellptr] < 255 else 0\n        elif command == \"-\":\n            cells[cellptr] = cells[cellptr] - 1 if cells[cellptr] > 0 else 
255\n elif command == \"[\" and cells[cellptr] == 0:\n codeptr = bracemap[codeptr]\n elif command == \"]\" and cells[cellptr] != 0:\n codeptr = bracemap[codeptr]\n elif command == \".\":\n output += chr(cells[cellptr])\n if len(output) > 500:\n break\n elif command == \",\": cells[cellptr] = random.randint(0, 255)\n\n codeptr += 1\n steps += 1\n\n if output == '':\n output = '(no output)'\n if steps == MAX_STEPS:\n output += ' [exceeded %d steps]' % MAX_STEPS\n elif cellptr == BUFFER_SIZE:\n output += ' [exceeded %d buffers]' % BUFFER_SIZE\n\n stripped_output = re.sub(r'[\\x00-\\x1F]', '', output)[:430]\n if stripped_output == '':\n bot.reply('no printable output')\n bot.reply(stripped_output)\n","sub_path":"bf.py","file_name":"bf.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"110992894","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Main file to train and evaluate the models\n\"\"\"\n\nimport argparse\nimport os\nimport tensorflow as tf\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom deep_adversarial_network.adversarial_training import DeepGAN\nfrom deep_adversarial_network.discriminator import (get_available_discriminators, make_discriminator)\nfrom deep_adversarial_network.generator import (get_available_generators, make_generator)\nfrom deep_adversarial_network.logging.logger import rootLogger\nfrom deep_adversarial_network.utils import (get_available_datasets,\n make_dataset)\n\n\n# Optimizers\nOPTIMIZERS = {\n 'adam': tf.train.AdamOptimizer,\n 'adagrad': tf.train.AdagradOptimizer,\n 'sgd': tf.train.GradientDescentOptimizer,\n 'rms_prop': tf.train.RMSPropOptimizer\n}\n\n# Losses\nLOSSES = {\n 'l1': tf.losses.absolute_difference,\n 'l2': tf.losses.mean_squared_error\n}\n\n# General Paths\nLOG_PATH = os.path.join(os.getcwd(), 'logs/')\nPLOT_PATH = os.path.join(os.getcwd(), 'plots/')\nMODEL_PATH = os.path.join(os.getcwd(), 'models/')\n\n# training settings\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n# general\nparser.add_argument('-d', '--dataset', type=str, default='coseg',\n help=\"dataset, {'\" + \\\n \"', '\".join(get_available_datasets()) + \\\n \"'}\")\nparser.add_argument('--data-dirpath', type=str, default='data/',\n help='directory for storing downloaded data')\n\nparser.add_argument('--n-workers', type=int, default=2,\n help='how many threads to use for I/O')\n\nparser.add_argument('--gpu', type=str, default='0',\n help=\"ID of the GPU to train on (or '' to train on CPU)\")\n\nparser.add_argument('-rs', '--random-seed', type=int, default=1,\n help=\"random seed for training\")\n\n# GAN-related\nparser.add_argument('-dr', '--discriminator', type=str, default='patch',\n help=\"discriminator architecture name, {'\" + \\\n \"', '\".join(get_available_discriminators()) + \\\n \"'}\")\n\nparser.add_argument('-gr', '--generator', type=str, default='multi2',\n help=\"generator architecture name, {'\" + \\\n \"', '\".join(get_available_generators()) + \\\n \"'}\")\n\nparser.add_argument('-d_lr', '--d_lr', type=float, default=1e-4,\n help='discriminator learning rate')\n\nparser.add_argument('-g_lr', '--g_lr', type=float, default=1e-4,\n help='generator learning rate')\n\nparser.add_argument('-b', '--batch_size', type=int, default=16,\n help='input batch size for training')\n\nparser.add_argument('-d_opt', '--d_optim', type=str, default='adam',\n help=\"optimizer, {'\" + \\\n \"', 
'\".join(OPTIMIZERS.keys()) + \\\n \"'}\")\n\nparser.add_argument('-g_opt', '--g_optim', type=str, default='adam',\n help=\"optimizer, {'\" + \\\n \"', '\".join(OPTIMIZERS.keys()) + \\\n \"'}\")\nparser.add_argument('-m', '--model_name', type=str,\n default='gan_model', help='name for model')\n\nparser.add_argument('-e', '--epochs', type=int, default=1000,\n help='number of epochs')\n\nparser.add_argument('-rl', '--recon_loss', type=str, default='l2',\n help=\"losses, {'\" + \\\n \"', '\".join(LOSSES.keys()) + \\\n \"'}\")\n\n# Plot related\nparser.add_argument('-tf', '--tf_logs', type=str, default='tf_logs',\n help=\"log folder for tensorflow logging\")\n\nparser.add_argument('-mp', '--plot_matplotlib', type=str, default='n',\n help=\"whether to plot matplotlib plots\")\n\n# parse and validate parameters\nargs = parser.parse_args()\n\nfor k, v in args._get_kwargs():\n if isinstance(v, str):\n setattr(args, k, v.strip().lower())\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n# print arguments\nrootLogger.info(\"Running with the following parameters:\")\nrootLogger.info(vars(args))\n\n\ndef main(args=args):\n \"\"\"\n main function that parses the arguments and trains\n :param args: arguments related\n :return: None\n \"\"\"\n batch_size = args.batch_size\n mplib = True if args.plot_matplotlib == 'y' else False\n\n # load and shuffle data\n train_dataset = make_dataset(name=args.dataset, base_path=os.getcwd() + '/data/' + args.dataset + '/train/',\n transform=transforms.Compose(\n [transforms.ToPILImage(), transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), transforms.Normalize([0.5] * 3, [0.5] * 3)]))\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=args.n_workers)\n val_dataset = make_dataset(name=args.dataset, base_path=os.getcwd() + '/data/' + args.dataset + '/val/',\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize([0.5] * 3, [0.5] * 3)]))\n val_loader = DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=args.n_workers)\n\n recon_loss = LOSSES.get(args.recon_loss, None)\n if not recon_loss:\n raise ValueError(\"invalid loss: '{0}'\".format(args.recon_loss))\n\n tf_log_path = os.path.join(os.getcwd(), args.tf_logs + '/')\n\n # build discriminator model\n discriminator = make_discriminator(name=args.discriminator)\n\n # build generator model\n generator = make_generator(name=args.generator)\n\n # get optimizer\n d_optim = OPTIMIZERS.get(args.d_optim, None)\n if not d_optim:\n raise ValueError(\"invalid optimizer: '{0}'\".format(args.d_optim))\n\n g_optim = OPTIMIZERS.get(args.g_optim, None)\n if not g_optim:\n raise ValueError(\"invalid optimizer: '{0}'\".format(args.g_optim))\n\n # get learning rate\n d_lr = args.d_lr\n g_lr = args.g_lr\n\n # Create GAN according to params\n model = DeepGAN(discriminator=discriminator, generator=generator, model_name=args.model_name, recon_loss=recon_loss,\n dataset=args.dataset, batch_size=args.batch_size, d_optim=d_optim, g_optim=g_optim, d_lr=d_lr,\n g_lr=g_lr,\n epochs=args.epochs, mplib=mplib, tf_log_path=tf_log_path)\n # Train the model\n model.adversarial_train(train_loader=train_loader, test_loader=val_loader, model_path=MODEL_PATH)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"368677259","text":"#!/usr/bin/env python\n# coding: 
utf-8\n\n# # Assignment 2a Due 9/8\n# \n# In this assignment you will explore and become familiar with writing and using algorithms to numerically approximate definite integrals.\n# \n# Assignment Overview:\n# * Implement your own midpoint, trapezoid, and Simpson's rule integration algorithms\n# * Use those to solve a definite integral\n# * Compare your algorithms to those from scipy\n# * Compare all approximate solutions to the exact solutions\n# \n\n# You can import only the following packages: numpy, math, scipy, matplotlib.pyplot\n\n# In[104]:\n\n\n#your import cell\nimport numpy as np\nimport math\nimport scipy as sc\nfrom scipy import integrate\nimport matplotlib as mpl\n\n\n# Now we can define our integration algorithms\n\n# In[204]:\n\n\n#define midpoint rule\ndef midpointintg(function,xmin,xmax,dxcount):\n    dxarray=np.linspace(xmin,xmax, 2*dxcount+1) #every other point given from linspace is used to go into function\n    placeholder =1\n    area = 0\n    while placeholder < len(dxarray):\n        y = function(dxarray[placeholder])\n        dx = dxarray[placeholder + 1]-dxarray[placeholder - 1] #dx is equidistant around x, +/_ one linspace interval\n        area = area + (y*dx)\n        placeholder = placeholder + 2\n    return area\n    \ntestfun = lambda x: 2*x\nmidpointintg( testfun ,0,2,10) \n\n\n# In[205]:\n\n\n#define trapezoidal rule\ndef trapintg(function,xmin,xmax,dxcount):\n    dxtrap=np.linspace(xmin,xmax,dxcount+1) #\n    tarea = 0\n    for i in range(dxcount):#array has i+1 elements\n        yone = function(dxtrap[i])\n        ytwo= function(dxtrap[i+1])#number of points including start and finish is one more than the dx count\n        tdx = dxtrap[i+1]-dxtrap[i] \n        tarea = tarea + (1/2*tdx*(yone+ytwo))\n    return tarea\n    \ntestfuntwo = lambda x: x**2\ntrapintg(testfuntwo ,0,2,20)\n\n\n# In[68]:\n\n\n#define simpson's rule \ndef simpsons(function,xmin,xmax,dxcount):\n    sarea = (2*midpointintg(function,xmin,xmax,dxcount)+trapintg(function,xmin,xmax,dxcount))/3 #using weighted average of previous values\n    return sarea\n\n\n# # Question \n# Sanity checking your code is very important. How can you check that your functions are working properly?\n\n# \n\n
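# One possible sanity check (an illustrative sketch that just reuses the three functions above): integrate a function with a known antiderivative and compare each rule against the exact answer.\n\n# In[ ]:\n\n\n#sanity check: the integral of x**2 from 0 to 2 is exactly 8/3\ncheckfun = lambda x: x**2\nfor rulename, rule in [('midpoint', midpointintg), ('trapezoid', trapintg), ('simpson', simpsons)]:\n    approx = rule(checkfun, 0, 2, 50)\n    print(rulename, approx, 'error:', abs(approx - 8/3))\n\n\n# For this notebook assignment we would like to solve the integral\\\n# $\\int_{a}^{b} \\frac{2}{(2^2 + x^2)^{3/2}}dx$\n\n# # Question\n# Have you seen an integral like this before in physics? From where?\n\n# \n\n# Solve the above integral using the numerical integrations that you wrote. 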
When defining the function we will integrate over do so using python's **lambda** function.\n# \n# For the integration take $a = 0,\\; b = 2,\\;$ and we will split it up into 6 partitions.\n\n# # Question\n# What is the exact solution?\n\n# In[288]:\n\n\n#exact solution to the integral\nexact = (1/4)*math.sqrt(2)\nprint(exact)\n\n\n# In[287]:\n\n\n#solve the integral using your integration functions\nfunction= lambda x: 2*(4+x**2)**(-3/2)\nam=midpointintg(function,0,2,6)\nprint(\"midpoint\",am)\nat=trapintg(function,0,2,6)\nprint(\"trapezoid\",at)\nasi=simpsons(function,0,2,6)\nprint(\"simpson\", asi)\n\n\n# Use the trapz and simps methods from scipy's integration library to solve the integral.\n# \n# Note: Shit+tab at the end of a function in jupyter notebook reveals lots of information about the function.\n\n# In[286]:\n\n\n#scipy integration\ndef arraycrea(n):\n xar= np.array(range(n))\n return xar\ndef arraycreate(n):\n yl=list()\n for i in range (n):\n yv=2*(4+i**2)**(-3/2)\n yl.append(yv)\n return yl\nfox=np.array(arraycreate(6))\nsci_trap = sc.trapz([fox])\nsci_simp = integrate.simps([fox])\nprint('sci_simp yields:',sci_simp)\nprint('sci_trap yields:', sci_trap)\n\n\n# # Question\n# How do scipy integrals compare to yours? Support your answer. \n\n# \n\n# In[284]:\n\n\n#print the numerical values\ndef pdif(a,e):\n dif=100*((a-e)/e)\n return dif\nprint(\"trapmethod:\" , pdif(at,sci_trap))\nprint(\"simpsons method:\" , pdif(at,sci_simp))\n\n\n# Numerically compare your midpoint, trapazoid, and Simpson method results to the exact value by calculating the quantity (approx - exact) / exact for each method. \n\n# In[285]:\n\n\n#numerical comparisons\nprint(\"all answers given as a percent\")\nprint(\"midpoint\",pdif(am,exact))\nprint(\"Trapezoid\",pdif(at,exact))\nprint(\"simposon\",pdif(asi,exact))\n\n\n# # Question\n# Which method provides the best approximation to the exact solution?\n\n# \n\n# It is often useful to represent the data graphically. Make a scatter plot showing the percent difference $100 \\times \\frac{approx. - exact}{exact}$ vs. integration method. Integration method on your x-axis should be strings such as *midpoint, trapazoid, and Simpson*. You should also draw on your plot a line at *y = 0*, which will help guide the eye where the perfect approximation (the exact solution) would sit. 
\n\n# In[283]:\n\n\n#plot \n#i dont understand what this is asking, if its asking to present a graph showing % difference between methods\n#a scatter plot doesn't make sense\nimport matplotlib.pyplot as mpl\n\nxv=(\"midpoint\",\"trapezoid\",\"simpson\")\nyv=(pdif(am,exact),pdif(at,exact),pdif(asi,exact))\nyp=(0,0,0)\nfig= mpl.figure()\n\nax=fig.add_axes([0.8,0.8,0.8,0.8])\nax.plot(xv,yp, label=\"no error\")\nax.scatter(xv,yv,marker='X')\nax.legend()\nax.set_xlabel('method used')\nax.set_ylabel('percent difference')\n\n\n# Using the scipy *quad* integration method, approximate the following integral\n# \n# $$\\int_{-\\infty}^{\\infty} e^{-x^{2}} dx$$\n# \n# Hint: Reference numpy and see what it has to offer in terms of functions and *special* values\n\n# In[282]:\n\n\n#solve integral above\nfrom sympy import limit\n\nfunctione = lambda x: math.exp(-1*x**2) \nquadintg =integrate.quad(functione,-np.inf,np.inf)\nprint(quadintg)\n\n\n# # Question\n# If you square your solution, does the resulting number have any special meaning?\n\n# \n","sub_path":"Module02/2a_adamhersh.py","file_name":"2a_adamhersh.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"513322611","text":"#########\n# Imports\n#########\nimport json\nimport csv\nfrom numpy import where\nfrom numpy import array\nfrom glob import glob\nfrom HTMLParser import HTMLParser\nfrom scipy import sparse as sparse\n\n\n\n###################\n# Helper functions\n###################\n\n# Load several JSON files into one array of dictionaries\ndef load_json_data_from_files(file_name_pattern):\n\treturn_data = []\n\tfor file_name in glob(file_name_pattern):\n\t\t#print \"Processing file: \" + file_name\n\t\tinput_file = open(file_name)\n\t\tlines = [line.strip() for line in input_file]\n\t\tfor line in lines:\n\t\t\treturn_data.append(json.loads(line))\n\t\tinput_file.close()\n\treturn return_data\n\n# Extract travelocity hotel IDs from CSV file \ndef extract_hotel_ids_from_database_data():\n\ttravelocity_hotel_ids_file_name = './travelocity_hotels/travelocity_new_york_hotels.csv'\n\ttravelocity_hotel_ids = []\n\twith open(travelocity_hotel_ids_file_name, 'rb') as travelocity_hotels_csv:\n\t\thotel_reader = csv.reader(travelocity_hotels_csv, delimiter=',', quotechar='\"')\n\t\tfor row in hotel_reader:\n\t\t\ttravelocity_hotel_ids.append(row[21])\n\treturn travelocity_hotel_ids\n\n# Remove features from data dictionary\ndef remove_features(dict, features):\n\tfor el in dict:\n\t\tfor feature in features:\n\t\t\tdel el[feature]\n\n# To strip HTML tags\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n\n# To extract hotel amenities from a downloaded HTML page\ndef extract_amenities_text_from_html(html_text):\n\tstart_amenities = '
+{"seq_id":"513322611","text":"#########\n# Imports\n#########\nimport json\nimport csv\nfrom numpy import where\nfrom numpy import array\nfrom glob import glob\nfrom HTMLParser import HTMLParser\nfrom scipy import sparse as sparse\n\n\n\n###################\n# Helper functions\n###################\n\n# Load several JSON files into one array of dictionaries\ndef load_json_data_from_files(file_name_pattern):\n\treturn_data = []\n\tfor file_name in glob(file_name_pattern):\n\t\t#print \"Processing file: \" + file_name\n\t\tinput_file = open(file_name)\n\t\tlines = [line.strip() for line in input_file]\n\t\tfor line in lines:\n\t\t\treturn_data.append(json.loads(line))\n\t\tinput_file.close()\n\treturn return_data\n\n# Extract travelocity hotel IDs from CSV file \ndef extract_hotel_ids_from_database_data():\n\ttravelocity_hotel_ids_file_name = './travelocity_hotels/travelocity_new_york_hotels.csv'\n\ttravelocity_hotel_ids = []\n\twith open(travelocity_hotel_ids_file_name, 'rb') as travelocity_hotels_csv:\n\t\thotel_reader = csv.reader(travelocity_hotels_csv, delimiter=',', quotechar='\"')\n\t\tfor row in hotel_reader:\n\t\t\ttravelocity_hotel_ids.append(row[21])\n\treturn travelocity_hotel_ids\n\n# Remove features from data dictionary\ndef remove_features(dict, features):\n\tfor el in dict:\n\t\tfor feature in features:\n\t\t\tdel el[feature]\n\n# To strip HTML tags\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n\n# To extract hotel amenities from a downloaded HTML page\ndef extract_amenities_text_from_html(html_text):\n\tstart_amenities = ''\n\tend_amenities = ''\n\tif start_amenities == -1:\n\t\treturn ''\n\thtml_text_amenities = html_text[html_text.find(start_amenities):]\n\thtml_text_amenities = html_text_amenities[:html_text_amenities.find(end_amenities)]\n\treturn html_text_amenities\n\n# Load text data containing amenities for a specific hotel\ndef load_hotel_amenities_data_for_hotel(hotel_id):\t\n\ttry:\n\t\twith open(\"./travelocity_hotel_details/hotel_details.\" + str(hotel_id) + \".downloaded.html\", \"r\") as myfile:\n\t\t\thotel_details_text_data = myfile.read().replace('\\n', '')\n\t\thotel_amenities_text_data = strip_tags(extract_amenities_text_from_html(hotel_details_text_data))\n\texcept:\n\t\thotel_amenities_text_data = ''\n\treturn hotel_amenities_text_data\n\n# Generate a dictionary of hotel details data indexed by hotel id\ndef generate_hotel_amenities_data_dict(hotel_ids):\n\thotel_amenities_data = {}\n\tfor hotel_id in hotel_ids:\n\t\thotel_amenities_data[hotel_id] = load_hotel_amenities_data_for_hotel(hotel_id)\n\treturn hotel_amenities_data\n\n# Generate all amenities data concatenated\ndef generate_hotel_amenities_data(hotel_ids):\n\thotel_amenities_data = ''\n\tfor hotel_id in hotel_ids:\n\t\thotel_amenities_data = hotel_amenities_data + load_hotel_amenities_data_for_hotel(hotel_id) + \" \"\n\treturn hotel_amenities_data\n\n# Add amenities to hotels\ndef add_hotel_amenities_as_features(training_data, hotel_amenities_dict, amenities):\n\tfor el in training_data:\n\t\thotel_details_text = hotel_amenities_dict.get(el['hotel_id'], '')\n\t\tel['size_of_amenities_text'] = len(hotel_details_text)\n\t\tfor amenity in amenities:\n\t\t\tamenity_key = 'has_' + amenity\n\t\t\tif hotel_details_text.find(amenity) == -1:\n\t\t\t\tel[amenity_key] = 'n'\n\t\t\telse:\n\t\t\t\tel[amenity_key] = 'y'\n\n","sub_path":"project/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"318504172","text":"# coding=utf-8\n\"\"\"\nImplements predefined default values from SIA 2024:2015 for SIA 380.1 calculations.\n\n\"\"\"\n\n\ndef default_values(room, area, month, season):\n \"\"\"\n\n :param room: sia2024 room type. dictionary with all the properties\n :param area: room area\n :param month: month of the year\n :param season: winter or summer\n :return:\n \"\"\"\n\n \"\"\"\n tau Zeitkonstante des Gebäudes [h]\n theta_e Aussenlufttemperatur\n theta_i Raumlufttemperatur\n t Länge der Berechnungsperiode [h]\n A_th Thermische Gebäudehüllfläche [m2] \n A_w Fensterfläche [m2] !!!!!! f_g in sia2024 - Glasanteil in [%]\n U_op Wärmedurchgangskoeffizient Aussenwand [W/m2K]\n U_w Wärmedurchgangskoeffizient Fenster [W/m2K]\n q_ve Aussenluft-Volumenstrom [m3/m2h]\n q_vinf Aussenluft-Volumenstrom durch Infiltration [m3/m2h]\n eta_rec Nutzungsgrad der Wärmerückgewinnung [-]\n phi_P Wärmeabgabe Personen [W/m2]\n phi_L Wärmeabgabe Beleuchtung [W/m2]\n phi_A Wärmeabgabe Geräte [W/m2]\n t_P Vollaststunden Personen [h]\n t_L Vollaststunden Beleuchtung [h]\n t_A Vollaststunden Geräte [h]\n g g-Wert [-]\n f_sh Reduktionsfaktor solare Wärmeeinträge [-]\n I description\": \"Solare Strahlung [Wh/m2]\n \"\"\"\n\n summerstart, summerend = 3, 10\n dayspermonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n # length of calculation period (hours per month) [h]\n t = [744.0, 672.0, 744.0, 720.0, 744.0, 720.0, 744.0, 744.0, 720.0, 744.0, 720.0, 744.0]\n\n \"\"\"\n cases according to SIA 2024:2015.\n The norm defines following ROOM and/or BUILDING types (p.7-8):\n _____________________________________________________________________________________\n | abbr. | description | code SIA 380\n ______|___________|________________________________________________|_________________\n - 1.1 mfh: multi-family house (Mehrfamilienhaus) HNF1\n - 1.2 efh: single family house (Einfamilienhaus) HNF1\n - 2.1 ...: Hotelroom HNF1\n - 2.2 ...: Hotel lobby HNF1\n - ... ...:\n - 3.1 office: small office space (Einzel,- Gruppenbüro) HNF2\n - ... ...:\n - 4.1 school: school room (Schulzimmer) HNF5\n - ... ...:\n _____________________________________________________________________________________\n \"\"\"\n\n f_sh = 0.9 # sia2024, p.12, 1.3.1.9 Reduktion solare Wärmeeinträge\n\n if area is None:\n area = room[\"Nettogeschossflaeche\"]\n\n tau = room[\"Zeitkonstante\"]\n if not season:\n if summerstart <= month <= summerend:\n theta_i = room[\"Raumlufttemperatur Auslegung Kuehlung (Sommer)\"]\n else:\n theta_i = room[\"Raumlufttemperatur Auslegung Heizen (Winter)\"]\n else:\n if season == \"summer\":\n theta_i = room[\"Raumlufttemperatur Auslegung Kuehlung (Sommer)\"]\n else:\n theta_i = room[\"Raumlufttemperatur Auslegung Heizen (Winter)\"]\n\n A_th = room[\"Thermische Gebaeudehuellflaeche\"]\n A_w = A_th * (room[\"Glasanteil\"] / 100.0)\n U_op = room[\"U-Wert opake Bauteile\"]\n U_w = room[\"U-Wert Fenster\"]\n q_ve = room[\"Aussenluft-Volumenstrom (pro NGF)\"]\n q_vinf = room[\"Aussenluft-Volumenstrom durch Infiltration\"]\n eta_rec = room[\"Temperatur-Aenderungsgrad der Waermerueckgewinnung\"]\n phi_P = room[\"Waermeeintragsleistung Personen (bei 24.0 deg C, bzw. 70 W)\"]\n phi_L = room[\"Waermeeintragsleistung der Raumbeleuchtung\"]\n phi_A = room[\"Waermeeintragsleistung der Geraete\"]\n t_P = [room[\"Vollaststunden pro Jahr (Personen)\"]] * 12\n t_L = [room[\"Jaehrliche Vollaststunden der Raumbeleuchtung\"]] * 12\n t_A = [room[\"Jaehrliche Vollaststunden der Geraete\"]] * 12\n g = room[\"Gesamtenergiedurchlassgrad Verglasung\"]\n\n # transforming yearly sia2024 data to monthly\n for i in range(len(dayspermonth)):\n t_P[i] *= dayspermonth[i] / 365.0\n t_L[i] *= dayspermonth[i] / 365.0\n t_A[i] *= dayspermonth[i] / 365.0\n\n return tau, theta_i, t[month - 1], \\\n A_th - A_w, A_w, U_op, U_w, \\\n q_ve * area, q_vinf * area, eta_rec, \\\n phi_P * area, phi_L * area, phi_A * area, \\\n t_P[month - 1], t_L[month - 1], t_A[month - 1], \\\n g, f_sh\n\n\nif __name__ == '__main__':\n values = default_values(\"mfh\", \"standard\", None, 1, \"winter\")\n for i in range(len(values)):\n print(values[i])","sub_path":"Core/sia380/sia2024.py","file_name":"sia2024.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"198918479","text":"import json\r\nimport math\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import polyfit, polyval\r\n\r\ndef extract_id(data):\r\n #JSON file is required to run this code\r\n timelist = []\r\n idlist = []\r\n for sample in data['fittslaw']:\r\n t = sample['time']\r\n d = sample['distance']\r\n w = sample['width']\r\n id1 = math.log((d / w) + 1, 2)\r\n timelist.append(t)\r\n idlist.append(id1)\r\n return (idlist, timelist)\r\n\r\ndef main():\r\n #data.json is required to run this code \r\n with open('data.json') as f:\r\n data = json.load(f)\r\n\r\n # Extract (ID, time) points from data\r\n points = extract_id(data)\r\n\r\n coeff = polyfit(points[0], points[1], 1)\r\n\r\n x1 = min(points[0])\r\n x2 = max(points[0])\r\n y1 = polyval(coeff, x1)\r\n y2 = polyval(coeff, x2)\r\n\r\n # Throughput (IP) = (ID/MT)\r\n throughput = ([], [])\r\n for i in range(len(points[0])):\r\n id1 = points[0][i]\r\n t = points[1][i]\r\n through = id1 * 1000 / t\r\n throughput[1].append(through)\r\n throughput[0].append(id1)\r\n \r\n\r\n #Plotting of the two graphs\r\n plt.figure(num=\"Samples\")\r\n plt.xlabel(\"Index of Difficulty\")\r\n plt.ylabel(\"Movement Time (msS)\")\r\n plt.xlim(0, max(points[0]) * 1.2)\r\n plt.ylim(0, max(points[1]) * 1.2)\r\n plt.plot(points[0], points[1], \"bo\", label=\"Samples\")\r\n plt.plot([x1,x2], [y1, y2], \"r-\")\r\n \r\n plt.figure(num=\"Throughput\")\r\n plt.xlabel(\"Index of Difficulty\")\r\n plt.ylabel(\"Throughput (bits/s)\")\r\n plt.xlim(0, max(throughput[0]) * 1.2)\r\n plt.ylim(0, max(throughput[1]) * 1.2)\r\n plt.plot(throughput[0], throughput[1], \"yo\", label=\"Throughput\")\r\n \r\n print(\"Regression coefficients: A={}, B={}\".format(coeff[0] / 1000, coeff[1] / 1000))\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"script_evaluation.py","file_name":"script_evaluation.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"160253130","text":"import dash\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output, State\nfrom secr.labeling.tweet_labeling.labeling_utils import (\n get_labeled_tweets,\n get_labeling_data,\n)\n\napp = dash.Dash(\n __name__, external_stylesheets=[\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\n)\n\nSENTIMENT_SCORES = list(range(-1, 6))\n\n\napp.layout = html.Div(\n children=[\n html.Div(id=\"tweet-div\", style=dict(padding=\"2%\")),\n dcc.RadioItems(\n options=[\n {\"label\": \"Not relevant\", \"value\": -1},\n {\"label\": \"Negative\", \"value\": 0},\n {\"label\": \"Positive\", \"value\": 1},\n ],\n value=0,\n labelStyle=dict(display=\"inline-block\", margin=\"1%\"),\n id=\"score\",\n ),\n html.Button(\"Submit\", id=\"submit-value\", n_clicks=0),\n ],\n style=dict(\n position=\"absolute\",\n height=\"100vh\",\n width=\"100vw\",\n display=\"flex\",\n flexDirection=\"column\",\n ),\n)\n\n\n@app.callback(\n Output(\"tweet-div\", \"children\"),\n State(\"score\", \"value\"),\n Input(\"submit-value\", \"n_clicks\"),\n)\ndef label_tweet(score, n_clicks):\n\n data_path = (\n \"/home/mpecovnik/Private/sentiment-analysis/data/filtered_tweet_data.parquet\"\n )\n labels_path = (\n \"/home/mpecovnik/Private/sentiment-analysis/data/labeled_tweet_data.parquet\"\n )\n\n labeling_data = get_labeling_data(data_path)\n labeled_data = get_labeled_tweets(labels_path)\n\n if not labeled_data.empty:\n labeling_data = labeling_data[\n ~labeling_data.tweet_id.isin(labeled_data.tweet_id)\n ]\n\n random_tweet = labeling_data.sample(n=1, random_state=42)\n\n values_dict = dict(tweet_id=random_tweet.tweet_id.iloc[0], sentiment_label=score)\n labeled_data = labeled_data.append(values_dict, ignore_index=True)\n\n labeled_data.to_parquet(labels_path)\n\n return random_tweet.text.iloc[0]\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"secr/labeling/tweet_labeling/label_dashboard.py","file_name":"label_dashboard.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"339681417","text":"# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\nimport config\nimport zmq\nimport pickle\nimport time\nimport utils\n\n\n##### to use:\n## ipython\n## from commands import send\n## send(b'yaw_pid.D+=0.03;print(yaw_pid)')\n\nsocket_pub = utils.publisher(config.zmq_pub_command)\n\nsock_sub = utils.subscribe([ config.topic_main_command_fb ], config.zmq_pub_main)\n\n\ndef send(cmd):\n socket_pub.send_multipart([config.topic_command,cmd])\n\ndef recv(timeoutms=100):\n if sock_sub.poll(timeoutms):\n #if len(zmq.select([sock_sub],[],[],0)[0])>0:\n ret = sock_sub.recv_multipart()\n return pickle.loads(ret[1])\n\n\n\nif __name__=='__main__':\n time.sleep(0.5)\n send(b\"tosend='working :)'\")\n print(recv())\n","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"239761381","text":"N = int(input())\nmeets = [int(input()) for _ in range(N)]\nans = float(\"inf\")\nfor bit in range(1 , 2 ** N):\n right = 0\n left = 0\n for idx in range(N):\n if bit >> idx & 1:\n right += meets[idx]\n else:\n left += meets[idx]\n tmp_ans = max(right , left)\n if tmp_ans < ans:\n ans = tmp_ans\nprint(ans)","sub_path":"study/ant/2_basic/2-1_FullSearch/2-1-1_subset_sum_problem/meet.py","file_name":"meet.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"536270812","text":"import MySQLdb\r\nfrom python.datamodel.dao.baseDao import BaseDao\r\nfrom python.datamodel.localization import Localization\r\n\r\nclass LocalizationDao(BaseDao):\r\n\tdef __init__(self):\r\n\t\tBaseDao.__init__(self)\r\n\r\n\tdef getById(self, id):\r\n\t\tpass\r\n\r\n\tdef getByLocalization(self, localization):\r\n\t\tself.openConnection()\r\n\r\n\r\n\t\tlocal = Localization()\r\n\r\n\t\tfor key in local.data.keys():\r\n\t\t\tlocal.data[key] = self._selectValue(key, localization)\r\n\r\n\t\tself.closeConnection()\r\n\r\n\t\treturn local\r\n\r\n\r\n\tdef _selectValue(self, fieldName, localization):\r\n\t\tquery = \"SELECT value FROM localization WHERE localization\"\r\n\t\tquery += \" = \\'\" + localization + \"\\' AND fieldName = \\'\" + fieldName + \"\\'\"\r\n\r\n\t\tresult = self.executeQuery(query)\r\n\r\n\t\tif result:\r\n\t\t\treturn result[0][\"value\"]\r\n\r\n\t\treturn \"\"\r\n\r\n\r\n\r\n\tdef getTranslationFor(self, fieldNames, localization):\r\n\t\ttranslations = {}\r\n\r\n\t\tself.openConnection()\r\n\r\n\t\tfor fieldName in fieldNames:\r\n\t\t\ttranslations[fieldName] = self._selectValue(fieldName, localization)\r\n\r\n\t\tself.closeConnection()\r\n\r\n\r\n\t\treturn translations\r\n\r\n\r\n\r\n\tdef getLanguages(self):\r\n\t\tself.openConnection()\r\n\r\n\t\tquery = \"SELECT * FROM localization WHERE fieldName = 'languageName'\"\r\n\t\tresult = self.executeQuery(query)\r\n\r\n\t\tself.closeConnection()\r\n\r\n\t\tlanguages = []\r\n\r\n\t\tfor r in result:\r\n\t\t\tlanguages.append({\"code\" : r[\"localization\"], \"name\": r[\"value\"]})\r\n\r\n\t\treturn languages\r\n\r\n\r\n","sub_path":"WebApp/TalkToMe/TalkToMe/python/datamodel/dao/localizationDao.py","file_name":"localizationDao.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"178286658","text":"import os, mne, torch, re, time\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\n# from ../LoadFarrahTueData.loadData import jsonLoad\nimport loadData\nfrom preprocessPipeline import TUH_rename_ch, readRawEdf, pipeline, spectrogramMake, slidingWindow\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\n# define path to make sure stuff doesn't get saved weird places\nos.chdir(os.getcwd())\nsave_dir = r\"C:\\Users\\anden\\PycharmProjects\\NovelEEG\"+\"\\\\\" # ~~~ What is your execute path?\nTUAR_dir_single_subject = r\"data_TUH_EEG\\TUH_EEG_CORPUS\\artifact_dataset\\01_tcp_ar\\002\"+\"\\\\\"\n# TUAR_dir = r\"data_TUH_EEG\\TUH_EEG_CORPUS\\artifact_dataset\"+\"\\\\\"\nTUAR_dir = r\"data_TUH_EEG\\TUH_EEG_CORPUS\\artifact_dataset\\**\\01_tcp_ar\"+\"\\\\\" #\\**\\01_tcp_ar #\\100\\00010023\\s002_2013_02_21\n# TUAR_dir = r\"data_TUH_EEG\\TUH_EEG_CORPUS\\artifact_dataset\\**\\02_tcp_le\"+\"\\\\\" # debug 02_tcp_le\n# TUAR_dir = r\"data_TUH_EEG\\TUH_EEG_CORPUS\\artifact_dataset\\**\\03_tcp_ar_a\"+\"\\\\\" # debug 03_tcp_ar_a\n# jsonDir = r\"edfFiles.json\" # ~~~ Where is your json folder?\njsonDir = r\"tmp.json\"\njsonDataDir = save_dir + jsonDir\nTUAR_dirDir = save_dir + TUAR_dir\n\nTUAR_data = loadData.findEdf(path=TUAR_dir, selectOpt=False, saveDir=save_dir)\ntutorial_single = [\"00009630_s001_t001.edf\"]\ntutorial_prep = [\"00010418_s008_t000.edf\", \"00010079_s004_t002.edf\", \"00009630_s001_t001.edf\", '00007952_s001_t001.edf']\ntutorial_model = [\"00010418_s008_t000.edf\", \"00010079_s004_t002.edf\", \"00009630_s001_t001.edf\", '00007952_s001_t001.edf',\n '00009623_s008_t004.edf', '00009623_s008_t005.edf', '00009623_s010_t000.edf',\n '00001006_s001_t001.edf', '00006501_s001_t000.edf', '00006514_s008_t001.edf', '00006514_s020_t001.edf']\n\n# for all subjects run as: file_selected = TUAR_data\nfile_selected = TUAR_data.copy()\n\n# prepare TUAR output\ncounter = 0 # debug counter\ntic = time.time()\n\nsubjects = defaultdict(dict)\nall_subject_gender = {\"male\": [], \"female\": [], \"other\": []}\nall_subject_age = []\nfor edf in file_selected: #TUAR_data:\n subject_ID = edf.split('_')[0]\n if subject_ID in subjects.keys():\n subjects[subject_ID][edf] = TUAR_data[edf].copy()\n else:\n subjects[subject_ID] = {edf: TUAR_data[edf].copy()}\n\n # debug counter for subject error\n counter += 1\n print(\"\\n%s is patient: %i\" % (edf, counter))\n\n # initialize hierarchical dict\n proc_subject = subjects[subject_ID][edf]\n proc_subject = readRawEdf(proc_subject, saveDir=save_dir, tWindow=10, tStep=10*.5,\n read_raw_edf_param={'preload': True}) #,\n # \"stim_channel\": ['EEG ROC-REF', 'EEG LOC-REF', 'EEG EKG1-REF',\n # 'EEG T1-REF', 'EEG T2-REF', 'PHOTIC-REF', 'IBI',\n # 'BURSTS', 'SUPPR']})\n\n # find data labels\n labelPath = subjects[subject_ID][edf]['path'][-1].split(\".edf\")[0]\n proc_subject['annoDF'] = loadData.label_TUH_full(annoPath=labelPath+\".tse\", window=[0, 50000], saveDir=save_dir)\n\n # Makoto + PREP processing steps\n proc_subject[\"rawData\"] = TUH_rename_ch(proc_subject[\"rawData\"])\n # ch_TPC = mne.pick_channels(proc_subject[\"rawData\"].info['ch_names'],\n # include=['Fp1', 'F7', 'T3', 'T5', 'F3', 'C3', 'P3', 'O1', 'Cz',\n # 'Fp2', 'F4', 'C4', 'P4', 'O2', 'F8', 'T4', 'T6'],\n # exclude=['Fz', 'Pz', 'ROC', 'LOC', 'EKG1', 'T1', 'T2', 'BURSTS', 'SUPPR', 'IBI', 'PHOTIC'])\n # exclude2 = [, 'A1', 'A2']\n # mne.pick_info(proc_subject[\"rawData\"].info, sel=ch_TPC, copy=False)\n # proc_subject[\"rawData\"].pick_channels(proc_subject[\"rawData\"].info, sel=ch_TPC, copy=False)\n proc_subject[\"rawData\"].pick_channels(ch_names=['Fp1', 'F7', 'T3', 'T5', 'F3', 'C3', 'P3', 'O1', 'Cz',\n 'Fp2', 'F4', 'C4', 'P4', 'O2', 'F8', 'T4', 'T6', 'A1', 'A2'])\n pipeline(proc_subject[\"rawData\"], type=\"standard_1005\", notchfq=60, downSam=150)\n\n # Generate output windows for (X,y) as (tensor, label)\n # proc_subject[\"preprocessing_output\"] = slidingWindow(proc_subject, t_max=proc_subject[\"rawData\"].times.max(),\n # tStep=proc_subject[\"tStep\"], FFToverlap=0.75, crop_fq=24,\n # annoDir=save_dir,\n # localSave={\"sliceSave\": False, \"saveDir\": save_dir, \"local_return\": False}) #r\"C:\\Users\\anden\\PycharmProjects\"+\"\\\\\"})\n\n # catch age and gender for descriptive statistics\n if subjects[subject_ID][edf][\"gender\"].lower() == 'm':\n all_subject_gender[\"male\"].append(subjects[subject_ID][edf][\"gender\"].lower())\n # gender[0].append(subjects[id][edf][\"gender\"].lower())\n elif subjects[subject_ID][edf][\"gender\"].lower() == 'f':\n all_subject_gender[\"female\"].append(subjects[subject_ID][edf][\"gender\"].lower())\n # gender[1].append(subjects[id][edf][\"gender\"].lower())\n else:\n all_subject_gender[\"other\"].append(subjects[subject_ID][edf][\"gender\"].lower())\n # print(subjects[id][edf][\"gender\"].lower())\n all_subject_age.append(subjects[subject_ID][edf][\"age\"])\n # except:\n # print(\"sit a while and listen: %s\" % subjects[subject_ID][edf]['path'])\n\nall_subject_age = np.array(all_subject_age)\n\ntoc = time.time()\nprint(\"\\n~~~~~~~~~~~~~~~~~~~~\\n\"\n \"it took %imin:%is to run preprocess-pipeline for %i patients\\n with window length [%.2fs] and t_step [%.2fs]\"\n \"\\n~~~~~~~~~~~~~~~~~~~~\\n\"\n % (int((toc-tic)/60), int((toc-tic) % 60), len(subjects), subjects[subject_ID][edf][\"tWindow\"], subjects[subject_ID][edf][\"tStep\"]))\n\n# result inspection\npID = -1\np_inspect = list(file_selected)[pID]\nsubjects[p_inspect.split('_')[0]][p_inspect][\"rawData\"].plot_sensors(show_names=True) # view electrode placement\n# all_ch = ['Fp1', 'F7', 'T3', 'T5', 'F3', 'C3', 'P3', 'O1', 'Cz', 'Fp2', 'F4', 'C4', 'P4', 'O2', 'F8', 'T4', 'T6', 'A1', 'A2']\nsubjects[p_inspect.split('_')[0]][p_inspect][\"rawData\"].plot(remove_dc=True) # plot data as electrodes-amp/samples\nsubjects[p_inspect.split('_')[0]][p_inspect][\"annoDF\"] # show annotation sections\nsubject_prep_output = list(subjects[p_inspect.split('_')[0]][p_inspect][\"preprocessing_output\"].values()) # segmented windows for models\n\nall_subject_age_hist = np.histogram(all_subject_age, range=(0,100))\nplt.hist(all_subject_age, range=(0,100))\nplt.show()\n\nprint(\"end of script - fagprojekt tutorial\")\n","sub_path":"fagprojekt_grupper/fagprojekt_tutorial.py","file_name":"fagprojekt_tutorial.py","file_ext":"py","file_size_in_byte":6630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}