diff --git "a/3081.jsonl" "b/3081.jsonl" new file mode 100644--- /dev/null +++ "b/3081.jsonl" @@ -0,0 +1,809 @@ +{"seq_id":"7291194487","text":"#!/usr/bin/env python\n\nfrom sys import stdout\n\nDIM = 1000\nDIV = 7\n\nNORM_CHAR = ' '\nBORD_CHAR = '1'\nDIV_CHAR = '@'\n\nlast_row = [0] * DIM\nlast_row[0] = 1\n\nstdout.write('%4d => %s\\n' % (0, BORD_CHAR))\n\nfor r in range(1, DIM):\n\n stdout.write('%4d => %s' % (r, BORD_CHAR))\n last_val = 1\n\n for c in range(1, r):\n\n val = last_row[c - 1] + last_row[c]\n if val % DIV == 0:\n stdout.write(' %s' % DIV_CHAR)\n else:\n stdout.write(' %s' % NORM_CHAR)\n\n last_row[c - 1] = last_val\n last_val = val\n\n last_row[r - 1] = last_val\n last_row[r] = 1\n stdout.write(' %s\\n' % BORD_CHAR)\n","repo_name":"scarvalhojr/programming","sub_path":"hackerrank/euler148/printdiv.py","file_name":"printdiv.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33509865998","text":"from wtforms import StringField, SelectField\nfrom app.services.validations.csrf_base_form import CsrfBaseForm\n\n\nclass TurnSearchForm(CsrfBaseForm):\n \"\"\"\n Clase para generar y validar formulario para buscar usuarios.\n Hereda funcionalidades y atributos de CsrfBaseForm.\n\n Attributes:\n select (SelectField): Campo de seleccion para buscar usuarios\n activos, bloqueados o ambos.\n search (StringField): Campo de string para buscar en los\n nombres de usuario y filtrarlos.\n \"\"\"\n\n select = StringField(\n \"Buscar por centro\", render_kw={\"class\": \"form-control\"}\n )\n search = StringField(\n \"Buscar por email\", render_kw={\"class\": \"form-control\"}\n )\n\n def add_data(self, search, select):\n self.search.data = search\n self.select.data = select\n","repo_name":"fdioguardi/UNLP_Proyecto_de_Software","sub_path":"app/services/validations/turn_search_form.py","file_name":"turn_search_form.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32752744070","text":"class LinkedBinaryTree:\r\n class Node:\r\n def __init__(self, data, left=None, right=None):\r\n self.data = data\r\n self.parent = None\r\n self.left = left\r\n if (left is not None):\r\n self.left.parent = self\r\n self.right = right\r\n if (right is not None):\r\n self.right.parent = self\r\n\r\n def __init__(self, root=None):\r\n self.root = root\r\n self.size = self.subtree_count(self.root)\r\n\r\n def __len__(self):\r\n return self.size\r\n\r\n def is_empty(self):\r\n return (len(self) == 0)\r\n\r\n def subtree_count(self, curr_root):\r\n if(curr_root is None):\r\n return 0\r\n else:\r\n left_count = self.subtree_count(curr_root.left)\r\n right_count = self.subtree_count(curr_root.right)\r\n return left_count + right_count + 1\r\n\r\n def height(self):\r\n if(self.is_empty()):\r\n raise Exception(\"Height is not defined for an empty tree\")\r\n return self.subtree_height(self.root)\r\n\r\n def subtree_height(self, curr_root):\r\n if((curr_root.left is None) and (curr_root.right is None)):\r\n return 0\r\n elif(curr_root.right is None):\r\n return 1 + self.subtree_height(curr_root.left)\r\n elif(curr_root.left is None):\r\n return 1 + self.subtree_height(curr_root.right)\r\n else:\r\n left_height = self.subtree_height(curr_root.left)\r\n right_height = self.subtree_height(curr_root.right)\r\n return 1 + max(left_height, right_height)\r\n\r\n def preorder(self):\r\n yield from 
self.subtree_preorder(self.root)\r\n\r\n def subtree_preorder(self, curr_root):\r\n if(curr_root is None):\r\n return\r\n else:\r\n yield curr_root\r\n yield from self.subtree_preorder(curr_root.left)\r\n yield from self.subtree_preorder(curr_root.right)\r\n\r\n def inorder(self):\r\n yield from self.subtree_inorder(self.root)\r\n\r\n def subtree_inorder(self, curr_root):\r\n if (curr_root is None):\r\n return\r\n else:\r\n yield from self.subtree_inorder(curr_root.left)\r\n yield curr_root\r\n yield from self.subtree_inorder(curr_root.right)\r\n\r\n def postorder(self):\r\n yield from self.subtree_postorder(self.root)\r\n\r\n def subtree_postorder(self, curr_root):\r\n if (curr_root is None):\r\n return\r\n else:\r\n yield from self.subtree_postorder(curr_root.left)\r\n yield from self.subtree_postorder(curr_root.right)\r\n yield curr_root\r\n\r\n def __iter__(self):\r\n for node in self.postorder():\r\n yield node.data\r\n\r\n\r\n def leaves_list(self):\r\n lst = []\r\n if self.root is None:\r\n return lst\r\n for i in self.leaves_generator(self.root):\r\n lst.append(i)\r\n return lst\r\n\r\n\r\n \r\n\r\n def leaves_generator(self, subtree):\r\n\r\n print(subtree.data)\r\n if(subtree.left is None) and (subtree.right is None):\r\n yield subtree.data\r\n elif(subtree.right is None):\r\n yield from self.leaves_generator(subtree.left)\r\n elif(subtree.left is None):\r\n yield from self.leaves_generator(subtree.right)\r\n else:\r\n yield from self.leaves_generator(subtree.left)\r\n yield from self.leaves_generator(subtree.right)\r\n\r\n\r\n'''\r\na = LinkedBinaryTree()\r\n\r\np1 = LinkedBinaryTree.Node(5)\r\np2 = LinkedBinaryTree.Node(1)\r\np3 = LinkedBinaryTree.Node(9, p1, p2)\r\np4 = LinkedBinaryTree.Node(2, p3)\r\np5 = LinkedBinaryTree.Node(8)\r\np6 = LinkedBinaryTree.Node(4)\r\np7 = LinkedBinaryTree.Node(7, p5, p6)\r\np8 = LinkedBinaryTree.Node(3, p4, p7)\r\na.root = p8\r\nfor i in a:\r\n print(i, end=' ')\r\nprint()\r\n\r\nprint(a.leaves_list())\r\n'''","repo_name":"Racheltrq/CS1134","sub_path":"HW7/rt1726_hw7_q2.py","file_name":"rt1726_hw7_q2.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9013224981","text":"from itertools import combinations\n\ndef solution(nums):\n c = list(combinations(nums,3))\n cnt = 0\n for arr in c:\n tmp = 1\n for n in range(3,(sum(arr)//2)+1):\n if sum(arr)%n==0:\n tmp = 0\n break\n cnt += tmp\n return cnt","repo_name":"askges20/programmers_py_js_sql","sub_path":"Level1/소수 만들기.py","file_name":"소수 만들기.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16753697374","text":"# _*_ coding:utf-8 _*_\n__author__ = 'pb'\n__date__ = '2017/6/14 11:34'\n\nfrom django import forms\nfrom .models import ProcessOrder, TSOrderItem, MBOrderItem, KSOrderItem, \\\n STOrderItem, SlabList, SlabListItem\nfrom products.models import Product, Slab\nfrom products.forms import SlabForm\nfrom crispy_forms.helper import FormHelper\n\n\nclass ProcessOrderForm(forms.ModelForm):\n class Meta:\n model = ProcessOrder\n exclude = ('order', 'line_num', 'status')\n widgets = {\n 'date': forms.TextInput(attrs={'class': 'dt'}),\n 'order_type': forms.HiddenInput(),\n 'data_entry_staff': forms.HiddenInput(),\n 'status': forms.TextInput(attrs={'readonly': True}),\n }\n\n def __init__(self, *args, **kwargs):\n super(ProcessOrderForm, self).__init__(*args, **kwargs)\n self.helper = 
FormHelper()\n\n\ndef block_num_choice():\n return ((item.id, item.block_num) for item in\n Product.objects.values_list('id', 'block_num'))\n\n\nWIDGETS_VALUES = {\n 'block_num': forms.HiddenInput(),\n 'quantity': forms.TextInput(\n attrs={'style': 'width:5em', 'min': '0'}),\n 'price': forms.TextInput(attrs={'size': '3'}),\n 'amount': forms.TextInput(attrs={'size': '3'}),\n 'date': forms.TextInput(attrs={'class': 'dt', 'size': '6'}),\n 'pic': forms.NumberInput(\n attrs={'style': 'width:5em', 'min': '0', 'step': '1',\n 'type': 'number'}),\n 'pi': forms.NumberInput(\n attrs={'style': 'width:5em', 'min': '0', 'step': '1',\n 'type': 'number'}),\n 'thickness': forms.NumberInput(attrs={'style': 'width:5em', 'min': '1.5'}),\n}\n\n\nclass TSOrderItemForm(forms.ModelForm):\n block_name = forms.CharField(label='荒料编号', widget=forms.TextInput(\n attrs={'size': '5', 'list': \"block_info\",\n 'onchange': 'get_source(this.id)'}))\n\n class Meta:\n model = TSOrderItem\n fields = ['block_name', 'be_from', 'block_type', 'destination',\n 'quantity', 'unit', 'price', 'date', 'ps']\n widgets = WIDGETS_VALUES\n\n def clean_destination(self):\n cd = self.cleaned_data\n block_num = cd.get('block_num', None)\n bf = cd['be_from']\n de = cd['destination']\n if bf and de:\n if bf == de:\n raise forms.ValidationError(\n '编号{}起始地 与 目的地不能相同!'.format(block_num))\n return de\n\n def __init__(self, *args, **kwargs):\n super(TSOrderItemForm, self).__init__(*args, **kwargs)\n block_id = self.initial.get('block_num', None)\n self.empty_permitted = False\n \"\"\"\n 使用bootstrap的样色class\n \"\"\"\n # for i in self.fields:\n # attr_cls = self.fields[i].widget.attrs.get('class')\n # if attr_cls:\n # self.fields[i].widget.attrs['class'] += ' form-control'\n # else:\n # self.fields[i].widget.attrs.update({'class': 'form-control'})\n if block_id is not None:\n self.initial['block_name'] = Product.objects.get(id=block_id).block_num\n\n\nclass KSOrderItemForm(TSOrderItemForm):\n class Meta:\n model = KSOrderItem\n fields = '__all__'\n exclude = ('amount',)\n widgets = WIDGETS_VALUES\n\n def __init__(self, *args, **kwargs):\n super(KSOrderItemForm, self).__init__(*args, **kwargs)\n self.fields['quantity'].widget.attrs.update({'readonly': True})\n self.fields['unit'].widget.attrs.update({'readonly': True})\n\n\nclass MBOrderItemForm(TSOrderItemForm):\n slab_list = forms.CharField(label='码单', max_length='2', initial='打开', widget=forms.TextInput(\n attrs={'size': '2', 'class': 'btn btn-default open_slab_list', 'readonly': True,}))\n # 'onclick': 'open_slab_list(this.id,{% url \"product:order_slab_list\" %})'}))\n\n class Meta:\n model = MBOrderItem\n exclude = ('amount',)\n widgets = WIDGETS_VALUES\n\n def __init__(self, *args, **kwargs):\n super(MBOrderItemForm, self).__init__(*args, **kwargs)\n self.empty_permitted = False\n\n def clean_block_num(self):\n block_num = self.cleaned_data.get('block_num')\n ks_block_num_list = [item.block_num for item in\n KSOrderItem.objects.filter(order__status='N')]\n if block_num not in ks_block_num_list:\n raise forms.ValidationError('荒料编号{}#,没有介石记录请检查清楚'.format(block_num))\n return block_num\n\n\nclass STOrderItemForm(forms.ModelForm):\n class Meta:\n model = STOrderItem\n exclude = ('amount',)\n widgets = WIDGETS_VALUES\n\n\nclass SlabListForm(forms.ModelForm):\n class Meta:\n model = SlabList\n exclude = ()\n\n\nclass SlabListItemForm(forms.ModelForm):\n # block_num = forms.CharField(max_length=16, label='荒料编号')\n # thickness = forms.DecimalField(max_digits=4, decimal_places=2, label='厚度')\n # 
part_num = forms.CharField(label='夹号')\n\n class Meta:\n model = SlabListItem\n exclude = ()\n\n\nclass CustomBaseInlineFormset(forms.BaseInlineFormSet):\n def __init__(self, *args, **kwargs):\n super(CustomBaseInlineFormset, self).__init__(*args, **kwargs)\n for form in self.forms:\n form.empty_permitted = False\n\n def clean(self):\n # if any(self.errors):\n # return\n block_list = []\n for form in self.forms:\n if form.cleaned_data.get('block_num'):\n block_num = form.cleaned_data['block_num']\n if block_num in block_list:\n raise forms.ValidationError(\n '荒料编号[{}]有重复数据'.format(block_num))\n block_list.append(block_num)\n # super(CustomBaseInlineFormset, self).clean()\n","repo_name":"pbpoon/project3","sub_path":"apps/process/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31744365204","text":"import csv\nimport string\n\nfrom django.shortcuts import render\nfrom django.views import generic\nfrom task.models import Task, ApplyTask\nfrom raw_data.models import RawDataType, RawDataSeqFile, RawDataTypeRequest\nfrom submitter.models import Submitter\nfrom parsed_data.models import ParsedData\nfrom rater.models import Rater,AssignedTask\nfrom rater.views import calculate_auto_score\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\nfrom submitter.forms import UploadForm, RequestForm\nfrom django.http import HttpResponse\nimport numpy as np\nimport pandas as pd\nimport random\n# Create your views here.\ndef submitter_landing_view(request, *args, **kwargs):\n submitter = get_object_or_404(Submitter, pk=request.user.user_id)\n approved_tasks = ApplyTask.objects.filter(submitter=submitter, approved=1)\n pending_tasks = ApplyTask.objects.filter(submitter=submitter, approved=0)\n return render(request, \"submitter_landing.html\",\n {\"submitter\": submitter, \"approved_tasks\": approved_tasks, \"pending_tasks\": pending_tasks, })\n\n\ndef list_uploaded_file(request, pk):\n task = get_object_or_404(Task, pk=pk)\n submitter = get_object_or_404(Submitter, pk=request.user.user_id)\n data_types = RawDataType.objects.filter(task=task).distinct()\n uploaded_files = RawDataSeqFile.objects.filter(raw_data_type__in=data_types, submitter=submitter).order_by('round')\n count = uploaded_files.count()\n assigned_files = AssignedTask.objects.filter(raw_data__in = uploaded_files)\n parsed_files = ParsedData.objects.filter(submitter=submitter).distinct()\n tuple_count = np.sum(ParsedData.objects.filter(task=pk, pass_or_not=1,submitter=submitter).values_list('total_tuple_num', flat=True))\n request.session['taskid'] = pk\n\n return render(request, \"submitter_uploaded.html\", \n {\"count\":count, \"tuple_count\":tuple_count, \"data_types\": data_types, \"task\": task, \"uploaded_files\": uploaded_files, \"assigned_files\":assigned_files, \"parsed_files\":parsed_files})\n\n\ndef upload_new_file(request, pk):\n task = get_object_or_404(Task, pk=pk)\n submitter = Submitter.objects.get(pk=request.user.user_id)\n form = UploadForm(task, submitter)\n return render(request, \"upload.html\", {\"form\": form, \"task\":task})\n\n\ndef request(request, pk):\n form = RequestForm()\n task = get_object_or_404(Task, pk=pk)\n return render(request, \"request.html\", {\"form\": form, \"task\":task})\n\n\ndef requested(request, pk):\n if request.method == 'POST':\n task = get_object_or_404(Task, pk=pk)\n form = RequestForm(request.POST)\n\n if form.is_valid():\n 
content = form.cleaned_data['content']\n requested = RawDataTypeRequest.objects.create(task=task, content=content)\n requested.save()\n\n return render(request, \"requested.html\", )\n\n\ndef submitted(request, pk):\n if request.method == 'POST':\n task = get_object_or_404(Task, pk=pk)\n submitter = Submitter.objects.get(pk=request.user.user_id)\n form = UploadForm(task, submitter, request.POST, request.FILES)\n\n if form.is_valid():\n file1 = request.FILES['file']\n\n if is_csv(file1):\n raw_data_type = RawDataType.objects.get(pk=form.data['raw_data_type'])\n submitter = Submitter.objects.get(pk=request.user.user_id)\n round = form.cleaned_data['round']\n term_start = form.cleaned_data['term_start']\n term_end = form.cleaned_data['term_end']\n submitted = RawDataSeqFile.objects.create(submitter=submitter, file=file1, raw_data_type=raw_data_type,\n round=round,\n term_start=term_start, term_end=term_end)\n submitted.save()\n if len(Rater.objects.all())>0:\n while True:\n random_rater = random.sample(list(Rater.objects.all()), 1)\n if not AssignedTask.objects.filter(rater=random_rater[0], raw_data=submitted).exists():\n print(\"loop broken\")\n break\n\n assigned_task = AssignedTask.objects.create(rater=random_rater[0],\n raw_data=submitted, task=task)\n assigned_task.save()\n\n\n return render(request, \"submitted.html\", )\n else:\n form = UploadForm(task, submitter, request.POST, request.FILES)\n return render(request,\"submit_fail.html\",{\"form\": form})\n else:\n task = get_object_or_404(Task, pk=pk)\n form = UploadForm(task, submitter, request.POST, request.FILES)\n return render(request, \"upload.html\", {\"form\": form, \"task\":task})\n\n\ndef is_csv(infile):\n try:\n a=pd.read_csv(infile.open())\n return True\n except csv.Error:\n return False\n except:\n return False\n","repo_name":"sk981102/F20_Database","sub_path":"trial/submitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5657312874","text":"#primzahlen ausgeben\n#For-Schleife mit Else anstelle Flag\n#Wenn eine For-Schleife ein Else am Ende hat, wird der dortige Teil ausgeführt, wenn die For-Schleife ohne Break durchläuft\n\nzahlen = list(range(1, 101))\nprimzahlen = []\n\nfor zahl in zahlen: \t\t\t\t\t\t\t\t\t\t\t\t\t#Für jede 'Zahl' in den 'Zahlen': mach etwas...\n\tprint(zahl, end=\": \") \t\t\t\t\t\t\t\t\t\t\t\t#'Zahl' ausgeben (end= optische Änderunge, : statt nl)\n\tif zahl > 1: \t\t\t\t\t\t\t\t\t\t\t\t\t\t#Die 'Zahl' muss größer sein als 1\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Testen ob durch Primzahl ohne Rest teilbar -> keine Primzahl\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Nested Loop: For-Schleife in einer For-Schleife beginnt hier:\n\t\tfor primzahl in primzahlen: \t\t\t\t\t\t\t\t\t#Es wird über alle 'primzahlen' iteriert (iterable)\n\t\t\tif zahl%primzahl == 0: \t\t\t\t\t\t\t\t\t\t#Zahl durch Primzahl ohne Rest teilbar? 
(Modulo (%), ergibt Rest einer Division)\n\t\t\t\tprint(\"Keine Primzahl, weil durch\",\n\t\t\t\t primzahl, \"teilbar.\")\n\t\t\t\tbreak\t\t\t\t\t\t\t\t\t\t\t\t\t#Bricht aus der Schleife aus, weil Teiler gefunden (wird schneller + lesbarer)\n\t\t\telse: \t\t\t\t\t\t\t\t\t\t\t\t\t\t#Wenn es nicht durch die primzahl ohne Rest teilbar ist, dann tue:\n\t\t\t\tprint(\"Ist nicht durch\", primzahl, \"teilbar.\")\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Dieses Else gehört zur For-Schleife\n\t\t\tprint(\"Primzahl gefunden.\")\t\t\t\t\t\t\t\t\t#Primzahl gefunden, da Primzahl-Schleife komplett ohne break durchgelaufen ist.\n\t\t\tprimzahlen.append(zahl)\t\t\t\t\t\t\t\t\t\t\t\t\t\n\telse: \n\t\tprint() \t\t\t\t\t\t\t\t\t\t\t\t\t\t#Ist die 'Zahl' 1, dann wird sie separat ausgeben\nprint(\"Ich habe folgende Primzahlen gefunden:\")\t\nprint(primzahlen)\n","repo_name":"cw-fsfe/python","sub_path":"primzahlen3.py","file_name":"primzahlen3.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20865655502","text":"import numpy as np\nfrom neural_network import NeuralNetwork\nfrom layers import Layer\nimport loader\nimport csv\nimport multiprocessing\nimport time\nimport matplotlib.pyplot as plt\n\ndef adjust_output(data):\n\treturn data[:, :]\n\nnp.random.seed()\n# This is the configuration of the present experiment.\n\nX, y, r, X_test, y_test, r_test = loader.load_dataset(\"~/anti_stationary.csv\", input_dimension=9, test_ratio=0.3)\n\ny = adjust_output(y)\ny_test = adjust_output(y_test)\n\nprint(X_test)\nprint(y_test)\n\nprint(r_test)\n\nepochs = 5000\n\n\ndef single_run(hidden_neurons, learning_rate, hidden_activation, output_activation):\n\tmodel_data = np.vstack([\n\t\tnp.arange(0, epochs),\n\t\tnp.zeros([4, epochs])\n\t])\n\n\tnn = NeuralNetwork(learning_rate=learning_rate)\n\tnn.add_layer(Layer(hidden_neurons, activation=hidden_activation))\n\n\tnn.initialize(X.shape[1], y.shape[1], output_activation=output_activation)\n\n\tepoch_values = model_data[0, :]\n\tstart = time.time()\n\n\tfor k in epoch_values:\n\t\tk = int(k)\n\t\t# Train for a single epoch\n\t\tfor epoch, train_loss, train_acc in nn.fit(X, y, r, epochs=1):\n\t\t\t#model_data[1, k] = train_loss\n\t\t\tmodel_data[2, k] = train_acc\n\n\t\t# Then make a validation test\n\t\terror, acc, out = nn.evaluate(X_test, y_test, r_test)\n\t\t#model_data[3, k] = error\n\t\tmodel_data[4, k] = acc\n\n\t\tprint(\"Epoch %d; Loss: %s\" % (k, nn.error))\n\t\tif k % 10 == 0:\n\t\t\tnn.print_summary()\n\t\t#print(\"Validation: Loss {0}, accuracy {1}\".format(error, acc))\n\n\telapsed = time.time() - start\n\n\tplt.plot(model_data[0, :], model_data[1, :], label=\"Erro (treinamento)\")\n\tplt.plot(model_data[0, :], model_data[3, :], label=\"Erro (validação)\")\n\tplt.legend(loc=\"lower right\")\n\tplt.xlabel(\"Época\")\n\tplt.title(\"Erro ao longo do tempo\")\n\tplt.show()\n\n\tplt.clf()\n\n\tplt.plot(model_data[0, :], model_data[2, :], label=\"Acurácia (treinamento)\")\n\tplt.plot(model_data[0, :], model_data[4, :], label=\"Acurácia (validação)\")\n\tplt.legend(loc=\"lower right\")\n\tplt.xlabel(\"Época\")\n\tplt.title(\"Acurácia ao longo do tempo\")\n\tplt.show()\n\n\n\n\tresults = {\n\t\t'best_training_loss': np.min(model_data[1, :]),\n\t\t'best_training_loss_epoch': np.argmin(model_data[1, :]),\n\t\t'best_validation_loss': np.min(model_data[3, :]),\n\t\t'best_validation_loss_epoch': np.argmin(model_data[3, :]),\n\t\t'best_training_acc': np.max(model_data[2, 
:]),\n\t\t'best_training_acc_epoch': np.argmax(model_data[2, :]),\n\t\t'best_validation_acc': np.max(model_data[4, :]),\n\t\t'best_validation_acc_epoch': np.argmax(model_data[4, :]),\n\t\t'final_validation_loss': model_data[3, -1],\n\t\t'final_validation_acc': model_data[4, -1],\n\t\t'final_training_loss': model_data[1, -1],\n\t\t'final_training_acc': model_data[2, -1],\n\t\t'learning_rate': learning_rate,\n\t\t'hidden_neurons': hidden_neurons,\n\t\t'hidden_activation': hidden_activation,\n\t\t'output_activation': output_activation,\n\t\t'elapsed_seconds': elapsed\n\t}\n\n\treturn results\n\n\nlearning_rates = [0.001]#, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4]\nactivations = ['sigmoid']#, 'tanh', 'relu']\nneurons = [5]#, 10, 15, 20, 25, 30, 40, 50]\n\n\ndef run_experiment(replications, portion):\n\tnp.random.seed()\n\tprint(\"Starting up!\")\n\ttotal = len(learning_rates) * (len(activations) ** 2) * len(neurons) * replications\n\tdone = 0\n\n\tresults = list()\n\tfor learning_rate in learning_rates:\n\t\tfor h_activation in activations:\n\t\t\tfor o_activation in activations:\n\t\t\t\tfor n_neurons in neurons:\n\t\t\t\t\tfor replication in range(replications):\n\t\t\t\t\t\tr = single_run(n_neurons, learning_rate, h_activation, o_activation)\n\t\t\t\t\t\tresults.append(r)\n\t\t\t\t\t\tdone += 1\n\t\t\t\t\t\tprint(\"{0}/{1}\".format(done, total))\n\n\n\tfile_name = \"experiments_portion{0}.csv\".format(portion)\n\tkeys = results[0].keys()\n\n\twith open(file_name, \"w\") as data_file:\n\t\twriter = csv.DictWriter(data_file, keys)\n\t\twriter.writeheader()\n\t\twriter.writerows(results)\n\n\nsingle_run(10, 0.08, 'tanh', 'tanh')\n\n\n","repo_name":"concatto/ciencia-da-computacao","sub_path":"Inteligência Artificial/ReinforcementNeuralNetwork/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26144153634","text":"import json\r\nfrom collections import OrderedDict\r\nfrom greedy.greed import getDeadlineResult\r\nfrom greedy.greed import getPriorityResult\r\nfrom greedy.greed import getCombiResult\r\nfrom psoAssign.pso_assignment import pso_result\r\n\r\ngreedy_deadline_machine_result, greedy_deadline_score, greedy_deadline_remain_works = getDeadlineResult()\r\ngreedy_priority_machine_result, greedy_priority_score, greedy_priority_remain_works = getPriorityResult()\r\ngreedy_combi_machine_result, greedy_combi_score, greedy_combi_remain_works = getCombiResult()\r\n\r\npso_priority_machine_result, pso_priority_score, pso_priority_remain_works, pso_deadline_machine_result, pso_deadline_score, pso_deadline_remain_works, pso_combi_machine_result, pso_combi_score, pso_combi_remain_works, psopre_priority_machine_result, psopre_priority_score, psopre_priority_remain_works, psopre_deadline_machine_result, psopre_deadline_score, psopre_deadline_remain_works, psopre_combi_machine_result, psopre_combi_score, psopre_combi_remain_works = pso_result()\r\n\r\n\r\ndef convertTime(time):\r\n hour = time // 60\r\n minute = time % 60\r\n if minute < 10:\r\n result = str(hour) + \":0\" + str(minute)\r\n else:\r\n result = str(hour) + \":\" + str(minute)\r\n return result\r\n\r\ndef makeJson(machineresult, score, remainwork):\r\n resultList = OrderedDict()\r\n resultList[\"resultList\"] = []\r\n resultList[\"columnName\"] = [\"주문번호\", \"품목명\", \"공정번호\", \"배정기계\", \"시작시간\", \"종료시간\", \"공정시간\", \"재셋팅시간\", \"주문자\", \"주문수량\"]\r\n resultList[\"remaincol\"] = [\"품목\", \"주문시간\", \"deadline\", 
\"우선순위\", \"주문번호\", \"남은공정수\", \"주문자\", \"주문수량\"]\r\n for i in range(len(machineresult)):\r\n time = 0\r\n for j in range(machineresult[i][2]):\r\n element = OrderedDict()\r\n element[\"주문번호\"] = str(machineresult[i][0][j][1])\r\n element[\"품목명\"] = machineresult[i][0][j][0]\r\n element[\"공정번호\"] = str(machineresult[i][0][j][2])\r\n element[\"배정기계\"] = str(i + 1)\r\n element[\"시작시간\"] = convertTime(time)\r\n element[\"종료시간\"] = convertTime(time + machineresult[i][0][j][3] + machineresult[i][0][j][4])\r\n time = time + machineresult[i][0][j][3] + machineresult[i][0][j][4]\r\n element[\"공정시간\"] = str(machineresult[i][0][j][3])\r\n element[\"재셋팅시간\"] = str(machineresult[i][0][j][4])\r\n element[\"주문자\"] = machineresult[i][0][j][5]\r\n element[\"주문수량\"] = str(machineresult[i][0][j][6])\r\n resultList[\"resultList\"].append(element)\r\n resultList[\"remainWorks\"] = []\r\n for i in range(len(remainwork)):\r\n element = OrderedDict()\r\n element[\"품목\"] = remainwork[i][0]\r\n element[\"주문시간\"] = str(remainwork[i][1])\r\n element[\"deadline\"] = str(remainwork[i][2])\r\n element[\"우선순위\"] = str(remainwork[i][3])\r\n element[\"주문번호\"] = str(remainwork[i][4])\r\n element[\"남은공정수\"] = str(remainwork[i][5])\r\n element[\"주문자\"] = remainwork[i][6]\r\n element[\"주문수량\"] = str(remainwork[i][7])\r\n resultList[\"remainWorks\"].append(element)\r\n resultList[\"score\"] = score\r\n return resultList\r\n\r\ngreedy_deadline_resultlist = makeJson(greedy_deadline_machine_result, greedy_deadline_score, greedy_deadline_remain_works)\r\ngreedy_priority_resultlist = makeJson(greedy_priority_machine_result, greedy_priority_score, greedy_priority_remain_works)\r\ngreedy_combi_resultlist = makeJson(greedy_combi_machine_result, greedy_combi_score, greedy_combi_remain_works)\r\n\r\npso_deadline_resultlist = makeJson(pso_deadline_machine_result, pso_deadline_score, pso_deadline_remain_works)\r\npso_priority_resultlist = makeJson(pso_priority_machine_result, pso_priority_score, pso_priority_remain_works)\r\npso_combi_resultlist = makeJson(pso_combi_machine_result, pso_combi_score, pso_combi_remain_works)\r\n\r\npsopre_deadline_resultlist = makeJson(psopre_deadline_machine_result, psopre_deadline_score, psopre_deadline_remain_works)\r\npsopre_priority_resultlist = makeJson(psopre_priority_machine_result, psopre_priority_score, psopre_priority_remain_works)\r\npsopre_combi_resultlist = makeJson(psopre_combi_machine_result, psopre_combi_score, psopre_combi_remain_works)\r\n\r\n\r\ngreedy_temp = OrderedDict()\r\ngreedy_temp[\"deadline\"] = greedy_deadline_resultlist\r\ngreedy_temp[\"priority\"] = greedy_priority_resultlist\r\ngreedy_temp[\"combie\"] = greedy_combi_resultlist\r\n\r\npso_temp = OrderedDict()\r\npso_temp[\"deadline\"] = pso_deadline_resultlist\r\npso_temp[\"priority\"] = pso_priority_resultlist\r\npso_temp[\"combie\"] = pso_combi_resultlist\r\n\r\npsopre_temp = OrderedDict()\r\npsopre_temp[\"deadline\"] = psopre_deadline_resultlist\r\npsopre_temp[\"priority\"] = psopre_priority_resultlist\r\npsopre_temp[\"combie\"] = psopre_combi_resultlist\r\n\r\ntotalresult = OrderedDict()\r\ntotalresult[\"greedy\"] = greedy_temp\r\ntotalresult[\"pso\"] = pso_temp\r\ntotalresult[\"psopre\"] = psopre_temp\r\n\r\ntemp = json.dumps(totalresult, ensure_ascii=False, indent=\"\\t\")\r\n\r\nf = open(\"../saveresult/result.json\", 
'w')\r\nf.write(temp)\r\nf.close()\r\n\r\n","repo_name":"JeongJaeUk/SmartFactory","sub_path":"GraduationProject/py/writejson.py","file_name":"writejson.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26414404580","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 25 06:22:13 2016\n\n@author: chihebdaoues\n\"\"\"\n\nimport pylab as plt;\nsample =[]\nlinear = []\nquadratic = []\nfor i in range(30):\n sample.append(i);\n linear.append(2**i);\n quadratic.append(i**2);\nplt.figure(\"lin\");\nplt.clf();\nplt.xlabel(\"zooz\");\nplt.ylim(0,100);\nplt.plot(sample,linear,label=\"z\");\nplt.legend();\nplt.figure(\"quad\");\nplt.plot(sample,quadratic);","repo_name":"chihebdaoues/algo_with_python","sub_path":"ploting.py","file_name":"ploting.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8626330260","text":"\"\"\"\nByteland has n cities, and m roads between them.\nThe goal is to construct new roads so that there is a route between any two cities.\n\nYour task is to find out the minimum number of roads required, and also determine which roads should be built.\n\nInput\n\nThe first input line has two integers n and m: the number of cities and roads. The cities are numbered 1,2,…,n.\n\nAfter that, there are m lines describing the roads. Each line has two integers a and b: there is a road between those cities.\n\nA road always connects two different cities, and there is at most one road between any two cities.\n\nOutput\n\nFirst print an integer k: the number of required roads.\n\nThen, print k lines that describe the new roads. You can print any valid solution.\n\nConstraints\n1≤n≤105\n1≤m≤2⋅105\n1≤a,b≤n\nExample\n\nInput:\n4 2\n1 2\n3 4\n\nOutput:\n1\n2 3\n\"\"\"\nimport collections\n\nif __name__ == '__main__':\n def find(x):\n if x != parent[x]:\n parent[x] = find(parent[x])\n return parent[x]\n def relax():\n for v in range(1, n + 1):\n find(v)\n n, m = list(map(int, input().split()))\n items = []\n for _ in range(m):\n items.append(list(map(int, input().split())))\n parent = [0] * (n + 1)\n for i in range(1, n + 1):\n parent[i] = i\n for x, y in items:\n xx = find(x)\n yy = find(y)\n if xx != yy:\n parent[yy] = xx\n\n relax()\n\n comp = list(set(parent[1:]))\n\n print(len(comp) - 1)\n\n for y1, y2 in zip(comp, comp[1:]):\n print(*[y1, y2])\n","repo_name":"denisschmidt/cses","sub_path":"Building Roads/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71359690010","text":"from fastapi import APIRouter\nfrom model.DealerModel import dealerdb, Dealeruserid, DealerInfo\nfrom model.RouteModel import statedb,citydb\nfrom commonschema import serializeLists, serializeList\nfrom bson import ObjectId\n\nrouter = APIRouter()\n\n\n@router.post(\"/register\")\nasync def dealer_register(dealer: DealerInfo):\n dealerdb.insert_one(dict(dealer))\n return {\"message\": \"Sucess\", \"code\": 103}\n\n\n@router.post(\"/checkdealer\")\nasync def find_for_dealer(userid: Dealeruserid):\n checkdealer = dealerdb.find_one(dict(userid))\n if (checkdealer):\n return {\"dealer\":str(checkdealer[\"_id\"]),'code':104}\n else:\n return {\"message\":\"Not sucess\",\"code\":105}\n\n@router.get(\"/state\")\nasync def state_dealer():\n state_data = statedb.find()\n return 
serializeList(state_data)\n\n@router.get(\"/city\")\nasync def city_dealer(stateid:str):\n city_data = citydb.find({\"stateid\":ObjectId(stateid)})\n return serializeLists(city_data)\n \n","repo_name":"basithh/DeairD","sub_path":"routers/dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25924464120","text":"\"\"\" Pipeline that loads a pretrained Bert model and tokenizer, \nand fine-tunes the model on HuggingFace's 'liar' dataset.\nIncludes functionality to train, evaluate, and save model.\n\"\"\"\nimport datetime\nimport matplotlib.pyplot as plt\nimport pathlib\nfrom tqdm import tqdm, trange\n\nimport datasets\nimport numpy as np\nfrom sklearn.metrics import matthews_corrcoef\nimport torch\nimport torch.nn as nn\nfrom torch.optim import AdamW\nfrom transformers import (\n BertForSequenceClassification, BertTokenizer, get_linear_schedule_with_warmup\n)\n\n\nclass DolosTrainer:\n def __init__(self, model_dir:str, device=None):\n # Set device if none given.\n if device is None:\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available()\n else 'mps' if torch.backends.mps.is_available() \n else 'cpu'\n )\n print(f'Device not specified. Defaulting to {self.device}')\n else:\n self.device = device\n\n # Point to model directory or download model.\n if not isinstance(model_dir, pathlib.Path):\n model_dir = pathlib.Path(model_dir)\n\n if not model_dir.exists():\n base_model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=2)\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n\n base_model.save_pretrained(model_dir)\n self.tokenizer.save_pretrained(model_dir / 'tokenizer')\n\n else:\n base_model = BertForSequenceClassification.from_pretrained(model_dir, num_labels=2)\n self.tokenizer = BertTokenizer.from_pretrained(model_dir / 'tokenizer')\n\n self.model = nn.DataParallel(base_model)\n self.model.to(self.device)\n\n # Define trainable model parameters.\n params = list(self.model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n self.optimizer_params = [\n {\n 'params': [p for n, p in params if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.1\n },\n {\n 'params': [p for n, p in params if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0\n }\n ]\n\n # Initialize additional attributes.\n self.loss_train_list = None\n self.loss_val_list = None\n self.train_dataloader = None\n self.validation_dataloader = None\n\n def tokenize_dataset(self, name: str, batch_size: int=32):\n # Check input.\n if name not in ['train', 'validation', 'test']:\n raise ValueError(f\"Variable 'name' must be one of 'train', 'validation', 'test'. 
Value received: {name}\")\n\n dataset = datasets.load_dataset('liar', split=name)\n data = dataset.map(lambda sample: self.tokenizer(sample['statement'], padding='max_length', truncation=True), batched=True)\n data.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label']) # 'token_type_ids',\n data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size)\n attr_name = name + '_dataloader'\n setattr(self, attr_name, data_loader)\n\n def train(self,\n epochs: int=2, learning_rate: float=1e-5, epsilon: float=1e-8\n ):\n # Prepare optimizer and scheduler.\n optimizer = AdamW(self.optimizer_params, lr=learning_rate, eps=epsilon)\n total_steps = len(self.train_dataloader) * epochs\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=0, num_training_steps=total_steps)\n\n # Training loop.\n self.loss_train_list = []\n self.loss_val_list = []\n\n for epoch in trange(epochs, leave=True, desc='Epoch:'):\n self.model.train()\n\n # Initialize epoch tracking variables.\n time_start = datetime.datetime.now()\n loss_train, accuracy_train = 0.0, 0.0\n nb_tr_examples, nb_tr_steps = 0, 0\n val_loss, val_accuracy = 0, 0\n n_val_steps = 0\n \n for batch in tqdm(self.train_dataloader, leave=True, desc='Batches:'):\n # Store tensors and move to device.\n batch_sequences, batch_masks, batch_labels = batch['input_ids'].to(self.device), batch['attention_mask'].to(self.device), batch['label'].to(self.device)\n\n optimizer.zero_grad()\n \n # Feed model and calculate loss / accuracy.\n outputs = self.model(batch_sequences, token_type_ids=None, attention_mask=batch_masks, labels=batch_labels)\n\n loss = outputs['loss']\n\n self.loss_train_list.append(loss.item())\n logits = outputs['logits'].detach().cpu().numpy()\n np_labels = batch_labels.to('cpu').numpy()\n batch_train_accuracy = accuracy_score(logits, np_labels)\n accuracy_train += batch_train_accuracy\n \n # Backwards step.\n loss.backward()\n optimizer.step()\n scheduler.step()\n \n # Update train tracking statistics.\n loss_train += loss.item()\n nb_tr_examples += batch_sequences.size(0)\n nb_tr_steps += 1\n\n time_elapsed = datetime.datetime.now() - time_start\n\n # Evaluate each epoch.\n self.model.eval()\n \n for batch in self.validation_dataloader:\n batch_sequences, batch_masks, batch_labels = batch['input_ids'].to(self.device), batch['attention_mask'].to(self.device), batch['label'].to(self.device)\n \n with torch.no_grad():\n output = self.model(batch_sequences, token_type_ids=None, attention_mask=batch_masks, labels=batch_labels)\n logits = output['logits'].detach().cpu().numpy()\n np_labels = batch_labels.to('cpu').numpy()\n \n batch_val_accuracy = accuracy_score(logits, np_labels)\n batch_val_loss = output['loss']\n self.loss_val_list.append(batch_val_loss.item())\n val_loss += batch_val_loss.item()\n val_accuracy += batch_val_accuracy\n n_val_steps += 1\n \n len_train = len(self.train_dataloader)\n len_val = len(self.validation_dataloader)\n print(f\"Epoch: {epoch}, \\n\\\n Average Time per Batch: {time_elapsed / len_train}, \\n\\\n Training Loss: {loss_train / len_train} \\t\\\n Training Accuracy: {accuracy_train / len_train} \\n\\\n Validation Loss: {val_loss / len_val} \\t\\\n Validation Accuracy: {val_accuracy / len_val}\\n\")\n\n def plot_training_loss(self, save_path: str):\n plt.figure(figsize=(15,8))\n plt.title(\"Training loss\")\n plt.xlabel(\"Batch\")\n plt.ylabel(\"Loss\")\n plt.plot(self.loss_train_list)\n plt.savefig(save_path)\n plt.close()\n\n def plot_validation_loss(self, save_path: 
str):\n plt.figure(figsize=(15,8))\n plt.title(\"Validation loss\")\n plt.xlabel(\"Batch\")\n plt.ylabel(\"Loss\")\n plt.plot(self.loss_val_list)\n plt.savefig(save_path)\n plt.close()\n\n def evaluate_test_performance(self, eval_fn=None):\n if eval_fn is None:\n eval_fn = matthews_corrcoef\n print(\"No metric function provided. Defaulting to aggregate Matthew's coefficient.\")\n\n self.model.eval()\n\n preds = []\n true_state = []\n\n for batch in tqdm(self.test_dataloader):\n batch_sequences = batch['input_ids'].long().to(self.device)\n batch_masks = batch['attention_mask'].long().to(self.device)\n batch_labels = batch['label'].long().to(self.device)\n \n with torch.no_grad():\n output = self.model(batch_sequences, token_type_ids=None, attention_mask=batch_masks)\n \n logits = output['logits'].detach().cpu().numpy()\n np_labels = batch_labels.to('cpu').numpy()\n preds.append(logits)\n true_state.append(np_labels)\n\n flattened_predictions = [item for sublist in preds for item in sublist]\n flat_predictions = np.argmax(flattened_predictions, axis=1).flatten()\n flat_true_labels = [item for sublist in true_state for item in sublist]\n\n return eval_fn(flat_true_labels, flat_predictions)\n\n def save_model(self, directory: str):\n if not isinstance(directory, pathlib.Path):\n directory = pathlib.Path(directory)\n\n if not directory.exists():\n directory.mkdir()\n\n try:\n torch.save(self.model.module.state_dict(), directory / 'model.pt')\n except AttributeError:\n torch.save(self.model.state_dict(), directory)\n\n self.tokenizer.save_pretrained(directory / 'tokenizer')\n\ndef accuracy_score(preds, labels):\n class_preds = np.argmax(preds, axis=1).flatten()\n class_labels = labels.flatten()\n\n return np.sum(class_preds == class_labels) / len(class_labels)\n","repo_name":"JustinSima/political-deception","sub_path":"dolos/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":9293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73747381852","text":"model_name = \"InternLM\"\ncmd_to_install = \"`pip install -r request_llm/requirements_chatglm.txt`\"\n\nfrom transformers import AutoModel, AutoTokenizer\nimport time\nimport threading\nimport importlib\nfrom toolbox import update_ui, get_conf\nfrom multiprocessing import Process, Pipe\nfrom .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM\n\n\n# ------------------------------------------------------------------------------------------------------------------------\n# 🔌💻 Local Model Utils\n# ------------------------------------------------------------------------------------------------------------------------\ndef try_to_import_special_deps():\n import sentencepiece\n\ndef combine_history(prompt, hist):\n user_prompt = \"<|User|>:{user}\\n\"\n robot_prompt = \"<|Bot|>:{robot}\\n\"\n cur_query_prompt = \"<|User|>:{user}\\n<|Bot|>:\"\n messages = hist\n total_prompt = \"\"\n for message in messages:\n cur_content = message\n cur_prompt = user_prompt.replace(\"{user}\", cur_content[0])\n total_prompt += cur_prompt\n cur_prompt = robot_prompt.replace(\"{robot}\", cur_content[1])\n total_prompt += cur_prompt\n total_prompt = total_prompt + cur_query_prompt.replace(\"{user}\", prompt)\n return total_prompt\n\n# ------------------------------------------------------------------------------------------------------------------------\n# 🔌💻 Local Model\n# 
------------------------------------------------------------------------------------------------------------------------\n@SingletonLocalLLM\nclass GetInternlmHandle(LocalLLMHandle):\n\n def load_model_info(self):\n # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行\n self.model_name = model_name\n self.cmd_to_install = cmd_to_install\n\n def try_to_import_special_deps(self, **kwargs):\n \"\"\"\n import something that will raise error if the user does not install requirement_*.txt\n \"\"\"\n import sentencepiece\n\n def load_model_and_tokenizer(self):\n # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行\n import torch\n from transformers import AutoModelForCausalLM, AutoTokenizer\n device, = get_conf('LOCAL_MODEL_DEVICE')\n if self._model is None:\n tokenizer = AutoTokenizer.from_pretrained(\"internlm/internlm-chat-7b\", trust_remote_code=True)\n if device=='cpu':\n model = AutoModelForCausalLM.from_pretrained(\"internlm/internlm-chat-7b\", trust_remote_code=True).to(torch.bfloat16)\n else:\n model = AutoModelForCausalLM.from_pretrained(\"internlm/internlm-chat-7b\", trust_remote_code=True).to(torch.bfloat16).cuda()\n\n model = model.eval()\n return model, tokenizer\n\n def llm_stream_generator(self, **kwargs):\n import torch\n import logging\n import copy\n import warnings\n import torch.nn as nn\n from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig\n\n # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行\n def adaptor():\n model = self._model\n tokenizer = self._tokenizer\n prompt = kwargs['query']\n max_length = kwargs['max_length']\n top_p = kwargs['top_p']\n temperature = kwargs['temperature']\n history = kwargs['history']\n real_prompt = combine_history(prompt, history)\n return model, tokenizer, real_prompt, max_length, top_p, temperature\n \n model, tokenizer, prompt, max_length, top_p, temperature = adaptor()\n prefix_allowed_tokens_fn = None\n logits_processor = None\n stopping_criteria = None\n additional_eos_token_id = 103028\n generation_config = None\n # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行\n # 🏃‍♂️🏃‍♂️🏃‍♂️ https://github.com/InternLM/InternLM/blob/efbf5335709a8c8faeac6eaf07193973ff1d56a1/web_demo.py#L25\n\n inputs = tokenizer([prompt], padding=True, return_tensors=\"pt\")\n input_length = len(inputs[\"input_ids\"][0])\n for k, v in inputs.items():\n inputs[k] = v.cuda()\n input_ids = inputs[\"input_ids\"]\n batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n if generation_config is None:\n generation_config = model.generation_config\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs)\n bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n if additional_eos_token_id is not None:\n eos_token_id.append(additional_eos_token_id)\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if has_default_max_length and generation_config.max_new_tokens is None:\n warnings.warn(\n f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. 
\"\n \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n UserWarning,\n )\n elif generation_config.max_new_tokens is not None:\n generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n if not has_default_max_length:\n logging.warn(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\",\n UserWarning,\n )\n\n if input_ids_seq_length >= generation_config.max_length:\n input_ids_string = \"input_ids\"\n logging.warning(\n f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n \" increasing `max_new_tokens`.\"\n )\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n logits_processor = model._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_seq_length,\n encoder_input_ids=input_ids,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n stopping_criteria = model._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n logits_warper = model._get_logits_warper(generation_config)\n\n unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n scores = None\n while True:\n model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)\n # forward pass to get next token\n outputs = model(\n **model_inputs,\n return_dict=True,\n output_attentions=False,\n output_hidden_states=False,\n )\n\n next_token_logits = outputs.logits[:, -1, :]\n\n # pre-process distribution\n next_token_scores = logits_processor(input_ids, next_token_logits)\n next_token_scores = logits_warper(input_ids, next_token_scores)\n\n # sample\n probs = nn.functional.softmax(next_token_scores, dim=-1)\n if generation_config.do_sample:\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n else:\n next_tokens = torch.argmax(probs, dim=-1)\n\n # update generated ids, model inputs, and length for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n model_kwargs = model._update_model_kwargs_for_generation(\n outputs, model_kwargs, is_encoder_decoder=False\n )\n unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long())\n \n output_token_ids = input_ids[0].cpu().tolist()\n output_token_ids = output_token_ids[input_length:]\n for each_eos_token_id in eos_token_id:\n if output_token_ids[-1] == each_eos_token_id:\n output_token_ids = output_token_ids[:-1]\n response = tokenizer.decode(output_token_ids)\n\n yield response\n # stop when each sentence is finished, or if we exceed the maximum length\n if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n return\n\n \n# ------------------------------------------------------------------------------------------------------------------------\n# 🔌💻 
GPT-Academic Interface\n# ------------------------------------------------------------------------------------------------------------------------\npredict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetInternlmHandle, model_name)","repo_name":"binary-husky/gpt_academic","sub_path":"request_llm/bridge_internlm.py","file_name":"bridge_internlm.py","file_ext":"py","file_size_in_byte":9743,"program_lang":"python","lang":"en","doc_type":"code","stars":47095,"dataset":"github-code","pt":"32"} +{"seq_id":"26584571287","text":"import numpy as np\nfrom collections import defaultdict\nimport re\nimport gzip\n\ndef loadSHAPE(path):\n \"\"\"\n Load SHAPE reactivities\n \"\"\"\n SHAPE = {}\n with open(path) as f:\n for line in f:\n assert line.startswith(\">\")\n name = line.strip().replace(\">\",\"\")\n reactivity = np.array(next(f).strip().split(\",\")).astype(float)\n SHAPE[name] = reactivity\n return SHAPE\n\n\ndef loadRecords(path,order =\"sequence,structure,reactivity\",dtype=\"str,str,float\"):\n \"\"\"\n Parameters: \n path: path of fasta like file\n order: order of fields in fasta like records\n dtype: data type corresponding to order\n gzip: whether the input file is gzipped\n Return:\n A dict of dict contains keys specified in order parameter\n \"\"\"\n gzipped = False\n if path.endswith(\".gz\"):\n gzipped = True\n orders = order.split(\",\")\n dtypes = dtype.split(\",\")\n N = len(orders)\n assert len(dtypes) == N, \"order and dtype should have same length\"\n records = defaultdict(dict)\n if not gzipped:\n f = open(path,\"r\")\n else:\n f = gzip.open(path,\"rb\")\n for line in f:\n if gzipped:\n line = line.decode()\n try:\n assert line.startswith(\">\")\n except:\n print(\"The input data is not consistent to specified data_type parameter\")\n seq_id = line.strip()[1:].strip()\n for field,dtype in zip(orders,dtypes):\n line = next(f)\n if gzipped:\n line = line.decode()\n data = line.strip()\n if dtype == \"float\":\n data = np.array(data.split(\",\")).astype(float)\n if dtype == \"int\":\n data = np.array(data.split(\",\")).astype(int)\n records[seq_id][field] = data\n return records\n\n\ndef writeRecords(dataDict,path,order=\"sequence,structure,reactivity\"):\n gzipped = False\n if path.endswith(\".gz\"):\n gzipped = True\n order = order.split(\",\")\n if not gzipped:\n f = open(path,\"w\")\n else:\n f = gzip.open(path,\"wb\")\n for seq_id in dataDict.keys():\n line = \">\"+seq_id+\"\\n\"\n if gzipped:\n line = line.encode()\n f.write(line)\n for key in order:\n if isinstance(dataDict[seq_id][key],str):\n line = dataDict[seq_id][key]\n else:\n data = np.round(np.array(dataDict[seq_id][key]),4)\n data = data.astype(str)\n line = \",\".join(data)\n line += \"\\n\"\n if gzipped:\n line = line.encode()\n f.write(line)\n f.close()\n \n\n\ndef prepareSHAPE(shape,prefix):\n \"\"\"\n Prepare SHAPE file for RNAstructure and ViennaRNA package\n \"\"\"\n shape_path = prefix+\".shape\"\n with open(shape_path,\"w\") as f:\n for i,s in enumerate(shape):\n if np.isnan(s):\n s = -999\n f.write(\"{}\\t{}\\n\".format(i+1,s))\n return shape_path\n\n\n\ndef loadFasta(path):\n \"\"\"\n Load fasta file into an sequence dict\n Each sequence records could span multiple lines\n \"\"\"\n sequences = {}\n with open(path) as f:\n for line in f:\n line = line.strip()\n if len(line)==0:\n continue\n if line.startswith(\">\"):\n seqid = line.replace(\">\",\"\").strip()\n sequences[seqid] = \"\"\n else:\n sequences[seqid] += line\n return sequences\n\n\n\ndef checkDBN(s):\n \"\"\"\n Check whether a dot 
bracket notation represents a valid secondary structure\n \"\"\"\n stack = []\n for c in s:\n if c == \"(\":\n stack.append(c)\n elif c == \")\":\n assert len(stack) > 0\n _ = stack.pop()\n else:\n continue\n assert len(stack) == 0\n\n\ndef getKmer(structure,reactivity,flankingL = 2):\n \"\"\"\n Get k-mer, where k = flankingL*2 + 1\n structure is a string\n reactivity is a one dimensional numpy array with same length\n return:\n reactivity at positions where reactivity is not None\n structure k-mer flanking these positions\n \"\"\"\n indices = np.where(~np.isnan(reactivity))[0]\n indices = indices[(indices >= flankingL)&(indices<(reactivity.shape[0]- flankingL))]\n\n structures_ = []\n for idx in indices:\n s = structure[idx-flankingL:idx+flankingL+1]\n structures_.append(s)\n reactivity_ = reactivity[indices]\n\n return structures_,reactivity_\n\n\ndef annotatePairing(dataDict):\n \"\"\"\n Input:\n a dict of dict , sequence id as keys\n Items in the inner dict should contain a 'structure' key, \n and the corresponding value corresponds to secondary structure in dot bracket notation \n Return:\n the input dict, with an additional 'pairing' field, \n 'P' indicate this position is paired\n 'U' indicate this position is unpaired\n \"\"\"\n for seq_id in dataDict.keys():\n annotation = dataDict[seq_id][\"structure\"].replace(\".\",\"U\")\n annotation = re.sub(\"[^U]\",\"P\",annotation)\n dataDict[seq_id][\"pairing\"] = annotation\n return dataDict\n\ndef pairingDigitToString(X):\n \"\"\"\n Convert pairing state of a set of k-mer into string\n Input:\n numpy array of shape (n,k) with binary value (0 for unpaired, 1 for paired)\n where n is number of k mer\n k is length of k-mer\n Return:\n numpy array of length n\n With item are length k string [PU]+ \n \"\"\"\n s = np.empty_like(X).astype(str)\n s[X==1],s[X==0] = \"P\",\"U\"\n return np.apply_along_axis(lambda x:\"\".join(x),arr=s,axis=1)\n","repo_name":"uaauaguga/bioinfo-utils","sub_path":"scripts/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74434359131","text":"\r\nfrom azure.storage.blob import BlobServiceClient\r\nfrom azure.identity import DefaultAzureCredential\r\nfrom azure.keyvault.secrets import SecretClient\r\nfrom pathlib import Path\r\nimport datetime\r\nimport json\r\n\r\ncredential = DefaultAzureCredential()\r\n# Check if given credential can get token successfully.\r\ncredential.get_token(\"https://management.azure.com/.default\")\r\nsecret_client = SecretClient(vault_url=\"https://mlgroup.vault.azure.net/\", credential=credential)\r\n\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(\"prep\")\r\nparser.add_argument(\"--blob_storage\", type=str, help=\"Mounted Azure ML blob storage\")\r\nparser.add_argument(\"--prep_output\")\r\nargs = parser.parse_args()\r\n\r\n# log in to the Blob Service Client\r\nblob_storage = args.blob_storage\r\nblob_storage_key = secret_client.get_secret(\"blob-storage-key\")\r\nblob_service_client = BlobServiceClient(blob_storage, account_key=blob_storage_key.value)\r\n\r\n# connect to the container \r\ncontainer_client = blob_service_client.get_container_client(container=\"stock-news-json\") \r\n\r\n# list and download all currently available blobs\r\nblob_list = container_client.list_blobs()\r\nprint(f\"Blob from: {blob_storage} has these blobs today: {blob_list}\")\r\n\r\n# get the timestamp with the current day \r\ncurrent_day_date = 
datetime.datetime.today().isoformat()[:10]\r\n\r\n# filter out which blobs have the current date and download them\r\nblobs_to_use = [blob.name for blob in blob_list if current_day_date in blob.name]\r\nfor blob in blobs_to_use:\r\n print(f\"Downloading blob: {blob}\")\r\n blob_client = blob_service_client.get_blob_client(container=\"stock-news-json\", blob=blob)\r\n with open(blob, mode=\"wb\") as sample_blob:\r\n download_stream = blob_client.download_blob()\r\n sample_blob.write(download_stream.readall())\r\n\r\n# combine all blobs into one dictionary\r\nall_data_dict = {}\r\nfor json_file in blobs_to_use:\r\n with open(json_file,\"r+\") as file:\r\n # First we load existing data into a dict.\r\n file_data = json.load(file)\r\n all_data_dict.update(file_data)\r\n\r\n# pass aggregated file to the next step \r\nwith open((Path(args.prep_output) / \"merged_stock_news.json\"), \"w\") as file:\r\n file.write(json.dumps(all_data_dict, indent=4))\r\n","repo_name":"LeonardPuettmann/azure-stock-news-analysis","sub_path":"news-analysis-pipeline/components/prep.py","file_name":"prep.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"863240609","text":"from variables import random,device, devices,os,cv2,pytesseract,msconfig, Output, np,time,cascade_pokestop\r\nfrom catchPokemon import TakeScreen, OnlyScreen\r\nfrom vision import draw_rectangles\r\n\r\nspinStop = 'input swipe 300 980 820 980 500'\r\nexitScreen = 'input tap 545 1794'\r\ndef SearchStops():\r\n image = TakeScreen()\r\n y=880\r\n x=150\r\n h=800\r\n w=800\r\n image[0] = image[0][y:y+h, x:x+w]\r\n rectangles = cascade_pokestop.detectMultiScale(image[0])\r\n top,bot = draw_rectangles(image[0], rectangles)\r\n if not top:\r\n return False\r\n else:\r\n mid = ((top[0][0]+bot[0][0])/2,(top[0][1]+bot[0][1])/2)\r\n text1 = 'input tap'\r\n text2 = str(mid[0])\r\n text3 = str(mid[1])\r\n line_color = (0, 255, 0)\r\n line_type = cv2.LINE_4\r\n final = \" \".join((text1, text2,text3))\r\n print(mid[0],mid[1])\r\n device.shell(final)\r\n return True\r\n\r\ndef CheckStopUsed():\r\n imageOfStop = TakeScreen()\r\n colorOfStop = (imageOfStop[0][1845,135][0],imageOfStop[0][1845,135][1],imageOfStop[0][1845,135][2])\r\n if (200 <= colorOfStop[0] <= 255) and (100<=colorOfStop[1]<=255) and (20<=colorOfStop[2]<=50):\r\n return False\r\n else:\r\n if (195 <= colorOfStop[0] <= 250) and (60<=colorOfStop[1]<=110) and (60<=colorOfStop[2]<=110):\r\n return True\r\n else:\r\n return True\r\n\r\ndef SpinStop():\r\n device.shell(spinStop)\r\n\r\ndef ExitStopScreen():\r\n device.shell(exitScreen)\r\n\r\ndef TapSomewhereRandom():\r\n randomX = random.randint(320,922)\r\n randomY = random.randint(450,1000) \r\n randomClickText = \" \".join(('input tap',str(randomX),str(randomY)))\r\n device.shell(randomClickText)\r\n return randomX,randomY\r\n\r\ndef CheckIfBagIsFull():\r\n #Do something else here, maybe check the bag?\r\n #Checking with pytesseract in Pokestop didn't work\r\n return","repo_name":"Leaderide/CatchBotGo","sub_path":"checkPokestop.py","file_name":"checkPokestop.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"74892874651","text":"import numpy as np\nfrom scipy.special import expit\nimport sys, tqdm, warnings, itertools, pickle, time\nimport multiprocessing as mp\nimport pandas as pd\nfrom datetime import 
datetime\n\nwarnings.filterwarnings('ignore')\n\nsys.path.append(\"../\")\nfrom evaluator_Linear import evaluator\nfrom probLearner import PMLearner, RewardLearner, PALearner\nfrom ratioLearner import RatioLinearLearner as RatioLearner\nfrom qLearner_Linear import Qlearner\nfrom IHS2018_Data.create_IHS2018_dataset import create_IHS2018_dataset_weekly, binary_split_dataset\nfrom qbehavior import Learn_Behavior_Q\nfrom _util import *\n\n# HYPER PARAMETERS\nSCALER = [\"Standardize\"]\n\n# RUN SETUP\nCORES = 2\nSPECIALTY = [\"all\"]\nSEED = 42\n\n\n# TRT_STEPS = list(range(15, 25))\ndef make_one_run(specialty, t_dependent_Q, scaler, seed):\n t0 = time.time()\n dataset = create_IHS2018_dataset_weekly(specialty=specialty,\n d=1,\n start_week=0)\n T = dataset[\"T\"]\n N = dataset[\"N\"]\n d1, d2 = binary_split_dataset(dataset=dataset, seed=seed)\n est_obj1 = make_one_split(d1, d2, t_dependent_Q, scaler, seed)\n est_obj2 = make_one_split(d2, d1, t_dependent_Q, scaler, seed)\n ind_est_pooled = np.concatenate([est_obj1.ind_est, est_obj2.ind_est],\n axis=1)\n est_DEMESE = np.mean(ind_est_pooled, 1)\n if N > 100:\n se_DEMESE = np.array([\n np.mean(ind_est_pooled[:, i * T:(i + 1) * T], 1) for i in range(N)\n ]).std(0) / np.sqrt(N)\n else:\n se_DEMESE = np.std(ind_est_pooled, 1) / np.sqrt(N)\n\n t1 = time.time()\n return {\n \"specialty\": specialty,\n \"t_dependent_Q\": t_dependent_Q,\n \"scaler\": scaler,\n \"est_value\": est_DEMESE,\n \"se_value\": se_DEMESE,\n \"time\": \"{:.0f}\".format(t1 - t0)\n }\n\n\ndef make_one_split(dataset1, dataset2, t_dependent_Q, scaler, seed):\n sdim = dataset1[\"state\"].shape[-1]\n mdim = dataset1[\"mediator\"].shape[-1]\n\n # LEARN OPTIMAL POLICY\n problearner_parameters = {\n \"splitter\": [\"best\", \"random\"],\n \"max_depth\": range(1, 20)\n },\n Q_settings = {\n 'scaler': 'Identity',\n 'product_tensor': False,\n 'beta': 3 / 7,\n 'include_intercept': False,\n 'expectation_MCMC_iter_Q3': 50,\n 'expectation_MCMC_iter_Q_diff': 50,\n 'penalty': 10**(-4),\n 'd': 3,\n 'min_L': 7\n }\n Q_behavior = Learn_Behavior_Q(dataset1, PMLearner, PALearner, sdim, mdim,\n problearner_parameters, Q_settings, seed)\n\n def optimal_policy(state=None, dim_state=1, action=None):\n opt_A = Q_behavior.opt_A(state)\n if action is None:\n action_value = opt_A\n else:\n action = np.copy(action).flatten()\n action_value = 1 - abs(opt_A - action)\n return action_value\n\n # def control_policy(state=None, dim_state=None, action=None, get_a=False):\n # # fixed policy with fixed action 0\n # if get_a:\n # action_value = np.array([0])\n # else:\n # state = np.copy(state).reshape(-1, dim_state)\n # NT = state.shape[0]\n # if action is None:\n # action_value = np.array([0] * NT)\n # else:\n # action = np.copy(action).flatten()\n # if len(action) == 1 and NT > 1:\n # action = action * np.ones(NT)\n # action_value = 1 - action\n # return action_value\n\n def control_policy(state=None, dim_state=None, action=None, get_a=False):\n # fixed policy with fixed action 0\n if get_a:\n action_value = np.array([1])\n else:\n state = np.copy(state).reshape(-1, dim_state)\n NT = state.shape[0]\n if action is None:\n action_value = np.array([1] * NT)\n else:\n action = np.copy(action).flatten()\n if len(action) == 1 and NT > 1:\n action = action * np.ones(NT)\n action_value = action\n return action_value\n\n # HYPER PARAMETERS\n #Fixed hyper-parameter--no need to modify\n expectation_MCMC_iter = 50\n expectation_MCMC_iter_Q3 = expectation_MCMC_iter_Q_diff = 50\n truncate = 50\n problearner_parameters = {\n 
\"splitter\": [\"best\", \"random\"],\n \"max_depth\": range(1, 50)\n },\n dim_state = 1\n dim_mediator = 2\n ratio_ndim = 10\n d = 3\n L = 10\n t_depend_target = False\n target_policy = optimal_policy\n control_policy = control_policy\n t_dependent_Q = t_dependent_Q\n scaler = scaler\n num_trajectory = dataset2[\"N\"]\n num_time = dataset2[\"T\"]\n\n est_obj1 = evaluator(dataset2,\n num_trajectory,\n num_time,\n Qlearner,\n PMLearner,\n RewardLearner,\n PALearner,\n RatioLearner,\n problearner_parameters=problearner_parameters,\n ratio_ndim=ratio_ndim,\n truncate=truncate,\n l2penalty=10**(-4),\n t_depend_target=t_depend_target,\n target_policy=target_policy,\n control_policy=control_policy,\n dim_state=dim_state,\n dim_mediator=dim_mediator,\n Q_settings={\n 'scaler': scaler,\n 'product_tensor': False,\n 'beta': 3 / 7,\n 'include_intercept': False,\n 'expectation_MCMC_iter_Q3':\n expectation_MCMC_iter_Q3,\n 'expectation_MCMC_iter_Q_diff':\n expectation_MCMC_iter_Q_diff,\n 'penalty': 10**(-4),\n 'd': d,\n 'min_L': L,\n \"t_dependent_Q\": t_dependent_Q\n },\n expectation_MCMC_iter=expectation_MCMC_iter,\n seed=10)\n\n est_obj1.estimate_DE_ME_SE()\n est_value1 = est_obj1.est_DEMESE\n se_value1 = est_obj1.se_DEMESE\n\n return est_obj1\n\n\ndef make_one_run_star(args):\n return make_one_run(*args)\n\n\ndef expand_grid(data_dict):\n rows = itertools.product(*data_dict.values())\n return pd.DataFrame.from_records(rows, columns=data_dict.keys())\n\n\nif __name__ == \"__main__\":\n note = sys.argv[1]\n settings = expand_grid({\n \"specialty\": SPECIALTY,\n \"t_dependent_Q\": [False, True],\n # \"t_dependent_Q\": [False],\n \"scaler\": SCALER,\n \"seed\": [SEED]\n })\n\n all_jobs = []\n for index, row in settings.iterrows():\n all_jobs.append([\n row[\"specialty\"], row[\"t_dependent_Q\"], row[\"scaler\"], row[\"seed\"]\n ])\n\n with mp.Pool(CORES) as pool:\n output = list(\n tqdm.tqdm(pool.imap(make_one_run_star, all_jobs),\n total=len(all_jobs),\n desc=\"OPTIMAL\"))\n\n pd.DataFrame(output).to_csv(\"./outs/optimal_cv_results_{}_{}.txt\".format(\n note,\n datetime.now().strftime(\"%y%m%d%H%M%S\")),\n index=False,\n sep=\"\\t\")\n","repo_name":"linlinlin97/MediationRL","sub_path":"Real_Case_Study/run_optimal_cv.py","file_name":"run_optimal_cv.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5425450929","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# recipe.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: irifarac +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2023/03/14 17:13:04 by irifarac #+# #+# #\n# Updated: 2023/03/14 21:33:29 by irifarac ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport sys\n\ncookbook = { 'Bocadillo': {'ingredients': ['jamon', 'pan', 'queso', 'tomate'], 'meal': 'lunch', 'prep_time': '10'},\n\t\t\t'Tarta': {'ingredients': ['harina', 'azucar', 'huevos'], 'meal': 'dessert', 'prep_time': '60'},\n\t\t\t'Ensalada': {'ingredients': ['aguacate', 'rucula', 'tomates',\n\t\t\t'espinacas'], 'meal': 'lunch', 'prep_time': '15'}}\n\ndef\tprint_name():\n\tfor key in cookbook:\n\t\tprint(key)\n\ndef\tprint_details():\n\tprint(\"Please enter a recipe to get its details:\")\n\tfor name in sys.stdin:\n\t\tname = name.rstrip()\n\t\tbreak\n\tprint(\" Recipe for {0}\".format(name))\n\tprint(\" Ingredients list: {0}\".format(cookbook[name]['ingredients']))\n\tprint(\" To 
be eaten for {0}.\".format(cookbook[name]['meal']))\n\tprint(\" Takes {0} minutes of cooking.\".format(cookbook[name]['prep_time']))\n\n\ndef\tdelete_key():\n\tprint(\"Please enter a recipe name to delete:\")\n\tfor name in sys.stdin:\n\t\tname = name.rstrip()\n\t\tbreak\n\tif name in cookbook:\n\t\tdel cookbook[name]\n\ndef\ttake_argv():\n\tprint(\"Process to add a new recipe\")\n\tprint(\"Enter a name:\")\n\tfor name in sys.stdin:\n\t\tname = name.rstrip()\n\t\tbreak\n\tprint(\"Enter ingredients:\")\n\ti = 0\n\tlst = []\n\tfor ingredients in sys.stdin:\n\t\tif i >= 2:\n\t\t\tbreak\n\t\telse:\n\t\t\ti += 1\n\t\tingredients = ingredients.rstrip('\\n')\n\t\tlst.append(ingredients.strip())\n\n\tprint(\"Enter a meal type:\")\n\tfor meal in sys.stdin:\n\t\tmeal = meal.rstrip()\n\t\tbreak\n\tprint(\"Enter a preparation time:\")\n\tprep = input()\n\tingre = {'ingredients': [lst[0], lst[1]], 'meal': meal, 'prep_time': prep}\n\tcookbook[name] = ingre\n\nif __name__ == '__main__':\n\t#take_argv()\n\tprint(\"Welcome to the Python Cookbook !\")\n\tprint(\"List of available option:\")\n\tprint(\"\t1: Add a recipe\")\n\tprint(\"\t2: Delete a recipe\")\n\tprint(\"\t3: Print a recipe\")\n\tprint(\"\t4: Print the cookbook\")\n\tprint(\"\t5: Quit\")\n\twhile True:\n\t\tprint(\"Please select an option:\")\n\t\tchoice = int(input())\n\t\tif (choice == 1):\n\t\t\ttake_argv()\n\t\telif (choice == 2):\n\t\t\tdelete_key()\n\t\telif (choice == 3):\n\t\t\tprint_details()\n\t\telif (choice == 4):\n\t\t\tprint(cookbook)\n\t\telif (choice == 5):\n\t\t\tprint(\"Cookbook closed. Goodbye !\")\n\t\t\tbreak\n\t\telse:\n\t\t\tprint('\\033[91m' + \"Sorry, this option does not exist.\" + '\\033[0m')\n","repo_name":"IsraelR1099/IsraelR1099","sub_path":"python/module00/ex06/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27411566063","text":"\n# A website domain like \"discuss.leetcode.com\" consists of various subdomains. At the top level, we have \"com\", at the next level, we have \"leetcode.com\", and at the lowest level, \"discuss.leetcode.com\". When we visit a domain like \"discuss.leetcode.com\", we will also visit the parent domains \"leetcode.com\" and \"com\" implicitly.\n# Now, call a \"count-paired domain\" to be a count (representing the number of visits this domain received), followed by a space, followed by the address. An example of a count-paired domain might be \"9001 discuss.leetcode.com\".\n# We are given a list cpdomains of count-paired domains. We would like a list of count-paired domains, (in the same format as the input, and in any order), that explicitly counts the number of visits to each subdomain.\n\n# Example 1:\n# Input: \n# [\"9001 discuss.leetcode.com\"]\n# Output: \n# [\"9001 discuss.leetcode.com\", \"9001 leetcode.com\", \"9001 com\"]\n# Explanation: \n# We only have one website domain: \"discuss.leetcode.com\". As discussed above, the subdomain \"leetcode.com\" and \"com\" will also be visited. So they will all be visited 9001 times.\n# Example 2:\n# Input: \n# [\"900 google.mail.com\", \"50 yahoo.com\", \"1 intel.mail.com\", \"5 wiki.org\"]\n# Output: \n# [\"901 mail.com\",\"50 yahoo.com\",\"900 google.mail.com\",\"5 wiki.org\",\"5 org\",\"1 intel.mail.com\",\"951 com\"]\n# Explanation: \n# We will visit \"google.mail.com\" 900 times, \"yahoo.com\" 50 times, \"intel.mail.com\" once and \"wiki.org\" 5 times. 
For the subdomains, we will visit \"mail.com\" 900 + 1 = 901 times, \"com\" 900 + 50 + 1 = 951 times, and \"org\" 5 times.\n\n\nfrom collections import defaultdict\n\n\ndef subdomainVisits(cpdomains): \n \"\"\"\n First Approach: Hash Map\n ||======= Big O ======= || \n - Time complexity : O(N) where N is the length of cpdomains.\n - Space complexity: O(N) the space used in our count\n \"\"\" \n # A defaultdict can be created by giving its declaration an argument that can have three values; list, set or int.\n map_domain = defaultdict(int)\n\n for cpdomain in cpdomains:\n count, domains = cpdomain.split(' ')\n count = int(count)\n domain = domains.split('.')\n for i in range(len(domain)):\n map_domain['.'.join(domain[i:])] += count\n return ('{} {}'.format(x, y) for y, x in map_domain.items())\n\n\n# Test case\ns1 = [\"9001 discuss.leetcode.com\"]\ns2 = [\"900 google.mail.com\", \"50 yahoo.com\", \"1 intel.mail.com\", \"5 wiki.org\"]\n\nprint(list(subdomainVisits(s1)))\nprint(list(subdomainVisits(s2)))","repo_name":"edithturn/leetcode-training","sub_path":"EASY_Most_Frequency_Asked/Arrays/811-Subdomain-Visit-Count.py","file_name":"811-Subdomain-Visit-Count.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"22625733263","text":"import requests\n\n\nclass Asf():\n \"\"\"ASF Wrapper for the api\n \"\"\"\n\n def __init__(self, ip, port):\n \"\"\"ASF wrapper for the api\n \n Arguments:\n ip {string} -- IP where ASF is located\n port {int} -- Port where ASF is listening\n \"\"\"\n self.ip = ip\n self.port = port\n\n def send_command(self, cmd):\n \"\"\"Function for sending commands to ASF\n \n Arguments:\n cmd {string} -- Command to send\n \n Returns:\n [json] -- Response of ASF Api\n \"\"\"\n\n end_point = 'http://%s:%s/Api/Command/%s'%(self.ip, self.port, cmd)\n response = requests.post(end_point)\n return response.json()['Result']\n\n def get_bot(self, bot_names):\n \"\"\"Function for accesing bot instances of ASF\n \n Arguments:\n bot_names {string}{array} -- Name of the bot to get retrieved, it also can be an array\n \n Returns:\n json -- Response of ASF Api\n \"\"\"\n\n bots = ''\n if isinstance(bot_names, list):\n bots = ','.join(bot_names)\n else:\n bots = bot_names\n\n end_point = 'http://%s:%s/Api/Bot/%s'%(self.ip, self.port, bots)\n response = requests.get(end_point)\n return response.json()['Result']\n","repo_name":"MrMarble/ASFBot","sub_path":"BOT/asf.py","file_name":"asf.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36449831648","text":"import dataclasses\nimport errno\nimport os\nimport sys\nfrom functools import wraps\nimport json\nfrom typing import Union\nfrom collections.abc import Iterable\n\nimport settings\n\n\ndef create_path_to_file(path):\n \"\"\"\n Creates directories to file if it doesn't exist\n \"\"\"\n if not os.path.exists(os.path.dirname(path)):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n\ndef data_to_str(data, align_columns=True):\n \"\"\"\n Converts data dict to string\n\n :param data: Data dict with keys 'title' and 'data'\n :param align_columns: If true, the columns will be aligned. Looks bad if the lines are long. 
Default: True\n :return: A string ready to be written to a file\n \"\"\"\n if isinstance(data['data'], dict):\n if align_columns:\n max_key_length = max([len(k) for k in data['data']])\n else:\n max_key_length = 0\n processed_data = []\n for key, value in data['data'].items():\n processed_data.append(\"{0: <{k_len}} - {1}\".format(key, value, k_len=max_key_length))\n elif isinstance(data['data'], list):\n\n if isinstance(data['data'][0], dict):\n data_list = [list(i.values()) for i in data['data']]\n else:\n data_list = data['data']\n\n column_lengths = {}\n if align_columns:\n for item in data_list:\n for column_index, column_text in enumerate(item):\n column_lengths[column_index] = max([len(str(column_text)), column_lengths.get(column_index, 0)])\n\n processed_data = []\n for item in data_list:\n processed_data.append(' - '.join(\n [\"{0: <{1}}\".format(item, column_lengths.get(index, 0)) for index, item in enumerate(item)]\n ))\n else:\n return '\\n'.join([data['title'], str(data[\"data\"])])\n return '\\n'.join([data['title'], *processed_data])\n\n\ndef write_in_file(data: dict, file_name, in_json=False, align_columns=True):\n \"\"\"\n Writes given data in file. By default, writes to the .txt file\n\n :param align_columns: If true, when writing to a .txt file, the columns will be aligned in width\n :param data: Data to be written to the file\n :param file_name: Output file name without extension\n :param in_json: If specified, encodes to JSON and writes to a .json file.\n \"\"\"\n if in_json:\n file_name = ''.join((file_name, '.json'))\n file_path = os.path.join(settings.OUTPUT_LOCATION, file_name)\n create_path_to_file(file_path)\n with open(file_path, 'w') as file:\n json.dump(data, file, indent=2)\n else:\n file_name = ''.join((file_name, '.txt'))\n file_path = os.path.join(settings.OUTPUT_LOCATION, file_name)\n create_path_to_file(file_path)\n with open(file_path, 'w') as file:\n if not isinstance(data, str):\n data = data_to_str(data, align_columns=align_columns)\n file.write(data)\n \n\ndef get_cl_args():\n \"\"\"\n Returns command line arguments\n \"\"\"\n return sys.argv[1:]\n\n\ndef has_json_arg():\n \"\"\"\n Checks presence of the \"--json\" parameter\n \"\"\"\n return \"--json\" in get_cl_args()\n\n\ndef write_response_in_file(func, *args, align_columns=True, **kwargs):\n \"\"\"\n Receives a function, executes it, and writes the response to a file. 
Checks presence of the \"--json\" parameter.\n\n :param align_columns: If true, when writing to a .txt file, the columns will be aligned in width\n :param func: Function to be executed\n :param args: Function args\n :param kwargs: Function kwargs\n \"\"\"\n data = func(*args, **kwargs)\n in_json = has_json_arg()\n write_in_file(data, ''.join((func.__name__, settings.OUTPUT_FILE_SUFFIX)), in_json, align_columns=align_columns)\n \n\ndef write_response_in_file_decorator(in_json=False):\n \"\"\"\n A decorator that loads the response of a function into a file.\n\n :param in_json: If True, response will be uploaded in JSON file, otherwise in TXT.\n \"\"\"\n\n def func_wrapper(func):\n\n @wraps(func)\n def func_caller(*args, **kwargs):\n data = func(*args, **kwargs)\n file_name = ''.join((func.__name__, settings.OUTPUT_FILE_SUFFIX))\n write_in_file(data, file_name, in_json)\n\n return func_caller\n\n return func_wrapper\n\n\n@dataclasses.dataclass\nclass LogItem:\n ip: str\n date_time: str\n method: str\n url: str\n http_version: str\n status_code: str\n size: int\n\n\n@dataclasses.dataclass\nclass IncorrectLogItem:\n text: str\n\n\ndef is_valid_log_entry(entry_dict):\n \"\"\"\n Validates log entry\n\n :param entry_dict: Dict with data\n \"\"\"\n booleans = [\n len(entry_dict[\"method\"]) <= 7,\n len(entry_dict[\"status_code\"]) == 3,\n entry_dict[\"status_code\"].isdigit(),\n entry_dict[\"size\"].isdigit()\n ]\n return all(booleans)\n\n\ndef process_log_entry(entry: str):\n \"\"\"\n Takes a string, processes it and returns a dictionary with the data contained in the string\n\n :param entry: String with data\n :return:\n \"\"\"\n entry_list = entry.split(\" \")\n entry_dict = {\n \"ip\": entry_list[0],\n \"date_time\": ' '.join(entry_list[3:5]).lstrip(\"[\").rstrip(\"]\"),\n \"method\": entry_list[5].lstrip('\"'),\n \"url\": entry_list[6],\n \"http_version\": entry_list[7],\n \"status_code\": entry_list[8],\n \"size\": entry_list[9] if entry_list[9] != \"-\" else \"0\",\n \"other\": \" \".join(entry_list[10:])\n }\n return entry_dict\n\n\ndef get_log_file_data_parsed(log_file_path: str, ignore_incorrect_log_data=True) \\\n -> list[Union[LogItem, IncorrectLogItem]]:\n \"\"\"\n Opens the log file, parses it and returns an objects of the each line.\n\n :param log_file_path: Log file path\n :param ignore_incorrect_log_data: If False, invalid lines will be written to the list as objects\n of class IncorrectLogItem. 
Default: True\n :return: List of objects of classes LogItem and IncorrectLogItem\n \"\"\"\n items = []\n with open(log_file_path) as file:\n for line in file:\n entry_dict = process_log_entry(line)\n if is_valid_log_entry(entry_dict):\n item = LogItem(\n ip=entry_dict[\"ip\"],\n date_time=entry_dict[\"date_time\"],\n method=entry_dict[\"method\"],\n url=entry_dict[\"url\"],\n http_version=entry_dict[\"http_version\"],\n status_code=entry_dict[\"status_code\"],\n size=int(entry_dict[\"size\"])\n )\n items.append(item)\n else:\n if not ignore_incorrect_log_data:\n item = IncorrectLogItem(text=line)\n items.append(item)\n return items\n\n\ndef get_log_file_data_columns_by_name(log_file_path: str, column_name: str, validate=True):\n \"\"\"\n Opens the log file, parses it and returns an list of strings with the values of the given column\n\n :param log_file_path: Log file path\n :param column_name: Name of the column\n :param validate: If True, the item will be checked before being added to the list\n :return: List of strings with the values of the given column\n \"\"\"\n items = []\n with open(log_file_path) as file:\n for line in file:\n item_dict = process_log_entry(line)\n if validate:\n if is_valid_log_entry(item_dict):\n items.append(item_dict[column_name])\n else:\n items.append(item_dict[column_name])\n return items\n\n\ndef count_items_objects_by_field(items_list: Iterable[LogItem], field_name: str):\n \"\"\"\n Extracts a field_name from each item and counts the number of duplicate values.\n\n :param items_list: List of LogItem objects\n :param field_name: The name of the field whose values will be extracted and counted\n :return: List of dictionaries with fields 'count' and 'field_name'\n \"\"\"\n items_list_str = [getattr(i, field_name) for i in items_list]\n items_count_dicts_list = count_items_str_by_field(items_list_str, field_name)\n return items_count_dicts_list\n\n\ndef count_items_str_by_field(items_list: Iterable[str], field_name: str):\n \"\"\"\n Counts the number of duplicate values\n\n :param items_list: List of strings\n :param field_name: The name of the field. 
Used as the name of the entry in the dictionary\n :return: List of dictionaries with fields 'count' and 'field_name'\n \"\"\"\n items_list = list(items_list)\n unique_items_list = list(dict.fromkeys(items_list))\n items_count_dicts_list = []\n for item in unique_items_list:\n items_count_dict = {\n field_name: item,\n \"count\": items_list.count(item)\n }\n items_count_dicts_list.append(items_count_dict)\n\n items_count_dicts_list = list(\n sorted(\n sorted(\n items_count_dicts_list,\n key=lambda i: i[field_name]\n ),\n key=lambda i: int(i[settings.COLUMN_NAMES.COUNT]),\n reverse=True\n )\n )\n\n return items_count_dicts_list\n","repo_name":"Dan4ik2504/2021-1-MAILRU-SDET-Python-D-Mashkovtsev","sub_path":"Homework_5/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26932972108","text":"import os\n\nfrom casestudies.find_and_repair_specifications.goals import set_of_goals\nfrom core.cgg import Node\nfrom tools.persistence import Persistence\n\noutput_folder_path = (\n f\"{Persistence.default_folder_path}/casestudies/{os.path.basename(os.getcwd())}\"\n)\n\n\ncgg = Node.build_cgg(set_of_goals)\nprint(cgg)\n\n\"\"\"Setting the saving folder\"\"\"\ncgg.set_session_name(f\"examples/{os.path.basename(os.getcwd())}\")\n\"\"\"Save CGG as text file\"\"\"\ncgg.save()\n\ncgg.realize_specification_controllers()\nprint(cgg)\ncgg.save()\n\n\"\"\"Save CGG so that it can be loaded later\"\"\"\n# Persistence.dump_cgg(cgg, output_folder_path)\n","repo_name":"pierg/crome","sub_path":"casestudies/find_and_repair_specifications/cgg.py","file_name":"cgg.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18805936035","text":"import random\nimport os\n\nx = '❌'\nn = '⬛'\n\ndef randomTreasure():\n row = random.randint(0, 2)\n column = random.randint(0, 2)\n\n map[row][column] = x\n\ndef askUser():\n global count\n\n res = input('Where is the Treasure Chest?\\nType a coordinate to dig ( E.g. 12 )\\n')\n\n row = int( res[0] ) - 1\n column = int( res[1] ) - 1\n\n try:\n _ = res[1]\n map[ row ][ column ]\n except:\n startGame()\n\n if map[row][column] == x :\n clear_map[row][column] = x\n\n printMap()\n\n print('🪙 YOU FOUND THE CHEST! 🪙')\n input('\\nPress ENTER to play again')\n\n reset()\n startGame()\n else:\n clear_map[row][column] = n\n count = count - 1\n\n printMap()\n\n if count:\n askUser()\n else:\n print(\"GAME OVER! 
You've run out of attempts...\")\n input('\\nPress ENTER to play again')\n\n reset()\n startGame()\n\n\ndef printMap():\n os.system('clear')\n print( f\"\\nYou've { count } attempts left\\n\" )\n print(f' 1 2 3 \\n\\n1 { clear_row1 }\\n\\n2 { clear_row2 }\\n\\n3 { clear_row3 }\\n')\n\ndef startGame():\n randomTreasure()\n printMap()\n askUser()\n\ndef reset():\n global count, clear_row1, clear_row2, clear_row3, clear_map, row1, row2, row3, map\n\n count = 4\n\n clear_row1 = ['⬜', '⬜', '⬜']\n clear_row2 = ['⬜', '⬜', '⬜']\n clear_row3 = ['⬜', '⬜', '⬜']\n\n clear_map = [ clear_row1, clear_row2, clear_row3 ]\n\n row1 = ['⬜', '⬜', '⬜']\n row2 = ['⬜', '⬜', '⬜']\n row3 = ['⬜', '⬜', '⬜']\n\n map = [ row1, row2, row3 ]\n\nreset()\nstartGame()\n","repo_name":"koalba/PythonCourse","sub_path":"004_DAY4/02_TreasureMap/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4501376750","text":"import matplotlib; matplotlib.use('agg')\nimport numpy as np\nimport mvpa2.suite as mv\nfrom os.path import join\n\nn_vertices = 40962\ndata_dir = '/dartfs-hpc/scratch/cara/models/singlealpha/sa_niml/isc'\n# First let's create mask of cortical vertices excluding medial wall\ncortical_vertices = {}\nfor half in ['lh', 'rh']:\n test_dat = mv.niml.read('/idata/DBIC/cara/life/ridge/models/new_niml/ws/ws_run1.{0}.niml.dset'.format(half))\n cortical_vertices[half] = np.ones((n_vertices))\n cortical_vertices[half][np.sum(test_dat.samples[1:, :] != 0, axis=0) == 0] = 0\n\nfor model in ['AA', 'HA']:\n for hemi in ['lh', 'rh']:\n npy = np.load('{0}_{1}_masked.npy'.format(model, hemi))\n\n med_wall_ind = np.where(cortical_vertices[hemi] == 0)[0]\n ds = np.zeros((npy.shape[0]+med_wall_ind.shape[0]),dtype=npy.dtype)\n ds[cortical_vertices[hemi] == 1] = npy\n ds = ds[None,:]\n\n mv.niml.write(join(data_dir, '{0}_rg_isc.{1}.niml.dset'.format(model.lower(), hemi)), ds)\n","repo_name":"jungheejung/life-encoding","sub_path":"cara-code/life_forward-encoding/save_masked_niml.py","file_name":"save_masked_niml.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"23569767326","text":"import numpy as np\nimport textblob as tb\nfrom sklearn.cluster import DBSCAN\nimport pandas as pd\nimport sklearn.metrics\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import cdist\nimport tflearn\n#from tflearn.datasets import\nimport nltk\n\ndef handleClean():\n \"\"\"\n Handles the stemmed and cleaned tweets\n :return: list of tweets\n \"\"\"\n CLEANDATA = \"data_clean_stemmed_withoutRT.csv\"\n cleandata = pd.read_csv(CLEANDATA, index_col=0)\n\n stemmedtweets = []\n\n for tweet in cleandata[\"text\"]:\n tweet = tb.TextBlob(tweet) # preferred format\n stemmedtweets.append(tweet)\n return stemmedtweets\n\n# fetch\nDATAFILE = \"data_original.csv\"\ndata = pd.read_csv(DATAFILE, index_col=0)\n\nn = len(data)\ntweets = []\nvec = [] # clustering vectors\n\npositive = 0\nnegative = 0\nsubjectivity50 = 0\nsubjectivity70 = 0\nfor tweet in data[\"text\"]:\n tweet = tb.TextBlob(tweet) # preferred format\n tweets.append(tweet)\n vec.append([tweet.polarity, tweet.subjectivity])\n\n #percentages\n if( tweet.polarity > 0):\n positive += 1\n if( tweet.subjectivity > 0.5):\n subjectivity50 += 1\n if( tweet.subjectivity >= 0.7):\n subjectivity70 += 1\n elif (tweet.polarity < 0):\n negative += 
1\n\nprint(\"Positive tweets: \", round(positive/n, 3),\" Negative tweets: \", round(negative/n, 3), \"Neutral tweets: \", round(positive/n - negative/n, 3) )\nprint(\"Amount of subjective tweets: \", round(subjectivity50/n, 3), \"Amount of very subjective tweets: \", round(subjectivity70/n, 3))\n\ndef findK():\n \"\"\"\n finds optimal k with elbow method\n \"\"\"\n distortions = []\n K = range(1,10)\n for k in K:\n vecFitted, clustLabels, cent, kmeans = cluster(k, vec)\n distortions.append(sum(np.min(cdist(vecFitted, cent, 'euclidean'), axis=1)) / vecFitted.shape[0])\n\n # Plot the elbow\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title('The Elbow Method showing the optimal k')\n plt.show()\n#seems k = 4 is an elbow\n\n\ndef cluster(n_clusters, vec):\n \"\"\"\n :param n_clusters: number of clusters\n :param vec: vectors to be clustered\n :return: clustered vec by Kmeans\n \"\"\"\n vec = np.asarray(vec)\n model = KMeans(n_clusters)\n vecFitted = vec\n model.fit(vecFitted)\n clustLabels = model.predict(vecFitted)\n cent = model.cluster_centers_\n\n kmeans = pd.DataFrame(clustLabels)\n #vecFitted.insert((vec.shape[1]),'kmeans',kmeans)\n return vecFitted, clustLabels, cent, kmeans\n\n\ndef plotOriginal():\n # plotting original data\n npvec = np.asarray(vec) #need np array for slicing\n plt.scatter(npvec[:, 0], npvec[:, 1])\n plt.title(\"Scatterplot of original data\")\n plt.xlabel(\"Polarity\")\n plt.ylabel(\"Subjectivity\")\n plt.show()\n\n\ndef clusterDensity(kmeans):\n \"\"\"\n :param kmeans: takes in the data assigned to each cluster\n :return: amount of tweets in each cluster, and the density of each cluster.\n \"\"\"\n clusters = [0, 0, 0, 0]\n tot = len(kmeans)\n for row in range(len(kmeans)):\n index = kmeans.iat[row, 0]\n clusters[index] += 1\n print(\"Number of tweets in each cluster: \", clusters)\n print(\"densities in clusters: c0 = \", round(clusters[0] / tot,3), \" c1 = \", round(clusters[1] / tot,3), \" c2 = \",\n round(clusters[2] / tot,3), \" c3 = \", round(clusters[3] / tot),3)\n # print(kmeans)\n\n\ndef findMostCommon(dict):\n \"\"\"\n Plots the most common words of each cluster\n \"\"\"\n freqlist = sorted(dict.values()) # amount of times a word is used\n keys = []\n values = np.zeros(14)\n for i in range(14, 0, -1):\n value = freqlist[-i]\n values[i - 1] = value\n for item in dict.items():\n if item[1] == value:\n keys.append(item[0])\n\n keys = list(set(reversed(keys)))\n\n if len(keys) != len(values):\n cut = len(values)\n keys = keys[:cut]\n\n return (keys, values)\n\n\ndef wordFrequencyClusters(kmeans):\n \"\"\"\n :return: The most common words of each cluster.\n \"\"\"\n stemmed = handleClean()\n clustersTweets = ['', '', '', '']\n\n for row in range(len(kmeans)):\n index = kmeans.iat[row, 0] # finds correct cluster\n twe = stemmed[row]\n twe = twe.replace('[', ' ')\n twe = twe.replace(']', ' ')\n clustersTweets[index] += str(twe)\n\n \"\"\"\n Now all stemmed tweets will be sorted into the clusters they belong, and we can do wordcount \n \"\"\"\n word_counts0 = tb.TextBlob(clustersTweets[0]).word_counts # these are dictionaries\n word_counts1 = tb.TextBlob(clustersTweets[1]).word_counts\n word_counts2 = tb.TextBlob(clustersTweets[2]).word_counts\n word_counts3 = tb.TextBlob(clustersTweets[3]).word_counts\n\n words0, freq0 = findMostCommon(word_counts0)\n plt.bar(words0, freq0)\n plt.ylabel(\"Frequency of cluster 0\")\n plt.show()\n words1, freq1 = findMostCommon(word_counts1)\n plt.bar(words1, freq1)\n plt.ylabel(\"Frequency 
of cluster 1\")\n plt.show()\n words2, freq2 = findMostCommon(word_counts2)\n plt.bar(words2, freq2)\n plt.ylabel(\"Frequency of cluster 2\")\n plt.show()\n words3, freq3 = findMostCommon(word_counts3)\n plt.bar(words3, freq3)\n plt.ylabel(\"Frequency of cluster 3\")\n plt.show()\n\ndef doDBSCAN(vec, eps):\n # #############################################################################\n X = np.array(vec)\n clustering = DBSCAN(eps=eps, min_samples=2).fit(X)\n\n core_samples_mask = np.zeros_like(clustering.labels_, dtype=bool)\n core_samples_mask[clustering.core_sample_indices_] = True\n labels = clustering.labels_\n\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n\n print('Estimated number of clusters: %d' % n_clusters_)\n print('Estimated number of noise points: %d' % n_noise_)\n print(\"Silhouette Coefficient: %0.3f\"% metrics.silhouette_score(X, labels))\n\n # Plot result\n # Black removed and is used for noise instead.\n unique_labels = set(labels)\n colors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=14)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=6)\n\n plt.title('Estimated number of clusters: %d' % n_clusters_)\n plt.show()\n\ndef plotmultipleDBSCAN():\n for i in range(1, 16, 2):\n eps = (i / 100)\n print(eps)\n doDBSCAN(vec, eps)\n\n\ndef plotClustered():\n # plotting clustered\n vecFitted, clustLabels, cent, kmeans = cluster(4, vec)\n clusterDensity(kmeans)\n fig = plt.scatter(vecFitted[:,0],vecFitted[:,1],c=kmeans[0],s=50)\n plt.colorbar(fig)\n plt.title(\"Scatterplot of clustered data\")\n plt.xlabel(\"Polarity\")\n plt.ylabel(\"Subjectivity\")\n plt.show(fig)\n wordFrequencyClusters(kmeans)\n\n\n\n\nif __name__ == '__main__':\n plotOriginal() #Here is the original data with sentiment and polarity analysis\n findK() #finds how many clusters we need\n plotClustered() # we now plot the clustered dataset in the sentiment and polarity graph. 
And how many tweets in each cluster ?\n # Also shows the words frequency of each cluster.\n plotmultipleDBSCAN()","repo_name":"rodrigogiraoserrao/DataScience","sub_path":"TrumpTweetsClustering/Sentiment.py","file_name":"Sentiment.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12564807767","text":"import os\nimport sys\n\ndef solve_part_1():\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n _input = os.path.join(__location__, 'input.txt')\n\n # build the 2d array\n _flattenedTreeMap = []\n _treeMapWidth = 0\n with open(_input) as f:\n for _line in f:\n _line = _line.strip()\n if _treeMapWidth == 0:\n _treeMapWidth = len(_line)\n for _char in _line:\n _flattenedTreeMap.append(int(_char))\n \n _treeMapHeight = int(len(_flattenedTreeMap)/_treeMapWidth)\n _visibleTrees = _treeMapHeight * 2 + _treeMapWidth * 2 - 4 # The 4 corner points have been considered twice\n _visibleTreeIndices = set()\n # Horizontal Left -> Right and Right -> Left\n for _treeRowIdx in range(1, _treeMapHeight-1):\n _startTreeIdxX = _treeRowIdx * _treeMapWidth\n _currentMax = _flattenedTreeMap[_startTreeIdxX]\n # Left -> Right\n for _treeIdxX in range(0, _treeMapWidth-1):\n _idx = _startTreeIdxX+_treeIdxX\n _treeHeight = _flattenedTreeMap[_idx]\n if _treeHeight > _currentMax:\n _currentMax = _treeHeight\n _visibleTreeIndices.add(_idx)\n _visibleTrees += 1\n _treeLineMax = _currentMax\n # Right -> Left\n _startTreeIdxX = (_treeRowIdx+1)*_treeMapWidth-1\n _currentMax = _flattenedTreeMap[_startTreeIdxX]\n for _treeIdxX in range(0, _treeMapWidth-1):\n _idx = _startTreeIdxX-_treeIdxX\n _treeHeight = _flattenedTreeMap[_idx]\n if _treeHeight > _currentMax:\n if not _idx in _visibleTreeIndices: \n _visibleTreeIndices.add(_idx)\n _visibleTrees += 1\n _currentMax = _treeHeight\n if _treeHeight == _treeLineMax: # optimization: No need to continue if we have reached the heighest tree\n break\n\n # Vertical Top -> Bottom and Bottom to Top\n for _treeColumnIdx in range(1, _treeMapWidth-1):\n _startTreeIdxY = _treeColumnIdx\n _currentMax = _flattenedTreeMap[_startTreeIdxY]\n # Top -> Bottom\n for _treeIdx in range(_treeMapHeight-1):\n _idx = _startTreeIdxY+_treeIdx*_treeMapWidth\n _treeHeight = _flattenedTreeMap[_idx]\n if _treeHeight > _currentMax:\n if not _idx in _visibleTreeIndices: \n _visibleTreeIndices.add(_idx)\n _visibleTrees += 1\n _currentMax = _treeHeight\n _treeLineMax = _currentMax\n # Bottom -> Top\n _startTreeIdxY = _treeMapHeight * _treeMapWidth - _treeColumnIdx - 1\n _currentMax = _flattenedTreeMap[_startTreeIdxY]\n for _treeIdx in range(_treeMapHeight-1):\n _idx = _startTreeIdxY-_treeIdx*_treeMapWidth\n _treeHeight = _flattenedTreeMap[_idx]\n if _treeHeight > _currentMax:\n if not _idx in _visibleTreeIndices:\n _visibleTreeIndices.add(_idx)\n _visibleTrees += 1\n _currentMax = _treeHeight\n if _treeHeight == _treeLineMax: # optimization: No need to continue if we have reached the heighest tree\n break \n\n print(f\"Visible tress: {_visibleTrees}\")\n\ndef solve_part_2():\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n _input = os.path.join(__location__, 'input.txt')\n\n # build the 2d array\n _flattenedTreeMap = []\n _treeMapWidth = 0\n with open(_input) as f:\n for _line in f:\n _line = _line.strip()\n if _treeMapWidth == 0:\n _treeMapWidth = len(_line)\n for _char in _line:\n _flattenedTreeMap.append(int(_char))\n \n 
_treeMapHeight = int(len(_flattenedTreeMap)/_treeMapWidth)\n _maxScenicScore = 0\n _maxIndex = 0\n # Horizontal Left -> Right and Right -> Left\n for _treeIdx in range(_treeMapWidth+1, (_treeMapHeight-1)*_treeMapWidth-1): # no need to process the edges\n if _treeIdx % _treeMapWidth == 0 or _treeIdx % _treeMapWidth == (_treeMapWidth-1) :\n continue \n\n _leftScore = 0\n _rightScore = 0\n _bottomScore = 0\n _topScore = 0\n _column = _treeIdx % _treeMapWidth\n _row = int(_treeIdx / _treeMapWidth)\n\n _limitLeft = _row * _treeMapWidth\n _limitRight = (_row+1) * _treeMapWidth-1\n _limitUp = _column\n _limitDown = _column + (_treeMapHeight-1) * _treeMapWidth \n _treeHeight = _flattenedTreeMap[_treeIdx]\n # Left of tree\n _viewBlocked = False\n _idx = _treeIdx\n while _idx - 1 >= _limitLeft and not _viewBlocked:\n _leftScore += 1\n _idx = _idx - 1\n _height = _flattenedTreeMap[_idx]\n _viewBlocked = _height >= _treeHeight \n # Right of tree\n _viewBlocked = False\n _idx = _treeIdx\n while _idx + 1 <= _limitRight and not _viewBlocked:\n _rightScore += 1\n _idx = _idx + 1\n _height = _flattenedTreeMap[_idx] \n _viewBlocked = _height >= _treeHeight \n # Top of tree\n _viewBlocked = False\n _idx = _treeIdx\n while _idx - _treeMapWidth >= _limitUp and not _viewBlocked:\n _topScore += 1\n _idx = _idx - _treeMapWidth\n _height = _flattenedTreeMap[_idx] \n _viewBlocked = _height >= _treeHeight \n # Bottom of tree\n _viewBlocked = False\n _idx = _treeIdx\n while _idx + _treeMapWidth <= _limitDown and not _viewBlocked:\n _bottomScore += 1\n _idx = _idx + _treeMapWidth\n _height = _flattenedTreeMap[_idx] \n _viewBlocked = _height >= _treeHeight \n\n _score = _leftScore*_rightScore*_topScore*_bottomScore\n if _score > _maxScenicScore:\n _maxScenicScore = _score\n _maxIndex = _treeIdx\n\n\n print(f\"Max score: {_maxScenicScore}\")\n print(f\"Max tree index: {_maxIndex}\")\n\n\nif __name__ == '__main__':\n # solve_part_1()\n solve_part_2()\n sys.exit(0)\n\n","repo_name":"HarryLong/AdventOfCode","sub_path":"8/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40128089223","text":"# base imports\nimport sys\nimport getopt\n\n# numerical packages / tensorflow\nimport random\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\n# user defined code imports\nfrom data import Data\nfrom models import get_models\nfrom tf_utils import *\n\nverbose = False\n\n\ndef usage():\n print('Usage')\n print('python %s : runs the set of models' % sys.argv[0])\n print('python %s -v|--verbose : runs the set of models with verbose output' % sys.argv[0])\n print('python %s -h : prints this help and exits' % sys.argv[0])\n\ndef run_model(train, accuracy, kp, x, y, vbatch, dtrain):\n \n global verbose\n\n # create a validation feed dictionary for this model run in the ensemble\n v_feed_dict = { x: vbatch[0], y: vbatch[1] }\n\n # iterate over batches for training\n for i in range(5000):\n \n # get a batch\n batch = dtrain.get_batch()\n \n # create the training feed dict\n tr_feed_dict = { x: batch[0], y: batch[1] }\n\n # report accuracy if i modulo 1000 is zero - i.e. 
every 1000th step - includes first step\n if i % 1000 == 0 and verbose:\n \n # set a value to feed kp if it has been set\n if kp is not None:\n tr_feed_dict[kp] = 1.0\n\n train_acc = accuracy.eval(feed_dict=tr_feed_dict)\n print('step %d, training accuracy %g' % (i, train_acc))\n\n # set a value to feed kp if it has been set\n if kp is not None:\n v_feed_dict[kp] = 1.0\n\n valid_acc = accuracy.eval(feed_dict=v_feed_dict)\n print('Validation accuracy %g' % valid_acc)\n\n # set a value to feed kp if it has been set\n if kp is not None:\n tr_feed_dict[kp] = 0.5\n\n # run a training step\n train.run(feed_dict=tr_feed_dict)\n\n # get the final accuracy\n # set a value to feed kp if it has been set\n if kp is not None:\n v_feed_dict[kp] = 1.0\n\n valid_acc = accuracy.eval(feed_dict=v_feed_dict)\n if verbose:\n print('\\nFinal Validation accuracy: %g\\n' % valid_acc)\n\n return valid_acc\n\n \n\ndef run_ensemble(sess, m, x, y, vbatch, dtrain):\n \n # print the model description to screen\n print('======================')\n print(m[0])\n \n # get the accuracy\n accuracy = m[2]\n # get the trainer\n train = m[1]\n # get the keep probability\n kp = m[3]\n\n \n acc = []\n ensemble_count = 3\n\n for i in range(ensemble_count):\n \n # initialise global variables in the graph\n sess.run(tf.global_variables_initializer())\n\n acc.append(run_model(train, accuracy, kp, x, y, vbatch, dtrain))\n\n print(acc)\n print('\\n\\nEnsemble average validation accuracy: %g\\n\\n' % (sum(acc)/len(acc)))\n print('======================')\n\nif __name__ == '__main__':\n\n # get command line options\n optlist, args = getopt.getopt(sys.argv[1:], 'vh', ['verbose', 'help'])\n\n for (k, v) in optlist:\n if k in ('-v', '--verbose'):\n verbose = True\n elif k in ('-h', '--help'):\n usage()\n sys.exit(0)\n\n # create an instance of the Data class\n dtrain = Data('datasets/train-exploration.csv')\n dvalid = Data('datasets/validation-exploration.csv')\n \n # create placeholders for x and y\n x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])\n y = tf.placeholder(tf.float32, shape=[None, 10])\n\n # build the neural network\n models = get_models(x, y)\n\n with tf.Session() as sess:\n \n # get a validation batch\n vbatch = (dvalid.features, dvalid.labels) # get_batch(4207)\n\n # iterate over all models\n for m in models:\n \n run_ensemble(sess, m, x, y, vbatch, dtrain)\n \n","repo_name":"ianharris/digit-recognizer-experiment","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1099513886","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.home),\n path('home/', views.home),\n path('profile/', views.profile),\n path('support/', views.support),\n path('findJob/', views.findJob),\n path('mail/', views.mail)\n\n\n]\n","repo_name":"carminvuong/job-searching","sub_path":"jobsearching/jobwebsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"27116812001","text":"\"\"\"\nPython 3 Object-Oriented Programming\n\nChapter 14. 
Concurrency\n\"\"\"\nfrom concurrent import futures\nfrom PIL import Image # type: ignore [import]\nfrom pathlib import Path\nimport time\nfrom typing import Iterator, List, Iterable, Tuple, Optional, Type\n\n\n# State design with flyweight objects.\n\n\nclass RLERun:\n \"\"\"\n Two subclasses of RLERun for Replicated\n bytes and Literal bytes.\n \"\"\"\n\n def __init__(self, buffer: bytes, start: int) -> None:\n self.buffer = buffer\n self.start = start\n self.end = start + 1\n\n @property\n def count(self) -> int:\n return self.end - self.start\n\n def byte_state(self, index: int) -> \"RLERun\":\n raise NotImplementedError\n\n def emit(self) -> bytes:\n raise NotImplementedError\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.start}: {self.end})\"\n\n\nclass Replicate(RLERun):\n \"\"\"\n All bytes in self.buffer[self.start:self.end] are equal.\n\n >>> source = bytes([42, 42, 43])\n >>> s = Replicate(source, 0)\n >>> s.byte_state(1) == s\n True\n >>> s.start\n 0\n >>> s.end\n 2\n >>> s.byte_state(2) == s\n False\n >>> s.emit()\n b'\\\\x81*'\n\n >>> source = bytes(129*[42])\n >>> t = Replicate(source, 0)\n >>> t.byte_state(127) == t\n True\n >>> t.byte_state(128) == t\n False\n >>> t.emit()\n b'\\\\xff*'\n\n \"\"\"\n\n flag = 0x80 # Identical Bytes\n\n def byte_state(self, index: int) -> RLERun:\n if self.buffer[index] == self.buffer[self.end - 1] and index - self.start < 128:\n self.end = index + 1\n return self\n else:\n self.end = index\n return Literal(self.buffer, index)\n\n def emit(self) -> bytes:\n data = [(self.count - 1) | self.flag, self.buffer[self.start]]\n return bytes(data)\n\n\nclass Literal(RLERun):\n \"\"\"\n All bytes in self.buffer[self.start:self.end]\n serially unequal. self.buffer[self.start] != self.buffer[self.start+1]\n\n >>> source = bytes([42, 43, 44, 44])\n >>> s = Literal(source, 0)\n >>> s.byte_state(1) == s\n True\n >>> s.byte_state(2) == s\n True\n >>> s.byte_state(3) == s\n False\n >>> s.emit()\n b'\\\\x01*+'\n\n \"\"\"\n\n flag = 0x00 # Unique Bytes\n\n def byte_state(self, index: int) -> RLERun:\n if self.buffer[index] != self.buffer[self.end - 1]:\n if index - self.start < 128:\n self.end = index + 1\n return self\n else:\n self.end = index\n return Literal(self.buffer, index)\n else:\n self.end = index - 1\n change = Replicate(self.buffer, self.end)\n change.byte_state(index)\n return change\n\n def emit(self) -> bytes:\n return (\n bytes([(self.count - 1) | self.flag]) + self.buffer[self.start : self.end]\n )\n\n\ndef rle_compress(image_bytes: bytes) -> Iterator[RLERun]:\n \"\"\"\n >>> row = bytes([42, 42, 42, 42, 43, 44, 45, 45, 45])\n >>> [b.emit() for b in rle_compress(row)]\n [b'\\\\x83*', b'\\\\x01+,', b'\\\\x82-']\n \"\"\"\n state: RLERun = Literal(image_bytes, 0)\n for index in range(1, len(image_bytes)):\n next_state = state.byte_state(index)\n if next_state != state:\n if state.count != 0:\n yield state\n state = next_state\n yield state\n\n\ndef rle_row_compress(row_bytes: bytes) -> bytes:\n return b\"\".join(run.emit() for run in rle_compress(row_bytes))\n\n\ndef image_to_rle(image: Image, workers: Optional[futures.Executor] = None) -> bytes:\n if workers is None:\n workers = futures.ProcessPoolExecutor()\n b_w = image.convert(\"L\")\n width, height = b_w.size\n image_bytes: bytes = bytes(b_w.getdata())\n row_slices = (slice(r * width, (r + 1) * width) for r in range(height))\n row_compressors = [\n workers.submit(rle_row_compress, image_bytes[s]) for s in row_slices\n ]\n return b\"\".join(c.result() for c in 
row_compressors)\n\n\ndef compress(\n    image_path: Path,\n    executor_type: Optional[Type[futures.Executor]] = None,\n) -> Tuple[str, float]:\n    if executor_type is None:\n        executor_type = futures.ProcessPoolExecutor\n    start = time.perf_counter()\n    source_image = Image.open(image_path)\n    with executor_type() as workers:\n        compressed_image = image_to_rle(source_image, workers)\n    target = image_path.with_suffix(\".rle\")\n    target.write_bytes(compressed_image)\n    end = time.perf_counter()\n    return target.name, end - start\n\n\ndef rle_decompress(width: int, height: int, compressed: bytes) -> bytes:\n    \"\"\"\n    >>> rle_decompress(9, 1, bytes([0x83, 42, 0x01, 43, 44, 0x82, 45]))\n    b'****+,---'\n    \"\"\"\n    image_bytes = bytearray(width * height)\n    index = 0\n    iter_compressed = iter(compressed)\n    for h in iter_compressed:\n        if h & Replicate.flag:\n            # Replicate\n            span = (h ^ Replicate.flag) + 1\n            r = bytes([next(iter_compressed)] * span)\n        else:\n            # Literal\n            span = h + 1\n            r = bytes(next(iter_compressed) for _ in range(span))\n        image_bytes[index : index + span] = r\n        index = index + span\n    return bytes(image_bytes)\n\n\ndef rle_to_image(width: int, height: int, source: bytes) -> Image:\n    image_bytes = rle_decompress(width, height, source)\n    image = Image.new(\"L\", (width, height))\n    image.putdata(image_bytes)\n    return image\n\n\ndef ascii_art(image: Image) -> None:\n    grayscale = \"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcft/\\\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. \"\n    # \"1\" == 1-bit pixels, black and white, stored with one pixel per byte\n    # \"L\" == 8-bit pixels, black and white\n    b_w = image.convert(\"L\")\n    # b_w.show()\n    width, height = b_w.size\n    if width > 256:\n        h_ratio = height / width\n        scaled_b_w = b_w.resize((256, int(256 * h_ratio)))\n    else:\n        scaled_b_w = b_w\n\n    bytes = list(scaled_b_w.getdata())\n    for r in range(scaled_b_w.height):\n        gray_char = lambda b: int(len(grayscale) * (b / 256))\n        gray = map(\n            gray_char, bytes[r * scaled_b_w.width : (r + 1) * scaled_b_w.width]\n        )\n        text = \"\".join(grayscale[g] for g in gray)\n        print(text)\n\n\ndef display(image_path: Path = Path.cwd() / \"images\" / \"bricks.bmp\") -> None:\n    with Image.open(image_path) as image:\n        print(f\"********** {image_path.name} {image.size} **********\")\n        ascii_art(image)\n\n\ndef benchmark() -> None:\n    for conversion_type in (\n        futures.ProcessPoolExecutor,\n        futures.ThreadPoolExecutor,\n    ):\n        for compression_type in (\n            futures.ProcessPoolExecutor,\n            futures.ThreadPoolExecutor,\n        ):\n            print(\"per image, compression, workload, time\")\n            start = time.perf_counter()\n            with conversion_type() as conversion_workers:\n                images = [\n                    conversion_workers.submit(compress, image_path, compression_type)\n                    for image_path in (Path.cwd() / \"images\").glob(\"*.bmp\")\n                ]\n                done, not_done = futures.wait(images, return_when=futures.ALL_COMPLETED)\n            end = time.perf_counter()\n            print(\n                f\"{conversion_type.__name__}, {compression_type.__name__}, \"\n                f\"{len(done)}, {end-start:.3f}\"\n            )\n            for d in done:\n                name, duration = d.result()\n                print(f\", , {name}, {duration:.3f}\")\n\n\nif __name__ == \"__main__\":\n    benchmark()\n","repo_name":"PacktPublishing/Python-Object-Oriented-Programming---4th-edition","sub_path":"ch_14/bonus/image_compressor.py","file_name":"image_compressor.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"32"} +{"seq_id":"17294021448","text":"import requests\nimport json\nfrom flask import Flask, request, jsonify, redirect\napp = 
Flask(__name__)\nfrom flask_cors import CORS, cross_origin\nCORS(app)\n\nTOKEN = None\n\ndef obter_token():\n    # data = {\n    #     'username':'andre.saraujo',\n    #     'password': 'Gilberto1'\n    # }\n\n    data = {\n        'username': 'goliveira',\n        'password': 'hhv2573@2'\n    }\n\n    # data = {\n    #     'username': 'bernardo.abreu',\n    #     'password': 'Brasil123.'\n    # }\n    # data = {\n    #     'username': 'bernardo.abreu',\n    #     'password': 'Gerais123.'\n    # }\n\n    # data = {\n    #     'username': 'keller.simone',\n    #     'password': 'KELLER03!'\n    # }\n    # data = {\n    #     'username': 'TMARCOLINO',\n    #     'password': 'TAVARES13'\n    # }\n    #data = {\n    #    'username': 'MINASFER',\n    #    'password': 'MFL18533-@'\n    #}\n\n    #data = {\n    #    'username': 'MARCELOVASCONCELOS',\n    #    'password': 'RA102030'\n    #}\n    #data = {\n    #    'username':'bernardo.abreu',\n    #    'password': 'Minas123.'\n    #}\n\n\n    # data = {\n    #     'username':'centralcomprassuperbuy',\n    #     'password': 'SUPERBUY2017'\n    # }\n\n    r = requests.post(\"http://localhost:5000/mobile/api/login\", data=json.dumps(data))\n    # import pdb; pdb.set_trace()\n    global TOKEN\n    if r.status_code == 200:\n        TOKEN = r.json()['token']\n        print(TOKEN)\n\n\n@app.route('/', defaults={'path': ''})\n@app.route('/<path:path>', methods=['GET'])\ndef catch_all(path):\n    url = request.url.replace(\"5001\", \"5000\").replace(\"172.16.0.24\", \"127.0.0.1\")\n    if TOKEN:\n        headers = {'Authorization': \"Bearer \" + TOKEN}\n    else:\n        headers = {}\n\n    keywords = ['favicon', 'static', 'robots']\n    if any([ key in url for key in keywords] ):\n        return redirect(url.replace('sstatic', 'static'), code=301)\n    else:\n        response = requests.get(url, headers=headers)\n        try:\n            resp = jsonify(response.json())\n\n            resp.status_code = response.status_code\n            return resp\n        except ValueError:\n            return response.content, response.status_code\n\n@app.route('/', defaults={'path': ''}, methods=['POST'])\n@app.route('/<path:path>', methods=['POST'])\ndef catch_all_post(path):\n    url = request.url.replace(\"5001\", \"5000\")\n\n    data = json.dumps(json.loads(request.data or '{}'))\n    global TOKEN\n\n    if TOKEN:\n        headers = {'Authorization': \"Bearer \" + TOKEN}\n    else:\n        headers = {}\n\n    if request.files and data == '{}':\n        data = {}\n        for k, v in request.form.items():\n            data[k] = v\n\n    response = requests.post(url, data=data, files=request.files, headers=headers)\n\n    try:\n        resp = jsonify(response.json())\n\n        resp.status_code = response.status_code\n        return resp\n    except ValueError:\n        return response.content, response.status_code\n\nif __name__ == '__main__':\n    obter_token()\n    app.run(host='0.0.0.0', debug=True, port=5001, threaded=True)\n","repo_name":"pedebodes/proxyFlaskCors","sub_path":"app-proxy.py","file_name":"app-proxy.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25550346990","text":"# CSV v1.1\n# -*- coding: UTF-8 -*-\n# Skrypt z odczytu, zmiany danych i zapisu w plikach CSV | JSON | PICKLE |\nimport os\nimport sys\nimport time\nimport msvcrt\nfrom sys import argv as argv\nfrom sys import stdin, stdout, stderr\nfrom csv_lib.csv_additives import clear_screen, title_line\nfrom csv_lib.csv_functions import \\\n    read_csv_to_list, read_json_to_list, read_pickle_to_list, \\\n    save_list_to_csv, save_list_to_json, save_list_to_pickle, \\\n    print_argv_changes, print_alldata_from_csvfile, \\\n    print_alldata_from_jsonfile, print_alldata_from_picklefile, \\\n    clean_csv_file, change_data_from_argv\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #\npath_csv_src = 
'trees1.csv'\npath_csv_dst = 'trees2.csv'\nplace = ''\nget_data = ''\nworking_list: list = []\nargv_list: list = []\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #\nclear_screen()\nwhile True:\n while 1:\n if len(sys.argv) >= 1:\n place = 'SYS.ARGV'\n path_csv_src = str(sys.argv[1])\n clean_csv_file(path_csv_src)\n path_csv_dst = str(sys.argv[2])\n else:\n place = 'TERMINAL'\n title_line(' Pochodzenie zmiany danych: {} '.format(place))\n time.sleep(2)\n\n if place == 'SYS.ARGV':\n working_list = read_csv_to_list(working_list, path_csv_src)\n stdout.write('\\n\\t<<< Zawartość pliku przed zmianami: >>>\\n')\n print_alldata_from_csvfile(path_csv_src)\n time.sleep(1)\n if working_list and len(sys.argv) >= 3:\n stdout.write('\\nWykaz zmian w zawartości pliku:\\n')\n for i in range(3, len(sys.argv)):\n print_argv_changes(sys.argv[i])\n working_list = change_data_from_argv(\n working_list, sys.argv[i])\n time.sleep(2)\n break\n else:\n stderr.write('\\nBrak danych do zmiany.\\n')\n break\n\n\n elif place == 'TERMINAL':\n print('\\nPodaj nazwę pliku źródłowego: ')\n path_csv_src = stdin.readline()[:-1]\n print('\\nPodaj nazwę pliku docelowego: ')\n path_csv_dst = stdin.readline()[:-1]\n working_list = read_csv_to_list(working_list, path_csv_src)\n if working_list:\n while True:\n stdout.write('\\nPodaj dane \"Y,X,wartość\" lub \"stop\": ')\n get_data = stdin.readline()[:-1]\n if get_data == 'stop' or get_data == '':\n break\n else:\n print_argv_changes(get_data)\n working_list = change_data_from_argv(\n working_list, get_data)\n continue\n time.sleep(1)\n else:\n stderr.write('Brak danych do zmiany.\\n')\n break\n\n if working_list:\n while 1:\n title_line(' Wybierz formę zapisu danych CSV: ')\n stdout.write(f'\\t(1) CSV?\\t(2) JSON?\\t(3) PICKLE?\\n')\n which_choice = int(msvcrt.getch())\n if which_choice == 1:\n if save_list_to_csv(working_list, path_csv_dst):\n stderr.write('\\nPoprawnie zapisano plik (CSV).\\n')\n stdout.write('\\t<<< Zawartość pliku po zmianach: >>>\\n')\n print_alldata_from_csvfile(path_csv_dst)\n else:\n stderr.write('Błąd zapisu do pliku! (CSV)\\n')\n break\n elif which_choice == 2:\n if save_list_to_json(working_list, path_csv_dst):\n stderr.write('\\nPoprawnie zapisano plik (JSON).\\n')\n stdout.write('\\t<<< Zawartość pliku po zmianach: >>>\\n')\n print_alldata_from_jsonfile(path_csv_dst)\n else:\n stderr.write('Błąd zapisu do pliku! (JSON)\\n')\n break\n elif which_choice == 3:\n if save_list_to_pickle(working_list, path_csv_dst):\n stderr.write('\\nPoprawnie zapisano plik (PICKLE).\\n')\n stdout.write('\\t<<< Zawartość pliku po zmianach: >>>\\n')\n print_alldata_from_picklefile(path_csv_dst)\n else:\n stderr.write('\\nBłąd zapisu do pliku! 
(PICKLE)\\n')\n break\n else:\n stdout.write('\\n\\tCoś źle klikasz, jeszcze raz...\\n')\n time.sleep(2)\n clear_screen()\n continue\n time.sleep(2)\n stdout.write('\\nPress any key...\\n')\n if msvcrt.getch():\n clear_screen()\n break\n","repo_name":"marcin-se/python-learn","sub_path":"py06csv/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33091646407","text":"#!/usr/bin/python3\n# coding: utf-8\nimport random\nfrom collections import namedtuple\nStudent = namedtuple('Student', ['id', 'ans']) # tuple 子类, 类似于结构体\nN_Questions = 10 # 问题的数量\nN_Students = 10 # 学生的数量\n\ndef gen_random_list(opts, n):\n return [random.choice(opts) for i in range(n)]\n\nANS = gen_random_list('ABCD', N_Questions) # 问题答案 'ABCD' 随机\nSCORE = gen_random_list(range(1, 6), N_Questions) # 题目分值 1~5 分\nquize = list(zip(ANS, SCORE)) # [('A', 3), ('B', 1), ('D', 1), ...\nstudents = [\n Student(_id, gen_random_list('ABCD*', N_Questions)) for _id in range(1, N_Students+1) # 学生答案为 'ABCD*' 随机, '*' 代表未作答\n] # [Student(id=1, ans=['C', 'B', 'A', ...\n\nprint ('ID\\tScore\\n==================') # 这里还可以这样写\nfor student in students:\n print(student.id, '\\t', sum(q[1] for ans, q in zip(student.ans, quize) if ans==q[0]))\n# python 中的 for 循环比 c 更进一步, 通常不需要额外的状态变量来记录当前循环次数, 但有时候也不得不使用状态变量, 如第二个循环中比较两个列表的元素\n# 函数式编程的一大特点就是尽量抛弃这种明显循环遍历的做法 (计算机思维), 而是把注意集中在解决问题本身,\n# 一如在现实中我们批改试卷时, 只需要将两组答案并列进行比较即可, 下面是实现函数式编程\nprint('ID\\tScore\\n==================') # 这里还可以这样写\ndef cal(quize):\n def inner(student):\n # 将学生答案与正确答案合并到一起, 然后过滤出答案一致的题目\n filtered = filter(lambda x: x[0] == x[1][0], zip(student.ans, quize)) # [('A', ('A', 3)), ('D', ('D', 1)), ...]\n from functools import reduce\n reduced = reduce(lambda x, y: x + y[1][1], filtered, 0)\n print(student.id, '\\t', reduced)\n return inner # 借助闭包(Closure)的方法, 就可以维持纯净的 FP 模式\nlist(map(cal(quize), students))\n# 通过 zip/filter/reduce/map 等函数将数据处理的方法打包应用到数据上, 实现了基本的函数式编程操作\n","repo_name":"HCShi/jShellscript","sub_path":"bin/template/src/jptzen/l1_namedtuple_random_list_计算n个同学m道题目的得分.py","file_name":"l1_namedtuple_random_list_计算n个同学m道题目的得分.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30009145637","text":"\ndef make_grlex(lst):\n sort = sorted(lst, key= str.lower)\n sum_len = 0\n for word in lst:\n sum_len += len(word) \n length = sum_len / len(lst)\n if length != len(sort[0]):\n return sorted(sort, key=len)\n else:\n return sort\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"qLMZ2hEvrhRSSSnQw_6.py","file_name":"qLMZ2hEvrhRSSSnQw_6.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27771598258","text":"#coding: utf-8\r\n\r\n#######################################################################\r\n# Monitoria de matemática discreta 2016.2 #\r\n# Monitora: Ivyna Alves #\r\n# Prof.: Leandro Balby Marinho #\r\n# #\r\n#\tPrimeira atividade de programção + matemática discreta (extra) #\r\n#\tObjetivo: trabalhar recursão e custo com algoritmos de busca e #\r\n#\tordenação(busca binária e quick sort). 
#\r\n# #\r\n#\tNota está dividida: 30% --> testes e 70% --> design de código #\r\n#######################################################################\r\n\r\n'''\r\n Algorithm binary search index with recursion\r\n :return: index of value in collection\r\n'''\r\ndef buscaBinariaIndice(lista, valor):\r\n return buscaBinaria(lista, valor, 0, len(lista)-1)\r\n\r\ndef buscaBinaria(lista, valor, left, right):\r\n # base case on the search limits keeps this O(log n);\r\n # the original 'valor in lista' guard was a linear scan at every level\r\n if left > right:\r\n return -1\r\n\r\n mid = (left + right) // 2 # floor division\r\n\r\n if lista[mid] == valor:\r\n return mid\r\n\r\n elif lista[mid] < valor:\r\n return buscaBinaria(lista, valor, mid + 1, right)\r\n\r\n else:\r\n return buscaBinaria(lista, valor, left, mid - 1)\r\n\r\n'''\r\n Recursive sorting algorithm with limits in collection\r\n'''\r\ndef quickSort(lista, inicio, fim):\r\n if len(lista) <= 0:\r\n return\r\n if inicio < 0 or fim <= 0:\r\n return\r\n if fim > len(lista) - 1:\r\n return\r\n\r\n if (inicio < fim):\r\n pivot = particion(lista, inicio, fim)\r\n quickSort(lista, inicio, pivot - 1)\r\n quickSort(lista, pivot + 1, fim)\r\n\r\n\r\ndef particion(lista, inicio, fim):\r\n pivot = lista[inicio]\r\n\r\n left = inicio + 1\r\n right = fim\r\n\r\n while left <= right:\r\n if lista[left] <= pivot:\r\n left += 1\r\n\r\n elif pivot < lista[right]:\r\n right -= 1\r\n\r\n else:\r\n troca(lista, left, right)\r\n left += 1\r\n right -= 1\r\n\r\n lista[inicio] = lista[right]\r\n lista[right] = pivot\r\n return right # return the pivot's final index; returning the pivot value broke quickSort\r\n\r\n'''\r\n Auxiliar algorithm to do swaps in elements of collection\r\n'''\r\ndef troca(lista, i, j):\r\n swap = lista[i]\r\n lista[i] = lista[j]\r\n lista[j] = swap\r\n","repo_name":"ivynasantino/prog-md","sub_path":"original.py","file_name":"original.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74348695771","text":"from datetime import datetime, timezone, timedelta\nfrom pathlib import Path\nfrom typing import Optional\nfrom unittest import TestCase\nfrom uuid import UUID\n\nfrom nx_config import ConfigSection, URL, SecretString, validate, Config\nfrom nx_config.test_utils import update_section\nfrom tests.typing_test_helpers import collection_type_holders, CollectionTypeHolder\n\n\ndef _get_testing_section_cls(tps: CollectionTypeHolder) -> type:\n # noinspection PyUnresolvedReferences\n class DatabaseSection(ConfigSection):\n user: str = \"John Doe\"\n password: SecretString\n token: UUID = UUID(int=5_444_333_222_111)\n birthday: datetime = datetime(\n 1955, 11, 5, 1, 22, tzinfo=timezone(offset=timedelta(hours=-3))\n )\n url: URL = \"www.nx_config_db.com\"\n port: int = 1_234\n ultimate_question: Optional[str] = None\n no_collection: Optional[tps.tuple[int, ...]] = None\n resources: Path = Path(\"/a/b/c/resources/\")\n rel_resources: Path = Path(\"c/resources\")\n secure_mode: bool = True\n growth_factor: float = 1.5\n cats: Optional[tps.frozenset[str]] = frozenset(\n (\"grey\", \"brown\", \"black\", \"white\")\n )\n dogs: tps.tuple[str, ...] = (\"lazy\", \"happy\", \"sad\")\n sasquatch: tps.tuple[str, ...] = ()\n big_foot: tps.frozenset[str] = frozenset()\n secret_files: tps.frozenset[Path] = frozenset(\n (Path(\"hi.txt\"), Path(\"/hello.md\"))\n )\n more_files: tps.tuple[Path, ...] = (Path(\"bye.exe\"), Path(\"/see_ya.py\"))\n fibonacci: tps.frozenset[int] = frozenset((0, 1, 1, 2, 3, 5))\n more_fibonacci: tps.tuple[int, ...] 
= (8, 13, 21, 34)\n holidays: tps.frozenset[datetime] = frozenset((datetime(1985, 11, 12),))\n non_holidays: tps.tuple[datetime, ...] = (datetime(1985, 11, 13),)\n old_tokens: tps.frozenset[UUID] = frozenset(\n (UUID(int=1), UUID(int=9_999_999_999_999))\n )\n future_tokens: tps.tuple[UUID, ...] = (UUID(int=2), UUID(int=7), UUID(int=17))\n needs_escaping: str = \"Hello\\nWorld\\r!\\tHowdy?\"\n\n class Nested:\n pass\n\n Alias = Nested\n\n def a_method(self):\n pass\n\n @validate\n def a_validator(self):\n pass\n\n return DatabaseSection\n\n\nclass EmptySection(ConfigSection):\n pass\n\n\ndef _get_big_testing_config_cls(tps: CollectionTypeHolder) -> type:\n db_section = _get_testing_section_cls(tps)\n\n class BigConfig(Config):\n database: db_section\n empty: EmptySection\n\n return BigConfig\n\n\nclass EmptyConfig(Config):\n pass\n\n\ndef indent_after_newline(s: str) -> str:\n return s.replace(\"\\n\", \"\\n \")\n\n\nclass PrettyPrintingTestCase(TestCase):\n def setUp(self) -> None:\n db_section_cls = _get_testing_section_cls(collection_type_holders[0])\n database_sec = db_section_cls()\n\n cats_str = \"{\" + \", \".join(f\"'{x}'\" for x in database_sec.cats) + \"}\"\n dogs_str = \"(\" + \", \".join(f\"'{x}'\" for x in database_sec.dogs) + \")\"\n big_foot_str = \"{}\"\n secret_files_str = (\n \"{\" + \", \".join(f\"'{x}'\" for x in database_sec.secret_files) + \"}\"\n )\n more_files_str = (\n \"(\" + \", \".join(f\"'{x}'\" for x in database_sec.more_files) + \")\"\n )\n holidays_str = \"{\" + \", \".join(str(x) for x in database_sec.holidays) + \"}\"\n non_holidays_str = (\n \"(\" + \", \".join(str(x) for x in database_sec.non_holidays) + \")\"\n )\n old_tokens_str = \"{\" + \", \".join(str(x) for x in database_sec.old_tokens) + \"}\"\n future_tokens_str = (\n \"(\" + \", \".join(str(x) for x in database_sec.future_tokens) + \")\"\n )\n needs_escaping_str = \"Hello\\\\nWorld\\\\r!\\\\tHowdy?\"\n\n self.expected_database_str = (\n f\"DatabaseSection(user='{database_sec.user}',\"\n f\" password=Unset,\"\n f\" token={database_sec.token},\"\n f\" birthday={database_sec.birthday},\"\n f\" url='{database_sec.url}',\"\n f\" port={database_sec.port},\"\n f\" ultimate_question=None,\"\n f\" no_collection=None,\"\n f\" resources='{database_sec.resources}',\"\n f\" rel_resources='{database_sec.rel_resources}',\"\n f\" secure_mode={database_sec.secure_mode},\"\n f\" growth_factor={database_sec.growth_factor},\"\n f\" cats={cats_str},\"\n f\" dogs={dogs_str},\"\n f\" sasquatch=(),\"\n f\" big_foot={big_foot_str},\"\n f\" secret_files={secret_files_str},\"\n f\" more_files={more_files_str},\"\n f\" fibonacci={set(database_sec.fibonacci)},\"\n f\" more_fibonacci={database_sec.more_fibonacci},\"\n f\" holidays={holidays_str},\"\n f\" non_holidays={non_holidays_str},\"\n f\" old_tokens={old_tokens_str},\"\n f\" future_tokens={future_tokens_str},\"\n f\" needs_escaping='{needs_escaping_str}'\"\n f\")\"\n )\n\n self.expected_database_repr = (\n f\"DatabaseSection(\\n\"\n f\" user={repr(database_sec.user)},\\n\"\n f\" password=Unset,\\n\"\n f\" token={repr(database_sec.token)},\\n\"\n f\" birthday={repr(database_sec.birthday)},\\n\"\n f\" url={repr(database_sec.url)},\\n\"\n f\" port={repr(database_sec.port)},\\n\"\n f\" ultimate_question=None,\\n\"\n f\" no_collection=None,\\n\"\n f\" resources={repr(database_sec.resources)},\\n\"\n f\" rel_resources={repr(database_sec.rel_resources)},\\n\"\n f\" secure_mode={repr(database_sec.secure_mode)},\\n\"\n f\" growth_factor={repr(database_sec.growth_factor)},\\n\"\n 
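# (annotation added) repr() is used for each entry below so string values keep their quotes\n 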
f\" cats={repr(database_sec.cats)},\\n\"\n f\" dogs={repr(database_sec.dogs)},\\n\"\n f\" sasquatch={repr(database_sec.sasquatch)},\\n\"\n f\" big_foot={repr(database_sec.big_foot)},\\n\"\n f\" secret_files={repr(database_sec.secret_files)},\\n\"\n f\" more_files={repr(database_sec.more_files)},\\n\"\n f\" fibonacci={repr(database_sec.fibonacci)},\\n\"\n f\" more_fibonacci={repr(database_sec.more_fibonacci)},\\n\"\n f\" holidays={repr(database_sec.holidays)},\\n\"\n f\" non_holidays={repr(database_sec.non_holidays)},\\n\"\n f\" old_tokens={repr(database_sec.old_tokens)},\\n\"\n f\" future_tokens={repr(database_sec.future_tokens)},\\n\"\n f\" needs_escaping={repr(database_sec.needs_escaping)},\\n\"\n f\")\"\n )\n\n def test_pretty_section_str(self):\n for tps in collection_type_holders:\n with self.subTest(types=tps):\n sec = _get_testing_section_cls(tps)()\n self.assertEqual(self.expected_database_str, str(sec))\n\n def test_pretty_empty_section_str(self):\n sec = EmptySection()\n self.assertEqual(\"EmptySection()\", str(sec))\n\n def test_pretty_section_repr(self):\n for tps in collection_type_holders:\n with self.subTest(types=tps):\n sec = _get_testing_section_cls(tps)()\n self.assertEqual(self.expected_database_repr, repr(sec))\n\n def test_pretty_empty_section_repr(self):\n sec = EmptySection()\n self.assertEqual(\"EmptySection(\\n)\", repr(sec))\n\n def test_pretty_config_str(self):\n for tps in collection_type_holders:\n with self.subTest(types=tps):\n cfg = _get_big_testing_config_cls(tps)()\n self.assertEqual(\n f\"BigConfig(database={cfg.database}, empty={cfg.empty})\", str(cfg)\n )\n\n def test_pretty_empty_config_str(self):\n cfg = EmptyConfig()\n self.assertEqual(\"EmptyConfig()\", str(cfg))\n\n def test_pretty_config_repr(self):\n for tps in collection_type_holders:\n with self.subTest(types=tps):\n cfg = _get_big_testing_config_cls(tps)()\n self.assertEqual(\n (\n f\"BigConfig(\\n\"\n f\" database={indent_after_newline(repr(cfg.database))},\\n\"\n f\" empty={indent_after_newline(repr(cfg.empty))},\\n\"\n f\")\"\n ),\n repr(cfg),\n )\n\n def test_pretty_empty_config_repr(self):\n cfg = EmptyConfig()\n self.assertEqual(\"EmptyConfig(\\n)\", repr(cfg))\n\n def test_config_str_and_repr_after_mutation(self):\n class MySection(ConfigSection):\n my_entry: int = 42\n\n class MyConfig(Config):\n my_section: MySection\n\n cfg = MyConfig()\n update_section(cfg.my_section, my_entry=7)\n\n self.assertEqual(f\"MyConfig(my_section=MySection(my_entry=7))\", str(cfg))\n self.assertEqual(\n (\n f\"MyConfig(\\n\"\n f\" my_section=MySection(\\n\"\n f\" my_entry=7,\\n\"\n f\" ),\\n\"\n f\")\"\n ),\n repr(cfg),\n )\n\n def test_section_and_config_str_and_repr_with_secret(self):\n class MySection(ConfigSection):\n my_int: int = 42\n my_secret: SecretString\n my_none_secret: Optional[SecretString] = None\n\n class MyConfig(Config):\n my_section: MySection\n\n cfg = MyConfig()\n update_section(cfg.my_section, my_secret=\"hello, world!\")\n\n self.assertEqual(\n f\"MySection(my_int=42, my_secret='*****', my_none_secret=None)\",\n str(cfg.my_section),\n )\n self.assertEqual(\n (\n f\"MySection(\\n\"\n f\" my_int=42,\\n\"\n f\" my_secret={repr('*****')},\\n\"\n f\" my_none_secret=None,\\n\"\n f\")\"\n ),\n repr(cfg.my_section),\n )\n\n self.assertEqual(f\"MyConfig(my_section={cfg.my_section})\", str(cfg))\n self.assertEqual(\n (\n f\"MyConfig(\\n\"\n f\" my_section={indent_after_newline(repr(cfg.my_section))},\\n\"\n f\")\"\n ),\n repr(cfg),\n )\n\n def 
test_secret_string_masking_in_collections(self):\n for tps in collection_type_holders:\n with self.subTest(types=tps):\n\n class MySection(ConfigSection):\n my_tuple: tps.tuple[SecretString, ...]\n my_frozenset: Optional[tps.frozenset[SecretString]] = None\n my_empty_tuple: tps.tuple[SecretString, ...] = ()\n my_empty_frozenset: tps.frozenset[SecretString] = frozenset()\n my_none: Optional[tps.tuple[SecretString, ...]] = None\n\n class MyConfig(Config):\n my_section: MySection\n\n cfg = MyConfig()\n update_section(\n cfg.my_section,\n my_tuple=(\"hello\",),\n my_frozenset=frozenset((\"goodbye\", \"see ya!\")),\n )\n\n self.assertEqual(\n (\n \"MySection(my_tuple=('*****', ...), my_frozenset={'*****', ...},\"\n \" my_empty_tuple=(), my_empty_frozenset={}, my_none=None)\"\n ),\n str(cfg.my_section),\n )\n\n my_tuple_str = repr((...,)).replace(\n \"Ellipsis,\", f\"{repr('*****')}, ...\"\n )\n my_frozenset_str = repr(frozenset((...,))).replace(\n \"Ellipsis\", f\"{repr('*****')}, ...\"\n )\n\n self.assertEqual(\n (\n f\"MySection(\\n\"\n f\" my_tuple={my_tuple_str},\\n\"\n f\" my_frozenset={my_frozenset_str},\\n\"\n f\" my_empty_tuple={repr(())},\\n\"\n f\" my_empty_frozenset={repr(frozenset())},\\n\"\n f\" my_none=None,\\n\"\n f\")\"\n ),\n repr(cfg.my_section),\n )\n","repo_name":"NextKraftwerke/PyConfig","sub_path":"tests/test_pretty_printing.py","file_name":"test_pretty_printing.py","file_ext":"py","file_size_in_byte":12199,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"32"} +{"seq_id":"7394945094","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"add\", views.sell, name=\"add_product\"),\n path(\"product/\",views.prod_view,name=\"Prod_page\"),\n path(\"my_list\",views.mylist_page,name=\"mylist_page\"),\n path(\"update/\",views.update_prod,name=\"update_prod\"),\n path(\"watchlist\",views.watchList,name = \"watchlist\"),\n path(\"addtowatch/\",views.addToWatchlist,name = \"add_to_watchList\")\n]\n","repo_name":"sannny/e-Bay","sub_path":"commerce/auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9564701881","text":"import cv2\nimport numpy as np\n\n\ndef edge_detection(target):\n target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(target, 100, 200)\n return edges\n\ndef fourier_transform(target):\n target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)\n f = np.fft.fft2(target)\n fshift = np.fft.fftshift(f)\n magnitude_spectrum = 20 * np.log(np.abs(fshift))\n\n return magnitude_spectrum\n\ndef high_pass_filter(target):\n target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)\n f = np.fft.fft2(target)\n fshift = np.fft.fftshift(f)\n rows, cols = target.shape\n crow, ccol = rows // 2, cols // 2 # integer division: these are used as slice indices\n fshift[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0\n f_ishift = np.fft.ifftshift(fshift)\n img_back = np.fft.ifft2(f_ishift)\n img_back = np.abs(img_back)\n return img_back","repo_name":"Nickeroro/NASAGRAM","sub_path":"static/home/picturesfilters/other_filters/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"21608211169","text":"import pandas as pd\nfrom pandas import DataFrame\n\ndef get_top_n_books(n):\n ratings_file_path = \"static/dataset/BX-Book-Ratings.csv\"\n rating_data = DataFrame(pd.read_csv(\n ratings_file_path, header=0, encoding=\"ISO-8859-1\", sep=';'))\n\n ratings_counts = rating_data[\"ISBN\"].value_counts()\n\n isbns = ratings_counts.head(n).index.to_list()\n return isbns\n\n\ndef get_top_n_books_shifted(n):\n ratings_file_path = \"static/dataset/BX-Book-Ratings.csv\"\n rating_data = DataFrame(pd.read_csv(\n ratings_file_path, header=0, encoding=\"ISO-8859-1\", sep=';'))\n\n ratings_counts = rating_data[\"ISBN\"].value_counts()\n\n isbns = ratings_counts.head(n+2700)\n isbns = isbns.tail(n).index\n return isbns\n\ndef get_top_n_users_who_have_rated_xyz_books(n, xyz):\n rating_data = DataFrame(pd.read_csv(\n \"static/dataset/BX-Book-Ratings.csv\", header=0, encoding=\"ISO-8859-1\", sep=';'))\n ratings_for_chosen_books = rating_data[rating_data[\"ISBN\"].isin(xyz)]\n rating_users = ratings_for_chosen_books[\"User-ID\"].value_counts()\n rating_users = rating_users.head(1000).index.to_list()\n return rating_users","repo_name":"amir-rahim/BookClubSocialNetwork","sub_path":"BookClub/management/commands/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"26969090557","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def detectCycle(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: ListNode\r\n \"\"\"\r\n if head == None:\r\n return None\r\n #判断是否有圈\r\n slow, fast = head,head.next\r\n while slow != fast:\r\n if fast == None:\r\n return None\r\n slow = slow.next\r\n fast = fast.next\r\n if fast != None:\r\n fast = fast.next\r\n else:\r\n return None\r\n #从头和重合处分别遍历\r\n start,meet = head,slow.next\r\n while start != meet:\r\n start = start.next\r\n meet = meet.next\r\n return start","repo_name":"FengFengHan/LeetCode","sub_path":"Linked List Cycle II.py","file_name":"Linked List Cycle II.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36053567745","text":"from enum import Enum\nimport abc\nfrom typing import Optional, List, Tuple, Union\nfrom copy import deepcopy\n\n\nclass PieceTypes(Enum):\n pawn = \"Pawn\"\n rook = \"Rook\"\n knight = \"Knight\"\n bishop = \"Bishop\"\n king = \"King\"\n queen = \"Queen\"\n\n\nclass Color(Enum):\n white = \"White\"\n black = \"Black\"\n\n\ndef revert_color(color: Color) -> Color:\n if color == Color.white:\n return Color.black\n if color == Color.black:\n return Color.white\n\n\nclass Player:\n def __init__(self, name: str, color: Color) -> None:\n self.name = name\n self.color = color\n\nclass Piece(abc.ABC):\n piece_type: PieceTypes = None\n\n def __init__(self, color: Color) -> None:\n self.color = color\n\n @abc.abstractmethod\n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n ...\n\n def move(self, board: \"Board\", move: \"Move\") -> None:\n if not self.is_valid_move(board, move):\n raise ValueError(\"Invalid Move.\")\n board.positions[move.start.row][move.end.column].update(board, None)\n board.positions[move.end.row][move.end.column].update(board, move.start.piece)\n move.clone_positions()\n\n @abc.abstractmethod\n def 
get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n '''\n Get all possible moves by the piece.\n Moves which can result the own king in check are also included.\n '''\n ...\n\n def clone(self) -> \"Piece\":\n '''Creates a deep copy of instance.'''\n return deepcopy(self)\n\n\nclass Pawn(Piece):\n piece_type = PieceTypes.pawn\n\n def __init__(self, color: Color) -> None:\n self.color = color\n self.has_moved = False\n\n def _is_en_passant(self, board: \"Board\") -> bool:\n last_move = board.get_last_move()\n if last_move is None or last_move.start.piece.piece_type != PieceTypes.pawn:\n return False\n if abs(last_move.end.row-last_move.start.row) == 2:\n return True\n return False\n\n def _can_capture(self, board: \"Board\", move: \"Move\") -> bool:\n forward = 1 if self.color == Color.white else -1 # black pawns capture towards lower rows\n if move.end.row-move.start.row == forward and abs(move.end.column-move.start.column) == 1:\n if move.end.piece is not None:\n return True\n elif move.end.piece is None:\n en_passant_pos = board.positions[move.start.row][move.end.column]\n if en_passant_pos.piece is not None and en_passant_pos.piece.piece_type == PieceTypes.pawn:\n if self._is_en_passant(board) \\\n and self.color != en_passant_pos.piece.color:\n return True\n return False\n\n def _black_move_valid(self, board: \"Board\", move: \"Move\") -> bool:\n if move.start.column == move.end.column:\n delta_rows = move.start.row - move.end.row\n if delta_rows == 1 or (delta_rows == 2 and not self.has_moved):\n for row_in_between in range(move.start.row-1, move.end.row-1, -1):\n if board.positions[row_in_between][move.start.column].piece is not None:\n return False\n return True\n else:\n return False\n else:\n return self._can_capture(board, move)\n\n def _white_move_valid(self, board: \"Board\", move: \"Move\") -> bool:\n if move.start.column == move.end.column:\n delta_rows = move.end.row - move.start.row\n if delta_rows == 1 or (delta_rows == 2 and not self.has_moved):\n for row_in_between in range(move.start.row+1, move.end.row+1):\n if board.positions[row_in_between][move.start.column].piece is not None:\n return False\n return True\n else:\n return False\n else:\n return self._can_capture(board, move)\n\n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n '''Check if move is valid.'''\n\n if self.color == Color.black and not self._black_move_valid(board, move):\n return False\n elif self.color == Color.white and not self._white_move_valid(board, move):\n return False\n if move.can_result_in_check_of_own_king(board):\n return False\n return True\n\n def can_promote(self, move: \"Move\") -> bool:\n '''Check if pawn can be promoted.'''\n\n if (\n self.color == Color.white and move.end.row == 7\n ) or (\n self.color == Color.black and move.end.row == 0\n ):\n return True\n return False\n\n def move(self, board: \"Board\", move: \"Move\", promote: Optional[PieceTypes] = None) -> None:\n '''Move if valid.'''\n\n if not self.is_valid_move(board, move):\n raise ValueError(\"Invalid Move\")\n\n can_promote = self.can_promote(move)\n if can_promote and (promote is None or promote == PieceTypes.pawn):\n raise ValueError(\"Invalid Input for promote.\")\n if not can_promote and promote is not None:\n raise ValueError(\"Promote must be None.\")\n\n board.positions[move.start.row][move.start.column].update(board, None)\n if not can_promote:\n board.positions[move.end.row][move.end.column].update(board, \n move.start.piece)\n if self._is_en_passant(board):\n board.positions[move.start.row][move.end.column].update(board, \n None, True)\n else:\n 
board.positions[move.end.row][move.end.column].update(board, PieceFactory.create_piece(promote, self.color)) # place a real piece, not the PieceTypes value\n self.has_moved = True\n move.clone_positions()\n\n def get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n '''\n Get all possible moves by the piece.\n Moves which can result the own king in check are also included.\n '''\n poss_moves = []\n i = 1 if self.color is Color.white else -1\n if 0 <= position.row+i <= 7 and board.positions[position.row+i][position.column].piece is None:\n poss_moves.append(Move(\n position,\n board.positions[position.row+i][position.column]\n ))\n capture_positions = [(position.row+i, position.column+1),\n (position.row+i, position.column-1)]\n for row, col in capture_positions:\n if 0 <= row <= 7 and col >= 0 and col <= 7:\n cap_piece = board.positions[row][col].piece # (was [row-i][col], which looked at the pawn's own row)\n if cap_piece is not None and cap_piece.color != self.color:\n poss_moves.append(Move(\n position,\n board.positions[row][col]\n ))\n if self._is_en_passant(board):\n last_move = board.get_last_move()\n poss_moves.append(Move(\n position,\n board.positions[position.row+i][last_move.start.column]\n ))\n if not self.has_moved and board.positions[position.row+i][position.column].piece is None and \\\n board.positions[position.row+2*i][position.column].piece is None:\n poss_moves.append(Move(\n position,\n board.positions[position.row+2*i][position.column] # the double step stays on the pawn's own file\n ))\n return poss_moves\n\n\nclass Knight(Piece):\n piece_type = PieceTypes.knight\n\n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n if (abs(move.start.row-move.end.row) == 2 and abs(move.start.column-move.end.column) == 1) or \\\n (abs(move.start.row-move.end.row) == 1 and abs(move.start.column-move.end.column) == 2):\n if not move.can_result_in_check_of_own_king(board):\n return True\n return False\n\n def get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n possible_moves = []\n row, col = position.row, position.column\n for i in [2, -2]:\n for j in [1, -1]:\n for r, c in ((row+i, col+j), (row+j, col+i)):\n # explicit bounds check: negative indices would silently wrap around the board\n if 0 <= r <= 7 and 0 <= c <= 7:\n move = Move(\n position,\n board.positions[r][c]\n )\n if move.is_possibly_valid():\n possible_moves.append(move)\n return possible_moves\n\n\nclass Bishop(Piece):\n piece_type = PieceTypes.bishop\n\n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n if abs(move.end.row-move.start.row) == abs(move.end.column-move.start.column) != 0:\n i, j = 1 if move.end.row > move.start.row else - \\\n 1, 1 if move.end.column > move.start.column else -1\n return board.check_if_move_valid(move, i, j)\n return False\n\n def get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n moves = []\n for i in [1, -1]:\n for j in [1, -1]:\n for move in board.get_all_possible_moves_in_given_dir(position, i, j):\n if move.is_possibly_valid():\n moves.append(move)\n return moves\n\n\nclass Queen(Piece):\n piece_type = PieceTypes.queen\n\n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n if abs(move.end.row-move.start.row) == abs(move.end.column-move.start.column) != 0:\n i, j = 1 if move.end.row > move.start.row else - \\\n 1, 1 if move.end.column > move.start.column else -1\n return board.check_if_move_valid(move, i, j)\n elif abs(move.end.row-move.start.row) != 0 and abs(move.end.column-move.start.column) != 0:\n return False\n elif abs(move.end.row-move.start.row) == 0 or 
abs(move.end.column-move.start.column) == 0: # straight move: at least one axis is unchanged ('and' only matched a null move)\n rowdel, coldel = move.end.row - move.start.row, move.end.column - move.start.column\n if rowdel == 0 and coldel == 0:\n return False # a null move is never valid\n if rowdel == 0:\n i = 0\n if rowdel < 0:\n i = -1\n if rowdel > 0:\n i = 1\n if coldel == 0:\n j = 0\n if coldel < 0:\n j = -1\n if coldel > 0:\n j = 1\n return board.check_if_move_valid(move, i, j)\n return False\n\n def get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n moves = []\n for i in [1, -1, 0]:\n for j in [1, -1, 0]:\n if i == 0 and j == 0:\n continue\n for move in board.get_all_possible_moves_in_given_dir(position, i, j):\n if move.is_possibly_valid():\n moves.append(move)\n return moves\n\n\nclass Rook(Piece):\n piece_type = PieceTypes.rook\n\n def __init__(self, color: Color) -> None:\n super().__init__(color)\n self.has_moved = False\n\n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n if not move.is_castle:\n if abs(move.end.row-move.start.row) == 0 or abs(move.end.column-move.start.column) == 0: # one axis must stay fixed\n rowdel, coldel = move.end.row - move.start.row, move.end.column - move.start.column\n if rowdel == 0 and coldel == 0:\n return False\n if rowdel == 0:\n i = 0\n if rowdel < 0:\n i = -1\n if rowdel > 0:\n i = 1\n if coldel == 0:\n j = 0\n if coldel < 0:\n j = -1\n if coldel > 0:\n j = 1\n return board.check_if_move_valid(move, i, j)\n return False\n else:\n if move.start.piece.can_castle(board, move.start) and move.start1.piece.can_castle(board, move.start1):\n return not move.can_result_in_check_of_own_king(board)\n return False\n\n def can_castle(self, board: \"Board\", position: \"Position\") -> bool:\n if getattr(board, \"_in_attack_scan\", False):\n return False # castling never captures; bail out so attack scans cannot recurse back here\n if not self.has_moved:\n king_position = board.get_king_position(self.color)\n king: King = king_position.piece\n if not king.can_castle(board, king_position):\n return False\n i = 1 if position.column > king_position.column else -1\n col = king_position.column\n positions_should_not_be_under_attack = []\n for _ in range(3):\n positions_should_not_be_under_attack.append(board.positions[position.row][col])\n col += i\n for pos in positions_should_not_be_under_attack[1:]: # skip the king's own square when testing emptiness\n if pos.piece is not None:\n return False\n if board.are_positions_under_attack(positions_should_not_be_under_attack, revert_color(self.color)):\n return False\n return True\n return False\n\n def get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n moves = []\n for i, j in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n for move in board.get_all_possible_moves_in_given_dir(position, i, j):\n if move.is_possibly_valid(): moves.append(move)\n if self.can_castle(board, position):\n king_pos = board.get_king_position(self.color)\n i = 1 if position.column > king_pos.column else -1\n moves.append(\n Move(\n position,\n board.positions[king_pos.row][king_pos.column+i],\n True, \n king_pos,\n board.positions[king_pos.row][king_pos.column+2*i]\n )\n )\n return moves\n \n def move(self, board: \"Board\", move: \"Move\") -> None:\n if not move.is_castle:\n super().move(board, move)\n self.has_moved = True\n else:\n if not self.is_valid_move(board, move):\n raise ValueError(\"Invalid Move.\")\n board.positions[move.start.row][move.start.column].update(board, None) # clear the origin square (was move.end.column)\n board.positions[move.end.row][move.end.column].update(board, move.start.piece)\n board.positions[move.start1.row][move.start1.column].update(board, None)\n board.positions[move.end1.row][move.end1.column].update(board, move.start1.piece)\n move.clone_positions()\n board.positions[move.end.row][move.end.column].piece.has_moved = True\n board.positions[move.end1.row][move.end1.column].piece.has_moved = True\n\nclass 
King(Piece):\n piece_type = PieceTypes.king\n\n def __init__(self, color: Color) -> None:\n super().__init__(color)\n self.has_moved = False\n self.has_been_checked = False\n\n def can_castle(self, board: \"Board\", position: \"Position\") -> bool:\n return not self.has_moved and not self.has_been_checked\n \n def is_valid_move(self, board: \"Board\", move: \"Move\") -> bool:\n if not move.is_castle:\n if abs(move.end.row-move.start.row) <= 1 and abs(move.end.column-move.start.column) <= 1 and \\\n (move.end.row != move.start.row or move.end.column != move.start.column):\n # pass the step direction, not the destination coordinates\n return board.check_if_move_valid(move, move.end.row - move.start.row, move.end.column - move.start.column)\n return False\n else:\n if move.start.piece.can_castle(board, move.start) and move.start1.piece.can_castle(board, move.start1):\n return not move.can_result_in_check_of_own_king(board)\n return False\n \n def get_all_possible_moves(self, board: \"Board\", position: \"Position\") -> List[\"Move\"]:\n moves = []\n row = position.row\n col = position.column\n for i, j in [(1, 0), (1, 1), (1, -1), (0, 1), (0, -1), (-1, 0), (-1, -1), (-1, 1)]:\n if 0 <= row+i <= 7 and 0 <= col+j <= 7:\n move = Move(\n position,\n board.positions[row+i][col+j]\n )\n if move.is_possibly_valid():\n moves.append(move)\n\n if self.can_castle(board, position): # the bare 'self.can_castle' was always truthy\n initial_rook_positions = [(0, 0), (0, 7)] if self.color == Color.white else [(7, 0), (7, 7)]\n for (x, y) in initial_rook_positions:\n rook_pos = board.positions[x][y]\n if rook_pos.piece is None or rook_pos.piece.piece_type != PieceTypes.rook:\n continue\n if rook_pos.piece.has_moved or not rook_pos.piece.can_castle(board, rook_pos):\n continue\n i = 1 if position.column < rook_pos.column else -1\n moves.append(\n Move(\n position,\n board.positions[position.row][position.column+2*i],\n True, \n rook_pos,\n board.positions[position.row][position.column+i]\n )\n )\n return moves\n \n def move(self, board: \"Board\", move: \"Move\") -> None:\n if not move.is_castle:\n super().move(board, move)\n self.has_moved = True\n else:\n if not self.is_valid_move(board, move):\n raise ValueError(\"Invalid Move.\")\n board.positions[move.start.row][move.start.column].update(board, None) # clear the origin square (was move.end.column)\n board.positions[move.end.row][move.end.column].update(board, move.start.piece)\n board.positions[move.start1.row][move.start1.column].update(board, None)\n board.positions[move.end1.row][move.end1.column].update(board, move.start1.piece)\n move.clone_positions()\n board.positions[move.end.row][move.end.column].piece.has_moved = True\n board.positions[move.end1.row][move.end1.column].piece.has_moved = True\n\nclass PieceFactory:\n @classmethod\n def create_piece(cls, piece: PieceTypes, color: Color) -> Piece:\n if piece == PieceTypes.pawn:\n return Pawn(color)\n if piece == PieceTypes.rook:\n return Rook(color)\n if piece == PieceTypes.knight:\n return Knight(color)\n if piece == PieceTypes.bishop:\n return Bishop(color)\n if piece == PieceTypes.king:\n return King(color)\n if piece == PieceTypes.queen:\n return Queen(color)\n\n\nclass Position:\n '''\n Could have made a simple dataclass but making it a class gives opportunity \n to define further functions if needed.\n '''\n\n def __init__(\n self, row: int, column: int, color: Color, piece: Optional[Piece] = None\n ) -> None:\n '''\n x, y must be integers from 0 to 7.\n '''\n if row not in [0, 1, 2, 3, 4, 5, 6, 7] or column not in [0, 1, 2, 3, 4, 5, 6, 7]:\n raise ValueError(\n \"x and y must be an integer between 0 to 7 (inclusive both)\")\n self.row = row\n self.column = column\n self.piece = piece\n self.color = color\n\n def update(self, board: 
\"Board\", piece: Optional[Piece] = None, en_passant_capture: bool = False) -> None:\n self.piece = piece\n if self.piece.piece_type == PieceTypes.king:\n board.king_positions[self.piece.color] = self\n\n def clone(self) -> \"Position\":\n '''Clone the position.'''\n return self.clone()\n\n\nclass Board:\n def __init__(self, white_player: \"Player\", black_player: \"Player\"):\n self.positions: List[List[Optional[Position]]] = [\n [None for _ in range(8)] for _ in range(8)]\n\n self.positions[0] = [\n Position(0, 0, Color.black, PieceFactory.create_piece(\n PieceTypes.rook, Color.white)),\n Position(0, 1, Color.white, PieceFactory.create_piece(\n PieceTypes.knight, Color.white)),\n Position(0, 2, Color.black, PieceFactory.create_piece(\n PieceTypes.bishop, Color.white)),\n Position(0, 3, Color.white, PieceFactory.create_piece(\n PieceTypes.queen, Color.white)),\n Position(0, 4, Color.black, PieceFactory.create_piece(\n PieceTypes.king, Color.white)),\n Position(0, 5, Color.white, PieceFactory.create_piece(\n PieceTypes.bishop, Color.white)),\n Position(0, 6, Color.black, PieceFactory.create_piece(\n PieceTypes.knight, Color.white)),\n Position(0, 7, Color.white, PieceFactory.create_piece(\n PieceTypes.rook, Color.white)),\n ]\n self.positions[7] = [\n Position(7, 0, Color.white, PieceFactory.create_piece(\n PieceTypes.rook, Color.black)),\n Position(7, 1, Color.black, PieceFactory.create_piece(\n PieceTypes.knight, Color.black)),\n Position(7, 2, Color.white, PieceFactory.create_piece(\n PieceTypes.bishop, Color.black)),\n Position(7, 3, Color.black, PieceFactory.create_piece(\n PieceTypes.queen, Color.black)),\n Position(7, 4, Color.white, PieceFactory.create_piece(\n PieceTypes.king, Color.black)),\n Position(7, 5, Color.black, PieceFactory.create_piece(\n PieceTypes.bishop, Color.black)),\n Position(7, 6, Color.white, PieceFactory.create_piece(\n PieceTypes.knight, Color.black)),\n Position(7, 7, Color.black, PieceFactory.create_piece(\n PieceTypes.rook, Color.black)),\n ]\n cnt_color = Color.white\n for i in range(1, 7):\n for j in range(8):\n if i == 1:\n self.positions[i][j] = Position(\n i, j, cnt_color, PieceFactory.create_piece(\n PieceTypes.pawn, Color.white)\n )\n elif i == 6:\n self.positions[i][j] = Position(\n i, j, cnt_color, PieceFactory.create_piece(\n PieceTypes.pawn, Color.black)\n )\n else:\n self.positions[i][j] = Position(\n i, j, cnt_color, None\n )\n cnt_color = revert_color(cnt_color)\n cnt_color = revert_color(cnt_color)\n\n self.king_positions = {\n Color.white: self.positions[0][4], Color.black: self.positions[7][4]}\n self.white = white_player\n self.black = black_player\n self._last_move = None\n\n def are_positions_under_attack(self, positions: List[\"Position\"], color: \"Color\") -> bool:\n '''Check if any of given positions under attack by pieces of given color.'''\n coords = [(pos.row, pos.col) for pos in positions]\n for i in range(8):\n for j in range(8):\n cntpos = self.positions[i][j]\n if cntpos.piece is not None and cntpos.piece.color != color:\n if cntpos.piece.piece_type != PieceTypes.pawn:\n for move in cntpos.piece.get_all_possible_moves(self, cntpos):\n if (move.end.row, move.end.column) in coords:\n return True\n else:\n if color == Color.white:\n if (cntpos.row+1, cntpos.column+1) in coords or (cntpos.row+1, cntpos.column-1) in coords:\n return True\n else:\n if (cntpos.row-1, cntpos.column+1) in coords or (cntpos.row-1, cntpos.column-1) in coords:\n return True\n return False\n\n def is_king_in_check(self, color: Color) -> bool:\n '''Checks 
if given color king is in check.'''\n king_pos = self.king_positions[color]\n return self.are_positions_under_attack(positions=[king_pos], color = revert_color(color))\n\n def clone(self) -> \"Board\":\n return deepcopy(self)\n\n def get_last_move(self) -> \"Union[Move, None]\":\n return self._last_move\n\n def get_king_position(self, color:\"Color\") -> Position:\n return self.king_positions[color]\n\n def get_all_possible_moves_in_given_dir(self, position: \"Position\", i: int, j: int) -> List[\"Move\"]:\n if i < -1 or i > 1 or j < -1 or j > 1:\n raise ValueError(\"Invalid Value of i and j.\")\n cntrow = position.row\n cntcol = position.column\n cntrow += i\n cntcol += j\n moves = []\n while True:\n if 0 <= cntrow <= 7 and 0 <= cntcol <= 7:\n cntpos = self.positions[cntrow][cntcol]\n moves.append(Move(position, cntpos))\n if cntpos.piece is not None:\n break\n cntrow += i # advance along the direction; without this the loop never moved\n cntcol += j\n else:\n break\n return moves\n\n def check_if_move_valid(self, move: \"Move\", i: int, j: int) -> bool:\n if i < -1 or i > 1 or j < -1 or j > 1:\n raise ValueError(\"Invalid Value of i and j.\")\n # (the original recomputed i and j here, which broke straight moves that need a 0 component)\n row, col = move.start.row, move.start.column\n row += i\n col += j\n while row != move.end.row or col != move.end.column: # 'and' skipped the scan for straight moves\n if self.positions[row][col].piece is not None:\n return False\n row += i\n col += j\n if not move.can_result_in_check_of_own_king(self):\n return True\n return False\n\n\nclass Move:\n def __init__(\n self,\n start: Position,\n end: Position,\n is_castle=False,\n start1: Optional[Position] = None,\n end1: Optional[Position] = None\n ) -> None:\n if is_castle and (start1 is None or end1 is None):\n raise ValueError(\n \"In case of castling, start1 and end1 must be given.\")\n if not is_castle and (start1 is not None or end1 is not None):\n raise ValueError(\n \"in case of simple move, start1 and end1 need not be given.\")\n self.start = start\n self.end = end\n self.is_castle = is_castle\n self.start1 = start1\n self.end1 = end1\n\n def is_possibly_valid(self) -> bool:\n if self.start.piece is None or (\n self.end.piece is not None and self.end.piece.color==self.start.piece.color\n ):\n return False\n if self.is_castle and (\n self.start1.piece is None or self.end1.piece is not None or\n {self.start.piece.piece_type, self.start1.piece.piece_type} != {PieceTypes.rook, PieceTypes.king} or\n self.start.piece.color != self.start1.piece.color\n ): # parenthesised: the old flat chain dereferenced end1 (None) for ordinary moves\n return False\n return True\n\n def clone_positions(self) -> None:\n '''Clones the pieces saved in the positions and saves them in the self.'''\n self.start = self.start.clone()\n self.end = self.end.clone()\n self.start1 = self.start1.clone() if self.start1 is not None else None\n self.end1 = self.end1.clone() if self.end1 is not None else None\n\n def can_result_in_check_of_own_king(self, board: \"Board\") -> bool:\n # play the move on a clone before testing for check; the original cloned the\n # board but never applied the move, so the check always saw the old position\n # (the pawn removed by an en passant capture is not simulated here)\n temp_board = board.clone()\n color = self.start.piece.color\n temp_board.positions[self.end.row][self.end.column].update(temp_board, self.start.piece)\n temp_board.positions[self.start.row][self.start.column].update(temp_board, None)\n if self.is_castle:\n temp_board.positions[self.end1.row][self.end1.column].update(temp_board, self.start1.piece)\n temp_board.positions[self.start1.row][self.start1.column].update(temp_board, None)\n return temp_board.is_king_in_check(color=color)\n\n\nclass MoveObserver:\n def __init__(self):\n self._moves = []\n\n def add(self, move: Move) -> None:\n self._moves.append(move)\n\n\nclass Chess:\n def __init__(self, player1_name: str, player2_name: str) -> None:\n self.white = Player(player1_name, Color.white)\n self.black = Player(player2_name, Color.black)\n self.turn = Color.white\n self.board = Board(self.white, self.black)\n self.move_observer = MoveObserver()\n\n def _change_turn(self):\n self.turn = revert_color(self.turn)\n\n def _verify_coord(self, coords: List[int]) -> None:\n for c in coords:\n if c<0 
or c>7:\n raise ValueError(\"Wrong Coordinates.\")\n \n def _get_pos(self, x: int, y: int) -> Position:\n return self.board.positions[x][y]\n\n def get_all_possible_moves(self, x: int, y: int) -> List[Move]:\n '''x and y considering left corner of white as 0, 0'''\n self._verify_coord([x, y])\n position = self._get_pos(x, y)\n if position.piece is None or position.piece.color != self.turn:\n raise ValueError(\"Only the player with his turn can access this function.\")\n return position.piece.get_all_possible_moves(self.board, position)\n \n def move(self, move: Move) -> None:\n if move.start.piece is None or move.start.piece.color != self.turn or not move.is_possibly_valid():\n raise ValueError(\"Only player with current turn can move.\")\n move.start.piece.move(self.board, move)\n self.board._last_move = move # record the move so get_last_move()/en passant can see it\n self._change_turn()\n if self.board.is_king_in_check(self.turn):\n self.board.king_positions[self.turn].piece.has_been_checked = True # the flag lives on the King piece, not the Position\n self.move_observer.add(move)","repo_name":"sineshashi/Chess","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37648751109","text":"import os\nimport sys\nimport torch\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nfrom sklearn.metrics import f1_score\nfrom tqdm import trange\n\ndef L2Loss(model,alpha=0.0000007):\n l2_loss = torch.tensor(0.0,requires_grad = True)\n for name,parma in model.named_parameters():\n if 'bias' not in name:\n l2_loss = l2_loss + (0.5*alpha * torch.sum(torch.pow(parma,2)))\n return l2_loss\n\ndef train(train_iter, dev_iter, model, args):\n if args.cuda:\n model.cuda()\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n best_acc = 0\n\n if not os.path.isdir(args.save_dir):\n os.makedirs(args.save_dir)\n f = open(args.save_dir+\"/loss.csv\", 'w')\n f.write(\"epoch, loss, evl_loss, f1, evl_f1\\n\") # newline so the first data row starts on its own line\n\n for epoch in range(1, args.epochs+1):\n print('----------------------')\n print('Epoch: {}/{}'.format(epoch, args.epochs))\n avg_loss = 0\n step = 0\n batch_num = len(train_iter.dataset)//train_iter.batch_size + 1\n avg_f1 = 0\n for batch in train_iter:\n step += 1\n model.train()\n feature, target = batch.text, batch.label\n feature.t_() # batch first, index align\n if args.cuda:\n feature, target = feature.cuda(), target.cuda()\n\n optimizer.zero_grad()\n logit = model(feature)\n loss = F.cross_entropy(logit, target) + L2Loss(model)\n loss.backward()\n optimizer.step()\n avg_f1 += f1_score(torch.max(logit, 1)[1].view(target.size()).data.cpu(), target.data.cpu())\n\n avg_loss += loss.item()\n corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n accuracy = 100.0 * corrects/batch.batch_size\n sys.stdout.write(\n '\\rBatch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(step, batch_num,\n loss.item(),\n accuracy.item(),\n corrects.item(),\n batch.batch_size))\n\n avg_loss /= len(train_iter.dataset)\n avg_loss *= train_iter.batch_size\n avg_f1 /= batch_num\n print('\\nTraining - avg_loss: {:.6f} avg_f1: {:.6f}'.format(avg_loss, avg_f1))\n\n dev_avg_loss, dev_acc, evl_f1 = eval(dev_iter, model, args)\n if dev_acc > best_acc:\n best_acc = dev_acc\n if args.save_best:\n save(model, args.save_dir, 'best', epoch)\n\n if epoch % args.save_interval == 0:\n save(model, args.save_dir, 'snapshot', epoch)\n\n f.write('{}, {:.6f}, {:.6f}, {:.6f}, {:6f}\\n'.format(epoch, avg_loss, dev_avg_loss, avg_f1, evl_f1))\n\n f.close()\n\n\ndef eval(data_iter, model, args):\n 
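# Evaluation pass (added note): accumulates the summed cross-entropy loss and a per-batch F1 average over data_iter.\n 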
model.eval()\n corrects, avg_loss = 0, 0\n\n step = 0\n batch_num = len(data_iter.dataset)//data_iter.batch_size + 1\n correct_num = 0\n f1_avg = 0\n # if args.cuda:\n # f1_avg.cuda()\n\n for batch in data_iter:\n step += 1\n feature, target = batch.text, batch.label\n feature.t_() # batch first, index align\n if args.cuda:\n feature, target = feature.cuda(), target.cuda()\n\n logit = model(feature)\n loss = F.cross_entropy(logit, target, reduction='sum') + L2Loss(model)\n\n avg_loss += loss.item()\n corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n correct_num += corrects\n\n f1_avg += f1_score(torch.max(logit, 1)[1].view(target.size()).data.cpu(), target.data.cpu())\n\n accuracy = 100.0 * corrects / batch.batch_size\n sys.stdout.write(\n '\\rBatch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(step, batch_num,\n loss.item(),\n accuracy.item(),\n corrects.item(),\n batch.batch_size))\n\n avg_loss /= len(data_iter.dataset)\n f1_avg /= batch_num\n accuracy = correct_num/len(data_iter.dataset)*100\n print('\\nEvaluation - avg_loss: {:.6f} acc: {:.4f}%({}/{}) F1_avg: {:.4f}'.format(avg_loss,\n accuracy,\n correct_num,\n len(data_iter.dataset),\n f1_avg))\n return avg_loss, accuracy, f1_avg\n\n\ndef predict(text, model, text_field, label_feild, cuda_flag):\n assert isinstance(text, str)\n model.eval()\n # text = text_field.tokenize(text)\n text = text_field.preprocess(text)\n text = [[text_field.vocab.stoi[x] for x in text]]\n x = torch.tensor(text)\n x = autograd.Variable(x)\n if cuda_flag:\n x = x.cuda()\n print(x)\n output = model(x)\n _, predicted = torch.max(output, 1)\n return label_feild.vocab.itos[predicted.item()+1]\n\n\ndef kaggle_test(tweets, model, text_field, label_field, cuda_flag):\n model.eval()\n results = []\n tweets_vec = [[text_field.vocab.stoi[w] for w in l.split(\" \")] for l in tweets]\n tweets_vec = [i+[0]*(30-len(i)) for i in tweets_vec]\n for i in trange(len(tweets_vec)):\n veci = tweets_vec[i]\n x = torch.tensor([veci])\n x = autograd.Variable(x)\n if cuda_flag:\n x = x.cuda()\n output = model(x)\n _, predicted = torch.max(output, 1)\n results.append(int(predicted))\n\n return results\n\ndef save(model, save_dir, save_prefix, steps):\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n save_prefix = os.path.join(save_dir, save_prefix)\n save_path = '{}_epoch_{}.pt'.format(save_prefix, steps)\n torch.save(model.state_dict(), save_path)\n","repo_name":"x1aotian/twitter_analysis","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34295717127","text":"from string import punctuation, digits\nimport numpy as np\nimport random\n\n\n\n# Part I\n\n\n#pragma: coderesponse template\ndef get_order(n_samples):\n try:\n with open(str(n_samples) + '.txt') as fp:\n line = fp.readline()\n return list(map(int, line.split(',')))\n except FileNotFoundError:\n random.seed(1)\n indices = list(range(n_samples))\n random.shuffle(indices)\n return indices\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef hinge_loss_single(feature_vector, label, theta, theta_0):\n\n y = np.dot(theta, feature_vector) + theta_0\n loss = max(0.0, 1 - y * label)\n return loss\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef hinge_loss_full(feature_matrix, labels, theta, theta_0):\n\n loss = 0\n for i in range(len(feature_matrix)):\n loss += hinge_loss_single(feature_matrix[i], labels[i], 
theta, theta_0)\n return loss / len(labels)\n\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef perceptron_single_step_update(\n feature_vector,\n label,\n current_theta,\n current_theta_0):\n if label * (np.dot(current_theta, feature_vector) + current_theta_0) <= 0:\n current_theta += label * feature_vector\n current_theta_0 += label\n return (current_theta, current_theta_0)\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef perceptron(feature_matrix, labels, T):\n (nsamples, nfeatures) = feature_matrix.shape\n theta = np.zeros(nfeatures)\n theta_0 = 0.0\n\n for t in range(T):\n for i in get_order(feature_matrix.shape[0]):\n # Your code here\n theta, theta_0 = perceptron_single_step_update(\n feature_matrix[i], labels[i], theta, theta_0)\n return (theta, theta_0)\npass\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef average_perceptron(feature_matrix, labels, T):\n (nsamples, nfeatures) = feature_matrix.shape\n theta = np.zeros(nfeatures)\n theta_sum = np.zeros(nfeatures)\n theta_0 = 0.0\n theta_0_sum = 0.0\n for t in range(T):\n for i in get_order(nsamples):\n theta, theta_0 = perceptron_single_step_update(\n feature_matrix[i], labels[i], theta, theta_0)\n theta_sum += theta\n theta_0_sum += theta_0\n return (theta_sum / (nsamples * T), theta_0_sum / (nsamples * T))\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef pegasos_single_step_update(\n feature_vector,\n label,\n L,\n eta,\n current_theta,\n current_theta_0):\n mult = 1 - (eta * L)\n if label * (np.dot(feature_vector, current_theta) + current_theta_0) <= 1:\n return ((mult * current_theta) + (eta * label * feature_vector),\n (current_theta_0) + (eta * label))\n return (mult * current_theta, current_theta_0)\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef pegasos(feature_matrix, labels, T, L):\n (nsamples, nfeatures) = feature_matrix.shape\n theta = np.zeros(nfeatures)\n theta_0 = 0\n count = 0\n for t in range(T):\n for i in get_order(nsamples):\n count += 1\n eta = 1.0 / np.sqrt(count)\n (theta, theta_0) = pegasos_single_step_update(\n feature_matrix[i], labels[i], L, eta, theta, theta_0)\n return (theta, theta_0)\n\n#pragma: coderesponse end\n\n# Part II\n\n\n#pragma: coderesponse template\ndef classify(feature_matrix, theta, theta_0):\n (nsamples, nfeatures) = feature_matrix.shape\n predictions = np.zeros(nsamples)\n for i in range(nsamples):\n feature_vector = feature_matrix[i]\n prediction = np.dot(theta, feature_vector) + theta_0\n if (prediction > 0):\n predictions[i] = 1\n else:\n predictions[i] = -1\n return predictions\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef classifier_accuracy(\n classifier,\n train_feature_matrix,\n val_feature_matrix,\n train_labels,\n val_labels,\n **kwargs):\n\n theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)\n train_predictions = classify(train_feature_matrix, theta, theta_0)\n val_predictions = classify(val_feature_matrix, theta, theta_0)\n train_accuracy = accuracy(train_predictions, train_labels)\n validation_accuracy = accuracy(val_predictions, val_labels)\n return (train_accuracy, validation_accuracy)\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef extract_words(input_string):\n\n for c in punctuation + digits:\n input_string = input_string.replace(c, ' ' + c + ' ')\n\n return input_string.lower().split()\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef bag_of_words(texts):\n stop_words = {}\n with 
open(\"stopwords.txt\") as f_stop:\n for line in f_stop:\n s_line = line.rstrip()\n stop_words[s_line] = len(stop_words)\n\n dictionary = {} # maps word to unique index\n for text in texts:\n word_list = extract_words(text)\n for word in word_list:\n if word in stop_words:continue\n if word not in dictionary:\n dictionary[word] = len(dictionary)\n\n return dictionary\n\n#pragma: coderesponse end\n\n\n#pragma: coderesponse template\ndef extract_bow_feature_vectors(reviews, dictionary):\n\n\n\n num_reviews = len(reviews)\n feature_matrix = np.zeros([num_reviews, len(dictionary)])\n\n for i, text in enumerate(reviews):\n word_list = extract_words(text)\n for word in word_list:\n if word in dictionary:\n feature_matrix[i, dictionary[word]] += 1\n return feature_matrix\n#pragma: coderesponse end\n\n\n\n#pragma: coderesponse template\ndef accuracy(preds, targets):\n\n return (preds == targets).mean()\n#pragma: coderesponse end\n","repo_name":"zlaku72/Sentiment_Analysis-","sub_path":"sentiment_analysis/project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30888813390","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : np_where.py\n@Time : 2019/11/05 17:09:24\n@Author : Jeffrey Wang\n@Version : 1.0\n@Contact : shwangjj@163.com\n@Desc : np.where 函数示例\n\nwhere(条件, 是的值, 否的值)\n\n API DOC:\n https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.where.html\n\n'''\nimport numpy as np\nimport pandas as pd\n\n\ndef demo_np_where():\n # 示范:找出price大于0.5的数设置1, 否则设置-1\n dict_data = {\n 'date': pd.date_range('20190101', periods=10),\n 'price': np.random.rand(10)\n }\n data = pd.DataFrame(dict_data)\n data.set_index('date', inplace=True)\n # 使用:\n data['position'] = np.where(data['price'] > 0.5, 1, -1)\n print(data)\n\n\nif __name__ == \"__main__\":\n demo_np_where()\n","repo_name":"shwdbd/python_codepool","sub_path":"src/main/python/wdbd/codepool/numpy/np_where.py","file_name":"np_where.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71001820250","text":"import requests\r\nfrom PIL import Image\r\n\r\nimage_data = open(\"family.jpg\", \"rb\").read()\r\nimage = Image.open(\"family.jpg\").convert(\"RGB\")\r\n\r\nresponse = requests.post(\r\n \"http://localhost:80/v1/vision/face/recognize\", files={\"image\": image_data}\r\n).json()\r\nprint(response)\r\ni = 0\r\nfor face in response[\"predictions\"]:\r\n\r\n y_max = int(face[\"y_max\"])\r\n y_min = int(face[\"y_min\"])\r\n x_max = int(face[\"x_max\"])\r\n x_min = int(face[\"x_min\"])\r\n cropped = image.crop((x_min, y_min, x_max, y_max))\r\n\r\n cropped.save(\"image{}.jpg\".format(i))\r\n\r\n i += 1\r\n","repo_name":"johnolafenwa/DeepStack","sub_path":"demo/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":605,"dataset":"github-code","pt":"32"} +{"seq_id":"6112552252","text":"########################################################################################\n###################################### __init__.py #####################################\n##### 'The __init__.py serves double duty: it will contain the application factory, ####\n##### and it tells Python that the flaskr directory should be treated as a package' 
####\n########################################################################################\n\n\n########################################################################################\n######################################### IMPORTS ######################################\n########################################################################################\nfrom flask import Flask\nimport os\nfrom . import db, globalvariables\nfrom urllib3 import logging #Removes request warnings from console\nfrom flask_socketio import SocketIO\n########################################################################################\n\ngv_socketio = SocketIO(logger=True, engineio_logger=True)\n\n########################################################################################\n################################## APPLICATION FACTORY #################################\n########################################################################################\n'''--> disable insecurewarnings'''\n #disable insecurewarnings in terminal when performing api requests without certificate: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\nlogging.captureWarnings(True)\n\ndef create_app():\n #instance_relative_config=True --> instance folder is located outside of main (in this case 'spotr') folder.\n app = Flask(__name__, instance_relative_config=True)\n\n #default configuration settings. 'spotr.sqlite' will be located in instance folder.\n app.config.from_mapping(\n SECRET_KEY='dev',\n DATABASE=os.path.join(app.instance_path, 'spotr.sqlite'),\n )\n\n #overrides default configuration with values written in 'config.py'.gi\n app.config.from_pyfile('config.py', silent=True)\n\n #initialize plugins\n globalvariables.init()\n\n #socketio extension\n # gv_socketio = SocketIO()\n gv_socketio.init_app(app)\n\n #instance folder exists?\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n #register 'close_db' and 'init_db_command' with application instance\n db.init_app(app)\n\n with app.app_context():\n app.gv_socketio = gv_socketio\n\n #register blueprint(s)\n from . 
import watchlist, home, database, search, everynoise, discover, create, _import, autosearch #blueprints\n\n app.register_blueprint(watchlist.bp_watchlist)\n app.register_blueprint(home.bp_home)\n app.register_blueprint(database.bp_database)\n app.register_blueprint(search.bp_search)\n app.register_blueprint(everynoise.bp_everynoise)\n app.register_blueprint(discover.bp_discover)\n app.register_blueprint(create.bp_create)\n app.register_blueprint(_import.bp_import)\n app.register_blueprint(autosearch.bp_autosearch)\n\n return app\n########################################################################################","repo_name":"vdthh/spotr","sub_path":"spotr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3608646449","text":"# Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.\n\n# Follow up: If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.\n\n \n\n# Example 1:\n\n# Input: nums = [-2,1,-3,4,-1,2,1,-5,4]\n# Output: 6\n# Explanation: [4,-1,2,1] has the largest sum = 6.\n\ndef maxSubArray(nums):\n max_sums = nums[0]\n for i in range(1, len(nums)):\n nums[i] = max(nums[i]+nums[i-1], nums[i])\n if nums[i] > max_sums:\n max_sums = nums[i]\n\n return max_sums\n\nnums = [50,-10,-3,4,-1,2,1,-5,4]\n\nprint(maxSubArray(nums))\n","repo_name":"Maxwell2016LeChouchou/coding","sub_path":"leetcode/python/maxSubArray_53.py","file_name":"maxSubArray_53.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70639522013","text":"import pandas as pd\nimport pickle\nfrom tqdm import tqdm\nimport argparse\nfrom time import sleep\nfrom tqdm import tqdm\n\nimport torch\n\nfrom config import DefaultConfig\nfrom model import CustomModel\nfrom preprocessing import CustomDataset\n\ndef inference(model, test_dataloader):\n \"\"\"\n You can customize this code to fit your task.\n \"\"\"\n preds_lst = []\n with torch.no_grad():\n print(\"Inference....\")\n model.eval()\n bar = tqdm(enumerate(test_dataloader), total=len(test_dataloader))\n for idx, items in bar:\n sleep(0.1)\n item = {key: val.to(device) for key,val in items.items()}\n outputs = model(**item)\n loss = criterion(outputs, item['labels'].view(-1, 1).float())\n \n preds_lst.append(loss.item())\n \n print(f\"Loss: {sum(preds_lst)/len(preds_lst)}\")\n logger.info(f\"Loss: {sum(preds_lst)/len(preds_lst)}\")\n \n \n\nif __name__ == '__main__':\n config = DefaultConfig()\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', type=str, default='data/test_dataloader.pkl', help=\"test dataloader path\")\n parser.add_argument('--model_path', type=str, default='data/model.bin', help=\"saved model path\")\n\n args = parser.parse_args()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n \n print(\"Loading Test DataLoader...\")\n test_dataloader = pickle.load(open(args.path, 'rb'))\n \n print(\"Loading saved model...\")\n model = CustomModel(config.MODEL_CONFIG)\n model.parameters\n model.to(device)\n model.load_state_dict(torch.load(args.model_path, map_location=device))\n \n # Inference\n inference(model, test_dataloader)\n print(\"Inference Finish!\")\n 
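# NOTE (added): inference() above references 'criterion' and 'logger', which this script never\n # defines; they are assumed to be provided by the surrounding training setup/module scope.\n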
","repo_name":"kookeej/pytorch_transformers_framework","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26777199406","text":"import RPi.GPIO as GPIO\nimport time\nimport os\nfrom packages import send_email, send_message, get_response\nfrom utils import parse_reponse, get_current_video, get_lastn_videos\n\n\n#disable warnings (optional)\nGPIO.setwarnings(False)\n#Select GPIO Mode\nGPIO.setmode(GPIO.BCM)\n\n\nSERVER_ADDRESS = \"http://192.168.43.88:5000\"\nSAVE_VIDEO_DIR = \"temp/stream\"\nLAST_N = 25\nMAX_DIST = 5\n\n# Define GPIO pins\nPIN_RED = 13\nPIN_GREEN = 19\nPIN_BLUE = 26\nPIN_BUZZER = 27\nPIN_SWITCH = 22\nPIN_TRIG1 = 9 \nPIN_ECHO1 = 10\nPIN_TRIG2 = 23 \nPIN_ECHO2 = 24\n\n# Set GPIO pins as outputs\nGPIO.setup(PIN_RED, GPIO.OUT)\nGPIO.setup(PIN_GREEN, GPIO.OUT)\nGPIO.setup(PIN_BLUE, GPIO.OUT)\nGPIO.setup(PIN_BUZZER, GPIO.OUT)\nGPIO.setup(PIN_TRIG1, GPIO.OUT)\nGPIO.setup(PIN_ECHO1, GPIO.IN) \nGPIO.setup(PIN_TRIG2, GPIO.OUT)\nGPIO.setup(PIN_ECHO2, GPIO.IN)\n\n# Set switch output line as an input\nGPIO.setup(PIN_SWITCH, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\ndef color(red_color, green_color, blue_color):\n GPIO.output(PIN_RED, red_color)\n GPIO.output(PIN_GREEN, green_color)\n GPIO.output(PIN_BLUE, blue_color)\n\ndef prox(echo_pin, trig_pin):\n # Send trigger signal\n GPIO.output(trig_pin, GPIO.HIGH)\n time.sleep(0.00001)\n GPIO.output(trig_pin, GPIO.LOW) \n pulse_start = pulse_end = None\n # Measure echo signal\n timeout_start = time.time()\n while GPIO.input(echo_pin) == 0:\n pulse_start = time.time()\n if pulse_start - timeout_start > 0.5:\n return False\n\n timeout_start = time.time()\n while GPIO.input(echo_pin) == 1:\n pulse_end = time.time()\n if pulse_end - timeout_start > 0.5:\n return False\n \n if pulse_start is None or pulse_end is None:\n return False\n\n return round((pulse_end - pulse_start) * 17150, 2) <= MAX_DIST\n\ndef main():\n # Set initial color to green\n color(0, 1, 0)\n\n switch_state = False\n prox1_state = False\n prox2_state = False\n email_state = True\n message_state = False\n video_state = False\n\n last_motion_type = None\n motion_stack = []\n\n while True:\n # set color to green\n switch_state = GPIO.input(PIN_SWITCH) is GPIO.LOW\n video_state = False\n prox1_state = False\n prox2_state = False\n\n if switch_state:\n color(0, 0, 1)\n\n else:\n prox1_state = prox(PIN_ECHO1, PIN_TRIG1)\n prox2_state = prox(PIN_ECHO2, PIN_TRIG2)\n\n if prox1_state or prox2_state:\n color(0, 1, 1)\n\n else:\n video_name = get_current_video()\n print(video_name, f\"{SAVE_VIDEO_DIR}/{video_name}\", os.path.exists(f\"{SAVE_VIDEO_DIR}/{video_name}\"))\n if os.path.exists(f\"{SAVE_VIDEO_DIR}/{video_name}\"):\n\n try:\n response = get_response(f\"{SAVE_VIDEO_DIR}/{video_name}\", SERVER_ADDRESS)\n predictions = parse_reponse(response)\n print(predictions)\n\n last_motion_type = predictions[0]\n\n if last_motion_type != \"Normal\":\n video_state = True\n color(0, 1, 1)\n except Exception as e:\n print(\"error 1:\", e)\n \n else:\n color(0, 1, 0)\n\n # print all states\n print(\n f\"switch_state = {switch_state}, prox1_state = {prox1_state}, prox2_state = {prox2_state}, video_state = {video_state}\"\n ) \n if switch_state or prox1_state or prox2_state or video_state:\n if email_state:\n last_n_videos = get_lastn_videos(LAST_N)\n os.system(f\"cat {' '.join([SAVE_VIDEO_DIR + '/' + i for i in last_n_videos])} > 
{SAVE_VIDEO_DIR}/concat.mp4\")\n send_email(\"Suspicious activity detected. Please check the attached video\", f\"{SAVE_VIDEO_DIR}/concat.mp4\")\n if message_state:\n send_message(f\"{SAVE_VIDEO_DIR}/{video_name}\")\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n print(e)\n finally:\n GPIO.cleanup()\n","repo_name":"habuta/raspberry-pi-control","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7141410005","text":"\"\"\" Testing for the the reader.py module\n\nThe script can be executed on its own or incorporated into a larger test suite.\n\n\"\"\"\nfrom io import StringIO\nfrom collections import namedtuple\n\nimport pytest\nfrom serial.core import IntField\nfrom serial.core import ListField\nfrom serial.core import StringField\nfrom serial.core.reader import * # tests __all__\n\n\n@pytest.fixture\ndef records():\n return [\n {\"int\": 123, \"arr\": [{\"x\": \"abc\", \"y\": \"def\"}]},\n {\"int\": 456, \"arr\": [{\"x\": \"ghi\", \"y\": \"jkl\"}]},\n {\"int\": 789, \"arr\": [{\"x\": \"mno\", \"y\": \"pqr\"}]}\n ]\n\n\ndef stop_filter(record):\n \"\"\" A filter function to stop iteration.\n\n \"\"\"\n if record[\"int\"] == 789:\n raise StopIteration\n return record\n\n\ndef reject_filter(record):\n \"\"\" A filter function to reject records.\n\n \"\"\"\n return record if record[\"int\"] != 123 else None\n\n\ndef modify_filter(record):\n \"\"\" A filter function to modify records.\n\n \"\"\"\n # Input filters can safely modify record.\n record[\"int\"] *= 2\n return record\n\n\nclass _ReaderTest(object):\n \"\"\" Abstract base class for Reader unit testing.\n\n \"\"\"\n def test_next(self, reader, records):\n \"\"\" Test the __next__() method.\n\n \"\"\"\n assert next(reader) == records[0]\n\n def test_iter(self, reader, records):\n \"\"\" Test the __iter__() method.\n\n \"\"\"\n assert list(reader) == records\n\n def test_filter(self, reader, records):\n \"\"\" Test the filter() method.\n\n \"\"\"\n reader.filter(stop_filter, reject_filter, modify_filter)\n records[1][\"int\"] = 912\n assert list(reader) == records[1:2]\n return\n\n\nclass DictReaderTest(_ReaderTest):\n \"\"\" Unit testing for the DictReader class.\n\n \"\"\"\n @classmethod\n @pytest.fixture\n def reader(cls, records):\n \"\"\" Return DictReader for testing.\n\n \"\"\"\n return DictReader(records)\n\n def test_next_keys(self, records):\n \"\"\" Test the __next__() method for a subset of keys.\n\n \"\"\"\n key = \"int\"\n records = [{key: record[key]} for record in records]\n reader = DictReader(records, [key])\n assert next(reader) == records[0]\n return\n\n\nclass ObjectReaderTest(_ReaderTest):\n \"\"\" Unit testing for the ObjectReader class.\n\n \"\"\"\n @classmethod\n @pytest.fixture\n def reader(cls, records):\n \"\"\" Return an ObjectReader for testing.\n\n \"\"\"\n attrs = list(records[0].keys())\n obj = namedtuple(\"Object\", attrs)\n objects = [obj(**record) for record in records]\n return ObjectReader(objects, attrs)\n\n\nclass _TabularReaderTest(_ReaderTest):\n \"\"\" Abstract base class for tabular Reader unit testing.\n\n \"\"\"\n TEST_CLASS = None # must be defined by concrete classes\n\n @classmethod\n @pytest.fixture\n def reader(cls, stream, kwargs):\n \"\"\" Return a DelimitedReader for testing.\n\n \"\"\"\n return cls.TEST_CLASS(stream, **kwargs)\n\n def test_open(self, stream, kwargs, records):\n \"\"\" Test the open method.\n\n 
\"\"\"\n with self.TEST_CLASS.open(stream, **kwargs) as reader:\n assert next(reader) == records[0]\n assert stream.closed\n return\n\n\nclass DelimitedReaderTest(_TabularReaderTest):\n \"\"\" Unit testing for the DelimitedReader class.\n\n \"\"\"\n TEST_CLASS = DelimitedReader\n\n @classmethod\n @pytest.fixture\n def kwargs(cls):\n \"\"\" Keyword arguments to initialize a reader.\n\n \"\"\"\n fields = (\n IntField(\"int\", 0),\n ListField(\"arr\", (1, None), (\n StringField(\"x\", 0),\n StringField(\"y\", 1),)))\n return {\"fields\": fields, \"delim\": \",\", \"endl\": \"\\n\"}\n\n @classmethod\n @pytest.fixture\n def stream(cls):\n \"\"\" Return an input stream containing test data.\n\n \"\"\"\n return StringIO(\"123, abc, def\\n456, ghi, jkl\\n789, mno, pqr\\n\")\n\n def test_iter_escape(self, kwargs, records):\n \"\"\" Test the __iter__() method with an escaped delimiter.\n\n \"\"\"\n stream = StringIO(\"123, abc\\\\,, def\\n456, ghi, jkl\\n789, mno, pqr\\n\")\n kwargs[\"esc\"] = \"\\\\\"\n reader = self.TEST_CLASS(stream, **kwargs)\n records[0][\"arr\"] = [{\"x\": \"abc,\", \"y\": \"def\"}]\n assert list(reader) == records\n return\n\n\nclass FixedWidthReaderTest(_TabularReaderTest):\n \"\"\" Unit testing for the FixedWidthReader class.\n\n \"\"\"\n TEST_CLASS = FixedWidthReader\n\n @pytest.fixture\n def kwargs(self):\n \"\"\" Keyword arguments to initialize a reader.\n\n \"\"\"\n fields = (\n IntField(\"int\", (0, 4), \"3d\"),\n ListField(\"arr\", (4, None), (\n StringField(\"x\", (0, 4)),\n StringField(\"y\", (4, 8)))))\n return {\"fields\": fields, \"endl\": \"\\n\"}\n\n @pytest.fixture\n def stream(self):\n \"\"\" Return a test data stream.\n\n \"\"\"\n return StringIO(\" 123 abc def\\n 456 ghi jkl\\n 789 mno pqr\\n\")\n\n\nclass ChainReaderTest(object):\n \"\"\" Unit testing for the ChainReader class.\n\n \"\"\"\n @classmethod\n @pytest.fixture\n def streams(cls):\n \"\"\" Return test data streams.\n\n \"\"\"\n # Data for a FixedWidthReader\n data = \" 123 abc def\\n 456 ghi jkl\\n\", \" 789 mno pqr\\n\"\n return map(StringIO, data)\n\n @classmethod\n def reader(cls, stream):\n \"\"\" Return a FixedWidthReader to read the test data.\n\n \"\"\"\n fields = (\n IntField(\"int\", (0, 4), \"3d\"),\n ListField(\"arr\", (4, None), (\n StringField(\"x\", (0, 4)),\n StringField(\"y\", (4, 8)))))\n return FixedWidthReader(stream, fields)\n\n def test_next(self, streams, records):\n \"\"\" Test the __next__() method.\n\n \"\"\"\n reader = ChainReader(streams, self.reader)\n assert next(reader) == records[0]\n\n def test_iter(self, streams, records):\n \"\"\" Test the __iter__() method.\n\n \"\"\"\n reader = ChainReader(streams, self.reader)\n assert list(reader) == records\n assert all(stream.closed for stream in streams)\n return\n\n def test_iter_empty(self):\n \"\"\" Test the __iter__() method for an empty intput sequence.\n\n \"\"\"\n reader = ChainReader([], self.reader)\n assert not list(reader)\n\n def test_open(self, streams, records):\n \"\"\"\n\n \"\"\"\n with ChainReader.open(streams, self.reader) as reader:\n assert next(reader) == records[0]\n assert all(stream.closed for stream in streams)\n\n\n# Make the module executable.\n\nif __name__ == \"__main__\":\n raise SystemExit(pytest.main([__file__]))\n","repo_name":"mdklatt/serial-python","sub_path":"tests/test_reader.py","file_name":"test_reader.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40854268090","text":"class 
class Category:\n    def __init__(self, name) -> None:\n        self.name = name\n        self.ledger = []\n\n    def deposit(self, amount, description = \"\") -> None:\n        self.ledger.append({\"amount\": amount, \"description\": description})\n\n    def withdraw(self, amount, description = \"\") -> bool:\n        can_withdraw = self.check_funds(amount)\n        if can_withdraw:\n            self.ledger.append({\"amount\": -amount, \"description\": description})\n        return can_withdraw\n\n    def get_balance(self):\n        return sum([ledge[\"amount\"] for ledge in self.ledger])\n    \n    def transfer(self, amount, budget) -> bool:\n        can_transfer = self.check_funds(amount)\n        if can_transfer:\n            self.withdraw(amount, \"Transfer to \" + budget.name)\n            budget.deposit(amount, \"Transfer from \" + self.name)\n        return can_transfer\n\n    def check_funds(self, amount):\n        return False if amount > self.get_balance() else True\n    \n    def get_total_deposit(self):\n        return sum([ledge[\"amount\"] for ledge in self.ledger if ledge[\"amount\"] > 0])\n    \n    def get_total_withdrawal(self):\n        return sum([ledge[\"amount\"] for ledge in self.ledger if ledge[\"amount\"] < 0]) * -1\n    \n    def __str__(self) -> str:\n        category_name_length = len(self.name)\n        line_length = 30\n        asterisks_length = line_length - category_name_length\n        left_asterisks_length = int(asterisks_length / 2)\n        right_asterisks_length = line_length - left_asterisks_length - category_name_length\n        \n        budget_string = (\"*\" * left_asterisks_length) + self.name + (\"*\" * right_asterisks_length)\n\n        for ledge in self.ledger:\n            ledge_description_length = len(ledge[\"description\"]) if len(ledge[\"description\"]) <= 23 else 23\n            ledge_truncated_description = ledge[\"description\"][0:ledge_description_length]\n            ledge_formatted_amount = \"%.2f\" % ledge[\"amount\"]\n            ledge_amount_length = len(ledge_formatted_amount)\n            whitespaces_between_desc_and_amount = 30 - ledge_description_length - ledge_amount_length\n            \n            budget_string += \"\\n\" + ledge_truncated_description + \" \"*whitespaces_between_desc_and_amount + ledge_formatted_amount\n        \n        budget_string += \"\\nTotal: \" + \"%.2f\" % self.get_balance()\n\n        return budget_string\n    \n\n\ndef create_spend_chart(categories):\n    chart_string = \"Percentage spent by category\\n\"\n    total_withdrawals = sum([category.get_total_withdrawal() for category in categories])\n    percent_list = [100,90,80,70,60,50,40,30,20,10,0]\n    \n    for percent in percent_list:\n        chart_string += str(percent).rjust(3) + \"| \"\n        for category in categories:\n            category_percent = (category.get_total_withdrawal() * 100) / total_withdrawals\n            rounded_category_percent = int(category_percent / 10) * 10\n\n            chart_string += \"o \" if percent <= rounded_category_percent else \" \"\n        chart_string += \"\\n\"\n    \n    chart_string += \" ----------\\n\"\n    \n    longest_word = max([len(category.name) for category in categories])\n    \n    for i in range(0,longest_word):\n        chart_string += \" \"\n        for category in categories:\n            chart_string += \" \" + category.name[i] if i < len(category.name) else \" \"\n        chart_string += \" \\n\" if i < longest_word - 1 else \" \"\n\n    return chart_string\n\n","repo_name":"PedroLabrador/fcc","sub_path":"Scientific Computing with Python/budget-app/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36800182447","text":"from imutils.video import VideoStream\nfrom imutils import face_utils\nimport numpy as np\nimport imutils\nimport time\nimport dlib\nimport cv2\n\n# Initialize the face detection and facial landmark modules
\nface_detect = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nlandmark_detect = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\n# Read from the camera\nvs = VideoStream(src=0).start()\ntime.sleep(1.0)\n\nwhile True:\n    \n\t# Read a frame from the camera\n\tframe = vs.read()\n\n\t# Resize to speed up processing\n\t#frame = imutils.resize(frame, width=600)\n\n\t# Convert to grayscale\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n\t# Detect the faces in the image\n\tfaces = face_detect.detectMultiScale(gray, scaleFactor=1.1,\t\tminNeighbors=5, minSize=(100, 100),\t\tflags=cv2.CASCADE_SCALE_IMAGE)\n    # args: the grayscale source image; the scale step after each scan, where 0.01 = 1%; if scaleFactor = 1 the image stays unchanged\n\t# Loop over the faces\n\tfor (x, y, w, h) in faces:\n\n\t\t# Create a rectangle around the face\n\t\trect = dlib.rectangle(int(x), int(y), int(x + w),\n\t\t\tint(y + h))\n\n\t\t# Detect the landmark points\n\t\tlandmark = landmark_detect(gray, rect)\n\t\tlandmark = face_utils.shape_to_np(landmark)\n\n\t\t# Capture the nose region\n\t\t(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"nose\"]\n\t\tmouth = landmark[mStart:mEnd]\n\n\t\t# Get the bounding rectangle of that region\n\t\tboundRect = cv2.boundingRect(mouth)\n\t\tcv2.rectangle(frame,\n\t\t\t\t\t (int(boundRect[0]), int(boundRect[1])),\n\t\t\t\t\t (int(boundRect[0] + boundRect[2]), int(boundRect[1] + boundRect[3])), (255,0,0), 2)\n\n\t\t# Compute the average saturation\n\t\thsv = cv2.cvtColor(frame[int(boundRect[1]):int(boundRect[1] + boundRect[3]),int(boundRect[0]):int(boundRect[0] + boundRect[2])], cv2.COLOR_RGB2HSV)\n\t\tsum_saturation = np.sum(hsv[:, :, 1]) # Sum the brightness values\n\t\tarea = int(boundRect[2])*int(boundRect[3])\n\t\tavg_saturation = sum_saturation / area\n\n\t\t# Check against the threshold and warn\n\t\tif avg_saturation>95:\n\t\t\tcv2.putText(frame, \"NO MASK- NO HEALTH\", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),\n\t\t\t\t\t\t2)\n \n \n\t# Show on screen\n\tcv2.imshow(\"Camera\", frame)\n\n\t# Press Esc to quit\n\tkey = cv2.waitKey(1) & 0xFF\n\tif key == 27:\n\t\tbreak\n\n\ncv2.destroyAllWindows()\n\n","repo_name":"peachope/python","sub_path":"project_mask_detection.py","file_name":"project_mask_detection.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36449662178","text":"import time\nimport logging\nimport allure\n\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException, StaleElementReferenceException, NoSuchElementException, \\\n    JavascriptException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.remote.webdriver import WebDriver\n\nfrom utils.javascript_code import JsCode\nimport settings\n\n\nclass BasePage:\n    \"\"\"Base page object\"\"\"\n    URL = settings.Url.BASE\n    logger = logging.getLogger(settings.Logging.LOGGER_NAME)\n\n    def __init__(self, driver):\n        self.driver: WebDriver = driver\n        self.check = self._Check(self)\n\n    class CustomWaitTimeoutException(Exception):\n        pass\n\n    class FindingException(Exception):\n        pass\n\n    class FastFindingException(Exception):\n        pass\n\n    def is_opened(self):\n        \"\"\"Additional check to see that page has been opened\"\"\"\n        return True\n\n    @property\n    def action_chains(self):\n        \"\"\"ActionChains\"\"\"\n        return ActionChains(self.driver)\n\n    def 
wait(self, timeout=settings.Basic.DEFAULT_TIMEOUT):\n \"\"\"WebDriverWait\"\"\"\n return WebDriverWait(self.driver, timeout)\n\n def open_page(self, url=None):\n \"\"\"Open the page\"\"\"\n url = url if url else self.URL\n log_msg = f\"Page opening: {url}\"\n with allure.step(log_msg):\n self.logger.info(log_msg)\n\n log_msg = f'URL checking'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n if not self.check.is_links_equal(self.driver.current_url, url, raise_exception=False):\n log_msg = f'Opening URL: \"{url}\". Previous URL: \"{self.driver.current_url}\"'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.driver.get(url)\n\n log_msg = 'Checking page load'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.custom_wait(self.check.is_page_opened)\n self.logger.info(f'Page opened: \"{url}\"')\n\n def scroll_to_element(self, element):\n \"\"\"Scrolling to the element found by locator\"\"\"\n log_msg = f'Scrolling to \"{element.tag_name}\"'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.driver.execute_script(JsCode.scroll_into_view, element)\n\n def find(self, locator, timeout=settings.Basic.DEFAULT_TIMEOUT):\n \"\"\"Finding item by locator\"\"\"\n log_msg = f'Searching of the element by locator: \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n try:\n log_msg = f'Waiting for the presence of the element'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n element = self.wait(timeout).until(EC.presence_of_element_located(locator))\n self.logger.info(f'Element have been found: \"{element.tag_name}\"')\n return element\n except TimeoutException:\n raise self.FindingException(f'Element not found by locator: \"{locator[1]}\" (type: {locator[0]})')\n\n def fast_find(self, locator):\n \"\"\"Finding item by locator without waiting\"\"\"\n log_msg = f'Fast searching of the element by locator: \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n try:\n element = self.driver.find_element(*locator)\n self.logger.info(f'Element have been found: \"{element.tag_name}\"')\n return element\n except (NoSuchElementException, StaleElementReferenceException):\n raise self.FastFindingException(f'Element not found by locator: \"{locator[1]}\" (type: {locator[0]})')\n\n def find_elements(self, locator):\n \"\"\"Finding an items by locator\"\"\"\n log_msg = f'Searching elements by locator: \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n elements = self.driver.find_elements(*locator)\n\n elements_names = []\n for e in elements:\n try:\n elements_names.append(e.tag_name)\n except StaleElementReferenceException:\n pass\n\n if len(elements) > 0:\n self.logger.info(f'{len(elements)} element(s) have been found')\n self.logger.debug(f'Element(s) have been found: \"{\", \".join(elements_names)}\"')\n else:\n self.logger.info(f'No items found')\n return elements\n\n def click(self, locator, timeout=settings.Basic.DEFAULT_TIMEOUT):\n \"\"\"Click on an element found by locator\"\"\"\n log_msg = f'Clicking on \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n for i in range(settings.Basic.CLICK_RETRY):\n log_msg = f'Clicking on \"{locator[1]}\" (type: {locator[0]}). 
' \\\n f'Try {i + 1} of {settings.Basic.CLICK_RETRY}...'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n try:\n log_msg = f'Waiting for page opening and loading'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.custom_wait(self.check.is_page_opened, check_url=False)\n\n log_msg = f'Searching of the element found by locator \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n elem = self.find(locator, timeout=timeout)\n\n log_msg = f'Scrolling to the element found by locator \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.scroll_to_element(elem)\n\n log_msg = f'Waiting for element found by locator ' \\\n f'\"{locator[1]}\" (type: {locator[0]}) to be clickable'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n elem = self.wait(timeout).until(EC.element_to_be_clickable(locator))\n\n log_msg = f'Clicking on \"{elem.tag_name}\" found by \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n elem.click()\n return\n except (TimeoutException, StaleElementReferenceException, self.FindingException) as exc:\n if i == settings.Basic.CLICK_RETRY - 1:\n raise exc\n self.logger.debug(f'Error thrown: {exc}. Trying click again')\n\n def _fill_field(self, locator, text):\n log_msg = f'Waiting for page opening and loading'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.custom_wait(self.check.is_page_opened, check_url=False)\n\n log_msg = f'Waiting for element found by locator ' \\\n f'\"{locator[1]}\" (type: {locator[0]}) visibility to be located'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n element = self.wait().until(EC.visibility_of_element_located(locator))\n\n log_msg = f'Scrolling to the element found by locator \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.scroll_to_element(element)\n\n log_msg = f'Waiting for element found by locator \"{locator[1]}\" (type: {locator[0]}) to be visible'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n self.custom_wait(self.check.is_visible, locator)\n\n log_msg = f'Filling element found by locator \"{locator[1]}\" (type: {locator[0]}) with \"{text}\"'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n prev_text = element.text\n element.clear()\n element.send_keys(text)\n\n self.logger.info(f'Field \"{element.tag_name}\" filled')\n return element, prev_text\n\n def fill_field(self, locator, text):\n \"\"\"Fills field found by locator with the given text\"\"\"\n log_msg = f'Filling field found by locator \"{locator[1]}\" (type: {locator[0]}) with \"{text}\"'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n element, _ = self._fill_field(locator, text)\n return element\n\n def fill_field_and_return_previous_text(self, locator, text: str):\n \"\"\"Fills field found by locator with the given text and returns the previously text\"\"\"\n log_msg = f'Filling field found by locator \"{locator[1]}\" (type: {locator[0]}) with \"{text}\" ' \\\n f'and returning the previous text'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n _, prev_text = self._fill_field(locator, text)\n return prev_text\n\n def get_input_value(self, locator):\n \"\"\"Returns the text of the input field found by locator\"\"\"\n log_msg = f'Getting the value of the input field: \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n\n log_msg = f'Searching of the input 
field by locator \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n element = self.find(locator)\n\n log_msg = f'Getting value from input field found by locator \"{locator[1]}\" (type: {locator[0]})'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n result = element.get_attribute(\"value\")\n\n self.logger.info(f'Received the value of the input field: \"{locator[1]}\" (type: {locator[0]})')\n self.logger.debug(f'Value of the input field: {result}')\n return result\n\n def custom_wait(self, method, *args, error=None, timeout=settings.Basic.DEFAULT_TIMEOUT,\n interval=settings.Basic.DEFAULT_CHECKING_INTERVAL, check=True, **kwargs):\n \"\"\"A custom function to wait for the passed function to succeed\"\"\"\n log_msg = f'Waiting for successfully method \"{method.__name__}\" execution'\n with allure.step(log_msg):\n self.logger.info(log_msg)\n\n if not error:\n error = self.check.exceptions.get(method.__name__, Exception)\n self.logger.debug(f'Expected method Exception: {error.__name__}')\n\n st = time.perf_counter()\n last_exception = None\n i = 0\n while time.perf_counter() - st < timeout:\n try:\n i += 1\n log_msg = f'Method execution: \"{method.__name__}\". Try: {i}'\n with allure.step(log_msg):\n self.logger.debug(log_msg)\n result = method(*args, **kwargs)\n if check:\n if result:\n self.logger.debug(f'Method \"{method.__name__}\" execution result: \"{result}\"')\n return result\n last_exception = f'Method \"{method.__name__}\" returned \"{result}\"'\n else:\n self.logger.debug(f'Method \"{method.__name__}\" execution result: \"{result}\"')\n return result\n except error as e:\n last_exception = e\n time.sleep(interval)\n\n raise self.CustomWaitTimeoutException(\n f'Method {method.__name__} timeout in {timeout}sec with exception: \"{last_exception}\"')\n\n class _Check:\n page = None\n exceptions: dict = None\n\n def __init__(self, page):\n self.page = page\n self.exceptions = {\n \"is_element_visible\": self.ElementNotVisibleException,\n \"is_visible\": self.ElementNotVisibleException,\n \"is_element_not_visible\": self.ElementVisibleException,\n \"is_not_visible\": self.ElementVisibleException,\n \"is_exists\": self.ElementNotExistsException,\n \"is_not_exists\": self.ElementExistsException,\n \"is_element_text_equal\": self.ComparisonException,\n \"is_element_text_not_equal\": self.ComparisonException,\n \"is_links_equal\": self.ComparisonException,\n \"is_page_url_match_driver_url\": self.PageUrlDoesNotMatchDriverUrl,\n \"is_page_opened\": self.PageNotOpenedException,\n }\n\n class ComparisonException(Exception):\n pass\n\n class ElementNotVisibleException(Exception):\n pass\n\n class ElementVisibleException(Exception):\n pass\n\n class ElementNotExistsException(Exception):\n pass\n\n class ElementExistsException(Exception):\n pass\n\n class PageNotOpenedException(Exception):\n pass\n\n class PageUrlDoesNotMatchDriverUrl(Exception):\n pass\n\n def _raise_exception_wrapper(self, exc, exc_msg, raise_exception, result=False):\n self.page.logger.debug(f'Raised exception \"{exc.__name__}\" with message: \"{exc_msg}\"')\n if raise_exception:\n raise exc(exc_msg)\n else:\n return result\n\n def _is_element_visible(self, element):\n try:\n result = self.page.driver.execute_script(JsCode.is_visible, element)\n self.page.logger.debug(f'Element \"{element.tag_name}\" visibility status: \"{result}\"')\n return result\n except (JavascriptException, StaleElementReferenceException):\n self.page.logger.debug(f'Element is not visible or stale')\n 
return False\n\n def is_element_visible(self, element, raise_exception=True):\n \"\"\"Checking that an element is visible\"\"\"\n self.page.logger.debug(f'Checking that element is visible')\n if self._is_element_visible(element):\n return True\n\n exc_msg = f'Element is not visible'\n return self._raise_exception_wrapper(self.ElementNotVisibleException, exc_msg, raise_exception)\n\n def is_visible(self, locator, raise_exception=True):\n \"\"\"Checking that an element found by locator is visible\"\"\"\n self.page.logger.debug(f'Checking that element found by locator '\n f'\"{locator[1]}\" (type: {locator[0]}) is visible')\n\n elem = self.page.find(locator)\n if self.is_element_visible(elem, raise_exception=False):\n self.page.logger.debug(\n f'Element found by locator \"{locator[1]}\" (type: {locator[0]}) is visible')\n return True\n else:\n self.page.logger.debug(\n f'Element found by locator \"{locator[1]}\" (type: {locator[0]}) is not visible')\n\n exc_msg = f'Element found by locator \"{locator[1]}\" (type: {locator[0]}) is not visible'\n return self._raise_exception_wrapper(self.ElementNotVisibleException, exc_msg, raise_exception)\n\n def is_element_not_visible(self, element, raise_exception=True):\n \"\"\"Checking that an element is not visible\"\"\"\n self.page.logger.debug(f'Checking that element is not visible')\n\n if not self._is_element_visible(element):\n return True\n\n exc_msg = f'Element is visible'\n return self._raise_exception_wrapper(self.ElementVisibleException, exc_msg, raise_exception)\n\n def is_not_visible(self, locator, raise_exception=True):\n \"\"\"Checking that an element found by locator is not visible\"\"\"\n self.page.logger.debug(\n f'Checking that element found by locator \"{locator[1]}\" (type: {locator[0]}) is not visible')\n try:\n elem = self.page.fast_find(locator)\n except self.page.FastFindingException:\n self.page.logger.debug(f'Element is not founded by locator \"{locator[1]}\" (type: {locator[0]})')\n return True\n\n if self.is_element_not_visible(elem, raise_exception=raise_exception):\n self.page.logger.debug(\n f'Element found by locator \"{locator[1]}\" (type: {locator[0]}) is not visible')\n return True\n else:\n self.page.logger.debug(\n f'Element found by locator \"{locator[1]}\" (type: {locator[0]}) is visible')\n\n exc_msg = f'Element found by \"{locator[1]}\" (type: {locator[0]}) is visible'\n return self._raise_exception_wrapper(self.ElementVisibleException, exc_msg, raise_exception)\n\n def is_exists(self, locator, raise_exception=True):\n \"\"\"Checking that an element found by locator exists\"\"\"\n self.page.logger.debug(f'Checking that element found by locator '\n f'\"{locator[1]}\" (type: {locator[0]}) does not exists')\n try:\n elem = self.page.driver.find_element(*locator)\n if elem:\n self.page.logger.debug(\n f'Element found by locator \"{locator[1]}\" (type: {locator[0]}) exists')\n return True\n except NoSuchElementException:\n pass\n\n exc_msg = f'Element \"{locator[1]}\" (type: {locator[0]}) is not found'\n return self._raise_exception_wrapper(self.ElementNotExistsException, exc_msg, raise_exception)\n\n def is_not_exists(self, locator, raise_exception=True):\n \"\"\"Checking that an element found by locator does not exist\"\"\"\n self.page.logger.debug(f'Checking that element found by locator '\n f'\"{locator[1]}\" (type: {locator[0]}) exists')\n try:\n elem = self.page.driver.find_element(*locator)\n if elem:\n exc_msg = f'Element \"{elem.tag_name}\" found by {locator[1]} (type: {locator[0]}) exists'\n return 
self._raise_exception_wrapper(self.ElementExistsException, exc_msg, raise_exception)\n except (NoSuchElementException, StaleElementReferenceException):\n pass\n self.page.logger.debug(\n f'Element found by {locator[1]} (type: {locator[0]}) does not exists')\n return True\n\n def is_element_text_equal(self, elem, text, raise_exception=True):\n \"\"\"Checking that the text of an element is equal to the given text\"\"\"\n self.page.logger.debug(f'Checking that element \"{elem.tag_name}\" text \"{elem.text}\" == given text \"{text}\"')\n if elem.text == text:\n self.page.logger.debug(f'Element text \"{elem.text}\" == given text \"{text}\"')\n return True\n else:\n exc_msg = f'Element text \"{elem.text}\" != given text \"{text}\"'\n return self._raise_exception_wrapper(self.ComparisonException, exc_msg, raise_exception)\n\n def is_element_text_not_equal(self, elem, text, raise_exception=True):\n \"\"\"Checking that the text of an element is not equal to the given text\"\"\"\n self.page.logger.debug(f'Checking that element \"{elem.tag_name}\" text \"{elem.text}\" != given text \"{text}\"')\n if elem.text != text:\n self.page.logger.debug(f'Element text \"{elem.text}\" != given text \"{text}\"')\n return True\n else:\n exc_msg = f'Element text \"{elem.text}\" == given text \"{text}\"'\n return self._raise_exception_wrapper(self.ComparisonException, exc_msg, raise_exception)\n\n def is_links_equal(self, url_1, url_2, raise_exception=True):\n \"\"\"Url comparison without arguments\"\"\"\n self.page.logger.debug(f'Checking that URLs equal: \"{url_1}\" == \"{url_2}\"')\n urls = (url_1, url_2)\n new_urls = []\n for url in urls:\n url = url.split(\"?\")[0]\n url = url.split(\"#\")[0]\n url = url.rstrip(\"/\")\n new_urls.append(url)\n new_url_1, new_url_2 = new_urls\n result = new_url_1 == new_url_2\n if result:\n self.page.logger.debug(f'URLs \"{url_1}\" == URL \"{url_2}\"')\n return True\n else:\n exc_msg = f'URL \"{url_1}\" != URL \"{url_2}\"'\n return self._raise_exception_wrapper(self.ComparisonException, exc_msg, raise_exception)\n\n def is_page_url_match_driver_url(self, raise_exception=True):\n \"\"\"Checking that the current url matches the url of the page\"\"\"\n url_1 = self.page.driver.current_url\n url_2 = self.page.URL\n self.page.logger.debug(\n f'Checking that current URL \"{url_1}\" == {self.page.__class__.__name__} page URL {url_2}')\n result = self.is_links_equal(url_1, url_2, raise_exception=False)\n if result:\n return True\n\n exc_msg = f'Current url \"{url_1}\" does not match page object url \"{url_2}\"'\n return self._raise_exception_wrapper(self.PageUrlDoesNotMatchDriverUrl, exc_msg, raise_exception)\n\n def is_page_opened(self, url=None, check_url=True, raise_exception=True):\n \"\"\"Checking that the page has been opened and fully loaded\"\"\"\n self.page.logger.debug('Checking that the page has been opened and fully loaded')\n\n if check_url:\n url = url if url else self.page.URL\n current_url = self.page.driver.current_url\n if not self.is_links_equal(current_url, url, raise_exception=False):\n exc_msg = f'Current URL \"{current_url}\" != page URL \"{url}\"'\n return self._raise_exception_wrapper(self.PageNotOpenedException, exc_msg, raise_exception)\n else:\n self.page.logger.debug(f'Current URL \"{current_url}\" == page URL \"{url}\"')\n\n status = self.page.driver.execute_script(JsCode.document_ready_state)\n expected = \"complete\"\n if not status == expected:\n exc_msg = f'Current page loading status \"{status}\" != expected status \"{expected}\"'\n return 
self._raise_exception_wrapper(self.PageNotOpenedException, exc_msg, raise_exception)\n else:\n self.page.logger.debug(f'Current page loading status \"{status}\" == expected status \"{expected}\"')\n\n result = self.page.is_opened()\n if not result:\n exc_msg = \"Page is not opened\"\n return self._raise_exception_wrapper(self.PageNotOpenedException, exc_msg, raise_exception)\n\n self.page.logger.debug(f'Page (URL: {self.page.driver.current_url}) has been opened and fully loaded')\n return True\n","repo_name":"Dan4ik2504/2021-1-MAILRU-SDET-Python-D-Mashkovtsev","sub_path":"Homework_2/ui/pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":23401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8695837261","text":"import allure\nfrom framework import check\nfrom framework.helper import gen_title, gen_body\nfrom framework.jsonplaceholder_client import Client\nimport pytest\n\n\n@allure.suite(\"POST /posts\")\n# POST requests have no validation now, tests below are written in case userId and title are required\nclass TestCreatePosts:\n @pytest.mark.parametrize(\n \"data\",\n [\n {\"title\": gen_title(), \"body\": gen_body(), \"userId\": \"1\"},\n {\"title\": gen_title(), \"userId\": \"10\"},\n ],\n )\n @allure.title(\"Positive. Create new post\")\n def test_create_valid_post(self, data):\n response = Client().create_new_post(data)\n check.check_response_code(response, 201)\n check.check_response_data(response, data)\n\n @pytest.mark.parametrize(\n \"data\",\n [{\"title\": gen_title()}, {\"userId\": 10}, {\"random_field\": gen_title()}, {}],\n )\n @allure.title(\"Negative. Create new post\")\n def test_create_invalid_post(self, data):\n response = Client().create_new_post(data)\n check.check_response_code(response, 404)\n","repo_name":"pressannykey/test_task","sub_path":"tests/test_create_posts.py","file_name":"test_create_posts.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37621445004","text":"# FIZZBUZZ\ndef division(number):\n if number % 3 == 0:\n if number % 5 == 0:\n return \"FizzBuzz\"\n return \"Fizz\"\n elif number % 5 == 0:\n return \"Buzz\"\n return number\n\n\ndef create_list():\n numbers = []\n for i in range(1, 101):\n numbers.append(i)\n return numbers\n\n\nif __name__ == \"__main__\":\n numbers = create_list()\n finished_numbers = []\n for i in numbers:\n finished_numbers.append(division(i))\n print(finished_numbers)\n\n# BUBBLE SORT\n\ndef bubble(arr):\n for i in range(len(arr)):\n for j in range(0, len(arr) - i - 1):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n return arr\n\n# QUICK SORT\n\ndef quick_sort(arr):\n length = len(arr)\n smaller_numbers = []\n greater_numbers = []\n\n if length <= 1:\n return arr\n else:\n pivot = arr.pop()\n for i in arr:\n if i > pivot:\n greater_numbers.append(i)\n else:\n smaller_numbers.append(i)\n return quick_sort(smaller_numbers) + [pivot] + quick_sort(greater_numbers)\n\n\n# INSERTION SORT\n\ndef insertion_sort(arr):\n length = len(arr)\n\n for i in range(1, length):\n while arr[i - 1] > arr[i] and i > 0:\n arr[i - 1], arr[i] = arr[i], arr[i - 1]\n i -= 1\n\n return arr\n\n# COUNTING SORT\n\ndef counting_sort(arr):\n arr_len = len(arr)\n output = [0] * arr_len\n arr_count = [0] * 10\n\n for i in range(0, arr_len):\n arr_count[arr[i]] += 1\n\n for j in range(1, 10):\n arr_count[j] += arr_count[j - 1]\n\n a = arr_len - 
1\n while a >= 0:\n output[arr_count[arr[a]] - 1] = arr[a]\n arr_count[arr[a]] -= 1\n a -= 1\n\n for k in range(0, arr_len):\n arr[k] = output[k]\n\n# BINARY SEARCH\n\ndef binary_search(arr, item):\n begin_index = 0\n end_index = len(arr) - 1\n while begin_index <= end_index:\n midpoint = int(begin_index + (end_index - begin_index) / 2)\n midpoint_value = arr[midpoint]\n if item == midpoint_value:\n return midpoint\n elif item < midpoint_value:\n end_index = midpoint - 1\n else:\n begin_index = midpoint + 1\n return \"Not found\"\n\n","repo_name":"majabukowska/algorithms","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30275880656","text":"import Kmeans as km\nimport KNN as knn\nfrom utils_data import read_dataset, read_extended_dataset, crop_images, visualize_retrieval, Plot3DCloud, \\\n visualize_k_means\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef retrieval_by_color(images, labels, str_list):\n \"\"\"\n images: dataset of images that are given to us\n labels: labels we receive by applying our k-means algorithm to those images\n str-list: color of the images we want to retrieve\n returns: list of images that match the color of the str-list parameter\n \"\"\"\n image_list = []\n for i, l in zip(images, labels):\n for j in l: # CHECKS EACH COLOR OF THE LABEL\n if j in str_list:\n image_list.append(i)\n return image_list\n\n\ndef get_color_accuracy(km_colors, gt):\n \"\"\"\n km_colors: colors that our k-means algorithm returns\n gt: ground truth of the dataset\n returns: accuracy on the k-means algorithm\n \"\"\"\n total_sum = 0\n for a, b in zip(km_colors, gt[2]):\n already_in = []\n inside = 0\n for color in a:\n if color not in already_in:\n already_in.append(color)\n if color in b:\n inside += 1\n total_sum += (inside / len(already_in))\n return (total_sum / len(km_colors)) * 100\n\n\ndef knn_accuracy(pred, gt):\n \"\"\"\n pred: our KNN' prediction\n gt: ground truth of the dataset\n returns: accuracy on the KNN algorithm\n \"\"\"\n i = 0\n correct_sum = 0\n while i < len(pred):\n if pred[i] == gt[i]:\n correct_sum += 1\n i += 1\n percentage = (correct_sum / len(pred)) * 100\n return percentage\n\n\ndef askForInteger():\n correct = False\n num = 0\n while (not correct):\n try:\n num = int(input(\"Introduce an integer: \"))\n correct = True\n except ValueError:\n print('Error, introduce an integer')\n\n return num\n\n\nif __name__ == '__main__':\n\n # Load all the images and Ground Truth\n train_imgs, train_class_labels, train_color_labels, test_imgs, test_class_labels, \\\n test_color_labels = read_dataset(root_folder='./images/', gt_json='./images/gt.json')\n\n # List with all the existent classes\n classes = list(set(list(train_class_labels) + list(test_class_labels)))\n\n # Load extended Ground Truth\n imgs, class_labels, color_labels, upper, lower, background = read_extended_dataset()\n\n cropped_images = crop_images(imgs, upper, lower)\n\n my_gt = read_extended_dataset(root_folder='./images/', extended_gt_json='./images/gt_reduced.json', w=60, h=80)\n\n quit = False\n option = 0\n\n while not quit:\n print(\"****************************************************************************************\")\n print(\"1. 
Visualize k-means: compare the k-means result to the original image (we reduced the dataset to the \"\n \"first 5 images to make it more dynamic, \"\n \"you can change the value of the retrieved images in the option 1 on the code)\")\n print(\"2. Retrieve all the images that matches the parameter' color you introduce by parameters (you can \"\n \"change it on the option 2, by default is 'Black')\")\n print(\"3. Calculate k-means algorithm accuracy. You can change the threshold value on the \"\n \"tolerance value in _init_options_ (by default is 0.2), you can also change the km_init value (by \"\n \"default 'optimum')\")\n print(\"4. Here you can see the KNN algorithm accuracy. You can change the distance used in the k_neighbours \"\n \"function, inside the cdist function (by default 'euclidean', others: Hamming, Minkowski, \"\n \"Sokalmichener, Russellrao)\")\n print(\"5. Quit\")\n print(\"Choose an option\")\n print(\"****************************************************************************************\")\n\n option = askForInteger()\n\n if option == 1:\n for im in imgs[0:5]:\n my_km = km.KMeans(im)\n my_km._init_centroids()\n my_km.get_labels()\n my_km.get_centroids()\n visualize_k_means(my_km, im.shape)\n elif option == 2:\n colors = []\n for i in cropped_images:\n my_km = km.KMeans(i)\n my_km._init_centroids()\n my_km.get_labels()\n my_km.get_centroids()\n new_color = km.get_colors(my_km.centroids)\n colors.append(new_color)\n selected_images = retrieval_by_color(imgs, colors, 'Black')\n visualize_retrieval(selected_images, 15)\n elif option == 3:\n colors = []\n for i in cropped_images:\n my_km = km.KMeans(i)\n my_km._init_centroids()\n my_km.get_labels()\n my_km.get_centroids()\n c = km.get_colors(my_km.centroids)\n colors.append(c)\n color_accuracy_percentage = get_color_accuracy(colors, my_gt)\n print(color_accuracy_percentage)\n elif option == 4:\n test_imgs = test_imgs[:, :, :, 0]\n train_imgs = train_imgs[:, :, :, 0]\n\n my_knn = knn.KNN(train_imgs, train_class_labels)\n preds = my_knn.predict(test_imgs, 10)\n accuracy = knn_accuracy(preds, test_class_labels)\n print(accuracy)\n elif option == 5:\n quit = True\n else:\n print(\"Introduce a number between 1 and 4\")\n\n print(\"Fin\")\n","repo_name":"naimmoltrasio/Clothes-Classification-using-Machine-Learning","sub_path":"my_labelling.py","file_name":"my_labelling.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25409842909","text":"#%%\nimport numpy as np\n\n#%%\nf = open(\"2016/input_03.txt\", \"r\")\ninstructions = f.readlines()\ninstructions = [i[:-1].split(\" \") for i in instructions]\ninstructions = [[int(x) for x in i if x != \"\"] for i in instructions]\n\n# %%\ncount = 0\nfor i in instructions:\n m = max(i)\n max_i = i.index(m)\n if sum(i[:max_i] + i[max_i + 1 :]) > m:\n count += 1\n\n# ANSWER 1\nprint(count)\n\n#%%\nnp_ins = np.array(instructions)\nnew_ins = []\nfor i in range(3, len(np_ins) + 1, 3):\n g = np_ins[max(0, i - 3) : i]\n for j in np.transpose(g):\n new_ins.append(list(j))\n\n# %%\ncount_2 = 0\nfor i in new_ins:\n m = max(i)\n max_i = i.index(m)\n if sum(i[:max_i] + i[max_i + 1 :]) > m:\n count_2 += 1\n\n# ANSWER 2\nprint(count_2)\n","repo_name":"stepva/adventofcode","sub_path":"2016/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"23291548520","text":"__author__ = 
'cody'\n\nimport flask\nfrom dbapi import app\nfrom flask import request\nfrom dbapi.models import *\nfrom utils.rest_api_utils import *\nfrom utils.exceptions import *\n\n@app.route(\"/user\", methods=[\"GET\"])\ndef get_all_users():\n    all_users = [user.todict() for user in flask.g.db_session.query(User).all()]\n    return rest_jsonify(all_users)\n\n@app.route(\"/user\", methods=[\"POST\", \"PUT\"])\ndef create_new_user():\n    data = validate_convert_request(request.data, required_headers=User.required_fields)\n    user = flask.g.db_session.query(User).filter(User.username == data[\"username\"]).first()\n    if user is not None:\n        raise AlreadyExistsException(USER_ALREADY_EXISTS)\n    else:\n        user = User(data)\n        flask.g.db_session.add(user)\n        flask.g.db_session.commit()\n        return rest_jsonify(message=USER_CREATED, status=HTTPStatusCodes.CREATED)\n\n@app.route(\"/user/<username>\", methods=[\"POST\", \"PUT\"])\ndef update_user(username):\n    data = validate_convert_request(request.data, required_headers=[])\n    user = flask.g.db_session.query(User).filter(User.username == username).first()\n    if user is None:\n        raise NotFoundException(USER_NOT_FOUND)\n    else:\n        user.fromdict(data)\n        flask.g.db_session.merge(user)\n        flask.g.db_session.commit()\n        return rest_jsonify(message=RESOURCE_UPDATED, status=HTTPStatusCodes.OK)\n\n@app.route(\"/user/<username>\", methods=[\"DELETE\"])\ndef delete_user(username):\n    user = flask.g.db_session.query(User).filter(User.username == username).scalar()\n    if user is None:\n        raise NotFoundException(USER_NOT_FOUND)\n    else:\n        flask.g.db_session.delete(user)\n        flask.g.db_session.commit()\n        return rest_jsonify(message=RESOURCE_DELETED, status=HTTPStatusCodes.NO_CONTENT)\n\n@app.route(\"/user/<username>\", methods=[\"GET\"])\ndef get_specific_user(username):\n    user = flask.g.db_session.query(User).filter(User.username == username).scalar()\n    if user is None:\n        raise NotFoundException(USER_NOT_FOUND)\n    else:\n        return rest_jsonify(user.todict())\n\n@app.route(\"/user/authenticate\", methods=[\"POST\"])\ndef authenticate_user():\n    data = validate_convert_request(request.data, required_headers=[\"username\", \"password\"])\n    user = flask.g.db_session.query(User).filter(User.username == data[\"username\"]).scalar()\n    if user is None:\n        raise NotFoundException(USER_NOT_FOUND)\n    elif not user.check_password(data[\"password\"]):\n        raise AuthenticationFailureException\n    else:\n        return rest_jsonify(user.todict())\n\n","repo_name":"codyharrington/todolist","sub_path":"dbapi/controllers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"12085221029","text":"MOD = 10**9 + 7\n\ndef count_geometric_subsequences(n, q, a):\n    counts = [0] * (n - 1) # Initialize a list holding the number of subsequences of length 2 to n of sequence a; all counts start at 0\n    \n    for i in range(n - 1):\n        current_count = 0 # The count of qualifying subsequences of length (i+2) starts at 0\n        current_val = a[i] # The current value used to check the common ratio\n        \n        for j in range(i + 1, n): # Iterate over the elements from i+1 to n-1\n            if a[j] % current_val == 0: # If the element satisfies the common-ratio condition\n                current_count += 1 # Increase the count of qualifying subsequences by 1\n                current_val *= q # Update the current value to the next multiple\n                current_val %= MOD # Take the remainder mod MOD to avoid huge numbers\n        \n        counts[i] = current_count % MOD # Store the count of qualifying subsequences of length (i+2) into counts\n    \n    return counts\n\n# Read the data from the input file
\nwith open('SO.INP', 'r') as file:\n    n, q = map(int, file.readline().split()) # Read the number of elements n and the value q\n    a = list(map(int, file.readline().split())) # Read the integer sequence a\n\n# Count the subsequences as required\nresult = count_geometric_subsequences(n, q, a)\n\n# Write the result to the output file\nwith open('SO.OUT', 'w') as file:\n    file.write(' '.join(str(x) for x in result)) # Write the result list to the output file, separated by spaces\n","repo_name":"aerovfx/Fullstack4kid","sub_path":"CREATE_APP/Python/PythonChallenge/Level3/de2/SO.PY","file_name":"SO.PY","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"vi","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"14729243548","text":"import torch\nimport torch.nn as nn\n\n\n# like vgg network pattern\ndef make_layers(cfg, batch_norm=False):\n    layers = []\n    in_channels = 3\n    for v in cfg:\n        if v == 'M':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n        else:\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n            if batch_norm:\n                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n            else:\n                layers += [conv2d, nn.ReLU(inplace=True)]\n            in_channels = v\n    return nn.Sequential(*layers)\n\ncfg = {\n    'A1': [32, 'M', 64, 'M'],\n    'A2': [32, 'M', 64, 'M', 128, 'M'],\n    'B1': [32, 32, 'M', 64, 64, 'M'],\n    'B2': [32, 32, 'M', 64, 64, 'M', 128, 128, 'M'],\n}\n\ncfg_with_feature_size = {\n    'A1': (cfg['A1'], (64, 8, 8)),\n    'A2': (cfg['A2'], (128, 4, 4)),\n    'B1': (cfg['B1'], (64, 8, 8)),\n    'B2': (cfg['B2'], (128, 4, 4))\n}\n\n\nclass Flatten(nn.Module):\n    def __init__(self):\n        super(Flatten, self).__init__()\n\n\n    def forward(self, x):\n        N, C, H, W = x.size()\n        return x.view(N, -1)\n\n\nclass GlobalAveragePool2d(nn.Module):\n    def __init__(self, h, w):\n        super(GlobalAveragePool2d, self).__init__()\n        self.gap = nn.AvgPool2d(kernel_size=(h, w), stride=(h, w))\n    \n\n    def forward(self, x):\n        return self.gap(x)\n\n\nclass BaseClassiferHead(nn.Module):\n    def __init__(self, feature_size, num_classes):\n        super(BaseClassiferHead, self).__init__()\n        C, H, W = feature_size\n        size = C * H * W\n\n        self.classifier_head = nn.Sequential(\n            Flatten(),\n            nn.Linear(size, 1024),\n            nn.ReLU(inplace=True),\n            nn.Linear(1024, num_classes)\n        )\n\n    def forward(self, x):\n        return self.classifier_head(x)\n\n\nclass BaseDropoutClassiferHead(nn.Module):\n    def __init__(self, feature_size, num_classes):\n        super(BaseDropoutClassiferHead, self).__init__()\n        C, H, W = feature_size\n        size = C * H * W\n\n        self.classifier_head = nn.Sequential(\n            Flatten(),\n            nn.Linear(size, 1024),\n            nn.ReLU(inplace=True),\n            nn.Dropout(),\n            nn.Linear(1024, num_classes)\n        )\n\n    def forward(self, x):\n        return self.classifier_head(x)\n\n\nclass AlexNetStyleClassiferHead(nn.Module):\n    def __init__(self, feature_size, num_classes):\n        super(AlexNetStyleClassiferHead, self).__init__()\n        C, H, W = feature_size\n        size = C * H * W\n    \n        self.classifier_head = nn.Sequential(\n            Flatten(),\n            nn.Dropout(),\n            nn.Linear(size, 1024),\n            nn.ReLU(inplace=True),\n            nn.Dropout(),\n            nn.Linear(1024, 1024),\n            nn.ReLU(inplace=True),\n            nn.Linear(1024, num_classes)\n        )\n\n\n    def forward(self, x):\n        return self.classifier_head(x)\n\n\nclass VggNetStyleClassifierHead(nn.Module):\n    def __init__(self, feature_size, num_classes):\n        super(VggNetStyleClassifierHead, self).__init__()\n        C, H, W = feature_size\n        size = C * H * W\n\n        self.classifier_head = nn.Sequential(\n            Flatten(),\n            nn.Linear(size, 1024),\n            nn.ReLU(inplace=True),\n            nn.Dropout(),\n            nn.Linear(1024, 1024),\n            nn.ReLU(),\n            
nn.Dropout(),\n nn.Linear(1024, num_classes)\n )\n\n\n def forward(self, x):\n return self.classifier_head(x)\n\n\nclass SqueezeNetStyleClassifierHead(nn.Module):\n def __init__(self, feature_size, num_classes):\n super(SqueezeNetStyleClassifierHead, self).__init__()\n C, H, W = feature_size\n size = C * H * W\n final_conv = nn.Conv2d(C, num_classes, kernel_size=1)\n\n self.classifier_head = nn.Sequential(\n nn.Dropout(),\n final_conv,\n nn.ReLU(inplace=True),\n GlobalAveragePool2d(H, W),\n Flatten()\n )\n\n\n def forward(self, x):\n return self.classifier_head(x)\n\n\nclass ResNetStyleClassifierHead(nn.Module):\n def __init__(self, feature_size, num_classes):\n super(ResNetStyleClassifierHead, self).__init__()\n C, H, W = feature_size\n\n self.classifier_head = nn.Sequential(\n GlobalAveragePool2d(H, W),\n Flatten(),\n nn.Linear(C, num_classes)\n )\n\n\n def forward(self, x):\n return self.classifier_head(x)\n\n\nclass DepthNetBase(nn.Module):\n def __init__(self, features, classifier_head):\n super(DepthNetBase, self).__init__()\n self.features = features\n self.classifier = classifier_head\n\n def forward(self, x):\n x = self.features(x)\n return self.classifier(x)\n\n\ndef make_classifier_head(style, feature_size, num_classes):\n if style == 'base':\n return BaseClassiferHead(feature_size, num_classes)\n elif style == 'dropout':\n return BaseDropoutClassiferHead(feature_size, num_classes)\n elif style == 'alex':\n return AlexNetStyleClassiferHead(feature_size, num_classes)\n elif style == 'vgg':\n return VggNetStyleClassifierHead(feature_size, num_classes)\n elif style == 'squeeze':\n return SqueezeNetStyleClassifierHead(feature_size, num_classes)\n elif style == 'resnet':\n return ResNetStyleClassifierHead(feature_size, num_classes)\n else:\n return BaseClassiferHead(feature_size, num_classes)\n\n\ndef DepthNet(feature_body_style='A1', classifier_head_style='base', batch_norm=True):\n num_classes = 10\n features_cfg, feature_size = cfg_with_feature_size[feature_body_style]\n\n features = make_layers(features_cfg, batch_norm=batch_norm)\n classifier_head = make_classifier_head(classifier_head_style, feature_size, num_classes)\n\n model = DepthNetBase(features, classifier_head)\n\n return model\n","repo_name":"dragonbook/cs231n-assignments","sub_path":"assignment2/my2/model_net_arch.py","file_name":"model_net_arch.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15223576960","text":"\"\"\"\nConvert bytes string to integer, and integer to bytes string.\n\"\"\"\n\nfrom struct import calcsize, unpack, error as struct_error\nfrom itertools import chain, repeat\n\nBIG_ENDIAN = \"ABCD\"\nLITTLE_ENDIAN = \"DCBA\"\n\ndef uint2bytes(value, endian, size=None):\n r\"\"\"\n Convert an unsigned integer to a bytes string in the specified endian.\n If size is given, add nul bytes to fill to size bytes.\n\n >>> uint2bytes(0x1219, BIG_ENDIAN)\n '\\x12\\x19'\n >>> uint2bytes(0x1219, BIG_ENDIAN, 4) # 32 bits\n '\\x00\\x00\\x12\\x19'\n >>> uint2bytes(0x1219, LITTLE_ENDIAN, 4) # 32 bits\n '\\x19\\x12\\x00\\x00'\n \"\"\"\n assert (not size and 0 < value) or (0 <= value)\n assert endian in (LITTLE_ENDIAN, BIG_ENDIAN)\n text = []\n while (value != 0 or text == \"\"):\n byte = value % 256\n text.append( chr(byte) )\n value >>= 8\n if size:\n need = max(size - len(text), 0)\n else:\n need = 0\n if need:\n if endian is BIG_ENDIAN:\n text = chain(repeat(\"\\0\", need), reversed(text))\n else:\n text = 
chain(text, repeat(\"\\0\", need))\n else:\n if endian is BIG_ENDIAN:\n text = reversed(text)\n return \"\".join(text)\n\ndef _createStructFormat():\n \"\"\"\n Create a dictionnary (endian, size_byte) => struct format used\n by bytes2uint() to convert raw data to positive integer.\n \"\"\"\n format = {\n BIG_ENDIAN: {},\n LITTLE_ENDIAN: {},\n }\n for struct_format in \"BHILQ\":\n try:\n size = calcsize(struct_format)\n format[BIG_ENDIAN][size] = '>%s' % struct_format\n format[LITTLE_ENDIAN][size] = '<%s' % struct_format\n except struct_error:\n pass\n return format\n_struct_format = _createStructFormat()\n\ndef bytes2uint(data, endian):\n r\"\"\"\n Convert a bytes string into an unsigned integer.\n\n >>> chr(bytes2uint('*', BIG_ENDIAN))\n '*'\n >>> bytes2uint(\"\\x00\\x01\\x02\\x03\", BIG_ENDIAN) == 0x10203\n True\n >>> bytes2uint(\"\\x2a\\x10\", LITTLE_ENDIAN) == 0x102a\n True\n >>> bytes2uint(\"\\xff\\x14\\x2a\\x10\", BIG_ENDIAN) == 0xff142a10\n True\n >>> bytes2uint(\"\\x00\\x01\\x02\\x03\", LITTLE_ENDIAN) == 0x3020100\n True\n >>> bytes2uint(\"\\xff\\x14\\x2a\\x10\\xab\\x00\\xd9\\x0e\", BIG_ENDIAN) == 0xff142a10ab00d90e\n True\n >>> bytes2uint(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\", BIG_ENDIAN) == (2**64-1)\n True\n \"\"\"\n assert 1 <= len(data) <= 32 # arbitrary limit: 256 bits\n try:\n return unpack(_struct_format[endian][len(data)], data)[0]\n except KeyError:\n pass\n\n assert endian in (BIG_ENDIAN, LITTLE_ENDIAN)\n shift = 0\n value = 0\n if endian is BIG_ENDIAN:\n data = reversed(data)\n for character in data:\n byte = ord(character)\n value += (byte << shift)\n shift += 8\n return value\n\n","repo_name":"clem1/segvault","sub_path":"fusil/fusil/bits.py","file_name":"bits.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"32"} +{"seq_id":"27899029030","text":"from ParserInfo import ParserInfo\nfrom math import fabs\nfrom Annotation import Annotation\nimport scipy\nimport re\n\nclass ExtractFeatures():\n \"\"\"description of class\"\"\"\n _sentence = \"\"\n _tagInfo = [] #[(start, end, id, cat, pos)]\n _np = () #(start, end)\n _math = () #(start, end)\n _depTree = [] #[(relation, first token, second token)]\n _ann = ''\n _charPreCount = 0\n\n _between_start = 0\n _between_end = 0\n \n _np_wordidx = 0 #starting word\n _np_endidx = 0\n _mt_wordidx = 0 #starting word\n _mt_endidx = 0\n\n def __init__(self, sentence, tag, np, math, depTree, annotation, preCount=None):\n self._sentence = sentence\n self._tagInfo = tag\n self._np = self.__validNP(np, math)\n self._math = math\n self._depTree = depTree\n if annotation != None:\n self._ann = annotation\n self._charPreCount = preCount\n\n idx = [self._np[0], self._np[1], self._math[0], self._math[1]]\n idx.sort()\n self._between_start = idx[1]\n self._between_end = idx[2]\n\n def __validNP(self, np, math):\n mathtoken = self._sentence[math[0]:math[1]]\n nptoken = self._sentence[np[0]:np[1]]\n if nptoken.rstrip().endswith(mathtoken.strip()):\n originalNPEndIdx = [i for i,tag in enumerate(self._tagInfo) if tag[1] == np[1]][0]\n return (np[0], self._tagInfo[originalNPEndIdx - 1][1])\n else:\n return np\n\n def __mathNPValid(self):\n return not(self._math[0] >= self._np[0] and self._math[1] <= self._math[1])\n\n def __checkPrepPre(self, mathname, desc_idx):\n i = self._np_wordidx\n for x in range(self._np_wordidx - 1, -1, -1):\n if self._tagInfo[x][4] == 'IN' and self._sentence[self._tagInfo[x][0]:self._tagInfo[x][1]] in 
self._ann._mathtext[mathname][desc_idx]:\n return False\n elif self._sentence[self._tagInfo[x][0]:self._tagInfo[x][1]] not in self._ann._mathtext[mathname][desc_idx]:\n break\n return True\n\n def __checkPrepEnd(self, mathname, desc_idx):\n i = self._np_wordidx\n for x in range(self._np_endidx - 1, self._np_wordidx - 1, -1):\n if self._tagInfo[x][4] == 'IN' and self._sentence[self._tagInfo[x][0]:self._tagInfo[x][1]] not in self._ann._mathtext[mathname][desc_idx]:\n return False\n return True\n\n def isDescription(self, relv):\n lengthPreSpaces = sum([1 for c in self._sentence[:self._np[0]] if c == ' '])\n lengthsEndSpaces = sum([1 for c in self._sentence[:self._np[1]] if c == ' '])\n mathname = re.match(r'MATH_\\d*', self._sentence[self._math[0]:self._math[1]])\n descriptionsOfMath = self._ann._math[mathname.group(0)]\n\n annStartIdx = [tok for tok,idx in self._ann._tokens.items() if idx[0] <= (self._charPreCount + self._np[0] - lengthPreSpaces) < idx[1]][0]\n annEndIdxs = [tok for tok,idx in self._ann._tokens.items() if idx[0] <= (self._charPreCount + self._np[1] - lengthsEndSpaces) <= idx[1]]\n annEndIdx = annEndIdxs[0] if len(annEndIdxs) > 0 else max(self._ann._tokens.keys())\n \n for desc_i in range(len(descriptionsOfMath)):\n desc = descriptionsOfMath[desc_i]\n if (desc[0] == annStartIdx and desc[-1] == annEndIdx) or (relv and desc[0] <= annStartIdx <= desc[-1] and self.__checkPrepPre(mathname.group(0), desc_i)) or (not relv and desc[-1] == annEndIdx and self.__checkPrepEnd(mathname.group(0), desc_i)):\n return True, annStartIdx, annEndIdx\n return False, annStartIdx, annEndIdx\n\n def FirstFeature(self):\n if self.__mathNPValid():\n return ':' in self._sentence[self._between_start:self._between_end], ',' in self._sentence[self._between_start:self._between_end], '__MATH_' in self._sentence[self._between_start:self._between_end]\n else:\n return False, False, False\n\n def SecondFeature(self):\n if self.__mathNPValid():\n if self._math[1] < self._np[0]:\n return '(' in self._sentence[self._math[1]:self._np[0]] and ')' in self._sentence[self._np[1]:]\n elif self._np[1] < self._math[0]:\n return '(' in self._sentence[:self._np[0]] and ')' in self._sentence[self._np[1]:self._math[0]]\n return False\n\n def ThirdFeature(self):\n self._np_wordidx = [i for i,tag in enumerate(self._tagInfo) if tag[0] == self._np[0]][0]\n self._np_endidx = [i for i,tag in enumerate(self._tagInfo) if tag[1] == self._np[1]][0]\n\n self._mt_wordidx = [i for i,tag in enumerate(self._tagInfo) if tag[0] == self._math[0]][0]\n self._mt_endidx = [i for i,tag in enumerate(self._tagInfo) if tag[1] == self._math[1]][0]\n\n return (self._mt_wordidx - self._np_endidx - 1) if self._mt_wordidx > self._np_endidx else (self._np_wordidx - self._mt_endidx - 1) \n\n def FourthFeature(self):\n return self._math[0] <= self._np[0]\n\n def FifthFeature(self):\n verbtag = []\n\n if self._np_wordidx < self._mt_wordidx:\n verbs = [tag for tag in self._tagInfo[self._np_endidx+1:self._mt_wordidx] if tag[3] == 'V']\n verbtag = verbs[0] if len(verbs) > 0 else None\n else:\n verbs = [tag for tag in self._tagInfo[self._mt_endidx+1:self._np_wordidx] if tag[3] == 'V']\n verbtag = verbs[0] if len(verbs) > 0 else None\n return self._sentence[verbtag[0]:verbtag[1]] if verbtag != None else ''\n\n def SixthFeature(self, amount):\n presurf = []\n prepos = []\n nextsurf = []\n nextpos = []\n\n i = self._np_wordidx\n if self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\"):\n presurf.append(\"OTHERMATH\")\n 
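# Mirror the placeholder into the POS list as well, so the surface window and the POS window stay aligned index-for-index.\n 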
prepos.append(\"OTHERMATH\")\n else:\n presurf.append(self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]])\n prepos.append(self._tagInfo[i][4])\n\n i -= 1\n while i >= 0:\n if i == self._mt_endidx:\n presurf.insert(0, \"MATH\")\n prepos.insert(0, \"MATH\")\n elif i != self._mt_endidx and self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\"):\n presurf.insert(0, \"OTHERMATH\")\n prepos.insert(0, \"OTHERMATH\")\n else:\n presurf.insert(0, self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]])\n prepos.insert(0, self._tagInfo[i][4])\n \n if len(presurf) == amount:\n break\n i -= 1\n \n j = self._np_endidx\n if self._sentence[self._tagInfo[j][0]:self._tagInfo[j][1]].startswith(\"__MATH_\"):\n nextsurf.append(\"OTHERMATH\")\n nextpos.append(\"OTHERMATH\")\n else:\n nextsurf.append(self._sentence[self._tagInfo[j][0]:self._tagInfo[j][1]])\n nextpos.append(self._tagInfo[j][4])\n\n j += 1\n while j < len(self._tagInfo):\n if j == self._mt_wordidx:\n nextsurf.append(\"MATH\")\n nextpos.append(\"MATH\")\n elif j != self._mt_wordidx and self._sentence[self._tagInfo[j][0]:self._tagInfo[j][1]].startswith(\"__MATH_\"):\n nextsurf.append(\"OTHERMATH\")\n nextpos.append(\"OTHERMATH\")\n else:\n nextsurf.append(self._sentence[self._tagInfo[j][0]:self._tagInfo[j][1]])\n nextpos.append(self._tagInfo[j][4])\n\n if len(nextsurf) == amount:\n break\n j += 1\n\n return presurf, prepos, nextsurf, nextpos\n\n def SeventhFeature(self, amount):\n presurf = []\n prepos = []\n nextsurf = []\n nextpos = []\n\n i = self._mt_wordidx - 1\n\n while i >= 0:\n if self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\"):\n presurf.insert(0, \"OTHERMATH\")\n prepos.insert(0, \"OTHERMATH\")\n else:\n presurf.insert(0, self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]])\n prepos.insert(0, self._tagInfo[i][4])\n \n if len(presurf) == amount:\n break\n i -= 1\n \n j = self._mt_endidx + 1\n\n while j < len(self._tagInfo):\n if self._sentence[self._tagInfo[j][0]:self._tagInfo[j][1]].startswith(\"__MATH_\"):\n nextsurf.append(\"OTHERMATH\")\n nextpos.append(\"OTHERMATH\")\n else:\n nextsurf.append(self._sentence[self._tagInfo[j][0]:self._tagInfo[j][1]])\n nextpos.append(self._tagInfo[j][4])\n\n if len(nextsurf) == amount:\n break\n j += 1\n\n return presurf, prepos, nextsurf, nextpos\n\n def EighthFeature(self, ptn1, ptn2, ptn3, ptn4, ptn5, ptn6):\n temp_sent = self._sentence[:self._np[0]] + \"NP\" + self._sentence[self._np[1]:]\n temp_sent = temp_sent.replace(self._sentence[self._math[0]:self._math[1]], 'MATH')\n \n ans1 = ptn1.search(temp_sent) != None\n ans2 = ptn2.search(temp_sent) != None\n ans3 = ptn3.search(temp_sent) != None\n ans4 = ptn4.search(temp_sent) != None\n ans5 = ptn5.search(temp_sent) != None\n ans6 = ptn6.search(temp_sent) != None\n ans7 = False\n\n preIndex = self._mt_wordidx - 1\n beginDetectedIndex = 0\n endDetectedIndex = 0\n \n if preIndex > -1:\n if self._tagInfo[preIndex][3] == \"N\" and not self._sentence[self._tagInfo[preIndex][0]:self._tagInfo[preIndex][1]].startswith(\"__MATH_\"):\n endDetectedIndex = preIndex\n\n i = preIndex\n while(i>=0):\n if (self._tagInfo[i][3] == \"N\" and not self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\")) or (self._tagInfo[i][3] == \"ADJ\" and self._tagInfo[i+1][3] == \"N\" and not self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\")):\n beginDetectedIndex = i\n elif self._tagInfo[i][3] == \"D\":\n beginDetectedIndex = i\n break\n else:\n break\n i -= 1\n 
else:\n searchIndex = preIndex\n while searchIndex >= 0 and (self._sentence[self._tagInfo[searchIndex][0]:self._tagInfo[searchIndex][1]].startswith(\"__MATH_\") or self._sentence[self._tagInfo[searchIndex][0]:self._tagInfo[searchIndex][1]] == \"and\" or self._sentence[self._tagInfo[searchIndex][0]:self._tagInfo[searchIndex][1]] == \"or\" or self._sentence[self._tagInfo[searchIndex][0]:self._tagInfo[searchIndex][1]] == \",\"):\n searchIndex -= 1\n\n i = searchIndex\n while i >= 0:\n if (self._tagInfo[i][3] == \"N\" and self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\")) or (self._tagInfo[i][3] == \"ADJ\" and self._tagInfo[i+1][3] == \"N\" and not self._sentence[self._tagInfo[i][0]:self._tagInfo[i][1]].startswith(\"__MATH_\")):\n beginDetectedIndex = i\n if i == searchIndex:\n endDetectedIndex = i\n elif self._tagInfo[i][3] == \"D\":\n beginDetectedIndex = i\n break\n else:\n break\n i -= 1\n if beginDetectedIndex == self._np_wordidx and endDetectedIndex == self._np_endidx:\n ans7 = True\n\n return ans1, ans2, ans3, ans4, ans5, ans6, ans7\n\n #Feature 9 and Feature 10 need a dependency graph\n def NinthFeature(self):\n NotImplemented\n\n def PreTenthFeature(self):\n return self._np_wordidx, self._np_endidx, self._mt_wordidx\n","repo_name":"frozstone/MLPython","sub_path":"MLPython/ExtractFeatures.py","file_name":"ExtractFeatures.py","file_ext":"py","file_size_in_byte":11846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71003249051","text":"import psycopg2 as Database\nfrom django_rq import job\nfrom psycopg2.extras import DictCursor\n\nimport customers.models\nfrom django.conf import settings\nfrom django.db import models\nfrom odoo.odoo_client import OdooClient as client\nfrom products.utils import smtp_send\n\n\n@job\ndef send_invitation_email(**kwargs):\n smtp_send(\n subject=kwargs[\"subject\"],\n emails=kwargs[\"emails\"],\n body=kwargs[\"body\"],\n html_body=kwargs[\"html_body\"],\n )\n\n\nclass Role(models.TextChoices):\n ADMIN = \"admin\"\n STAFF = \"staff\"\n\n\nclass InvitationStatus(models.TextChoices):\n PENDING = \"pending\"\n APPROVED = \"approved\"\n REJECTED = \"rejected\"\n\n\ndef get_company_id_by_code(ref):\n # get all companies from Odoo\n conn = Database.connect(settings.ODOO_DB_URI)\n cursor = conn.cursor(cursor_factory=DictCursor)\n query_str = \"\"\"SELECT id\n FROM res_partner e\n WHERE e.ref = %(ref)s\n \"\"\"\n cursor.execute(query_str, {\"ref\": ref})\n\n odoo_id = cursor.fetchone()\n cursor.close()\n return odoo_id[0]\n\n\ndef sync_address_to_odoo(address_obj, *args, **kwargs):\n \"\"\"Update or create address for company when edit or add on the django\"\"\"\n # mapping fields between django and odoo\n fields_map = {\n \"name\": \"name\",\n \"street_address_1\": \"street\",\n \"street_address_2\": \"street2\",\n \"city\": \"city\",\n \"address_postal\": \"zip\",\n \"latitude\": \"latitude\",\n \"longitude\": \"longitude\",\n }\n # get data, return dict\n data = {}\n for field_name, field_value in fields_map.items():\n data[field_value] = getattr(address_obj, field_name)\n\n odoo_id = address_obj.odoo_id\n # create new address\n if odoo_id == 0:\n company = address_obj.company_id\n # check and create new address if the company does exists\n if company and company.uuid:\n parent_id = get_company_id_by_code(company.company_code)\n data[\"parent_id\"] = parent_id\n data[\"type\"] = \"delivery\"\n res = client.Instance().ResPartner.create(data)\n add_obj = 
customers.models.Addresses.objects.get(uuid=address_obj.uuid)\n add_obj.odoo_id = res.id\n add_obj.save()\n else:\n client.Instance().ResPartner.write([odoo_id], data)\n","repo_name":"Kiennguyen97/vue_django_demo","sub_path":"back-end/src/customers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8838533775","text":"#!/usr/bin/env python3\n\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nfrom logger import init_logger\nfrom scheduler_job import Job\n\nscheduler_logger = init_logger('scheduler logger')\n\n# initialize scheduler\nSCHEDULER = BackgroundScheduler()\nSCHEDULER.start()\nscheduler_logger.info('SCHEDULER started')\n\n\nclass Scheduler(object):\n\n def __init__(self):\n self.hour = 12\n self.job = Job()\n scheduler_logger.info('SCHEDULER initialized to run at {}'.format(self.hour))\n\n def add_get_all_fans_job(self):\n SCHEDULER.add_job(self.job.get_all_fans_job, 'cron', hour=self.hour, minute=00)\n scheduler_logger.info('adding job to fire at: {}'.format(self.hour))\n\n\nif __name__ == '__main__':\n Scheduler().add_get_all_fans_job()\n","repo_name":"LuckCky/counterbot","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12976706039","text":"from func.AdEx_meta import *\nfrom func.helpers import *\nimport sys\nJ =[1.4]#,700.,700.,700.,700.,500.,510.,510.,450.]#400.,450.,450.,450.,600.]#[3822.]#[392.,427.,540.,854.,1709.,3822.]#[ 450., 450., 450.,450.]#[400.,450.,450.,450.,600.]#[400.,400.,400.,400.,400.]\n\ng =5.#np.float(sys.argv[1])#4.0#(NE/NI)\neta =0.5#np.float(sys.argv[2])#0.5\n\nd = [3.5]\nNI =200#,100,200,250,300,500,700,800,950]#,200,500,800,990]#[40,200,800,3200,16000]#[50,200,500,800,950,990]#[200,500,800,950]#[200,800,2000,3200,3800]\nsim_time= 10000#110\ndirectory = 'sim/AdEx_test'\nNE =int(1000-NI)#int(5000-NI)\n\njA = J[0]\nj = jA\nsimulation ='AdEx_J=%s_g=%s_eta=%s_NI=%s_N_tot=%s_sim_time=%s'%(J[0],g,eta,NI,NI+NE,sim_time)\n#simulation='tsodyks_j=%s_g=%s_eta=%s_NI=%s_u=%s_tau_rec=%s_tau_fac=%s_Nt=%s'%(j,g,eta,NI,U,tau_rec,tau_fac,NI+NE)\nA = AdExModel(directory= directory,\n simulation = simulation,\n g=np.round(g,decimals=3), # inhibitor0y strenght\n eta = np.round(eta,decimals=3),\n d=d, # synaptic delay\n J=jA, #synaptic strengthd\n NE =NE, # fraction of inh neurons\n NI= NI,\n N_rec = NE+NI,\n epsilon = 0.1,\n tauMem =40.,\n simtime=sim_time,\n master_seed = 2000,\n verbose = True,\n chunk= False,\n chunk_size= 50000,\n voltage = False)\nA.build(tau_w = 100000., a=0.,b = 0., constant_k=[0.1,0.1])#rate = 180.)#196\nA.connect(j_cnqx=False)\nA.run()\n#simulation = 'comp'\nsimulation ='LIF_J=%s_g=%s_eta=%s_NI=%s_N_tot=%s_sim_time=%s'%(J[0],g,eta,NI,NI+NE,sim_time)\nB = meta_brunel(directory= directory,\n simulation = simulation,\n g=np.round(g,decimals=3), # inhibitor0y strenght\n eta = np.round(eta,decimals=3),\n d=d, # synaptic delay\n J=jA, #synaptic strengthd\n NE =NE, # fraction of inh neurons\n NI= NI,\n N_rec = NE+NI,\n epsilon = 0.1,\n tauMem =40.,\n simtime=sim_time,\n master_seed = 1000,\n verbose = True,\n chunk= False,\n chunk_size= 50000,\n voltage = False)\nB.build()#tau_w = 100., a=0.,b = 0., constant_k=[0.1,0.1])#rate = 
180.)#196\nB.connect()#(j_cnqx=False)\nB.run()\n","repo_name":"OlegFS/N_balance","sub_path":"AdEx_test.py","file_name":"AdEx_test.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23056595846","text":"'''\nWrite a function to tokenize a given string and return a dictionary with the frequency of\neach word\n'''\n\ndef tokenize(words):\n # Count each word once on first sight, then increment on every repeat;\n # dictionaries have no .count() method, so start the tally at 1.\n freq = dict()\n for word in words:\n  if word not in freq:\n   freq[word] = 1\n  else:\n   freq[word] += 1\n return freq\n\n\ndef main():\n lines = int(input())\n words = list()\n for _ in range(lines):\n  # Collect the words of every input line into one flat list.\n  words.extend(input().split(\" \"))\n print(tokenize(words))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"reddy2018bharath/CSPP-1-assignments","sub_path":"M22/assignment3/tokenize.py","file_name":"tokenize.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36741088621","text":"#!/usr/bin/python3\n\n#\n# Import migen\n#-----------------------------------------------------------------------\t\nfrom migen import *\nfrom litex.soc.interconnect.csr import *\n\n\n\n\n# Main module\nclass Video(Module,AutoCSR):\n def __init__(self):\n ##Inputs\n \n self.CLK = Signal()\n self.Reset = Signal() \n\n ##Outputs \n \n ##TFT\n\n self.TFT_SPI_MOSI = Signal()\n self.TFT_SPI_CLK = Signal()\n self.TFT_RS = Signal()\n self.TFT_RST = Signal()\n self.TFT_SPI_CS = Signal()\n\n \n ##Internal values\n self.TilesControlRegisterCSR = CSRStorage(14) ##[13:5] Tile position [4:1] Tile\n self.TilesControlRegister = Signal(14)\n \n \n \n\n self.specials += Instance(\"Video\",\n\n i_Reset = self.Reset,\n i_CLK = self.CLK, \n i_TilesControlRegister = self.TilesControlRegister, \n o_TFT_SPI_CLK = self.TFT_SPI_CLK,\n o_TFT_SPI_CS = self.TFT_SPI_CS,\n o_TFT_SPI_MOSI = self.TFT_SPI_MOSI,\n o_TFT_RST = self.TFT_RST,\n o_TFT_RS = self.TFT_RS, \n \n )\n \n self.comb += self.TilesControlRegister.eq(self.TilesControlRegisterCSR.storage)\n \n","repo_name":"caverar/SoC","sub_path":"Hardware/Video.py","file_name":"Video.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30600816518","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# You are given a string and your task is to swap cases. 
\n# In other words, convert all lowercase letters to uppercase letters and vice versa.\n\n# Function Description\n\n# Complete the swap_case function in the editor below.\n# swap_case has the following parameters:\n# string s: the string to modify\n\n# Returns\n\n# string: the modified string\n\n# Input Format\n\n# A single line containing a string s.\n\ndef swap_case(s):\n x = \"\"\n for c in s:\n if c.islower():\n c = c.upper()\n else:\n c = c.lower()\n x += c\n return x\n\ns = input()\nresult = swap_case(s)\nprint(result)\n\n# Another Method is to use swapcase() function as follows:\n# def swap_case(s):\n# return s.swapcase()\n\n","repo_name":"sohilsharma1996/Innomatics-Internship-July-2022","sub_path":"TASK - 4 (Basic Python Programming)/sWAP cASE(1).py","file_name":"sWAP cASE(1).py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"9848604392","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, scoped_session, Session\nfrom settings import settings\n# module for async database queries\n# database = databases.Database(settings.sqlite_database_url)\n\nengine = create_engine(\n settings.database_url,\n # connect_args={\"check_same_thread\": False}\n)\n\nBase = declarative_base()\nBase.metadata.create_all(engine)\nLocalSession = scoped_session(sessionmaker(\n autocommit=False, autoflush=False, bind=engine))\n\nBase.query = LocalSession.query_property()\n\n\ndef get_db():\n # Yield rather than return: with a plain return the finally block would\n # close the session before the caller could use it.\n db: Session = LocalSession()\n try:\n  yield db\n finally:\n  db.close()\n","repo_name":"4crash/gracall-test","sub_path":"db_lib/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34699219908","text":"def diferenca():\n # Project Euler 6: difference between the square of the sum and the sum\n # of the squares of 1..100; range() already advances i, so no manual step.\n soma1 = []\n soma2 = []\n for i in range(101):\n  soma1.append(i**2)\n for i in range(101):\n  soma2.append(i)\n somadosquadrados = sum(soma1)\n quadradodassomas = sum(soma2)**2\n return quadradodassomas - somadosquadrados\n","repo_name":"CCoxinho/Project_Euler_Python","sub_path":"exercicio6.py","file_name":"exercicio6.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70394068572","text":"import struct\n\nclass IPC(object):\n def __init__(self, sock):\n  super(IPC, self).__init__()\n  self.sock = sock\n\n def send(self, data):\n  \"\"\"\n  Send arbitrary data to the process via self.sock\n  \"\"\"\n  data = data.encode('utf-8')\n  # Length-prefix framing: pack the payload size first so the receiver\n  # knows how many bytes make up one message.\n  header = struct.pack('<I', len(data))\n  self.sock.send(header + data)\n\n def __repr__(self):\n  return \"<%s pid:%s>\" % (self.__class__.__name__, self.pid)\n","repo_name":"devopspp/pyliveupdate","sub_path":"pyliveupdate/ipc.py","file_name":"ipc.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"32"} +{"seq_id":"72074403930","text":"# -*- coding: utf-8 -*-\r\n# Etienne Glossi - etienne.glossi@gmail.com\r\n# PyPoker: game.card.definitions\r\n# Dictionaries holding every card in the deck as well as the possible hand combinations.\n# Used mainly to display the cards.\n# 20 December 2009\n\r\ncartes = {\n\r\n #coeur\r\n '14co': 'As de coeur',\r\n '02co': '2 de coeur',\r\n '03co': '3 de coeur',\r\n '04co': '4 de coeur',\r\n '05co': 
'5 de coeur',\r\n '06co': '6 de coeur',\r\n '07co': '7 de coeur',\r\n '08co': '8 de coeur',\r\n '09co': '9 de coeur',\r\n '10co': '10 de coeur',\r\n '11co': 'Valet de coeur',\r\n '12co': 'Dame de coeur',\r\n '13co': 'Roi de coeur',\r\n\r\n #carreau\r\n '14ca': 'As de carreau',\r\n '02ca': '2 de carreau',\r\n '03ca': '3 de carreau',\r\n '04ca': '4 de carreau',\r\n '05ca': '5 de carreau',\r\n '06ca': '6 de carreau',\r\n '07ca': '7 de carreau',\r\n '08ca': '8 de carreau',\r\n '09ca': '9 de carreau',\r\n '10ca': '10 de carreau',\r\n '11ca': 'Valet de carreau',\r\n '12ca': 'Dame de carreau',\r\n '13ca': 'Roi de carreau',\r\n\r\n #trèfle\r\n '14tr': 'As de trefle',\r\n '02tr': '2 de trefle',\r\n '03tr': '3 de trefle',\r\n '04tr': '4 de trefle',\r\n '05tr': '5 de trefle',\r\n '06tr': '6 de trefle',\r\n '07tr': '7 de trefle',\r\n '08tr': '8 de trefle',\r\n '09tr': '9 de trefle',\r\n '10tr': '10 de trefle',\r\n '11tr': 'Valet de trefle',\r\n '12tr': 'Dame de trefle',\r\n '13tr': 'Roi de trefle',\n \r\n #pique\r\n '14pi': 'As de pique',\r\n '02pi': '2 de pique',\r\n '03pi': '3 de pique',\r\n '04pi': '4 de pique',\r\n '05pi': '5 de pique',\r\n '06pi': '6 de pique',\r\n '07pi': '7 de pique',\r\n '08pi': '8 de pique',\r\n '09pi': '9 de pique',\r\n '10pi': '10 de pique',\r\n '11pi': 'Valet de pique',\r\n '12pi': 'Dame de pique',\r\n '13pi': 'Roi de pique'\r\n}\r\n\r\ncombinaisons = {\r\n 0: 'une Carte Haute',\r\n 1: 'une Paire',\r\n 2: 'une Double Paire',\r\n 3: 'un Brelan',\r\n 4: 'une Suite',\r\n 5: 'une Couleur à %s %s',\r\n 6: 'un Full',\r\n 7: 'un Carre de %s',\r\n 8: 'une Quinte Flush %s',\r\n 9: 'une Quinte Flush Royale !'\r\n}\r\n\r\ndisp = {\r\n 2 : 'deux',\r\n 3 : 'trois',\r\n 4 : 'quatre',\r\n 5 : 'cinq',\r\n 6 : 'six',\r\n 7 : 'sept',\r\n 8 : 'huit',\r\n 9 : 'neuf',\r\n 10 : 'dix',\r\n 11 : 'valet',\r\n 12 : 'dame',\r\n 13 : 'roi',\r\n 14 : 'as',\r\n 'tr': 'trefle',\r\n 'pi': 'pique',\r\n 'ca': 'carreau',\r\n 'co': 'coeur'\r\n}\n","repo_name":"corpg/simPyPoker","sub_path":"game/card/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34701939590","text":"class CardsSorter:\n def __init__(self, input_data):\n self.input_data = input_data['cards']\n self.cache = {}\n self.output = []\n self.message = []\n self.error = None\n\n def _create_cache(self):\n for index, card in enumerate(self.input_data):\n self._add_item_to_cache(card[\"from\"], \"from\", index)\n self._add_item_to_cache(card[\"to\"], \"to\", index)\n\n def _add_item_to_cache(self, city_name, direction_key, card_id):\n if city_name not in self.cache:\n self.cache[city_name] = {}\n self.cache[city_name][\"count\"] = 0\n if direction_key not in self.cache[city_name]:\n self.cache[city_name][direction_key] = card_id\n self.cache[city_name][\"count\"] += 1\n else:\n raise Exception(f\"There are two cards with {city_name} as {direction_key}\")\n\n def _get_trip_start(self):\n trip_start_card_id = None\n trip_end_card_id = None\n for item in self.cache:\n trip_start_card_id = self._validate(self.cache[item], trip_start_card_id, \"from\")\n trip_end_card_id = self._validate(self.cache[item], trip_end_card_id, \"to\")\n return trip_start_card_id\n\n def _validate(self, cache_item, trip_start_or_end, direction_key):\n if cache_item[\"count\"] == 1 and direction_key in cache_item:\n if trip_start_or_end is None:\n return cache_item[direction_key]\n else:\n city_1 = 
self.input_data[trip_start_or_end][direction_key]\n city_2 = self.input_data[cache_item[direction_key]][direction_key]\n raise Exception(\n f\"There are two candidates for journey {direction_key}: {city_1} and {city_2}\")\n else:\n return trip_start_or_end\n\n def _sort(self, card_id):\n for _ in self.input_data:\n self.output.append(self.input_data[card_id])\n city_to = self.input_data[card_id]['to']\n if \"from\" in self.cache[city_to]:\n card_id = self.cache[city_to]['from']\n\n def _prepare_message(self):\n for index, item in enumerate(self.output):\n if item['connection_number'] != \"\":\n transport = f\"{item['transport_type']} {item['connection_number']}\"\n else:\n transport = item['transport_type']\n if item['seat'] != \"\":\n seat_message = f\"Take seat #{item['seat']}\"\n else:\n seat_message = f\"No seat assignment\"\n self.message.append(\n f\"{index + 1}. Take {transport} from {item['from']} to {item['to']}. {seat_message}. {item['extra_data']}\")\n self.message.append(f\"{len(self.output)+1}. Your reached your final destination.\")\n\n def process_cards(self):\n try:\n self._create_cache()\n card_id = self._get_trip_start()\n self._sort(card_id)\n self._prepare_message()\n except Exception as err:\n self.error = err.args[0]\n\n def get_sorted_cards_and_message(self):\n return {\"cards\": self.output,\n \"message\": self.message}\n\n\n\n","repo_name":"khrystynaplakhtiy/travel_cards_api","sub_path":"cards_sorter.py","file_name":"cards_sorter.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4483741019","text":"import json\nimport socket\nimport select\nimport time\nimport re\nimport collections\nfrom datetime import datetime as dt, timedelta\n#from pync import Notifier\n\nimport irc_argument_parser as irc_ap\nfrom irc_argument_parser import ArgumentParser, LeaddArgument, SfsArgument\nfrom peewee_db import todos, todos_pvp, Todos_Todolist\nfrom battleparser import BattleParserIrc\n\n#os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"src.settings\")\n#import django\n#django.setup()\n\n# result_receiving = 'abc !lead add livvo legate'\n# found_plugin = [x for x in ArgumentParser().get_subclasses() if x in result_receiving]\n# plugin_class = \"{}Argument\".format(found_plugin.pop().replace('!', '').capitalize())\n#\n# plugin_class = getattr(irc_ap, plugin_class)\n# print(plugin_class('!lead add livvo legate'))\n# print(plugin_class('!lead list'))\n# print(plugin_class('!lead add livvo legate').data)\n# print(plugin_class('!lead list').data)\n# exit()\n#\ndef min_max_str(data, length=6, rev=False):\n number = data\n total = length - len(str(number))\n if ' ' in str(number):\n name, number_rev = str(number).split(' ')\n if rev == False:\n return \"\".join([\"\".join([\" \" for x in range(1, total)]), str(number),])\n else:\n if ' ' in str(number):\n total = length - len(name) - len(str(number_rev))\n if ' ' in str(number):\n first_join = [name, \"\".join([\" \" for x in range(1, total)]), str(number_rev)]\n else:\n first_join = [number, \"\".join([\" \" for x in range(1, total)])]\n return \"\".join(first_join)\nsock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nsock1.connect(('irc.gamesurge.net', 6667))\n#sock2.connect(('irc.gamesurge.net', 6667))\nconnected_sockets = []\nchannels = ['#DesertAndPlains'] # '#LivvTest', '#LivvTest2', '#DesertAndPlains'\nchannels = ['#TLK-Dev', '#raberber'] #'#mountainclan'] # 
'#LivvTest', '#LivvTest2', '#DesertAndPlains'\nconnected_channels = []\npinged_sockets = []\n\nbackground_white_color_black = '\\x031,0'\nbackground_red_color_white = '\\x034,0'\nbackground_end = '\\x03'\n\nsfs = False\npvp = False\nreminders = []\n\nglobal flood_protection\nflood_protection = []\n\n\ndef check_flood_protection():\n timestamp_now = dt.utcnow().timestamp()\n global flood_protection\n print('FLOOD LEN {}'.format(len(flood_protection)))\n for item in flood_protection:\n if item < (timestamp_now - 12):\n flood_protection.remove(item)\n if len(flood_protection) > 5:\n print('FLOOD', len(flood_protection))\n print(flood_protection[0] < timestamp_now)\n print(flood_protection[0], timestamp_now)\n print((timestamp_now - flood_protection[0]))\n time.sleep(2)\n check_flood_protection()\n if len(flood_protection) > 6:\n print('Waaaait FLOOODING CLOSE!')\n time.sleep(3)\n\n print('check_flood_protection after checks')\n\n return True\n\ndef send_to_nick(sock, nick, message):\n send_message(sock, nick, message)\n\n\ndef send_to_channel(sock, channel, message):\n if isinstance(channel, list):\n channels = channel\n for channel in channels:\n send_message(sock, channel, message)\n else:\n send_message(sock, channel, message)\n\n\ndef send_message(sock, target, message):\n date_custom = dt.utcnow()\n message = \"PRIVMSG {target} :{message}\\n\".format(target=target, message=message)\n print(message)\n check_flood_protection()\n sock.send(bytes(message, 'ascii'))\n global flood_protection\n flood_protection.append(dt.utcnow().timestamp())\n\n\ndef send_notice(sock, target, message):\n date_custom = dt.utcnow()\n message = \"NOTICE {target} :{message} {date}\\n\".format(target=target, message=message, date=date_custom)\n print(message)\n check_flood_protection()\n sock.send(bytes(message, 'ascii'))\n global flood_protection\n flood_protection.append(dt.utcnow().timestamp())\n\nseconds_running = 0\nwhile 1:\n # Await a read event\n rlist, wlist, elist = select.select([sock1,], [], [], 1)\n # Test for timeout\n if [rlist, wlist, elist] == [[], [], []]:\n\n #print(\"Five seconds elapsed.\")\n\n if sock1 not in connected_sockets:\n sock1.send(bytes(\"NICK BotLivvo\\r\\n\", 'ascii'))\n sock1.send(bytes(\"USER BotLivvo AAA AAA AAA\\r\\n\", 'ascii'))\n connected_sockets.append(sock1)\n time.sleep(1)\n\n if sock1 in connected_sockets and sock1 in pinged_sockets and channels != connected_channels and seconds_running > 10:\n diff_channels = [x for x in channels if x not in connected_channels]\n for channel in diff_channels:\n if channel == '#DesertAndPlains':\n channel = '#DesertAndPlains iguessineedapassword'\n channel_message = 'JOIN {}\\n'.format(channel)\n print(channel_message)\n sock1.send(bytes(channel_message, 'ascii'))\n if channel == '#DesertAndPlains iguessineedapassword':\n channel = '#DesertAndPlains'\n connected_channels.append(channel)\n\n if sock1 in connected_sockets and sock1 in pinged_sockets and channels == connected_channels and seconds_running > 20:\n if pvp and len(todos_pvp) > 0:\n data = json.loads(todos_pvp[0].data)\n attacker_types = [x for x in json.loads(todos_pvp[0].data)['attacker']]\n attacker_tot = sum([int(json.loads(todos_pvp[0].data)['attacker'][x]['Tot']) for x in attacker_types])\n attacker_inj = sum([int(json.loads(todos_pvp[0].data)['attacker'][x]['Inj']) for x in attacker_types])\n attacker_dead = sum([int(json.loads(todos_pvp[0].data)['attacker'][x]['Dead']) for x in attacker_types])\n attacker_leftover = attacker_tot - attacker_inj - attacker_dead\n\n 
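# 'Standing' soldiers are the total minus injured minus dead; the same tally is computed for the defender below and the larger count decides the outcome.\n 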
defender_types = [x for x in json.loads(todos_pvp[0].data)['defender']]\n defender_tot = sum([int(json.loads(todos_pvp[0].data)['defender'][x]['Tot']) for x in defender_types])\n defender_inj = sum([int(json.loads(todos_pvp[0].data)['defender'][x]['Inj']) for x in defender_types])\n defender_dead = sum([int(json.loads(todos_pvp[0].data)['defender'][x]['Dead']) for x in defender_types])\n defender_leftover = defender_tot - defender_inj - defender_dead\n\n if attacker_leftover > defender_leftover:\n outcome = 'Attacker wins'\n elif defender_leftover > attacker_leftover:\n outcome = 'Defender wins'\n else:\n outcome = 'Tied'\n\n battle_time = data['stats']['time'] if 'time' in data['stats'] else 'NO TIME'\n attacker_name = data['stats']['attacker_name']\n outcome_stats = data['stats']['outcome']\n defender_name = data['stats']['defender_name']\n\n attacker_country = data['stats']['attacker_country']\n defender_country = data['stats']['defender_country']\n attacker_name_stats = data['stats']['attacker_name_stats']\n\n if outcome_stats == 'defeats':\n outcome_txt = 'defeats'\n else:\n outcome_txt = 'loses to'\n\n todos_pvp[0].status = True\n todos_pvp[0].save()\n\n country_message = \"{bg}{attacker_country} vs {defender_country}{bg_end}: {attacker_name_stats} {outcome_txt} {defender_name}\".format(\n attacker_country=attacker_country, defender_country=defender_country, attacker_name_stats=attacker_name_stats,\n outcome_txt=outcome_txt, defender_name=defender_name, bg=background_red_color_white, bg_end=background_end)\n send_to_channel(sock1, connected_channels, country_message)\n\n # Stat: 448 vs 120 soldiers. 448 (100.0%) vs 49 (40.83%) standing. Attacker wins! 11 (22.45%) captured! FLAWLESS!!\n message = \"{attacker_name} vs {defender_name}. Stat: {attacker_tot} vs {defender_tot} soldiers. \" \\\n \"{attacker_leftover} vs {defender_leftover} standing. \" \\\n \"{outcome}! 
Time CET (+1): {battle_time}\" \\\n \"\" \\\n \"\".format(attacker_name=attacker_name, defender_name=defender_name,\n attacker_tot=attacker_tot, defender_tot=defender_tot,\n attacker_leftover=attacker_leftover, defender_leftover=defender_leftover,\n outcome=outcome, battle_time=battle_time)\n send_to_channel(sock1, connected_channels, message)\n\n todos_pvp = Todos_Todolist.select().where(Todos_Todolist.status == False, Todos_Todolist.todo_type_id == 2).order_by(Todos_Todolist.identifier)\n\n if sfs and len(todos) > 0:\n data = json.loads(todos[0].data)\n attacker_types = [x for x in json.loads(todos[0].data)['attacker']]\n attacker_tot = sum([int(json.loads(todos[0].data)['attacker'][x]['Tot']) for x in attacker_types])\n attacker_inj = sum([int(json.loads(todos[0].data)['attacker'][x]['Inj']) for x in attacker_types])\n attacker_dead = sum([int(json.loads(todos[0].data)['attacker'][x]['Dead']) for x in attacker_types])\n attacker_leftover = attacker_tot - attacker_inj - attacker_dead\n\n defender_types = [x for x in json.loads(todos[0].data)['defender']]\n defender_tot = sum([int(json.loads(todos[0].data)['defender'][x]['Tot']) for x in defender_types])\n defender_inj = sum([int(json.loads(todos[0].data)['defender'][x]['Inj']) for x in defender_types])\n defender_dead = sum([int(json.loads(todos[0].data)['defender'][x]['Dead']) for x in defender_types])\n defender_leftover = defender_tot - defender_inj - defender_dead\n\n data_ext = json.loads(todos[0].data)['attacker']\n data_extf = json.loads(todos[0].data)['defender']\n\n defenders_count = 0\n\n if attacker_leftover > defender_leftover:\n outcome = 'Attacker wins'\n elif defender_leftover > attacker_leftover:\n outcome = 'Defender wins'\n else:\n outcome = 'Tied'\n\n battle_time = data['stats']['time']\n attacker_name = data['stats']['attacker_name']\n outcome_stats = data['stats']['outcome']\n defender_name = data['stats']['defender_name']\n\n attacker_country = data['stats']['attacker_country']\n defender_country = data['stats']['defender_country']\n attacker_name_stats = data['stats']['attacker_name_stats']\n\n if defender_tot >= 500:\n points = \"{bg} {points} points gained.{bg_end} \".format(\n points=int(defender_tot / 4),\n bg=background_red_color_white, bg_end=background_end\n )\n else:\n points = ''\n\n if outcome_stats == 'defeats':\n outcome_txt = 'captures'\n else:\n outcome_txt = 'fails to capture'\n\n todos[0].status = True\n todos[0].save()\n\n # 1000\n total_soldiers = attacker_leftover + defender_leftover\n attacker_leftover_perc = round(attacker_leftover / attacker_tot * 100, 2)\n defender_leftover_perc = round(defender_leftover / defender_tot * 100, 2)\n\n defenders_count = data_extf['Commanders']['Tot']\n country_message = \"{bg}{attacker_country} vs {defender_country}{bg_end}: {attacker_name_stats} {outcome_txt} {defender_name} ({defenders_count})\".format(\n attacker_country=attacker_country, defender_country=defender_country, attacker_name_stats=attacker_name_stats,\n outcome_txt=outcome_txt, defender_name=defender_name, bg=background_white_color_black, bg_end=background_end,\n defenders_count=defenders_count)\n if attacker_country == 'Berber' or defender_country == 'Berber':\n send_to_channel(sock1, connected_channels, country_message)\n\n #if data_extf['Commanders']['Tot'] > 0:\n # bp = BattleParserIrc(data)\n # bp.run()\n # for line in bp.lines:\n # send_to_channel(sock1, connected_channels, line)\n\n # Stat: 448 vs 120 soldiers. 448 (100.0%) vs 49 (40.83%) standing. Attacker wins! 11 (22.45%) captured! 
FLAWLESS!!\n # {attacker_name} vs {defender_name}.\n message = \"Stat: {attacker_tot} vs {defender_tot} soldiers. \" \\\n \"{attacker_leftover} ({attacker_leftover_perc}%) vs {defender_leftover} ({defender_leftover_perc}%) standing. \" \\\n \"{outcome}! {points}- Time CET (+1): {battle_time}\" \\\n \"\" \\\n \"\".format(attacker_name=attacker_name, defender_name=defender_name,\n attacker_tot=attacker_tot, defender_tot=defender_tot,\n attacker_leftover=attacker_leftover, defender_leftover=defender_leftover,\n outcome=outcome, battle_time=battle_time, points=points,\n defender_leftover_perc=defender_leftover_perc, attacker_leftover_perc=attacker_leftover_perc)\n\n if attacker_country == 'Berber' or defender_country == 'Berber':\n send_to_channel(sock1, connected_channels, message)\n\n if attacker_country == 'Berber':\n send_notice(sock1, '#LivvTest', 'REJOIN!')\n #Notifier.notify('REJOIN', title='TLK')\n\n todos = Todos_Todolist.select().where(Todos_Todolist.status == False, Todos_Todolist.todo_type_id == 1).order_by(Todos_Todolist.identifier)\n\n if reminders:\n for reminder in reminders:\n if reminder and dt.utcnow() >= reminder.date_deadline:\n reminder.status = True\n reminder.save()\n message = \"{msg} for {nick} - > {desc} \".format(msg='Reminderrrrr', desc=reminder.description, nick=reminder.name)\n send_to_channel(sock1, reminder.data, message)\n\n todos = Todos_Todolist.select().where(Todos_Todolist.status == False, Todos_Todolist.todo_type_id == 1).order_by(Todos_Todolist.identifier)\n todos_pvp = Todos_Todolist.select().where(Todos_Todolist.status == False, Todos_Todolist.todo_type_id == 2).order_by(Todos_Todolist.identifier)\n reminders = Todos_Todolist.select().where(Todos_Todolist.status == False, Todos_Todolist.todo_type_id == 3)\n else:\n # Loop through each socket in rlist, read and print the available data\n for sock in rlist:\n try:\n result_receiving = str(sock.recv(1000), 'ascii')\n except UnicodeDecodeError:\n pass\n #print(result_receiving)\n\n if 'PING :' in result_receiving:\n time.sleep(1)\n message = bytes('PONG {}\\r\\n'.format(result_receiving.split(':')[1].strip()), 'ascii')\n print(message)\n sock.send(message)\n pinged_sockets.append(sock)\n\n if 'livvo' in result_receiving:\n print(result_receiving)\n\n found_plugin = [x for x in ArgumentParser().get_subclasses() if x in result_receiving]\n if found_plugin:\n plugin_class = \"{}Argument\".format(found_plugin.pop().replace('!', '').capitalize())\n plugin_class = getattr(irc_ap, plugin_class)\n custom_args = result_receiving.split(' :!')[1]\n re_nick = re.findall(':(\\w+)!~', result_receiving)\n nick = re_nick.pop() if re_nick else 'Unknown'\n re_channel = re.findall('PRIVMSG (#\\w+) :', result_receiving)\n channel = re_channel.pop() if re_channel else 'Unknown'\n extra = {\n 'nick': nick,\n 'channel': channel, # PRIVMSG #LivvTest :\n }\n data = plugin_class(custom_args, **extra).data\n if data:\n if isinstance(data, list):\n for line in data:\n channel = re.findall('(#[\\w-]+) ', result_receiving)\n send_to_channel(sock, channel[0], line)\n else:\n\n channel = re.findall('(#\\w+) ', result_receiving)\n send_to_channel(sock, channel.pop(), data)\n\n if '!Livvo' in result_receiving:\n channel = re.findall('(#\\w+) ', result_receiving)\n message = '{bg}PONGGG{bg_end}'.format(bg=background_white_color_black, bg_end=background_end)\n send_to_channel(sock, channel.pop(), message)\n\n if '!sfstats' in result_receiving:\n _, minutes = result_receiving.split('!sfstats')\n minutes = minutes.strip()\n if minutes == '':\n 
minutes = 45\n if isinstance(minutes, str):\n minutes = int(minutes)\n dt_now = dt.utcnow()\n dt_now = dt_now - timedelta(minutes=minutes)\n number_sfs = len(Todos_Todolist.select().where(Todos_Todolist.created >= dt_now, Todos_Todolist.todo_type_id == 1))\n channel = re.findall('(#\\w+) ', result_receiving)\n message = '{bg}Number of SF the last {minutes} minutes: {number_sfs}{bg_end}'.format(bg=background_white_color_black, bg_end=background_end, number_sfs=number_sfs, minutes=minutes)\n\n send_to_channel(sock, channel.pop(), message)\n\n if '!pvpstats' in result_receiving:\n _, minutes = result_receiving.split('!pvpstats')\n minutes = minutes.strip()\n if minutes == '':\n minutes = 45\n if isinstance(minutes, str):\n minutes = int(minutes)\n dt_now = dt.utcnow()\n dt_now = dt_now - timedelta(minutes=minutes)\n pvp_battles = Todos_Todolist.select().where(Todos_Todolist.created >= dt_now, Todos_Todolist.todo_type_id == 2)\n tree = lambda: collections.defaultdict(tree)\n stats = tree()\n for pvp_battle in pvp_battles:\n data = json.loads(pvp_battle.data)\n data = data['stats']\n if 'defeats' in data['outcome']:\n stats[data['attacker_country']]['wins'] = stats[data['attacker_country']]['wins'] + 1 if 'wins' in stats[data['attacker_country']] else 1\n stats[data['defender_country']]['losses'] = stats[data['defender_country']]['losses'] + 1 if 'losses' in stats[data['defender_country']] else 1\n\n elif 'lost to' in data['outcome']:\n stats[data['attacker_country']]['losses'] = stats[data['attacker_country']]['losses'] + 1 if 'losses' in stats[data['attacker_country']] else 1\n stats[data['defender_country']]['wins'] = stats[data['defender_country']]['wins'] + 1 if 'wins' in stats[data['defender_country']] else 1\n\n else:\n stats[data['attacker_country']]['ties'] = stats[data['attacker_country']]['ties'] + 1 if 'ties' in stats[data['attacker_country']] else 1\n stats[data['defender_country']]['ties'] = stats[data['defender_country']]['ties'] + 1 if 'ties' in stats[data['defender_country']] else 1\n\n if stats:\n calc_wins = stats['Berber']['wins']\n calc_losses = stats['Berber']['losses']\n calc_add = stats['Berber']['wins'] - stats['Berber']['losses']\n calc_ties = stats['Berber']['ties'] if 'ties' in stats['Berber'] else 0\n number_pvps = len(pvp_battles)\n channel = re.findall('(#\\w+) ', result_receiving)\n message = '{bg}Number of PVPs the last {minutes} minutes: {number_pvps} Wins: {calc_wins} Losses: {calc_losses} Ties: {calc_ties} {bg_end}'.format(\n bg=background_white_color_black, bg_end=background_end, number_pvps=number_pvps, minutes=minutes,\n calc_wins=calc_wins, calc_losses=calc_losses, calc_ties=calc_ties\n )\n\n send_to_channel(sock, channel.pop(), message)\n\n if '!sfs on' in result_receiving:\n sfs = True\n\n if '!pvp on' in result_receiving:\n pvp = True\n\n if '!sfs off' in result_receiving:\n sfs = False\n\n if '!pvp off' in result_receiving:\n pvp = False\n\n\n seconds_running += 1\n\n\n","repo_name":"AlexDevelop/irc_tlk","sub_path":"irc_bot/select_protocol.py","file_name":"select_protocol.py","file_ext":"py","file_size_in_byte":21334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18832136460","text":"\"\"\"\nElinizde şöyle bir liste bulunsun.\n\n [1,2,3,4,5,6,7,8,9,10]\n\nBu listenin içindeki çift sayıların toplamını ekrana yazdıran bir fonksiyon yazın.\n\nNot: İlk önce filter() fonksiyonu ile çift sayıları ayıklayın. 
Daha sonra reduce() fonksiyonunu kullanın.\n\"\"\"\nfrom functools import reduce\n\nsayilar = [1,2,3,4,5,6,7,8,9,10]\n\nciftSayilar = list(filter(lambda x : x%2 == 0, sayilar))\n\ntoplam = reduce(lambda x,y : x+y ,ciftSayilar)\n\nprint(toplam)\n\n#Problem çözüldü.","repo_name":"mebon/PythonDenemeleri","sub_path":"Problemler9/Problem9_3.py","file_name":"Problem9_3.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24170428103","text":"import asyncio\nimport os\nfrom typing import Tuple\n\nimport pytest\n\nfrom spamanalyzer.data_structures import MailAnalysis, SpamAnalyzer\nfrom spamanalyzer.domain import Domain\n\nSAMPLES_FOLDER = \"tests/samples\"\n\ntrustable_mail = os.path.join(\n SAMPLES_FOLDER,\n \"97.47949e45691dd7a024dcfaacef4831461bf5d5f09c85a6e44ee478a5bcaf8539.email\",\n)\nspam = os.path.join(\n SAMPLES_FOLDER,\n \"00.1d30d499c969369915f69e7cf1f5f5e3fdd567d41e8721bf8207fa52a78aff9a.email\",\n)\n\nwith open(\"src/app/conf/word_blacklist.txt\", \"r\", encoding=\"utf-8\") as f:\n wordlist = f.read().splitlines()\n\n\n@pytest.fixture\nasync def analysis() -> Tuple[MailAnalysis, MailAnalysis]:\n analyzer = SpamAnalyzer(wordlist)\n\n return await asyncio.gather(analyzer.analyze(trustable_mail),\n analyzer.analyze(spam))\n\n\nclass TestSpamAnalyzer:\n analyzer = SpamAnalyzer(wordlist)\n\n @pytest.mark.asyncio\n async def test_get_domain(self):\n assert (await self.analyzer.get_domain(trustable_mail)\n ) == Domain(\"github-lowworker-5fb2734.va3-iad.github.net\")\n\n @pytest.mark.asyncio\n async def test_mail_analysis_is_spam(self, analysis):\n assert self.analyzer.is_spam(analysis[0]) is False\n\n @pytest.mark.asyncio\n async def test_multiple_analysis(self, analysis):\n ham, spam = analysis\n assert self.analyzer.classify_multiple_input([ham, spam]) == [\n False,\n True,\n ]\n\n\nclass TestMailAnalysis:\n\n @pytest.mark.asyncio\n async def test_mail_analysis_type(self, analysis):\n ham, _ = analysis\n assert isinstance(ham, MailAnalysis)\n\n @pytest.mark.asyncio\n async def test_mail_analysis_file_path(self, analysis):\n assert analysis[0].file_path == trustable_mail\n\n @pytest.mark.asyncio\n async def test_to_dict(self, analysis):\n dict_mail = analysis[0].to_dict()\n assert isinstance(dict_mail, dict)\n assert isinstance(dict_mail[\"body\"], dict)\n with pytest.raises(KeyError):\n assert dict_mail[\"is_spam\"] is None\n assert dict_mail[\"not_existing_key\"] is None\n","repo_name":"matteospanio/spam-analyzer","sub_path":"tests/spamanalyzer/test_data_structures.py","file_name":"test_data_structures.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"33637237583","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/carbon/common/script/net/machoNetPacket.py\nimport types\nimport service\nimport macho\nglobals().update(service.consts)\nimport log\nimport const\n\nclass MachoPacket():\n __guid__ = 'macho.MachoPacket'\n __intorstringtype__ = (types.IntType, types.StringType, types.UnicodeType)\n __bizzarrerouters__ = (const.cluster.MACHONETMSG_TYPE_SESSIONCHANGENOTIFICATION,\n const.cluster.MACHONETMSG_TYPE_SESSIONINITIALSTATENOTIFICATION,\n const.cluster.MACHONETMSG_TYPE_PING_REQ,\n const.cluster.MACHONETMSG_TYPE_PING_RSP)\n\n def __init__(self, *args, **keywords):\n self.userID = None\n self.compressedPart = 0\n self.source = macho.MachoAddress()\n 
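# Source and destination default to empty addresses; the keyword loop below may overwrite them.\n 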
self.destination = macho.MachoAddress()\n self.contextKey = None\n self.applicationID = None\n self.languageID = None\n self.oob = {}\n dtc = 0\n for each in keywords.iterkeys():\n if each != 'donttypecheck':\n setattr(self, each, keywords[each])\n else:\n dtc = 1\n\n self.command = self.__machodesc__['command']\n if not dtc:\n for each in self.__machodesc__['params']:\n if each[-1:] != '?':\n if not hasattr(self, each):\n raise TypeError('%s requires %s to be specified' % (self.__class__.__name__, each))\n\n self.pickleSize = 0\n\n def __getstate__(self):\n params = self.__machodesc__['params']\n body = [None] * len(params)\n for i in range(len(params)):\n if params[i].endswith('?'):\n tmp = params[i][:-1]\n if hasattr(self, tmp):\n body[i] = getattr(self, tmp)\n else:\n body.pop(-1)\n break\n else:\n body[i] = getattr(self, params[i])\n\n oob = None\n if self.oob or self.compressedPart:\n oob = self.oob\n if self.compressedPart:\n oob['compressedPart'] = self.compressedPart\n return (self.command,\n self.source,\n self.destination,\n self.userID,\n tuple(body),\n oob,\n self.contextKey,\n self.applicationID,\n self.languageID)\n\n def __setstate__(self, state):\n self.command, self.source, self.destination, self.userID, body, self.oob, self.contextKey, self.applicationID, self.languageID = state\n if self.oob is None:\n self.oob = {}\n self.compressedPart = self.oob.get('compressedPart', 0)\n params = self.__machodesc__['params']\n l = len(params)\n if len(body) < l:\n l = len(body)\n for i in range(l):\n if params[i].endswith('?'):\n tmp = params[i][:-1]\n else:\n tmp = params[i]\n setattr(self, tmp, body[i])\n\n def Response(self, *args, **keywords):\n if not self.__machodesc__.has_key('response'):\n raise AttributeError(self.__class__.__name__, 'Response', 'There is no such thing as a response to a %s' % self.__class__.__name__)\n theResponse = apply(self.__machodesc__['response'], (), {'donttypecheck': 1})\n theResponse.source = self.destination\n theResponse.destination = self.source\n theResponse.userID = self.userID\n theResponse.contextKey = None\n theResponse.applicationID = None\n theResponse.languageId = None\n responseParams = theResponse.__machodesc__['params']\n i = 0\n for each in responseParams:\n if each.endswith('?'):\n if len(args) > i:\n setattr(theResponse, each[:-1], args[i])\n break\n else:\n if len(args) <= i:\n break\n setattr(theResponse, each, args[i])\n i = i + 1\n\n for each in keywords.iterkeys():\n if each not in responseParams:\n raise TypeError('%s.Response does not take %s as a parameter' % (self.__class__.__name__, each))\n setattr(theResponse, each, keywords[each])\n\n for each in responseParams:\n if not each.endswith('?') and not hasattr(theResponse, each):\n raise TypeError('%s.Response requires %s as a parameter, but it was not specified' % (self.__class__.__name__, each))\n\n return theResponse\n\n def ErrorResponse(self, code, payload):\n theResponse = ErrorResponse(originalCommand=self.command, code=code, payload=payload)\n theResponse.source = self.destination\n theResponse.destination = self.source\n theResponse.userID = self.userID\n theResponse.contextKey = None\n theResponse.applicationID = None\n theResponse.languageID = None\n return theResponse\n\n def SetPickle(self, thePickle):\n self.__dict__['thePickle'] = thePickle\n self.__dict__['pickleSize'] = len(self.thePickle)\n\n def GetPickle(self):\n if not hasattr(self, 'thePickle'):\n self.__dict__['thePickle'] = macho.Dumps(self)\n self.__dict__['pickleSize'] = len(self.thePickle)\n 
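# The dump is memoised on the instance; __setattr__ calls Changed(), which drops the cached pickle whenever a field is mutated.\n 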
return self.thePickle\n\n def GetPickleSize(self, machoNet):\n if not self.pickleSize:\n self.GetPickle()\n return self.pickleSize\n\n def Changed(self):\n if hasattr(self, 'thePickle'):\n delattr(self, 'thePickle')\n\n def RoutesTo(self, towhat):\n if self.command in self.__bizzarrerouters__:\n return 1\n if self.command == const.cluster.MACHONETMSG_TYPE_AUTHENTICATION_REQ and macho.mode == 'proxy':\n return 0\n return self.destination.RoutesTo(towhat, self.source)\n\n def __setattr__(self, attr, value):\n if hasattr(self, 'thePickle'):\n if hasattr(self, attr):\n curr = getattr(self, attr)\n if type(curr) not in self.__intorstringtype__ or type(value) not in self.__intorstringtype__ or curr != value:\n self.Changed()\n else:\n self.Changed()\n self.__dict__[attr] = value\n\n def __repr__(self):\n try:\n if self.__guid__ == 'macho.AuthenticationReq':\n return 'Packet::AuthenticationReq(%s,%s,%s,%s,%s)' % (self.source,\n self.destination,\n self.clientinfo,\n self.userName,\n getattr(self, 'address', None))\n self.GetPickle()\n if len(self.thePickle) > 1500000:\n return 'Packet::%s (%s,%s,GENOCIDAL PAYLOAD(%d bytes),%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n len(self.thePickle),\n self.oob,\n self.contextKey)\n if len(self.thePickle) > 1000000:\n return 'Packet::%s (%s,%s,MURDEROUS PAYLOAD(%d bytes),%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n len(self.thePickle),\n self.oob,\n self.contextKey)\n if len(self.thePickle) > 100000:\n return 'Packet::%s (%s,%s,GARGANTUAN PAYLOAD(%d bytes),%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n len(self.thePickle),\n self.oob,\n self.contextKey)\n if len(self.thePickle) > 10000:\n return 'Packet::%s (%s,%s,HUGE PAYLOAD(%d bytes),%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n len(self.thePickle),\n self.oob,\n self.contextKey)\n if len(self.thePickle) > 1000:\n return 'Packet::%s (%s,%s,LARGE PAYLOAD(%d bytes),%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n len(self.thePickle),\n self.oob,\n self.contextKey)\n try:\n l = len(self.thePickle)\n params = []\n for each in self.__machodesc__['params']:\n if each[-1:] == '?':\n tmp = each[:-1]\n if hasattr(self, tmp):\n params.append(getattr(self, tmp))\n else:\n params.append(getattr(self, each))\n\n if hasattr(self, 'strayload'):\n return 'Packet::%s (%s,%s,%s bytes,%s,%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n l,\n self.strayload,\n self.oob,\n self.contextKey)\n return 'Packet::%s (%s,%s,%s bytes,%s,%s, %s)' % (self.__class__.__name__,\n self.source,\n self.destination,\n l,\n params,\n self.oob,\n self.contextKey)\n except Exception:\n log.LogException()\n return 'Packet::%s (CRAPPY TUPLE)' % self.__class__.__name__\n\n except Exception:\n log.LogException()\n return 'Packet containing crappy data'\n\n __str__ = __repr__\n\n\nclass ErrorResponse(MachoPacket):\n __guid__ = 'macho.ErrorResponse'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_ERRORRESPONSE,\n 'params': ['originalCommand', 'code', 'payload']}\n\n\nclass IdentificationRsp(MachoPacket):\n __guid__ = 'macho.IdentificationRsp'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_IDENTIFICATION_RSP,\n 'params': ['accepted',\n 'nodeID',\n 'others',\n 'isProxy',\n 'isApp',\n 'serviceMask']}\n\n\nclass IdentificationReq(MachoPacket):\n __guid__ = 'macho.IdentificationReq'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_IDENTIFICATION_REQ,\n 
'params': ['nodeID',\n 'myaddress',\n 'others',\n 'isProxy',\n 'isApp',\n 'serviceMask'],\n 'response': IdentificationRsp}\n\n\nclass CallRsp(MachoPacket):\n __guid__ = 'macho.CallRsp'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_CALL_RSP,\n 'params': ['payload']}\n\n\nclass CallReq(MachoPacket):\n __guid__ = 'macho.CallReq'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_CALL_REQ,\n 'params': ['payload?'],\n 'response': CallRsp}\n\n\nclass TransportClosed(MachoPacket):\n __guid__ = 'macho.TransportClosed'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_TRANSPORTCLOSED,\n 'params': ['clientID', 'isRemote']}\n\n\nclass Notification(MachoPacket):\n __guid__ = 'macho.Notification'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_NOTIFICATION,\n 'params': ['payload?']}\n\n\nclass SessionChangeNotification(MachoPacket):\n __guid__ = 'macho.SessionChangeNotification'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_SESSIONCHANGENOTIFICATION,\n 'params': ['sid', 'change', 'nodesOfInterest?']}\n\n\nclass SessionInitialStateNotification(MachoPacket):\n __guid__ = 'macho.SessionInitialStateNotification'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_SESSIONINITIALSTATENOTIFICATION,\n 'params': ['sid', 'sessionType', 'initialstate']}\n\n\nclass PingRsp(MachoPacket):\n __guid__ = 'macho.PingRsp'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_PING_RSP,\n 'params': ['times']}\n\n\nclass PingReq(MachoPacket):\n __guid__ = 'macho.PingReq'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_PING_REQ,\n 'response': PingRsp,\n 'params': ['times']}\n\n\nclass MovementNotification(MachoPacket):\n __guid__ = 'macho.MovementNotification'\n __machodesc__ = {'command': const.cluster.MACHONETMSG_TYPE_MOVEMENTNOTIFICATION,\n 'params': ['payload']}","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/carbon/common/script/net/machoNetPacket.py","file_name":"machoNetPacket.py","file_ext":"py","file_size_in_byte":12050,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"70729071451","text":"import numpy as np\nfrom sympy import Symbol, solve\n\n\"\"\" Building matrices \"\"\"\n# definitions\nm1 = np.matrix(\n [\n [5,10],\n [11,3]\n ]\n)\nm2 = np.matrix([[6,11], [12,4]])\nstM = np.matrix(\n [\n [1,0],\n [0,1]\n ]\n)\n\n# inverse matrix\ndef revMatrix (m2):\n ## store the elements in variables\n a1 = m2[0,0]\n a2 = m2[0,1]\n a3 = m2[1,0]\n a4 = m2[1,1] \n print(\"a1, a2, a3, a4 : \", a1, a2, a3, a4)\n ## 2x2 inverse: swap the diagonal, negate the off-diagonal, divide by the determinant\n det = a1*a4 - a2*a3\n revM = np.matrix(\n [\n [a4,-a2],\n [-a3,a1]\n ]\n ) / det\n return revM\n\n# .\n# [ x1, x2 ] ->\n# [ x3, x4 ] \n# <- .\n\n\n# m2_rev = np.linalg.inv(m2) # malfunctioned\nprint(\"inverse of m2 : \", revMatrix(m2))\nrevM = revMatrix(m2)\nprint(\"testing : \", m2*revM)\n\n# output\nprint(\"m1 : \\n\", m1)\nprint(\"m2 : \\n\", m2)\nprint(\"m1 + m2 : \\n\", m1+m2)\nprint(\"only 50% of the original : \\n\", m1/2)\nprint(\"m1 * m2 : \\n\", m1*m2)\n# print(\"inverse of m2: \\n\", m2_rev)\n# print(\"m2 * m2_rev: \\n\", m2*m2_rev)\nprint(\"identity matrix product : \\n\", m2*stM)\n\n","repo_name":"gityBoiii/MATH","sub_path":"matrix/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15263577174","text":"from tessilator import tessilator\nimport numpy as np\nimport logging\n\ndef main(args=None):\n fluxCon, lcCon, makePlots, fileRef, tFile = tessilator.setup_input_parameters()\n conFile, periodFile = tessilator.setup_filenames(fileRef)\n\n 
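# Aperture radius and sky-annulus radii; only Rad is passed on to the contamination calculation below.\n 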
Rad, SkyRad = 1.0, np.array([6.0,8.0])\n\n logging.basicConfig(filename=\"output.log\", level=logging.WARNING)\n\n print(\"Reading the table and formatting into astropy table structure.\")\n tTargets = tessilator.read_data(tFile)\n print(\"Done reading the table and formatting.\")\n\n print(\"...now calculating the contamination.\")\n tTargets = tessilator.collect_contamination_data(tTargets, fluxCon, lcCon,\n conFile, Rad=Rad)\n print(\"Done calculating the contamination.\")\n\n print(\"...now iterating over each source.\")\n tessilator.all_sources_cutout(tTargets, periodFile, lcCon, fluxCon, conFile,\n makePlots, choose_sec=None, tot_attempts=2, cap_files=8)\n","repo_name":"alexbinks/tessilator","sub_path":"tessilator/scripts/run_tess_cutouts.py","file_name":"run_tess_cutouts.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6425993649","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^signin/$', 'ale.views.sign_in', name='sign in'),\n url(r'^signup/$', 'ale.views.sign_up', name='sign in'),\n url(r'^logout/$', 'ale.views.log_out', name='log out'),\n url(r'^$', 'ale.views.dashboard', name='dashboard'),\n url(r'^project/create/$', 'ale.views.create_project', name='dashboard'),\n url(r'^project/import/(?P.*)/$', 'ale.io_views.import_user_project',\n name='import'),\n url(r'^project/export/(?P.*)/$', 'ale.io_views.export_user_project',\n name='import'),\n url(r'^project/(?P.*)/$', 'ale.views.show_user_project', name='project'),\n url(r'^json/project/(?P.*)/cell/modify/$', 'ale.views.modify_cell',\n name='modify cell'),\n url(r'^json/project/(?P.*)/$', 'ale.views.cells_data_json', name='project json'),\n\n\n url(r'^json/shares/project/(?P.*)/$', 'ale.share_views.get_shares',\n name='get shares'),\n url(r'^json/share/project/(?P.*)/$', 'ale.share_views.share_project',\n name='share project'),\n url(r'^json/share/remove/project/(?P.*)/$', 'ale.share_views.remove_share',\n name='remove share'),\n url(r'^share/(?P.*)/$', 'ale.share_views.show_shared_project', name='shared project'),\n url(r'^json/share/cells/(?P.*)/$', 'ale.share_views.get_json_shared_cells',\n name='shared project cells'),\n url(r'^json/share/modify/cell/(?P.*)/$', 'ale.share_views.modify_shared_project_cell',\n name='modify shared project'),\n url(r'^import/shared/(?P.*)/$', 'ale.share_views.import_shared_project',\n name='import shared project'),\n url(r'^export/shared/(?P.*)/$', 'ale.share_views.export_shared_project',\n name='export shared project'),\n\n\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n)","repo_name":"stermedia/AndroidLanguageEditor","sub_path":"AndroidLanguageEditor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28639416463","text":"from manimlib.imports import *\n\nclass FooScene(Scene):\n def construct(self):\n cmap={r\"\\phi\":RED,r\"\\psi\":YELLOW,\"{f}\":BLUE}\n eqn=TexMobject(r\"\\phi,\\psi\\uparrow\\quad {f}\\in C^n [x_0,+\\infty)\",\n tex_to_color_map=cmap)\n eqn2=TexMobject(r\"{f}=O(\\phi)\\quad{f}^{(n)}=O(\\psi)\",\n tex_to_color_map=cmap)\n eqn3=TexMobject(r\"\\Rightarrow{f}^{(k)}=O(\\phi^{1-\\frac kn}\\psi^{\\frac kn})\\quad\\forall 0 1 else False\n self.bias = self.config.get('bias')\n self.model 
= self.create_model()\n self.criterion_fn = nn.CrossEntropyLoss()\n self.kappa_scheduler = LinearScheduler(start=1, end=0.5)\n self.eps_scheduler = LinearScheduler(start=0)\n self.prev_weight, self.prev_eps = {}, {}\n self.clipping = self.config['clipping']\n self.current_head = \"All\"\n self.current_task = 1\n self.schedule_stack = []\n self.tb = SummaryWriter(log_dir=f\"runs/{self.config['dataset_name']}_experiment/\")\n for s in self.config[\"schedule\"][::-1]:\n self.schedule_stack.append(s)\n\n if agent_config['gpuid'][0] >= 0:\n self.cuda()\n self.gpu = True\n else:\n self.gpu = False\n self.init_optimizer()\n self.reset_optimizer = False\n self.valid_out_dim = 'ALL'\n # Default: 'ALL' means all output nodes are active\n # Set a interger here for the incremental class scenario\n\n t = agent_config['force_out_dim'] if agent_config['force_out_dim'] else self.model.last[\"1\"].out_features\n self.C = [-torch.eye(t).cuda() for _ in range(t)]\n for y0 in range(t):\n self.C[y0][y0, :] += 1\n\n def init_optimizer(self):\n optimizer_arg = {'params': (p for p in self.model.parameters() if p.requires_grad),\n 'lr': self.config['lr'],\n 'weight_decay': self.config['weight_decay']}\n if self.config['optimizer'] in ['SGD', 'RMSprop']:\n optimizer_arg['momentum'] = self.config['momentum']\n elif self.config['optimizer'] in ['Rprop']:\n optimizer_arg.pop('weight_decay')\n elif self.config['optimizer'] == 'amsgrad':\n optimizer_arg['amsgrad'] = True\n self.config['optimizer'] = 'Adam'\n\n self.optimizer = opt.__dict__[self.config['optimizer']](**optimizer_arg)\n self.scheduler = opt.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.config['milestones'],\n gamma=0.1)\n\n def create_model(self):\n cfg = self.config\n\n # Define the backbone (MLP, LeNet, VGG, ResNet ... 
etc) of model\n model = models.__dict__[cfg['model_type']].__dict__[cfg['model_name']]()\n\n # Apply network surgery to the backbone\n # Create the heads for tasks (It can be single task or multi-task)\n n_feat = model.last.in_features\n\n # The output of the model will be a dict: {task_name1:output1, task_name2:output2 ...}\n # For a single-headed model the output will be {'All':output}\n model.last = nn.ModuleDict()\n for task, out_dim in cfg['out_dim'].items():\n model.last[task] = LinearInterval(n_feat, out_dim, bias=self.bias)\n\n # Redefine the task-dependent function\n def new_logits(self, x):\n outputs = {}\n for task, func in self.last.items():\n outputs[task] = func(x)\n return outputs\n\n # Replace the task-dependent function\n model.logits = MethodType(new_logits, model)\n # Load pre-trained weights\n if cfg['model_weights'] is not None:\n print('=> Load model weights:', cfg['model_weights'])\n model_state = torch.load(cfg['model_weights'],\n map_location=lambda storage, loc: storage) # Load to CPU.\n model.load_state_dict(model_state)\n print('=> Load Done')\n return model\n\n def forward(self, x):\n return self.model.forward(x)\n\n def predict(self, inputs):\n self.model.eval()\n out = self.forward(inputs)\n for t in out.keys():\n out[t] = out[t].detach()\n return out\n\n def restore_weights(self):\n i = 0\n for c in self.model.children():\n if isinstance(c, nn.Sequential):\n for layer in c.children():\n if isinstance(layer, (Conv2dInterval, LinearInterval)):\n layer.weight.data = self.prev_weight[i].clone()\n i += 1\n elif isinstance(c, nn.ModuleDict) and not self.multihead:\n c[\"All\"].weight.data = self.prev_weight[i].clone()\n i += 1\n elif isinstance(c, (Conv2dInterval, LinearInterval)):\n c.weight.data = self.prev_weight[i].clone()\n i += 1\n\n def move_weights(self, sign):\n for c in self.model.children():\n if isinstance(c, nn.Sequential):\n for layer in c.children():\n if isinstance(layer, (Conv2dInterval, LinearInterval)):\n layer.weight.data += sign * layer.eps\n elif isinstance(c, nn.ModuleDict) and not self.multihead:\n c[\"All\"].weight.data += sign * c[\"All\"].eps\n elif isinstance(c, (Conv2dInterval, LinearInterval)):\n c.weight.data += sign * c.eps\n\n def validation_with_move_weights(self, dataloader):\n # moves = (0.001, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)\n moves = (1, )\n for move in moves:\n self.move_weights(-move)\n self.validation(dataloader, txt=f\"Lower {move}\")\n self.restore_weights()\n\n for move in moves:\n self.move_weights(-move)\n self.validation(dataloader, txt=f\"Upper {move}\")\n self.restore_weights()\n\n def validation(self, dataloader, txt=\"\"):\n # This function doesn't distinguish tasks.\n batch_timer = Timer()\n acc = AverageMeter()\n batch_timer.tic()\n\n orig_mode = self.training\n self.eval()\n for i, (inputs, target, task) in enumerate(dataloader):\n if self.gpu:\n with torch.no_grad():\n inputs = inputs.cuda()\n target = target.cuda()\n output = self.predict(inputs)\n\n # Summarize the performance of all tasks, or 1 task, depends on dataloader.\n # Calculated by total number of data.\n acc = accumulate_acc(output, target, task, acc)\n\n self.train(orig_mode)\n\n self.log(' | Val: {acc.avg:.3f} | Time: {time:.2f} | {txt}'.format(\n txt=txt, acc=acc, time=batch_timer.toc()))\n return acc.avg\n\n def _interval_based_bound(self, y0, idx, key):\n # requires last layer to be linear\n C = self.C[y0].t()\n cW = C @ (self.model.last[key].weight - self.model.last[key].eps)\n # cb = C @ self.model.last[key].bias\n l, u = 
self.model.bounds\n # return (cW.clamp(min=0) @ l[idx].t() + cW.clamp(max=0) @ u[idx].t() + cb[:, None]).t()\n return (cW.clamp(min=0) @ l[idx].t() + cW.clamp(max=0) @ u[idx].t()).t()\n\n def criterion(self, preds, targets, tasks, **kwargs):\n # The inputs and targets could come from single task or a mix of tasks\n # The network always makes the predictions with all its heads\n # The criterion will match the head and task to calculate the loss.\n loss, robust_loss, robust_err = 0, 0, 0\n if self.multihead:\n for t, t_preds in preds.items():\n # The index of inputs that matched specific task\n inds = [i for i in range(len(tasks)) if tasks[i] == t]\n if len(inds) > 0:\n t_preds = t_preds[inds]\n t_target = targets[inds]\n loss += self.criterion_fn(t_preds, t_target) * len(inds)\n\n if self.eps_scheduler.current:\n for y0 in range(len(self.C)):\n if (t_target == y0).sum().item() > 0:\n lower_bound = self._interval_based_bound(y0, t_target == y0, key=t)\n # robust_loss += self.criterion_fn(-lower_bound, t_target[t_target == y0])\n robust_loss += nn.CrossEntropyLoss(reduction='sum')(-lower_bound,\n t_target[t_target == y0]) / t_target.size(0)\n\n # increment when true label is not winning\n robust_err += (lower_bound.min(dim=1)[0] < 0).sum().item()\n robust_err /= len(t_target)\n\n loss /= len(targets) # Average the total loss by the mini-batch size\n if self.eps_scheduler.current:\n loss *= self.kappa_scheduler.current\n loss += (1 - self.kappa_scheduler.current) * robust_loss\n\n else:\n pred = preds['All']\n # (Not 'ALL') Mask out the outputs of unseen classes for incremental class scenario\n if isinstance(self.valid_out_dim, int):\n pred = preds['All'][:, :self.valid_out_dim]\n loss = self.criterion_fn(pred, targets)\n if self.eps_scheduler.current:\n robust_loss, robust_err = 0, 0\n for y0 in range(len(self.C)):\n if (targets == y0).sum().item() > 0:\n lower_bound = self._interval_based_bound(y0, targets == y0, key=\"All\")\n # (Not 'ALL') Mask out the outputs of unseen classes for incremental class scenario\n if isinstance(self.valid_out_dim, int):\n lower_bound = lower_bound[:, :self.valid_out_dim]\n\n # robust_loss += self.criterion_fn(-lower_bound, targets[targets == y0])\n robust_loss += nn.CrossEntropyLoss(reduction='sum')(-lower_bound,\n targets[targets == y0]) / targets.size(0)\n\n # increment when true label is not winning\n robust_err += (lower_bound.min(dim=1)[0] < 0).sum().item()\n\n loss *= self.kappa_scheduler.current\n loss += (1 - self.kappa_scheduler.current) * robust_loss\n robust_err /= len(targets)\n\n return loss, robust_err, robust_loss\n\n def save_params(self):\n self.prev_weight, self.prev_eps = {}, {}\n i = 0\n for block in self.model.children():\n if isinstance(block, nn.Sequential):\n for layer in block.children():\n if isinstance(layer, (Conv2dInterval, LinearInterval)):\n self.prev_weight[i] = layer.weight.data.detach().clone()\n self.prev_eps[i] = layer.eps.detach().clone()\n i += 1\n\n elif isinstance(block, nn.ModuleDict) and not self.multihead:\n self.prev_weight[i] = block[\"All\"].weight.data.detach().clone()\n self.prev_eps[i] = block[\"All\"].eps.detach().clone()\n i += 1\n\n elif isinstance(block, (Conv2dInterval, LinearInterval)):\n self.prev_weight[i] = block.weight.data.detach().clone()\n self.prev_eps[i] = block.eps.detach().clone()\n i += 1\n\n # self.tb.add_histogram(\"input/weight\", self.model.input.weight, self.current_task)\n # self.tb.add_histogram(\"input/eps\", self.model.input.eps, self.current_task)\n # 
self.tb.add_histogram(\"input/importance\", self.model.input.importance, self.current_task)\n #\n # self.tb.add_histogram(\"c1/0/weight\", self.model.c1[0].weight, self.current_task)\n # self.tb.add_histogram(\"c1/0/eps\", self.model.c1[0].eps, self.current_task)\n # self.tb.add_histogram(\"c1/0/importance\", self.model.c1[0].importance, self.current_task)\n #\n # self.tb.add_histogram(\"c1/2/weight\", self.model.c1[2].weight, self.current_task)\n # self.tb.add_histogram(\"c1/2/eps\", self.model.c1[2].eps, self.current_task)\n # self.tb.add_histogram(\"c1/2/importance\", self.model.c1[2].importance, self.current_task)\n #\n # self.tb.add_histogram(\"c2/0/weight\", self.model.c2[0].weight, self.current_task)\n # self.tb.add_histogram(\"c2/0/eps\", self.model.c2[0].eps, self.current_task)\n # self.tb.add_histogram(\"c2/0/importance\", self.model.c2[0].importance, self.current_task)\n #\n # self.tb.add_histogram(\"c2/2/weight\", self.model.c2[2].weight, self.current_task)\n # self.tb.add_histogram(\"c2/2/eps\", self.model.c2[2].eps, self.current_task)\n # self.tb.add_histogram(\"c2/2/importance\", self.model.c2[2].importance, self.current_task)\n #\n # self.tb.add_histogram(\"c3/0/weight\", self.model.c3[0].weight, self.current_task)\n # self.tb.add_histogram(\"c3/0/eps\", self.model.c3[0].eps, self.current_task)\n # self.tb.add_histogram(\"c3/0/importance\", self.model.c3[0].importance, self.current_task)\n #\n # self.tb.add_histogram(\"c3/2/weight\", self.model.c3[2].weight, self.current_task)\n # self.tb.add_histogram(\"c3/2/eps\", self.model.c3[2].eps, self.current_task)\n # self.tb.add_histogram(\"c3/2/importance\", self.model.c3[2].importance, self.current_task)\n #\n # self.tb.add_histogram('fc1/weight', self.model.fc1[0].weight, self.current_task)\n # self.tb.add_histogram(\"fc1/eps\", self.model.fc1[0].eps, self.current_task)\n # self.tb.add_histogram(\"fc1/importance\", self.model.fc1[0].importance, self.current_task)\n\n # self.tb.add_histogram('fc1/bias', self.model.fc1.bias, self.current_task)\n\n # self.tb.add_histogram('fc1/weight', self.model.fc1.weight, self.current_task)\n # self.tb.add_histogram(\"fc1/eps\", self.model.fc1.eps, self.current_task)\n # self.tb.add_histogram(\"fc1/importance\", self.model.fc1.importance, self.current_task)\n #\n # # self.tb.add_histogram('fc2/bias', self.model.fc2.bias, self.current_task)\n # self.tb.add_histogram('fc2/weight', self.model.fc2.weight, self.current_task)\n # self.tb.add_histogram(\"fc2/eps\", self.model.fc2.eps, self.current_task)\n # self.tb.add_histogram(\"fc2/importance\", self.model.fc2.importance, self.current_task)\n\n # self.tb.add_histogram('last/bias', self.model.last[self.current_head].weight, self.current_task)\n # self.tb.add_histogram('last/weight', self.model.last[self.current_head].weight, self.current_task)\n # self.tb.add_histogram(\"last/eps\", self.model.last[self.current_head].eps, self.current_task)\n # self.tb.add_histogram(\"last/importance\", self.model.last[self.current_head].importance, self.current_task)\n # self.tb.flush()\n\n def clip_weights(self, i, weights):\n low_old = self.prev_weight[i] - self.prev_eps[i]\n upp_old = self.prev_weight[i] + self.prev_eps[i]\n weights = torch.max(low_old, weights)\n weights = torch.min(upp_old, weights)\n return weights\n\n def clip_intervals(self, i, layer_weight, layer_eps):\n eps_old = self.prev_eps[i]\n assert (eps_old >= 0).all()\n\n low_old = self.prev_weight[i] - eps_old\n upp_old = self.prev_weight[i] + eps_old\n assert (low_old <= layer_weight).all()\n 
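 # Worked example of the interval intersection computed below (illustrative numbers, not from the original): if the old box is [0.3, 0.7] (prev_weight 0.5, prev_eps 0.2) and the new box is [0.3, 0.9], then low = max(0.3, 0.3) = 0.3 and upp = min(0.7, 0.9) = 0.7, giving weight_new = 0.5 and eps_new = 0.2, so the clipped interval never escapes the old one.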
assert (upp_old >= layer_weight).all()\n\n low_new = layer_weight - layer_eps\n upp_new = layer_weight + layer_eps\n\n low = torch.max(low_old, low_new)\n upp = torch.min(upp_old, upp_new)\n assert (low <= upp).all()\n\n weight_new = (low + upp) / 2\n eps_new = torch.abs(low - upp) / 2\n # eps_new = torch.where(eps_old < eps_new, eps_old, eps_new)\n # calc = (eps_old < eps_new)\n # if calc.any():\n # print(f\"ile złych: {calc.sum()}, wszystkich: {(eps_old >= 0).sum()}\")\n # assert (eps_old >= eps_new).all(), print(f\"eps assert i: {i}, ile złych: {(eps_old < eps_new).sum()}, wszystkich: {(eps_new >= 0).sum()}\")\n\n return eps_new, weight_new\n\n def clip_params(self):\n i = 0\n for c in self.model.children():\n if isinstance(c, nn.Sequential):\n for layer in c.children():\n if isinstance(layer, (Conv2dInterval, LinearInterval)):\n layer.weight.data = self.clip_weights(i, layer.weight.data.detach())\n layer.eps, layer.weight.data = self.clip_intervals(\n i, layer.weight.data.detach(), layer.eps.detach())\n i += 1\n\n elif isinstance(c, nn.ModuleDict) and not self.multihead:\n c[\"All\"].weight.data = self.clip_weights(i, c[\"All\"].weight.data.detach())\n c[\"All\"].eps, c[\"All\"].weight.data = self.clip_intervals(\n i, c[\"All\"].weight.data.detach(), c[\"All\"].eps.detach())\n i += 1\n\n elif isinstance(c, (Conv2dInterval, LinearInterval)):\n c.weight.data = self.clip_weights(i, c.weight.data.detach())\n c.eps, c.weight.data = self.clip_intervals(i, c.weight.data.detach(), c.eps.detach())\n i += 1\n\n def update_model(self, inputs, targets, tasks):\n out = self.forward(inputs)\n loss, robust_err, robust_loss = self.criterion(out, targets, tasks)\n self.optimizer.zero_grad()\n loss.backward()\n # nn.utils.clip_grad_norm_(self.model.parameters(), 1)\n nn.utils.clip_grad_norm_(self.model.parameters(), 1, norm_type=float('inf'))\n self.optimizer.step()\n if self.clipping and self.prev_eps:\n self.clip_params()\n\n self.kappa_scheduler.step()\n self.eps_scheduler.step()\n self.model.set_eps(self.eps_scheduler.current, trainable=self.config['eps_per_model'], head=self.current_head)\n return loss.item(), robust_err, robust_loss, out\n\n def learn_batch(self, train_loader, val_loader=None):\n if self.reset_optimizer: # Reset optimizer before learning each task\n self.log('Optimizer is reset!')\n self.init_optimizer()\n\n schedule = self.schedule_stack.pop()\n\n is_logging = not issubclass(tqdm, notebook_tqdm)\n pbar = range(schedule) if is_logging else tqdm(range(schedule))\n\n for epoch in pbar:\n data_timer = Timer()\n batch_timer = Timer()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n acc = AverageMeter()\n robust_err, robust_loss = -1, -1\n\n # Config the model and optimizer\n msg = f'Epoch: {epoch}'\n self.model.train()\n for param_group in self.optimizer.param_groups:\n msg += f' | LR: {param_group[\"lr\"]}'\n\n # Learning with mini-batch\n data_timer.tic()\n batch_timer.tic()\n\n for i, (inputs, target, task) in enumerate(train_loader):\n data_time.update(data_timer.toc()) # measure data loading time\n if self.gpu:\n inputs = inputs.cuda()\n target = target.cuda()\n\n loss, robust_err, robust_loss, output = self.update_model(inputs, target, task)\n inputs = inputs.detach()\n target = target.detach()\n self.tb.add_scalar(f\"Loss/train - task {self.current_task}\", loss, epoch)\n self.tb.add_scalar(f\"Robust error/train - task {self.current_task}\", robust_err, epoch)\n\n # measure accuracy and record loss\n acc = accumulate_acc(output, target, 
task, acc)\n losses.update(loss, inputs.size(0))\n\n batch_time.update(batch_timer.toc()) # measure elapsed time\n data_timer.toc()\n\n msg += (f' | Acc: {acc.avg:.3f} | Loss: {losses.avg:.3f}'\n f' | Rob. loss: {robust_loss:.3f} | Rob. error: {robust_err:.3f}')\n\n if is_logging:\n self.log(msg)\n else:\n cast(tqdm, pbar).set_description(desc=msg)\n # Evaluate the performance of current task\n if val_loader is not None:\n self.validation(val_loader)\n\n self.scheduler.step()\n # self.tb.flush()\n\n def add_valid_output_dim(self, dim=0):\n # This function is kind of ad-hoc, but it is the simplest way to support incremental class learning\n self.log('Incremental class: Old valid output dimension:', self.valid_out_dim)\n if self.valid_out_dim == 'ALL':\n self.valid_out_dim = 0 # Initialize it with zero\n self.valid_out_dim += dim\n self.log('Incremental class: New Valid output dimension:', self.valid_out_dim)\n return self.valid_out_dim\n\n def count_parameter(self):\n return sum(p.numel() for p in self.model.parameters())\n\n def save_model(self, filename):\n model_state = self.model.state_dict()\n if isinstance(self.model, torch.nn.DataParallel):\n # Get rid of 'module' before the name of states\n model_state = self.model.module.state_dict()\n for key in model_state.keys(): # Always save it to cpu\n model_state[key] = model_state[key].cpu()\n print('=> Saving model to:', filename)\n torch.save(model_state, filename + '.pth')\n print('=> Save Done')\n\n def cuda(self):\n # torch.cuda.set_device(self.config['gpuid'][0])\n self.model = self.model.cuda()\n self.criterion_fn = self.criterion_fn.cuda()\n # Multi-GPU\n if len(self.config['gpuid']) > 1:\n self.model = torch.nn.DataParallel(self.model, device_ids=self.config['gpuid'],\n output_device=self.config['gpuid'][0])\n return self\n\n\ndef accumulate_acc(output, target, task, meter):\n if 'All' in output.keys(): # Single-headed model\n meter.update(accuracy(output['All'], target), len(target))\n else: # outputs from multi-headed (multi-task) model\n for t, t_out in output.items():\n inds = [i for i in range(len(task)) if task[i] == t] # The index of inputs that matched specific task\n if len(inds) > 0:\n t_out = t_out[inds]\n t_target = target[inds]\n meter.update(accuracy(t_out, t_target), len(inds))\n\n return meter\n","repo_name":"lukinio/CLB","sub_path":"agents/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":23562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70471973533","text":"from heapq import heappush, heappop\n\ndef find_k_largest_numbers(nums, k):\n # iterate through nums\n # put first k in min heap\n # for rest of list just check its bigger than the smallest\n ## number in min_heap. 
If so pop that and push in num - else skip\n min_heap = []\n for i, num in enumerate(nums):\n if i < k:\n heappush(min_heap, num)\n elif num > min_heap[0]:\n heappop(min_heap)\n heappush(min_heap, num)\n return sorted(list(min_heap))\n\ndef find_kth_smallest_numbers(nums, k):\n # to find smallest - reverse direction of heap by multiplying each num by -1\n # this will return the lagest int value (which is negative)\n\n ## iterate through list - add to heap k items\n ## next check if that item is larger than current head of heap\n ### if so pop and push\n\n max_heap = []\n for i, num in enumerate(nums):\n if i < k:\n heappush(max_heap, -num)\n elif num < -max_heap[0]:\n heappop(max_heap)\n heappush(max_heap, -num)\n return -max_heap[0]\n\ndef find_k_freq_nums(nums, k):\n # create map num : count\n # iterate over keys adding to heap (val, key)\n ## when len(min_heap) > k - start poping so you are left\n ### k most frequent numbers\n counter = {}\n for num in nums:\n counter[num] = counter.get(num,0) + 1\n\n min_heap = []\n for num, count in counter.items():\n heappush(min_heap, (count,num))\n if len(min_heap) > k:\n heappop(min_heap)\n\n return sorted([l[1] for l in list(min_heap)])\n\ndef find_k_frequent_letters(letters):\n\n ### map counter\n m = {}\n for letter in letters:\n m[letter] = m.get(letter,0) + 1\n\n ## heap to find k largest\n max_heap = []\n for letter, count in m.items():\n heappush(max_heap, (-count, letter))\n\n sorted_letters = []\n for _ in list(max_heap):\n letter = heappop(max_heap)\n for _ in range(abs(letter[0])):\n sorted_letters.append(letter[1])\n\n return ''.join(sorted_letters)\n\ndef find_closest_elements(nums, K, X):\n #store tuple of (dist, num) in min_heap\n max_heap = []\n for i, num in enumerate(nums):\n dist_num_tuple = (-abs(num - X), num)\n heappush(max_heap, dist_num_tuple)\n if i >= K:\n heappop(max_heap)\n return sorted([dist_tuple[1] for dist_tuple in max_heap])\n\nassert find_k_largest_numbers([3, 1, 5, 12, 2, 11], 3) == [5, 11, 12]\nassert find_k_largest_numbers([5, 12, 11, -1, 12], 3) == [11, 12, 12]\nassert find_k_largest_numbers([100, 100, 2, 50, 49], 3) == [50, 100, 100]\n\nassert find_kth_smallest_numbers([1, 5, 12, 2, 11, 5], k = 3) == 5\nassert find_kth_smallest_numbers([1, 5, 12, 2, 11, 5], k = 4) == 5\nassert find_kth_smallest_numbers([5, 12, 11, -1, 12], k = 3) == 11\n\nassert find_k_freq_nums([1, 3, 5, 12, 11, 12, 11], k = 2) == [11, 12]\nassert find_k_freq_nums([5, 12, 11, 3, 11], k = 2) in [[5,11], [11, 12], [3, 11]]\n\nassert find_k_frequent_letters('Programming') == 'ggmmrrPaino'\nassert find_k_frequent_letters('abcbab') == 'bbbaac'\n\nassert find_closest_elements([5, 6, 7, 8, 9], 3, 7) == [6, 7, 8]\nassert find_closest_elements([2, 4, 5, 6, 9], 3, 6) == [4, 5, 6]\nassert find_closest_elements([2, 4, 5, 6, 9], 3, 10) == [5, 6, 9]\nprint('all tests have passed!')","repo_name":"cabrossman/python_algos","sub_path":"M) Top K/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15970581799","text":"from django.shortcuts import render\n\nimport json\n\n# Create your views here.\n\n\nfrom django.http import HttpResponse\n\n\ndef index(request, id):\n return HttpResponse(\"Hello, world. 
You're at the mainpage index, parameter value: %s.\" % id)\n\n\ndef show(request):\n from .models import CaseShow\n from .models import ServiceShow\n case_list = CaseShow.objects.all()\n service_list = ServiceShow.objects.all()\n context = {'case_list': case_list,\n 'service_list': service_list}\n return render(request, 'mainpage/show.html', context)\n\n\ndef productDetail(request, productId):\n from .models import ProductShow\n productDetail = ProductShow.objects.get(pk=productId)\n # caseshowDetailMulti=caseDetail.caseshowdetailmulti_set.all().order_by('order')\n context = {'productDetail': productDetail}\n return render(request, 'mainpage/productDetail.html', context)\n\n\ndef caseDetail(request, caseId,curCata,curPage):\n from .models import CaseShow\n caseDetail = CaseShow.objects.get(pk=caseId)\n caseshowDetailMulti = caseDetail.caseshowdetailmulti_set.all().order_by('order')\n context = {'caseDetail': caseDetail, 'caseshowDetailMulti': caseshowDetailMulti,'curCata':curCata,'curPage':curPage}\n return render(request, 'mainpage/caseDetail.html', context)\n\n\ndef caseList(request, cata, pageIndex):\n from .models import CaseCat\n from .models import CaseShow\n from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n #0 - means all categories\n\n caseCatList = CaseCat.objects.order_by('order')\n\n if 0==int(cata):\n case_list_all=CaseShow.objects.all()\n else:\n case_list_all = CaseShow.objects.filter(caseCat=cata)\n\n paginator = Paginator(case_list_all, 6, 1)\n\n try:\n case_list_page = paginator.page(pageIndex)\n except PageNotAnInteger:\n case_list_page = paginator.page(1)\n except EmptyPage:\n case_list_page = paginator.page(paginator.num_pages)\n\n context = {'case_list_page': case_list_page, 'caseCatList': caseCatList,'curCata':cata, 'curPage':pageIndex}\n\n return render(request, 'mainpage/caseList.html', context)\n\n\ndef serviceDetail(request, serviceId):\n return render(request, 'mainpage/serviceDetail' + serviceId + '.html')\n\n\ndef about(request):\n return render(request, 'mainpage/about.html')\n\n\ndef contact(request):\n return render(request, 'mainpage/contact.html')\n\n\ndef serviceList(request):\n from .models import ServiceShow\n service_list = ServiceShow.objects.all()\n context = {'service_list': service_list}\n return render(request, 'mainpage/serviceList.html', context)\n\n\ndef productList(request, cata, pageIndex):\n from .models import ProductShow\n from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n product_list_all = ProductShow.objects.all()\n paginator = Paginator(product_list_all, 4)\n\n try:\n product_list_page = paginator.page(pageIndex)\n except PageNotAnInteger:\n product_list_page = paginator.page(1)\n except EmptyPage:\n product_list_page = paginator.page(paginator.num_pages)\n\n context = {'product_list_page': product_list_page, }\n\n return render(request, 'mainpage/productList.html', context)\n\n\ndef guestCollect(request): # collect customer requirement information\n from .models import GuestCollect\n ctx = {'rlt': 10000}\n if request.POST:\n ctx['name'] = request.POST['name']\n ctx['tel'] = request.POST['tel']\n ctx['txt'] = request.POST['txt']\n\n try:\n GuestCollect.objects.create(guestName=ctx['name'], guestTel=ctx['tel'], guestRequire=ctx['txt'])\n resp = {'code': 100, 'detail': 'Success'}\n except Exception as e:\n resp = {'code': 101, 'detail': 'Error'}\n\n return HttpResponse(json.dumps(resp), 
content_type=\"application/json;charset=utf-8\")\n\n","repo_name":"walkingmanc/officalweb","sub_path":"front/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"26465810564","text":"import csv\nimport http.client\nimport json\n\n#name of the csv that will be generated (do not change):\ncsvName = 'codigoshijos.csv'\nidhijo = ''\n\n\n#place where you want the CSV file to be located\ncsvDestination ='C:\\\\Users\\\\carlo\\\\Documents\\\\git\\\\GetVariationIdFromMercadoLibre\\\\OUT\\\\codigoshijos.csv'\n\n\n\n#miscellaneous variables, do not touch\ndirectory = ''\nincidentNumber = ''\n\n\n\n#http.client variables\n\n\nconn = http.client.HTTPSConnection(\"api.mercadolibre.com\")\npayload = ''\nheaders = {}\nconn.request(\"GET\", \"/items/MLB2026657802\", payload, headers)\nres = conn.getresponse()\ndata = res.read()\nresponse = json.loads(data)\n\n\nwith open( csvDestination , 'w',encoding=\"utf-8\", newline='') as csvout:\n writer = csv.writer(csvout)\n\n #defines the file header\n writer.writerow([\"Tray-Pai\",\"MLB-Pai\",\"MLB-filho\",\"Nome-produto\",\"Var1\",\"var2\",\"var3\",\"Nome-concatenado\"])\n with open('csvIn.csv', newline='') as csvin:\n reader = csv.DictReader(csvin)\n for row in reader:\n conn.request(\"GET\", \"/items/\"+row['codigo_MLB_padre'], payload, headers)\n res = conn.getresponse()\n data = res.read()\n response = json.loads(data)\n product_name = response[\"title\"]\n\n #this step goes into the response and grabs the desired information from Mercado Libre's API\n for variation in response[\"variations\"]:\n \n print(variation[\"id\"])\n print(row['codigo_MLB_padre'])\n\n \n \n\n \n if len(variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")) == 3:\n vari1 = variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")[0].strip()\n vari2 = variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")[1].strip()\n vari3 = variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")[2].strip()\n\n #this one with 3\n nome_concatenado = product_name+\" cor:\"+vari1+\";Tamanho:\"+vari2+\"vari3:\"+vari3\n elif len(variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")) == 2:\n vari1 = variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")[0].strip()\n vari2 = variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")[1].strip()\n vari3 = \"N/A\"\n\n #this one with 2\n nome_concatenado = product_name+\" cor:\"+vari1+\";Tamanho:\"+vari2\n else:\n vari1 = variation[\"attribute_combinations\"][0][\"value_name\"].split(\"/\")[0].strip()\n vari2 = \"N/A\"\n vari3 = \"N/A\"\n\n #This one with 1\n nome_concatenado = product_name+\" cor:\"+vari1\n \n \n\n print(nome_concatenado)\n\n\n writer.writerow([row['codigo_padre_plataforma'],row['codigo_MLB_padre'], variation[\"id\"] ,product_name,vari1,vari2,vari3,nome_concatenado])\n \n\n","repo_name":"CarlosBueno99/GetVariationIdFromMercadoLibre","sub_path":"gettingVariationCodesFromMLid.py","file_name":"gettingVariationCodesFromMLid.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"27791310675","text":"import web\nimport calendar\nfrom web import form\nfrom waterbird.utils import *\nfrom waterbird.model import Model\nfrom waterbird.month import Month\nfrom waterbird.date import Date\n\nurls = (\n\t'/', 'index',\n\t'/month', 'month',\n\t'/prev', 
'prev',\n\t'/next', 'next',\n\t'/date', 'date',\n\t'/error', 'error',\n\t'/login', 'login',\n\t'/register', 'register',\n\t'/entries', 'entries'\n)\n\nweb.config.debug = False\n\napp = web.application(urls, globals())\nsession = web.session.Session(app, web.session.DiskStore('sessions'))\nglobals = {'calmonth': calendar.month_name, 'session':session}\nrender = web.template.render('templates/', globals=globals)\ndb = Model()\n\n# Session will be all INTEGERS\n# No more left padding\n\nclass index():\n\tdef GET(self):\n\t\t\tsetSession(session)\n\t\t\tweb.seeother('/month')\n\nclass month():\n\tdef GET(self):\n\t\tmonth = Month(int(session.year), int(session.month))\n\t\tmonth_page = render.month(month=month)\n\t\treturn render.layout(content=month_page)\n\nclass prev():\n\tdef GET(self):\n\t\tsetPrev(session)\n\t\treturn web.seeother('/month')\n\nclass next():\n\tdef GET(self):\n\t\tsetNext(session)\n\t\treturn web.seeother('/month')\n\nclass date():\n\tdef GET(self):\n\t\tpd = parseDateUrl(web.input().date)\n\t\tif pd == False or 'day' not in pd:\n\t\t\tweb.seeother('/error')\n\n\t\tyear = pd['year']\n\t\tmonth = pd['month']\n\t\tday = pd['day']\n\t\tsetSession(session, year, month, day)\n\n\t\tdate_obj = Date(session.year, session.month, session.day)\n\t\tif not date_obj.hasEntry:\n\t\t\tdate_obj.hasEntry = True\n\t\t\tdate_obj.entry = '' # dummy entry for div to show up\n\n\t\tda = render.date(days=[date_obj])\n\t\treturn render.layout(da, '..')\n\n\n\tdef POST(self):\n\t\tentry = web.input().entry.strip()\n\t\tdate_obj = Date(session.year, session.month, session.day)\n\t\tdate_obj.updateEntry(entry)\n\t\tsame = '/date?date=%s-%s-%s' % (session.year, session.month, session.day)\n\t\traise web.seeother(same)\n\nclass login():\n\tdef GET(self):\n\t\tlform = form.Form(\n\t\t\tform.Textbox('Username:'),\n\t\t\tform.Textbox('Password:'))\n\n\t\treturn render.layout(render.login())\n\n\tdef POST(self):\n\t\treturn 'POST'\n\nclass entries():\n\tdef GET(self):\n\t\tmonth = Month(int(session.year), int(session.month))\n\t\tda = render.date(days=month.days)\n\n\t\treturn render.layout(da, '..')\n\nclass register():\n\tdef POST(self):\n\t\tdb.register(web.input().username, web.input().password, web.input().email)\n\t\treturn render.layout(render.index('Registration Successful.'))\n\nclass login():\n\tdef GET(self):\n\t\treturn render.layout(render.login())\n\n\n\tdef POST(self):\n\t\tres = db.login(web.input().username, web.input().password)\n\t\tif res == None:\n\t\t\tmsg = \"No such user exists.\"\n\t\telif res == False:\n\t\t\tmsg = \"Wrong password.\"\n\t\telse:\n\t\t\tmsg = \"Successfully logged in.\"\n\t\t\tsession.user = web.input().username\n\t\treturn render.layout(render.index(msg))\n\nclass error():\n\tdef GET(self):\n\t\treturn render.index('Something is terribly wrong.')\n\nif __name__ == \"__main__\":\n\tapp.run()\n\n","repo_name":"kingfisherblue/waterbird","sub_path":"bin/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5459931258","text":"import argparse, re\nfrom PIL import Image, ImageFont, ImageDraw\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"program_path\")\n parser.add_argument(\"image_path\")\n parser.add_argument(\"--width\", type=int, default=670)\n parser.add_argument(\"--height\", type=int, default=700)\n parser.add_argument(\"--fontsize\", type=int, default=19)\n 
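 # Added note for illustration (not in the original source): the placement flags below are consumed later in this script; xoffset/yoffset set the origin passed to draw.text, and lineoffset is extra vertical spacing added to fontsize to get the per-line height.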
parser.add_argument(\"--xoffset\", type=int, default=0)\n parser.add_argument(\"--yoffset\", type=int, default=0)\n parser.add_argument(\"--lineoffset\", type=int, default=2)\n parser.add_argument(\"--baseimage\", type=str, default=\"\")\n parser.add_argument(\"--header\", type=str, default=\"\")\n parser.add_argument(\"--line_colors\", type=str, default=None)\n parser.add_argument(\"--bg_color\", type=str, default=None)\n args = parser.parse_args()\n\n program_lines = [line.rstrip() for line in open(args.program_path, \"r\")]\n\n white = (255, 255, 255)\n black = (0, 0, 0)\n red = (149, 26, 28)\n blue = (1, 142, 169)\n green = (29, 147, 23)\n color_map = { \"w\": white, \"k\": black, \"r\": red, \"b\": blue, \"g\": green }\n\n image = Image.new(\"RGBA\", (args.width, args.height), white)\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(\"fonts/CONSOLA.TTF\", args.fontsize)\n\n if args.bg_color is not None:\n draw.rectangle([(0, 0), image.size], fill=args.bg_color)\n\n y_offset = args.yoffset\n line_height = args.fontsize + args.lineoffset\n header = False\n id_regex = re.compile(\"[a-zA-Z_]+|[+*=]\")\n\n if len(args.header) > 0:\n program_lines = [args.header] + program_lines\n header = True\n\n line_colors = []\n if args.line_colors is not None:\n import ast\n line_colors = ast.literal_eval(args.line_colors)\n\n for line_idx, line in enumerate(program_lines):\n if line.strip():\n if header:\n draw.rectangle([(0, 0), (args.width, line_height - 2)], fill=black)\n size = draw.textsize(line, font=font)\n draw.text(((args.width / 2) - (size[0] / 2), y_offset), line, white, font=font)\n header = False\n else:\n text_color = black\n if line_idx < len(line_colors):\n lc = line_colors[line_idx]\n if len(lc) > 0:\n #draw.rectangle([(0, y_offset), (args.width, y_offset + line_height - 1)], fill=lc)\n text_color = lc\n\n draw.text((args.xoffset, y_offset), line, text_color, font=font)\n \n # Next line\n y_offset += line_height\n\n if len(args.baseimage) == 0:\n image.save(args.image_path)\n else:\n base_image = Image.open(args.baseimage)\n base_image.paste(image, (0, 0))\n base_image.save(args.image_path)\n","repo_name":"synesthesiam/eyecode2-web","sub_path":"render_program.py","file_name":"render_program.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"74118515610","text":"\nfrom collections import defaultdict\n\ndef dfs(ticket_dic, depth, key, answer):\n\n answer.append(key)\n value_list = ticket_dic[key]\n print(value_list)\n for i in range(len(value_list)):\n if value_list[i] != \"none\":\n key = value_list[i]\n value_list[i] = \"none\"\n\n dfs(ticket_dic, depth + 1, key, answer)\n\n\ndef solution(tickets):\n answer = []\n ticket_dic = defaultdict(list)\n print(tickets)\n for k, v in tickets:\n ticket_dic[k].append(v)\n print(ticket_dic)\n\n for k, v in ticket_dic.items():\n ticket_dic[k] = sorted(v)\n dfs(ticket_dic, 0, tickets[0][0], answer)\n\n print(ticket_dic)\n ## hi\n return answer\nif __name__ == \"__main__\":\n tickets = [[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\",\"SFO\"]]\n print(solution(tickets))","repo_name":"ekzm8523/CodingTestPractice","sub_path":"python/programmers/dfs_bfs_4.py","file_name":"dfs_bfs_4.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"7533439844","text":"import json\nimport os\n\nimport 
dotenv\n\ndotenv.load_dotenv(verbose=True)\n\n\ndef parse_str(key, default_value):\n env_value = os.environ.get(key, default_value)\n return env_value\n\n\ndef parse_list(key, default_value):\n env_value = os.environ.get(key, '')\n value = env_value.split(',') or default_value\n return value\n\n\ndef parse_set(key, default_value):\n return set(parse_list(key, default_value))\n\n\ndef parse_dict(key, default_value):\n env_value = os.environ.get(key, '{}')\n value = json.loads(env_value) or default_value\n return value\n\n\ndef parse_bool(key, default_value):\n return bool(parse_int(key, default_value))\n\n\ndef parse_int(key, default_value):\n env_value = os.environ.get(key)\n if env_value is None:\n return default_value\n value = int(env_value)\n return value\n\n\ndef load_setting_value(field, field_type, default_value):\n valid_field_type_to_parse_func = {\n str: parse_str,\n list: parse_list,\n set: parse_set,\n dict: parse_dict,\n bool: parse_bool,\n int: parse_int,\n }\n\n parse_func = valid_field_type_to_parse_func.get(field_type)\n if parse_func is None:\n raise Exception(\n f\"unsupported field type {field_type}, only support field type {valid_field_type_to_parse_func.keys()}\"\n )\n value = parse_func(field, default_value)\n return value\n\n\nclass Setting(object):\n\n FLASK_ENV = load_setting_value('FLASK_ENV', str, 'production')\n SECRET_KEY = load_setting_value('SECRET_KEY', str, '45008b73ad9d00c01f174dddd41df6ad')\n TESTING = load_setting_value('TESTING', bool, False)\n\n SENTRY_URI = load_setting_value('SENTRY_URI', str, '')\n SERVER_HOST = load_setting_value('SERVER_HOST', str, 'http://127.0.0.1:5000/')\n\n JWT_SECRET_KEY = load_setting_value('JWT_SECRET_KEY', str, 'ddfaac4a33c94806')\n JWT_TOKEN_LOCATION = load_setting_value('JWT_TOKEN_LOCATION', list, ['headers'])\n JWT_QUERY_STRING_NAME = load_setting_value('JWT_QUERY_STRING_NAME', str, 'token')\n JWT_QUERY_STRING_VALUE_PREFIX = load_setting_value('JWT_QUERY_STRING_VALUE_PREFIX', str, '')\n JWT_ACCESS_TOKEN_EXPIRES = load_setting_value('JWT_ACCESS_TOKEN_EXPIRES', int, 60 * 5)\n JWT_REFRESH_TOKEN_EXPIRES = load_setting_value('JWT_REFRESH_TOKEN_EXPIRES', int, 60 * 60 * 24 * 7)\n SQLALCHEMY_DATABASE_URI = load_setting_value(\n 'SQLALCHEMY_DATABASE_URI', str, ''\n )\n SQLALCHEMY_ENGINE_OPTIONS = load_setting_value(\n 'SQLALCHEMY_ENGINE_OPTIONS', dict, {'pool_size': 10, 'pool_recycle': 3600}\n )\n SQLALCHEMY_TRACK_MODIFICATIONS = load_setting_value('SQLALCHEMY_TRACK_MODIFICATIONS', bool, False)\n TEST_DATA_PATH = load_setting_value('TEST_DATA_PATH', str, 'sample_data')\n\n ORIGIN_MEDIA_PATH = load_setting_value('MEDIA_PATH', str, 'static/origin_media/')\n ENCRYPT_MEDIA_PATH = load_setting_value('ENCRYPT_MEDIA_PATH', str, 'static/encrypt_media')\n STORAGE_BACKEND = load_setting_value('STORAGE_BACKEND', str, 'app.storage_backends.local_backend.LocalBackend')\n\n # celery 配置\n CELERY_BROKER_URL = load_setting_value('CELERY_BROKER_URL', str, 'redis://localhost:6379/2')\n CELERY_RESULT_BACKEND = load_setting_value('CELERY_RESULT_BACKEND', str, 'redis://localhost:6379/3')\n DEFAULT_CELERY_MAX_RETRIES = load_setting_value('DEFAULT_CELERY_MAX_RETRIES', int, 3)\n\n\nsetting = Setting()\n","repo_name":"December1208/hls-video-encrypt","sub_path":"app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24291636893","text":"import network # type: ignore (this is a pylance ignore warning 
directive)\nimport urequests # type: ignore\nfrom time import sleep\nfrom machine import Pin, Timer, UART # type: ignore\n# my own files\nimport my_config # type: ignore\n\n# 0 is doing GET-communication\n# 1 uses post. transmits a identifier, values as blob, sends to RX1.php\nTX_INTERFACE_VERSION = 1 # integer (range 0 to 9), just increasing when there is a change on the transmitted value format \n\n\ndef debug_print(DO_DEBUG_PRINT:bool, text:str):\n if(DO_DEBUG_PRINT):\n print(text)\n # otherwise just return\n\n# define the toggle as function. Overkill for now but might be expanded later\ndef blink(timer):\n led_onboard.toggle()\n\ndef uart_ir_e350(uart_ir, IR_SIMULATION:bool):\n if(IR_SIMULATION):\n return('/LGZ4ZMF100AC.M26\\r\\n\\x02F.F(00)\\r\\n0.0( 120858)\\r\\nC.1.0(13647123)\\r\\nC.1.1( )\\r\\n1.8.1(042951.721*kWh)\\r\\n1.8.2(018609.568*kWh)\\r\\n2.8.1(000000.302*kWh)\\r\\n2.8.2(000010.188*kWh)\\r\\n1.8.0(061561.289*kWh)\\r\\n2.8.0(000010.490*kWh)\\r\\n15.8.0(061571.780*kWh)\\r\\nC.7.0(0008)\\r\\n32.7(241*V)\\r\\n52.7(243*V)\\r\\n72.7(242*V)\\r\\n31.7(000.35*A)\\r\\n51.7(000.52*A)\\r\\n71.7(000.47*A)\\r\\n82.8.1(0000)\\r\\n82.8.2(0000)\\r\\n0.2.0(M26)\\r\\nC.5.0(0401)\\r\\n!\\r\\n\\x03\\x01')\n if (uart_ir.any() != 0):\n uart_ir.read() # first clear everything. This should return None\n print('Warning: UART buffer was not empty at first read')\n uart_ir.write('\\x2F\\x3F\\x21\\x0D\\x0A') # in characters: '/?!\\r\\n'\n sleep(1) # need to make sure it has been sent but not wait more than 2 secs. TODO: maybe use uart_ir.flush()\n uart_str_id = uart_ir.read() # should be b'/LGZ4ZMF100AC.M26\\r\\n'\n uart_ir.write('\\x06\\x30\\x30\\x30\\x0D\\x0A') # in characters: ACK000\\r\\n\n sleep(2) \n uart_str_values_0 = uart_ir.read()\n sleep(2) \n uart_str_values_1 = uart_ir.read()\n sleep(2) \n if (uart_ir.any() != 0):\n print('Warning: UART buffer is not empty after two reads')\n \n if ((uart_str_id == None) or (uart_str_values_0 == None) or (uart_str_values_1 == None)):\n return('uart communication did not work')\n else:\n return(uart_str_id.decode()+uart_str_values_0.decode()+uart_str_values_1.decode())\n\ndef find_positions(uart_received_str):\n positions = list()\n positions.append(uart_received_str.find(\"1.8.1(\")+6) # returns -1 if not found\n positions.append(uart_received_str.find(\"1.8.2(\")+6) \n\n positions.append(uart_received_str.find(\"32.7(\")+5)\n positions.append(uart_received_str.find(\"52.7(\")+5)\n positions.append(uart_received_str.find(\"72.7(\")+5)\n \n positions.append(uart_received_str.find(\"31.7(\")+5)\n positions.append(uart_received_str.find(\"51.7(\")+5)\n positions.append(uart_received_str.find(\"71.7(\")+5)\n\n positions.append(min(positions) > 20) # all of them need to be bigger than 20. 
Otherwise returning false (find returns -1 but I add the length of the string)\n \n return(positions)\n\ndef print_values(DO_DEBUG_PRINT:bool, values:list, val_watt_cons:str):\n debug_print(DO_DEBUG_PRINT, \"NT / HT values [kWh]: \"+values[0]+\", \"+values[1])\n debug_print(DO_DEBUG_PRINT, \"Phase1, Phase2, Phase3 values [V*A]: \"+values[2]+\"*\"+values[5]+\", \"+values[3]+\"*\"+values[6]+\", \"+values[4]+\"*\"+values[7])\n debug_print(DO_DEBUG_PRINT, \"Watt consumption now [W]: \"+val_watt_cons)\n\ndef get_wlan_ok(WLAN_SIMULATION:bool, wlan):\n if(WLAN_SIMULATION):\n return(True)\n return(wlan.isconnected())\n\ndef wlan_connect(WLAN_SIMULATION:bool, wlan, tim, led_onboard):\n wlan_ok = get_wlan_ok(WLAN_SIMULATION=WLAN_SIMULATION, wlan=wlan)\n if(wlan_ok):\n return() # nothing to do\n else:\n tim.init(freq=4.0, mode=Timer.PERIODIC, callback=blink) # signals I'm searching for WLAN \n while not wlan_ok:\n config_wlan = my_config.get_wlan_config() # stored in external file\n wlan.connect(config_wlan['ssid'], config_wlan['pw'])\n sleep(3)\n wlan_ok = get_wlan_ok(WLAN_SIMULATION=WLAN_SIMULATION, wlan=wlan)\n print(\"WLAN connected? \"+str(wlan_ok)) # debug output\n\n # signals wlan connection is ok\n tim.deinit()\n led_onboard.on()\n\ndef send_message_and_wait(WLAN_SIMULATION:bool, message:str, wait_time:int, led_onboard):\n if(not WLAN_SIMULATION): # not sending anything in simulation\n response = urequests.post(message) \n response.close() # this is needed, I'm getting outOfMemory exception otherwise after 4 loops\n sleep(wait_time) # in seconds. Do not set it below ~3 to limit the number of requests\n led_onboard.toggle()\n \ndef urlencode(dictionary:dict):\n urlenc = \"\"\n for key, val in dictionary.items():\n urlenc += \"%s=%s&\" %(key,val)\n urlenc = urlenc[:-1] # gets me something like 'val0=23&val1=bla space'\n return(urlenc)\n\ndef send_message_and_wait_post(WLAN_SIMULATION:bool, message:dict, wait_time:int, led_onboard, TX_INTERFACE_VERSION:int):\n if(not WLAN_SIMULATION): # not sending anything in simulation\n URL = \"https://widmedia.ch/wmeter/getRX1.php?TX=pico&TXVER=\"+str(TX_INTERFACE_VERSION)\n HEADERS = {'Content-Type':'application/x-www-form-urlencoded'}\n\n urlenc = urlencode(message)\n response = urequests.post(URL, data=urlenc, headers=HEADERS)\n debug_print(DO_DEBUG_PRINT, \"Text:\"+response.text)\n response.close() # this is needed, I'm getting outOfMemory exception otherwise after 4 loops\n sleep(wait_time) # in seconds\n led_onboard.toggle() # signal success\n\n\n# constants\nLENGTHS = [10,10,3,3,3,6,6,6] # HT, NT, 3 x voltages, 3 x currents\n# debug stuff\nDO_DEBUG_PRINT = my_config.get_debug_print()\nIR_SIMULATION = my_config.get_ir_simulation()\nWLAN_SIMULATION = my_config.get_wlan_simulation()\n\n# pins\nled_onboard = Pin(\"LED\", Pin.OUT)\nenable3v3_pin = Pin(28, Pin.OUT) # solder pin GP28 to '3V3_EN'-pin\n\n# machine specific stuff\ntim = Timer() # no need to specify a number on pico, all SW timers\nuart_ir = UART(0, baudrate=300, bits=7, parity=0, stop=1, tx=Pin(0), rx=Pin(1))\n\n# normal variables\nwlan_ok = False\n\n## program starts here\nled_onboard.off()\nenable3v3_pin.off()\n\nwlan = network.WLAN(network.STA_IF)\nwlan.active(True)\nsleep(3)\n\nwhile True:\n enable3v3_pin.on() # power on IR head\n sleep(2) # make sure 3.3V power is stable\n uart_received_str = uart_ir_e350(uart_ir,IR_SIMULATION) # this takes some seconds\n # print(uart_received_str)\n enable3v3_pin.off() # power down IR head\n\n # find parameters\n positions = 
find_positions(uart_received_str=uart_received_str)\n if (not positions[8]): # one of the finds did not work. Doesn't make sense to continue in this while loop\n print('Warning: did not find the values in the IR answer')\n sleep(10)\n continue\n\n values = list()\n for i in range(0,8): \n values.append(uart_received_str[positions[i]:positions[i]+LENGTHS[i]])\n\n # TODO: the calculation below is not correct. Not sure what the reported current value (in mA) relates to, simple P = U * I does not work (Scheinleistung/Wirkleistung)\n val_watt_cons = str(float(values[2])*float(values[5])+float(values[3])*float(values[6])+float(values[4])*float(values[7]))\n print_values(DO_DEBUG_PRINT=DO_DEBUG_PRINT, values=values, val_watt_cons=val_watt_cons)\n\n transmit_str = values[0]+\"_\"+values[1]+\"_\"+val_watt_cons # TODO: rather transmit the whole readout and have the string logic on the server\n message = dict([('device',my_config.get_device_name()),('val',transmit_str)])\n debug_print(DO_DEBUG_PRINT, str(message))\n \n wlan_connect(WLAN_SIMULATION=WLAN_SIMULATION, wlan=wlan, tim=tim, led_onboard=led_onboard) # try to connect to the WLAN. Hangs there if no connection can be made\n\n send_message_and_wait_post(WLAN_SIMULATION=WLAN_SIMULATION, message=message, wait_time=10, led_onboard=led_onboard, TX_INTERFACE_VERSION=TX_INTERFACE_VERSION) # does not send anything when in simulation\n# end while\n","repo_name":"saliWd/div","sub_path":"pico/wireless/__old_trial_unused/TX/main.oldWithValueLogic.py","file_name":"main.oldWithValueLogic.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"13106893885","text":"import asyncio # asynchronous coroutines\r\nimport aiohttp\r\n\r\nurls = [\r\n \"https://i1.huishahe.com/uploads/allimg/202205/9999/a703018f22.jpg\",\r\n \"https://i1.huishahe.com/uploads/allimg/202206/9999/9783c7e78d.jpg\",\r\n \"https://i1.huishahe.com/uploads/allimg/202205/9999/cce11863bd.jpg\"\r\n]\r\n\r\n\r\nasync def aiodownload(url):\r\n name = url.split(\"/\", 1)[1]\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(url) as resp:\r\n with open(name, mode=\"wb\") as f:\r\n f.write(await resp.content.read())\r\n\r\n\r\nasync def main():\r\n tasks = []\r\n for url in urls:\r\n tasks.append(asyncio.create_task(aiodownload(url)))\r\n await asyncio.wait(tasks)\r\n\r\n\r\nif __name__ == '__main__':\r\n loop = asyncio.get_event_loop()\r\n loop.run_until_complete(main())\r\n\r\n print(\"ok\")\r\n","repo_name":"ZP3333333333333/some_spider","sub_path":"some_spider/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"2327880867","text":"\"\"\"\nhttps://leetcode.com/problems/longest-palindromic-substring/\nNote: watch out for the many edge cases\n\"\"\"\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n res, N = \"\", len(s)\n dp = [[False] * N for _ in range(N)]\n # dp[i][j] = dp[i+1][j-1] if s[i] == s[j]\n for i in range(N-1, -1, -1):\n for j in range(i, N):\n # print(i, j, s[i:j+1])\n if i == j:\n dp[i][j] = True\n elif i + 1 == j and s[i] == s[j]:\n dp[i][j] = True\n else:\n if s[i] == s[j]:\n dp[i][j] |= dp[i+1][j-1]\n if dp[i][j]:\n if j - i + 1 > len(res):\n res = s[i: j+1]\n return res\n\n\nif __name__ == \"__main__\":\n print(Solution().longestPalindrome(\"babad\"))\n 
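 # Annotation added for illustration (not in the original source): with the DP above, \"babad\" yields \"aba\" (\"bab\" is an equally valid answer) and the \"cbbd\" call below yields \"bb\".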
print(Solution().longestPalindrome(\"cbbd\"))\n","repo_name":"ironboxer/leetcode","sub_path":"dp/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23471878144","text":"#%% [markdown]\n## Project Name: covid_misinformation\n### Program Name: CoronaV_Twitter_Rank.py\n### Purpose: To plot the Twitter users mentioned or quoted the most by day \n##### Date Created: June 23rd 2020\n#### \n#%%\nfrom IPython import get_ipython\n#get_ipython().magic('reset -sf')\nimport datetime\nfrom datetime import datetime as dt\nfrom datetime import date\nimport os \nfrom os import listdir\nfrom os.path import isfile, join\nimport pathlib\nimport colorlover as cl\nimport plotly.graph_objs as go\nimport chart_studio.plotly as py\nimport plotly.express as px\nimport pandas as pd\nimport numpy as np\nimport math\nimport re\nimport twint\nimport jgraph as jg\nimport ast\nimport functools\nimport operator\nfrom collections import Counter\nimport json\nAPP_PATH = str(pathlib.Path(__file__).parent.resolve())\n#%%\nt0=dt.now()\nprint('-------------------------Start Running Code------------------')\nprint('Time:', t0)\n#%% \n# Styles\nplotlycl=px.colors.qualitative.Plotly\norcl3=cl.scales['3']['seq']['Oranges']\ngrcl3=cl.scales['3']['seq']['Greys']\nbgcl=\"#111\"\nlinecl=grcl3[0]\nfontcl=\"#eee\"\nmarkercl=\"#e7ad52\"\nplotfont=\"Open Sans, sans-serif\"\n#%%\n# Reading in the Tweets from April 1st to 4th\ndatafiles=['5g_twitter_2020_04_01.csv', '5g_twitter_2020_04_02.csv', \n '5g_twitter_2020_04_03.csv', '5g_twitter_2020_04_04.csv',\n '5g_twitter_2020_04_05.csv', '5g_twitter_2020_04_06.csv', \n '5g_twitter_2020_04_07.csv', \n '5g_twitter_2020_04_08.csv', \n ]\ndflst=list(\n map(\n lambda x: pd.read_csv(\n os.path.join(APP_PATH, 'data', 'Twitter', x),\n error_bad_lines=False,\n dtype={\n 'id': int, 'conversation_id': int, 'created_at': int, 'date': object, 'time': object,\n 'timezone': object, 'user_id': int, 'username': str, 'name': str, 'place': str,\n 'tweet': str, 'mentions': object, 'urls': object, 'photos': object, 'replies_count': int,\n 'retweets_count': int, 'likes_count': int, 'hashtags': str, 'cashtags': str,\n 'link': str, 'reweet': bool, 'quote_url': str, 'video': int, 'near': float,\n 'geo': float, 'source': float, 'user_rt_id': float, 'user_rt': float, 'retweet_id': float,\n 'reply_to': object, 'retweet_date': float, 'translate': float, 'trans_src': float,\n 'trans_dest': float,\n }\n ),\n datafiles\n )\n )\ndf0=pd.concat(dflst)\nusername_dict=dict(zip(df0.username, df0.name))\ndel df0\n#%%\nusername_dict['youtube']='YouTube'\nusername_dict['realdonaldtrump']='Donald J. 
Trump'\nusername_dict['drzwelimkhize']='Dr Zweli Mkhize'\nusername_dict['piersmorgan']='Piers Morgan'\nusername_dict['borisjohnson']='Boris Johnson #StayAlert'\nusername_dict['stormisuponus']='Storm Is Upon Us'\nusername_dict['inevitable_et']='l E T 17'\nusername_dict['blaackdiamonnd']='theREALBlack💎'\nusername_dict['realjameswoods']='James Woods'\nusername_dict['realcandaceo']='Candace Owens'\nusername_dict['umvrr']='_umvr'\nusername_dict['holbornlolz']='Old Holborn ✘'\nusername_dict['x22report']='X22 Report'\nusername_dict['who']='World Health Organization (WHO)'\nusername_dict['cjtruth']='CJTRUTH⭐️⭐️⭐️'\nusername_dict['clarkemicah']='Peter Hitchens'\nusername_dict['potus']='President Trump'\nusername_dict['alexbkane']='Alex Kane'\nusername_dict['billgates']='Bill Gates'\nusername_dict['amandaholden']='Amanda Holden'\nusername_dict['drisapantami']='Isa Ali Pantami, PhD'\nusername_dict['ncdcgov']='NCDC'\nusername_dict['worldstar']='WORLDSTARHIPHOP'\nusername_dict['jimalkhalili']='Jim Al-Khalili'\nusername_dict['pastorchrislive']='Pastor Chris'\nusername_dict['sam_adeyemi']='Sam Adeyemi'\nusername_dict['apostlesuleman']='Apst Johnson Suleman'\n#%%\ndef twitter_rank(ind):\n df=pd.read_csv(\n os.path.join(APP_PATH, 'data', 'Twitter', datafiles[ind]),\n error_bad_lines=False,\n dtype={\n 'id': int, 'conversation_id': int, 'created_at': int, 'date': object, 'time': object,\n 'timezone': object, 'user_id': int, 'username': str, 'name': str, 'place': str,\n 'tweet': str, 'mentions': object, 'urls': object, 'photos': object, 'replies_count': int,\n 'retweets_count': int, 'likes_count': int, 'hashtags': str, 'cashtags': str,\n 'link': str, 'reweet': bool, 'quote_url': str, 'video': int, 'near': float,\n 'geo': float, 'source': float, 'user_rt_id': float, 'user_rt': float, 'retweet_id': float,\n 'reply_to': object, 'retweet_date': float, 'translate': float, 'trans_src': float,\n 'trans_dest': float,\n }\n )\n df=df.drop(['retweet','near','geo','source','user_rt_id','user_rt','retweet_id','retweet_date','translate','trans_src','trans_dest'], axis=1)\n df.drop_duplicates(subset='link', keep='first',inplace=True)\n df.sort_values('retweets_count', inplace=True, ascending=False)\n df.reset_index(drop=True, inplace=True)\n df=df[df.tweet.str.contains('corona|virus|covid', flags=re.IGNORECASE)]\n u_name=list(df['username'])\n quote_url=list(df['quote_url'])\n urls=list(map(lambda x: list(ast.literal_eval(x.lower())), list(df['urls'])))\n reply_to=list(df['reply_to'])\n reply_to_u=list(map(lambda x: list(pd.DataFrame(ast.literal_eval(x.lower()))['username']), reply_to))\n mentions=list(map(lambda x: list(ast.literal_eval(x.lower())), list(df['mentions'])))\n reacts_to=[]\n for i in range(len(mentions)):\n x=[]\n x1=mentions[i]\n x2=reply_to_u[i]\n x3=quote_url[i]\n x4=urls[i]\n x+=x1 \n x+=x2\n if isinstance(x3,str):\n x+=re.findall('twitter.com/([A-Za-z0-9]+)/status/[0-9]+', x3.lower()) \n if len(x4)>0:\n x4b=list(map(lambda x: re.findall('twitter.com/([A-Za-z0-9]+)/status/[0-9]+', x.lower()), x4))\n x4=list(set(functools.reduce(operator.iconcat, x4b,[])))\n x+=x4\n if u_name[i] in x:\n x.remove(u_name[i])\n x=list(set(x))\n reacts_to+=[x] \n df['source']=reacts_to\n reacts_to2=[]\n for j in range(len(reacts_to)):\n if len(reacts_to[j])>0:\n reacts_to2+=reacts_to[j]\n freq=Counter(reacts_to2)\n return(freq)\nfreq0=twitter_rank(0)\n#%%\nfreqs=list(map(lambda x: twitter_rank(x), range(len(datafiles))))\n#%%\n#dates=[datetime.date(2020,4,i) for i in 
range(1,9)]\ndates=['Apr01','Apr02','Apr03','Apr04','Apr05','Apr06','Apr07','Apr08',]\n#%%\ntop_n=20\nfreq_df=pd.DataFrame(freqs[0].most_common(top_n))\nfreq_df['date']=dates[0]\nfor i in range(1,8):\n tmp=pd.DataFrame(freqs[i].most_common(top_n))\n tmp['date']=dates[i]\n freq_df=pd.concat([freq_df, tmp])\ndel i, tmp\nfreq_df=freq_df.reset_index(drop=True)\nfreq_df.columns=['Username', 'Count','Date']\n#%%\ngrp=['bbcnews', 'bbcrealitycheck', 'bbcworld', 'channelstv', 'cnn', 'dcms', 'eorganiser',\n 'fmocdenigeria', 'huawei', 'imjustbrum', 'londonrealtv', 'mobilepunch', 'ncdcgov', 'news24',\n 'newsdeynigeria', 'nypost', 'pmnewsnigeria', 'rt_com', 'sgtreport', 'skynews', 'thecableng',\n 'verge', 'who', 'worldstar', 'x22report', 'youtube']\n\nnigeria=['alphamodella', 'apostlesuleman', 'asemota', 'channelstv', 'daddyfrz', 'dehkunle',\n 'dino_melaye', 'drisapantami', 'drjoeabah', 'fmocdenigeria', 'islimfit', 'mobilepunch',\n 'ncdcgov', 'newsdeynigeria', 'omojuwa', 'pastorchrislive', 'pmnewsnigeria', 'realffk',\n 'sam_adeyemi', 'segalink', 'thecableng', 'umvrr']\n\nuk=['afneil', 'amandaholden', 'bbcnews', 'bbcrealitycheck', 'bbcworld', 'borisjohnson',\n 'breesanna', 'carljackmiller', 'charliehtweets', 'chrisbrexitwto', 'clarkemicah', 'davidicke',\n 'dcms', 'drolufunmilayo', 'garylineker', 'imjustbrum', 'james40428873', 'jimalkhalili',\n 'lady44sassy', 'londonrealtv', 'mrjamesob', 'piersmorgan', 'prisonplanet', 'rhiannonjudithw',\n 'skynews', 'walegates']\n\nus=['alexbkane', 'billgates', 'blaackdiamonnd', 'christinepolon1', 'cjtruth', 'cnn', 'inevitable_et',\n 'jbouie', 'nypost', 'potus', 'realcandaceo', 'realdonaldtrump', 'realjameswoods', 'sgtreport',\n 'stormisuponus', 'tyrone_brother', 'verge', 'wizkhalifa', 'worldstar', 'youtube']\n\nqanon=['christinepolon1', 'cjtruth', 'inevitable_et', 'ipot1776', 'sgtreport', 'stormisuponus', 'x22report']\n#%%\nfreq_df=freq_df[freq_df['Username'].isin(grp)==0]\n#%%\nfreq_df=freq_df.assign(Group=np.where(freq_df['Username'].isin(grp), \"Group\", \"Individual\"))\nfreq_df=freq_df.assign(Country=np.where(freq_df['Username'].isin(nigeria), 'Nigeria', \n np.where(freq_df['Username'].isin(uk), 'UK',\n np.where(freq_df['Username'].isin(us), 'USA', \n 'Other'))))\nfreq_df=freq_df.assign(Size=freq_df['Count'].apply(lambda x: (math.log10(x+1))*20 if x>0 else 0))\nlabels=[]\nfor x in freq_df['Username']:\n if x.lower() in username_dict:\n labels.append(username_dict[x.lower()])\n else:\n labels.append('NA')\ndel x\nfreq_df['Label']=labels\ndel labels\nfreq_df['User']=freq_df['Label']+\"(@\"+freq_df['Username']+\")\"\n#%%\ntraces=[]\nctr=['USA','UK','Nigeria','Other']\nfor i in range(len(ctr)):\n d=freq_df[freq_df['Country']==ctr[i]]\n traces.append(\n go.Scatter(\n x=d['Date'], \n y=d['Count'],\n name=ctr[i],\n mode='markers',\n text=d['User'],\n hovertemplate='%{text}: '+ '%{y}',\n marker=go.scatter.Marker(\n size=d['Size'],\n color=plotlycl[i],\n )\n )\n )\nfig=go.Figure(data=traces)\nfig.update_layout(\n paper_bgcolor=bgcl,\n plot_bgcolor=bgcl,\n font=dict(\n family=plotfont, size=12,\n color='rgba(255, 255, 255, 0.5)',\n ),\n yaxis=dict(\n zeroline=False,\n title='Count',\n showgrid=False,\n color=grcl3[2],\n ), \n xaxis=dict(\n zeroline=False,\n title='Date',\n showgrid=False,\n color=grcl3[2],\n ),\n legend=dict(\n x=1,\n y=-0.2,\n ),\n)\nfig.write_html(os.path.join(APP_PATH, 
'plots','twitter_rank_20200624.html'))","repo_name":"liu-zoe/covid_misinformation","sub_path":"CoronaV_Twitter_Rank.py","file_name":"CoronaV_Twitter_Rank.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30082147040","text":"#!/usr/bin/env python\n\nimport requests\nimport os\nimport time\nfrom hashlib import sha1\nfrom ruamel.yaml import YAML\n\nCONFIG_DIR = \"/opt/config\"\nINFILE='infile'\nOUTFILE = 'outfile'\nDONEFILE = 'donefile'\nREPO='repo'\nURL = 'url'\n\n# outfile='/opt/data/content-verify/checked.lst'\n# infile='/opt/data/content-verify/blacklisted-artifacts.csv'\n\nconfig = {}\nfor filename in os.listdir(CONFIG_DIR):\n if filename.startswith('.'):\n continue\n\n print(f\"Reading configmap file: {filename}\")\n with open(os.path.join(CONFIG_DIR, filename)) as f:\n v = f.read()\n config[filename] = v\n\n\ninfile = config[INFILE]\noutfile = config[OUTFILE]\ndonefile = config[DONEFILE]\n\nrepo = config[REPO].replace(':', '/')\n\nurl = config[URL]\nif url.startswith('/'):\n url = url[1:]\n\n\nprint(f\"\"\"USING CONFIGURATION:\n---------------------------------------- \ninput file = {infile}\noutput file = {outfile}\ndone (marker) file = {donefile}\n\nIndy URL = {url}\nIndy repo = {repo}\n---------------------------------------- \n\n\n\"\"\")\n\nif not os.path.exists(donefile):\n processed = []\n if os.path.exists(outfile):\n with open(outfile) as f:\n processed = [line.rstrip().split(':')[0] for line in f.readlines() if 'ERROR' not in line] \n\n while not os.path.exists(infile):\n print(\"No input file yet. Waiting 10s...\")\n time.sleep(10)\n\n with open(infile) as f:\n for line in f:\n parts = line.rstrip().split(',')\n path = parts[0]\n if path.startswith('/'):\n path = path[1:]\n\n badsum = parts[1]\n\n if path in processed:\n print(f\"Skipping: {path}\")\n continue\n\n print(f\"Checking: {path}\")\n with requests.get(f\"{url}/api/content/{repo}/{path}\", stream=True) as resp:\n if resp.status_code == 404:\n result = \"MISSING\"\n elif resp.status_code != 200:\n result = f\"ERROR {resp.status_code}\"\n else:\n realsum = sha1()\n for chunk in resp.iter_content(chunk_size=16384): \n if chunk: # filter out keep-alive new chunks\n realsum.update(chunk)\n\n realsum = realsum.hexdigest()\n if realsum == badsum:\n result = \"MATCH\"\n else:\n result = \"MISMATCH\"\n\n processed.append(path)\n with open(outfile, 'a') as f:\n f.write(f\"{path}:{result}\\n\")\n\n # print(f\"Removing input file {infile}\")\n # os.remove(infile)\n with open(donefile, 'w') as f:\n f.write(\"DONE\")\n\nprint(\"Finished. 
Sleeping so results can be extracted...\")\nwhile True:\n time.sleep(10)\n","repo_name":"Commonjava/indy-blacklist-verify","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39839922979","text":"import copy\nimport traceback\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\n\nfrom .kalman_filter import KalmanFilter\n\n\ndef get_predictor(cfg):\n if cfg.PRED_MODEL == \"linear\":\n predictor = LinearPredictor()\n elif cfg.PRED_MODEL == \"kalman\":\n predictor = KalmanFilterPredictor(dt=1/30.,\n measurement_uncertainty_x=0.1,\n measurement_uncertainty_y=0.1,\n process_uncertainty=0.1)\n elif cfg.PRED_MODEL == \"static\":\n predictor = StaticPredictor()\n elif cfg.PRED_MODEL == \"mggan\":\n \n predictor = MGGANPredictor(\n model_path=cfg.MGGAN_WEIGHTS,\n device=cfg.DEVICE,\n nr_predictions=cfg.NR_PREDICTIONS,\n dataset_name=\"motsynth\",\n pred_len=15,\n dt=cfg.DT\n )\n elif cfg.PRED_MODEL == \"gan\":\n predictor = MGGANPredictor(\n model_path=cfg.MGGAN_WEIGHTS,\n device=cfg.DEVICE,\n nr_predictions=cfg.NR_PREDICTIONS,\n dataset_name=\"motsynth\",\n pred_len=15,\n dt=cfg.DT\n )\n else:\n raise ValueError(\n \"No valid prediction model given for option 'PRED_MODEL'.\")\n return predictor\n\n\nclass Predictor(ABC):\n \n def __init__(self, sequence=None):\n self.sequence = sequence\n \n def __call__(self, frame, tracks, sequence=None):\n if len(tracks) == 0:\n return\n if self.mode == \"single\":\n for track in tracks:\n predictions = self.predict(frame=frame, tracks=track,\n sequence=sequence)\n if predictions is not None:\n\n for key, prediction in predictions.items():\n\n track.init_prediction(id=key, **prediction)\n else:\n track.set_prediction(False)\n elif self.mode == \"multiple\":\n predictions = self.predict(frame, tracks=tracks, sequence=sequence)\n for track in tracks:\n track_predictions = predictions[track.id]\n\n if track_predictions is None:\n track.set_prediction(False)\n else:\n for key, prediction in track_predictions.items():\n if prediction is None:\n continue\n track.init_prediction(id=key, **prediction)\n\n @abstractmethod\n def predict(self, frame, tracks, sequence):\n pass\n\n def step(self, frame=None, tracks=None):\n pass\n\n @abstractmethod\n def predict_trajectory(self, **kwargs):\n pass\n\n\nclass KalmanFilterPredictor(Predictor):\n name = \"kalman_filter\"\n mode = \"single\"\n\n def __init__(self, dt=1/20., process_uncertainty=1,\n measurement_uncertainty_x=2,\n measurement_uncertainty_y=2):\n self.dt = dt\n self.process_uncertainty = process_uncertainty\n self.measurement_uncertainty_x = measurement_uncertainty_x\n self.measurement_uncertainty_y = measurement_uncertainty_y\n\n def predict_trajectory(self, obs, gap=1, max_back_view=20, **kwargs):\n\n obs_xy = obs[[\"x\", \"y\"]].values\n initial_state = np.concatenate(\n (obs_xy[0, :2], np.array([0., 0.])))\n kf = KalmanFilter(initial_state=initial_state,\n measurement_uncertainty_x=self.measurement_uncertainty_x,\n measurement_uncertainty_y=self.measurement_uncertainty_y,\n process_uncertainty=self.process_uncertainty,\n )\n if len(obs_xy) > 1:\n kf.smooth(obs_xy[1:])\n\n return kf.predictSequence(time=range(gap))\n\n def predict(self, track, frame, **kwargs):\n\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n last_position = last_position_object.position\n if last_position is None:\n return 
None\n if \"kf\" in track.memory:\n kf = track.memory[\"kf\"]\n else:\n initial_state = np.concatenate(\n (last_position[:2], np.array([0., 0.])))\n kf = KalmanFilter(initial_state=initial_state,\n measurement_uncertainty_x=self.measurement_uncertainty_x,\n measurement_uncertainty_y=self.measurement_uncertainty_y,\n process_uncertainty=self.process_uncertainty,\n frame=frame)\n track.memory[\"kf\"] = kf\n predict_kf = copy.deepcopy(kf)\n frames = np.arange(frame, frame + 180)\n trajectory = predict_kf.predictSequence(frames)\n\n prediction = kf.predict(frame=frame)\n\n if len(last_position) == 3:\n new_position = np.concatenate(\n (prediction[0, :2], last_position[-1:]))\n elif len(last_position) == 2:\n new_position = prediction[0]\n if len(track.active_predictions) > 0:\n\n age_visible = track.active_predictions[0].age_visible\n\n else:\n age_visible = 0\n\n return {0: {\"position\": new_position, \"age_visible\": age_visible,\n \"memory\": {\"frames\": frames, \"trajectory\": trajectory}}}\n\n def step(self, frame=None, tracks=None):\n\n for track in tracks:\n\n current_position = track.position()\n if current_position is None:\n continue\n assert frame == track.position.frame\n assert track.occluded == False\n\n if \"kf\" in track.memory:\n kf = track.memory[\"kf\"]\n else:\n initial_state = np.concatenate(\n (current_position[:2], np.array([0., 0.])))\n kf = KalmanFilter(initial_state=initial_state,\n measurement_uncertainty_x=self.measurement_uncertainty_x,\n measurement_uncertainty_y=self.measurement_uncertainty_y,\n process_uncertainty=self.process_uncertainty,\n frame=frame)\n track.memory[\"kf\"] = kf\n\n try:\n\n kf.step(current_position[:2], frame=frame)\n except:\n track.print()\n print(kf)\n traceback.print_exc()\n\n\nclass OraclePredictor(Predictor):\n name = \"oracle\"\n mode = \"single\"\n\n def __init__(self, tracker=None,\n sequence=None,\n motion_dim=3):\n self.motion_dim = motion_dim\n self.tracker = tracker\n if self.motion_dim == 3:\n self.position_row = [\"{}_world\".format(\n coordinate) for coordinate in [\"x\", \"y\", \"z\"]]\n elif self.motion_dim == 2:\n self.position_row = [\"{}_pixel\".format(\n coordinate) for coordinate in [\"x\", \"y\"]]\n super().__init__(sequence)\n\n def predict(self, frame, track, **kwargs):\n\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n\n label_row = self.get_labels(\n frame, last_position_object.frame, last_position_object.tracker_id)\n\n if len(label_row) > 0:\n\n new_position = label_row[self.position_row].values[0]\n return {0: {\"position\": new_position}}\n\n else:\n return None\n\n def get_labels(self, frame, last_frame, last_track_id):\n\n labels = self.sequence.__getitem__(frame, [\"labels\"])[\"labels\"]\n\n gt_id = self.tracker.df[((self.tracker.df.id == last_track_id)\n & (self.tracker.df.frame == last_frame))][\"gt_id\"].item()\n if gt_id > 0:\n return labels[labels.id == int(gt_id)]\n else:\n return []\n\n\nclass TheilSenPredictor(Predictor):\n\n name = \"theil_sen\"\n mode = \"single\"\n\n def predict(self, track, frame, **kwargs):\n memory = {}\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n last_frame = last_position_object.frame\n last_position = last_position_object.position\n dt = frame - last_frame\n if last_position is None:\n return None\n if track.has_prediction:\n\n prediction = track.get_predictions()[0]\n\n if \"v\" in prediction.memory:\n v = prediction.memory[\"v\"]\n else:\n past_traj = track.get_trajectory()\n if past_traj is None:\n 
return None\n v = self.estimate_velocity(past_traj)\n else:\n past_traj = track.get_trajectory()\n if past_traj is None:\n return None\n v = self.estimate_velocity(past_traj)\n if v is None:\n return None\n else:\n memory[\"v\"] = v\n\n new_position = last_position + v * dt\n\n return {0: {\"position\": new_position, \"memory\": memory}}\n\n def predict_trajectory(self, obs, gap=1, max_back_view=20, **kwargs):\n from sklearn.linear_model import TheilSenRegressor\n\n estimators = [TheilSenRegressor(\n random_state=42), TheilSenRegressor(random_state=42)]\n obs_xy = obs[[\"x\", \"y\"]].values\n if max_back_view is not None:\n obs_xy = obs_xy[-max_back_view:]\n\n if len(obs_xy) == 1:\n return np.repeat(obs_xy, gap, axis=0)\n\n time = np.arange(-len(obs_xy) + 1, 1)[:, np.newaxis]\n\n prediction = []\n time_prediction = np.arange(1, gap+1)\n assert len(time) == len(obs_xy), \"{} {}\".format(len(time), len(obs_xy))\n for index, estimator in enumerate(estimators):\n try:\n estimator.fit(time, obs_xy[:, index])\n except Exception:\n # Log the offending observations, then re-raise (replaces a bare\n # 'dsds' placeholder that crashed with a NameError).\n print(obs_xy)\n raise\n pred = estimator.predict(time_prediction[:, np.newaxis])\n prediction.append(pred)\n\n return np.stack(prediction, 0).T\n\n def estimate_velocity(self, past_traj, max_back_view=20):\n if len(past_traj) < 2:\n return None\n past_traj = past_traj[-max_back_view:]\n dt = past_traj[1:, :1] - past_traj[:-1, :1]\n assert (dt != 0).all(), \"dt cannot be 0\"\n dx = past_traj[1:, 1:] - past_traj[:-1, 1:]\n v = dx/dt\n v_mean = np.mean(v, 0)\n v_mean[-1] = 0\n return v_mean\n\n def get_labels(self, frame):\n\n labels = self.sequence.__getitem__(frame, [\"labels\"])[\"labels\"]\n\n return labels\n\n\nclass LinearPredictor(Predictor):\n name = \"linear\"\n mode = \"single\"\n\n def predict(self, track, frame, **kwargs):\n memory = {}\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n last_frame = last_position_object.frame\n last_position = last_position_object.position\n dt = frame - last_frame\n if last_position is None:\n return None\n if track.has_prediction:\n\n prediction = track.get_predictions()[0]\n\n if \"v\" in prediction.memory:\n v = prediction.memory[\"v\"]\n else:\n past_traj = track.get_trajectory()\n if past_traj is None:\n return None\n v = self.estimate_velocity(past_traj)\n else:\n past_traj = track.get_trajectory()\n if past_traj is None:\n return None\n v = self.estimate_velocity(past_traj)\n if v is None:\n return None\n else:\n memory[\"v\"] = v\n\n new_position = last_position + v * dt\n\n return {0: {\"position\": new_position, \"memory\": memory}}\n\n def predict_trajectory(self, obs, gap=1, max_back_view=20, **kwargs):\n\n obs_xy = obs[[\"x\", \"y\"]].values\n past_traj = obs_xy[-max_back_view:]\n if len(obs_xy) == 1:\n v = np.array([0., 0.])\n else:\n v = past_traj[1:] - past_traj[:-1]\n\n v = np.mean(v, 0)\n v = v[np.newaxis, :]\n\n t = np.arange(1, gap + 1)\n dx = v * t[:, np.newaxis]\n\n return obs_xy[-1:] + dx\n\n def estimate_velocity(self, past_traj, max_back_view=20):\n if len(past_traj) < 2:\n return None\n past_traj = past_traj[-max_back_view:]\n dt = past_traj[1:, :1] - past_traj[:-1, :1]\n assert (dt != 0).all(), \"dt cannot be 0\"\n dx = past_traj[1:, 1:] - past_traj[:-1, 1:]\n v = dx/dt\n v_mean = np.mean(v, 0)\n v_mean[-1] = 0\n return v_mean\n\n def get_labels(self, frame):\n\n labels = self.sequence.__getitem__(frame, [\"labels\"])[\"labels\"]\n\n return labels\n\n\nclass MultimodalLinearPredictor(LinearPredictor):\n name = \"multimodal_linear\"\n mode = \"single\"\n\n def __init__(self, 
sequence, nr_predictions=3, alpha=30, motion_dim=3):\n super().__init__()\n self.motion_dim = motion_dim\n self.sequence = sequence\n self.nr_predictions = nr_predictions\n self.alpha = alpha\n self.angles_rad = np.radians(\n np.linspace(-self.alpha, self.alpha, self.nr_predictions))\n rotation_matrices_list = []\n self.angles_rad[0], self.angles_rad[1] = self.angles_rad[1], self.angles_rad[0]\n for phi in self.angles_rad:\n if self.motion_dim == 3:\n rotation_matrices_list.append(np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(\n phi), 0],\n [0, 0, 1]]))\n else:\n rotation_matrices_list.append(np.array([[np.cos(phi), -np.sin(phi)],\n [np.sin(phi), np.cos(\n phi)]]))\n\n self.rot_mats = np.stack(rotation_matrices_list)\n\n def predict(self, track, frame, **kwargs):\n\n memory = {}\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n last_frame = last_position_object.frame\n last_position = last_position_object.position\n dt = frame - last_frame\n if last_position is None:\n return None\n if track.has_prediction:\n prediction = track.get_predictions()[0]\n memory = prediction.memory\n if \"v\" in memory:\n v = memory[\"v\"]\n new_predictions = {}\n for k, v_i in enumerate(v):\n new_predictions[k] = {\n \"position\": last_position + v_i * dt, \"memory\": memory}\n return new_predictions\n\n past_traj = track.get_trajectory()\n if past_traj is None:\n return None\n v = self.estimate_velocity(past_traj)\n if v is None:\n return None\n v = v[np.newaxis, :, np.newaxis]\n v = (self.rot_mats @ v)[:, :, 0]\n memory[\"v\"] = v\n v_norm = np.sqrt(np.sum(v**2, axis=1))\n\n assert (np.sum(v_norm) == 0) or ((abs(np.sum(\n abs(v_norm / (v_norm[0] + 1e-16))) - self.nr_predictions)) < 1e-5), f\"{v_norm}\"\n new_predictions = {}\n for k, v_i in enumerate(v):\n new_predictions[k] = {\n \"position\": last_position + v_i * dt, \"memory\": memory}\n\n return new_predictions\n\n\nclass StaticPredictor(Predictor):\n name = \"static\"\n mode = \"single\"\n\n def predict(self, track, **kwargs):\n\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n\n last_position = last_position_object.position\n\n if last_position is None:\n return None\n\n if len(track.active_predictions) > 0:\n\n age_visible = track.active_predictions[0].age_visible\n\n else:\n age_visible = 0\n\n return {0: {\"position\": copy.deepcopy(last_position), \"age_visible\": age_visible}}\n\n def predict_trajectory(self, obs, gap=1, **kwargs):\n\n obs_xy = obs[[\"x\", \"y\"]].values[-1:]\n\n return np.repeat(obs_xy, gap, axis=0)\n\n\nclass LSTMPredictor(Predictor):\n name = \"lstm\"\n mode = \"single\"\n\n def predict(self, track, **kwargs):\n return copy.deepcopy(track.last_position.position)\n\n\nclass GANPredictor(Predictor):\n\n def predict(self):\n pass\n\n def get_batch(self, frame, tracks, **kwargs):\n data_list = []\n last_position_dict = {}\n for track in tracks:\n past_traj = track.get_trajectory()\n\n if past_traj is None:\n continue\n last_position_object = track.get_last_position(\n real=True, prediction_id=0)\n last_position = last_position_object.position\n if last_position is None:\n continue\n last_position_dict[track.id] = last_position\n\n ids = np.ones(len(past_traj)) * track.id\n data_list.append(np.concatenate(\n (ids[:, np.newaxis], past_traj[:, :-1]), 1))\n\n if len(data_list) == 0:\n\n return None\n data = np.concatenate(data_list, 0)\n\n data = data[:, (1, 0, 2, 3)]\n\n batch = self.dataset.create_scene(data, frame - 1)\n\n if batch is None:\n\n 
return None\n return batch, last_position_dict\n\n\nclass MGGANPredictor(GANPredictor):\n\n mode = \"multiple\"\n datasets = [\"mot16\", \"motsynth\", \"mot17\"]\n\n def __init__(self,\n model_path,\n device,\n img_min=np.array([10, 10]),\n checkpoint=\"best\",\n dataset_name=\"motsynth\",\n sequence=\"001\",\n nr_predictions=3,\n strategy=\"uniform_expected\",\n pred_len=12,\n dt=0.2,\n ):\n\n super().__init__()\n assert strategy in (\n \"uniform_expected\",\n \"sampling\",\n \"expected\",\n \"rejection\",\n \"smart_expected\",\n \"smart_sampling\",\n \"uniform_sampling\",\n )\n\n assert dataset_name in self.datasets, f\"`dataset_name`: {dataset_name} not valid\"\n print(\n f\"Starting MGGAN: nr_predictions: {nr_predictions}, model: {model_path}\")\n self.strategy = strategy\n self.name = \"GAN\" if \"1gen\" in model_path else \"MGGAN\"\n self.img_min = img_min\n self.sequence = sequence\n self.nr_predictions = nr_predictions\n self.dataset_name = dataset_name\n\n from mggan.data_utils import OnlineDataset\n from mggan.model.train import PiNetMultiGeneratorGAN # noqa: E2\n\n if device == \"cuda\":\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n map_location = torch.device('cuda:0')\n else:\n map_location = torch.device('cpu')\n model, config = PiNetMultiGeneratorGAN.load_from_path(\n Path(model_path), checkpoint, map_location=map_location)\n model.G.to(device)\n\n model.device = device\n model.G.eval()\n self.predictor = model\n\n self.dataset = OnlineDataset(\n img_min=self.img_min,\n cnn=(config.grid_size > 0),\n grid_size_in_global=config.grid_size,\n grid_size_out_global=config.grid_size,\n scene_batching=True,\n goal_gan=False,\n local_features=False,\n scaling_global=0.5,\n load_semantic_map=config.load_semantic_map,\n time_step=dt,\n pred_len=pred_len,\n obs_len=config.obs_len,\n dataset_name=self.dataset_name,\n sequence=self.sequence)\n self.frames_per_step = int(\n self.dataset.framerate * self.dataset.time_step)\n self.interpolation_steps = np.ones(\n self.frames_per_step)/self.frames_per_step\n\n self.predictor.G.set_pred_len(pred_len)\n self.predictor.G.pred_len = pred_len\n\n def predict_trajectory(self, obs, gap=1, **kwargs):\n\n frame = obs[\"frame\"].max()\n x = obs[[\"frame\", \"id\", \"x\", \"y\"]].values\n\n batch = self.dataset.create_scene(x, frame, padding=False)\n\n pred_len = int(\n np.ceil(gap / (self.dataset.framerate * self.dataset.time_step)))\n\n self.predictor.G.set_pred_len(pred_len)\n self.predictor.G.pred_len = pred_len\n\n prediction = self.predictor.get_predictions_batch(\n batch, num_preds=self.nr_predictions, strategy=self.strategy)\n print(prediction)\n out_dxdy = prediction[\"out_dxdy\"].unsqueeze(1).cpu().numpy()\n # print(\"predictions\", out_dxdy[:, :, 0])\n interpolated_dxdy = out_dxdy * \\\n self.interpolation_steps[np.newaxis, :,\n np.newaxis, np.newaxis, np.newaxis]\n\n time_steps, int_step, nr_pred, nr_ped, dim = interpolated_dxdy.shape\n interpolated_dxdy = np.reshape(\n interpolated_dxdy, (time_steps * int_step, nr_pred, nr_ped, dim))\n interpolated_xy = np.cumsum(interpolated_dxdy, 0)\n last_position = x[x[:, 0] == frame][0, -2:]\n\n predicted_traj = interpolated_xy[:, :, 0] + \\\n last_position[np.newaxis, np.newaxis, :2]\n\n return predicted_traj[: gap]\n\n def predict(self, frame, tracks, **kwargs):\n batch_out = self.get_batch(frame, tracks)\n\n if batch_out is None:\n return {track.id: None for track in tracks}\n (batch, last_position_dict) = batch_out\n prediction = self.predictor.get_predictions_batch(\n batch, 
num_preds=self.nr_predictions, strategy=self.strategy)\n\n out_dxdy = prediction[\"out_dxdy\"].unsqueeze(1).cpu().numpy()\n # print(\"predictions\", out_dxdy[:, :, 0])\n interpolated_dxdy = out_dxdy * \\\n self.interpolation_steps[np.newaxis, :,\n np.newaxis, np.newaxis, np.newaxis]\n\n time_steps, int_step, nr_pred, nr_ped, dim = interpolated_dxdy.shape\n interpolated_dxdy = np.reshape(\n interpolated_dxdy, (time_steps * int_step, nr_pred, nr_ped, dim))\n interpolated_xy = np.cumsum(interpolated_dxdy, 0)\n\n ids = list(batch[\"ids\"])\n output = {}\n frames = np.arange(len(interpolated_dxdy)) + frame\n track_ids = []\n\n for track in tracks:\n assert track.id not in track_ids\n track_ids.append(track.id)\n if track.id not in ids and not track.has_prediction:\n output[track.id] = None\n elif track.has_prediction:\n # update current prediction\n for j in range(self.nr_predictions):\n output[track.id] = {}\n\n for key, prediction in track.predictions.items():\n\n if not prediction.active:\n\n output[track.id][key] = None\n\n else:\n memory = prediction.memory\n predicted_traj = memory[\"trajectory\"][memory[\"frames\"] == frame][0]\n output[track.id][key] = {\n \"position\": predicted_traj, \"memory\": memory, \"age_visible\": prediction.age_visible}\n \n elif track.id in ids:\n id_prediction = ids.index(track.id)\n last_position = last_position_dict[track.id]\n predicted_traj = interpolated_xy[:, :, id_prediction] + \\\n last_position[np.newaxis, np.newaxis, :2]\n predicted_traj = np.concatenate((predicted_traj, np.ones_like(\n predicted_traj)[:, :, :1] * last_position[-1]), -1)\n\n output[track.id] = {}\n\n for j in range(self.nr_predictions):\n traj = predicted_traj[0, j]\n assert traj.shape == (3,), traj.shape\n\n output[track.id][j] = {\"position\": traj, \"memory\":\n {\"frames\": frames,\n \"trajectory\": predicted_traj[:, j]}\n }\n else:\n # Assignment, not comparison: the original '==' was a no-op bug.\n output[track.id] = None\n return output\n\n\nif __name__ == \"__main__\":\n import pandas as pd\n obs = np.stack((np.arange(10), np.arange(10))).T\n obs = pd.DataFrame(obs, columns=[\"x\", \"y\"])\n TS = TheilSenPredictor()\n x = TS.predict_trajectory(obs, 9)\n","repo_name":"dendorferpatrick/QuoVadis","sub_path":"src/quovadis/trajectory_predictor/Predictor.py","file_name":"Predictor.py","file_ext":"py","file_size_in_byte":24688,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"32"} +{"seq_id":"18065255294","text":"from itertools import permutations, product\n\n# stolen for python-ness, but understood\ndef plusAndMinusPermutations(items):\n for p in permutations(items):\n for signs in product([-1,1], repeat=len(items)):\n yield [a*sign for a,sign in zip(p,signs)]\nn=6\nitems = [i for i in range(1,n+1)]\n\nout = ''\nf = 0\nfor p in plusAndMinusPermutations(items):\n f += 1\n for j in p:\n out += str(j)+' '\n out += '\\n'\nout = str(f)+'\\n'+out\n\nf = open('sign_out.txt','w')\nf.write(out)\nf.close()\n","repo_name":"roesel/rosalind","sub_path":"sign/sign.py","file_name":"sign.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13387143547","text":"import Seminar_07.Model.Rational_model.DivRational as Div\nimport Seminar_07.Model.Complex_model.ComplexNumber as ComplexNumber\n\n\nclass DivComplex():\n '''Division'''\n\n def result(self, x1, x2, y1, y2):\n new_real = Div.DivRational.result(\n self, (x1 * x2 + y1 * y2), (x2 * x2 + y2 * y2))\n new_image = Div.DivRational.result(\n self, (y1 * x2 - x1 * y2), 
(x2 * x2 + y2 * y2))\n result = ComplexNumber.Complex(new_real, new_image)\n return result\n","repo_name":"ArtemEvgTitov/OOP_Homework","sub_path":"Seminar_07/Model/Complex_model/DivComplex.py","file_name":"DivComplex.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34476939915","text":"#!/usr/bin/python\n\n######################################################################\n# Author: Andrés Herrera Poyatos\n# Universidad de Granada, January, 2015\n# Sorting Algorithm: Dual Pivot QuickSelect\n#######################################################################\n\n# This program reads an integer array from a file and executes the Dual Pivot QuickSelect\n# algorithm on it to find the ith-order statistic element given as an argument.\n# This algorithm uses the Dual Pivot scheme for Quicksort introduced in the Java Library.\n# It runs in O(n) average time, even with repeated elements, and performs about 80%\n# of the comparisons of the classical Hoare-partition scheme.\n# \n# This algorithm is not as big an improvement over classical QuickSelect as it is for QuickSort,\n# since in QuickSort the 3-way partition also helps to sort the array indirectly, reducing\n# the size of the 3 recursive calls. Here that help is much less noticeable.\n# More info in http://iaroslavski.narod.ru/quicksort/DualPivotQuicksort.pdf\n\nimport sys # For arguments (sys.argv) and exit (sys.exit())\nimport time # To time the program\n\n# Swap two elements in the array\ndef swap(array, i, j):\n aux = array[i]; array[i] = array[j]; array[j] = aux\n\n# Function that performs a partition of the subarray [begin, end[.\n# It returns both pivots' final indexes.\ndef partition(array, begin, end):\n end -= 1; \n if array[begin] > array[end]:\n swap(array, begin, end)\n pivot1 = array[begin]; pivot2 = array[end]\n \n i = begin+1; j = end-1; k = begin+1\n while k <= j:\n if array[k] < pivot1:\n swap(array, i, k); i += 1\n else:\n if array[k] >= pivot2:\n while array[j] > pivot2 and k < j:\n j -= 1\n swap(array, k, j); j -= 1\n if array[k] < pivot1: \n swap(array, i, k); i += 1\n k += 1\n\n i -= 1; j += 1; swap(array, begin, i); swap(array, j, end)\n\n return i, j\n\n\n# Function that selects the order-th element of array [begin, end[ according to\n# the sorting relation of the elements. It is called the ith-order statistic element.\n# The algorithm runs in O(n) average time.\n# It is a Dual Pivot QuickSelect implementation. 
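# --- Added aside (not part of the original file): a minimal, hypothetical sanity
# check for the selector defined below. It compares the selector's answer against
# Python's built-in sort on random inputs; the helper name and trial counts are
# illustrative assumptions, not from the original repository.
def checkOrderStatistic(trials=100):
    import random
    for _ in range(trials):
        array = [random.randint(0, 50) for _ in range(random.randint(1, 40))]
        order = random.randrange(len(array))
        # Pass a copy: the selector partitions the array in place.
        assert dualPivotQuickSelect(list(array), 0, len(array), order) == sorted(array)[order]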
\n# A partition is performed and the search continues in the corresponding subarray.\ndef dualPivotQuickSelect(array, begin, end, order):\n if end - begin == 1:\n return array[begin]\n else:\n pivot1, pivot2 = partition(array, begin, end)\n \n if pivot1 > order:\n return dualPivotQuickSelect(array, begin, pivot1, order)\n elif pivot2 > order:\n if pivot1 == order:\n return array[pivot1]\n else:\n return dualPivotQuickSelect(array, pivot1+1, pivot2, order)\n elif pivot2 == order:\n return array[pivot2]\n else:\n return dualPivotQuickSelect(array, pivot2+1, end, order)\n\n\n######################## MAIN ##########################\n\n# See if arguments are correct\nif len(sys.argv) != 3:\n print(\"Error: Needs the array file and the order of the requested element as arguments.\")\n sys.exit()\n\n# Read array\nnumbers = open(sys.argv[1]) \narray = [ ]\nfor line in numbers:\n array.append(int(line))\n\n# Get order and check it is correct\norder = int(sys.argv[2])\nif order < 0 or order >= len(array):\n print(\"Error: Order must be between 0 and the length of the array - 1.\")\n sys.exit()\n\n# Execute dualPivotQuickSelect and measure the elapsed time\nstart_time = time.time()\na = dualPivotQuickSelect(array, 0, len(array), order)\nprint(\"--- %f seconds ---\" % (time.time() - start_time) )\nprint(order, \"- ith order statistic: \", a)\n\nnumbers.close()","repo_name":"andreshp/Algorithms","sub_path":"SelectionAlgorithms/Python/QuickSelect/DualPivot/DualPivotQuickSelect.py","file_name":"DualPivotQuickSelect.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"593210158","text":"import boto3\r\n\r\nAWS_REGION = \"ap-south-1\"\r\n\r\nclient = boto3.client(\"s3\", region_name=AWS_REGION)\r\nl1=[]\r\nresponse = client.list_buckets()\r\n\r\nprint(\"These are the S3 buckets in this region:\")\r\nfor bucket in response['Buckets']:\r\n print(f\"--> {bucket['Name']}\")\r\n l1.append(bucket['Name'])\r\n\r\nprint(\"The total number of S3 buckets is:\", len(l1))","repo_name":"anilkumarvangipuram/python-Docs","sub_path":"listalls3.py","file_name":"listalls3.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20351970320","text":"'''\nPlot eigenfunction(s) for a single mode.\n'''\n\nimport argparse\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\n\nfrom Ouroboros.common import (get_Ouroboros_out_dirs, get_r_fluid_solid_boundary,\n load_eigenfreq, load_eigenfunc,\n load_model, mkdir_if_not_exist,\n read_input_file)\nfrom Ouroboros.misc.compare_eigenfunctions import (check_sign_R,\n check_sign_S, check_sign_P, check_sign_T)\n\ndef get_title_str(mode_type, n, l, code, i_toroidal = None):\n '''\n Format the mode's name for a title.\n '''\n\n # Get title string information.\n if mode_type in ['R', 'S']:\n\n mode_type_for_title = 'S'\n\n else:\n \n if code in ['mineos', 'ouroboros_homogeneous']:\n\n mode_type_for_title = mode_type\n\n elif code == 'ouroboros':\n\n if i_toroidal == 0:\n \n mode_type_for_title = 'I'\n \n else:\n \n mode_type_for_title = 'T'\n\n else:\n\n raise ValueError\n\n title = '$_{{{:d}}}${:}$_{{{:d}}}$'.format(n, mode_type_for_title, l)\n\n return title\n\ndef plot_eigenfunc_wrapper(run_info, mode_type, n, l, i_toroidal = None, ax = None, ax_imag = None, save = True, show = True, transparent = True, linestyle = '-', 
label_suffix = '', plot_gradient = False, plot_potential = False, x_label = 'default', norm_func = 'mineos', units = 'SI', alpha = 1.0, r_lims = None, no_title = False, relaxation = False, duplicate = False): \n '''\n Wrapper script which gathers the necessary data to plot the eigenfunction.\n '''\n \n # Get model information for axis limits, scaling and horizontal lines.\n if run_info['code'] == 'ouroboros_homogeneous':\n\n r_srf = run_info['r']*1.0E-3\n i_fluid = []\n r_solid_fluid_boundary = []\n\n else:\n\n model = load_model(run_info['path_model'])\n # Convert to km.\n model['r'] = model['r']*1.0E-3\n # r_srf Radius of planet.\n # r_solid_fluid_boundary List of radii of solid-fluid boundaries.\n r_srf = model['r'][-1]\n i_fluid, r_solid_fluid_boundary, _ = get_r_fluid_solid_boundary(model['r'], model['v_s'])\n\n # Get frequency information.\n mode_info = load_eigenfreq(run_info, mode_type, i_toroidal = i_toroidal, n_q = n, l_q = l,\n relaxation = relaxation, duplicate = duplicate)\n f = mode_info['f']\n\n # Get normalisation arguments.\n f_rad_per_s = f*1.0E-3*2.0*np.pi\n normalisation_args = {'norm_func' : norm_func, 'units' : units}\n normalisation_args['omega'] = f_rad_per_s\n\n # Get eigenfunction information.\n eigfunc_dict = load_eigenfunc(run_info, mode_type, n, l,\n i_toroidal = i_toroidal,\n norm_args = normalisation_args,\n relaxation = relaxation,\n duplicate = duplicate)\n eigfunc_dict['r'] = eigfunc_dict['r']*1.0E-3 # Convert to km.\n \n # Get title string.\n title = get_title_str(mode_type, n, l, run_info['code'], i_toroidal = i_toroidal)\n\n # Check the sign of the plotting variable.\n if plot_potential:\n \n sign = check_sign_P(eigfunc_dict['r'], eigfunc_dict['P'])\n\n else:\n\n if mode_type == 'S':\n\n sign = check_sign_S(eigfunc_dict['r'], eigfunc_dict['U'],\n eigfunc_dict['V'])\n\n elif mode_type == 'R':\n\n sign = check_sign_R(eigfunc_dict['r'], eigfunc_dict['U'])\n\n elif mode_type in ['I', 'T']:\n\n sign = check_sign_T(eigfunc_dict['r'], eigfunc_dict['W'])\n\n # Change sign to always be positive.\n if sign < 0: \n\n for val in eigfunc_dict:\n\n if val != 'r':\n\n eigfunc_dict[val] = eigfunc_dict[val]*-1.0\n \n # Find maximum value(s) of plotting variable.\n if plot_gradient:\n\n if plot_potential:\n\n vals = eigfunc_dict['Pp'] \n\n else:\n\n if mode_type == 'R':\n\n vals = eigfunc_dict['Up'] \n\n elif mode_type == 'S':\n\n vals = np.concatenate([eigfunc_dict['Up'], eigfunc_dict['Vp']])\n\n elif mode_type == 'T':\n\n vals = eigfunc_dict['Wp']\n\n else:\n\n raise ValueError\n \n\n else:\n\n if plot_potential:\n\n vals = eigfunc_dict['P']\n\n else:\n\n if mode_type == 'R':\n \n vals = eigfunc_dict['U']\n\n elif mode_type == 'S':\n\n vals = np.concatenate([eigfunc_dict['U'], eigfunc_dict['V']])\n\n elif mode_type in ['I', 'T']:\n\n vals = eigfunc_dict['W']\n\n else:\n\n raise ValueError\n\n # Get maximum value.\n max_ = np.max(np.abs(vals))\n\n # Mineos saves the toroidal eigenfunction in regions where it has a\n # value of 0. 
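# --- Added aside (not from the original repository): the clipping step below
# keeps only the span between the first and last non-zero samples of W. A
# standalone sketch of the same idea, with an illustrative array:
#     w = np.array([0., 0., 1., 2., 0., 3., 0.])
#     i_nz = np.nonzero(w)[0]               # array([2, 3, 5])
#     w_clip = w[i_nz[0] : i_nz[-1] + 1]    # array([1., 2., 0., 3.])
# (The "+ 1" makes the slice inclusive of the last non-zero sample.)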
This is clipped.\n if run_info['code'] == 'mineos':\n \n clip_zero = True\n if mode_type in ['T', 'I']:\n\n if clip_zero:\n\n i_nonzero = np.nonzero(eigfunc_dict['W'])[0]\n\n i_0 = i_nonzero[0]\n i_1 = i_nonzero[-1]\n\n eigfunc_dict['r'] = eigfunc_dict['r'][i_0 : i_1]\n for val in ['W', 'Wp']:\n\n eigfunc_dict[val] = eigfunc_dict[val][i_0 : i_1]\n\n # Create axes if not provided.\n if r_lims is None:\n\n r_range = np.max(eigfunc_dict['r']) - np.min(eigfunc_dict['r'])\n r_frac = r_range/r_srf\n imag_y = 11.0*r_frac\n\n else:\n\n imag_y = 7.0\n\n if run_info['attenuation'] == 'full':\n\n imag_x = 11.0\n\n else:\n\n imag_x = 5.5\n\n if ax is None:\n \n if run_info['attenuation'] == 'full':\n \n imag_scaling = 'fixed'\n if imag_scaling == 'fixed':\n\n sharex = True\n imag_scale = 300.0 \n\n else:\n\n sharex = False\n imag_scale = 1.0\n\n fig, ax_arr = plt.subplots(1, 2, figsize = (imag_x, imag_y),\n sharey = True, sharex = sharex)\n ax = ax_arr[0]\n ax_imag = ax_arr[1]\n\n else:\n\n fig = plt.figure(figsize = (imag_x, imag_y))\n ax = plt.gca()\n\n ax_imag = None\n\n else:\n \n imag_x_pre, imag_y_pre = ax.figure.get_size_inches()\n if imag_y_pre < imag_y:\n\n ax.figure.set_size_inches((imag_x, imag_y))\n\n # Arguments for all possibilities.\n if run_info['attenuation'] == 'full':\n \n if x_label == 'default':\n\n x_label = None\n\n else:\n\n if x_label == 'default':\n\n x_label = 'Eigenfunction'\n\n common_args = {'ax' : ax, 'show' : False,\n 'title' : title, 'x_label' : x_label, 'alpha' : alpha,\n 'r_lims' : r_lims}\n\n if no_title:\n\n common_args['title'] = None\n\n # Plot.\n if plot_gradient:\n\n if plot_potential:\n\n if run_info['attenuation'] == 'full':\n\n raise NotImplementedError\n\n plot_P(eigfunc_dict['r'], eigfunc_dict['Pp'],\n h_lines = r_solid_fluid_boundary,\n linestyle = linestyle,\n label_suffix = label_suffix,\n **common_args)\n\n else:\n\n if mode_type == 'R':\n\n if run_info['attenuation'] == 'full':\n\n raise NotImplementedError\n\n plot_eigenfunc_R_or_T(eigfunc_dict['r'], eigfunc_dict['Up'],\n h_lines = r_solid_fluid_boundary,\n linestyle = linestyle,\n label = 'U{:}'.format(label_suffix),\n **common_args)\n \n elif mode_type == 'S':\n\n if run_info['attenuation'] == 'full':\n\n raise NotImplementedError\n\n plot_eigenfunc_S(eigfunc_dict['r'], eigfunc_dict['Up'], eigfunc_dict['Vp'],\n h_lines = r_solid_fluid_boundary,\n linestyles = [linestyle, linestyle],\n label_suffix = label_suffix,\n **common_args)\n\n elif mode_type in ['T', 'I']:\n\n if run_info['attenuation'] == 'full':\n\n raise NotImplementedError\n \n plot_eigenfunc_R_or_T(eigfunc_dict['r'], eigfunc_dict['Wp'],\n h_lines = None,\n linestyle = linestyle,\n label = 'W{:}'.format(label_suffix),\n **common_args)\n\n elif plot_potential:\n\n if run_info['attenuation'] == 'full':\n\n raise NotImplementedError\n\n plot_P(eigfunc_dict['r'], eigfunc_dict['P'],\n h_lines = r_solid_fluid_boundary,\n linestyle = linestyle,\n label_suffix = label_suffix,\n **common_args)\n\n else:\n\n if mode_type == 'R':\n\n if run_info['attenuation'] == 'full':\n\n raise NotImplementedError\n\n plot_eigenfunc_R_or_T(eigfunc_dict['r'], eigfunc_dict['U'],\n h_lines = r_solid_fluid_boundary,\n linestyle = linestyle,\n label = 'U{:}'.format(label_suffix),\n **common_args)\n \n elif mode_type == 'S':\n\n if run_info['attenuation'] == 'full':\n\n common_args['ax'] = ax_imag\n\n if imag_scaling == 'fixed':\n\n common_args['x_label'] = 'Imaginary $\\\\times$ {:>d}'.format(\n int(imag_scale))\n\n else:\n\n common_args['x_label'] = 'Imaginary'\n\n 
plot_eigenfunc_S(eigfunc_dict['r'],\n eigfunc_dict['U_im'] * imag_scale,\n eigfunc_dict['V_im'] * imag_scale,\n h_lines = r_solid_fluid_boundary,\n linestyles = [linestyle, linestyle],\n label_suffix = label_suffix,\n y_label = None,\n **common_args)\n\n common_args['ax'] = ax\n common_args['x_label'] = 'Real'\n\n plot_eigenfunc_S(eigfunc_dict['r'], eigfunc_dict['U'], eigfunc_dict['V'],\n h_lines = r_solid_fluid_boundary,\n linestyles = [linestyle, linestyle],\n label_suffix = label_suffix,\n **common_args)\n\n elif mode_type in ['T', 'I']:\n\n if run_info['attenuation'] == 'full':\n\n common_args['ax'] = ax_imag\n common_args['x_label'] = 'Imaginary'\n #common_args['title'] = None\n\n plot_eigenfunc_R_or_T(eigfunc_dict['r'], eigfunc_dict['W_im'],\n h_lines = None,\n linestyle = linestyle,\n label = 'W{:}'.format(label_suffix),\n y_label = None,\n **common_args)\n\n common_args['ax'] = ax\n common_args['x_label'] = 'Real'\n\n plot_eigenfunc_R_or_T(eigfunc_dict['r'], eigfunc_dict['W'],\n h_lines = None,\n linestyle = linestyle,\n label = 'W{:}'.format(label_suffix),\n **common_args)\n\n if run_info['attenuation'] == 'full':\n \n font_size_title = 36\n plt.suptitle(title, fontsize = font_size_title)\n\n # Make the background transparent (if requested).\n if transparent:\n \n fig = plt.gcf()\n set_patch_facecolors(fig, ax) \n\n # Set tight layout.\n plt.tight_layout()\n \n # Save (if requested).\n if save:\n\n method_str = run_info['code']\n\n if plot_gradient:\n\n gradient_str = '_gradient'\n\n else:\n\n gradient_str = ''\n\n if plot_potential:\n\n var_str = 'potential'\n\n else:\n\n var_str = 'eigfunc'\n\n fig_name = '{:}{:}_{:}'.format(var_str, gradient_str, method_str)\n \n if run_info['code'] == 'mineos':\n \n dir_out = run_info['dir_output']\n dir_plot = os.path.join(dir_out, 'plots')\n\n elif run_info['code'] in ['ouroboros', 'ouroboros_homogeneous']:\n \n _, _, _, dir_out = get_Ouroboros_out_dirs(run_info, mode_type)\n\n else:\n\n raise ValueError\n\n dir_plot = os.path.join(dir_out, 'plots')\n mkdir_if_not_exist(dir_plot)\n\n if mode_type in ['S', 'R']:\n\n fig_name = '{:}_{:>05d}_{:}_{:>05d}_{:1d}.png'.format(fig_name, n, mode_type, l, run_info['grav_switch'])\n\n else:\n\n if run_info['code'] == 'mineos':\n\n fig_name = '{:}_{:>05d}_{:}_{:>05d}_{:1d}.png'.format(fig_name, n, mode_type, l, run_info['grav_switch'])\n\n elif run_info['code'] == 'ouroboros':\n\n fig_name = '{:}_{:>05d}_{:}{:1d}_{:>05d}_{:1d}.png'.format(fig_name, n, mode_type, i_toroidal, l, run_info['grav_switch'])\n\n elif run_info['code'] == 'ouroboros_homogeneous':\n\n fig_name = '{:}_{:>05d}_{:}_{:>05d}_{:>1d}.png'.format(fig_name, n, mode_type, l, run_info['grav_switch'])\n\n else:\n\n raise ValueError\n\n fig_path = os.path.join(dir_plot, fig_name)\n print('Saving figure to {:}'.format(fig_path))\n plt.savefig(fig_path, dpi = 300, bbox_inches = 'tight')\n\n if show:\n\n plt.show()\n\n return ax, ax_imag\n\ndef plot_eigenfunc_S(r, U, V, ax = None, h_lines = None, x_label = 'Eigenfunction', y_label = 'Radial coordinate / km', title = None, show = True, add_legend = True, colors = ['r', 'b'], linestyles = ['-', '-'], label_suffix = '', alpha = 1.0, legend_loc = 'best', font_size_label = 12, r_lims = None):\n '''\n Plot spheroidal eigenfunction.\n '''\n \n # Create axes if none provided.\n if ax is None:\n\n fig = plt.figure()\n ax = plt.gca()\n \n # Set labels of lines.\n U_label = 'U'\n V_label = 'V'\n U_label = U_label + label_suffix\n V_label = V_label + label_suffix\n\n # Plot eigenfunctions.\n ax.plot(U, r, 
label = U_label, color = colors[0], linestyle = linestyles[0], alpha = alpha)\n ax.plot(V, r, label = V_label, color = colors[1], linestyle = linestyles[1], alpha = alpha)\n\n # Set axis limits.\n max_abs_U_plot = np.max(np.abs(U))\n max_abs_V_plot = np.max(np.abs(V))\n E_max = np.max([max_abs_U_plot, max_abs_V_plot])\n \n # Tidy axes.\n tidy_axes(ax, r, E_max, h_lines = h_lines, add_legend = add_legend, legend_loc = legend_loc, title = title, x_label = x_label, y_label = y_label, r_lims = r_lims)\n\n if show:\n\n plt.show()\n\n return ax, E_max\n\ndef plot_P(r, P, ax = None, h_lines = None, x_label = 'Potential', y_label = 'Radial coordinate / km', title = None, show = True, add_legend = True, color = 'r', linestyle = '-', label_suffix = '', alpha = 1.0, legend_loc = 'best', font_size_label = 12):\n '''\n Plot potential eigenfunction.\n '''\n \n # Create axes if none provided.\n if ax is None:\n\n fig = plt.figure()\n ax = plt.gca()\n \n # Set label for line.\n label = 'P'\n label = label + label_suffix\n \n # Plot eigenfunction.\n ax.plot(P, r, label = label, color = color, linestyle = linestyle, alpha = alpha)\n\n # Determine axis limits.\n P_max = np.max(np.abs(P))\n\n # Tidy axes.\n tidy_axes(ax, r, P_max, h_lines = h_lines, add_legend = add_legend, legend_loc = legend_loc, title = title, x_label = x_label, y_label = y_label)\n\n # Show (if requested).\n if show:\n\n plt.show()\n\n return ax, P_max\n\ndef tidy_axes(ax, r, E_max, h_lines = None, add_legend = True, legend_loc = 'best', title = None, x_label = 'Eigenfunction', y_label = 'Radius / km', r_lims = None):\n '''\n Make the axes look neater.\n '''\n \n # Set font sizes.\n font_size_label = 16\n font_size_title = 36 \n\n # Draw horizontal lines.\n if h_lines is not None:\n\n for h_line in h_lines:\n\n ax.axhline(h_line, linestyle = ':', color = 'k')\n\n # Draw vertical line at x = 0.\n ax.axvline(linestyle = ':', color = 'k')\n \n # Set eigenfunction axis limits.\n buff = 1.05\n ax.set_xlim([-buff*E_max, buff*E_max])\n\n # Set radius axis limits.\n if r_lims is None:\n\n ax.set_ylim([np.min(r), np.max(r)])\n\n else:\n\n ax.set_ylim(r_lims)\n \n # Add legend.\n if add_legend:\n\n ax.legend(loc = legend_loc)\n\n # Add title.\n if title is not None:\n \n ax.set_title(title, fontsize = font_size_title)\n\n # Label x axis. 
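# --- Added aside (not from the original repository): a hedged sketch of how
# tidy_axes is typically driven by the plotting helpers above. The h_lines radii
# are illustrative inner-core/core-mantle boundary values in km:
#     fig, ax = plt.subplots()
#     ax.plot(U, r)
#     tidy_axes(ax, r, np.max(np.abs(U)), h_lines = [1221.5, 3480.0],
#               x_label = 'Eigenfunction', y_label = 'Radius / km')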
\n if x_label is not None:\n\n ax.set_xlabel(x_label, fontsize = font_size_label)\n \n # Label y axis.\n if y_label is not None:\n\n ax.set_ylabel(y_label, fontsize = font_size_label)\n\n return\n\ndef set_patch_facecolors(fig, ax):\n '''\n Make transparent plot background.\n '''\n\n ax.patch.set_facecolor('white')\n ax.patch.set_alpha(1.0)\n fig.patch.set_facecolor('white')\n fig.patch.set_alpha(0.0)\n\n return\n\ndef plot_eigenfunc_R_or_T(r, U_or_W, ax = None, show = False, h_lines = None, add_legend = True, legend_loc = 'best', title = None, label = None, x_label = 'Eigenfunction', y_label = 'Radial coordinate / km', linestyle = '-', alpha = 1.0, r_lims = None):\n '''\n Plot eigenfunction for radial or toroidal mode.\n '''\n\n # Plot the line.\n ax.plot(U_or_W, r, color = 'r', label = label,\n linestyle = linestyle, alpha = alpha)\n\n # Get eigenfunction axis limits.\n max_abs_U_or_W_plot = np.max(np.abs(U_or_W))\n \n # Tidy up axis.\n tidy_axes(ax, r, max_abs_U_or_W_plot, h_lines = h_lines,\n add_legend = add_legend, legend_loc = legend_loc,\n title = title, x_label = x_label, y_label = y_label,\n r_lims = r_lims)\n\n return\n\ndef get_label_suffixes(path_compare, code, code_compare, plot_gradient):\n '''\n Define suffixes for line labels.\n '''\n\n if plot_gradient:\n\n if code == 'mineos':\n\n label_suffix = '\\' (Mineos)'\n \n elif code == 'ouroboros':\n\n #label_suffix = '\\' (Ouroboros)'\n label_suffix = '\\' (RadialPNM)'\n\n else:\n\n raise ValueError\n\n if path_compare is not None:\n\n if code_compare == 'mineos':\n\n #label_suffix_compare = '\\' (Mineos)'\n label_suffix_compare = ''\n \n elif code_compare == 'ouroboros':\n\n #label_suffix_compare = '\\' (Ouroboros)'\n #label_suffix_compare = '\\' (RadialPNM)'\n label_suffix_compare = ''\n\n else:\n\n raise ValueError\n\n else:\n\n label_suffix_compare = None\n\n else:\n\n if code == 'mineos':\n\n label_suffix = ' (Mineos)'\n \n elif code == 'ouroboros':\n\n #label_suffix = ' (Ouroboros)'\n #label_suffix = ' (RadialPNM)'\n label_suffix = ''\n\n elif code == 'ouroboros_homogeneous':\n\n label_suffix = ''\n\n else:\n\n raise ValueError\n\n if path_compare is not None:\n\n if code_compare == 'mineos':\n\n #label_suffix_compare = ' (Mineos)'\n label_suffix_compare = ''\n \n elif code_compare == 'ouroboros':\n\n #label_suffix_compare = ' (Ouroboros)'\n label_suffix_compare = ''\n\n else:\n\n raise ValueError\n\n else:\n\n label_suffix_compare = None\n\n return label_suffix, label_suffix_compare\n\ndef main():\n\n # Read input arguments.\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path_to_input_file\", help = \"File path (relative or absolute) to Ouroboros input file.\")\n parser.add_argument(\"mode_type\", choices = ['R', 'S', 'T', 'I'], help = 'Mode type (radial, spheroidal or toroidal). Option I is for use with Mineos to plot inner-core toroidal modes. 
See the --toroidal flag for plotting toroidal modes with Ouroboros.')\n parser.add_argument(\"n\", type = int, help = \"Plot mode with radial order n.\")\n parser.add_argument(\"l\", type = int, help = \"Plot mode with angular order l (must be 0 for radial modes).\")\n parser.add_argument(\"--toroidal\", dest = \"layer_number\", help = \"Plot toroidal eigenfunction for the solid shell given by LAYER_NUMBER (0 is outermost solid shell).\", type = int)\n parser.add_argument(\"--gradient\", action = \"store_true\", help = \"Include this flag to plot eigenfunction gradients (default: plot eigenfunctions).\")\n parser.add_argument(\"--potential\", action = \"store_true\", help = \"Include this flag to plot potential (default: plot eigenfunctions).\")\n parser.add_argument(\"--path_compare\", help = \"Provide input path to plot a second eigenfunction for comparison.\")\n parser.add_argument(\"--norm_func\", choices = ['mineos', 'DT'], default = 'DT', help = \"Specify normalisation function. \\'mineos\\' is the normalisation function used by Mineos and Ouroboros. \\'DT\\' is the normalisation function used in the Dahlen and Tromp textbook. It does not include the factor of k. See also the --units flag. For more detail, see Ouroboros/doc/Ouroboros_normalisation_notes.pdf.\")\n parser.add_argument(\"--units\", choices = ['SI', 'ouroboros', 'mineos'], default = 'mineos', help = 'Specify units used when applying normalisation to eigenfunction. \\'SI\\' is SI units. \\'mineos\\' is Mineos units. \\'ouroboros\\' is Ouroboros units. See also the --norm_func flag. For more detail, see Ouroboros/doc/Ouroboros_normalisation_notes.pdf.')\n parser.add_argument(\"--r_lims\", nargs = 2, type = float, help = 'Specify radius limits of plot (km).')\n parser.add_argument(\"--relaxation\", action = 'store_true', help = 'Plot a relaxation mode (instead of oscillation mode). Note: only available when attenuation == \\'full\\'.')\n parser.add_argument(\"--duplicate\", action = 'store_true', help = 'Plot a duplicate mode. Note 1: only available when attenuation == \\'full\\'. Note 2: duplicate modes are not sorted by (n, l) but by a single index n (sorted by real part of frequency). 
l is ignored.')\n args = parser.parse_args()\n\n # Rename input arguments.\n path_input = args.path_to_input_file\n mode_type = args.mode_type\n n = args.n\n l = args.l\n i_toroidal = args.layer_number\n plot_gradient = args.gradient\n plot_potential = args.potential\n path_compare = args.path_compare\n norm_func = args.norm_func\n units = args.units\n r_lims = args.r_lims\n relaxation = args.relaxation\n duplicate = args.duplicate\n\n # Check input arguments.\n if mode_type == 'R':\n\n assert l == 0, 'Must have l = 0 for radial modes.'\n\n # Read input file.\n run_info = read_input_file(path_input)\n if path_compare is not None:\n\n run_info_compare = read_input_file(path_compare)\n code_compare = run_info_compare['code']\n\n else:\n\n code_compare = None\n\n # Get x label.\n if plot_potential:\n\n if plot_gradient:\n\n x_label = 'Potential gradient'\n\n else:\n\n x_label = 'Potential'\n\n else:\n\n if plot_gradient:\n\n x_label = 'Eigenfunction gradient'\n\n else:\n\n x_label = 'Eigenfunction'\n \n # Get label suffixes.\n label_suffix, label_suffix_compare = get_label_suffixes(path_compare,\n run_info['code'], code_compare, plot_gradient)\n\n # Plot.\n if path_compare is not None:\n\n if run_info_compare['code'] == 'mineos':\n\n run_info_compare['attenuation'] = 'none'\n\n if run_info['attenuation'] == 'full' or run_info_compare['attenuation'] == 'full':\n\n no_title = True\n else:\n\n no_title = False\n \n if ((run_info['attenuation'] == 'full') and \n (run_info_compare['attenuation'] != 'full')):\n\n # Plot two eigenfunctions overlaid on same plot.\n ax, ax_imag = plot_eigenfunc_wrapper(run_info, mode_type, n, l,\n i_toroidal = i_toroidal, ax = None, show = False,\n transparent = False, save = False, linestyle = '-',\n label_suffix = label_suffix_compare, x_label = None,\n norm_func = norm_func, units = units,\n plot_gradient = plot_gradient, plot_potential = plot_potential,\n alpha = 0.5, no_title = no_title) \n\n plot_eigenfunc_wrapper(run_info_compare, mode_type, n, l,\n i_toroidal = i_toroidal, ax = ax, ax_imag = ax_imag, show = True,\n label_suffix = label_suffix, plot_gradient = plot_gradient,\n plot_potential = plot_potential, x_label = x_label,\n norm_func = norm_func, units = units, linestyle = ':',\n no_title = no_title)\n\n else:\n\n # Plot two eigenfunctions overlaid on same plot.\n ax, ax_imag = plot_eigenfunc_wrapper(run_info_compare, mode_type, n, l,\n i_toroidal = i_toroidal, ax = None, ax_imag = None, show = False,\n transparent = False, save = False, linestyle = ':',\n label_suffix = label_suffix_compare, x_label = None,\n norm_func = norm_func, units = units,\n plot_gradient = plot_gradient, plot_potential = plot_potential,\n no_title = no_title)\n\n plot_eigenfunc_wrapper(run_info, mode_type, n, l,\n i_toroidal = i_toroidal, ax = ax, ax_imag = ax_imag, show = True,\n label_suffix = label_suffix, plot_gradient = plot_gradient,\n plot_potential = plot_potential, x_label = x_label,\n norm_func = norm_func, units = units, alpha = 0.5,\n no_title = no_title) \n\n else:\n\n if run_info['attenuation'] == 'full':\n\n no_title = True\n\n else:\n\n no_title = False\n\n # Plot a single eigenfunction.\n plot_eigenfunc_wrapper(run_info, mode_type, n, l,\n i_toroidal = i_toroidal, ax = None,\n plot_gradient = plot_gradient,\n plot_potential = plot_potential,\n label_suffix = label_suffix, x_label = x_label,\n norm_func = norm_func, units = units, r_lims = r_lims,\n no_title = no_title,\n relaxation = relaxation,\n duplicate = duplicate) \n\n return\n\nif __name__ == 
'__main__':\n\n main()\n","repo_name":"harrymd/Ouroboros","sub_path":"plot/plot_eigenfunctions.py","file_name":"plot_eigenfunctions.py","file_ext":"py","file_size_in_byte":26734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"1667179211","text":"import argparse\nimport subprocess\nimport sys\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n # create the top-level parser\n parser = argparse.ArgumentParser(description='CLI tools for use with The Archive.app.')\n parser.add_argument('query',\n nargs='*',\n help='Query to pass to The Archive.app')\n parser.add_argument('-m', '--match',\n action='store_true',\n help='Find the best match and display it')\n\n if len(sys.argv) < 2:\n parser.print_help()\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.match:\n url_keyword = 'match'\n else:\n url_keyword = 'search'\n\n activate_command = 'tell application \"The Archive\" to activate'\n subprocess.run(['osascript', '-e', activate_command])\n\n url = 'thearchive://%s/%s' % (url_keyword, ' '.join(args.query))\n subprocess.run(['open', url])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nelsonlove/zettel-tools","sub_path":"zetteltools/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12812615854","text":"from rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport json\nfrom rest_framework.decorators import api_view\nfrom rest_framework import permissions\nfrom room.models import Coach, Game, Team, Player\nfrom room.serializers import (CoachSerializers, CoachPostSerializers,\n GameSerializers, GamePostSerializers,\n PlayerPostSerializers, PlayerSerializers,\n TeamPostSerializers, TeamSerializers,\n )\n\n\n@api_view(['GET', 'POST'])\ndef teams(request):\n \"\"\"\n List all code snippets, or create a new snippet.\n \"\"\"\n if request.method == 'GET':\n snippets = Team.objects.all()\n serializer = TeamSerializers(snippets, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n team_data = TeamPostSerializers(data=request.data)\n profile = Team.objects.filter(user=request.user)\n if team_data.is_valid():\n if profile:\n return Response({\"status\": \"The team is not first\"})\n else:\n team_data.save(user=request.user)\n return Response({\"status\": True})\n else:\n return Response(team_data.errors)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef team(request, pk):\n \"\"\"\n Retrieve, update or delete a code snippet.\n \"\"\"\n try:\n team = Team.objects.get(pk=pk)\n except Team.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = TeamSerializers(team)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = TeamPostSerializers(team, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n team.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'POST'])\ndef players_list(request):\n \"\"\"\n List all code snippets, or create a new snippet.\n \"\"\"\n if request.method == 'GET':\n players = Player.objects.filter(user=request.user)\n serializer = PlayerSerializers(players, many=True)\n return 
Response(serializer.data)\n\n elif request.method == 'POST':\n player_data = PlayerPostSerializers(data=request.data)\n players = Player.objects.filter(user=request.user)\n print(players)\n if len(players) >= 18: # To do change this value\n return Response({\"status\": \"You have the maximum number of players\"})\n else:\n if player_data.is_valid():\n player_data.save(user=request.user)\n return Response({\"status\": True})\n else:\n return Response(player_data.errors)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef player_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a code snippet.\n \"\"\"\n try:\n player = Player.objects.get(pk=pk)\n except Player.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = PlayerSerializers(player)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = PlayerPostSerializers(player, data=request.data)\n if serializer.is_valid():\n serializer.save()\n\n def operation(pos, neg, tot):\n if tot == 0:\n return 0\n else:\n return int((pos - neg) / tot * 100)\n\n p = Player.objects.get(pk=pk)\n player = PlayerSerializers(player)\n\n new_index_attack = operation(player.data[\"positive_attack\"], player.data[\"negative_attack\"], player.data[\n \"total_attack\"])\n p.index_attack = new_index_attack\n\n new_index_block = operation(player.data[\"positive_block\"], player.data[\"negative_block\"], player.data[\n \"total_block\"])\n p.index_block = new_index_block\n\n new_index_set = operation(player.data[\"positive_set\"], player.data[\"negative_set\"], player.data[\n \"total_set\"])\n p.index_set = new_index_set\n\n new_index_dig = operation(player.data[\"positive_dig\"], player.data[\"negative_dig\"], player.data[\n \"total_dig\"])\n p.index_dig = new_index_dig\n\n new_index_serve = operation(player.data[\"positive_serve\"], player.data[\"negative_serve\"], player.data[\n \"total_serve\"])\n p.index_serve = new_index_serve\n\n p.save()\n\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n player.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'POST'])\ndef coaches_list(request):\n \"\"\"\n List all code snippets, or create a new snippet.\n \"\"\"\n if request.method == 'GET':\n players = Coach.objects.filter(user=request.user)\n serializer = CoachSerializers(players, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n coach_data = CoachPostSerializers(data=request.data)\n coaches = Coach.objects.filter(user=request.user)\n print(coaches)\n # if len(coaches) >= 6: # To do change this value\n # return Response({\"status\": \"You have the maximum number of coaches\"})\n # else:\n if coach_data.is_valid():\n coach_data.save(user=request.user)\n return Response({\"status\": True})\n else:\n return Response(coach_data.errors)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef coach_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a code snippet.\n \"\"\"\n try:\n coach = Coach.objects.get(pk=pk)\n except Coach.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = CoachSerializers(coach)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = CoachPostSerializers(coach, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'DELETE':\n        coach.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'POST'])\ndef games(request):\n    \"\"\"\n    List all code snippets, or create a new snippet.\n    \"\"\"\n    if request.method == 'GET':\n        snippets = Game.objects.filter(user=request.user)\n        serializer = GameSerializers(snippets, many=True)\n        return Response(serializer.data)\n\n    elif request.method == 'POST':\n        new_game = GamePostSerializers(data=request.data)\n        if new_game.is_valid():\n            new_game.save(user=request.user)\n            return Response(request.data)\n        else:\n            return Response(new_game.errors)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef game(request, pk):\n    \"\"\"\n    Retrieve, update or delete a code snippet.\n    \"\"\"\n    try:\n        this_game = Game.objects.get(pk=pk)\n    except Game.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'GET':\n        serializer = GameSerializers(this_game)\n        return Response(serializer.data)\n\n    elif request.method == 'PUT':\n        serializer = GamePostSerializers(this_game, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'DELETE':\n        this_game.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"kolikre/statistics","sub_path":"backend/room/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"21882599151","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 24 19:59:19 2017\n\n@author: rene\n\nBenchmarks the performance of different numerical Integration methods.\n\"\"\"\n\nimport MCIntegrator\nimport BoxPlotter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport time\n\n\n\n\ndef hyperbel(pos):\n    '''\n    analyticalAnswer = 1/6\n    domain = 1\n    dim = 2\n    '''\n    return np.sum((pos[:]-5.)**2., axis=1)\n\ndef ringStep(pos):\n    '''\n    Doughnut-shaped area with value 2;\n    analyticalAnswer = 2*np.pi*(3**2-2**2)\n    domain >= 6\n    dim = 2\n    '''\n    d = np.linalg.norm(pos-5, axis=1)\n    return ((d>2) & (d<3))*2\n\n\n\n\n#Simulation parameters\niterations = 5\nnumberOfBoxes = 5\n\n#ringStep\nintFunc = ringStep\nanalyticalAnswer = 2*np.pi*(3**2-2**2)\ndomainSize = 10\ndim = 2\naddRandom = True\n\nnumTestPoints = 25*(3**2)\n\n#hyperbel\n#intFunc = hyperbel\n#analyticalAnswer = 1/6\n#domainSize = 1\n#dim=2\n\nRESULT_PATH = \"results/integration_test\"\nIMAGE_PATH = RESULT_PATH+\"/images\"\n\n\n#For each of the four integration methods\nfor i in range(4):\n    \n    \n    print(\"---------------------\")\n    print(\"Grid method: \" + str(i))\n    print(\"Number of Test points: \" + str(numTestPoints))\n    \n    mcer = MCIntegrator.MCIntegrator(dim=dim, numTestPoints=numTestPoints,\n                                     domainSize=domainSize, numberOfBoxes=numberOfBoxes)\n    \n    bplotter = BoxPlotter.BoxPlotter(mcer, RESULT_PATH, IMAGE_PATH)\n    \n    density = np.ones([mcer.numberOfBoxes]*mcer.dim)\n    \n    #Run the number of iterations specified\n    for itera in range(iterations):\n        if i == 0:\n            mcer.generateUniformGrid(addRandom=addRandom)\n        elif i == 1:\n            mcer.generateAdaptiveUniformGrid(density=density, addRandom=addRandom)\n        elif i == 2:\n            mcer.generateStratifiedGrid(addRandom=addRandom)\n        elif i == 3:\n            mcer.generateAdaptiveStratifiedGrid(density=density, addRandom=addRandom)\n\n        totalIntegral, _, newDensity = mcer.integrate(function=intFunc)\n\n        
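        # A minimal cross-check (added sketch): a plain uniform Monte Carlo
        # estimate of the same integral, assuming the integration domain is
        # [0, domainSize]^dim as the centre-at-5 integrands suggest. It uses
        # only numpy and names defined above, and does not touch the
        # MCIntegrator API, whose internals are not shown in this file.
        plain_pts = np.random.uniform(0, domainSize, size=(numTestPoints, dim))
        plain_estimate = np.mean(intFunc(plain_pts)) * domainSize**dim
        # plain_estimate should scatter around analyticalAnswer (~31.42 here).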
#print(totalIntegral)\n\n density = newDensity\n \n bplotter.plotBox(True, True)\n CSV_FILE = open(RESULT_PATH+\"/ringStep_mesh\"+\"_r-\"+str(addRandom)+\"_N-\"+str(numTestPoints)+\"_met-\"+str(i)+\".csv\", 'w', newline='') \n CSV_FILE_WRITER = csv.writer(CSV_FILE)\n points=mcer.getFlatTestPoints()\n for p in points:\n CSV_FILE_WRITER.writerow(p)\n CSV_FILE.flush()\n CSV_FILE.close()\n\n","repo_name":"evalvarez12/Variational-Quantum","sub_path":"boxExtractor.py","file_name":"boxExtractor.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41187322453","text":"import torch\nimport os\nimport os.path as osp\nfrom datetime import datetime\n\nimport neuron.ops as ops\nfrom neuron.config import registry\nfrom .meter import Meter\nfrom .logger import Logger\n\n\nclass Trainer(object):\n\n def __init__(self, cfg):\n # experimental information\n self.name = '{}_{}'.format(\n cfg.experiment,\n datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n self.work_dir = osp.join(cfg.work_dir, self.name)\n if not osp.exists(self.work_dir):\n os.makedirs(self.work_dir)\n self.log_level = cfg.log_level\n self.train_echo = cfg.train_echo\n self.val_echo = cfg.val_echo\n self.max_epochs = cfg.max_epochs\n self.start_epoch = cfg.start_epoch\n self.save_frequency = cfg.save_frequency\n self.val_frequency = cfg.val_frequency\n\n # GPU usage\n cuda = torch.cuda.is_available()\n cfg.use_gpu = cfg.use_gpu if cuda else False\n self.device = torch.device('cuda:0' if cfg.use_gpu else 'cpu')\n # store configurations\n self.cfg = cfg.deepcopy()\n\n # build model, loss and metrics\n self.model = registry.build(cfg.model).to(self.device)\n self.criterion = registry.build(cfg.loss).to(self.device)\n if cfg.metric:\n self.metric = registry.build(cfg.metric).to(self.device)\n else:\n self.metric = None\n\n # build optimizer\n if cfg.param_grouper:\n param_grouper = registry.build(cfg.param_grouper)\n params = param_grouper(self.model, self.criterion)\n else:\n params = list(self.model.parameters()) + \\\n list(self.criterion.parameters())\n params = [p for p in params if p.requires_grad]\n cfg.optimizer.update({'params': params})\n self.optimizer = registry.build(cfg.optimizer)\n\n # build lr scheduler\n if cfg.lr_scheduler:\n cfg.lr_scheduler.update({'optimizer': self.optimizer})\n self.lr_scheduler = registry.build(cfg.lr_scheduler)\n else:\n self.lr_scheduler = None\n \n # build training, validation and test data\n self.train_data = self._build_dataset(cfg.train_data)\n self.val_data = self._build_dataset(cfg.val_data)\n self.test_data = self._build_dataset(cfg.test_data)\n\n # build evaluator\n if cfg.evaluator:\n cfg.evaluator.update({'dataset': self.test_data})\n self.evaluator = registry.build(cfg.evaluator)\n else:\n self.evaluator = None\n \n # state variables\n self._train_epoch = -1\n self._train_iter = -1\n self._val_epoch = -1\n self._val_iter = -1\n self._train_meters = Meter()\n self._val_meters = Meter()\n self._logger = Logger(\n self.work_dir, self.name, log_level=self.log_level)\n\n def train(self):\n self._hook_started()\n for epoch in range(self.start_epoch, self.max_epochs):\n self.train_epoch(self.train_data, epoch=epoch)\n if ((epoch + 1) % self.val_frequency == 0 or \\\n (epoch + 1) == self.max_epochs) and \\\n self.val_data is not None:\n self.val_epoch(self.val_data, epoch=epoch)\n self._hook_completed()\n \n @torch.enable_grad()\n def train_epoch(self, train_data, epoch):\n self._set_train(True)\n 
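        # The loop below is hook-driven: _hook_epoch_started steps the LR
        # scheduler and logs, each train_step returns a dict of scalar losses
        # and metrics, _hook_iter_completed feeds that dict into the running
        # Meter, and _hook_epoch_completed logs the epoch averages and
        # periodically saves the model before resetting the meters.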
self._hook_epoch_started(epoch=epoch)\n for it, batch in enumerate(train_data):\n self._hook_iter_started(iter_=it)\n loss = self.train_step(batch)\n self._hook_iter_completed(metrics=loss)\n self._hook_epoch_completed()\n\n @torch.enable_grad()\n def train_step(self, batch):\n self._set_train(True)\n batch = ops.put_device(batch, self.device)\n\n # forward pass\n output = ops.adaptive_apply(self.model.forward_train, batch)\n \n # evaluate loss and ensure it to be a dictionary\n loss = ops.adaptive_apply(self.criterion, output)\n if isinstance(loss, torch.Tensor):\n loss = {'loss': loss}\n assert isinstance(loss, dict)\n\n # optimization step\n self.optimizer.zero_grad()\n loss['loss'].backward()\n self.optimizer.step()\n\n # evaluate metrics\n if self.metric is not None:\n metrics = ops.adaptive_apply(\n self.metric, ops.detach(output))\n loss.update(metrics)\n\n # convert to CPU arrays or scalars\n for k, v in loss.items():\n if not isinstance(v, torch.Tensor):\n continue\n if v.numel() == 1:\n loss[k] = v.item()\n else:\n loss[k] = v.detach().cpu().numpy()\n\n return loss\n \n @torch.no_grad()\n def val_epoch(self, val_data, epoch):\n self._set_train(False)\n self._hook_val_epoch_started(epoch=epoch)\n for it, batch in enumerate(val_data):\n self._hook_val_iter_started(iter_=it)\n loss = self.val_step(batch)\n self._hook_val_iter_completed(metrics=loss)\n self._hook_val_epoch_completed()\n \n @torch.no_grad()\n def val_step(self, batch):\n self._set_train(False)\n batch = ops.put_device(batch, self.device)\n\n # forward pass\n output = ops.adaptive_apply(self.model.forward_val, batch)\n \n # evaluate loss and ensure it to be a dictionary\n loss = ops.adaptive_apply(self.criterion, output)\n if isinstance(loss, torch.Tensor):\n loss = {'loss': loss}\n assert isinstance(loss, dict)\n\n # evaluate metrics\n if self.metric is not None:\n metrics = ops.adaptive_apply(\n self.metric, ops.detach(output))\n loss.update(metrics)\n\n # convert to CPU arrays or scalars\n for k, v in loss.items():\n if not isinstance(v, torch.Tensor):\n continue\n if v.numel() == 1:\n loss[k] = v.item()\n else:\n loss[k] = v.detach().cpu().numpy()\n\n return loss\n \n @torch.no_grad()\n def test(self):\n self._set_train(False)\n if self.evaluator is None:\n raise ValueError('The evaluator is not configured')\n self.evaluator.run(self.model)\n self.evaluator.report(self.model.name)\n \n def _set_train(self, flag=True):\n if flag:\n self.model.train()\n self.criterion.train()\n else:\n self.model.eval()\n self.criterion.eval()\n \n def _build_dataset(self, cfg):\n if not cfg:\n return None\n \n if 'DataLoader' in cfg.type:\n dataset = registry.build(cfg.dataset)\n cfg.update({'dataset': dataset})\n if 'sampler' in cfg:\n if 'shuffle' in cfg:\n cfg.shuffle = False\n cfg.sampler.update({'dataset': dataset})\n\n return registry.build(cfg)\n \n def _adaptive_apply(self, func, args):\n if isinstance(args, (tuple, list)):\n return func(*args)\n elif isinstance(args, dict):\n return func(**args)\n else:\n return func(args)\n \n def _hook_started(self):\n self._logger.log(\n 'Experiment[{}]: Training started...'.format(self.name))\n self._logger.log('Configurations:\\n {}'.format(repr(self.cfg)))\n \n def _hook_completed(self):\n self._logger.log(\n 'Experiment[{}]: Training completed!'.format(self.name))\n\n def _hook_epoch_started(self, epoch):\n # store epoch\n self._train_epoch = epoch\n # update lr if applicable\n if self.lr_scheduler:\n self.lr_scheduler.step(epoch=epoch)\n # log text\n self._logger.log('Epoch[{}/{}]: 
Training started...'.format(\n epoch + 1, self.max_epochs))\n \n def _hook_epoch_completed(self):\n # log text\n self._logger.log('Epoch[{}/{}]: Training completed!'.format(\n self._train_epoch + 1, self.max_epochs))\n text = '\\t'\n for name in self.train_echo:\n if not name in self._train_meters:\n continue\n text += ' {}: {:.3f}'.format(\n name, self._train_meters[name].avg)\n self._logger.log(text)\n # log metrics\n for k, v in self._train_meters.items():\n self._logger.add_scalar(\n 'epoch_train/' + k, v.avg,\n global_step=self._train_epoch)\n # save model\n if (self._train_epoch + 1) % self.save_frequency == 0 or \\\n (self._train_epoch + 1) == self.max_epochs:\n model_file = osp.join(\n self.work_dir,\n 'model_{}.pth'.format(self._train_epoch + 1))\n torch.save(self.model, model_file)\n # reset meters\n self._train_meters.reset()\n \n def _hook_iter_started(self, iter_):\n # store iter\n self._train_iter = iter_\n # update lr if applicable\n if getattr(self.lr_scheduler, 'step_iter', None) is not None:\n self.lr_scheduler.step_iter(iter_=iter_)\n \n def _hook_iter_completed(self, metrics):\n # log text\n text = 'Epoch[{}/{}] Iter[{}/{}]'.format(\n self._train_epoch + 1, self.max_epochs,\n self._train_iter + 1, len(self.train_data))\n for name in self.train_echo:\n if not name in metrics:\n continue\n text += ' {}: {:.3f}'.format(name, metrics[name])\n self._logger.log(text)\n # log metrics\n for k, v in metrics.items():\n self._logger.add_scalar('iter_train/' + k, v)\n # update meters\n self._train_meters.update(metrics)\n \n def _hook_val_epoch_started(self, epoch):\n # store epoch\n self._val_epoch = epoch\n # log text\n self._logger.log(\n 'Val Epoch[{}/{}]: Validation started...'.format(\n epoch + 1, self.max_epochs))\n \n def _hook_val_epoch_completed(self):\n # log text\n self._logger.log(\n 'Val Epoch[{}/{}]: Validation completed!'.format(\n self._val_epoch + 1, self.max_epochs))\n text = '\\t'\n for name in self.val_echo:\n if not name in self._val_meters:\n continue\n text += ' {}: {:.3f}'.format(\n name, self._val_meters[name].avg)\n self._logger.log(text)\n # log metrics\n for k, v in self._val_meters.items():\n self._logger.add_scalar(\n 'epoch_val/' + k, v.avg,\n global_step=self._val_epoch)\n # reset meters\n self._val_meters.reset()\n \n def _hook_val_iter_started(self, iter_):\n # store iter\n self._val_iter = iter_\n \n def _hook_val_iter_completed(self, metrics):\n # log text\n text = 'Val Epoch[{}/{}] Iter[{}/{}]'.format(\n self._val_epoch + 1, self.max_epochs,\n self._val_iter + 1, len(self.val_data))\n for name in self.val_echo:\n if not name in metrics:\n continue\n text += ' {}: {:.3f}'.format(name, metrics[name])\n self._logger.log(text)\n # log metrics\n for k, v in metrics.items():\n self._logger.add_scalar('iter_val/' + k, v)\n # update meters\n self._val_meters.update(metrics)\n \n def _hook_exception_raised(self):\n pass\n","repo_name":"Daikenan/LTMU","sub_path":"DiMP_LTMU/Global_Track/_submodules/neuron/neuron/engine/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":11506,"program_lang":"python","lang":"en","doc_type":"code","stars":255,"dataset":"github-code","pt":"32"} +{"seq_id":"33923456279","text":"import numpy as np\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nimport pickle\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch.optim as optim\nimport cv2\n\nfrom utils import *\nfrom dataset import 
*\n\ntorch.set_num_threads(1)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nfeatures = None\n\ndef get_features_hook(self, input, output):\n global features\n features = [output]\n\ndef get_features(model, data, num_classes, device=None):\n '''\n Compute the proposed Mahalanobis confidence score on input dataset\n return: Mahalanobis score from layer_index\n '''\n model.eval()\n handle = model.fc[1].register_forward_hook(get_features_hook)\n model(data)\n handle.remove()\n global features\n out_features = features[0]\n \n out_features = out_features.view(out_features.size(0), out_features.size(1), -1)\n out_features = torch.mean(out_features, 2) #N, 128\n\n return out_features\n\n\ndef get_trainftrs(model, args, train_loader = None, device = None):\n \"\"\" Generates penultimate layer features, target and predicted label. \"\"\"\n model = model.to(args.device)\n model.eval()\n\n total = 0\n correct = 0\n\n value_inds = []\n value_oods = []\n names_oods = []\n \n features = []\n targets = []\n preds = []\n i = 0 \n \n for idx_ins, data in tqdm(enumerate(train_loader)):\n print(i)\n images, labels = data\n\n images = images.to(args.device)\n labels = labels.to(args.device)\n print(\"args.device\", args.device)\n\n with torch.no_grad():\n #forward\n feature_small = get_features(model, images, 2) #N, 128\n outputs = model(images) \n predicted_value, predicted = torch.max(outputs.data, 1)\n trgs_small = labels\n preds_small = predicted\n\n features.append(feature_small)\n targets.append(trgs_small)\n preds.append(preds_small)\n i += 1\n\n features_all = torch.cat(features)\n trgs_all = torch.cat(targets) \n preds_all = torch.cat(preds) \n dir_path = os.path.join(args.result_path, \"penultimate_ftrs\", \"seed_\" + str(args.seed))\n os.makedirs(dir_path, exist_ok=True)\n\n if args.flag_adjust:\n file_path = os.path.join(dir_path, \"ftrs_{}_{}.npy\".format(args.variation, 'train'))\n trg_pth = os.path.join(dir_path, \"trgs_{}_{}.npy\".format(args.variation, 'train'))\n preds_pth = os.path.join(dir_path, \"preds_{}_{}.npy\".format(args.variation, 'train'))\n else:\n file_path = os.path.join(dir_path, \"ftrs_age_{}.npy\".format('train'))\n trg_pth = os.path.join(dir_path, \"trgs_age_{}.npy\".format('train'))\n preds_pth = os.path.join(dir_path, \"preds_age_{}.npy\".format('train'))\n \n np.save(file_path, features_all.detach().cpu().numpy())\n np.save(trg_pth, trgs_all.detach().cpu().numpy())\n np.save(preds_pth, preds_all.detach().cpu().numpy())\n\n \ndef test(model, args, train_loader = None, loaders = None, device = None,train_loader_mu=None):\n \"\"\" Generates penultimate layer features, target and predicted label for distributionally shifted datasets. 
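    Here "penultimate" means the output of model.fc[1], captured by the
    forward hook registered in get_features above and averaged over any
    trailing spatial dimensions -- i.e. the activations feeding the final
    fc[2] classifier whose weights are saved as class_vectors below.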
\"\"\"\n model = model.to(args.device)\n model.eval()\n\n total = 0\n correct = 0\n dict_results = dict()\n dict_results['preds'] = []\n dict_results['trues'] = []\n dict_results['correct'] = []\n dict_results['dataset_idx'] = []\n dict_results['org_labels'] = []\n dict_results['pred_labels'] = []\n value_inds = []\n value_oods = []\n names_oods = []\n \n test_loaders = loaders\n print(len(test_loaders))\n \n for idx, loader in enumerate(test_loaders):\n features = []\n targets = []\n preds = []\n for idx_ins, data in tqdm(enumerate(loader)):\n images, labels = data\n \n images = images.to(args.device)\n labels = labels.to(args.device)\n #forward\n with torch.no_grad():\n feature_small = get_features(model, images, 2) #N, 128 \n outputs = model(images) \n predicted_value, predicted = torch.max(outputs.data, 1)\n preds_small = predicted\n\n trgs_small = labels\n features.append(feature_small)\n targets.append(trgs_small)\n preds.append(preds_small)\n\n features_all = torch.cat(features)\n trgs_all = torch.cat(targets) \n preds_all = torch.cat(preds) \n\n dir_path = os.path.join(args.result_path, \"penultimate_ftrs\", \"seed_\" + str(args.seed))\n os.makedirs(dir_path, exist_ok=True)\n if args.flag_adjust:\n file_path = os.path.join(dir_path, \"ftrs_{}_{}.npy\".format(args.variation, idx))\n trg_pth = os.path.join(dir_path, \"trgs_{}_{}.npy\".format(args.variation, idx))\n preds_pth = os.path.join(dir_path, \"preds_{}_{}.npy\".format(args.variation, idx))\n else:\n file_path = os.path.join(dir_path, \"ftrs_age_{}.npy\".format(idx))\n trg_pth = os.path.join(dir_path, \"trgs_age_{}.npy\".format(idx))\n preds_pth = os.path.join(dir_path, \"preds_age_{}.npy\".format(idx))\n\n np.save(file_path, features_all.detach().cpu().numpy())\n np.save(trg_pth, trgs_all.detach().cpu().numpy())\n np.save(preds_pth, preds_all.detach().cpu().numpy())\n\n vector_pth = os.path.join(dir_path, \"class_vectors.npy\".format(idx))\n np.save(vector_pth, model.fc[2].weight.detach().cpu().numpy())\n\n \ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--flag_adjust', action='store_true', help='adjust test or not')\n parser.add_argument('--variation', type=str, help='bright or contrast')\n parser.add_argument('--num_classes', default = 2, type=int, help='path of the model')\n parser.add_argument('--result_path', default=\"./results\", type=str, help='train or test')\n parser.add_argument('--seed', default = 0, type=int, help='path of the model')\n parser.add_argument('--data_path', default=\"./data\", type=str, help='path of the dataset')\n args = parser.parse_args()\n\n set_seed(args.seed)\n args.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n ##############################\n # Obtain OOD datasets\n ##############################\n bones_df, train_df, val_df, test_df, data_transform = Data_Transform(args.data_path)\n images_dir = os.path.join(args.data_path, 'boneage-training-dataset/boneage-training-dataset/')\n age_groups = [[1,2,3,4,5],[6],[7],[8],[9],[10,11,12],[13],[14],[15,16,17,18,19]]\n if args.flag_adjust:\n \"\"\" Obtain loaders of OOD datasets shifted by variation of gaussian, impulse, shot noise or brightness, contrast. \"\"\"\n loaders, data_len, adjust_scale = get_adjust_dataloaders(bones_df, train_df, val_df, test_df, images_dir, data_transform, args.variation)\n \n else:\n \"\"\" Obtain loaders of OOD datasets shifted by the varaition of age. 
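        (Each sublist of age_groups defined above -- e.g. [1,2,3,4,5] --
        appears to pool those ages into a single shifted evaluation split
        handed to get_eval_dataloaders.)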
\"\"\"\n loaders, data_len = get_eval_dataloaders(bones_df, train_df, val_df, test_df, images_dir, data_transform, age_groups)\n\n \n train_dataset = BoneDataset(dataframe = train_df, img_dir = images_dir, mode = 'train', transform = data_transform)\n train_loader = DataLoader(train_dataset, batch_size=64, shuffle=False)\n train_loader_mu = DataLoader(train_dataset, batch_size=64, shuffle=False)\n\n ##############################\n # Define model and load weights\n ##############################\n model = define_model(device)\n model.load_state_dict(torch.load(os.path.join(args.result_path, \"models\", \"best_{}.pt\".format(args.seed))))\n model.eval()\n \n ###############################\n # Obtain penultimate layer features for ood and train datasets.\n ###############################\n test(model, args, train_loader = train_loader, loaders = loaders, device = device, train_loader_mu = train_loader_mu)\n\n dir_path = os.path.join(args.result_path, \"penultimate_ftrs\", \"seed_\" + str(args.seed))\n os.makedirs(dir_path, exist_ok=True)\n\n if args.flag_adjust:\n file_path = os.path.join(dir_path, \"ftrs_{}_{}.npy\".format(args.variation, 'train'))\n else:\n file_path = os.path.join(dir_path, \"ftrs_age_{}.npy\".format('train'))\n \n if not os.path.exists(file_path):\n get_trainftrs(model, args, train_loader = train_loader, device = device)\n \nif __name__ == '__main__':\n main()\n","repo_name":"Radhikadua123/TAPUDD","sub_path":"binary_classification/get_features.py","file_name":"get_features.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"25488603503","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/add-one-row-to-tree/\n# Author: Miao Zhang\n# Date: 2021-02-25\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:\n if not root: return None\n if d == 1:\n newroot = TreeNode(v)\n newroot.left = root\n root = newroot\n elif d == 2:\n newleft = TreeNode(v)\n newright = TreeNode(v)\n newleft.left = root.left\n newright.right = root.right\n root.left = newleft\n root.right = newright\n else:\n self.addOneRow(root.left, v, d - 1)\n self.addOneRow(root.right, v, d - 1)\n return root\n \n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/addOneRowtoTree/addOneRowtoTree.py","file_name":"addOneRowtoTree.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12890967584","text":"#\n\n__author__ = 'Aaron Yang'\n__email__ = 'byang971@usc.edu'\n__date__ = '10/9/2019 8:24 PM'\n\nimport re\nimport sys\nfrom xml.etree import ElementTree as ET\n\nfrom lxml import etree\n\nSELECTED_TAG = ['book', 'author', 'title', 'genre', 'description']\n\n\ndef prettify(root_element):\n\t\"\"\"Return a pretty-printed XML string based on the Element.\n\t\"\"\"\n\trough_string = ET.tostring(root_element, 'utf-8').decode(\"utf-8\")\n\tparser = etree.XMLParser(remove_blank_text=True)\n\ttest_root = etree.XML(rough_string, parser)\n\treturn str(etree.tostring(test_root, pretty_print=True), 'utf-8')\n\n\ndef read_file_get_root_node(file_path):\n\tinput_tree = ET.parse(file_path)\n\treturn input_tree.getroot()\n\n\ndef export_xml(out_file_path, 
root_element):\n\t# prettify(root_element)\n\tstr_xml = prettify(root_element)\n\tmyfile = open(out_file_path, \"w\")\n\tmyfile.write(str_xml)\n\n\ndef wrapper(raw_str):\n\traw_str = re.sub(\"[^\\w\\d'\\s]+\", '', raw_str)\n\treturn raw_str.lower().split()\n\n\ndef is_empty_or_none(itea_obj):\n\treturn None == itea_obj or len(itea_obj) == 0\n\n\ndef search_by_keywords(keywords, index_root_element):\n\tsearch_result_info = []\n\tfor keyword in keywords:\n\t\ttemp = {}\n\t\tky_node_list = index_root_element.findall(\"./keyword[@key=\\\"{}\\\"]\".format(keyword))\n\t\tif not is_empty_or_none(ky_node_list):\n\t\t\tall_info_nodes = list(ky_node_list[0])\n\t\t\tfor num, info in enumerate(all_info_nodes):\n\t\t\t\tif info.get('id') in temp:\n\t\t\t\t\ttemp.get(info.get('id')).add(info.get('where'))\n\t\t\t\telse:\n\t\t\t\t\ttemp.setdefault(info.get('id'), set([info.get('where')]))\n\n\t\tfor key, value in temp.items():\n\t\t\tvalue = sorted(value, key=SELECTED_TAG.index)\n\t\t\ttemp[key] = value\n\t\tsearch_result_info.append(temp)\n\treturn search_result_info\n\n\ndef find_in_common(result_infos):\n\tresults = {}\n\tkeySet = None\n\n\tfor index, info in enumerate(result_infos):\n\t\tif is_empty_or_none(keySet):\n\t\t\tkeySet = set(info.keys())\n\t\telse:\n\t\t\tkeySet = keySet & set(info.keys())\n\n\tfor key in keySet:\n\t\twhereSet = None\n\t\tfor index, info in enumerate(result_infos):\n\t\t\tif is_empty_or_none(whereSet):\n\t\t\t\twhereSet = set(info.get(key))\n\t\t\telse:\n\t\t\t\twhereSet = whereSet & set(info.get(key))\n\n\t\tresults[key] = whereSet if len(whereSet) > 0 else {}\n\n\treturn results\n\n\ndef traverse_searched_info(root_elemnt, results):\n\tfor key, value in results.items():\n\t\tbook_node = ET.Element('book', {\"id\": key})\n\t\tfor value_item in list(value):\n\t\t\tnode = original_data_root.findall(\"./book[@id=\\\"\" + key + \"\\\"]/{}\".format(value_item))[0]\n\t\t\tbook_node.append(node)\n\t\troot_elemnt.append(book_node)\n\n\ndef extract_elements(results, original_data_root):\n\troot = ET.Element('results')\n\tif is_empty_or_none(results):\n\t\treturn root\n\telse:\n\t\tfor key, value in results.items():\n\t\t\tbook_node = ET.Element('book', {\"id\": key})\n\t\t\tfor value_item in list(value):\n\t\t\t\tnode = original_data_root.findall(\"./book[@id=\\\"\" + key + \"\\\"]/{}\".format(value_item))[0]\n\t\t\t\tbook_node.append(node)\n\t\t\tif len(list(book_node)) > 0:\n\t\t\t\troot.append(book_node)\n\t\treturn root\n\n\nif __name__ == \"__main__\":\n\t# original_data_file_path = sys.argv[1]\n\t# inverted_index_file_path = sys.argv[2]\n\t# keywords = sys.argv[3]\n\t# output_file_path = sys.argv[4]\n\toriginal_data_file_path = './assets/books.xml'\n\tinverted_index_file_path = './index.xml'\n\tkeywords = \"A\"\n\toutput_file_path = './results.xml'\n\n\t# read files\n\toriginal_data_root = read_file_get_root_node(original_data_file_path)\n\tindex_root = read_file_get_root_node(inverted_index_file_path)\n\n\t# wrapper the keywords\n\tkeyword_list = wrapper(keywords)\n\n\tresult_infos = search_by_keywords(keyword_list, index_root)\n\tif len(keyword_list) > 1:\n\t\tresults = find_in_common(result_infos)\n\telse:\n\t\tresults = result_infos[0]\n\n\toutput_root = extract_elements(results, original_data_root)\n\t# write a output xml file\n\texport_xml(output_file_path, 
output_root)\n","repo_name":"AaronYang2333/DSCI_551","sub_path":"ay_hw_2/Bo_Yang_search.py","file_name":"Bo_Yang_search.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"32"} +{"seq_id":"32441882203","text":"# coding: utf-8\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom datetime import datetime\nfrom scrapy_redis.pipelines import RedisPipeline\nfrom scrapy import log\nfrom .items import Result, Wrap\nfrom .query import hash as hash_query\nfrom ..ut.time import TIME_FORMAT\n\n\nclass Output(RedisPipeline):\n\n def item_key(self, item, spider):\n \"\"\"Returns redis key based on given spider\"\"\"\n return \"torabot:spy:%s:%s:items\" % (\n spider.name,\n hash_query(item['result']['query'] if isinstance(item, Wrap) else item['query'])\n )\n\n def process_item(self, item, spider):\n try:\n return RedisPipeline.process_item(\n self,\n item if isinstance(item, Wrap) else Wrap(\n result=item,\n ctime=datetime.utcnow().strftime(TIME_FORMAT)\n ),\n spider\n )\n finally:\n self.exist_item = True\n\n def close_spider(self, spider):\n if not getattr(self, 'exist_item', False):\n log.msg('no item processed, push failed result', level=log.INFO)\n self.process_item(Result(ok=False), spider)\n","repo_name":"Answeror/torabot","sub_path":"torabot/spy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"32"} +{"seq_id":"26245711896","text":"from fastapi import FastAPI\r\nimport pickle, uvicorn, os\r\nfrom typing import List, Literal\r\nfrom pydantic import BaseModel\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# Config & Setup\r\n## Variables of environment\r\nDIRPATH = os.path.dirname(__file__)\r\nASSETSDIRPATH = os.path.join(DIRPATH, \"asset\")\r\nml_comp_pkl = os.path.join(ASSETSDIRPATH, \"ml_comp.pkl\")\r\n\r\nprint(\r\n f\" {'*'*10} Config {'*'*10}\\n INFO: DIRPATH = {DIRPATH} \\n INFO: ASSETSDIRPATH = {ASSETSDIRPATH} \"\r\n)\r\n\r\n\r\n# API Basic config\r\napp = FastAPI(\r\n title=\"Titanic Survivors API\",\r\n version=\"0.0.1\",\r\n description=\"Prediction of Titanic Survivors\",\r\n)\r\n\r\n## Loading of assets\r\nwith open(ml_comp_pkl, \"rb\") as f:\r\n loaded_items = pickle.load(f)\r\n# print(\"INFO: Loaded assets:\", loaded_items)\r\n\r\nmodel_pipeline = loaded_items[\"pipeline\"]\r\nnum_cols = loaded_items[\"numeric_columns\"]\r\ncat_cols = loaded_items[\"categorical_columns\"]\r\n\r\n\r\n## BaseModel\r\nclass ModelInput(BaseModel):\r\n PeopleInTicket: int\r\n Age: float\r\n FarePerPerson: float\r\n SibSp: int\r\n Pclass: int\r\n Fare: float\r\n Parch: int\r\n TicketNumber: float\r\n Embarked: Literal[\"S\", \"C\", \"Q\"]\r\n Sex: Literal[\"male\", \"female\"]\r\n Title: Literal[\"Mr\", \"Mrs\", \"Miss\", \"Master\", \"FemaleChild\", \"Royalty\", \"Officer\"]\r\n\r\n\r\n## Utils\r\n# def processing_FE(\r\n# dataset, scaler, encoder,imputer, FE=model_pipeline\r\n# ): # FE : ColumnTransfromer, Pipeline\r\n# \"Cleaning, Processing and Feature Engineering of the input dataset.\"\r\n# \"\"\":dataset pandas.DataFrame\"\"\"\r\n\r\n# # if imputer is not None:\r\n# # output_dataset = imputer.transform(dataset)\r\n# # else:\r\n# # output_dataset = dataset.copy()\r\n\r\n# # output_dataset = scaler.transform(output_dataset)\r\n\r\n# # if encoder is not 
None:\r\n# # output_dataset = encoder.transform(output_dataset)\r\n# if FE is not None:\r\n# output_dataset = FE.fit(output_dataset)\r\n\r\n# return output_dataset\r\n\r\n\r\ndef make_prediction(\r\n Pclass,\r\n Sex,\r\n Age,\r\n SibSp,\r\n Parch,\r\n Fare,\r\n Embarked,\r\n PeopleInTicket,\r\n FarePerPerson,\r\n TicketNumber,\r\n Title,\r\n):\r\n \"Function to make one prediction\"\r\n\r\n data = {\r\n \"PeopleInTicket\": PeopleInTicket,\r\n \"Age\": Age,\r\n \"FarePerPerson\": FarePerPerson,\r\n \"SibSp\": SibSp,\r\n \"Pclass\": Pclass,\r\n \"Fare\": Fare,\r\n \"Parch\": Parch,\r\n \"TicketNumber\": TicketNumber,\r\n \"Embarked\": Embarked,\r\n \"Title\": Title,\r\n \"Sex\": Sex,\r\n }\r\n\r\n df = pd.DataFrame([data])\r\n target_idx = {\r\n 0: \"deceased\",\r\n \"1\": \"survived\",\r\n }\r\n\r\n X = df\r\n pred = model_pipeline.predict_proba(X)\r\n pred_class = int(np.argmax(pred[0]))\r\n output = {\r\n \"predicted_class\": pred_class,\r\n \"prediction_explanation\": target_idx[pred_class],\r\n \"confidence_probability\": float(pred[0][pred_class]),\r\n }\r\n\r\n return output\r\n\r\n\r\n# Endpoints\r\n## STATUS\r\n@app.post(\"/\")\r\nasync def status():\r\n return {\"message\": \"online\"}\r\n\r\n\r\n@app.get(\"/\")\r\nasync def status():\r\n return {\"message\": \"online\"}\r\n\r\n\r\n## Prediction\r\n@app.post(\"/Titanic\")\r\nasync def predict(input: ModelInput):\r\n \"\"\"__descr__\r\n --details---\r\n \"\"\"\r\n output_pred = make_prediction(\r\n PeopleInTicket=input.PeopleInTicket,\r\n Age=input.Age,\r\n FarePerPerson=input.FarePerPerson,\r\n SibSp=input.SibSp,\r\n Pclass=input.Pclass,\r\n Fare=input.Fare,\r\n Parch=input.Parch,\r\n TicketNumber=input.TicketNumber,\r\n Embarked=input.Embarked,\r\n Sex=input.Sex,\r\n Title=input.Title,\r\n )\r\n\r\n # Format the output\r\n # if output_pred == 0:\r\n # output_pred = \"No, the person didn't survive\"\r\n # else:\r\n # output_pred = \"Yes, the person survived\"\r\n output_pred[\"input\"] = input\r\n \r\n # return the output\r\n print(\"\\n\"*3, \"Output of the API\\n\", output_pred,\"\\n\"*3)\r\n return output_pred\r\n\r\n\r\n# Execution\r\nif __name__ == \"__main__\":\r\n uvicorn.run(\r\n \"main:app\",\r\n reload=True,\r\n )\r\n","repo_name":"eaedk/Titanic-Survival-API","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"39682119328","text":"\"\"\"\nSOURCE:\n Mind of Douglas Adams\nDESCRIPTION:\n Search a ProductListMetaData object for information.\nARGS:\n CheckArguments\n Type:\n python boolean\n Description:\n if true, checks the arguments with conditions written in the function\n if false, ignores those conditions\n PrintExtra\n Type:\n python integer\n Description:\n if greater than 0, prints addional information about the function\n if 0, function is expected to print nothing to console\n Additional Notes:\n The greater the number, the more output the function will print\n Most functions only use 0 or 1, but some can print more depending on the number\n ProductListMetaData\n Type:\n \n Description:\n None\n ConditionDictionary\n Type:\n \n Description:\n None\nRETURNS:\n Result\n Type:\n Description:\n\"\"\"\nimport json\nimport Library_ComponentExtract\n#-------------------------------------------------------------------------------\ndef Main(\n ProductListMetaData= None,\n ProductListMetaDataFilePath = None,\n ConditionDictionary= None,\n CheckArguments = True,\n PrintExtra = 
False,\n ):\n\n Result = None\n\n if ProductListMetaData is None and ProductListMetaDataFilePath is not None:\n with open(ProductListMetaDataFilePath) as FileHandleObject:\n ProductListMetaData = json.load(FileHandleObject)\n\n\n if (CheckArguments):\n ArgumentErrorMessage = \"\"\n\n if (len(ArgumentErrorMessage) > 0 ):\n if(PrintExtra):\n print(\"ArgumentErrorMessage:\\n\", ArgumentErrorMessage)\n raise Exception(ArgumentErrorMessage)\n\n #print ('ProductListMetaData')\n #print (ProductListMetaData)\n\n PossibleItems = []\n for Item in ProductListMetaData:\n for Key, RequiredValue in ConditionDictionary.items():\n Value = Library_ComponentExtract.Main(\n Object = Item,\n Key = Key,\n DefaultValue = None, \n )\n Value = str(Value) \n if Value == RequiredValue:\n PossibleItems.append( Item )\n \n Result = PossibleItems\n return Result \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"srodney/sndrizpipe","sub_path":"datafindhubble/Library_BarbaraMikulskiArchiveSearchProductListMetaData.py","file_name":"Library_BarbaraMikulskiArchiveSearchProductListMetaData.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"2488689248","text":"# -*- coding: utf-8 -*- \n\ndef get_value(obj, key): \n def extract(obj, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, key)\n elif k == key:\n return v\n elif isinstance(obj, list):\n for item in obj:\n extract(item, key)\n #return v\n results = extract(obj, key)\n return results","repo_name":"aloula/verifica_id","sub_path":"modules/extract_json.py","file_name":"extract_json.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28258547439","text":"# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\nimport scrapy\nfrom scrapy.spiders import Spider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.selector import Selector\nfrom tripAdvisor.items import TripadvisorItem\n\nclass TripAdvisorSpider(Spider):\n name = \"TripAdvisor\" # Name of the spider, to be used when crawling\n allowed_domains = [\"tripadvisor.com\"] # Where the spider is allowed to go\n start_urls = [\n \"https://www.tripadvisor.com/Restaurants-g293736-Rabat_Rabat_Sale_Zemmour_Zaer_Region.html\"\n ]\n counter = 0\n def parse(self, response):\n sel = Selector(response) # The XPath selector\n sites = sel.xpath('//div[@class=\"title\"]/a/@href').extract()\n for i in range(len(sites)):\n site = \"https://www.tripadvisor.com\"+sites[i]\n req = scrapy.Request(site, callback=self.parse_resto_details)\n yield req\n def parse_resto_details(self, response):\n sel = Selector(response) # The XPath selector\n name = sel.xpath(\n '//div[contains(@class,\"restaurantName\")]/h1/text()'\n ).extract()\n name = name[0]\n address = sel.xpath(\n '//div[contains(@class,\"businessListingContainer\")]//span[@class=\"detail\"]/span/text()'\n ).extract()\n address = ' '.join(address)\n phone = sel.xpath(\n '//div[contains(@class,\"phone\")]//span[contains(@class,\"detail\")]/text()'\n ).extract()\n phone = phone[0]\n yield {'address': address, 'name': name, 'phone': 
phone}\n","repo_name":"chaouchsalah/robio-server","sub_path":"scraper/scraper/tripAdvisor/tripAdvisor/spiders/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"589191478","text":"\r\n#####################################################################################################################\r\n# CS 6375.003 - Assignment 3, Neural Network Programming\r\n# This is a starter code in Python 3.6 for a 2-hidden-layer neural network.\r\n# You need to have numpy and pandas installed before running this code.\r\n# Below are the meaning of symbols:\r\n# train - training dataset - can be a link to a URL or a local file\r\n# - you can assume the last column will the label column\r\n# train - test dataset - can be a link to a URL or a local file\r\n# - you can assume the last column will the label column\r\n# h1 - number of neurons in the first hidden layer\r\n# h2 - number of neurons in the second hidden layer\r\n# X - vector of features for each instance\r\n# y - output for each instance\r\n# w01, delta01, X01 - weights, updates and outputs for connection from layer 0 (input) to layer 1 (first hidden)\r\n# w12, delata12, X12 - weights, updates and outputs for connection from layer 1 (first hidden) to layer 2 (second hidden)\r\n# w23, delta23, X23 - weights, updates and outputs for connection from layer 2 (second hidden) to layer 3 (output layer)\r\n#\r\n# You need to complete all TODO marked sections\r\n# You are free to modify this code in any way you want, but need to mention it in the README file.\r\n#\r\n#####################################################################################################################\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import *\r\nfrom sklearn.model_selection import train_test_split\r\nfrom collections import defaultdict\r\n\r\nclass NeuralNet:\r\n def __init__(self, dataset, header = True, h1 = 4, h2 = 2):\r\n np.random.seed(1)\r\n # train refers to the training dataset\r\n # test refers to the testing dataset\r\n # h1 and h2 represent the number of nodes in 1st and 2nd hidden layers\r\n\r\n #raw_input = pd.read_csv(train)\r\n # TODO: Remember to implement the preprocess method\r\n raw = pd.read_csv(dataset)\r\n raw.replace('?',np.NaN)\r\n raw.replace('',np.NaN)\r\n raw.dropna(axis=0, how='any')\r\n raw.drop_duplicates(keep='first', subset=None, inplace = False)\r\n _dataset = self.preprocess(raw)\r\n scaler = MinMaxScaler()\r\n scaledData = scaler.fit_transform(_dataset)\r\n _dataset = pd.DataFrame(scaledData[:,:])\r\n ncols = len(_dataset.columns)\r\n nrows = len(_dataset.index)\r\n self.attributeValues= _dataset.iloc[:, 0:(ncols -1)].values.reshape(nrows, ncols-1)\r\n self.classValues = _dataset.iloc[:, (ncols-1)].values.reshape(nrows, 1) \r\n self.attributeValues_train, self.attributeValues_test, self.classValues_train, self.classValues_test = train_test_split(self.attributeValues, self.classValues, test_size=0.25)\r\n self.classValues_train = pd.DataFrame(self.classValues_train[:,:])\r\n self.classValues_test = pd.DataFrame(self.classValues_test[:,:])\r\n self.attributeValues_train = pd.DataFrame(self.attributeValues_train[:,:])\r\n self.attributeValues_test = pd.DataFrame(self.attributeValues_test[:,:])\r\n ncols_TRAIN = len(self.attributeValues_train.columns)\r\n nrows_TRAIN = len(self.attributeValues_train.index)\r\n self.X = 
self.attributeValues_train.values.reshape(nrows_TRAIN, ncols_TRAIN)\r\n self.y= self.classValues_train.iloc[:,0].values.reshape(nrows_TRAIN,1)\r\n #\r\n # Find number of input and output layers from the dataset\r\n #\r\n input_layer_size = len(self.X[0])\r\n if not isinstance(self.y[0], np.ndarray):\r\n output_layer_size = 1\r\n else:\r\n output_layer_size = len(self.y[0])\r\n\r\n # assign random weights to matrices in network in the range -1 to 1 \r\n # number of weights connecting layers = (no. of nodes in previous layer) x (no. of nodes in following layer)\r\n self.w01 = 2 * np.random.random((input_layer_size, h1)) - 1\r\n self.X01 = self.X\r\n self.delta01 = np.zeros((input_layer_size, h1))\r\n self.w12 = 2 * np.random.random((h1, h2)) - 1\r\n self.X12 = np.zeros((len(self.X), h1))\r\n self.delta12 = np.zeros((h1, h2))\r\n self.w23 = 2 * np.random.random((h2, output_layer_size)) - 1\r\n self.X23 = np.zeros((len(self.X), h2))\r\n self.delta23 = np.zeros((h2, output_layer_size))\r\n self.deltaOut = np.zeros((output_layer_size, 1))\r\n #\r\n # TODO: I have coded the sigmoid activation function, you need to do the same for tanh and ReLu -- DONE\r\n #\r\n\r\n def __activation(self, x, activation):\r\n if activation == \"sigmoid\":\r\n self.__sigmoid(self, x)\r\n elif activation == \"tanh\":\r\n \tself.__tanh(self,x)\r\n elif activation == \"ReLu\":\r\n \tself.__relu(self,x)\r\n\r\n\r\n #\r\n # TODO: Define the function for tanh, ReLu and their derivatives \t-- DONE\r\n #\r\n\r\n def __activation_derivative(self, x, activation):\r\n if activation == \"sigmoid\":\r\n self.__sigmoid_derivative(self, x)\r\n elif activation == \"tanh\":\r\n \tself.__tanh_derivative(self,x)\r\n elif activation == \"ReLu\":\r\n \tself.__relu_derivative(self,x)\r\n\r\n def __sigmoid(self, x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\n def __tanh(self,x):\r\n \treturn (np.exp(x) - np.exp(-x))/(np.exp(x) + np.exp(-x))\r\n\r\n def __relu(self,x):\r\n return x * (x > 0)\r\n\r\n # derivative of sigmoid function, indicates confidence about existing weight\r\n\r\n def __sigmoid_derivative(self, x):\r\n return x * (1 - x)\r\n\r\n def __tanh_derivative(self,x):\r\n \treturn 1 - (x*x)\r\n\r\n def __relu_derivative(self,x):\r\n return 1 * (x > 0)\r\n \r\n #\r\n # TODO: Write code for pre-processing the dataset, which would include standardization, normalization,\r\n # categorical to numerical, etc\r\n #\r\n\r\n def preprocess(self, X):\r\n dict = defaultdict(LabelEncoder)\r\n encoded = X.apply(lambda l: dict[l.name].fit_transform(l))\r\n return encoded\r\n\r\n # Below is the training function\r\n\r\n def train(self,activation, max_iterations = 1000, learning_rate = 0.005):\r\n for iteration in range(max_iterations):\r\n out = self.forward_pass(activation)\r\n error = 0.5 * np.power((out - self.y), 2)\r\n self.backward_pass(out, activation)\r\n update_layer2 = learning_rate * self.X23.T.dot(self.deltaOut)\r\n update_layer1 = learning_rate * self.X12.T.dot(self.delta23)\r\n update_input = learning_rate * self.X01.T.dot(self.delta12)\r\n\r\n self.w23 += update_layer2\r\n self.w12 += update_layer1\r\n self.w01 += update_input\r\n\r\n print(\"After \" + str(max_iterations) + \" iterations, the total error is \" + str(np.sum(error)))\r\n print(\"The final weight vectors are (starting from input to output layers)\")\r\n print(self.w01)\r\n print(self.w12)\r\n print(self.w23)\r\n\r\n def forward_pass(self,activation):\r\n # pass our inputs through our neural network\r\n in1 = np.dot(self.X, self.w01 )\r\n if activation == 
\"sigmoid\":\r\n self.X12 = self.__sigmoid(in1)\r\n elif activation == \"tanh\":\r\n \tself.X12 = self.__tanh(in1)\r\n elif activation == \"ReLu\":\r\n \tself.X12 = self.__relu(in1)\r\n \r\n in2 = np.dot(self.X12, self.w12)\r\n if activation == \"sigmoid\":\r\n self.X23 = self.__sigmoid(in2)\r\n elif activation == \"tanh\":\r\n \tself.X23 = self.__tanh(in2)\r\n elif activation == \"ReLu\":\r\n \tself.X23 = self.__relu(in2)\r\n \r\n in3 = np.dot(self.X23, self.w23)\r\n out= 0\r\n if activation == \"sigmoid\":\r\n out = self.__sigmoid(in3)\r\n elif activation == \"tanh\":\r\n \tout = self.__tanh(in3)\r\n elif activation == \"ReLu\":\r\n \tout = self.__relu(in3)\r\n return out\r\n \r\n def backward_pass(self, out, activation):\r\n # pass our inputs through our neural network\r\n self.compute_output_delta(out, activation)\r\n self.compute_hidden_layer2_delta(activation)\r\n self.compute_hidden_layer1_delta(activation)\r\n\r\n # TODO: Implement other activation functions -- DONE\r\n\r\n def compute_output_delta(self, out, activation):\r\n delta_output=0\r\n if activation == \"sigmoid\":\r\n delta_output = (self.y - out) * (self.__sigmoid_derivative(out))\r\n elif activation == \"tanh\":\r\n \tdelta_output = (self.y - out) * (self.__tanh_derivative(out))\r\n elif activation == \"ReLu\":\r\n \tdelta_output = (self.y - out) * (self.__relu_derivative(out))\r\n\r\n self.deltaOut = delta_output\r\n\r\n # TODO: Implement other activation functions -- DONE\r\n\r\n def compute_hidden_layer2_delta(self, activation):\r\n delta_hidden_layer2=0\r\n if activation == \"sigmoid\":\r\n delta_hidden_layer2 = (self.deltaOut.dot(self.w23.T)) * (self.__sigmoid_derivative(self.X23))\r\n elif activation == \"tanh\":\r\n delta_hidden_layer2 = (self.deltaOut.dot(self.w23.T)) * (self.__tanh_derivative(self.X23))\r\n elif activation == \"ReLu\":\r\n delta_hidden_layer2 = (self.deltaOut.dot(self.w23.T)) * (self.__relu_derivative(self.X23))\r\n self.delta23 = delta_hidden_layer2\r\n\r\n # TODO: Implement other activation functions -- DONE\r\n\r\n def compute_hidden_layer1_delta(self, activation):\r\n delta_hidden_layer1=0\r\n if activation == \"sigmoid\":\r\n delta_hidden_layer1 = (self.delta23.dot(self.w12.T)) * (self.__sigmoid_derivative(self.X12))\r\n elif activation == \"tanh\":\r\n delta_hidden_layer1 = (self.delta23.dot(self.w12.T)) * (self.__tanh_derivative(self.X12))\r\n elif activation == \"ReLu\":\r\n delta_hidden_layer1 = (self.delta23.dot(self.w12.T)) * (self.__relu_derivative(self.X12))\r\n\r\n self.delta12 = delta_hidden_layer1\r\n\r\n # TODO: Implement other activation functions -- DONE\r\n\r\n def compute_input_layer_delta(self, activation):\r\n delta_input_layer=0\r\n if activation == \"sigmoid\":\r\n delta_input_layer = np.multiply(self.__sigmoid_derivative(self.X01), self.delta01.dot(self.w01.T))\r\n elif activation == \"tanh\":\r\n delta_input_layer = np.multiply(self.__tanh_derivative(self.X01), self.delta01.dot(self.w01.T))\r\n elif activation == \"ReLu\":\r\n delta_input_layer = np.multiply(self.__relu_derivative(self.X01), self.delta01.dot(self.w01.T))\r\n\r\n self.delta01 = delta_input_layer\r\n\r\n # TODO: Implement the predict function for applying the trained model on the test dataset.\r\n # You can assume that the test dataset has the same format as the training dataset\r\n # You have to output the test error from this function\r\n\r\n def predict(self, activation, header = True):\r\n ncols = len(self.attributeValues_test.columns)\r\n nrows = len(self.attributeValues_test.index)\r\n self.X = 
self.attributeValues_test.values.reshape(nrows, ncols)\r\n self.y = self.classValues_test.iloc[:,0].values.reshape(nrows,1)\r\n out = self.forward_pass(activation)\r\n error = 0.5 * np.power((out - self.y), 2)\r\n print (\"Predicted error : \",np.sum(error))\r\n return np.sum(error)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dataPath = input(\"Enter Path for data set : \")\r\n if(dataPath):\r\n print(\"-------ACTIVATION - SIGMOID-------\")\r\n neural_network = NeuralNet(dataPath)\r\n neural_network.train(\"sigmoid\")\r\n testError = neural_network.predict(\"sigmoid\")\r\n \r\n print(\"-------ACTIVATION - TANH-------\")\r\n neural_network = NeuralNet(dataPath)\r\n neural_network.train(\"tanh\")\r\n testError = neural_network.predict(\"tanh\")\r\n \r\n print(\"-------ACTIVATION - RELU-------\")\r\n neural_network = NeuralNet(dataPath)\r\n neural_network.train(\"ReLu\")\r\n testError = neural_network.predict(\"ReLu\")\r\n else:\r\n print(\"No path entered. Try Again!\")","repo_name":"sushrutpatnaik/Simple-Neural-Network","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":12098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11651106137","text":"import os.path\nimport time\n\nimport cv2 as cv\nimport numpy as np\nfrom image_recon.color_converter import ColorConverter\nfrom image_recon.color_detector import ColorDetector\n\n\nclass ObjectDetector:\n \"\"\"\n This is a class for simple object detection\n with Mask-RCNN\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The constructor of the ObjectDetector class.\n\n Initializes critical variables and options for the object detector.\n \"\"\"\n self.confidence_threshold = 0.5\n self.mask_threshold = 0.3\n\n self.color_detector = ColorDetector()\n self.color_converter = ColorConverter()\n self.colors = \"src/image_recon/colors.json\"\n\n self.text_graph = \"src/image_recon/models/inception_v2/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt\"\n self.model_weights = \"src/image_recon/models/inception_v2/frozen_inference_graph.pb\"\n\n self.net = cv.dnn.readNetFromTensorflow(self.model_weights, self.text_graph)\n self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\n\n self.frame = None\n\n self.classes_file = \"src/image_recon/models/inception_v2/mscoco_labels.names\"\n self.classes = None\n with open(self.classes_file, 'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n def get_color_of_prediction(self, frame, left, top, right, bottom, class_mask):\n \"\"\"\n Given a frame and a mask, return the color of the object inside.\n \"\"\"\n\n new_frame = self.color_detector.increase_contrast(frame)\n\n class_mask = cv.resize(class_mask, (right - left + 1, bottom - top + 1))\n mask = (class_mask > self.mask_threshold)\n roi = new_frame[top:bottom + 1, left:right + 1][mask]\n\n self.color_detector.set_roi(roi.astype(np.uint8))\n return self.color_detector.determine_color()\n\n def draw_box(self, frame, class_id, conf, left, top, right, bottom, class_mask, color, color_choice, object_choice):\n \"\"\"\n Draws a box around a detected object with the name of the object and the confidence\n \"\"\"\n label = \"%.2f\" % conf\n if self.classes:\n color_name = self.color_converter.get_closest_color(color[::-1], self.colors)\n if self.classes[class_id] == \"Person\":\n label = '%s' % (self.classes[class_id])\n else:\n label = '%s %s' % (color_name, self.classes[class_id])\n\n label_size, base_line = 
cv.getTextSize(label, cv.FONT_HERSHEY_DUPLEX, 0.5, 1)\n top = max(top, label_size[1])\n\n same_color_and_object = color_name.lower() == color_choice and self.classes[class_id] == object_choice\n same_color_any_object = color_name.lower() == color_choice and object_choice == \"any\"\n any_color_same_object = color_choice == \"any\" and self.classes[class_id] == object_choice\n any_color_any_object = color_choice == \"any\" and object_choice == \"any\"\n draw_rectangle = same_color_and_object or same_color_any_object or any_color_same_object or any_color_any_object\n\n if (draw_rectangle):\n cv.rectangle(frame, (left, top), (right, bottom), color, 3)\n cv.rectangle(frame, (left, top - round(1.5 * label_size[1])), (left + round(1.5 * label_size[0]),\n top + base_line), color, cv.FILLED)\n cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_DUPLEX, 0.75, (255, 255, 255), 1)\n class_mask = cv.resize(class_mask, (right - left + 1, bottom - top + 1))\n mask = (class_mask > self.mask_threshold)\n roi = frame[top:bottom + 1, left:right + 1][mask]\n\n def post_process(self, boxes, masks, color_choice, object_choice):\n \"\"\"\n Catch-all function to draw boxes and determine color of objects.\n \"\"\"\n num_detections = boxes.shape[2]\n\n colors = []\n\n frameH = self.frame.shape[0]\n frameW = self.frame.shape[1]\n\n for i in range(num_detections):\n box = boxes[0, 0, i]\n mask = masks[i]\n score = box[2]\n if score > self.confidence_threshold:\n class_id = int(box[1])\n\n left = int(frameW * box[3])\n top = int(frameH * box[4])\n right = int(frameW * box[5])\n bottom = int(frameH * box[6])\n\n left = max(0, min(left, frameW - 1))\n top = max(0, min(top, frameH - 1))\n right = max(0, min(right, frameW - 1))\n bottom = max(0, min(bottom, frameH - 1))\n\n class_mask = mask[class_id]\n\n # Get color of object\n colors.append(self.get_color_of_prediction(self.frame, left, top, right, bottom, class_mask))\n\n for i in range(num_detections):\n box = boxes[0, 0, i]\n mask = masks[i]\n score = box[2]\n if score > self.confidence_threshold:\n class_id = int(box[1])\n\n left = int(frameW * box[3])\n top = int(frameH * box[4])\n right = int(frameW * box[5])\n bottom = int(frameH * box[6])\n\n left = max(0, min(left, frameW - 1))\n top = max(0, min(top, frameH - 1))\n right = max(0, min(right, frameW - 1))\n bottom = max(0, min(bottom, frameH - 1))\n\n class_mask = mask[class_id]\n\n # Draw bounding box, colorize and show the mask on the image\n self.draw_box(self.frame,\n class_id,\n score,\n left,\n top,\n right,\n bottom,\n class_mask,\n colors[i],\n color_choice,\n object_choice)\n\n def mask_rcnn(self, file, file_type, color_choice, object_choice):\n \"\"\"\n Run the object detection neural network for all images in\n an input stream.\n \"\"\"\n output_file = \"src/static/\" + file[:-4]\n file = \"src/image_recon/uploaded/\" + file\n\n if file_type == \"image\":\n if not os.path.isfile(file):\n print(\"[OBJECT DETECTOR] Image not found!\")\n return False\n cap = cv.VideoCapture(file)\n output_file += \"_predicted.jpg\"\n\n elif file_type == \"video\":\n if not os.path.isfile(file):\n print(\"[OBJECT DETECTOR] Video not found!\")\n return False\n cap = cv.VideoCapture(file)\n output_file += \"_predicted.avi\"\n vid_writer = cv.VideoWriter(output_file, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 28, (\n round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))\n\n while cv.waitKey(1) < 0:\n start_time = time.time()\n has_frame, self.frame = cap.read()\n if not has_frame:\n 
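                # cap.read() returns (False, None) once the image or video
                # stream is exhausted; together with the cv.waitKey check
                # above, this is the loop's only exit.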
print(\"[OBJECT DETECTOR] Done Processing!\")\n break\n\n blob = cv.dnn.blobFromImage(self.frame, swapRB=True, crop=False)\n self.net.setInput(blob)\n boxes, masks = self.net.forward(['detection_out_final', 'detection_masks'])\n self.post_process(boxes, masks, color_choice, object_choice)\n\n # label = 'yuri (your useless recognizer (of) images)'\n # cv.putText(self.frame, label, (0, 15), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 0))\n\n if file_type == \"image\":\n cv.imwrite(output_file, self.frame.astype(np.uint8))\n print(\"[OBJECT DETECTOR] Wrote the file to\", output_file)\n else:\n vid_writer.write(self.frame.astype(np.uint8))\n\n end_time = time.time()\n print(\"[OBJECT DETECTOR] Time Elapsed:\", round(end_time - start_time, 2))\n\n return True\n\n def run_prediction(self, file_name, file_type, color_choice, object_choice):\n \"\"\"\n Create an async thread to run the object detector on a file.\n \"\"\"\n from multiprocessing.pool import ThreadPool\n\n print(\"[OBJECT DETECTOR] Starting new object detector thread\")\n pool = ThreadPool(processes=1)\n async_result = pool.apply_async(self.mask_rcnn, (file_name, file_type, color_choice, object_choice))\n return async_result.get()\n","repo_name":"davidgur/yuri","sub_path":"src/image_recon/mask_rcnn.py","file_name":"mask_rcnn.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"4688493061","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import UserViewSet, VeiculosViewSet, ConcessionariaViewSet\n\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register('veiculos', VeiculosViewSet)\nrouter.register('concessionarias', ConcessionariaViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('', include(router.urls)),\n]","repo_name":"raifran1/mtv-django-uninassau","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"70230341532","text":"#!/usr/bin/env python\n\"\"\"\nCreates a Trello board used as timetable for new course\n\nhttps://developer.atlassian.com/cloud/trello/guides/rest-api/api-introduction/\nUsage: python trello.py HN2006 2020/06/25\n\"\"\"\nimport argparse\nimport datetime\nimport os\nimport logging\n\nimport requests\n\nauthen = {\n \"key\": os.environ[\"TRELLO_KEY\"],\n \"token\": os.environ[\"TRELLO_TOKEN\"],\n}\nS = requests.Session()\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef create_card(date, list_id, name):\n r = S.post(\n \"https://api.trello.com/1/cards\",\n params={**authen, \"idList\": list_id, \"name\": name, \"due\": date},\n )\n r.raise_for_status()\n\n\ndef main():\n argp = argparse.ArgumentParser()\n argp.add_argument(\"course_code\", help=\"e.g: HN2006\")\n argp.add_argument(\"start_date\", help=\"e.g 2020/06/25\")\n\n args = argp.parse_args()\n start = datetime.datetime.strptime(args.start_date, \"%Y/%m/%d\")\n course_code = args.course_code.upper()\n loc = \"Hà Nội\" if course_code.startswith(\"HN\") else \"Tp Hồ Chí Minh\"\n\n # Create board\n board_name = \"Học Python {} PYMI.vn {} timetable\".format(loc, course_code)\n logger.info(\"Creating a new Trello 
board named %s\", board_name)\n board_resp = S.post(\n url=\"https://api.trello.com/1/boards/\",\n json={**authen, \"name\": board_name},\n )\n board_id = board_resp.json()[\"id\"]\n\n # List lists\n # resp = S.get(\n # \"https://api.trello.com/1/boards/{}/lists\".format(board_id),\n # params=authen,\n # )\n # resp.json()\n\n logger.info(\"Creating two lists, one for Tuesday and other for Thurday\")\n resp = S.post(\n \"https://api.trello.com/1/boards/{}/lists\".format(board_id),\n params={**authen, \"name\": \"Thứ 5\"},\n )\n list_thursday_id = resp.json()[\"id\"]\n\n resp = S.post(\n \"https://api.trello.com/1/boards/{}/lists\".format(board_id),\n params={**authen, \"name\": \"Thứ 3\"},\n )\n list_tuesday_id = resp.json()[\"id\"]\n logger.info(\"Creating cards for each lesson, add due date\")\n count = 1\n day = start\n while count <= 12:\n # tuesday\n if day.isoweekday() == 2:\n create_card(\n date=day.strftime(\"%Y/%m/%d\"),\n list_id=list_tuesday_id,\n name=\"Bài {}\".format(count),\n )\n count = count + 1\n # thursday\n elif day.isoweekday() == 4:\n create_card(\n date=day.strftime(\"%Y/%m/%d\"),\n list_id=list_thursday_id,\n name=\"Bài {}\".format(count),\n )\n count = count + 1\n\n day = day + datetime.timedelta(days=1)\n\n logger.info(\"Done, URL: %s\", board_resp.json()[\"url\"])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"falcol/pyfml","sub_path":"trello.py","file_name":"trello.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39462472935","text":"from numpy import exp, pi, arange\nimport numpy as np\nfrom sympy.ntheory import totient\nfrom utilities.primes import odd_primes_up_to\n\n\ndef divisors_over_one(n):\n large_divisors = [n]\n for i in range(2, int(np.sqrt(n) + 1)):\n if n % i == 0:\n yield i\n if i*i != n:\n large_divisors.append(n // i)\n for divisor in reversed(large_divisors):\n yield divisor\n\n\ndef nth_unity_roots(n):\n return exp(2j * pi / n * arange(n))\n\n\ndef nth_unity_roots_not_one(n):\n return exp(2j * pi / n * arange(1, n))\n\n\ndef two_edge_mini_polynomial_for_unity_root(zeta):\n val = np.real(zeta + np.conjugate(zeta))\n return np.polynomial.Polynomial([-2-val, 0, 1])\n\n\ndef k3_mini_polynomial_for_unity_root(zeta):\n val = np.real(zeta + np.conjugate(zeta))\n return np.polynomial.Polynomial([-val, -3, 0, 1])\n\n\ndef loop_mini_polynomial_for_unity_root(zeta):\n val = np.real(zeta + np.conjugate(zeta))\n return np.polynomial.Polynomial([-val, 1])\n\n\ndef polynomial_product(generator, values):\n res = np.polynomial.Polynomial([1])\n for value in values:\n res *= generator(value)\n return res\n\n\ndef average_matching_polynomial(q, polynomial_generator):\n p1 = polynomial_generator(1)\n q_plus_one_polys = [q*(q-1)//2 * totient(d) * p1 ** ((q+1) // d - 1) *\n (polynomial_product(polynomial_generator, nth_unity_roots_not_one(d))) ** ((q+1) // d)\n for d in divisors_over_one(q+1)]\n q_minus_one_polys = [q*(q+1)//2 * totient(d) * p1 *\n (polynomial_product(polynomial_generator, nth_unity_roots(d))) ** ((q-1) // d)\n for d in divisors_over_one(q-1)]\n res = p1 ** q + \\\n (q*q-1)*polynomial_product(polynomial_generator, nth_unity_roots(q)) + \\\n sum(q_plus_one_polys) + sum(q_minus_one_polys)\n return res / (q**3 - q)\n\n\nqs = odd_primes_up_to(100)\n\nfor q in qs:\n p = average_matching_polynomial(q, loop_mini_polynomial_for_unity_root)\n roots = p.roots()\n roots_abs = np.abs(roots)\n real_roots = [x for x in roots if np.imag(x) == 0]\n 
print(real_roots)\n print()\n","repo_name":"zivg2/ThesisSimulations","sub_path":"interlacing_experiments/orbit_polynomials.py","file_name":"orbit_polynomials.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7057153729","text":"# 8-2의 피보나치 함수 코드에서 호출되는 함수 확인\n\nd = [0] * 100\n\n# 피보나치 함수를 재귀함수로 구현\ndef fibo(x):\n print('f(' + str(x) + ')', end=' ')\n if x == 1 or x == 2:\n return 1\n if d[x] != 0:\n return d[x]\n d[x] = fibo(x-1) + fibo(x-2)\n return d[x]\n\nfibo(6)","repo_name":"ymj07168/Graceful-Silvers","sub_path":"jiae/chap8/8-3.py","file_name":"8-3.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"72460556252","text":"import os\nimport sqlite3\n\n\nclass MyDBManager:\n def __init__(self):\n base_dir = os.getcwd()\n self._db = base_dir + \"/db/db_gpt2.db\"\n\n\n def __enter__(self):\n self.conn = sqlite3.connect(self._db)\n return self.conn\n\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n if exc_val:\n raise Exception(\"MyDBManager error!\")\n\n\nconnect = MyDBManager()\n\n\nclass DBConnect:\n def __init__(self, connect=connect):\n self.connect = connect\n\n\n def check_connect(self, chat_id):\n self.chat_id = chat_id\n with self.connect as conn:\n cursor = conn.cursor()\n row_query = \"\"\"SELECT * FROM users where user_id=?\"\"\"\n value_query = (self.chat_id, )\n result = cursor.execute(row_query, value_query)\n records = result.fetchone()\n\n if records is None or len(records) == 0:\n if self.create_user(cursor, self.chat_id):\n conn.commit()\n return True\n else:\n return False\n return True\n\n\n def create_user(self, cursor, chat_id):\n row_query = \"\"\"INSERT INTO users ('user_id') VALUES (?)\"\"\"\n value_query = (chat_id, )\n cursor.execute(row_query, value_query)\n return True","repo_name":"C0nstanta/TeleBotGPT_VGG","sub_path":"db/db_connect.py","file_name":"db_connect.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"30593905906","text":"# =============================================================================\n# Python script for extracting Screen_name related to media twitter urls\n# =============================================================================\n#\n#\n#\n\nfrom ural.twitter import extract_screen_name_from_twitter_url\nimport csv\nfrom tqdm import tqdm\n\n\ndef removing_screen_names_repetition(urls):\n \"\"\"Function returning a unique screen_name from a list of urls\n Args:\n urls (list of str): list of urls to test.\n Returns:\n set: returns a unique screen_name\"\"\"\n\n screen_name = set()\n for url in urls:\n nom = extract_screen_name_from_twitter_url(url)\n if nom:\n screen_name.add(nom)\n return screen_name\n\n\n# ==============================Processing csv file===========================\n\nwith open(\"polarisation.csv\", \"r\") as f, open(\"media_username.csv\", \"w\") as f2:\n file_content = csv.DictReader(f)\n # list of urls list for each entity\n writer = csv.DictWriter(f2, fieldnames=['ID', 'NAME', 'HOME PAGE', 'SCREEN_NAME'])\n writer.writeheader()\n for row in tqdm(file_content):\n if row[\"type (TAGS)\"] != \"media\":\n continue\n username = removing_screen_names_repetition(row['PREFIXES AS URL'].split(\" \"))\n for distinct_username in username:\n writer.writerow({'ID': row[\"ID\"], 'NAME': 
row[\"NAME\"], 'HOME PAGE': row[\"HOME PAGE\"], 'SCREEN_NAME': distinct_username})\n","repo_name":"d3scmps/media_screenname_polar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69949563293","text":"# аргументы функции\n# *args\ndef fn_args(*args):\n for arg in args:\n print(arg)\n\nparameters = (\"test.py\", \"in.txt\", \"out.csv\")\nfn_args(*parameters)\n\n# **kwargs\ndef fn_kwargs(**kwargs):\n for k,v in kwargs.items():\n print(f\"{k} {v}\")\n\nfn_kwargs(script_name=\"test.py\", input_file=\"in.txt\", output_file=\"out.csv\")\n\n# присваивание функции\ndef fn_to_delete():\n print(\"Эта функция - на удаление\")\n\n# присваиваем функцию переменной\nfn_spare = fn_to_delete\nprint(type(fn_spare))\n# функцию можно удалить так же, как и элемент списка\ndel fn_to_delete\n# вызываем \"запасную\" функцию\nprint(fn_spare())\n# здесь будет исключение, т.к. исходная функция уже не существует\nfn_to_delete()","repo_name":"Shorstko/mai_python","sub_path":"02 Functions and parameters/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4958614967","text":"### depricated file, will be deleted\n\n\nimport logging\nimport pandas as pd\nimport requests\nfrom tqdm import tqdm\n\n\ndef send_get_route_request(base_url, start_point, end_point):\n url = \"{}/route/v1/driving/{},{};{},{}?overview=false&steps=true\".format(base_url,\n start_point[0], start_point[1], end_point[0], end_point[1])\n \n response = requests.get(url)\n if response.status_code == 200:\n route = response.json()\n # print(route)\n if route['code'] == 'Ok':\n # get the route\n route = route['routes'][0]['legs']\n extracted_steps = []\n for leg in route:\n for step in leg['steps']:\n extracted_steps.extend((item['location'][0], item['location'][1])\n for item in step['intersections']) # longitude, latitude\n return extracted_steps\n \n logging.error(\"Error response from OSRM: {}\".format(response.text))\n raise Exception(\"Unable to get route from {} to {}\".format(start_point, end_point))\n\n\n# trips = pd.read_csv(\"data/nyc-taxi-trip-duration/train.csv\")\n\n# trips['route_points'] = pd.Series(dtype=object)\n# print(trips.columns)\n# trips['pickup_datetime'] = pd.to_datetime(trips['pickup_datetime'])\n# trips['dropoff_datetime'] = pd.to_datetime(trips['dropoff_datetime'])\n\n# for i in tqdm(range(len(trips))):\n# route_points = []\n# route_points.append(\n# (trips.iloc[i]['pickup_longitude'], trips.iloc[i]['pickup_latitude']))\n# route_points.extend(get_route((trips.iloc[i]['pickup_longitude'], trips.iloc[i]['pickup_latitude']), (\n# trips.iloc[i]['dropoff_longitude'], trips.iloc[i]['dropoff_latitude'])))\n# route_points.append(\n# (trips.iloc[i]['dropoff_longitude'], trips.iloc[i]['dropoff_latitude']))\n# trips.at[i, 'route_points'] = route_points\n# print(route_points)\n# if i == 3:\n# break\n\n# trips.to_csv(\"data/nyc-taxi-trip-duration/train_with_route_points.csv\")\n","repo_name":"alifa98/point2hex","sub_path":"lib/LocationToPoint.py","file_name":"LocationToPoint.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"19437308444","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom MyApp import views\nfrom MyApp.admin 
import mysite\n\nurlpatterns = [\n    url(r'^addbuyer/', views.addBuyer),\n    url(r'^deletebuyer/', views.deleteBuyer),\n    url(r'^updatebuyer/', views.updateBuyer),\n    url(r'^querybuyer/', views.queryBuyer),\n\n    url(r'^getaccount/(\\d+)/', views.getAccount),\n    url(r'^buy/(\\d+)/(\\d+)/', views.buy),\n    url(r'^getorders/(\\d+)/', views.getOrders),\n    url(r'^getbuyergoods/(\\d+)/', views.getBuyerGoods),\n    url(r'^getgoodsbuyers/(\\d+)/', views.getGoodsBuyers),\n]","repo_name":"ouyangsuo/WhatsDjangoModel","sub_path":"MyApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"69940761051","text":"# -*- coding: utf-8 -*-\n# -----------------------------------\n# @CreateTime : 2020/1/26 13:04\n# @Author : Mark Shawn\n# @Email : shawninjuly@gmai.com\n# ------------------------------------\n\n\nfrom pyecharts.faker import Faker\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Map, Geo\nfrom pyecharts.globals import ChartType\n\n\ndef map_base() -> Map:\n    c = (\n        Map()\n        .add(\n            \"商家A\",\n            [list(z) for z in zip(Faker.provinces, Faker.values())],\n            \"china\",\n\n        )\n        .set_global_opts(title_opts=opts.TitleOpts(title=\"Map-基本示例\"))\n    )\n    return c\n\ndef geo_heatmap() -> Geo:\n    c = (\n        Geo()\n        .add_schema(maptype=\"china\")\n        .add(\n            \"geo\",\n            [list(z) for z in zip(Faker.provinces, Faker.values())],\n            type_=ChartType.HEATMAP,\n\n        )\n        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n        .set_global_opts(\n            visualmap_opts=opts.VisualMapOpts(),\n            title_opts=opts.TitleOpts(title=\"Geo-HeatMap\"),\n        )\n    )\n    return c\n\n\n\n\n# map = map_base()\n# map = geo_heatmap()\n# map.render()\n\n# guizhou_show()  # no such function is defined in this module; call commented out so the script can run","repo_name":"MarkShawn2020/simple-spiders","sub_path":"wuhan_support/data_visualize/visualize_map.py","file_name":"visualize_map.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25678678824","text":"\"\"\"Occnet URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom Candidate import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"home\"),\n path(\"job/\", views.job, name=\"home\"),\n path(\"about-us/\", views.aboutus, name=\"home\"),\n path(\"jobinfo/\", views.infojob, name=\"info\"),\n path(\"contact-us\", views.contactus, name=\"contactus\"),\n path(\"log-in\", views.loginpage, name=\"login\"),\n path(\"log-out\", views.logoutpage, name=\"login\"),\n path(\"sign-up\", views.signuppage, name=\"signup\"),\n path(\"jobinfo/apply\", views.apply, name=\"apply\")\n]\n","repo_name":"nileshparab42/Web-Development","sub_path":"Occnet/Candidate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28931997012","text":"import unittest\nfrom tyre.carrigan_tyre import CarriganTyre \n\nclass TestCarriganTyre(unittest.TestCase):\n\n def test_carrigan_tyre_should_be_serviced(self):\n tyres_status = [0.1, 0.4, 0.4, 0.9]\n tyre = CarriganTyre(tyres_status)\n self.assertTrue(tyre.needs_service())\n\n def test_carrigan_tyre_should_not_be_serviced(self):\n tyres_status = [0.1, 0.4, 0.4, 0.3]\n tyre = CarriganTyre(tyres_status)\n self.assertFalse(tyre.needs_service())","repo_name":"emanAtassi/Lyft-internship","sub_path":"test/test_tyre/test_carrigan_tyre.py","file_name":"test_carrigan_tyre.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22274766173","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import bot\nimport asyncio\nimport random\nimport itertools\nimport json\nimport os\nimport random\nfrom time import sleep\nfrom discord.utils import get\n\n\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(\"bot.json\")))\nwith open(__location__ + \"\\\\bot.json\") as cfg:\n config = json.load(cfg)\n\ntoken = config[\"token\"]\nprefix = config[\"command_prefix\"]\n\n\nbot = commands.Bot(command_prefix=prefix)\nclient = discord.Client()\n\ndef getInsult():\n insultList = [\n \"finger\",\n \"fatso\",\n \"diapyhead\",\n \"fingro\",\n \"diapy farty fingro brain idiot dumdum blaster\",\n \"poopoopeepeeface\",\n \"berny\",\n \"pipper\",\n \"ddosface\",\n \"cunt\",\n \"smoothbrain\",\n \"ddos victim\"\n ]\n return random.choice(insultList)\n\ndef getEmojiByName(input):\n return get(bot.get_all_emojis(), name=input)\n\ndef getCoolEmoji(ears=False):\n emoji_list = [\n \"frndus\",\n \"viper\",\n \"fuck12\",\n \"scEybbus\",\n \"vsauce\",\n \"samloka\",\n \"boomer\",\n \"bogpepe\",\n \"ultrafastparrot\",\n \"jeffu\"\n ]\n if ears:\n return \":\" + random.choice(emoji_list) + \":\"\n else:\n return random.choice(emoji_list)\n\ndef niceTry():\n return \"nice try, {}\".format(getInsult())\n \n\n@bot.event\nasync def on_ready():\n print(\"I am running on: \" + bot.user.name)\n print(\"with the id: \" + bot.user.id)\n sleep(0.5)\n print(\"fastening diaper\")\n sleep(0.5)\n print(\"got my rice dude\")\n sleep(0.5)\n print(\"my power supply\", end='')\n for i in range(5):\n print('.', end='', flush=True)\n sleep(0.5)\n sleep(0.5)\n print()\n print(\"its gamer time my dude \")\n sleep(0.5)\n print (\"----------------------------------------------\")\n\n@bot.command(pass_context=True)\nasync def ping(ctx):\n await 
bot.say(\":ping_pong:\")\n\n@bot.command(pass_context=True)\nasync def fingers(ctx):\n await bot.say(\":hand_splayed:\")\n\n@bot.command(pass_context=True)\nasync def bother(ctx, user: discord.Member):\n messages = [\n \"eat fingers.....\",\n \"ÉTA BÖRN... mmmmMMmmmm\",\n \"BRAAAAAAAAAAAAAAAAP\",\n \"I EAT LITEL FINGROS LIKE U 4 BREKFIS\", \n \"HAHAHAHAHAHA\",\n \"king ass ripper right here\",\n \"hormel chili and beans\",\n \"snnnnniiiiiiffffffffffff...oh yes my dear....sssnnnnnnnnnnnniiiiiiiiffffffff....quite pungent indeed...is that....dare I say....sssssssnniff...eggs I smell?......sniff sniff....hmmm...yes...quite so my darling....sniff....quite pungent eggs yes very much so .....ssssssssssssssnnnnnnnnnnnnnnniiiiiiiffffff....ah yes...and also....a hint of....sniff....cheese.....quite wet my dear....sniff...but of yes...this will do nicely....sniff.....please my dear....another if you please....nice a big now....\",\n \"beni borðar norgnafingures\",\n \"tybi xd\",\n \"fokkin\",\n \"haha....fyndið...\",\n \"fucking gamer\",\n \" https://pbs.twimg.com/profile_images/819461052136099840/ye_cF0Mi_400x400.jpg \",\n \" https://i.pinimg.com/originals/cf/e6/21/cfe62168c7bc802e1dce030f23fa463b.jpg \"\n\n\n ] # I do not approve of this\n rawEmoji = getCoolEmoji()\n emoji = getEmojiByName(rawEmoji)\n if(emoji):\n await bot.add_reaction(ctx.message, emoji)\n else:\n print(\"ERROR EMOJI: {} NOT FOUND REEEEEEEEEEEEEEEE\".format(rawEmoji))\n message = random.choice(messages)\n message = \":fire:\" + message + \":fire:\"\n await bot.send_message(user, message)\n await bot.say(\"bothered {} with the message: {}\".format(user.name, message))\n\n\n@bot.command(pass_context=True)\nasync def insult(ctx): \n await bot.say(\"{} you {}\".format(ctx.message.author.mention, getInsult()))\n \n@bot.command(pass_context=True)\nasync def pizza(ctx):\n\n message = ctx.message.content.split()\n if len(message) != 2:\n await bot.say(niceTry())\n return\n else:\n message = message[-1]\n if message == \"0\":\n await bot.say(\"nice try idiot\")\n return\n elif message.isdigit():\n numpizza = random.randint(1, int(message))\n await bot.say(\"how about a large number \" + str(numpizza) + \" with extra cheese you fat fuck\")\n else:\n await bot.say(\"enter a number, {}\".format(getInsult()))\n\n\n@bot.command(pass_context=True)\nasync def rare(ctx):\n\n dir =__location__ + \"\\\\rare\"\n\n await bot.send_file(ctx.message.channel, dir + \"\\\\\" + random.choice(os.listdir(dir)))\n\n\nbot.run(token)","repo_name":"frndus/frndosbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10251383559","text":"# 判断数组一语数组二的交集,也就是判断数组一中的元素是否在数组二中,一眼哈希法\n# 这题由于既要判断数的种类,又要判断出现次数,所以没有像lc349那样的简单逃课解法,使用数组的话只能老老实实定义长度为1001的数组,较为占用空间\n# 由此可看出,349只是恰好有一种简单解法而已,一般的哈希问题还是要按照标准解法来进行\ndef intersect(nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n val1 = dict()\n ans = []\n for i in range(len(nums1)):\n val1[nums1[i]] = val1.get(nums1[i], 0) + 1 # nums1中数值的种类与出现的次数\n for i in range(len(nums2)):\n if nums2[i] in val1.keys() and val1[nums2[i]] > 0:\n ans.append(nums2[i])\n val1[nums2[i]] -= 1\n return ans\n","repo_name":"Fyw1988/Leetcode","sub_path":"哈希表/350.py","file_name":"350.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11694541657","text":"from 
django.conf.urls import patterns, include, url\nfrom .views import *\nurlpatterns = patterns('',\n # Examples:\n #url(r'^$', index.as_view(),name='home'),\n #********************Producto*********************************************\n url(r'^$', index.as_view(),name='home'),\n url(r'^registrar/$', registrarProducto.as_view(),name='crearProducto'),\n url(r'^modificar/(?P[\\d]+)$', modificarProducto.as_view(),name='editarProducto'),\n url(r'^eliminar/(?P[\\d]+)$', eliminarProducto.as_view(),name='eliminarProducto'),\n url(r'^listar/$', listarProducto.as_view(),name='listarProducto'),\n url(r'^buscar/$', buscarProducto.as_view(),name='buscarProducto'),\n url(r'^generar_pdf/$','apps.productos.views.generar_pdf', name='pdf_Productos'),\n \n \n)","repo_name":"mangelsiguenza/HotelRCA","sub_path":"RCASISTEMM/apps/productos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4978316347","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nyield from 構文\n\n* https://www.python.org/dev/peps/pep-0380/\n\"\"\"\n\ndef g1(n):\n v = None\n for i in range(n):\n v = 0 if v is None else v\n v = yield i + v\n \n\ndef g2_delegate(n):\n \"\"\"\n send() メソッドはこのジェネレーターに値を送るため、\n ここで値を受け渡すような実装にしないといけない => g2_delegate_kai を参照\n \"\"\"\n for i in g1(n):\n yield i\n\n\ndef g2_delegate_kai(n):\n \"\"\"\n 途端にジェネレーターの委譲が難しくなった!\n \"\"\"\n g = g1(n)\n v = yield next(g)\n while True:\n v = yield g.send(v)\n\n\ndef g2_new_syntax(n):\n yield from g1(n)\n\n\ndef main():\n gd = g2_delegate(3)\n print(next(gd))\n print(gd.send(2))\n print(gd.send(4))\n\n print('-' * 72)\n\n gn = g2_new_syntax(3)\n print(next(gn))\n print(gn.send(2))\n print(gn.send(4))\n\n print('-' * 72)\n\n gdk = g2_delegate_kai(3)\n print(next(gdk))\n print(gdk.send(2))\n print(gdk.send(4))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"t2y/python-study","sub_path":"Python36ReleaseParty/async/yield_from2.py","file_name":"yield_from2.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"18799862015","text":"import numpy as np\n\n\ndef sum_squares_error(y, t):\n return 0.5 * np.sum((y - t) ** 2)\n\n\ndef cross_entropy_error(y, t):\n delta = 1e-7 # np.log 함수에 0이 들어가지 않도록 아주 작은 값 더함\n return -np.sum(t * np.log(y + delta))\n\n\nt = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\ny1 = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]\ny2 = [0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0]\nprint(sum_squares_error(np.array(y1), np.array(t))) # 첫 번째 추정 결과가 오차가 더 작으니 정답일 가능성 높음\nprint(sum_squares_error(np.array(y2), np.array(t)))\nprint(cross_entropy_error(np.array(y1), np.array(t))) # 첫 번째 추정 결과가 오차가 더 작으니 정답일 가능성 높음\nprint(cross_entropy_error(np.array(y2), np.array(t)))\n","repo_name":"rosakim83/Deep-Learning-from-Scratch","sub_path":"4-Neural-Network-Training/Ex01-SSE-CEE.py","file_name":"Ex01-SSE-CEE.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20884829903","text":"from bs4 import BeautifulSoup\nfrom glob import glob\nimport networkx as nx\nfrom networkx.drawing.nx_pydot import write_dot\n\n\ndef find_links(filename):\n with open(filename) as f:\n content = f.read()\n\n soup = BeautifulSoup(content, \"html.parser\")\n linked_files = []\n for link in soup.find_all(\"a\"):\n linked_file = link.get(\"href\")\n 
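# note: link.get may return None when an anchor has no href attribute\n        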
linked_files.append(linked_file)\n\n    return linked_files\n\nif __name__ == \"__main__\":\n    files = glob(\"*.html\")\n    note_graph = nx.Graph()\n    for f in files:\n        if f.startswith((\"tag_\", \"gallery\")):\n            continue\n        linked_files = find_links(f)\n        for link in linked_files:\n            if not link.startswith((\"http\", \"#\", \"assets\", \"tag_index\", \"gallery\")):\n                print(f\"{f} -> {link}\")\n                note_graph.add_edge(f, link)\n\n\n    write_dot(note_graph, \"links.dot\")\n","repo_name":"justinpinkney/note_builder","sub_path":"note_builder/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"22127862584","text":"from asgiref.sync import async_to_sync, sync_to_async\nfrom channels.exceptions import DenyConnection\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom apps.channel_practice.models import Ip\n\n\nclass MyConsumer(AsyncWebsocketConsumer):\n\n    async def connect(self):\n        self.group_name=\"dashboard\"\n        await self.channel_layer.group_add(self.group_name,self.channel_name,)\n        await self.accept()\n\n\n\n    async def disconnect(self, code):\n        await self.channel_layer.group_discard(\n            self.group_name,self.channel_name\n        )\n\n    async def receive(self, text_data):\n\n        await self.channel_layer.group_send(\n            self.group_name,\n            {\n                'type':'deprocessing',\n                'data':text_data,\n            }\n\n        )\n\n        print('>>>>',text_data)\n\n\n    async def deprocessing(self,event):\n\n        await self.send(event['data'])\n\n\n\nclass TestConsumer(AsyncWebsocketConsumer):\n    groupname = 'notice'\n\n    async def connect(self):\n        #getting ip address of client\n        self.ip=self.scope['client'][0]\n\n\n        try:\n            #check the ip is already present in the database; await so ObjectDoesNotExist surfaces here\n            self.device = await sync_to_async(Ip.objects.get)(address=self.ip)\n        except ObjectDoesNotExist:\n            #if the ip address does not exist, deny the connection\n            raise DenyConnection(\"Invalid User\")\n\n        await self.channel_layer.group_add(self.groupname, self.channel_name)\n        await self.accept()\n        print(self.scope['client'][0])\n\n    async def disconnect(self, code):\n        await self.channel_layer.group_discard(self.groupname,self.channel_name)\n\n    async def receive(self, text_data=None, bytes_data=None):\n        await self.channel_layer.group_send(\n            self.groupname,{\n                'type':'rahi',\n                'data':text_data,\n            }\n        )\n        print(\">>>\",text_data)\n\n    async def rahi(self,event):\n        await self.send(event['data'])\n\n    #comes from models post_save signal\n    async def sendNotice(self,event):\n        await self.send(text_data=event['notice'])\n\n","repo_name":"icerahi/dj_channel_practice","sub_path":"apps/channel_practice/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"73746689050","text":"import glob\nfrom typing import Callable, Any, TypedDict\n\nfrom matplotlib import pyplot as plt\n\nfrom e2e import load_raw_dataset, DetectionDataRecord, load_data\nfrom e2e_detectors import E2EDetector\nfrom visualization import plot_samples\n\n\ndef load_sbs_dataset(dataset: str) -> list[DetectionDataRecord]:\n    ret: list[DetectionDataRecord] = []\n    path_to = f\"./sbs_dataset/{dataset}/\"\n    bin_files = [x.split('/')[-1] for x in glob.glob(f\"{path_to}*.bin\")]\n    cal_files = [x.split('/')[-1] for x in glob.glob(f\"{path_to}*.cal\")]\n    for bix, b in enumerate(bin_files):\n        common_part = b[0:-4]\n        matching_cal_name = f\"{common_part}_CD.cal\"\n        for 
idx, c in enumerate(cal_files):\n if c == matching_cal_name:\n ret.append({\n \"data_path\": f\"{path_to}{b}\",\n \"calibration_path\": f\"{path_to}{c}\",\n \"detection_path\": None\n })\n bin_files[bix] = \"USED UP NAME\"\n cal_files[idx] = \"USED UP NAME\"\n break\n return ret\n\n\n# Types of dissimilarity\n# Additional detections\n# Misaligned detections (+- 10 samples are permitted)\n\n\nclass SBSRecordResult(TypedDict):\n filename: str\n detector1: list[int]\n detector2: list[int]\n detector1_has_additional: bool\n detector2_has_additional: bool\n has_misaligned: bool\n\n\nclass SBSRunnerResult(TypedDict):\n detector1_name: str\n detector2_name: str\n dataset_name: str\n record_results: list[SBSRecordResult]\n\n\ndef get_d1_additonal_rate(result: SBSRunnerResult):\n cnt = 0\n for r in result['record_results']:\n if r['detector1_has_additional']:\n cnt += 1\n return cnt / len(result['record_results'])\n\n\ndef get_d2_additonal_rate(result: SBSRunnerResult):\n cnt = 0\n for r in result['record_results']:\n if r['detector2_has_additional']:\n cnt += 1\n return cnt / len(result['record_results'])\n\n\ndef get_misaligned_rate(result: SBSRunnerResult):\n cnt = 0\n for r in result['record_results']:\n if r['has_misaligned']:\n cnt += 1\n return cnt / len(result['record_results'])\n\n\nclass SBSRunner:\n def __init__(self, name: str, dataset: str,\n detector_1_builder: Callable[[Any], E2EDetector],\n detector_1_args: dict,\n detector_2_builder: Callable[[Any], E2EDetector],\n detector_2_args: dict):\n main_dataset = load_sbs_dataset(dataset)\n # main_e2e_dataset = load_raw_dataset(dataset)\n # not_dataset = load_raw_dataset(\"not\")\n self.name = name\n self.dataset_name = dataset\n self.dataset = main_dataset\n # self.dataset = main_dataset + main_e2e_dataset + not_dataset\n self.detector_1_builder = detector_1_builder\n self.detector_2_builder = detector_2_builder\n self.detector_1_args = detector_1_args\n self.detector_2_args = detector_2_args\n\n def run(self) -> SBSRunnerResult:\n db1 = lambda a=self.detector_1_args: self.detector_1_builder(**a)\n db2 = lambda a=self.detector_2_args: self.detector_2_builder(**a)\n\n final_result: SBSRunnerResult = SBSRunnerResult(\n detector1_name=db1().get_name(),\n detector2_name=db2().get_name(),\n dataset_name=self.dataset_name,\n record_results=[]\n )\n\n records = self.dataset\n for idx, rdr in enumerate(records):\n filehash = (rdr[\"data_path\"].split('/')[-1]).split('_')[0]\n\n d1: E2EDetector = db1()\n d2: E2EDetector = db2()\n\n res = SBSRecordResult(\n filename=filehash,\n detector1=[],\n detector2=[],\n detector1_has_additional=False,\n detector2_has_additional=False,\n has_misaligned=False\n )\n\n samples = load_data(rdr[\"data_path\"], rdr[\"calibration_path\"])\n for sample in samples:\n r1 = d1.add_sample(sample)\n r2 = d2.add_sample(sample)\n\n min_allowed = 200\n max_allowed = len(samples) - 200\n\n if r1 is not None:\n # Since ROCKET detector can have issues triggering at the very edges\n # Ignore edge detections for both of them to ensure fairness\n if r1 > min_allowed and r1 < max_allowed:\n res['detector1'].append(r1)\n\n if r2 is not None:\n # Since ROCKET detector can have issues triggering at the very edges\n # Ignore edge detections for both of them to ensure fairness\n if r2 > min_allowed and r2 < max_allowed:\n res['detector2'].append(r2)\n\n # Check for additional\n if len(res['detector1']) > len(res['detector2']):\n res['detector1_has_additional'] = True\n\n if len(res['detector2']) > len(res['detector1']):\n 
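# detector 2 fired more often than detector 1 on this record\n                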
res['detector2_has_additional'] = True\n\n # Check for misaligned\n for x in res['detector1']:\n for y in res['detector2']:\n srt = [x, y]\n srt.sort()\n delta = srt[1] - srt[0]\n if delta > 10:\n res['has_misaligned'] = True\n break\n if res['has_misaligned']:\n break\n\n if res['detector1_has_additional'] or res['detector2_has_additional'] or res['has_misaligned']:\n print(\"Saving chart...\")\n # Save charts on discrepancies\n # Chart should cover area between all detections\n all_detections = res['detector1'] + res['detector2']\n all_detections.sort()\n start_idx = all_detections[0] - 200\n end_idx = all_detections[-1] + 100\n if start_idx < 0:\n start_idx = 0\n if end_idx >= len(samples):\n end_idx = len(samples) - 1\n\n d1_markers = res['detector1'].copy()\n d2_markers = res['detector2'].copy()\n for i in range(len(d1_markers)):\n d1_markers[i] -= start_idx\n for i in range(len(d2_markers)):\n d2_markers[i] -= start_idx\n\n fig, ax = plot_samples(samples[start_idx:end_idx], d1_markers, d2_markers)\n ds = rdr[\"data_path\"].split('/')[-2]\n fig.suptitle(f\"{ds} {filehash} D1A:{res['detector1_has_additional']} D2A:{res['detector2_has_additional']} MIS:{res['has_misaligned']}\", fontsize=14)\n fig.savefig(f\"DIS-{self.name}-{filehash}.png\", dpi=100)\n plt.close(fig)\n\n print(f\"Finished {idx}/{len(records)} {filehash} with {res}\")\n final_result['record_results'].append(res)\n\n return final_result","repo_name":"gampixi/bachelors-thesis-public","sub_path":"sbs.py","file_name":"sbs.py","file_ext":"py","file_size_in_byte":6978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37173420123","text":"from tkinter import*\r\nfrom tkinter import ttk\r\nfrom PIL import Image,ImageTk\r\nfrom tkinter import messagebox\r\nimport mysql.connector\r\nimport cv2\r\nimport os\r\nimport csv\r\nfrom tkinter import filedialog\r\n\r\nmydata=[]\r\nclass Attendance:\r\n def __init__(self,root):\r\n self.root=root\r\n self.root.geometry(\"1530x790+0+0\")\r\n self.root.title(\"Face Recognition System\")\r\n\r\n # ''''''''''''''''''''''''''''''''' Variables ''''''''''''''''''''''''''''''''''''''''''''''''''\r\n self.var_attend_id=StringVar()\r\n self.var_attend_roll=StringVar()\r\n self.var_attend_name=StringVar()\r\n self.var_attend_dep=StringVar()\r\n self.var_attend_time=StringVar()\r\n self.var_attend_date=StringVar()\r\n self.var_attend_attendance=StringVar()\r\n\r\n # FIRST IMAGE\r\n img=Image.open(r\"New folder\\Attendance1.png\")\r\n img=img.resize((800,200),Image.ANTIALIAS)\r\n self.photoimg=ImageTk.PhotoImage(img)\r\n\r\n f_lbl=Label(self.root,image=self.photoimg)\r\n f_lbl.place(x=0,y=0,width=800,height=200)\r\n\r\n # SECOND IMAGE\r\n img1=Image.open(r\"New folder\\Attendance2.jpg\")\r\n img1=img1.resize((800,200),Image.ANTIALIAS)\r\n self.photoimg1=ImageTk.PhotoImage(img1)\r\n\r\n f_lbl=Label(self.root,image=self.photoimg1)\r\n f_lbl.place(x=800,y=0,width=800,height=200)\r\n\r\n # BACKGROUND IMAGE\r\n img3=Image.open(r\"New folder\\Studen.jpg\")\r\n img3=img3.resize((1530,710),Image.ANTIALIAS)\r\n self.photoimg3=ImageTk.PhotoImage(img3)\r\n\r\n bg_img=Label(self.root,image=self.photoimg3)\r\n bg_img.place(x=0,y=200,width=1530,height=710)\r\n\r\n title_lbl=Label(bg_img,text=\"ATTENDANCE MANAGEMENT SYSTEM\",font=(\"times new roman\",35,\"bold\"),bg=\"white\",fg= \"darkgreen\")\r\n title_lbl.place(x=-2,y=-2,width=1530,height=45)\r\n\r\n main_frame=Frame(bg_img,bd=2,bg=\"white\")\r\n main_frame.place(x=10,y=55,width=1500,height=600)\r\n\r\n 
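# the window is split into two panels: a details form on the left and the report table on the right\r\n        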
# LEFT LABEL FRAME\r\n        Left_frame=LabelFrame(main_frame,bd=2,bg=\"white\",relief=RIDGE,text=\"Student Attendance Details\",font=(\"times new roman\",12,\"bold\"))\r\n        Left_frame.place(x=10,y=10,width=730,height=580)\r\n\r\n        img_left=Image.open(r\"New folder\\Attendance3.jpg\")\r\n        img_left=img_left.resize((720,130),Image.ANTIALIAS)\r\n        self.photoimg_left=ImageTk.PhotoImage(img_left)\r\n\r\n        f_lbl=Label(Left_frame,image=self.photoimg_left)\r\n        f_lbl.place(x=5,y=0,width=720,height=130)\r\n\r\n        left_inside_frame=Frame(Left_frame,bd=2,relief=RIDGE,bg=\"white\")\r\n        left_inside_frame.place(x=0,y=135,width=720,height=360)\r\n\r\n        # Label and Entry\r\n\r\n        # Attendance ID\r\n        attendanceID_label=Label(left_inside_frame,text=\"Attendance ID:\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        attendanceID_label.grid(row=0,column=0,padx=10,pady=5,sticky=W)\r\n\r\n        attendanceID_entry=ttk.Entry(left_inside_frame,width=22,textvariable=self.var_attend_id,font=(\"times new roman\",11,\"bold\"))\r\n        attendanceID_entry.grid(row=0,column=1,padx=10,pady=5,sticky=W)\r\n\r\n        # ROLL NO\r\n        roll_no_label=Label(left_inside_frame,text=\"Roll No:\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        roll_no_label.grid(row=0,column=2,padx=10,pady=5,sticky=W)\r\n\r\n        roll_no_entry=ttk.Entry(left_inside_frame,width=22,textvariable=self.var_attend_roll,font=(\"times new roman\",11,\"bold\"))\r\n        roll_no_entry.grid(row=0,column=3,padx=10,pady=5,sticky=W)\r\n\r\n        # STUDENT NAME\r\n        attendanceName_label=Label(left_inside_frame,text=\"Student Name:\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        attendanceName_label.grid(row=1,column=0,padx=10,pady=5,sticky=W)\r\n\r\n        attendanceName_entry=ttk.Entry(left_inside_frame,width=22,textvariable=self.var_attend_name,font=(\"times new roman\",11,\"bold\"))\r\n        attendanceName_entry.grid(row=1,column=1,padx=10,pady=5,sticky=W)\r\n\r\n        # DEPARTMENT\r\n        attendance_dep_label=Label(left_inside_frame,text=\"Department\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        attendance_dep_label.grid(row=1,column=2,padx=10)\r\n\r\n        attendance_dep_entry=ttk.Entry(left_inside_frame,width=22,textvariable=self.var_attend_dep,font=(\"times new roman\",11,\"bold\"))\r\n        attendance_dep_entry.grid(row=1,column=3,padx=10,pady=5,sticky=W)\r\n\r\n        # TIME\r\n        time_label=Label(left_inside_frame,text=\"Time\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        time_label.grid(row=2,column=0,padx=10)\r\n\r\n        time_entry=ttk.Entry(left_inside_frame,width=22,textvariable=self.var_attend_time,font=(\"times new roman\",11,\"bold\"))\r\n        time_entry.grid(row=2,column=1,padx=10,pady=5,sticky=W)\r\n\r\n        # DATE\r\n        date_label=Label(left_inside_frame,text=\"Date\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        date_label.grid(row=2,column=2,padx=10)\r\n\r\n        date_entry=ttk.Entry(left_inside_frame,width=22,textvariable=self.var_attend_date,font=(\"times new roman\",11,\"bold\"))\r\n        date_entry.grid(row=2,column=3,padx=10,pady=5,sticky=W)\r\n\r\n        # ATTENDANCE\r\n        atten_label=Label(left_inside_frame,text=\"Attendance Status\",font=(\"times new roman\",11,\"bold\"),bg=\"white\")\r\n        atten_label.grid(row=3,column=0,padx=10,sticky=W)\r\n\r\n        atten_combo=ttk.Combobox(left_inside_frame,textvariable=self.var_attend_attendance,font=(\"times new roman\",11,\"bold\"),state=\"readonly\",width=22)\r\n        atten_combo[\"values\"]=(\"Status\",\"Present\",\"Absent\")\r\n        atten_combo.current(0)\r\n        atten_combo.grid(row=3,column=1,padx=2,pady=10,sticky=W)\r\n\r\n        # Buttons Frame\r\n        
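# Import/Export move attendance records between the table and CSV files; Reset clears the form fields\r\n        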
btn_frame=LabelFrame(left_inside_frame,bd=2,bg=\"white\",relief=RIDGE)\r\n btn_frame.place(x=0,y=300,width=715,height=35)\r\n\r\n save_btn=Button(btn_frame,text=\"Import csv\",command=self.importCsv,width=17,font=(\"times new roman\",13,\"bold\"),bg=\"blue\",fg=\"white\")\r\n save_btn.grid(row=0,column=0)\r\n\r\n update_btn=Button(btn_frame,text=\"Export csv\",command=self.exportCsv,width=17,font=(\"times new roman\",13,\"bold\"),bg=\"blue\",fg=\"white\")\r\n update_btn.grid(row=0,column=1)\r\n\r\n delete_btn=Button(btn_frame,text=\"Update\",width=17,font=(\"times new roman\",13,\"bold\"),bg=\"blue\",fg=\"white\")\r\n delete_btn.grid(row=0,column=2)\r\n\r\n reset_btn=Button(btn_frame,text=\"Reset\",command=self.reset_data,width=17,font=(\"times new roman\",13,\"bold\"),bg=\"blue\",fg=\"white\")\r\n reset_btn.grid(row=0,column=3)\r\n\r\n\r\n\r\n # RIGHT LABEL FRAME \r\n Right_frame=LabelFrame(main_frame,bd=2,bg=\"white\",relief=RIDGE,text=\"Attendance Details\",font=(\"times new roman\",12,\"bold\"))\r\n Right_frame.place(x=750,y=10,width=730,height=580)\r\n\r\n # img_right=Image.open(r\"New folder\\Attendance4.png\")\r\n # img_right=img_right.resize((720,130),Image.ANTIALIAS)\r\n # self.photoimg_right=ImageTk.PhotoImage(img_right)\r\n\r\n # f_lbl=Label(Right_frame,image=self.photoimg_right)\r\n # f_lbl.place(x=5,y=0,width=720,height=130)\r\n\r\n table_frame=LabelFrame(Right_frame,bd=2,bg=\"white\",relief=RIDGE)\r\n table_frame.place(x=5,y=5,width=710,height=455)\r\n\r\n # ''''''''''''''''''''''''''''' Scroll Bar and Table '''''''''''''''''''''''''''''''''''''''''''''''''''\r\n scroll_x=ttk.Scrollbar(table_frame,orient=HORIZONTAL)\r\n scroll_y=ttk.Scrollbar(table_frame,orient=VERTICAL)\r\n\r\n self.AttendanceReportTable=ttk.Treeview(table_frame,column=(\"id\",\"roll\",\"name\",\"department\",\"time\",\"date\",\"attendance\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\r\n\r\n scroll_x.pack(side=BOTTOM,fill=X)\r\n scroll_y.pack(side=RIGHT,fill=Y)\r\n\r\n scroll_x.config(command=self.AttendanceReportTable.xview)\r\n scroll_y.config(command=self.AttendanceReportTable.yview)\r\n\r\n self.AttendanceReportTable.heading(\"id\",text=\"Attendance ID\")\r\n self.AttendanceReportTable.heading(\"roll\",text=\"Roll No\")\r\n self.AttendanceReportTable.heading(\"name\",text=\"Name\")\r\n self.AttendanceReportTable.heading(\"department\",text=\"Department\")\r\n self.AttendanceReportTable.heading(\"time\",text=\"Time\")\r\n self.AttendanceReportTable.heading(\"date\",text=\"Date\")\r\n self.AttendanceReportTable.heading(\"attendance\",text=\"Attendance\")\r\n\r\n self.AttendanceReportTable[\"show\"]=\"headings\"\r\n self.AttendanceReportTable.column(\"id\",width=100)\r\n self.AttendanceReportTable.column(\"roll\",width=100)\r\n self.AttendanceReportTable.column(\"name\",width=100)\r\n self.AttendanceReportTable.column(\"department\",width=100)\r\n self.AttendanceReportTable.column(\"time\",width=100)\r\n self.AttendanceReportTable.column(\"date\",width=100)\r\n self.AttendanceReportTable.column(\"attendance\",width=100)\r\n\r\n self.AttendanceReportTable.pack(fill=BOTH,expand=1)\r\n\r\n self.AttendanceReportTable.bind(\"\",self.get_cursor)\r\n\r\n # '''''''''''''''''''''''''''''''''''''''''''''''''''' Fetch Data ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\r\n def fetchData(self,rows):\r\n self.AttendanceReportTable.delete(*self.AttendanceReportTable.get_children())\r\n for i in rows:\r\n self.AttendanceReportTable.insert(\"\",END,values=i)\r\n\r\n \r\n # Import CSV\r\n def 
importCsv(self):\r\n global mydata\r\n mydata.clear()\r\n fln=filedialog.askopenfilename(initialdir=os.getcwd(),title=\"Open CSV\",filetypes=((\"CSV File\",\"*.csv\"),(\"All File\",\"*.*\")),parent=self.root)\r\n with open(fln) as myfile:\r\n csvread=csv.reader(myfile,delimiter=\",\")\r\n for i in csvread:\r\n mydata.append(i)\r\n self.fetchData(mydata)\r\n \r\n # Export CSV \r\n def exportCsv(self):\r\n try:\r\n if len(mydata)<1:\r\n messagebox.showerror(\"No Data\",\"No Data found to export\",parent=self.root)\r\n return False\r\n fln=filedialog.asksaveasfilename(initialdir=os.getcwd(),title=\"Open CSV\",filetypes=((\"CSV File\",\"*.csv\"),(\"All File\",\"*.*\")),parent=self.root)\r\n with open(fln,\"w\",newline=\"\") as myfile:\r\n exp_write=csv.writer(myfile,delimiter=\",\")\r\n for i in mydata:\r\n exp_write.writerow(i)\r\n messagebox.showinfo(\"Data Export\",\"Your data exported to \"+os.path.basename(fln)+\" Sucessfully\")\r\n\r\n except Exception as es:\r\n messagebox.showerror(\"Error\",f\"Due To :{str(es)}\",parent=self.root)\r\n\r\n \r\n\r\n def get_cursor(self,event=\"\"):\r\n cursor_row=self.AttendanceReportTable.focus()\r\n content=self.AttendanceReportTable.item(cursor_row)\r\n rows=content[\"values\"]\r\n self.var_attend_id.set(rows[0])\r\n self.var_attend_roll.set(rows[1])\r\n self.var_attend_name.set(rows[2])\r\n self.var_attend_dep.set(rows[3])\r\n self.var_attend_time.set(rows[4])\r\n self.var_attend_date.set(rows[5])\r\n self.var_attend_attendance.set(rows[6])\r\n\r\n def reset_data(self):\r\n self.var_attend_id.set(\"\")\r\n self.var_attend_roll.set(\"\")\r\n self.var_attend_name.set(\"\")\r\n self.var_attend_dep.set(\"\")\r\n self.var_attend_time.set(\"\")\r\n self.var_attend_date.set(\"\")\r\n self.var_attend_attendance.set(\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root=Tk()\r\n obj=Attendance(root)\r\n root.mainloop()\r\n\r\n ","repo_name":"Jain0412/face-recognition-system","sub_path":"project 1/attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":11381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74077498651","text":"from PyQt5 import QtWidgets\r\n\r\nimport sys\r\nimport ui_login\r\n\r\nclass Main():\r\n def __init__(self):\r\n app = QtWidgets.QApplication(sys.argv)\r\n prijava = ui_login.Ui_Prijava()\r\n prijava.show()\r\n sys.exit(app.exec_())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main = Main()","repo_name":"belivk1982/ShoppingApp","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21148915097","text":"import email\nfrom pprint import pprint\nfrom termcolor import colored\n\nimport errno\nimport json\nimport os\nimport random\nimport re\nimport shlex\nimport signal\nimport subprocess\nimport sys\nimport time\nimport toml\n\n\ndef get_rita_defaults():\n return toml.load(open(\"../settings/test.toml\"))\n\n\ndef get_rita_exit_defaults():\n return toml.load(open(\"../settings/test_exit.toml\"))\n\n\ndef save_rita_settings(id, x):\n file = open(\"rita-settings-n{}.toml\".format(id), \"w\")\n toml.dump(x, file)\n file.flush()\n os.fsync(file)\n file.close()\n os.system(\"sync\")\n pass\n\n\ndef get_rita_settings(id):\n return toml.load(open(\"rita-settings-n{}.toml\".format(id)))\n\n\ndef switch_binaries(node_id, VERBOSE, RITA, RITA_EXIT, COMPAT_LAYOUT, COMPAT_LAYOUTS, RITA_A, 
RITA_EXIT_A, RITA_B, RITA_EXIT_B):\n \"\"\"\n Switch the Rita and exit Rita binaries assigned to node with ID\n :data:`node_id`.\n\n :param int node_id: Node ID for which we're changing binaries\n \"\"\"\n if VERBOSE:\n print((\"Previous binary paths:\\nRITA:\\t\\t{}\\nRITA_EXIT:\\t{}\\n\").format(\n RITA, RITA_EXIT))\n\n release = COMPAT_LAYOUTS[COMPAT_LAYOUT][node_id - 1]\n\n if release == 'a':\n if VERBOSE:\n print(\"Using A for node {}...\".format(node_id))\n RITA = RITA_A\n RITA_EXIT = RITA_EXIT_A\n elif release == 'b':\n if VERBOSE:\n print(\"Using B for node {}...\".format(node_id))\n RITA = RITA_B\n RITA_EXIT = RITA_EXIT_B\n else:\n print(\"Unknown revision kind \\\"{}\\\" for node {}\".format(release, node_id))\n sys.exit(1)\n\n if VERBOSE:\n print((\"New binary paths:\\nRITA:\\t\\t{}\\nRITA_EXIT:\\t{}\\n\").format(\n RITA, RITA_EXIT))\n\n return (RITA, RITA_EXIT)\n\n\ndef register_to_exit(node):\n os.system((\"ip netns exec netlab-{} curl -XPOST \" +\n \"127.0.0.1:4877/exits/exit_a/register\").format(node.id))\n\n\ndef email_verif(node):\n email_text = read_email(node)\n\n code = re.search(r\"\\[([0-9]+)\\]\", email_text).group(1)\n\n print(\"Email code for node {} is {}\".format(node.id, code))\n\n exec_or_exit((\"ip netns exec netlab-{} curl -XPOST \" +\n \"127.0.0.1:4877/exits/exit_a/verify/{}\").format(node.id, code))\n exec_or_exit((\"ip netns exec netlab-{} curl \" +\n \"127.0.0.1:4877/settings\").format(node.id))\n\n\ndef read_email(node):\n id = node.id\n # TODO: this is O(n^2)\n for mail in os.listdir(\"mail\"):\n with open(os.path.join(\"mail\", mail)) as mail_file_handle:\n\n mail = email.message_from_file(mail_file_handle)\n to_value = mail.get(\"To\")\n if to_value == \"{}@example.com\".format(id):\n message = mail.get_payload()\n if \"low balance\" in message:\n continue\n return message\n raise Exception(\"cannot find email for node {}\".format(id))\n\n\ndef assert_test(x, description, verbose=True, global_fail=True):\n if verbose:\n if x:\n print(colored(\" + \", \"green\") + \"{} Succeeded\".format(description))\n else:\n sys.stderr.write(colored(\" + \", \"red\") +\n \"{} Failed\\n\".format(description))\n\n if global_fail and not x:\n TEST_PASSES = False\n return x\n\n\ndef exec_no_exit(command, blocking=True, delay=0.01):\n \"\"\"\n Executes a command and ignores it's output.\n\n :param str command: A string containing the command to run\n :param bool blocking: Decides whether to block until :data:`command` exits\n :param float delay: How long to wait before obtaining the return value\n (useful in non-blocking mode where e.g. 
a ``cat`` command with a\n non-existent file would very likely fail before, say, 100ms pass)\n \"\"\"\n process = subprocess.Popen(shlex.split(command))\n\n time.sleep(delay)\n\n if not blocking:\n # If it didn't fail yet we get a None\n retval = process.poll() or 0\n else:\n retval = process.wait()\n\n if retval != 0:\n try:\n errname = errno.errorcode[retval]\n except KeyError: # The error code doesn't have a canonical name\n errname = ''\n print('Command \"{c}\" failed: \"{strerr}\" (code {rv})'.format(\n c=command,\n # strerror handles unknown errors gracefuly\n strerr=os.strerror(retval),\n rv=errname,\n file=sys.stderr\n )\n )\n\n\ndef exec_or_exit(command, blocking=True, delay=0.01):\n \"\"\"\n Executes a command and terminates the program if it fails.\n\n :param str command: A string containing the command to run\n :param bool blocking: Decides whether to block until :data:`command` exits\n :param float delay: How long to wait before obtaining the return value\n (useful in non-blocking mode where e.g. a ``cat`` command with a\n non-existent file would very likely fail before, say, 100ms pass)\n \"\"\"\n process = subprocess.Popen(shlex.split(command))\n\n time.sleep(delay)\n\n if not blocking:\n # If it didn't fail yet we get a None\n retval = process.poll() or 0\n else:\n retval = process.wait()\n\n if retval != 0:\n try:\n errname = errno.errorcode[retval]\n except KeyError: # The error code doesn't have a canonical name\n errname = ''\n print('Command \"{c}\" failed: \"{strerr}\" (code {rv})'.format(\n c=command,\n # strerror handles unknown errors gracefuly\n strerr=os.strerror(retval),\n rv=errname,\n file=sys.stderr\n )\n )\n sys.exit(retval)\n\n\ndef cleanup():\n os.system(\"rm -rf *.db *.log *.pid private-key* mail\")\n os.system(\"mkdir mail\")\n os.system(\"sync\")\n # TODO: This is very inconsiderate\n os.system(\"killall babeld rita rita_exit iperf\")\n\n\ndef teardown():\n os.system(\"rm -rf *.pid private-key*\")\n os.system(\"sync\")\n # TODO: This is very inconsiderate\n os.system(\"killall babeld rita rita_exit iperf\")\n\n\ndef prep_netns(id):\n exec_or_exit(\n \"ip netns exec netlab-{} sysctl -w net.ipv4.ip_forward=1\".format(id))\n exec_or_exit(\n \"ip netns exec netlab-{} sysctl -w net.ipv6.conf.all.forwarding=1\".format(id))\n exec_or_exit(\"ip netns exec netlab-{} ip link set up lo\".format(id))\n exec_or_exit(\"ip netns exec netlab-{} nft create table inet fw4\".format(id))\n exec_or_exit(\"ip netns exec netlab-{} nft add chain inet fw4 input {{ type filter hook input priority filter; policy accept; }}\".format(id))\n exec_or_exit(\"ip netns exec netlab-{} nft add chain inet fw4 forward {{ type filter hook forward priority filter; policy accept; }}\".format(id))\n exec_or_exit(\"ip netns exec netlab-{} nft add chain inet fw4 output {{ type filter hook output priority filter; policy accept; }}\".format(id))\n\n\ndef traffic_diff(a, b):\n print(a, b)\n return {key: b[key] - a.get(key, 0) for key in b.keys()}\n\n\ndef fuzzy_traffic(a, b, VERBOSE):\n retval = b - 5e8 - abs(a) * 0.1 < a < b + 5e8 + abs(a) * 0.1\n if VERBOSE is not None:\n print('fuzzy_traffic({a}, {b}) is {retval}'.format(a=a, b=b,\n retval=retval))\n print(('Expression: {b} - 5e8 - abs({a}) * 0.1 < {a} < {b} + 5e8 + ' +\n 'abs({a}) * 0.1').format(a=a, b=b))\n\n return retval\n\n\ndef check_log_contains(f, x):\n if x in open(f).read():\n return True\n else:\n return False\n\n\ndef start_babel(node, log, scale, BABELD):\n hello_interval = 1\n update_interval = 1\n if scale:\n hello_interval = 1\n 
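# note: the scale branch currently mirrors the default 1 second timers\n        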
update_interval = 1\n    exec_or_exit(\n        (\n            \"ip netns exec netlab-{id} {babeld_path} \" +\n            \"-I babeld-n{id}.pid \" +\n            \"-d {log} \" +\n            \"-r \" +\n            \"-L babeld-n{id}.log \" +\n            \"-H {hello_interval} \" +\n            \"-G 6872 \" +\n            '-C \"default enable-timestamps true\" ' +\n            '-C \"default update-interval {update_interval}\" ' +\n            \"-w lo\"\n        ).format(babeld_path=BABELD, ifaces=node.get_interfaces(), id=node.id, log=log, hello_interval=hello_interval, update_interval=update_interval),\n        blocking=False\n    )\n\n\ndef start_rita(node, dname, log, RITA, EXIT_SETTINGS):\n    id = node.id\n    settings = get_rita_defaults()\n\n    settings[\"network\"][\"mesh_ip\"] = \"fd00::{}\".format(id)\n\n    settings[\"network\"][\"wg_private_key_path\"] = \"{pwd}/private-key-{id}\".format(\n        id=id, pwd=dname)\n    settings[\"network\"][\"peer_interfaces\"] = node.get_veth_interfaces()\n    settings[\"payment\"][\"local_fee\"] = node.local_fee\n    settings[\"metric_factor\"] = 0 # We explicitly want to disregard quality\n    save_rita_settings(id, settings)\n    time.sleep(0.2)\n    os.system(\n        '(RUST_BACKTRACE=full RUST_LOG={log} ip netns exec netlab-{id} {rita} --config=rita-settings-n{id}.toml --platform=linux'\n        ' 2>&1 & echo $! > rita-n{id}.pid) | '\n        'grep -Ev \"|mio|tokio_core|tokio_reactor|hyper\" > rita-n{id}.log &'.format(id=id, rita=RITA,\n                                                                                                 pwd=dname, log=log)\n    )\n    time.sleep(1)\n\n    email = \"{}@example.com\".format(id)\n\n    # this works in travis if you're looking for it\n    # else:\n    #     time.sleep(1)\n    #     os.system(\"ip netns exec netlab-{id} curl -XPOST 127.0.0.1:4877/settings -H 'Content-Type: application/json' -i -d '{data}'\"\n    #               .format(id=id, data=json.dumps({\"exit_client\": EXIT_SETTINGS})))\n    #     time.sleep(1)\n    #     os.system(\"ip netns exec netlab-{id} curl -XPOST 127.0.0.1:4877/email -H 'Content-Type: application/json' -i -d '{data}'\"\n    #               .format(id=id, data=email)\n    os.system(\"ip netns exec netlab-{id} curl --retry 5 --retry-connrefused -m 60 -XPOST 127.0.0.1:4877/settings -H 'Content-Type: application/json' -i -d '{data}'\"\n              .format(id=id, data=json.dumps({\"exit_client\": EXIT_SETTINGS})))\n    os.system(\"ip netns exec netlab-{id} curl --retry 5 --retry-connrefused -m 60 -XPOST 127.0.0.1:4877/email -H 'Content-Type: application/json' -i -d '{data}'\"\n              .format(id=id, data=email))\n\n\ndef start_rita_exit(node, dname, RITA_EXIT):\n    id = node.id\n    settings = get_rita_exit_defaults()\n\n    settings[\"network\"][\"mesh_ip\"] = \"fd00::{}\".format(id)\n\n    settings[\"network\"][\"wg_private_key_path\"] = \"{pwd}/private-key-{id}\".format(\n        id=id, pwd=dname)\n    settings[\"network\"][\"peer_interfaces\"] = node.get_veth_interfaces()\n    settings[\"payment\"][\"local_fee\"] = node.local_fee\n    settings[\"metric_factor\"] = 0 # We explicitly want to disregard quality\n    save_rita_settings(id, settings)\r\n    time.sleep(0.2)\n    os.system(\n        '(RUST_BACKTRACE=full RUST_LOG=TRACE ip netns exec netlab-{id} {rita} --config=rita-settings-n{id}.toml'\n        ' 2>&1 & echo $! 
> rita-n{id}.pid) | '\n 'grep -Ev \"|mio|tokio_core|tokio_reactor|hyper\" > rita-n{id}.log &'.format(id=id, rita=RITA_EXIT,\n pwd=dname)\n )\n\n\ndef ip_to_num(ip):\n if ip == \"fd00::aabb\":\n return 0\n else:\n return int(ip.replace(\"fd00::\", \"\"))\n\n\ndef num_to_ip(num):\n if num == 0:\n return \"fd00::aabb\"\n else:\n return \"fd00::{}\".format(num)\n\n\ndef num_to_linklocal_ip(num):\n if num == 0:\n return \"fe80::\"\n else:\n return \"fe80::{}\".format(num)\n\n\ndef fuzzy_match(numA, numB):\n # ignore small debts\n if abs(numA) < 1000000 and abs(numB) < 1000000:\n return True\n # signs must match\n if numA > 0 and numB < 0 or numA < 0 and numB > 0:\n return False\n # 10%\n allowed_delta = 0.10\n high = 1 + allowed_delta\n low = 1 - allowed_delta\n\n if numA/numB > low and numA/numB < high:\n return True\n else:\n return False\n\n\ndef fuzzy_traffic_match(numA, numB):\n \"\"\"A matching scheme with error margins for Rita traffic, allows up to 5% lower or in the case of\n the paying party over-estimating (packet loss) it allows more\"\"\"\n # ignore small debts\n if abs(numA) < 1000000 and abs(numB) < 1000000:\n return True\n # signs must not match\n if numA > 0 and numB > 0 or numA < 0 and numB < 0:\n return False\n if numA >= 0:\n pos = numA\n neg = numB\n if numB >= 0:\n pos = numB\n neg = numA\n pos_abs = abs(pos)\n neg_abs = abs(neg)\n # 5%\n allowed_delta = 0.05\n high = 1 + allowed_delta\n low = 1 - allowed_delta\n\n # debt has been undercounted, the payer has a debt value less than\n # 95% of the node being paid\n undercounting = pos_abs < (neg_abs * low)\n # overcounting, this is not an error, but it is worth warning about\n # this should only happen if there is packet loss\n overcounting = pos_abs > (neg_abs * high)\n if overcounting:\n print(\"Payer is overpaying by {}%, this is correct if there was significant packet loss\".format(\n (pos_abs/neg_abs - 1) * 100))\n if undercounting:\n return False\n return True\n","repo_name":"althea-net/rita","sub_path":"legacy_integration_tests/integration-test-script/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13051,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"32"} +{"seq_id":"260946831","text":"\"\"\"\n@author: amber\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nfrom utils import * \nimport os \nfrom data_loader import SeqDataset, collate_fn\nfrom torch.utils.data import DataLoader \nfrom model import fc_layer,FGM\nfrom config_init import get_config\ntorch.cuda.manual_seed(1029)\ntorch.manual_seed(1029)\n\n# transfer list of tensors into one combined tensor \ndef transfer_list_tensor(b_x):\n batch_tensor = torch.zeros((len(b_x),b_x[0].size(0))) # [batch_size,1280]\n for row_id in range(len(b_x)):\n batch_tensor[row_id] = b_x[row_id]\n return batch_tensor\n\n\ndef adversarial_training(model,train_loader,device,optimizer,loss_fn,fgm_model): \n epoch_loss = 0.0 \n epoch_loss_adv = 0.0 \n train_num = 0.0 \n model.train()\n for idx,(b_x,batch_train_label) in enumerate(train_loader): \n batch_train_data = transfer_list_tensor(b_x).to(device) \n batch_y = batch_train_label.view(-1).float().to(device) \n optimizer.zero_grad() \n out = model(batch_train_data).view(-1) \n loss = loss_fn(out,batch_y) \n loss.backward()\n fgm_model.attack() \n out_attack = model(batch_train_data).view(-1) \n loss_adv = loss_fn(out_attack,batch_y) \n loss_adv.backward()\n fgm_model.restore()\n optimizer.step() \n train_num += batch_train_data.size(0)\n 
epoch_loss += loss.item() * batch_train_data.size(0)\n epoch_loss_adv += loss_adv.item() * batch_train_data.size(0)\n return epoch_loss/train_num, epoch_loss_adv/train_num \n \n \ndef predicting(test_loader,gnn_model,device):\n gnn_model.eval()\n total_preds = torch.Tensor()\n total_labels = torch.Tensor()\n with torch.no_grad():\n for idx,(batch_test_data,batch_test_label) in enumerate(test_loader):\n batch_test_data = transfer_list_tensor(batch_test_data).to(device)\n output = gnn_model(batch_test_data)\n total_preds = torch.cat((total_preds, output.cpu()), 0)\n total_labels = torch.cat((total_labels, batch_test_label.view(-1, 1).cpu()), 0)\n total_labels_arr = total_labels.numpy().flatten()\n total_preds_arr = total_preds.numpy().flatten() \n TP,FP,FN,TN,fpr,tpr,auc, aupr,f1_score, accuracy, recall, specificity, precision = get_metric(total_labels_arr, total_preds_arr)\n return TP,FP,FN,TN,fpr,tpr,auc, aupr,f1_score, accuracy, recall, specificity, precision\n\n\nif __name__ == \"__main__\":\n # get the hyperparameters \n \n config = get_config()\n raw_data_path = config.raw_data_path_topofallfeature\n n_splits = config.n_splits_topofallfeature \n kfold_root_path = config.kfold_root_path_topofallfeature\n model_saving_path = config.model_saving_path_topofallfeature\n train_batch_size = config.train_batch_size_topofallfeature\n test_batch_size = config.test_batch_size_topofallfeature\n num_epoches = config.num_epoches_topofallfeature\n cuda_name = config.cuda_name_topofallfeature\n drop_prob = config.drop_prob_topofallfeature\n n_output = config.n_output_topofallfeature\n lr = config.lr_topofallfeature \n weight_decay = config.weight_decay_topofallfeature \n \n #10-fold CV scheme \n test_auc = np.zeros(n_splits)\n test_aupr = np.zeros(n_splits)\n test_f1_score = np.zeros(n_splits)\n test_accuracy = np.zeros(n_splits)\n test_recall = np.zeros(n_splits)\n test_specificity = np.zeros(n_splits)\n test_precision = np.zeros(n_splits)\n \n #10-fold cross validation\n for i in range(n_splits): \n print(\"This is fold:\",i)\n print('-'*20)\n #form train_loader and test_loader for each fold\n \n fold_path = os.path.join(kfold_root_path,'fold'+str(i))\n model_file_name = 'model_dict_for_fold_{}.pkl'.format(i)\n model_file_path = os.path.join(model_saving_path,model_file_name)\n \n train_data_path = os.path.join(fold_path,'train_data.txt')\n test_data_path = os.path.join(fold_path,'test_data.txt')\n train_list = pd.read_csv(train_data_path,sep='\\t',index_col=False)['GeneSymbol'].tolist()\n test_list = pd.read_csv(test_data_path,sep='\\t',index_col=False)['GeneSymbol'].tolist() \n train_data = SeqDataset(gene_list=train_list,raw_data_path=raw_data_path) \n test_data = SeqDataset(gene_list=test_list,raw_data_path=raw_data_path) \n \n # split the train, validation and test loader\n train_size = int(0.8 * len(train_data))\n valid_size = len(train_data) - train_size\n train_data, valid_data = torch.utils.data.random_split(train_data, [train_size, valid_size]) \n \n train_loader = DataLoader(dataset=train_data,batch_size=train_batch_size,shuffle=True,collate_fn=collate_fn)\n valid_loader = DataLoader(dataset=valid_data,batch_size=test_batch_size,shuffle=False,collate_fn=collate_fn)\n test_loader = DataLoader(dataset=test_data,batch_size=test_batch_size,shuffle=False,collate_fn=collate_fn)\n \n # initialize \n device = torch.device(cuda_name if torch.cuda.is_available() else \"cpu\") \n loss_fn = nn.BCEWithLogitsLoss().to(device) \n fc_model = 
fc_layer(input_dim=1280,emb_dim_one=256,emb_dim_two=16,n_output=n_output,drop_prob=drop_prob).to(device)\n fgm_model = FGM(model=fc_model,fc_one_weight=\"fc_g1.weight\",fc_one_bias=\"fc_g1.bias\",fc_two_weight=\"fc_g2.weight\",fc_two_bias=\"fc_g2.bias\")\n optimizer = torch.optim.Adam(fc_model.parameters(), lr=lr,weight_decay=weight_decay)\n \n best_val_aupr = 0.0\n for epoch in range(num_epoches):\n print('Epoch{}/{}'.format(epoch,(num_epoches-1)))\n print('*'*10)\n # adversarial training function \n epoch_loss, epoch_loss_adv = adversarial_training(model=fc_model,train_loader=train_loader,device=device,optimizer=optimizer,loss_fn=loss_fn,fgm_model=fgm_model)\n print('epoch_loss:',epoch_loss)\n print('epoch_loss_adv:',epoch_loss_adv)\n val_TP,val_FP,val_FN,val_TN,val_fpr,val_tpr,val_auc, val_aupr,val_f1_score, val_accuracy, val_recall, val_specificity, val_precision = predicting(valid_loader,fc_model,device)\n if val_aupr > best_val_aupr: \n print(\"val_auc:\",val_auc)\n print('val_aupr:',val_aupr)\n torch.save(fc_model, model_file_path)\n best_val_aupr = val_aupr\n \n # test procedure \n checkpoint = torch.load(model_file_path)\n TP,FP,FN,TN,fpr,tpr,auc,aupr,f1_score, accuracy, recall, specificity, precision = predicting(test_loader,checkpoint,device) \n test_auc[i] = auc\n test_aupr[i] = aupr\n test_f1_score[i] = f1_score\n test_accuracy[i] = accuracy\n test_recall[i] = recall\n test_specificity[i] = specificity\n test_precision[i] = precision \n print('TP:',TP)\n print('FP:',FP)\n print('FN:',FN)\n print('TN:',TN)\n print('fpr:',fpr)\n print('tpr:',tpr)\n print('test_auc:',auc)\n print('test_aupr:',aupr)\n print('f1_score:',f1_score)\n print('accuracy:',accuracy)\n print('recall:',recall)\n print('specificity:',specificity)\n print('precision:',precision)\n \n mean_auroc = np.mean(test_auc)\n mean_aupr = np.mean(test_aupr)\n mean_f1 = np.mean(test_f1_score)\n mean_acc = np.mean(test_accuracy) \n mean_recall = np.mean(test_recall)\n mean_specificity = np.mean(test_specificity)\n mean_precision = np.mean(test_precision)\n print('mean_auroc:',mean_auroc)\n print('mean_aupr:',mean_aupr)\n print('mean_f1:',mean_f1)\n print('mean_acc:',mean_acc)\n print('mean_recall:',mean_recall)\n print('mean_specificity:',mean_specificity)\n print('mean_precision:',mean_precision)\n std_auc = np.std(test_auc)\n std_aupr = np.std(test_aupr)\n std_f1 = np.std(test_f1_score)\n std_acc = np.std(test_accuracy)\n std_recall = np.std(test_recall)\n std_specificity = np.std(test_specificity)\n std_precision = np.std(test_precision)\n print('std_auc:',std_auc)\n print('std_aupr:',std_aupr)\n print('std_f1:',std_f1)\n print('std_acc:',std_acc)\n print('std_recall:',std_recall)\n print('std_specificity:',std_specificity)\n print('std_precision:',std_precision)\n\n\n\n","repo_name":"jianiM/Bingo","sub_path":"ablation_studies/bingo_without_gnn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8838560675","text":"#!/usr/bin/env python3\n\nimport os\n\nimport requests\nfrom set_vars import set_vars\nset_vars()\n\napi_key = os.environ.get('YOUTUBE_API_KEY')\n\n\ndef get_youtube_fans(group_url):\n url = 'https://www.googleapis.com/youtube/v3/channels?part=statistics&id={0}&key={1}'. 
\\\n format(group_url, api_key)\n r = requests.get(url)\n try:\n fans = int(r.json()['items'][0]['statistics']['subscriberCount'])\n except KeyError:\n return 'Youtube вернул странный ответ, который я не могу распарсить: {}'.format(r.content)\n return fans\n","repo_name":"LuckCky/counterbot","sub_path":"social_stuff/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30008319982","text":"import discord\n\nfrom Embeds import BaseEmbed\nfrom utils import EMOJI, IMAGE\n\n\nclass InviteEmbed(BaseEmbed):\n def __init__(self, user: discord.User, **kwargs):\n super().__init__(user, **kwargs)\n\n self.title = f\"Want me on your server? {EMOJI.HEART_EYE}\"\n self.description = f\"I'm a multipurpose BOT developed by aadilvarsh#1241\"\n self.url = \"https://github.com/AadilVarsh/EddyBot\"\n self.set_footer(\n icon_url=\"https://aadilvarsh.github.io/images/pfp.png\",\n text=f\"Developed by Aadil {EMOJI.SPARKLE}\",\n )\n","repo_name":"advrxh/EddyBot","sub_path":"Eddy/Embeds/invite_embed.py","file_name":"invite_embed.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"7481140765","text":"import math\n\ndef hamming_distance(string1, string2): \n n = len(string1)\n dist = 0\n for i in range(n): \n if string1[i] != string2[i]: \n dist += 1\n return dist\ndef borderSort(matrix):\n n = len(matrix)\n sorted_matrix = [[\"_\" for i in range(n)] for j in range(n)]\n print(sorted_matrix)\n half_value = math.floor((n-1)/2)\n \n ## even case\n for i in range(half_value+1):\n temp_array = []\n # populating top row\n for j in range(i, n-i):\n temp_array.append(matrix[i][j])\n # populating right column\n for j in range(i+1, n-i): \n temp_array.append(matrix[j][n-1-i])\n # populating bottom row\n for j in range(n-2-i, i, -1): \n temp_array.append(matrix[n-i-1][j])\n # populating left column\n for j in range(n-1-i, i, -1):\n temp_array.append(matrix[j][i])\n temp_array.sort()\n count = 0\n # populating top row\n for j in range(i, n-i):\n matrix[i][j] = temp_array[count]\n count += 1\n # populating right column\n for j in range(i+1, n-i): \n matrix[j][n-1-i] = temp_array[count]\n count += 1\n # populating bottom row\n for j in range(n-2-i, i, -1): \n matrix[n-i-1][j] = temp_array[count]\n count += 1\n # populating left column\n for j in range(n-1-i, i, -1):\n matrix[j][i] = temp_array[count] \n count += 1\n return matrix \n\nif __name__ == \"__main__\":\n #Lets go.. 
I got this\n matrix = [[9, 7, 4],\n [1, 6, 2],\n [12, 20, 3]]\n print(borderSort(matrix))","repo_name":"reading-stiener/For-the-love-of-algos","sub_path":"Tests/clearstreet_test_10.20.2020.py","file_name":"clearstreet_test_10.20.2020.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37729854615","text":" # -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nCode made for the course Data Processing with Python\n\nPlataform: Udemy; Instrutor: Ardite Dulce.\n\n@author: Cristofer\n\nEste código abre, navega entre os diretórios do FTP e fecha o a conexão ftp\n\n\"\"\"\n\nfrom ftplib import FTP\n\nftp=FTP(\"ftp.pyclass.com\")\n\nftp.login(\"student@pyclass.com\",\"student123\")\n\n#exibe o conteúdo de um ftp\nprint(ftp.nlst())\n\n#detalha o conteúdo de uma pasta específica do ftp\nprint(ftp.nlst(\"Data\"))\n\n#alterando o diretório de trabalho\nftp.cwd(\"Data\")\nprint(ftp.nlst())\n\n#descendo um nível o diretório\nftp.cwd(\"..\")\nprint(ftp.nlst())\n\n#close the conection\nftp.close()","repo_name":"eu-cristofer/reusable-components","sub_path":"03_Python/Data Processing with Python/01 - Ftp login n navigate directories.py","file_name":"01 - Ftp login n navigate directories.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30137819097","text":"\"\"\"\r\nGiven a column title as it appears in an Excel sheet return its corresponding\ncolumn number.\n\nThe number is computed in the following way:\n\n A -> 1\n B -> 2\n C -> 3\n ...\n Z -> 26\n AA -> 27\n AB -> 28\n ...\n\n### Examples\n\n title_to_number(\"A\") ➞ 1\n \n title_to_number(\"R\") ➞ 18\n \n title_to_number(\"AB\") ➞ 28\n\n### Notes\n\n * `1 <= len(s) <= 7`\n * `s` consists only of uppercase English letters.\n\n\"\"\"\r\n\nimport functools\ndef title_to_number(s):\n def convert(string):\n list1 = []\n list1[:0] = string\n return list1\n rev = list(reversed(convert(s)))\n i = 0\n while i < len(rev):\n rev[i] = (ord(rev[i]) - 64) * 26 ** i\n i += 1\n return functools.reduce(lambda a, b: a + b, rev)\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"p8iNsRCtj3AdJsYjS_13.py","file_name":"p8iNsRCtj3AdJsYjS_13.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42423522635","text":"def calculateFraction(num, den) :\n\tif (num == 0):\n\t\treturn \"0\"\n\tsign = -1 if (num < 0) or (den < 0) else 1\n\tnum = abs(num)\n\tden = abs(den)\n\tinitial = num // den\n\tres = \"\"\n\tif (sign == -1):\n\t\tres += \"-\"\n\tres += str(initial)\n\tif (num % den == 0):\n\t\treturn res\n\tres += \".\"\n\trem = num % den\n\tmp = {}\n\tindex = 0\n\trepeating = False\n\twhile (rem > 0 and not repeating) :\n\t\tif ( rem in mp):\n\t\t\tindex = mp[rem]\n\t\t\trepeating = True\n\t\t\tbreak\n\t\telse:\n\t\t\tmp[rem] = len(res)\n\t\trem = rem * 10\n\t\ttemp = rem // den\n\t\tres += str(temp )\n\t\trem = rem % den\n\tif (repeating) :\n\t\tres += \")\"\n\t\tx = res[:index]\n\t\tx += \"(\"\n\t\tx += res[index:]\n\t\tres = x\n\treturn res\n\nnum = 50\nden = 22\nprint(calculateFraction(num, den))\nnum = -1\nden = 2\nprint(calculateFraction(num, den))","repo_name":"Dwij1704/Python","sub_path":"IPE/31) Fraction Class.py","file_name":"31) Fraction 
Class.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73735269211","text":"import requests\nfrom config import APP_KEY, BASE_URL\n\n\nclass OER(object):\n\n\tdef __init__(self, base=None, symbols=None, prettyprint=None, show_alternative=None):\n\t\tself.base = base\n\t\tself.symbols = symbols\n\t\tself.prettyprint = prettyprint\n\t\tself.show_alternative = show_alternative\n\n\tdef _get_params(self):\n\t\tparams = {}\n\t\tif self.base:\n\t\t\tparams['base'] = self.base\n\t\tif self.symbols:\n\t\t\tparams['symbols'] = self.symbols\n\t\tif self.prettyprint:\n\t\t\tparams['prettyprint'] = self.prettyprint\n\t\tif self.show_alternative:\n\t\t\tparams['show_alternative'] = self.show_alternative\n\t\tparams['app_id'] = APP_KEY\n\t\treturn params\n\n\tdef getRate(self, currency):\n\t\tresponse = requests.get(BASE_URL, params=self._get_params())\n\t\tdata = response.json()\n\t\trate = data['rates'][currency]\n\t\treturn rate\n\n\nif __name__ == '__main__':\n\toer = OER()\n\trate = oer.getRate('EUR')\n\tprint(rate)\n\n\n\n\n\n\n","repo_name":"garg10may/SampleRESTApi","sub_path":"rates.py","file_name":"rates.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16220654868","text":"\"\"\"Produces word-index dictionary.\"\"\"\nimport sys\nimport re\nimport random\nimport torch\nimport json\nfrom torch.autograd import Variable\nfrom stanfordcorenlp import StanfordCoreNLP\nreload(sys)\nsys.setdefaultencoding('UTF8')\n\nUNK_token = 0\nclass Preprocess:\n def __init__(self, name):\n self.name = name\n self.word2index = {}\n self.index2word = {0: 'UNK'}\n self.word2count = {}\n self.n_words = 1\n if name == 'blogs':\n self.tag2idx = {'ne': 0, 'hp': 1, 'sd': 2, 'ag': 3, 'dg': 4,\n 'sp': 5, 'fr': 6}\n elif name == 'twitter':\n self.tag2idx = {'anger': 0, 'disgust': 1, 'fear': 2, 'joy': 3,\n 'sadness': 4, 'surprise': 5}\n elif name == 'bopang':\n self.tag2idx = {'ne': 0, 'po': 1}\n\n def addSentence(self, tokenized_sentence):\n for word in tokenized_sentence:\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.index2word[self.n_words] = word\n self.word2count[word] = 1\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\ndef filterText(text):\n text = text.lower().strip()\n text = re.sub(r\"([.:;)(<>-_!?\\\"\\'])\", r\"\\1\", text)\n text = text.strip()\n return text\n\ndef blogsToDictionary(sentences, MAX_LENGTH = 30):\n nlp = StanfordCoreNLP('/Users/jaeickbae/Documents/projects/utils/stanford\\\n-corenlp-full-2017-06-09')\n dictionary = {'ne': {'tokens': [], 'sentences':[]},\n 'hp': {'tokens': [], 'sentences':[]},\n 'sd': {'tokens': [], 'sentences':[]},\n 'ag': {'tokens': [], 'sentences':[]},\n 'dg': {'tokens': [], 'sentences':[]},\n 'sp': {'tokens': [], 'sentences':[]},\n 'fr': {'tokens': [], 'sentences':[]}}\n min_length = MAX_LENGTH\n max_length = MAX_LENGTH\n longest_sentence = ''\n for s in sentences:\n words = s.split()\n sentence = ' '.join(words[2:])\n filtered_sentence = filterText(sentence)\n tokenized_sentence = nlp.word_tokenize(filtered_sentence)\n if len(tokenized_sentence) < 5 or len(tokenized_sentence) > MAX_LENGTH:\n continue\n dictionary[words[0]]['tokens'].append(tokenized_sentence)\n dictionary[words[0]]['sentences'].append(sentence)\n if len(tokenized_sentence) < min_length:\n min_length = 
len(tokenized_sentence)\n if len(tokenized_sentence) >= max_length:\n max_length = len(tokenized_sentence)\n longest_sentence = sentence\n\n #print stat\n print('**************Statistics**************')\n print('longest: ' + str(max_length))\n print('shortest: '+ str(min_length))\n print('Longest Sentence example: ' + longest_sentence)\n\n total = 0\n for key in dictionary:\n print(key + ':' + str(len(dictionary[key]['tokens'])))\n total += len(dictionary[key]['tokens'])\n print('total : '+str(total))\n print('**************************************')\n with open('./blogs'+str(MAX_LENGTH)+'.json', 'w') as jsonfile:\n json.dump(dictionary, jsonfile)\n return dictionary\n\ndef twitterToDictionary(sentences, MAX_LENGTH=30):\n # Twitter Emotion Corpus\n nlp = StanfordCoreNLP('/Users/jaeickbae/Documents/projects/utils/stanford\\\n-corenlp-full-2017-06-09')\n dictionary = {'anger': {'tokens': [], 'sentences':[]},\n 'disgust': {'tokens': [], 'sentences':[]},\n 'fear': {'tokens': [], 'sentences':[]},\n 'joy': {'tokens': [], 'sentences':[]},\n 'sadness': {'tokens': [], 'sentences':[]},\n 'surprise': {'tokens': [], 'sentences':[]}}\n min_length = MAX_LENGTH\n max_length = MAX_LENGTH\n longest_sentence = ''\n for s in sentences:\n phrase = s.split('\\t')\n # phrase[0]: twitter number\n # phrase[1]: content\n # phrase[2]: tag\n tag = phrase[2][3:].strip()\n if tag not in dictionary:\n print(tag)\n continue\n sentence = phrase[1]\n filtered_sentence = filterText(sentence)\n tokenized_sentence = nlp.word_tokenize(filtered_sentence)\n if len(tokenized_sentence) < 5 or len(tokenized_sentence) > MAX_LENGTH:\n continue\n dictionary[tag]['tokens'].append(tokenized_sentence)\n dictionary[tag]['sentences'].append(sentence)\n if len(tokenized_sentence) < min_length:\n min_length = len(tokenized_sentence)\n if len(tokenized_sentence) >= max_length:\n max_length = len(tokenized_sentence)\n longest_sentence = sentence\n\n #print stat\n print('**************Statistics**************')\n print('longest: ' + str(max_length))\n print('shortest: '+ str(min_length))\n print('Longest Sentence example: ' + longest_sentence)\n\n total = 0\n for key in dictionary:\n print(key + ':' + str(len(dictionary[key]['tokens'])))\n total += len(dictionary[key]['tokens'])\n print('total : '+str(total))\n print('**************************************')\n\n with open('./twitter'+str(MAX_LENGTH)+'.json', 'w') as jsonfile:\n json.dump(dictionary, jsonfile)\n return dictionary\n\ndef bopangToDictionary(sentences_ne, sentences_po, MAX_LENGTH=30):\n # Bo Pang sentiment polarity corpus (rt-polarity)\n nlp = StanfordCoreNLP('/Users/jaeickbae/Documents/projects/utils/stanford\\\n-corenlp-full-2017-06-09')\n dictionary = {'ne': {'tokens': [], 'sentences':[]},\n 'po': {'tokens': [], 'sentences':[]}}\n min_length = MAX_LENGTH\n max_length = MAX_LENGTH\n longest_sentence = ''\n for s in sentences_ne:\n try:\n s.encode('utf-8')\n s.decode('utf-8')\n except:\n continue\n filtered_sentence = filterText(s)\n tokenized_sentence = nlp.word_tokenize(filtered_sentence)\n if len(tokenized_sentence) < 5 or len(tokenized_sentence) > MAX_LENGTH:\n continue\n dictionary['ne']['tokens'].append(tokenized_sentence)\n dictionary['ne']['sentences'].append(s)\n if len(tokenized_sentence) < min_length:\n min_length = len(tokenized_sentence)\n if len(tokenized_sentence) >= max_length:\n max_length = len(tokenized_sentence)\n longest_sentence = s\n\n for s in sentences_po:\n try:\n s.encode('utf-8')\n s.decode('utf-8')\n except:\n continue\n filtered_sentence = filterText(s)\n tokenized_sentence 
= nlp.word_tokenize(filtered_sentence)\n if len(tokenized_sentence) < 5 or len(tokenized_sentence) > MAX_LENGTH:\n continue\n dictionary['po']['tokens'].append(tokenized_sentence)\n dictionary['po']['sentences'].append(s)\n if len(tokenized_sentence) < min_length:\n min_length = len(tokenized_sentence)\n if len(tokenized_sentence) >= max_length:\n max_length = len(tokenized_sentence)\n longest_sentence = s\n\n #print stat\n print('**************Statistics**************')\n print('longest: ' + str(max_length))\n print('shortest: '+ str(min_length))\n print('Longest Sentence example: ' + longest_sentence)\n\n total = 0\n for key in dictionary:\n print(key + ':' + str(len(dictionary[key]['tokens'])))\n total += len(dictionary[key]['tokens'])\n print('total : '+str(total))\n print('**************************************')\n with open('./bopang'+str(MAX_LENGTH)+'.json', 'w') as jsonfile:\n json.dump(dictionary, jsonfile, ensure_ascii=False)\n return dictionary\n\ndef splitTrainAndTestData(data, train = 0.8, test = 0.2):\n train_data = {}\n test_data = {}\n for key in data:\n zipped = list(zip(data[key]['tokens'], data[key]['sentences']))\n random.shuffle(zipped)\n data[key]['tokens'], data[key]['sentences'] = zip(*zipped)\n train_portion = int(len(data[key]['tokens']) * train)\n train_data[key] = {'tokens': [], 'sentences': []}\n test_data[key] = {'tokens': [], 'sentences': []}\n train_data[key]['tokens'] = data[key]['tokens'][:train_portion]\n test_data[key]['tokens'] = data[key]['tokens'][train_portion:]\n train_data[key]['sentences'] = data[key]['sentences'][:train_portion]\n test_data[key]['sentences'] = data[key]['sentences'][train_portion:]\n return train_data, test_data\n\ndef getPairs(train_data, test_data):\n train_pair = []\n test_pair = []\n test_sentence = []\n for key in train_data:\n for item in train_data[key]['tokens']:\n train_pair.append([item, key])\n random.shuffle(train_pair)\n\n for key in test_data:\n for idx, item in enumerate(test_data[key]['tokens']):\n test_pair.append([item, key])\n test_sentence.append(test_data[key]['sentences'][idx])\n zipped = list(zip(test_pair, test_sentence))\n random.shuffle(zipped)\n test_pair, test_sentence = zip(*zipped)\n return train_pair, test_pair, test_sentence\n\ndef changeToVariables(train_pair, test_pair, CUDA_use, data_name, MAX_VOCAB):\n train_input = Preprocess(data_name)\n for pair in train_pair:\n train_input.addSentence(pair[0])\n\n # Vocabulary frequency check\n print('train words ' + str(train_input.n_words))\n if train_input.n_words > MAX_VOCAB:\n pass\n\n train_input_idx = []\n for pair in train_pair:\n train_input_idx.append([train_input.word2index[word]\n for word in pair[0]])\n #train_input_var = [idx_list for idx_list in train_input_idx]\n\n if CUDA_use:\n train_input_var = \\\n [Variable(torch.LongTensor(idx_list).unsqueeze(0)).cuda()\n for idx_list in train_input_idx]\n else:\n train_input_var = [Variable(torch.LongTensor(idx_list).unsqueeze(0))\n for idx_list in train_input_idx]\n\n train_output_label = [[train_input.tag2idx[pair[1]]] for pair in train_pair]\n\n test_input_idx = []\n for pair in test_pair:\n sequence = []\n for word in pair[0]:\n if word in train_input.word2index:\n sequence.append(train_input.word2index[word])\n else:\n sequence.append(UNK_token)\n test_input_idx.append(sequence)\n #test_input_var = [idx_list for idx_list in test_input_idx]\n\n if CUDA_use:\n test_input_var = \\\n [Variable(torch.LongTensor(idx_list).unsqueeze(0)).cuda()\n for idx_list in test_input_idx]\n else:\n test_input_var = 
[Variable(torch.LongTensor(idx_list).unsqueeze(0))\n for idx_list in test_input_idx]\n\n test_output_label = [[train_input.tag2idx[pair[1]]] for pair in test_pair]\n\n return train_input_var, train_output_label,\\\n test_input_var, test_output_label, train_input\n\ndef prepareData(data_name, data_dir, CUDA_use = False, MAX_LENGTH=30,\nMAX_VOCAB=80000):\n # make dictionary\n # split into train and test data\n # make into pairs (train_pair, test_pair)\n # make pairs into variables two pairs of\n # (train_input_vec, train_output_label)\n # return train_input_var, train_output_label, test_input_var,\n # test_output_label, test_sentence\n print(\"Making into Dictionary...\")\n if data_name == 'blogs':\n try:\n with open('./blogs'+str(MAX_LENGTH)+'.json', 'r') as jsonfile:\n dictionary = json.load(jsonfile)\n except:\n with open(data_dir, 'r') as f:\n sentence_list = f.readlines()\n dictionary = blogsToDictionary(sentence_list, MAX_LENGTH)\n elif data_name == 'twitter':\n try:\n with open('./twitter'+str(MAX_LENGTH)+'.json', 'r') as jsonfile:\n dictionary = json.load(jsonfile)\n except:\n with open(data_dir, 'r') as f:\n sentence_list = f.readlines()\n dictionary = twitterToDictionary(sentence_list, MAX_LENGTH)\n elif data_name == 'bopang':\n try:\n with open('./bopang' + str(MAX_LENGTH) + '.json', 'r') as jsonfile:\n dictionary = json.load(jsonfile)\n except:\n with open(data_dir + '/rt-polarity.neg', 'r') as f:\n sentence_neg = f.readlines()\n with open(data_dir + '/rt-polarity.pos', 'r') as f:\n sentence_pos = f.readlines()\n dictionary = bopangToDictionary(sentence_neg, sentence_pos,\n MAX_LENGTH)\n #dictionary: {'tag': {'tokens': [], 'sentences': []}}\n print(\"Changing to Variables...\")\n train_data, test_data = splitTrainAndTestData(dictionary)\n train_pair, test_pair, test_sentence = getPairs(train_data, test_data)\n train_input_var, train_output_label, test_input_var, test_output_label,\\\n train_input = changeToVariables(train_pair, test_pair, CUDA_use, data_name,\n MAX_VOCAB)\n print(\"Data Preparation Done.\")\n return train_input_var, train_output_label, test_input_var,\\\n test_output_label, test_sentence, train_input\n\n\"\"\"\nif __name__ == \"__main__\":\n twitter_data_dir = '/Users/jaeickbae/Documents/projects/'+\\\n '2017 Affective Computing/Jan9-2012-tweets-clean.txt'\n blogs_data_dir = '/Users/jaeickbae/Documents/projects/2017 Affective Computing/Emotion-Data/Benchmark/category_gold_std.txt'\n bopang_data_dir = '/Users/jaeickbae/Documents/projects/data/bopang_twitter/rt-polaritydata/'\n prepareData('bopang', bopang_data_dir, MAX_LENGTH=30)\n\"\"\"\n","repo_name":"SnowIsWhite/EmoNet-PyTorch","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":13390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"345098102","text":"\"\"\"\n\tUsing SP18_all_data_v2.csv , create an array of week objects\n\twhere each week holds an array of assignment objects\n\n\tinput file: SP18_all_data_v2.csv\n\toutput file: assignments_by_week.json\n\n\"\"\"\n\n#!/usr/bin/python\n\nimport csv\nimport json\nimport sys\nimport datetime as dt\n\n# file names\nALL_DATA = \"SP18_all_data_v2.csv\"\nOUTPUT_FILE = \"assignments_by_week.json\"\n\ndef processAllData():\n\t\n\t# const indices for the reader\n\tACCOUNT = 0\n\tWEEK = 1\n\tDATE = 2\n\tTIME = 3\n\tASSIGN = 4\n\tURL_NUM = 5\n\tTYPE = 6\n\n\tprocessedData = []\n\tnum_weeks = -1\n\n\twith open(ALL_DATA , 'rb') as ifp:\n\n\t\t# find number of week objects to be 
made\n\t\treader = csv.reader(ifp)\n\t\tnext(reader, None) # skip the headers\n\n\t\tfor row in reader:\n\t\t\t\n\t\t\tif( int(row[1]) > num_weeks ):\n\t\t\t\tnum_weeks = int(row[1])\n\t\t\t\n\t\t# make empty week 'objects' array\n\t\tfor i in range( num_weeks + 1 ) :\n\t\t\tprocessedData.append( { str(i) : [] } )\n\n\t\tprint(processedData)\n\n\n\twith open(ALL_DATA , 'rb') as ifp:\n\n\t\t# group all assignments by week\n\t\treader = csv.reader(ifp)\n\t\tnext(reader , None) # skip the headers\n\n\t\tprint( \"Processing data...\" )\n\n\t\tfor row in reader:\n\t\t\tfor week in processedData:\n\n\t\t\t\t# add assignments of matching week\n\t\t\t\tif int( row[1] ) == int( week.keys()[0] ):\n\t\t\t\t\tweek[ week.keys()[0] ].append( row )\n\n\n\t\tprint(\"Processing complete.\")\n\n\treturn processedData\n\n\n\ndef outputToJSON( data ):\n\n\tprint(\"Writing data to file...\")\n\n\t# output data to file\n\timport json\n\twith open(OUTPUT_FILE, 'w') as outfile:\n\t\tjson.dump(data, outfile)\n\n\tprint(\"Writing completed\")\n\n\ndef main():\n\n\t# process the data for each file\n\tallData = processAllData()\n\n\n\t# output to json\n\toutputToJSON( allData )\n\nmain()\n","repo_name":"bonomali/DISC_REU_2018","sub_path":"d3js_projects/data_formatting/data_converter_episogram/assignments_by_week.py","file_name":"assignments_by_week.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36967064848","text":"#encoding:utf-8\n#info:swjtu.edu.cn\n#username:2016044857\n#passwd:swjtu2016\n\nimport csv\n#写入csv文件\ndef write_csv_demo1():\n    headers = ['username','age','height']\n    values = [\n        ('张三','18','180'),\n        ('李四', '19', '190'),\n        ('王五', '20', '170')\n    ]\n    with open('student.csv','w',encoding='gbk',newline='') as fp:\n        writer = csv.writer(fp)\n        writer.writerow(headers)\n        writer.writerows(values)\n\n#以字典的方式写入csv,更规范\ndef write_csv_demo2():\n    headers = ['username','age','height']\n    values = [\n        {'username':'zhangsan','age':18,'height':180},\n        {'username': 'lisi', 'age': 19, 'height': 90},\n        {'username': 'wangwu', 'age':20,'height':170}\n    ]\n    with open('class.csv','w',encoding='gbk',newline='') as fp:\n        writer = csv.DictWriter(fp,headers)\n        #写入表头数据需要调用writeheader方法\n        writer.writeheader()\n        writer.writerows(values)\n\n#读取csv文件\nwith open('class.csv','r') as fp:\n    reader = csv.reader(fp)\n    titles = next(reader)\n    for x in reader:\n        print(x)\n\n#按标题读取使用DictReader\nwith open('class.csv','r') as fp:\n    reader = csv.DictReader(fp)\n    for x in reader:\n        print(x)\n","repo_name":"R00T-1024/Tools","sub_path":"csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36179876517","text":"from pymongo import MongoClient\n#1 . connect to database server\nuri = \"mongodb://admin:admin@ds021182.mlab.com:21182/c4e\"\n\nclient = MongoClient(uri)\n#2 . get default database\ndb = client.get_default_database()\n#3 . Get blog collection\nblog = db[\"post\"]\n\npost = {\n'title':\"câu trả lời với c4e\",\n'author':\"nhatminh\",\n'content':'''Cảm ơn C4e đẫ cho e cảm thấy lúc nào cũng sợ mât tiền , áp lực của deadline làm bài nè và đã cho e kiến thức , niềm đam mê vs code . 
Thứ mà đại học ko có đuoc'''\n}\nblog.insert_one(post)\n","repo_name":"nhatminh5197/Nguynnhatminh-fundamental-C4E17","sub_path":"btvn_lap/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42214192114","text":"import sys\nsys.path.insert(1, 'feature_extractors/r2d2')\nimport os, pdb\nfrom PIL import Image\nimport numpy as np\nimport torch\nimport glob\nfrom tools import common\nfrom tools.dataloader import norm_RGB\nfrom nets.patchnet import *\nimport time\nfrom skimage.util.shape import view_as_windows\nfrom scipy.optimize import least_squares\nimport copy\n\nimport numpy as np\nimport cv2\nimport torch\nimport glob\nimport pickle\n#import matplotlib.pyplot as plt\nimport logging\n# import TRT.trt_inference\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('module_R2D2')\nlogger.setLevel(logging.INFO)\nfrom Utils.debug_utils import *\n\ndef mnn_matcher(descriptors_a, descriptors_b, threshold = 0.9):\n device = descriptors_a.device\n sim = descriptors_a @ descriptors_b.t()\n nn_sim, nn12 = torch.max(sim, dim=1)\n nn21 = torch.max(sim, dim=0)[1]\n ids1 = torch.arange(0, sim.shape[0], device=device)\n mask = ((nn_sim >= threshold) & (ids1 == nn21[nn12]))\n matches = torch.stack([ids1[mask], nn12[mask]])\n return matches.t().data.cpu().numpy()\n\n\ndef similarity_matcher(descriptors1, descriptors2, threshold=0.9):\n # Similarity threshold matcher for L2 normalized descriptors.\n device = descriptors1.device\n \n sim = descriptors1 @ descriptors2.t()\n nn_sim, nn12 = torch.max(sim, dim=1)\n nn_dist = torch.sqrt(2 - 2 * nn_sim)\n nn21 = torch.max(sim, dim=0)[1]\n ids1 = torch.arange(0, sim.shape[0], device=device)\n mask = (nn_sim >= threshold)\n matches = torch.stack([ids1[mask], nn12[mask]])\n return matches.t(), nn_dist[mask]\n\ndef ratio_mutual_nn_matcher(descriptors1, descriptors2, ratio=0.90):\n # Lowe's ratio matcher + mutual NN for L2 normalized descriptors.\n device = descriptors1.device\n sim = descriptors1 @ descriptors2.t()\n nns_sim, nns = torch.topk(sim, 2, dim=1)\n nn12 = nns[:, 0]\n nns_dist = torch.sqrt(2 - 2 * nns_sim)\n nn21 = torch.max(sim, dim=0)[1]\n ids1 = torch.arange(0, sim.shape[0], device=device)\n matches = torch.stack([ids1, nns[:, 0]])\n ratios = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)\n mask = torch.min(ids1 == nn21[nn12], ratios <= ratio)\n matches = matches[:, mask]\n return matches.t().data.cpu().numpy(), nns_dist[mask, 0]\n\ndef load_network(model_fn): \n checkpoint = torch.load(model_fn)\n print(\"\\n>> Creating net = \" + checkpoint['net']) \n net = eval(checkpoint['net'])\n nb_of_weights = common.model_size(net)\n print(f\" ( Model size: {nb_of_weights/1000:.0f}K parameters )\")\n\n # initialization\n weights = checkpoint['state_dict']\n net.load_state_dict({k.replace('module.',''):v for k,v in weights.items()})\n return net.eval()\n\n\nclass NonMaxSuppression (torch.nn.Module):\n def __init__(self, rel_thr=0.7, rep_thr=0.7):\n nn.Module.__init__(self)\n self.max_filter = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.rel_thr = rel_thr\n self.rep_thr = rep_thr\n \n def forward(self, reliability, repeatability, **kw):\n # assert len(reliability) == len(repeatability) == 1\n # reliability, repeatability = reliability[0], repeatability[0]\n\n # local maxima\n maxima = (repeatability == self.max_filter(repeatability))\n # print(\"maxima:\",maxima)\n\n # remove low peaks\n maxima *= (repeatability >= 
self.rep_thr)\n maxima *= (reliability >= self.rel_thr) \n return maxima.nonzero().t()[2:4]\n\n\ndef extract_multiscale( net, img, detector, scale_f=2**0.25, \n min_scale=0.0, max_scale=1, \n min_size=256, max_size=1280, \n trt = True,\n verbose=False):\n\n start_time = None\n if(trt == False):\n old_bm = torch.backends.cudnn.benchmark \n torch.backends.cudnn.benchmark = False # speedup\n \n # extract keypoints at multiple scales\n B, three, H, W = img.shape\n assert B == 1 and three == 3, \"should be a batch with a single RGB image\"\n \n assert max_scale <= 1\n s = 1.0 # current scale factor\n X,Y,S,C,Q,D = [],[],[],[],[],[]\n while s+0.001 >= max(min_scale, min_size / max(H,W)):\n if s-0.001 <= min(max_scale, max_size / max(H,W)):\n nh, nw = img.shape[2:]\n if verbose: print(f\"extracting at scale x{s:.02f} = {nw:4d}x{nh:3d}\")\n # extract descriptors\n if(trt == False):\n with torch.no_grad():\n print(img.size())\n res = net(imgs=[img])\n\n \n else:\n res = TRT.trt_inference.r2d2_trt_inference(img)\n # get output and reliability map\n descriptors = res['descriptors'][0]\n reliability = res['reliability'][0]\n repeatability = res['repeatability'][0]\n \n assert len(reliability) == len(repeatability) == 1\n with torch.no_grad():\n y,x = detector(reliability, repeatability) # nms\n\n c = reliability[0,0,y,x]\n q = repeatability[0,0,y,x]\n d = descriptors[0,:,y,x].t()\n n = d.shape[0]\n\n # accumulate multiple scales\n X.append(x.float() * W/nw)\n Y.append(y.float() * H/nh)\n S.append((32/s) * torch.ones(n, dtype=torch.float32, device=d.device))\n C.append(c)\n Q.append(q)\n D.append(d)\n s /= scale_f\n\n # down-scale the image for next iteration\n break\n nh, nw = round(H*s), round(W*s)\n img = F.interpolate(img, (nh,nw), mode='bilinear', align_corners=False)\n\n # restore value\n if(trt == False):\n torch.backends.cudnn.benchmark = old_bm\n\n Y = torch.cat(Y)\n X = torch.cat(X)\n S = torch.cat(S) # scale\n scores = torch.cat(C) * torch.cat(Q) # scores = reliability * repeatability\n XYS = torch.stack([X,Y,S], dim=-1)\n D = torch.cat(D)\n return XYS, D, scores\n\n\ndef extract_keypoints(net, img, args,trt=True):\n xys, desc, scores = extract_multiscale(net, img, detector,\n scale_f = args['scale_f'], \n min_scale = args['min_scale'], \n max_scale = args['max_scale'],\n min_size = args['min_size'], \n max_size = args['max_size'],\n trt = trt, \n verbose = False)\n\n\n xys = xys.cpu().numpy()\n scores = scores.cpu().numpy()\n\n idxs = np.argwhere(scores>0.85)\n\n return (xys[idxs], desc[idxs])\n \n\n#Global variables\nargs = {'model' : 'feature_extractors/r2d2/models/faster2d2_WASF_N16.pt', 'scale_f' : 2**0.25, 'min_size' : 256, 'max_size' : 1380, 'min_scale' : 0, 'max_scale' : 1, 'reliability_thr' : 0.7, 'repeatability_thr' : 0.7 , 'gpu' : [0]}\niscuda = common.torch_set_gpu(args['gpu'])\ndetector = NonMaxSuppression( rel_thr = args['reliability_thr'], rep_thr = args['repeatability_thr'])\n#Global variables\ninit_net = False\nnet = None\ntrt_infer_obj = None\n\ndef extract_features_and_desc(image,trt=False):\n '''\n image: np.uint8\n '''\n global init_net, net, detector, trt_infer_obj\n if(trt == False and not(init_net)):\n #Perform inference using pytorch\n net = load_network(args['model'])\n if iscuda: \n net = net.cuda()\n detector = detector.cuda()\n elif(trt == True and not(init_net)):\n trt_infer_obj = TRT.trt_inference.trt_infer()\n detector = detector.cuda()\n init_net = True\n\n img_pil = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n img_pil = Image.fromarray(img_pil)\n img_cpu = 
img_pil\n # print(\"type(img_cpu):\",type(img_cpu))\n img = norm_RGB(img_cpu)[None]\n if(trt == False): #\n if iscuda: \n img = img.cuda()\n kps, desc = extract_keypoints(net, img, args,trt=trt)\n else:\n kps, desc = extract_keypoints(None, img, args,trt=trt)\n \n # alldesc = np.transpose(alldesc, (1, 2,0))\n\n return np.squeeze(kps), np.squeeze(desc)\n\ndef get_matches(ref_kp, ref_desc, cur_kp, cur_desc, imgshape):\n matches = ratio_mutual_nn_matcher(ref_desc, cur_desc)[0]\n return matches\n\n","repo_name":"Varghese-Kuruvilla/Visual-Odometry-pipeline","sub_path":"R2D2.py","file_name":"R2D2.py","file_ext":"py","file_size_in_byte":8022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4604901309","text":"import gecatsim as xc\nfrom gecatsim.pyfiles.CommonTools import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n# Need to import new recons as they are added\n\n\ndef recon(cfg):\n\n # If doing the recon, load the projection data, do the recon, and save the resulting image volume.\n if cfg.do_Recon:\n prep = load_prep(cfg)\n\n # The following line doesn't work - need to fix it when new recons are added.\n # imageVolume3D = feval(\"reconstruction.\" + cfg.recon.reconType, cfg, prep)\n imageVolume3D = feval(\"gecatsim.reconstruction.pyfiles.\" + cfg.recon.reconType, cfg, prep)\n\n # A hack until the previous line is fixed.\n #imageVolume3D = fdk_equiAngle(cfg, prep)\n imageVolume3D = scaleReconData(cfg, imageVolume3D)\n\n if cfg.recon.saveImageVolume:\n saveImageVolume(cfg, imageVolume3D)\n\n # If not doing the recon, load the previously-saved recon image volume.\n else:\n imageVolume3D = loadImageVolume(cfg)\n\n # In either case, save the results as individual images and display results at the specified window/level.\n if cfg.recon.saveSingleImages:\n saveSingleImages(cfg, imageVolume3D)\n \n if cfg.recon.displayImagePictures:\n cfg = displayImagePictures(cfg, imageVolume3D)\n\n if cfg.recon.saveImagePictureFiles:\n cfg = saveImagePictureFiles(cfg, imageVolume3D)\n\n return cfg\n\n \ndef load_prep(cfg):\n\n print(\"* Loading the projection data...\")\n prep = xc.rawread(cfg.resultsName + \".prep\",\n [cfg.protocol.viewCount, cfg.scanner.detectorRowCount, cfg.scanner.detectorColCount],\n 'float')\n \n return prep\n\n\ndef scaleReconData(cfg, imageVolume3D):\n\n print('* Scaling recon data...')\n if cfg.recon.unit =='HU':\n imageVolume3D = imageVolume3D*(1000/(cfg.recon.mu)) + cfg.recon.huOffset\n elif cfg.recon.unit == '/mm':\n pass\n elif cfg.recon.unit == '/cm':\n imageVolume3D = imageVolume3D*10\n else:\n raise Exception('******** Error! An unsupported recon unit was specified: {:s}. 
********'.format(cfg.recon.unit))\n\n return imageVolume3D\n\n\ndef saveImageVolume(cfg, imageVolume3D):\n\n print('* Writing the recon results to one big file...')\n\n imageVolume3D_size_string = str(cfg.recon.imageSize) + 'x' + str(cfg.recon.imageSize) + 'x' + str(cfg.recon.sliceCount)\n fname = cfg.resultsName + '_' + imageVolume3D_size_string + '.raw'\n imageVolume3D = imageVolume3D.transpose(2, 0, 1)\n imageVolume3D = imageVolume3D.copy(order='C')\n xc.rawwrite(fname, imageVolume3D)\n\n\ndef loadImageVolume(cfg):\n\n print('* Reading the recon results from one big file...')\n\n imageVolume3D_size_string = str(cfg.recon.imageSize) + 'x' + str(cfg.recon.imageSize) + 'x' + str(cfg.recon.sliceCount)\n fname = cfg.resultsName + '_' + imageVolume3D_size_string + '.raw'\n imageVolume3D = xc.rawread(fname,\n [cfg.recon.sliceCount, cfg.recon.imageSize, cfg.recon.imageSize],\n 'float')\n imageVolume3D = imageVolume3D.copy(order='C')\n imageVolume3D = imageVolume3D.transpose(1, 2, 0)\n\n return imageVolume3D\n\n\ndef saveSingleImages(cfg, imageVolume3D):\n\n print('* Writing the recon results to individual files...')\n\n sliceIndicesToSave = range(0, cfg.recon.sliceCount)\n for sliceIndexToSave in sliceIndicesToSave:\n imageVolume3D_size_string = str(cfg.recon.imageSize) + 'x' + str(cfg.recon.imageSize) + 'x1'\n sliceNumberString = 'slice' + str(sliceIndexToSave+1).zfill(3) + 'of' + str(cfg.recon.sliceCount).zfill(3)\n fileName = cfg.resultsName + '_' + sliceNumberString + '_' + imageVolume3D_size_string + '.raw'\n sliceToSave = imageVolume3D[:, :, sliceIndexToSave]\n sliceToSave = sliceToSave.copy(order='C')\n xc.rawwrite(fileName, sliceToSave)\n\n\ndef displayImagePictures(cfg, imageVolume3D):\n\n cfg = drawImages('screen', cfg, imageVolume3D)\n \n return cfg\n\n\ndef saveImagePictureFiles(cfg, imageVolume3D):\n\n print('* Saving the recon results to individual .png files...')\n\n cfg = drawImages('file', cfg, imageVolume3D)\n \n return cfg\n\n\ndef drawImages(drawTo, cfg, imageVolume3D):\n\n # Draw all images.\n # Future improvement: allow caller to specifiy a list of images to draw.\n sliceIndicesToDraw = range(0, cfg.recon.sliceCount)\n\n # If displayWindowMin and displayWindowMax were not passed in,\n # get them from the image data, so all images are displayed using the same W/L.\n if not hasattr(cfg.recon, 'displayWindowMin'):\n cfg.recon.displayWindowMin = np.min(imageVolume3D)\n if not hasattr(cfg.recon, 'displayWindowMax'):\n cfg.recon.displayWindowMax = np.max(imageVolume3D)\n\n for sliceIndexToDraw in sliceIndicesToDraw:\n sliceToDraw = imageVolume3D[:, :, sliceIndexToDraw]\n sliceToDraw = sliceToDraw.copy(order='C')\n sliceNumberString = 'slice' + str(sliceIndexToDraw+1).zfill(3) + 'of' + str(cfg.recon.sliceCount).zfill(3)\n fileName = cfg.resultsName + '_' + sliceNumberString + '.png'\n plt.figure(int(sliceIndexToDraw+1))\n plt.imshow(sliceToDraw, cmap='gray', vmin=cfg.recon.displayWindowMin, vmax=cfg.recon.displayWindowMax)\n if not cfg.recon.displayImagePictureAxes:\n plt.axis('off')\n\n if cfg.recon.displayImagePictureTitles:\n sliceString = \"slice \" + str(sliceIndexToDraw+1) + \" of \" + str(cfg.recon.sliceCount)\n if hasattr(cfg, 'reconImageTitle'):\n # If a plot title is specified, use it, and add the slice info if specified.\n if hasattr(cfg, 'addSliceInfoToReconImageTitle') \\\n and cfg.addSliceInfoToReconImageTitle:\n titleString = cfg.reconImageTitle + \"\\n\" + sliceString\n else:\n titleString = cfg.reconImageTitle\n else:\n # Otherwise, title the plot with the slice 
info.\n titleString = sliceString\n plt.title(titleString, fontsize=10)\n\n if drawTo == 'file':\n plt.savefig(fileName, bbox_inches='tight')\n plt.close()\n elif drawTo == 'screen':\n plt.draw()\n \n if drawTo == 'screen':\n plt.pause(1)\n if cfg.waitForKeypress:\n print('********************************************')\n print('* Press Enter to close images and continue *')\n input('********************************************')\n plt.close('all')\n\n return cfg\n","repo_name":"xcist/main","sub_path":"gecatsim/reconstruction/pyfiles/recon.py","file_name":"recon.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"7973042417","text":"import numpy as np\n\narr1 = np.linspace(0,10,5)\nprint(arr1)\n\nnp.random.seed(42) # 랜덤한 패턴이 고정(동일)하게 나옴 seed가 정해져잇음\nprint(np.random.randint(0,10,(2,3)))\n\narr1 = np.arange(0,10)\narr2 = arr1.reshape(-1,2)\narr1 = np.array([1,2,3])\nprint(arr1)\nprint(arr2)\n\narr1 = np.arange(0,10)\narr2 = arr1.copy()\narr1 = np.array([1,2,3])\nprint(arr1)\nprint(arr2)","repo_name":"ipynbs/class","sub_path":"data_cv2_work/230224/ex06.py","file_name":"ex06.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8270612823","text":"import time\nimport duckdb\nimport pathlib\nimport pandas\nfrom pandas_profiling import ProfileReport\nimport numpy as np\nfrom scipy import stats\nimport joblib\nimport random\nfrom datetime import datetime\nimport logging\n\nlogger = logging.getLogger(\"Trusted logger\")\nc_handler = logging.StreamHandler()\nc_handler.setFormatter(logging.Formatter(fmt=' %(name)s :: %(levelname)-8s :: %(message)s'))\nlogger.addHandler(c_handler)\nlogger.setLevel(logging.DEBUG)\n\ndef data_quality(df, name):\n logger.info(\"Data quality report for \" + name)\n profile = ProfileReport(df, title=\"Profiling Report\")\n now = datetime.now().strftime(\"%m-%d-%Y-%H-%M-%S\") \n profile.to_file(str(pathlib.Path().resolve().parent) + \"/scripts/formatted/\" + str(now) + \"@\" + name + \".html\")\n logger.info(\"Data quality report saved in \" + str(pathlib.Path().resolve().parent) + \"/scripts/formatted/\" + str(now) + \"@\" + name + \".html\")\n \ndef remove_duplicates(name_no_prefix, df, cursor):\n logger.info(\"Removing duplicates from \" + name_no_prefix + \" database\")\n df_clean = df.drop_duplicates()\n # removing previous table\n cursor.execute(f\"DROP TABLE {name_no_prefix}\")\n # create new one\n cursor.execute(f\"CREATE TABLE IF NOT EXISTS {name_no_prefix} AS SELECT * FROM df_clean\")\n logger.info(\"Done removing duplicates from \" + name_no_prefix)\n\n\ndef run():\n sleeping_time = 5\n while True:\n try:\n offset = random.randint(0, 5)\n logger.info(\"Trying to adquire db lock...\")\n conn_formatted = duckdb.connect(database=str(pathlib.Path().resolve().parent) + '/scripts/formatted/my-db.duckdb', read_only=False)\n conn_trusted = duckdb.connect(database=str(pathlib.Path().resolve().parent) + '/scripts/trusted/my-db.duckdb', read_only=False)\n conn_logs = duckdb.connect(database=str(pathlib.Path().resolve().parent) +'/scripts/trusted/logs.duckdb', read_only=False)\n\n cursor = conn_formatted.cursor()\n cursor2 = conn_trusted.cursor()\n log_cursor = conn_logs.cursor()\n logger.info(\"Lock adquired!\")\n\n log_cursor.execute(\"CREATE TABLE IF NOT EXISTS Logs (ID Varchar(255), name Varchar(255))\")\n\n fetch_query = \"SHOW TABLES\"\n 
cursor.execute(fetch_query)\n\n cursor.execute(fetch_query)\n tables = cursor.fetchall()\n for table in tables:\n df = cursor.execute(f\"SELECT * FROM {table[0]}\").df()\n df_hash = joblib.hash(df)\n log_cursor.execute(f\"SELECT * FROM LOGS WHERE Logs.ID = \\'{df_hash}\\'\")\n # check that is a new table based on the hashed value\n df_not_exists = log_cursor.fetchall() == []\n if df_not_exists:\n logger.info(f\"Found a new table with no hash registered: {table[0]}\")\n log_cursor.execute(f\"INSERT INTO LOGS VALUES (\\'{df_hash}\\', \\'{table[0]}\\')\")\n name_no_prefix = table[0].split(\"_\")[0]\n cursor2.execute(\"SHOW TABLES\")\n existing_tables = [x[0] for x in cursor2.fetchall()]\n \n if name_no_prefix not in existing_tables:\n logger.info(\"Name without prefix not found in db, creating table...\")\n cursor2.execute(f\"CREATE TABLE IF NOT EXISTS {name_no_prefix} AS SELECT * FROM df\")\n else:\n logger.info(f\"Inserting values into {name_no_prefix}\")\n cursor2.execute(f\"INSERT INTO {name_no_prefix} SELECT * FROM df\")\n \n # Reporting and deduplicating\n df = cursor2.execute(f\"SELECT * FROM {name_no_prefix}\").df()\n data_quality(df, name_no_prefix)\n remove_duplicates(name_no_prefix, df, cursor2)\n \n \n except BaseException as error:\n logger.warning(\"Can't lock the db. Retrying shortly...\")\n time.sleep(1)\n \n finally:\n conn_formatted.close()\n conn_trusted.close()\n conn_logs.close()\n logger.info(\"Lock released\")\n time.sleep(sleeping_time + offset)","repo_name":"norhther/ADSDB-MDS","sub_path":"d1/scripts/trusted/trusted.py","file_name":"trusted.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8650927620","text":"num = list() #cria lista vazia\ncont = 0 #variavel de manipulação\nwhile True:\n if cont == 0: #condiçao para primeiro numero a ser adicionado\n n1 = (int(input('Digite um valor: ')))\n num.append(n1)\n print('Numero adicionado com sucesso')\n print(num)\n cont += 1 #condição para que o laço nao entre dentro deste if\n condicao = str(input('Quer continuar? [S/N]')).upper().strip() #pergunta para usuario\n if condicao == 'N': #usuario responder não sai do programa\n break\n else: #caso contrario executa os comandos abaixo\n n1 = (int(input('Digite um valor: ')))\n num.append(n1)\n print(num)\n while n1 in num[:-1]: #condição se o valor estiver em n1 do 0 ate o ultimo excluindo ele fica em loop\n num.pop() #exclui o numero repetido\n print('Valor duplicado! Não vou adicionar:')\n condicao = str(input('Quer continuar? 
[S/N]')).upper().strip()\n if condicao == 'N':\n break\n else:\n n1 = (int(input('Digite um valor: ' )))\n num.append(n1) #inclui novo numero e verifica se esta repetido\n print('Numero adicionado com sucesso')\nprint('-='*30)\nprint('Voce digitou os valores {}'.format(sorted(num)))\n\n\n\n\n\n\n","repo_name":"DenisSantos35/Aulas-Python-Curso-em-Video","sub_path":"Curso Python/pythonExercicios/ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19927633574","text":"import pandas as pd\nimport json\nimport csv\nimport numpy as np\n\nfinal_output = []\ndict_for_q6_week={}\nfinal_output_month=[]\ndict_for_q6_month={}\nfinal_output_overall=[]\ndict_for_q6_overall={}\n\nwith open('output.json') as json_file:\n matched_districts = json.load(json_file)\nwith open('neighbor-district-modified.json') as json_file:\n allData = json.load(json_file)\nwith open('q4_week_json.json') as json_file:\n\tweek_data = json.load(json_file)\nwith open('q4_month_json.json') as json_file:\n\tmonth_data = json.load(json_file)\nwith open('q4_overall_json.json') as json_file:\n\toverall_data = json.load(json_file)\n\n\nfor week_id in range(1,26):\n\tfor dis,neigh in allData.items():\n\t\tarray=[]\n\t\tfor neigh_ele in neigh:\n\t\t\tdis_id=matched_districts[neigh_ele]\n\t\t\tcases=week_data[str(dis_id)+'/'+str(week_id)]\n\t\t\tarray.append(cases)\n\t\t\t\t\t\t\t\t\t\t\n\t\tfinal_output.append({'district id':matched_districts[dis],'week id':week_id, 'neighbormean':round(np.mean(array),2),'neighborstdev':round(np.std(array),2)})\n\t\tdict_for_q6_week[str(matched_districts[dis])+'/'+str(week_id)+'/nm']={'neighbormean':round(np.mean(array),2),'neighborstdev':round(np.std(array),2)}\n\t\ncsv_columns = ['district id','week id','neighbormean','neighborstdev']\ntry:\n with open(\"neighbor-week.csv\", 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n writer.writerows(final_output)\nexcept IOError:\n print(\"I/O error\")\n\nwith open('dict_for_q6_week.json', 'w') as fp:\n json.dump(dict_for_q6_week, fp)\n\n\n\n\nfor month_id in range(1,8):\n\tfor dis,neigh in allData.items():\n\t\tarray=[]\n\t\tfor neigh_ele in neigh:\n\t\t\tdis_id=matched_districts[neigh_ele]\n\t\t\tcases=month_data[str(dis_id)+'/'+str(month_id)]\n\t\t\tarray.append(cases)\n\t\t\t\t\t\t\n\t\tfinal_output_month.append({'district id':matched_districts[dis],'month id':month_id, 'neighbormean':round(np.mean(array),2),'neighborstdev':round(np.std(array),2)})\n\t\tdict_for_q6_month[str(matched_districts[dis])+'/'+str(month_id)+'/nm']={'neighbormean':round(np.mean(array),2),'neighborstdev':round(np.std(array),2)}\n\t\ncsv_month_columns = ['district id','month id','neighbormean','neighborstdev']\ntry:\n with open(\"neighbor-month.csv\", 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_month_columns)\n writer.writeheader()\n writer.writerows(final_output_month)\nexcept IOError:\n print(\"I/O error\")\n\nwith open('dict_for_q6_month.json', 'w') as fp:\n json.dump(dict_for_q6_month, fp)\n\n\nfor dis,neigh in allData.items():\n\tarray=[]\n\tfor neigh_ele in neigh:\t\n\t\tdis_id=matched_districts[neigh_ele]\n\t\tcases=overall_data[str(dis_id)+'/1']\n\t\tarray.append(cases)\n\t\t\t\n\tfinal_output_overall.append({'district id':matched_districts[dis],'overall id':1, 
'neighbormean':round(np.mean(array),2),'neighborstdev':round(np.std(array),2)})\n\tdict_for_q6_overall[str(matched_districts[dis])+'/1/nm']={'neighbormean':round(np.mean(array),2),'neighborstdev':round(np.std(array),2)}\n\t\ncsv_overall_columns = ['district id','overall id','neighbormean','neighborstdev']\ntry:\n with open(\"neighbor-overall.csv\", 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_overall_columns)\n writer.writeheader()\n writer.writerows(final_output_overall)\nexcept IOError:\n print(\"I/O error\")\n\nwith open('dict_for_q6_overall.json', 'w') as fp:\n json.dump(dict_for_q6_overall, fp)\n\n\n\n","repo_name":"Sharvarioka/CS685-Data_Mining_assignments","sub_path":"Assignment1/assign_q4.py","file_name":"assign_q4.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2344090804","text":"import sys\nimport shutil\nimport os\n\n\ndef readParameters(parameterKey):\n\n\t# Scan through file to find parameterKey\n\tparameterKeyCounter = 0\n\twith open('parameters.txt', \"r\") as ifile:\n\t\tfor line in ifile:\n\t\t\tline = line.rstrip().split('\\t')\n\n\t\t\t# If parameterKey is found\n\t\t\tif line[0] == parameterKey:\n\n\t\t\t\t# Increment parameterKeyCounter\n\t\t\t\tparameterKeyCounter += 1\n\n\t\t\t\t# Get the data type and read in the data\n\t\t\t\tdataType = line[1]\n\t\t\t\tif dataType == \"string\":\n\t\t\t\t\tparameterValue = line[2]\n\t\t\t\telif dataType == \"list\":\n\t\t\t\t\tparameterValue = line[2].split(',')\n\t\t\t\telif dataType == \"int\":\n\t\t\t\t\tparameterValue = int(line[2])\n\n\t# If only one parameterKey was found, then return\n\tif parameterKeyCounter == 1:\n\t\treturn parameterValue\n\telse:\n\t\tsys.exit(\"ERROR:\\t\" + parameterKey + \" in parameters.txt must occur exactly once.\")\n\n\ndef createEmptyDirectory(fileName):\n\n\t# If fileName exists, delete and remake it. 
Else just create it.\n\ttry:\n\t\tshutil.rmtree(fileName)\n\t\tos.makedirs(fileName)\n\texcept:\n\t\tos.makedirs(fileName)\n","repo_name":"scoliann/BitcoinMarkov","sub_path":"functionModule.py","file_name":"functionModule.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"70136475292","text":"# !pip install sentence_transformers\r\n# !pip install kss\r\n\r\nfrom sentence_transformers import SentenceTransformer, util\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport kss\r\ndef cosine_similarity(a, b):\r\n    return np.dot(a, b) / (np.linalg.norm(a) * (np.linalg.norm(b)))\r\n\r\nmodel = SentenceTransformer('snunlp/KR-SBERT-V40K-klueNLI-augSTS').cuda()\r\n\r\nmodel.eval()\r\n\r\ndata = pd.read_csv('data/train_fold.csv', index_col=0)\r\n\r\ntexts = pd.read_csv(\"data/train_fold.csv\")['text'].values\r\n\r\nprompt = input('prompt : ')\r\nprompt_embeddings = model.encode(prompt)\r\n\r\nmean_cosine = []\r\nmax_cosine = []\r\nfor i in tqdm(texts):\r\n    local_text = kss.split_sentences(i)\r\n    local_embeddings = model.encode(local_text) # (sentence_num, 768)\r\n\r\n    sim_list = []\r\n    for emb in local_embeddings: # 1st embedding , 2nd embeddings, ...\r\n        sim_list.append(cosine_similarity(emb, prompt_embeddings))\r\n\r\n    local_mean = np.mean(sim_list)\r\n    local_max = np.max(sim_list)\r\n\r\n    mean_cosine.append(local_mean)\r\n    max_cosine.append(local_max)\r\n\r\n\r\ndata['mean_similarity'] = mean_cosine\r\ndata['max_similarity'] = max_cosine\r\n\r\ndata.to_csv('data/train_fold_embeddings.csv', encoding='utf8')","repo_name":"lastdefiance20/Bookathon2022_Team_Booker2","sub_path":"SBERT.py","file_name":"SBERT.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"48905413882","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport mlflow\nimport pandas as pd\nfrom pathlib import Path\nfrom mlflow.tracking.client import MlflowClient\nfrom sklearn.metrics import precision_score, recall_score, roc_auc_score, roc_curve\n\n\ndef main(\n    path_to_dataset: str,\n    path_to_metrics_storage: str,\n    registered_model_name: str,\n    experiment_name: str,\n    dagshub_mlflow_tracking_uri: str,\n    mlflow_tracking_username: str,\n    mlflow_tracking_password: str,\n) -> None:\n    \"\"\"\n    Runs validation method\n    Args:\n        path_to_dataset: absolute path to your dataset\n        path_to_metrics_storage: absolute path to your metrics storage\n        experiment_name: the name of the experiment for mlflow\n        registered_model_name: the name of the model in mlflow's model registry\n        dagshub_mlflow_tracking_uri: path to mlflow uri provided by dags hub\n        mlflow_tracking_username: dags hub username\n        mlflow_tracking_password: dags hub token\n\n    Returns:\n        object: None\n    \"\"\"\n\n    os.environ[\"MLFLOW_TRACKING_USERNAME\"] = mlflow_tracking_username\n    os.environ[\"MLFLOW_TRACKING_PASSWORD\"] = mlflow_tracking_password\n\n    mlflow.set_tracking_uri(dagshub_mlflow_tracking_uri)\n\n    client = MlflowClient()\n\n    if experiment_name is None:\n\n        experiments = client.list_experiments()\n        current_experiment = experiments[-1]\n        df = mlflow.search_runs([current_experiment.experiment_id])\n        df.sort_values(by=\"start_time\", inplace=True)\n        run_id = df.run_id.values[-2]\n\n    else:\n        current_experiment = mlflow.get_experiment_by_name(experiment_name)\n        df = mlflow.search_runs([current_experiment.experiment_id])\n        run_id = df.run_id.values[-2]\n\n    
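# Note: values[-2] assumes at least two runs exist; in the named-experiment branch the\n    # frame is not re-sorted, so the index relies on the default ordering returned by\n    # mlflow.search_runs. A hypothetical guard here could be: assert len(df) >= 2\n    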
with mlflow.start_run(run_id=run_id):\n path_to_dataset = Path(path_to_dataset)\n path_to_metrics_storage = Path(path_to_metrics_storage)\n\n # read dataset\n test = pd.read_csv(path_to_dataset).drop(columns=[\"Unnamed: 0\"])\n\n # Now separate the dataset as response variable and feature variables\n x_test = test.drop(\"target\", axis=1)\n y_test = test[\"target\"]\n\n latest_version = client.get_latest_versions(\n registered_model_name, stages=[\"None\"]\n )[0].version\n\n clf = mlflow.sklearn.load_model(\n f\"models:/{registered_model_name}/{latest_version}\"\n )\n\n predictions = clf.predict(x_test)\n predictions_proba = clf.predict_proba(x_test)\n\n # Let's see how our model performed\n precision = precision_score(y_test.values, predictions)\n recall = recall_score(y_test.values, predictions)\n roc_auc = roc_auc_score(y_test.values, predictions_proba[:, 1])\n\n mlflow.log_metrics({\"test_precision\": precision})\n mlflow.log_metrics({\"test_recall\": recall})\n mlflow.log_metrics({\"test_roc_auc\": roc_auc})\n\n fpr, tpr, _ = roc_curve(y_test.values, predictions_proba[:, 1])\n\n metrics = {\n \"train\": {\n \"precision\": precision,\n \"recall\": recall,\n \"roc_auc\": roc_auc,\n }\n }\n\n plots = {\"train\": [{\"tpr\": i, \"fpr\": j} for i, j in zip(tpr, fpr)]}\n\n with open(str(path_to_metrics_storage / \"metrics.json\"), \"w\") as handler:\n json.dump(metrics, handler)\n\n with open(str(path_to_metrics_storage / \"plots.json\"), \"w\") as handler:\n json.dump(plots, handler)\n","repo_name":"gracikk-ds/ml-ops","sub_path":"build/mlops_build/validate_model.py","file_name":"validate_model.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19779925682","text":"# https://programmers.co.kr/learn/courses/30/lessons/60058\n# 문제에 나온 설명대로 차례대로 구현\ndef rec(u, v):\n if u == '':\n return u\n\n u2, v2 = divide(v)\n\n if check(u):\n return u + rec(u2, v2)\n else:\n result = '(' + rec(u2, v2) + ')'\n\n for c in u[1:-1]:\n result += ')' if c == '(' else '('\n\n return result\n\n\n# s가 올바른 괄호 문자열인지 체크\ndef check(s):\n stack = []\n for c in s:\n if c == '(':\n stack.append(c)\n else:\n if not stack:\n return False\n stack.pop()\n\n return len(stack) == 0\n\n\n# s를 더 이상 나눌 수 없는 '균형잡힌 괄호 문자열' u와 나머지 v로 나누기\ndef divide(s):\n count = {'(': 0, ')': 0}\n\n for i in range(len(s)):\n count[s[i]] += 1\n\n if count['('] > 0 and count['('] == count[')']:\n return s[:i + 1], s[i + 1:]\n\n return s, ''\n\n\ndef solution(p):\n if len(p) == 0:\n return p\n\n u, v = divide(p)\n return rec(u, v)\n","repo_name":"YAEJIN-JEONG/Algorithm","sub_path":"프로그래머스/Lv2. 괄호 변환/경진/Lv2. 괄호 변환.py","file_name":"Lv2. 
괄호 변환.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"1333732364","text":"from core.timeline import Event\nfrom core.log import log\nfrom core.modifier import Modifier, Selfbuff\n\n\nclass Tension:\n MAX_STACK = 5\n\n def __init__(self, name, mod):\n # self.adv = adv\n # self.o_dmg_make = adv.dmg_make\n # self.adv.dmg_make = self.dmg_make\n self.name = name\n self.modifier = mod\n self.modifier.off()\n self.add_event = Event(name)\n self.end_event = Event(f\"{name}_end\")\n self.stack = 0\n self.queued_stack = 0\n self.has_stack = Selfbuff(\"has_\" + self.name, 1, -1, \"effect\")\n self.active = set()\n self.disabled_reasons = set()\n self.extra_tensionable = set()\n self.not_tensionable = set()\n\n self.permanent = False\n\n def set_disabled(self, reason):\n self.disabled_reasons.add(reason)\n\n def unset_disabled(self, reason):\n self.disabled_reasons.discard(reason)\n\n @property\n def disabled(self):\n return bool(self.disabled_reasons)\n\n def set_permanent(self):\n self.permanent = True\n self.stack = self.MAX_STACK\n self.has_stack.on()\n log(self.name, \"+{}\".format(self.MAX_STACK), \"stack <{}>\".format(int(self.stack)))\n self.add_event.stack = self.stack\n self.add_event.on()\n\n def add(self, n=1, team=False, queue=False):\n if self.permanent:\n if team:\n log(self.name, \"team\", n)\n else:\n if self.disabled:\n return\n if team:\n log(self.name, \"team\", n)\n # cannot add if max stacks\n if self.stack >= self.MAX_STACK:\n if queue:\n self.queued_stack = n\n return\n self.stack += n\n self.has_stack.on()\n if self.stack >= self.MAX_STACK:\n self.stack = self.MAX_STACK\n log(self.name, \"+{}\".format(n), \"stack <{}>\".format(int(self.stack)))\n\n self.add_event.stack = self.stack\n self.add_event.on()\n\n def add_extra(self, n, team=False):\n if self.permanent:\n if team:\n log(self.name, \"team\", n)\n else:\n if self.disabled:\n return\n if team:\n log(\"{}_extra\".format(self.name), \"team\", n)\n if self.stack == self.MAX_STACK:\n return\n self.stack += n\n if self.stack >= self.MAX_STACK:\n self.stack = self.MAX_STACK\n log(\n \"{}_extra\".format(self.name),\n \"+{}\".format(n),\n \"stack <{}>\".format(int(self.stack)),\n )\n\n def on(self, e):\n if self.stack >= self.MAX_STACK and not e.name in self.not_tensionable and (e.name in self.modifier._static.damage_sources or e.name in self.extra_tensionable):\n log(self.name, \"active\", \"stack <{}>\".format(int(self.stack)))\n self.active.add(e.name)\n\n def off(self, e):\n if e.name in self.active:\n self.active.discard(e.name)\n if not self.permanent:\n self.has_stack.off()\n self.stack = 0\n self.end_event.on()\n if self.queued_stack:\n self.add(n=self.queued_stack)\n self.queued_stack = 0\n log(self.name, \"reset\", \"stack <{}>\".format(int(self.stack)))\n\n allow_acl = True\n\n def __call__(self):\n return self.stack\n\n\nclass Energy(Tension):\n def __init__(self):\n super().__init__(\"energy\", mod=Modifier(\"mod_energized\", \"s\", \"passive\", 0.50))\n\n\nclass Inspiration(Tension):\n def __init__(self):\n super().__init__(\"inspiration\", mod=Modifier(\"mod_inspired\", \"crit\", \"chance\", 1.00))\n","repo_name":"dl-stuff/dl","sub_path":"module/tension.py","file_name":"tension.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"19556938304","text":"def manual():\n def reverser(splited_text):\n c = 1\n 
text_leng = len(splited_text)\n for label in splited_text:\n reverse_text.append(splited_text[text_leng-c])\n c += 1\n return reverse_text\n\n\n word = input('give me the word or words')\n reverse_text = []\n splited_text = []\n for c in word:\n splited_text.append(c)\n\n reverser(splited_text)\n\n if splited_text == reverse_text:\n print('is a palindrome!')\n else:\n print('is not a palindrome')\n print(word)\n print('reverse:')\n print(reverse_text)\n\n\ndef automatic():\n text = input('give me the text:')\n reversed_text = text[::-1]\n if reversed_text == text:\n print('is a palindrome')\n else:\n print('is not a palindrome')\n print('text')\n print(text)\n print('reversed text')\n print(reversed_text)\n\nautomatic()\n","repo_name":"isai-ledezma/check_palindrome","sub_path":"check_palindrome.py","file_name":"check_palindrome.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30273551299","text":"import dash\nfrom dash import html, dcc, Input, Output, callback\n\nimport plotly.express as px\nfrom plotly.graph_objs import Layout, Figure\n\nfrom datetime import datetime as dt\nfrom datetime import date\nimport pandas as pd\n\nimport geopandas as gpd\nimport getgraphs\nimport dash_bootstrap_components as dbc\n\nfrom pathlib import Path\n\n\n# Self written\n#from textmining import GetTopHashtagsData, GetTopWordsData\n\nfrom utils import load_from_cassandra\n\n### import data ### \n\n# import data from cassandra\ndf_twitter = load_from_cassandra(\"twitter\")\ndf_news = load_from_cassandra(\"news\")\n\n# uncomment for local testing\n# df_twitter = pd.read_csv(\"././dashboard/data/twitter_labeled.csv\", on_bad_lines='skip')\n# df_news = pd.read_csv(\"././dashboard/data/news.csv\", on_bad_lines='skip')\n\ndf_twitter = df_twitter[df_twitter['tweet'] != \"real\"]\ndf_twitter = df_twitter[df_twitter['tweet'] != \"fake\"]\n\n\n# Create path\nPARENT_PATH = str(Path().resolve()) + \"/\"\nPATH = \"data/\"\nSUBPATH = \"ukraine_geojson-master/\"\nFILE = \"UA_FULL_Ukraine\"\nFORMAT = \".json\"\n\ndf_map = gpd.read_file(PARENT_PATH + PATH + SUBPATH + FILE + FORMAT)\n\ndf_twitter.created_at = pd.to_datetime(df_twitter.created_at)\ndf_news.created_at = pd.to_datetime(df_news.created_at) \n\n# -----------------#\n\nnewsSourceList = []\nfor index, row in df_news.iterrows():\n if(row['url'].startswith('https://tass.com')):\n newsSourceList.append('TASS')\n elif(row['url'].startswith('https://www.themoscowtimes.com')):\n newsSourceList.append('TheMoscowTimes')\n row['title'] = row['title'][:-4]\n elif(row['url'].startswith('https://www.ukrinform.net')):\n newsSourceList.append('Ukrinform')\n elif(row['url'].startswith('https://www.reuters.com')):\n newsSourceList.append('Reuters')\n else:\n newsSourceList.append('NaN')\n\ndf_news['NewsSource'] = newsSourceList\n\ndf_news['urlMarkDown'] = df_news['url'].apply(lambda x: \"Link\" if str(x) != None else \"\")\n\ndf_twitter['urlMarkDown'] = df_twitter['id'].apply(lambda x: \"Link\" if str(x) != None else \"\")\n# -----------------#\n\ndash.register_page(__name__, path='/')\n\n### dropdown menue ### \n\nhashtags = [\n 'All Tweets',\n '#Ukraine',\n '#UkraineWar',\n '#UkraineNazis',\n '#UkraineRussianWar',\n '#RussianWarCrimes',\n '#UkraineRussiaWar',\n '#RussiaIsATerroristState'\n]\n\ndateselection = [\n 'date selection'\n]\n\nfakeRealToggle = [\n 'all',\n 'fake',\n 'real'\n]\n\nproRussianNewsToggle = [\n 'TASS',\n 'TheMoscowTimes'\n]\n\nproUkrainNewsToggle = [\n 
'Reuters',\n 'Ukrinform'\n]\n\ncapitalListEN = [ \"All Tweets\",\n \"cherkasy\",\n \"chernihiv\",\n \"chernivtsi\",\n \"crimea\",\n \"dnipropetrovsk\",\n \"donetsk\",\n \"ivano-frankivsk\",\n \"kharkiv\",\n \"kherson\",\n \"khmelnytskyi\",\n \"kiev\",\n \"kirovohrad\",\n \"luhansk\",\n \"lviv\",\n \"mykolaiv\",\n \"odessa\",\n \"poltava\",\n \"rivne\",\n \"sumy\",\n \"ternopil\",\n \"vinnytsia\",\n \"volyn\",\n \"zakarpattia\",\n \"zaporizhia\",\n \"zhytomyr\" ]\n\n### create Layout ### \n\ndef GetDatePicker(id_name):\n return dcc.DatePickerRange(\n id=id_name,\n min_date_allowed=date(2022, 12, 24),\n max_date_allowed=date(2023, 1, 24),\n initial_visible_month=date(2022, 12, 25),\n end_date=date(2023, 1, 23)\n )\n\ndef GetSingleDatePicker(id_name):\n return dcc.DatePickerSingle(\n id=id_name,\n min_date_allowed=date(2022, 12, 24),\n max_date_allowed=date(2023, 1, 24),\n initial_visible_month=date(2022, 12, 25),\n date=date(2023, 1, 23)\n )\n\n\ndef GetTimePicker(id_name):\n return dcc.Input(id=id_name, type='time')\n\ndef GetGraph(title, id_name, class_name):\n return html.Div(\n [\n html.Div(\n [\n html.P(title),\n dcc.Graph(id=id_name)\n ],\n className=class_name\n ),\n ]\n )\n\ndef GetNavBar(id_name):\n return html.Div(\n [\n ], style = BOX_STYLE,\n )\n\n\ndef GetDropDownCities(id_name):\n return html.Div(\n [\n dcc.Dropdown(id=id_name, \n options=capitalListEN,\n value=capitalListEN[0],\n placeholder='Please select a hashtag...', \n )]\n )\n\ndef GetNumberDiv(title, id_name, class_name):\n return html.Div(\n [\n html.P(title),\n html.P(id=id_name, style={'font-size': '30px'}),\n ]\n , className=class_name\n )\n\ndef GetTweetsSearch(title, id_name, class_name):\n # Real and fake tweets Figure\n return html.Div(\n [\n html.Div(\n [\n html.P(f'''Tweets'''),\n dcc.Dropdown(id='fakeReal-tweets-input', \n options=fakeRealToggle,\n value=fakeRealToggle[0], \n style={'color':'black', 'height':'0px'}\n ),\n dcc.Input(id=\"tweetSearch\", type=\"text\", placeholder=\"Search Tweets\", style={'marginRight':'10px'}),\n html.Div(html.Div(id='fake-tweets-list'))\n ],\n className=\"fake-tweets-div\"\n ),\n html.Div(id='section-three'),\n ],\n className= \"fake-tweets-info-container\"\n )\n\ndef GetNewsArticle(title, id_name, drop_down_array, list_id):\n return html.Div(\n [\n html.P(title),\n dcc.Dropdown(id=id_name, \n options=drop_down_array,\n value=drop_down_array[0], style={'color': 'black'}\n ),\n html.Div(html.Div(id=list_id))\n ]\n )\n\nNAVBAR_STYLE = {\n \"background-color\": \"#13173C\",\n \"border-radius\": \"5px\",\n \"border-color\": \"white\",\n \"text-align\": \"auto\",\n \"position\": \"fixed\",\n \"z-index\": \"5\",\n \"margin-top\": \"-27px\",\n}\n\nBOX_STYLE = {\n \"padding-top\": \"10px\",\n \"margin-top\": \"5px\",\n \"margin-bottom\": \"5px\",\n \"margin-left\": \"5px\",\n \"margin-right\": \"5px\",\n \"border-radius\": \"5px\",\n \"color\": \"white\",\n \"background-color\": \"#13173C\"\n}\n\nNAV_STYLE = {\n \"margin-left\": \"-5px\",\n \"border-radius\": \"5px\",\n \"color\": \"black\",\n \"border-color\": \"white\",\n \"background-color\": \"#13173C\",\n \"position\": \"fixed\",\n \"width\": \"77.5%\",\n \"margin-top\": \"-10px\",\n \"z-index\": \"5\",\n \"padding\": \"10px\",\n \"border-width\": \"5px\"\n}\n\nDROPDOWN_STYLE = {\n \"margin-top\": \"10px\",\n \"margin-bottom\": \"10px\",\n \"margin-left\": \"5px\",\n \"margin-right\": \"5px\",\n \"border-radius\": \"5px\",\n \"color\": \"black\",\n \"background-color\": \"#13173C\",\n \"padding\": \"10px\",\n}\n\nlayout = 
html.Div([\n\n dbc.Row([\n dbc.Col(dcc.Dropdown(id='drop-down-hashtags', \n options=hashtags,\n style={'height': '15px'},\n value=hashtags[0],\n placeholder='Please select a hashtag...'), width=3),\n dbc.Col(dcc.RadioItems(['day', 'span'], 'span', style={'color': 'white'}, id='day-span-picker'), width=1),\n dbc.Col(GetSingleDatePicker('my-date-picker-range-from'),width=2),\n dbc.Col([dcc.Input(type='number', min=0, max=23, step=1, id='time-from'),dcc.Input(type='number', min=0, max=23, step=1, id='time-till')], width=2),\n dbc.Col(GetDatePicker('my-date-picker-range'),width=4) ], style=NAV_STYLE),\n\n dbc.Row([\n dbc.Col(style={'margin-top': '75px'})\n ]),\n dbc.Row([\n dbc.Col(GetGraph('Ukraine Map', 'ukraine-map', 'two-third'), width=7, style=BOX_STYLE),\n dbc.Col(GetGraph('Top Cities', 'top-cities-barChart', 'one-third'), width=4, style=BOX_STYLE)\n ], align='center'),\n dbc.Row(dbc.Col([GetDropDownCities('drop-down-cities')], style=DROPDOWN_STYLE)),\n \n dbc.Row(\n [\n dbc.Col(GetNumberDiv('Total Tweets', 'total-tweets', 'first'), style=BOX_STYLE),\n dbc.Col(GetNumberDiv('Detected Fake News', 'detected-fake-news', 'second'), style=BOX_STYLE),\n dbc.Col(GetNumberDiv('Pro Ukraine Article Count', 'pro-ukraine-count', 'third'), style=BOX_STYLE),\n dbc.Col(GetNumberDiv('Pro Russia Article Count', 'pro-russia-count', 'fourth'), style=BOX_STYLE),\n ]\n ),\n dbc.Row(\n [\n dbc.Col(GetGraph(r'% of fake tweets', 'fake-tweets-pie-chart', 'one-third'),width=3, style=BOX_STYLE),\n dbc.Col(GetTweetsSearch('','',''), style=BOX_STYLE)\n ]),\n # GetGraph('Top Topics', 'top-topics-barChart', 'one-third'),\n dbc.Row(\n [\n dbc.Col(GetNewsArticle('Pro Russian News', 'pro-Russia-News-Toggle-Input', proRussianNewsToggle, 'russian-news-list'), style=BOX_STYLE),\n dbc.Col(GetNewsArticle('Pro Ukraine News', 'pro-Urkaine-News-Toggle-Input', proUkrainNewsToggle, 'ukraine-news-list'), style=BOX_STYLE),\n ], justify=\"between\"\n ),\n dbc.Row(\n [\n dbc.Col(GetGraph('Top News Words', 'top-topics-barChart', 'one-third'), width = 5,style=BOX_STYLE),\n dbc.Col(html.Div([html.P(\"Topics\"), html.Div(id='topic-modell')]), width = 6,style=BOX_STYLE)\n ]\n ),\n dbc.Row(\n [\n dbc.Col(GetGraph('Top Words', 'top-words-barChart', 'one-third'),style=BOX_STYLE),\n dbc.Col(GetGraph('Top hashtags', 'top-hashtags-barChart', 'one-third'), style=BOX_STYLE)\n ], justify=\"between\"\n ),\n dbc.Row([\n dbc.Col(\n html.Div(\n [\n html.Div(\n [\n html.P(\"Tweet Count and Topics\"),\n dcc.Dropdown(id='tweet-count-with-topic-drop-down-time', \n options=['month', 'day', 'hour'],\n value=['month', 'day', 'hour'][1], style={'color': 'black'}\n ),\n dcc.Graph(id=\"tweet-count-with-topic-lineChart\")\n ]\n ),\n ]\n ) \n ,style=BOX_STYLE\n )\n ],\n\n ),\n # GetGraph('Account Creation Dates', 'account-creation-date-barChart', 'full-width'),\n\n html.P(id = 'dummy-input'),\n ], className='reload')\n\n# # -----------------#\n# Callbacks\n\n# Callback 1\n@callback(\n Output(component_id='ukraine-map', component_property='figure'),\n Output(component_id='top-cities-barChart', component_property='figure'),\n Output(component_id='total-tweets', component_property='children'),\n Output(component_id='detected-fake-news', component_property='children'),\n Output(component_id='pro-ukraine-count', component_property='children'),\n Output(component_id='pro-russia-count', component_property='children'),\n Output(component_id='fake-tweets-pie-chart', component_property='figure'),\n Output(component_id='top-topics-barChart', component_property='figure'),\n 
Output(component_id='top-words-barChart', component_property='figure'),\n Output(component_id='top-hashtags-barChart', component_property='figure'),\n Output(component_id='fake-tweets-list', component_property='children'),\n Output(component_id='russian-news-list', component_property='children'),\n Output(component_id='ukraine-news-list', component_property='children'),\n Output(component_id='tweet-count-with-topic-lineChart', component_property='figure'),\n Output(component_id='topic-modell', component_property='children'),\n\n\n Input(component_id='dummy-input', component_property='value'),\n Input(component_id='drop-down-hashtags', component_property='value'),\n Input(component_id='my-date-picker-range', component_property='start_date'),\n Input(component_id='my-date-picker-range', component_property='end_date'),\n Input(component_id='drop-down-cities', component_property='value'),\n Input(component_id='pro-Russia-News-Toggle-Input', component_property='value'),\n Input(component_id='pro-Urkaine-News-Toggle-Input', component_property='value'),\n Input(component_id='tweet-count-with-topic-drop-down-time', component_property='value'),\n Input(component_id='fakeReal-tweets-input', component_property='value'),\n Input(component_id='day-span-picker', component_property='value'),\n Input(component_id='my-date-picker-range-from', component_property='date'),\n Input(component_id='time-from', component_property='value'),\n Input(component_id='time-till', component_property='value'),\n\n\n)\ndef update_dashboard(dummy_input, drop_down_hashtags, start_date, end_date, drop_down_cities, russiaArticleToggle, ukraineArticleToggle, tweetCountDropDown, fakeReal_tweets_input, day_time_picker, single_date, hour_from, hour_till):\n dateTime = df_twitter\n dateTimeArticle = df_news\n \n if(day_time_picker == 'day'):\n if single_date is not None:\n date_object = pd.to_datetime(single_date)\n if(single_date is not None):\n dateTime = dateTime[(dateTime['created_at'].dt.year == date_object.year) & (dateTime['created_at'].dt.month == date_object.month) & (dateTime['created_at'].dt.day == date_object.day)]\n dateTimeArticle = dateTimeArticle[(dateTimeArticle['created_at'].dt.year == date_object.year) & (dateTimeArticle['created_at'].dt.month == date_object.month) & (dateTimeArticle['created_at'].dt.day == date_object.day)]\n if((hour_from is not None) & (hour_till is not None)):\n if((hour_from < hour_till)):\n dateTime = dateTime[(dateTime['created_at'].dt.hour >= hour_from) & (dateTime['created_at'].dt.hour <= hour_till)]\n dateTimeArticle = dateTimeArticle[(dateTimeArticle['created_at'].dt.hour >= hour_from) & (dateTimeArticle['created_at'].dt.hour <= hour_till)]\n\n elif(day_time_picker == 'span'):\n if start_date is not None:\n start_date_object = pd.to_datetime(start_date)\n\n if end_date is not None:\n end_date_object = pd.to_datetime(end_date)\n if(start_date is not None):\n dateTime = dateTime[(dateTime['created_at'].dt.tz_localize(None) <= end_date_object) & (dateTime['created_at'].dt.tz_localize(None) >= start_date_object)]\n dateTimeArticle = dateTimeArticle[(dateTimeArticle['created_at'] <= end_date_object) & (dateTimeArticle['created_at'] >= start_date_object)]\n \n dateTimeArticleHashtagsRussia = dateTimeArticle[(dateTimeArticle['NewsSource'].str.contains(\"TASS\")) | (dateTimeArticle['NewsSource'].str.contains(\"TheMoscowTimes\"))]\n dateTimeArticleHashtagsUkraine = dateTimeArticle[(dateTimeArticle['NewsSource'].str.contains(\"Ukrinform\")) | 
(dateTimeArticle['NewsSource'].str.contains(\"Reuters\"))]\n\n    dateTimeArticleRussiaToggle = dateTimeArticleHashtagsRussia[dateTimeArticleHashtagsRussia['NewsSource']==str(russiaArticleToggle)]\n    dateTimeArticleUkraineToggle = dateTimeArticleHashtagsUkraine[dateTimeArticleHashtagsUkraine['NewsSource']==str(ukraineArticleToggle)]\n    dataHashtags = dateTime\n    if (drop_down_hashtags == hashtags[0]):\n        dataHashtags = dateTime\n    else:\n        dataHashtags = dateTime[dateTime[\"tweet\"].str.contains(drop_down_hashtags.lower())]\n    dataCities = dataHashtags\n    if (drop_down_cities == capitalListEN[0]):\n        dataCities = dataHashtags\n    else:\n        dataCities = dataHashtags[dataHashtags[\"tweet\"] == drop_down_cities]\n\n    return getgraphs.GetUkraineMap(dataHashtags, df_map), \\\n        getgraphs.GetHistCities(dataHashtags, df_map), \\\n        getgraphs.GetTotalTweets(dataCities), \\\n        getgraphs.GetFakeTweets(dataCities), \\\n        getgraphs.GetTotalProUkraineArticles(dateTimeArticleHashtagsUkraine), \\\n        getgraphs.GetTotalProRussiaArticles(dateTimeArticleHashtagsRussia), \\\n        getgraphs.GetPieFakeNews(dataCities), \\\n        getgraphs.GetTopWords(dateTimeArticle.rename(columns = {'title':'tweet'})), \\\n        getgraphs.GetTopWords(dataCities), \\\n        getgraphs.GetTopHashtags(dataCities), \\\n        getgraphs.GetTweets(dataCities, fakeReal_tweets_input), \\\n        getgraphs.GetNewsList(dateTimeArticleRussiaToggle),\\\n        getgraphs.GetNewsList(dateTimeArticleUkraineToggle),\\\n        getgraphs.GetTweetCount(dataCities, tweetCountDropDown), \\\n        getgraphs.GetTopicModelling(dataCities)\n\n\n# -----------------#\n\n","repo_name":"sihensel/BDMA","sub_path":"dashboard/pages/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":18823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13724781408","text":"# Implements the SoftmaxWithLoss layer as a class, combining the softmax normalization function and the cross-entropy error loss function\n\nimport numpy as np\n\n# SoftmaxWithLoss layer class\nclass SoftmaxWithLoss:\n    # Class constructor\n    def __init__(self):\n        # Create empty lists to hold the layer's parameters and gradients\n        self.params, self.grads = [], []\n        # Declare instance variable y to hold the softmax output (declared only, value stored later)\n        self.y = None\n        # Declare instance variable t to hold the target data compared against the output (declared only, value stored later)\n        self.t = None\n    \n    # Forward pass (takes the input matrix x and the target data t as one-hot labels)\n    def forward(self, x, t):\n        # Store the one-hot target matrix as an instance variable\n        self.t = t\n        # Store the softmax of the input x as instance variable y\n        self.y = softmax(x)\n        # If the softmax output y and the target t have the same number of elements (labels are one-hot vectors)\n        if self.t.size == self.y.size:\n            # Replace t with the index of the largest value along axis 1 (convert one-hot rows to class indices)\n            self.t = self.t.argmax(axis=1)\n        # Compute the loss by feeding the softmax output y and the targets t into the cross-entropy error function\n        loss = cross_entropy_error(self.y, self.t)\n        # Return the loss value\n        return loss\n    \n    # Backward pass (takes the upstream gradient dout, default 1)\n    def backward(self, dout=1):\n        # Use the size of the 0th dimension of the target data as the batch size\n        batch_size = self.t.shape[0]\n        # Initialize the output gradient dx as a copy of the softmax output y\n        dx = self.y.copy()\n        # Subtract 1 at the correct-class positions: rows 0..batch_size-1, columns given by t\n        dx[np.arange(batch_size), self.t] -= 1\n        # Scale dx by the upstream gradient dout\n        dx *= dout\n        # Average dx over the batch\n        dx = dx / batch_size\n\n        # Return the gradient dx\n        return dx\n    \n\n# Softmax function\ndef softmax(x):\n    # Store the largest element of the input x as c (subtracted for numerical stability)\n    c = np.max(x)\n    # Exponentiate every element of x shifted down by c and store the result as exp_x\n    exp_x = np.exp(x-c)\n    # Sum all elements of exp_x into sum_exp_x\n    sum_exp_x = np.sum(exp_x)\n    # Divide each element of exp_x by sum_exp_x to obtain the matrix y\n    y = 
exp_x/sum_exp_x\n    # Return the matrix y\n    return y\n\n# Cross-entropy error function (takes the prediction matrix y and the target data t)\ndef cross_entropy_error(y, t):\n    # delta is a tiny constant that guards against taking log(0)\n    delta = 1e-7\n    # Return the cross-entropy error: the negative sum of t times the log of (y + delta)\n    return -np.sum(t * np.log(y+delta))","repo_name":"hcm1206/DeepLearningStudy2","sub_path":"Chap1. 신경망 복습/prac1-12(softmaxWithLossLayer).py","file_name":"prac1-12(softmaxWithLossLayer).py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12409140163","text":"# evaluates or predicts using specified extractor\n# example of usage: python extractor.py diseases ./articles/diseases --evaluate 1\n\nimport os\n\nimport argparse\nimport glob\nimport re\nimport copy\n\nimport spacy\nfrom spacy.tokens import Span\nfrom spacy.matcher import Matcher, DependencyMatcher\nfrom spacy import displacy\nfrom spacy.util import filter_spans\n\nimport diseases_extractor as dis\nimport food_extractor as food\nimport relations_extractor as rel\n\nclass Extractor:\n    \"\"\"\n    True labels should be denoted with enclosing semi-colons exactly this way:\n    ;;;;\n    or (in case of multiple-token entity):\n    ;; ... ;;\n    \"\"\"\n\n    TL_EXPRESSION = ';;.*?;;' # non-greedy regexp for identifying true labels\n\n    def __init__(self, domain, datapath, to_evaluate, nohtml, for_snowball=False):\n        self.domain = domain\n        self.datapath = datapath\n        self.to_evaluate = to_evaluate\n        self.nohtml = nohtml\n\n        self.nlp = spacy.load('en_core_web_lg', disable=['ner'])\n        # self.nlp.max_length = 2000000\n\n        # snowball\n        self.for_snowball = for_snowball\n        self.sents = [] # list of sentences with both DIS and FOOD from all articles\n\n        self.relations_data_path = './relations_data/relations_data.txt'\n\n    def run(self):\n        if self.to_evaluate:\n            self.evaluate()\n        else:\n            self.predict()\n\n    def extract_labels(self, doc):\n        \"\"\"\n        Extract labels using a proper extractor.\n        \"\"\"\n\n        matcher = Matcher(self.nlp.vocab)\n        matcher_dep = DependencyMatcher(self.nlp.vocab)\n\n        doc.ents = tuple()\n\n        if self.domain == 'diseases':\n            matcher_dep.add('dependencies', dis.dependencies_patterns,\n                            on_match=dis.add_disease_ent_dep)\n            matcher_dep(doc)\n\n        elif self.domain == 'food':\n            # doc.ents = tuple([ent for ent in doc.ents if ent.label_ in ('DIS')])\n\n            matcher_dep.add('dependencies', food.dependencies_patterns,\n                            on_match=food.add_food_dep)\n            matcher_dep(doc)\n\n            food.merge_entities(doc)\n\n        elif self.domain == 'both':\n            matcher_dep.add('dependencies_dis', dis.dependencies_patterns,\n                            on_match=dis.add_disease_ent_dep)\n            matcher_dep.add('dependencies_food', food.dependencies_patterns,\n                            on_match=food.add_food_dep)\n            matcher_dep(doc)\n\n            food.merge_entities(doc)\n\n        else:\n            matcher_dep.add('dependencies_dis', dis.dependencies_patterns,\n                            on_match=dis.add_disease_ent_dep)\n            matcher_dep.add('dependencies_food', food.dependencies_patterns,\n                            on_match=food.add_food_dep)\n\n            matcher_dep(doc)\n\n            matcher.add('associations', rel.association_patterns,\n                        on_match=rel.add_associations_ent)\n\n            matcher(doc)\n\n            temp_doc = copy.deepcopy(doc)\n\n            matcher_dep.remove('dependencies_dis')\n            matcher_dep.remove('dependencies_food')\n\n            matcher_dep.add('dependencies_rel', rel.relations_patterns,\n                            on_match=rel.add_relations_ent_dep)\n            matcher_dep(doc)\n\n            food.merge_entities(doc)\n\n            self.relations_data = rel.extract_relations_data(temp_doc, doc)\n\n    def predict(self):\n        \"\"\"\n        Predicts entities.\n        \"\"\"\n\n        articles = 
glob.glob(f'{self.datapath}/*.txt')\n\n for article in articles:\n if 'test.txt' in article:\n continue\n\n with open(article, 'r') as file:\n article_id = article.split('/')[-1].split('.')[0]\n print(f'Handling article {article_id}...')\n\n # if os.path.exists(f'./displacy/{self.domain}/{article_id}_pred.html'):\n # print(f'{article_id} already predicted...')\n # continue\n\n lines = file.readlines()\n text = ' '.join(lines)\n\n # checking for memory requirements\n if len(text) > 1000000:\n print(f'Article {article_id} is too long, skipping to the next one...')\n continue\n\n doc = self.nlp(text)\n self.extract_labels(doc)\n\n # snowball\n if self.for_snowball:\n for sent in doc.sents:\n has_food = False\n has_disease = False\n\n for entity in sent.ents:\n if entity.label_ == 'FOOD':\n has_food = True\n if entity.label_ == 'DIS':\n has_disease = True\n\n if has_food and has_disease:\n self.sents.append(sent.text)\n\n if not self.nohtml:\n self.generate_html(doc, f'./displacy/{self.domain}/{article_id}_pred.html')\n\n if self.domain == 'relations':\n print('--- EXTRACTED RELATIONS: ---')\n print(self.relations_data)\n\n self.save_relations_data(self.relations_data_path)\n\n if self.domain == 'relations':\n rel.format_relations_data(self.relations_data_path)\n\n def evaluate(self):\n \"\"\"\n Evaluates extraction method by comparing labeled (TL_EXPRESSION) .txt files\n and extracted labels.\n Prints out precision and recall.\n WARNING: takes files ending with _test in account only!\n \"\"\"\n\n tp = 0 # true positive\n fp = 0 # false positive\n fn = 0 # false negative\n\n articles = glob.glob(f'{self.datapath}/*_test.txt')\n\n if self.domain == 'diseases':\n label = 'DIS'\n elif self.domain == 'food':\n label = 'FOOD'\n else:\n pass\n\n for article in articles:\n with open(article, 'r') as file:\n article_id = article.split('/')[-1].split('.')[0]\n print(f'Handling article {article_id}...')\n\n if os.path.exists(f'./displacy/{self.domain}/{article_id}_eval.html'):\n print(f'{article_id} already evaluated...')\n continue\n\n lines = file.readlines()\n text = ' '.join(lines)\n\n # checking for memory requirements\n if len(text) > 1000000:\n print(f'Article {article_id} is too long, skipping to the next one...')\n continue\n\n # reading true labels and creating Span objects\n tl_found = 0\n tl_positions = []\n for match in re.finditer(self.TL_EXPRESSION, text): # matches are returned in left-to-right order\n start, end = match.span()\n\n tl_positions.append((start-(tl_found)*4, end-(tl_found+1)*4))\n tl_found += 1\n\n doc = self.nlp(text.replace(';;', '')) # labels not necessary anymore\n tl_spans = tuple([doc.char_span(start, end, label=label, alignment_mode='expand') \\\n for (start, end) in tl_positions])\n\n self.extract_labels(doc)\n\n el_spans = doc.ents # extracted labels (Span objects)\n\n # comparing true labels with extracted labels\n tl_spans = set([(span.start, span.end) for span in tl_spans])\n el_spans = set([(span.start, span.end) for span in el_spans if span.label_ == label])\n\n doc.ents = tuple() # reset entities (setting new ones w.r.t. 
error type)\n for tl_span in tl_spans:\n if tl_span in el_spans:\n entity = Span(doc, tl_span[0], tl_span[1], label='TP')\n tp +=1\n else:\n entity = Span(doc, tl_span[0], tl_span[1], label='FN')\n fn += 1\n\n doc.ents += (entity,)\n\n for el_span in el_spans:\n entity = Span(doc, el_span[0], el_span[1], label='FP')\n try:\n doc.ents += (entity,)\n except ValueError:\n pass # that wasn't a FP\n else:\n fp += 1\n\n if not self.nohtml:\n self.generate_html(doc, f'./displacy/{self.domain}/{article_id}_eval.html')\n\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n print(f'Precision = {precision}, Recall = {recall}')\n\n def generate_html(self, doc, filepath):\n \"\"\"\n Generates an html file for visualizing entities in a proccessed document (.txt file).\n \"\"\"\n\n if self.to_evaluate:\n ents = ['TP', 'FN', 'FP']\n elif self.domain == 'diseases':\n ents = ['DIS']\n elif self.domain == 'food':\n ents = ['FOOD']\n elif self.domain == 'both':\n ents = ['DIS', 'FOOD']\n elif self.domain == 'relations':\n ents = ['REL']\n\n html = displacy.render(doc, style='ent', page=True,\n options={'colors': {'TP': '#00FF00', 'FN': '#FF0000', 'FP': '#FF00FF',\n 'DIS': '#909090', 'FOOD': '#19D9FF', 'REL': '#0064FF'}, 'ents': ents})\n\n with open(filepath, 'w') as html_file:\n print(f'Saving a generated file to {filepath}')\n html_file.write(html)\n\n def save_relations_data(self, filepath):\n with open(filepath, 'a') as relations_file:\n print(f'Saving relations data to {filepath}')\n relations_file.write(self.relations_data)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('domain', help='Specifies what to extract (possible options: diseases, food, both, relations).',\n choices=['diseases', 'food', 'both', 'relations'])\n parser.add_argument('datapath', help='Specifies a path to directory containing .txt files.')\n parser.add_argument('--evaluate', help='Use --evaluate 1 to evaluate files with trailing _test.txt in name.',\n default=0, type=int)\n parser.add_argument('--nohtml', help='Use --nohtml 1 to disable generating html files with entities highlighted.',\n default=0, type=int)\n\n args = parser.parse_args()\n\n e = Extractor(args.domain, args.datapath, args.evaluate, args.nohtml)\n e.run()\n","repo_name":"dittohed/spacy-relation-extraction","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":10646,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"71068920411","text":"import json\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom get_update_create_study.models import Student\n\n\nclass StudentView(View):\n def post(self, request, *args, **kwargs):\n \"\"\"\n get_or_create에서는 \n 먼저 default 밖에 있는 값으로 데이터베이스에 객체가 있는지 판단한다.\n 만약 있다면 False를 반환하고, 없다면 True를 반환한다.\n \n False이면 default은 아무런 작업을 하지 않는다. 
\n True이면 default에 있는 값으로 생성한다.\n\n default는 객체를 생성할때만 사용된다.\n \"\"\"\n \n data = json.loads(request.body)\n obj, created = Student.objects.get_or_create(\n name = data[\"name\"],\n email = data[\"email\"],\n age = data[\"age\"],\n defaults={\n \"local\":data[\"local\"],\n \"gender\": data[\"gender\"]\n }\n )\n # if not created:\n # obj.local = data[\"local\"] \n # obj.save()\n print(obj)\n print(created)\n return JsonResponse({\"result\":\"success\"}, status=200)\n\n\nclass StudentTwoView(View):\n \"\"\"\n update_or_create에서는 \n 먼저 default 밖에 있는 값으로 데이터베이스에 객체가 있는지 판단한다.\n 만약 있다면 False를 반환하고, 없다면 True를 반환한다.\n \n False이면 default에 있는 값으로 업데이트 한다.\n True이면 default에 있는 값으로 생성한다.\n \"\"\"\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n obj, created = Student.objects.update_or_create(\n name = data[\"name\"],\n email = data[\"email\"],\n age = data[\"age\"],\n defaults={\n \"local\":data[\"local\"],\n \"gender\": data[\"gender\"]\n }\n )\n return JsonResponse({\"result\":\"success\"}, status=200)\n","repo_name":"kdylsky/my_study_not_use","sub_path":"get_update_create_study/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12371501679","text":"\"\"\"\nThis Project Under Team-Silent💞 < @SILENT_DEVS >\nOrgination Github Of this TeamSilent < https://github.com/TeamSilentt > Check out\nCreator Or Dev @HYPER_AD13 | @SHINING_OFF \nFound on github < https://github.com/HYPER-AD17 >\n©Team Silent™\n\"\"\"\nimport os\nimport re\nimport ffmpeg\nimport asyncio\nimport subprocess\nfrom config import Config\nfrom signal import SIGINT\nfrom yt_dlp import YoutubeDL\nfrom youtube_search import YoutubeSearch\nfrom pyrogram import Client, filters, emoji\nfrom utils import mp, RADIO, USERNAME, FFMPEG_PROCESSES\nfrom pyrogram.methods.messages.download_media import DEFAULT_DOWNLOAD_DIR\nfrom pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton\n\n\nmsg=Config.msg\nplaylist=Config.playlist\nADMINS=Config.ADMINS\nCHAT_ID=Config.CHAT_ID\nLOG_GROUP=Config.LOG_GROUP\nRADIO_TITLE=Config.RADIO_TITLE\nEDIT_TITLE=Config.EDIT_TITLE\nADMIN_ONLY=Config.ADMIN_ONLY\nDURATION_LIMIT=Config.DURATION_LIMIT\n\nasync def is_admin(_, client, message: Message):\n admins = await mp.get_admins(CHAT_ID)\n if message.from_user is None and message.sender_chat:\n return True\n if message.from_user.id in admins:\n return True\n else:\n return False\n\nADMINS_FILTER = filters.create(is_admin)\n\n\n@Client.on_message(filters.command([\"splay\", f\"splay@{USERNAME}\"]) & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)) | filters.audio & filters.private)\nasync def yplay(_, message: Message):\n if ADMIN_ONLY == \"True\":\n admins = await mp.get_admins(CHAT_ID)\n if message.from_user.id not in admins:\n m=await message.reply_sticker(\"CAACAgUAAx0CWOSA3AABBlTsYk1HSBIOyZIRxXfTsv9n6wVVYKYAAgsEAALzHiBW8YTIUS83IdAjBA\")\n await mp.delete(m)\n await mp.delete(message)\n return\n type=\"\"\n yturl=\"\"\n ysearch=\"\"\n if message.audio:\n type=\"audio\"\n m_audio = message\n elif message.reply_to_message and message.reply_to_message.audio:\n type=\"audio\"\n m_audio = message.reply_to_message\n else:\n if message.reply_to_message:\n link=message.reply_to_message.text\n regex = r\"^(https?\\:\\/\\/)?(www\\.youtube\\.com|youtu\\.?be)\\/.+\"\n match = re.match(regex,link)\n if match:\n type=\"youtube\"\n yturl=link\n elif \" \" in message.text:\n text = 
message.text.split(\" \", 1)\n query = text[1]\n regex = r\"^(https?\\:\\/\\/)?(www\\.youtube\\.com|youtu\\.?be)\\/.+\"\n match = re.match(regex,query)\n if match:\n type=\"youtube\"\n yturl=query\n else:\n type=\"query\"\n ysearch=query\n else:\n d=await message.reply_text(\"__💁‍♂️ sʜʜ ᴜ ᴅɪᴅ'ɴᴛ ɢɪᴠᴇ ᴍᴇ ᴀɴʏᴛʜɪɴɢ ᴛᴏ ᴘʟᴀʏ ᴀʀᴇ ᴜ ғᴏᴏʟ ɢɪᴠᴇ ᴍᴇ ʏᴛ-ʟɪɴᴋ ᴏʀ ᴀɴʏ ᴛɢ-ᴀᴜᴅɪᴏ ғɪʟᴇ ᴛᴏ sᴛᴀʀᴛ sᴛʀᴇᴀᴍ! ʜᴜʜ__\")\n await mp.delete(d)\n await mp.delete(message)\n return\n user=f\"[{message.from_user.first_name}](tg://user?id={message.from_user.id})\"\n group_call = mp.group_call\n if type==\"audio\":\n if round(m_audio.audio.duration / 360) > DURATION_LIMIT:\n d=await message.reply_text(f\" __🤐ᴀᴜᴅɪᴏ's ᴅᴜʀᴀᴛɪᴏɴ ɪᴢ ʟᴏɴɢᴇʀ ᴛʜᴇɴ {DURATION_LIMIT} ᴍɪɴᴜᴛᴇ(s) ᴀʀᴇɴ'ᴛ ᴀʟʟᴏᴡᴇᴅ, ᴛʜᴇ ᴘʀᴏᴠɪᴅᴇᴅ ᴀᴜᴅɪᴏ sᴛʀᴇᴀᴍ ɪᴢ {round(m_audio.audio.duration/360)} ᴍɪɴᴜᴛᴇ(s)!__\")\n await mp.delete(d)\n await mp.delete(message)\n return\n if playlist and playlist[-1][2] == m_audio.audio.file_id:\n d=await message.reply_text(f\"`🥀 ᴛʜɪs ɪᴢ ᴀʟʀᴇᴀᴅʏ ᴀᴅᴅᴇᴅ ᴛᴏ ᴘʟᴀʏʟɪsᴛ ʜᴜʜ`\")\n await mp.delete(d)\n await mp.delete(message)\n return\n data={1:m_audio.audio.title, 2:m_audio.audio.file_id, 3:\"telegram\", 4:user}\n playlist.append(data)\n if len(playlist) == 1:\n m_status = await message.reply_text(\"💥\")\n await mp.download_audio(playlist[0])\n if 1 in RADIO:\n if group_call:\n group_call.input_filename = ''\n RADIO.remove(1)\n RADIO.add(0)\n process = FFMPEG_PROCESSES.get(CHAT_ID)\n if process:\n try:\n process.send_signal(SIGINT)\n except subprocess.TimeoutExpired:\n process.kill()\n except Exception as e:\n print(e)\n pass\n FFMPEG_PROCESSES[CHAT_ID] = \"\"\n if not group_call.is_connected:\n await mp.start_call()\n file=playlist[0][1]\n group_call.input_filename = os.path.join(\n _.workdir,\n DEFAULT_DOWNLOAD_DIR,\n f\"{file}.raw\"\n )\n await m_status.delete()\n print(f\"- sᴛᴀʀᴛᴇᴅ sᴛʀᴇᴀᴍɪɴɢ 🙋‍♀️: {playlist[0][1]}\")\n if not playlist:\n pl = f\"{emoji.NO_ENTRY} **ᴀᴡᴡ ᴘʟᴀʏʟɪsᴛ ᴇᴍᴘᴛʏ!**\"\n else: \n pl = f\"{emoji.PLAY_BUTTON} **ᴘʟᴀʏʟɪsᴛ👾**:\\n\" + \"\\n\".join([\n f\"**{i}**. 
**{x[1]}**\\n - **ʀᴇǫᴜᴇsᴛᴇᴅ ʙʏ 🧚‍♀️:** {x[4]}\"\n for i, x in enumerate(playlist)\n ])\n if EDIT_TITLE:\n await mp.edit_title()\n if message.chat.type == \"private\":\n await message.reply_text(pl) \n elif LOG_GROUP:\n await mp.send_playlist()\n elif not LOG_GROUP and message.chat.type == \"supergroup\":\n k=await message.reply_text(pl)\n await mp.delete(k)\n for track in playlist[:2]:\n await mp.download_audio(track)\n\n\n if type==\"youtube\" or type==\"query\":\n if type==\"youtube\":\n msg = await message.reply_text(\"💥\")\n url=yturl\n elif type==\"query\":\n try:\n msg = await message.reply_text(\"💥\")\n ytquery=ysearch\n results = YoutubeSearch(ytquery, max_results=1).to_dict()\n url = f\"https://youtube.com{results[0]['url_suffix']}\"\n title = results[0][\"title\"][:40]\n except Exception as e:\n await msg.edit(\n \"**ᴀᴡᴡ ɴᴏᴛʜɪɴɢ ɪᴢ ғᴏᴜɴᴅ sᴇᴅ 🤐!\\nᴛʀʏ ᴛᴏ sᴇᴀʀᴄʜɪɴɢ ᴀɢᴀɪɴ🙋‍♀️**\"\n )\n print(str(e))\n return\n await mp.delete(msg)\n await mp.delete(message)\n else:\n return\n ydl_opts = {\n \"geo-bypass\": True,\n \"nocheckcertificate\": True\n }\n ydl = YoutubeDL(ydl_opts)\n try:\n info = ydl.extract_info(url, False)\n except Exception as e:\n print(e)\n k=await msg.edit(\n f\"`👩‍💻sᴇᴅ ʙᴀᴅ ʟᴜᴄᴋ ʏᴛ-ᴅᴏᴡɴʟᴏᴀᴅ ᴇʀʀᴏʀ ᴛʀʏ ᴀɢᴀɪɴ`\\n\\n{e}\"\n )\n print(str(e))\n await mp.delete(message)\n await mp.delete(k)\n return\n duration = round(info[\"duration\"] / 60)\n title= info[\"title\"]\n if int(duration) > DURATION_LIMIT:\n k=await message.reply_text(f\"💬 __sᴛʀᴇᴀᴍɪɴɢ ғɪʟᴇ ɪs ʟᴏɴɢᴇʀ ᴛʜᴇɴ {DURATION_LIMIT} ᴍɪɴᴜᴛᴇ(s) ᴀʀᴇɴ'ᴛ ᴀʟʟᴏᴡᴇᴅ, ᴛʜᴇ ᴘʀᴏᴠɪᴅᴇᴅ ғɪʟᴇ ɪᴢ ᴀʙᴏᴜᴛ {duration} ᴍɪɴᴜᴛᴇ(s)!__\")\n await mp.delete(k)\n await mp.delete(message)\n return\n data={1:title, 2:url, 3:\"youtube\", 4:user}\n playlist.append(data)\n group_call = mp.group_call\n client = group_call.client\n if len(playlist) == 1:\n m_status = await msg.edit(\"🌟\")\n await mp.download_audio(playlist[0])\n if 1 in RADIO:\n if group_call:\n group_call.input_filename = ''\n RADIO.remove(1)\n RADIO.add(0)\n process = FFMPEG_PROCESSES.get(CHAT_ID)\n if process:\n try:\n process.send_signal(SIGINT)\n except subprocess.TimeoutExpired:\n process.kill()\n except Exception as e:\n print(e)\n pass\n FFMPEG_PROCESSES[CHAT_ID] = \"\"\n if not group_call.is_connected:\n await mp.start_call()\n file=playlist[0][1]\n group_call.input_filename = os.path.join(\n client.workdir,\n DEFAULT_DOWNLOAD_DIR,\n f\"{file}.raw\"\n )\n await m_status.delete()\n print(f\"- sᴛᴀʀᴛᴇᴅ sᴛʀᴇᴀᴍɪɴɢ🧚‍♀️: {playlist[0][1]}\")\n else:\n await msg.delete()\n if not playlist:\n pl = f\"{emoji.NO_ENTRY} **ᴀᴡᴡ ᴘʟᴀʏʟɪsᴛ ᴇᴍᴘᴛʏ!**\"\n else:\n pl = f\"{emoji.PLAY_BUTTON} **ᴘʟᴀʏʟɪsᴛ💥**:\\n\" + \"\\n\".join([\n f\"**{i}**. **{x[1]}**\\n - **ʀᴇǫᴜᴇsᴛᴇʀ🧚‍♀️:** {x[4]}\"\n for i, x in enumerate(playlist)\n ])\n if EDIT_TITLE:\n await mp.edit_title()\n if message.chat.type == \"private\":\n await message.reply_text(pl)\n if LOG_GROUP:\n await mp.send_playlist()\n elif not LOG_GROUP and message.chat.type == \"supergroup\":\n k=await message.reply_text(pl)\n await mp.delete(k)\n for track in playlist[:2]:\n await mp.download_audio(track)\n await mp.delete(message)\n\n\n@Client.on_message(filters.command([\"current\", f\"current@{USERNAME}\"]) & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def current(_, m: Message):\n if not playlist:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪᴢ ᴘʟᴀʏɪɴɢ sʜʜ!**\")\n await mp.delete(k)\n await m.delete()\n return\n else:\n pl = f\"{emoji.PLAY_BUTTON} **ᴘʟᴀʏʟɪsᴛ**:\\n\" + \"\\n\".join([\n f\"**{i}**. 
**{x[1]}**\\n - **ʀᴇǫᴜᴇsᴛᴇʀ:** {x[4]}\"\n for i, x in enumerate(playlist)\n ])\n if m.chat.type == \"private\":\n await m.reply_text(\n pl,\n parse_mode=\"Markdown\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"🔄\", callback_data=\"rplay\"),\n InlineKeyboardButton(\"⏸\", callback_data=\"pause\"),\n InlineKeyboardButton(\"⏭\", callback_data=\"next\")\n \n ],\n\n ]\n )\n )\n else:\n if msg.get('playlist') is not None:\n await msg['playlist'].delete()\n msg['playlist'] = await m.reply_text(\n pl,\n parse_mode=\"Markdown\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"🔄\", callback_data=\"rplay\"),\n InlineKeyboardButton(\"⏸\", callback_data=\"pause\"),\n InlineKeyboardButton(\"⏭\", callback_data=\"next\")\n \n ],\n\n ]\n )\n )\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"vol\", f\"vol@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def set_vol(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.ROBOT} **ɪ ᴅɪᴅɴ'ᴛ ᴊᴏɪɴ ᴀɴʏ ᴠᴄ ʏᴇᴛ**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n if len(m.command) < 2:\n k=await m.reply_text(f\"{emoji.ROBOT} **ᴠᴏʟᴜᴍᴇ ʟɪᴍɪᴛ ɪᴢ ᴊᴜsᴛ (0-200)!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n await group_call.set_my_volume(int(m.command[1]))\n k=await m.reply_text(f\"{emoji.SPEAKER_MEDIUM_VOLUME} **ᴠᴏʟᴜᴍᴇ sᴇᴛᴇᴅ ᴛᴏ {m.command[1]}!**\")\n await mp.delete(k)\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"next\", f\"next@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def skip_track(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪs ʜᴇʀᴇ ᴛᴏ sᴋɪᴘ!**\")\n await mp.delete(k)\n await m.delete()\n return\n if len(m.command) == 1:\n await mp.skip_current_playing()\n if not playlist:\n pl = f\"{emoji.NO_ENTRY} **ᴘʟᴀʏʟɪsᴛ ᴇᴍᴘᴛʏ!**\"\n else:\n pl = f\"{emoji.PLAY_BUTTON} **ᴘʟᴀʏʟɪsᴛ**:\\n\" + \"\\n\".join([\n f\"**{i}**. **{x[1]}**\\n - **ʀᴇǫᴜᴇsᴛᴇʀ:** {x[4]}\"\n for i, x in enumerate(playlist)\n ])\n if m.chat.type == \"private\":\n await m.reply_text(pl)\n if LOG_GROUP:\n await mp.send_playlist()\n elif not LOG_GROUP and m.chat.type == \"supergroup\":\n k=await m.reply_text(pl)\n await mp.delete(k)\n else:\n try:\n items = list(dict.fromkeys(m.command[1:]))\n items = [int(x) for x in items if x.isdigit()]\n items.sort(reverse=True)\n text = []\n for i in items:\n if 2 <= i <= (len(playlist) - 1):\n audio = f\"{playlist[i][1]}\"\n playlist.pop(i)\n text.append(f\"{emoji.WASTEBASKET} **ᴍᴏᴠᴇᴅ ᴛᴏ ᴛʜᴇ ɴᴇxᴛ sᴏɴɢ** - {i}. **{audio}**\")\n else:\n text.append(f\"{emoji.CROSS_MARK} **ᴡᴀɪᴛ ᴄᴀɴ'ᴛ sᴋɪᴘ ᴛᴏᴏ ғᴀsᴛ** - {i}\")\n k=await m.reply_text(\"\\n\".join(text))\n await mp.delete(k)\n if not playlist:\n pl = f\"{emoji.NO_ENTRY} **ᴘʟᴀʏʟɪsᴛ ᴇᴍᴘᴛʏ!**\"\n else:\n pl = f\"{emoji.PLAY_BUTTON} **ᴘʟᴀʏʟɪsᴛ**:\\n\" + \"\\n\".join([\n f\"**{i}**. 
**{x[1]}**\\n - **ʀᴇǫᴜᴇsᴛᴇʀ:** {x[4]}\"\n for i, x in enumerate(playlist)\n ])\n if m.chat.type == \"private\":\n await m.reply_text(pl)\n if LOG_GROUP:\n await mp.send_playlist()\n elif not LOG_GROUP and m.chat.type == \"supergroup\":\n k=await m.reply_text(pl)\n await mp.delete(k)\n except (ValueError, TypeError):\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɪɴᴠᴀʟɪᴅ ɪɴᴘᴜᴛ**\",\n disable_web_page_preview=True)\n await mp.delete(k)\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"joinvc\", f\"joinvc@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def join_group_call(client, m: Message):\n group_call = mp.group_call\n if group_call.is_connected:\n k=await m.reply_text(f\"{emoji.ROBOT} **ᴀʟʀᴇᴀᴅʏ ᴊᴏɪɴᴇᴅ ᴠᴏɪᴄᴇ ᴄʜᴀᴛ ᴅᴜᴅᴇ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n await mp.start_call()\n chat = await client.get_chat(CHAT_ID)\n k=await m.reply_text(f\"{emoji.CHECK_MARK_BUTTON} **ʏᴏʏᴏ sᴜᴄᴄᴇsғᴜʟʟʏ ᴊᴏɪɴᴇᴅ ᴠᴏɪᴄᴇ ᴄʜᴀᴛ ɪɴ {chat.title}!**\")\n await mp.delete(k)\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"leavevc\", f\"leavevc@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def leave_voice_chat(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.ROBOT} **ᴅɪᴅɴ'ᴛ ᴊᴏɪɴᴇᴅ ᴠᴄ ʏᴇᴛ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n playlist.clear()\n if 1 in RADIO:\n await mp.stop_radio()\n group_call.input_filename = ''\n await group_call.stop()\n k=await m.reply_text(f\"{emoji.CROSS_MARK_BUTTON} **ʜᴜʜ ᴍᴇ ɢᴏɴᴇ ғʀᴏᴍ ᴠᴏɪᴄᴇ ᴄʜᴀᴛ**\")\n await mp.delete(k)\n await mp.delete(m)\n\n\n\n@Client.on_message(filters.command([\"end\", f\"end@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def stop_playing(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴏᴛʜɪɴɢ ɪᴢ sᴛʀᴇᴀᴍɪɴɢ ᴛᴏ ᴇɴᴅ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n if 1 in RADIO:\n await mp.stop_radio()\n group_call.stop_playout()\n k=await m.reply_text(f\"{emoji.STOP_BUTTON} **sᴛʀᴇᴀᴍ ᴇɴᴅᴇᴅ!**\")\n playlist.clear()\n await mp.delete(k)\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"rplay\", f\"rplay@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def restart_playing(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪᴢ ᴘʟᴀʏɪɴɢ ᴛᴏ ʀᴇᴘʟᴀʏ ᴛᴀᴛ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n if not playlist:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ᴘʟᴀʏʟɪsᴛ ᴇᴍᴘᴛʏ ʜᴇʀᴇ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n group_call.restart_playout()\n k=await m.reply_text(\n f\"{emoji.COUNTERCLOCKWISE_ARROWS_BUTTON} \"\n \"**sᴛᴀʀᴛᴇᴅ ᴘʟᴀʏɪɴɢ ғʀᴏᴍ ʙᴇɢɴɪɴɢ!**\"\n )\n await mp.delete(k)\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"pause\", f\"pause@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def pause_playing(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪᴢ sᴛʀᴇᴀᴍɪɴɢ ʟᴏʟ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n mp.group_call.pause_playout()\n k=await m.reply_text(f\"{emoji.PLAY_OR_PAUSE_BUTTON} **ʜᴜʜ sᴛʀᴇᴀᴍ ᴘᴀᴜsᴇᴅ!**\",\n 
quote=False)\n await mp.delete(k)\n await mp.delete(m)\n\n\n\n@Client.on_message(filters.command([\"resume\", f\"resume@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def resume_playing(_, m: Message):\n if not mp.group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪᴢ ᴘᴀᴜsᴇᴅ ᴡᴇʟʟ🤷‍♀️!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n mp.group_call.resume_playout()\n k=await m.reply_text(f\"{emoji.PLAY_OR_PAUSE_BUTTON} **ʜᴜʜ ʀᴇsᴜᴍᴇᴅ!**\",\n quote=False)\n await mp.delete(k)\n await mp.delete(m)\n\n@Client.on_message(filters.command([\"rms\", f\"rms@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def clean_raw_pcm(client, m: Message):\n download_dir = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR)\n all_fn: list[str] = os.listdir(download_dir)\n for track in playlist[:2]:\n track_fn = f\"{track[1]}.raw\"\n if track_fn in all_fn:\n all_fn.remove(track_fn)\n count = 0\n if all_fn:\n for fn in all_fn:\n if fn.endswith(\".raw\"):\n count += 1\n os.remove(os.path.join(download_dir, fn))\n k=await m.reply_text(f\"{emoji.WASTEBASKET} **ʜᴜʜ ᴄʟᴇᴀɴᴇᴅ {count} ғɪʟᴇs!**\")\n await mp.delete(k)\n await mp.delete(m)\n\n\n@Client.on_message(filters.command([\"pmute\", f\"pmute@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def mute(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪᴢ ᴘʟᴀʏɪɴɢ ʜᴇʀᴇ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n await group_call.set_is_mute(True)\n k=await m.reply_text(f\"{emoji.MUTED_SPEAKER} **ᴀssɪsᴛᴇɴᴛ ᴍᴜᴛᴇᴅ**\")\n await mp.delete(k)\n await mp.delete(m)\n\n@Client.on_message(filters.command([\"punmute\", f\"punmute@{USERNAME}\"]) & ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def unmute(_, m: Message):\n group_call = mp.group_call\n if not group_call.is_connected:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪs ᴍᴜᴛᴇᴅ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n await group_call.set_is_mute(False)\n k=await m.reply_text(f\"{emoji.SPEAKER_MEDIUM_VOLUME} **ᴀssɪsᴛᴇɴᴛ ᴜɴᴍᴜᴛᴇᴅ!**\")\n await mp.delete(k)\n await mp.delete(m)\n\n@Client.on_message(filters.command([\"playlist\", f\"playlist@{USERNAME}\"]) & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def show_playlist(_, m: Message):\n if not playlist:\n k=await m.reply_text(f\"{emoji.NO_ENTRY} **ɴᴛɢ ɪᴢ ᴘʟᴀʏɪɴɢ!**\")\n await mp.delete(k)\n await mp.delete(m)\n return\n else:\n pl = f\"{emoji.PLAY_BUTTON} **ᴘʟᴀʏʟɪsᴛ**:\\n\" + \"\\n\".join([\n f\"**{i}**. 
**{x[1]}**\\n - **ʀᴇǫᴜᴇsᴛᴇʀ:** {x[4]}\"\n for i, x in enumerate(playlist)\n ])\n if m.chat.type == \"private\":\n await m.reply_text(pl)\n else:\n if msg.get('playlist') is not None:\n await msg['playlist'].delete()\n msg['playlist'] = await m.reply_text(pl)\n await mp.delete(m)\n\nadmincmds=[\"joinvc\", \"punmute\", \"pmute\", \"leavevc\", \"rms\", \"pause\", \"resume\", \"end\", \"skip\", \"radio\", \"stopradio\", \"rplay\", \"restart\", \"vol\", f\"vol@{USERNAME}\", f\"joinvc@{USERNAME}\", f\"punmute@{USERNAME}\", f\"pmute@{USERNAME}\", f\"leavevc@{USERNAME}\", f\"rms@{USERNAME}\", f\"pause@{USERNAME}\", f\"resume@{USERNAME}\", f\"end@{USERNAME}\", f\"skip@{USERNAME}\", f\"radio@{USERNAME}\", f\"stopradio@{USERNAME}\", f\"rplay@{USERNAME}\", f\"restart@{USERNAME}\"]\n\n@Client.on_message(filters.command(admincmds) & ~ADMINS_FILTER & (filters.chat(CHAT_ID) | filters.private | filters.chat(LOG_GROUP)))\nasync def notforu(_, m: Message):\n k=await m.reply_sticker(\"CAACAgUAAx0CWOSA3AABBlTsYk1HSBIOyZIRxXfTsv9n6wVVYKYAAgsEAALzHiBW8YTIUS83IdAjBA\")\n await mp.delete(k)\n await mp.delete(m)\n\nallcmd = [\"splay\", \"current\", \"playlist\", \"song\", f\"song@{USERNAME}\", f\"splay@{USERNAME}\", f\"current@{USERNAME}\", f\"playlist@{USERNAME}\"] + admincmds\n\nPICSS = \"https://telegra.ph/file/00a7f41225be419fade0b.jpg\"\n\n@Client.on_message(filters.command(allcmd) & filters.group & ~filters.chat(CHAT_ID) & ~filters.chat(LOG_GROUP))\nasync def not_chat(_, m: Message):\n buttons = [\n [\n InlineKeyboardButton(\"ᴄʜᴀɴɴᴇʟ🥀\", url=\"https://t.me/SILENT_DEVS\"),\n InlineKeyboardButton(\"ᴜᴘᴅᴀᴛᴇs👩‍💻\", url=\"https://t.me/SILENT_BOTS\"),\n ], \n ]\n k=await m.reply_photo(PICSS, caption=\"**ᴏᴏᴘs ᴛʜɪs ɪᴢ ᴘʀɪᴠᴀᴛᴇ sᴛʀᴇᴀᴍᴇʀ ʀᴏʙᴏᴛ ʙʏ [ᴛᴇᴀᴍ-sɪʟᴇɴᴛ🧚‍♀️](https://t.me/SILENT_DEVS) ᴡᴀɴᴀ ʟɪsᴛᴇɴ ʀᴀᴅɪᴏ sᴛʀᴇᴀᴍ ᴛʜᴇɴ ᴊᴏɪɴ [ʜᴇʀᴇ🥀](https://t.me/SILENT_SUPPORT1)!**\", reply_markup=InlineKeyboardMarkup(buttons))\n await mp.delete(m)\n","repo_name":"Captainamarica/NightVission-StreameVc","sub_path":"plugins/bot/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":24254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15223305182","text":"import os, sys\nimport socket\nimport cPickle as pickle\nimport binascii\nimport zlib\n\nserver_port = int(sys.argv[1])\n\n# States of Receiver\nWAIT_CALL_0 = 0\nWAIT_CALL_1 = 1\n\nclass Packet:\n def __init__(self, seqnum, acknum, payload):\n self.seqnum = seqnum # 0 / 1\n self.acknum = acknum # 0 / 1\n self.payload = payload\n\n def _set_chksum(self, chksum):\n self.chksum = chksum\n\n def _retrieve_chksum(self):\n tmp = self.chksum\n del self.chksum\n return tmp\n\nclass Receiver:\n def __init__(self):\n self.state = WAIT_CALL_0\n self.rcv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.rcv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.rcv_sock.bind(('', server_port))\n # print 'Receiving file on', server_port\n\n def _set_timeout(self, timeout):\n self.rcv_sock.settimeout(timeout)\n\n def _output(self, msg, addr):\n self.rcv_sock.sendto(msg, addr)\n\n def _recv(self, size):\n return self.rcv_sock.recvfrom(size)\n\n def _close(self):\n self.rcv_sock.close()\n\ndef verify_chksum(pkt):\n return (binascii.crc32(pkt) & 0xffffffff)\n\ndef is_corrupt(pkt):\n try:\n pkt = zlib.decompress(pkt)\n pkt = pickle.loads(pkt)\n chksum = pkt._retrieve_chksum()\n # print 'Expected checksum:', chksum\n # print 'Actual checksum:', verify_chksum(pickle.dumps(pkt))\n return chksum != 
verify_chksum(pickle.dumps(pkt))\n except:\n return True\n\ndef has_seq(pkt, seqnum):\n pkt = zlib.decompress(pkt)\n pkt = pickle.loads(pkt)\n return pkt.seqnum == seqnum\n\ndef create_packet(seq_num, ack_num, payload):\n pkt = Packet(seq_num, ack_num, payload)\n chksum = verify_chksum(pickle.dumps(pkt))\n # print 'Checksum size: ', sys.getsizeof(chksum)\n pkt._set_chksum(chksum)\n return pkt\n\ndef is_dest_pkt(pkt):\n pkt = zlib.decompress(pkt)\n pkt = pickle.loads(pkt)\n if pkt.payload[:4] == 'dest':\n return True\n else:\n return False\n\ndef get_dest(pkt):\n pkt = zlib.decompress(pkt)\n pkt = pickle.loads(pkt)\n return pkt.payload[6:]\n\ndef main():\n rcv = Receiver()\n\n count = 1\n try:\n while True:\n pkt_string, client_address = rcv._recv(4096)\n rcv._set_timeout(1)\n # print 'pkt_string', pkt_string\n\n if rcv.state == WAIT_CALL_0:\n # corrupt or wrong-sequence packet: re-acknowledge the last in-order packet\n if is_corrupt(pkt_string) or has_seq(pkt_string, 1):\n ack1 = create_packet(0, 1, 'ack')\n rcv._output(pickle.dumps(ack1), client_address)\n\n # print 'corrupt or duplicate, ack1 sent'\n elif not is_corrupt(pkt_string) and has_seq(pkt_string, 0):\n if is_dest_pkt(pkt_string):\n f = open(get_dest(pkt_string), 'w')\n # print 'Dest packet received'\n else:\n # print sys.getsizeof(pkt_string)\n pkt = zlib.decompress(pkt_string)\n pkt = pickle.loads(pkt)\n f.write(pkt.payload)\n count+=1\n # print 'Writing message', count\n\n ack0 = create_packet(0, 0, 'ack')\n rcv._output(pickle.dumps(ack0), client_address)\n # print 'ack0 sent'\n rcv.state = WAIT_CALL_1\n elif rcv.state == WAIT_CALL_1:\n # corrupt or wrong-sequence packet: re-acknowledge the last in-order packet\n if is_corrupt(pkt_string) or has_seq(pkt_string, 0):\n ack0 = create_packet(0, 0, 'ack')\n rcv._output(pickle.dumps(ack0), client_address)\n\n # print 'corrupt or duplicate, ack0 sent'\n elif not is_corrupt(pkt_string) and has_seq(pkt_string, 1):\n if is_dest_pkt(pkt_string):\n f = open(get_dest(pkt_string), 'w')\n # print 'Dest packet received'\n else:\n # print sys.getsizeof(pkt_string)\n pkt = zlib.decompress(pkt_string)\n pkt = pickle.loads(pkt)\n f.write(pkt.payload)\n count+=1\n # print 'Writing message', count\n\n ack1 = create_packet(0, 1, 'ack')\n rcv._output(pickle.dumps(ack1), client_address)\n # print 'ack1 sent'\n rcv.state = WAIT_CALL_0\n\n except socket.timeout:\n # print 'Downloaded.'\n f.close()\n rcv._close()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"xbili/cs2105","sub_path":"assign2/FileReceiver.py","file_name":"FileReceiver.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3003261855","text":"# https://www.cnblogs.com/kepuCS/p/5271654.html\n# This problem is not as easy as it looks; it took me a long time. My first approach was wrong - the key insight is to use backtracking.\n# http://lib.csdn.net/article/datastructure/16705 - the iterative solution there is also very clear.\nclass Solution(object):\n dic={u'2':['a','b','c'],u'3':['d','e','f'],u'4':['g','h','i'],u'5':['j','k','l'],u'6':['m','n','o'],u'7':['p','q','r','s'],u'8':['t','u','v'],u'9':['w','x','y','z']}\n def recuive_combine(self,index,cur,digits,result):\n if index==len(digits) and len(cur)!=0:\n result.append(cur)\n return\n v=self.dic[digits[index]]\n for vv in v:\n self.recuive_combine(index+1,cur+vv,digits,result)\n \n def letterCombinations(self, digits):\n \"\"\"\n :type digits: 
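The FileReceiver record above implements a stop-and-wait (rdt3.0-style) receiver: every packet is pickled, CRC32-checksummed and zlib-compressed, and the receiver re-acknowledges the last in-order packet whenever a corrupt or duplicate one arrives. A minimal Python 3 sketch of the checksum round trip it relies on (the record itself is Python 2 with cPickle; all names below are illustrative, not from the source):

import binascii, pickle, zlib

def make_packet(seqnum, payload):
    # the checksum is computed over the packet *without* the checksum field,
    # exactly as the receiver recomputes it after stripping that field
    body = pickle.dumps({"seqnum": seqnum, "payload": payload})
    chksum = binascii.crc32(body) & 0xffffffff
    return zlib.compress(pickle.dumps({"body": body, "chksum": chksum}))

def is_corrupt(raw):
    try:
        outer = pickle.loads(zlib.decompress(raw))
        return (binascii.crc32(outer["body"]) & 0xffffffff) != outer["chksum"]
    except Exception:
        return True  # undecodable data is treated as corrupt

pkt = make_packet(0, b"hello")
assert not is_corrupt(pkt)
assert is_corrupt(pkt[:-1] + b"x")  # tamper with the last byte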
str\n :rtype: List[str]\n \"\"\"\n #digits=u'23'\n if len(digits)==0:\n return []\n result=[]\n self.recuive_combine(0,'',digits,result)\n return result\n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"hbsun2113/LeetCodeCrawler","sub_path":"0017.letter-combinations-of-a-phone-number/letter-combinations-of-a-phone-number.py","file_name":"letter-combinations-of-a-phone-number.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"37156236403","text":"\"\"\"Config flow for Flux LED/MagicLight.\"\"\"\nimport copy\nimport logging\n\nfrom flux_led import BulbScanner\nimport voluptuous as vol\n\nfrom homeassistant import config_entries\nfrom homeassistant.const import CONF_HOST, CONF_NAME\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\n\nfrom .const import (\n CONF_AUTOMATIC_ADD,\n CONF_CONFIGURE_DEVICE,\n CONF_DEVICES,\n CONF_EFFECT_SPEED,\n CONF_REMOVE_DEVICE,\n DEFAULT_EFFECT_SPEED,\n DOMAIN,\n SIGNAL_ADD_DEVICE,\n SIGNAL_REMOVE_DEVICE,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Handle a config flow for FluxLED/MagicHome Integration.\"\"\"\n\n VERSION = 1\n CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL\n\n @staticmethod\n @callback\n def async_get_options_flow(config_entry: config_entries.ConfigEntry):\n \"\"\"Get the options flow for the Flux LED component.\"\"\"\n return OptionsFlow(config_entry)\n\n async def async_step_import(self, data: dict = None):\n \"\"\"Handle configuration via YAML import.\"\"\"\n _LOGGER.debug(\"Importing configuration from YAML for flux_led\")\n config_entry = self.hass.config_entries.async_entries(DOMAIN)\n\n if config_entry:\n _LOGGER.warning(\n \"Your flux_led configuration has already been imported. Please remove configuration from your configuration.yaml\"\n )\n return self.async_abort(reason=\"single_instance_allowed\")\n\n _LOGGER.warning(\n \"Imported auto_add configuration for flux_led. 
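The letter-combinations record above uses recursive backtracking; as its second comment notes, an iterative solution is also clear. A short sketch of that alternative using itertools.product, assuming the same digit-to-letters mapping as the record (function name is illustrative):

from itertools import product

MAP = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
       '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}

def letter_combinations(digits):
    if not digits:
        return []
    # product() yields one letter per digit, in input order
    return [''.join(p) for p in product(*(MAP[d] for d in digits))]

assert letter_combinations('23') == ['ad', 'ae', 'af', 'bd', 'be', 'bf',
                                     'cd', 'ce', 'cf']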
Please remove from your configuration.yaml\"\n )\n return await self.async_step_user(\n user_input={\n CONF_AUTOMATIC_ADD: data[CONF_AUTOMATIC_ADD],\n CONF_DEVICES: data.get(CONF_DEVICES, {}),\n }\n )\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n errors = {}\n\n config_entry = self.hass.config_entries.async_entries(DOMAIN)\n if config_entry:\n return self.async_abort(reason=\"single_instance_allowed\")\n\n if user_input is not None:\n devices = user_input.get(CONF_DEVICES, {})\n\n if user_input[CONF_AUTOMATIC_ADD]:\n scanner = BulbScanner()\n await self.hass.async_add_executor_job(scanner.scan)\n\n for bulb in scanner.getBulbInfo():\n device_id = bulb[\"ipaddr\"].replace(\".\", \"_\")\n if not devices.get(device_id, False):\n devices[device_id] = {\n CONF_NAME: bulb[\"ipaddr\"],\n CONF_HOST: bulb[\"ipaddr\"],\n }\n\n return self.async_create_entry(\n title=\"FluxLED/MagicHome\",\n data={\n CONF_AUTOMATIC_ADD: user_input[CONF_AUTOMATIC_ADD],\n CONF_EFFECT_SPEED: DEFAULT_EFFECT_SPEED,\n CONF_DEVICES: devices,\n },\n )\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {vol.Required(CONF_AUTOMATIC_ADD, default=True): bool}\n ),\n errors=errors,\n )\n\n\nclass OptionsFlow(config_entries.OptionsFlow):\n \"\"\"Handle flux_led options.\"\"\"\n\n def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n \"\"\"Initialize the flux_led options flow.\"\"\"\n\n self._config_entry = config_entry\n self._global_options = None\n self._configure_device = None\n\n async def async_step_init(self, user_input=None):\n \"\"\"Manage the options.\"\"\"\n return await self.async_step_prompt_options()\n\n async def async_step_prompt_options(self, user_input=None):\n \"\"\"Manage the options.\"\"\"\n\n errors = {}\n\n if user_input is not None:\n self._global_options = {\n CONF_AUTOMATIC_ADD: user_input[CONF_AUTOMATIC_ADD],\n CONF_EFFECT_SPEED: user_input[CONF_EFFECT_SPEED],\n }\n\n if CONF_CONFIGURE_DEVICE in user_input:\n self._configure_device = user_input[CONF_CONFIGURE_DEVICE]\n return await self.async_step_configure_device()\n\n if CONF_REMOVE_DEVICE in user_input:\n device_id = user_input[CONF_REMOVE_DEVICE]\n config_data = copy.deepcopy(dict(self._config_entry.data))\n del config_data[CONF_DEVICES][device_id]\n\n self.hass.config_entries.async_update_entry(\n self._config_entry, data=config_data\n )\n\n async_dispatcher_send(\n self.hass, SIGNAL_REMOVE_DEVICE, {\"device_id\": device_id}\n )\n\n options_data = self._config_entry.options.copy()\n if device_id in options_data:\n del options_data[device_id]\n options_data[\"global\"] = self._global_options\n\n return self.async_create_entry(title=\"\", data=options_data)\n\n if CONF_HOST in user_input:\n device_name = (\n user_input[CONF_NAME]\n if CONF_NAME in user_input\n else user_input[CONF_HOST]\n )\n device_id = user_input[CONF_HOST].replace(\".\", \"_\")\n device_data = {\n CONF_HOST: user_input[CONF_HOST],\n CONF_NAME: device_name,\n }\n config_data = copy.deepcopy(dict(self._config_entry.data))\n config_data[CONF_DEVICES][device_id] = device_data\n\n self.hass.config_entries.async_update_entry(\n self._config_entry, data=config_data\n )\n\n async_dispatcher_send(\n self.hass, SIGNAL_ADD_DEVICE, {device_id: device_data}\n )\n\n options_data = self._config_entry.options.copy()\n options_data[\"global\"] = self._global_options\n return self.async_create_entry(title=\"\", data=options_data)\n\n existing_devices = {}\n\n for device_id, device in 
self._config_entry.data[CONF_DEVICES].items():\n existing_devices[device_id] = device.get(CONF_NAME, device[CONF_HOST])\n\n options = {\n vol.Optional(\n CONF_AUTOMATIC_ADD,\n default=self._config_entry.options.get(\"global\", {}).get(\n CONF_AUTOMATIC_ADD, self._config_entry.data[CONF_AUTOMATIC_ADD]\n ),\n ): bool,\n vol.Optional(\n CONF_EFFECT_SPEED,\n default=self._config_entry.options.get(\"global\", {}).get(\n CONF_EFFECT_SPEED, DEFAULT_EFFECT_SPEED\n ),\n ): vol.All(\n vol.Coerce(int),\n vol.Range(min=1, max=100),\n ),\n vol.Optional(CONF_HOST): str,\n vol.Optional(CONF_NAME): str,\n vol.Optional(CONF_CONFIGURE_DEVICE): vol.In(existing_devices),\n vol.Optional(CONF_REMOVE_DEVICE): vol.In(existing_devices),\n }\n\n return self.async_show_form(\n step_id=\"prompt_options\", data_schema=vol.Schema(options), errors=errors\n )\n\n async def async_step_configure_device(self, user_input=None):\n \"\"\"Manage the options.\"\"\"\n\n errors = {}\n\n if user_input is not None:\n options_data = self._config_entry.options.copy()\n options_data[self._configure_device] = {\n CONF_EFFECT_SPEED: user_input[CONF_EFFECT_SPEED]\n }\n options_data[\"global\"] = self._global_options\n return self.async_create_entry(title=\"\", data=options_data)\n\n options = {\n vol.Required(\n CONF_EFFECT_SPEED,\n default=self._config_entry.options.get(self._configure_device, {}).get(\n CONF_EFFECT_SPEED, DEFAULT_EFFECT_SPEED\n ),\n ): vol.All(\n vol.Coerce(int),\n vol.Range(min=1, max=100),\n )\n }\n\n return self.async_show_form(\n step_id=\"configure_device\", data_schema=vol.Schema(options), errors=errors\n )\n","repo_name":"rcdemuth/flux_led","sub_path":"custom_components/flux_led/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18666682624","text":"# coding: UTF-8\n# Compute the sum of two numbers represented as linked lists\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n dummy_head = ListNode(0)\n cur = dummy_head\n carry = 0\n while l1 or l2:\n vall1 = l1.val if l1 else 0\n vall2 = l2.val if l2 else 0\n total = vall1 + vall2 + carry\n carry = total // 10\n cur.next = ListNode(total % 10)\n cur = cur.next\n if l1:\n l1 = l1.next\n if l2:\n l2 = l2.next\n if carry > 0:\n cur.next = ListNode(carry)\n return dummy_head.next\n\n\n\n","repo_name":"ChenZhengrong15/LeetCode_czr","sub_path":"LeetCode/Add_Two_Numbers.py","file_name":"Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16547764338","text":"\"A module to test the place order feature\"\nfrom tests.base_test import BaseTestCase\nclass TestPlaceOrder(BaseTestCase):\n \"A class for testing the place order feature\"\n\n def test_place_orders(self):\n order1 = {\n \"order_food_id\": 1,\n \"order_quantity\": 10,}\n\n order2 = {\n \"order_food_id\": 2,\n \"order_quantity\": 10, }\n\n food = {\n \"food_name\": \"fish\",\n \"food_price\": 22\n }\n\n food2 = {\n \"food_name\": \"rice\",\n \"food_price\": 22\n }\n\n invalid_order = {\n \"invalid\": 1,\n \"order_quantity\": 10,\n\n }\n\n invalid_order_input = {\n \"food_name\": 2,\n \"order_quantity\": \"\",\n\n }\n\n # test place order\n self.register_user(self.new_user)\n resp1 = self.login_user(self.resgistered_user)\n token = 
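A quick usage check for the dummy-head pattern in the addTwoNumbers record above, assuming its ListNode and Solution classes are in scope (the two helpers below are illustrative additions, not from the source). The digits are stored least-significant first, so 342 + 465 = 807 looks like this:

def to_list(digits):
    head = ListNode(digits[0])
    cur = head
    for d in digits[1:]:
        cur.next = ListNode(d)
        cur = cur.next
    return head

def to_digits(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

# 342 + 465 = 807, least-significant digit first
result = Solution().addTwoNumbers(to_list([2, 4, 3]), to_list([5, 6, 4]))
assert to_digits(result) == [7, 0, 8]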
str(resp1.json[\"token\"])\n\n #make user admin\n self.make_admin(\"mos\")\n\n #check list length before post\n\n resp2 = self.get_all_orders(token)\n self.assertEqual(len(resp2.json), 0)\n\n #test post oder on empty menu\n\n resp6 = self.post_order(order1, token)\n self.assertEqual(resp6.status_code, 404)\n\n #check_list_after post\n\n self.post_food(food, token)\n self.post_food(food2, token)\n\n # post orders\n self.post_order(order1,token)\n resp10 = self.post_order(order2, token)\n self.assertEqual(resp10.status_code, 201)\n\n resp3 = self.get_all_orders(token)\n self.assertEqual(len(resp3.json), 2)\n self.assertEqual(resp3.status_code, 200)\n\n #test post invalid food object\n resp7 = self.post_order(invalid_order, token)\n self.assertEqual(resp7.status_code, 400)\n\n #test post invalid input\n resp8 = self.post_order(invalid_order, token)\n self.assertEqual(resp8.status_code, 400)\n\n #test post existing order\n resp9 = self.post_order(order1, token)\n self.assertEqual(resp9.status_code, 409)\n\n\n\n\n\n\n\n # resp1 = self.post_food(food, token)\n # self.assertEqual(resp1.status_code, 201)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mozzy22/Fast-Food-Fast-API","sub_path":"tests/test_place_order.py","file_name":"test_place_order.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23472733558","text":"products = [\n {\n '_id': '1',\n 'name': 'Cabrinha Moto 2020',\n 'image': '/images/moto2020.jpg',\n 'description': 'Versatile kite for beginners and advanced riders',\n 'brand': 'Cabrinha',\n 'category': 'Kites',\n 'price': 299.99,\n 'countInStock': 10,\n 'rating': 4.5,\n 'numReviews': 12,\n },\n {\n '_id': '2',\n 'name': 'Duotone Rebel 2020',\n 'image': '/images/rebel2021.png',\n 'description': 'Versatile kite for beginners and advanced riders',\n 'brand': 'Duotone',\n 'category': 'Kites',\n 'price': 289.99,\n 'countInStock': 0,\n 'rating': 4.0,\n 'numReviews': 2,\n },\n\n]","repo_name":"PiotrFiedoruk/KiteStore-Django-Rest-React","sub_path":"backend/base/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30125289258","text":"t = int(input())\nfor case_num in range(t):\n a, b, n, s = map(int, input().split(' '))\n if b > n:\n a += (b - n) // n\n b = b % n + n\n na = s // n\n nb = s % n\n print(\"YES\" if (a >= na and b >= nb) or (\n a == na - 1 and b >= nb + n) else \"NO\")\n","repo_name":"lucifer1004/codeforces","sub_path":"1256/a/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"72429883930","text":"import sys\nN = sys.stdin.readline().rstrip()\nPAT1 = sys.stdin.readline().rstrip()#将输入数字当作字符串处理 会比较方便也可以读入整数后对10整除 读入数组处理\nPAT2 = sys.stdin.readline().rstrip()\nlen1 = len( PAT1 )\nlen2 = len( PAT2 )\ndifference = len1 - len2#比较两个输入的长度 短的一方补0,方便统一计算\nif difference > 0 :\n PAT2 = ''.join(['0'*difference , PAT2 ])\n length = len1\nelif difference < 0 :\n PAT1 = '0'*(-difference) + PAT1\n length = len2\nanswer = [ 0 for i in range(len(N)+1)]#创造答案的格式,注意由于有进位的存在 所以位数最多比进制表 N的进制还要多1位\nbegin = -1 #从末位开始计算\nforward = 0 #记录是否进位 0不进 1 进\nfor i in N[::-1]:\n num = int( i )\n number = int( PAT1[begin] ) + int( PAT2[begin] )+forward\n forward = 0\n if not num:\n num=10\n if number >= num:\n number -= num\n forward 
= 1\n answer[begin] = number\n begin -= 1\nif forward == 1: # if the final carry is 1, prepend a 1 at the most significant position\n answer[begin] = 1\nfor i,value in enumerate(answer):\n if value:\n begin = i\n break\nfor i in answer[i:]:\n print(i,end='')\n","repo_name":"yhz542/PAT-Basic-Level-Practice","sub_path":"1074. 宇宙无敌加法器.py","file_name":"1074. 宇宙无敌加法器.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39274552223","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\nfrom collections import defaultdict\n\ngenre_char = ['A', 'B', 'C', 'D', 'E']\nscore_input = list(map(float, input().split()))\nscore = defaultdict(float)\nfor idx, c in enumerate(genre_char):\n\tscore[c] = score_input[idx]\n\nn, m = map(int, input().split())\n\nY = []\nO = []\ngenre = []\n\nfor i in range(n):\n\tt = list(input())\n\tfor j in range(m):\n\t\tif t[j] == 'Y':\n\t\t\tY.append((i, j))\n\t\telif t[j] == 'O':\n\t\t\tO.append((i, j))\n\nfor i in range(n):\n\tgenre.append(list(input()))\n\ntmp = []\nfor pos in Y:\n\ttmp.append([genre[pos[0]][pos[1]], pos])\nY = tmp\n\ntmp = []\nfor pos in O:\n\ttmp.append([genre[pos[0]][pos[1]], pos])\nO = tmp\n\nY.sort(key=lambda x: x[1][1])\nY.sort(key=lambda x: x[1][0])\nY.sort(key=lambda x: score[x[0]], reverse=True)\n\nO.sort(key=lambda x: x[1][1])\nO.sort(key=lambda x: x[1][0])\nO.sort(key=lambda x: score[x[0]], reverse=True)\n\nfor y in Y:\n\tprint(f'{y[0]} {score[y[0]]} {y[1][0]} {y[1][1]}')\nfor o in O:\n\tprint(f'{o[0]} {score[o[0]]} {o[1][0]} {o[1][1]}')\n\n\"\"\"\n4.0 3.0 2.1 4.3 5.0\n2 3\nWYO\nYYO\nABC\nDCE\n\"\"\"","repo_name":"altmshfkgudtjr/Problem-Solving","sub_path":"SCF 2021/1차/안본콘텐츠없게해주세요.py","file_name":"안본콘텐츠없게해주세요.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1031682849","text":"from django.shortcuts import render, redirect, reverse, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib import messages\nfrom products.models import Product\n\n\ndef view_bag(request):\n \"\"\" a view to return the bag contents\"\"\"\n\n return render(request, 'bag/bag.html')\n\n\ndef add_to_bag(request, item_id):\n \"\"\"add quantity of a specific item to the bag\"\"\"\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n\n roast = None\n grind = None\n if 'product_roast' in request.POST:\n roast = request.POST['product_roast']\n if 'product_grind' in request.POST:\n grind = request.POST['product_grind']\n\n bag = request.session.get('bag', {})\n\n if roast or grind:\n if item_id in list(bag.keys()):\n is_new_item = True\n counter = 0\n for bean_item in bag[item_id]['bean_items']:\n if bean_item['roast'] == roast and bean_item['grind'] == grind:\n bean_item['qty'] += quantity\n is_new_item = False\n break\n counter += 1\n if is_new_item:\n # from https://stackoverflow.com/questions/39375250/in-python-append-dictionary-value-with-each-element-in-array\n new_bean_item = [{\"roast\": roast, \"grind\": grind, \"qty\": quantity}]\n bag[item_id]['bean_items'].extend(new_bean_item)\n messages.success(request,\n (f'Added {quantity} {roast.upper()} roasted {grind.upper()} ground '\n f'{product.name} to your bag.'))\n else:\n messages.success(request,\n (f'Updated {roast.upper()} roasted {grind.upper()} ground '\n f'{product.name} quantity to '\n 
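The PAT 1074 record above adds two numbers in a mixed-radix system: digit i of the base table N gives the base of column i, with 0 standing for base 10. The same idea as a self-contained function with a small worked check (function name and example values are illustrative; the numbers are assumed no longer than the base table, as in the original problem):

def mixed_radix_add(bases, a, b):
    # pad the numbers to the width of the base table
    width = len(bases)
    a, b = a.zfill(width), b.zfill(width)
    carry, digits = 0, []
    for base_ch, da, db in zip(bases[::-1], a[::-1], b[::-1]):
        base = int(base_ch) or 10  # a '0' in the base table means base 10
        carry, digit = divmod(int(da) + int(db) + carry, base)
        digits.append(str(digit))
    if carry:
        digits.append(str(carry))  # final carry becomes the leading digit
    return ''.join(reversed(digits)).lstrip('0') or '0'

# base table '30': units column is base 10, next column is base 3;
# '21' is 2*10+1 = 21 and '12' is 1*10+2 = 12, and 21+12 = 33 = '103'
assert mixed_radix_add('30', '21', '12') == '103'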
f'{bag[item_id][\"bean_items\"][counter][\"qty\"]}'))\n\n else:\n bag[item_id] = {'bean_items': [{\"roast\": roast, \"grind\": grind, \"qty\": quantity}]}\n messages.success(request,\n (f'Added {quantity} {roast.upper()} roasted {grind.upper()} ground '\n f'{product.name} to your bag.'))\n else:\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(request, (f'Updated {product.name} ' f'quantity to {bag[item_id]}'))\n else:\n bag[item_id] = quantity\n messages.success(request, f'Added {quantity} {product.name} to your bag')\n\n request.session['bag'] = bag\n return redirect(redirect_url)\n\n\ndef adjust_bag(request, item_id):\n \"\"\"Adjust the quantity of the specified product to the specified amount\"\"\"\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n roast = None\n grind = None\n if 'product_roast' in request.POST:\n roast = request.POST['product_roast']\n if 'product_grind' in request.POST:\n grind = request.POST['product_grind']\n\n bag = request.session.get('bag', {})\n\n if roast or grind:\n counter = 0\n #loop through the items and find the matching bean_item\n for bean_item in bag[item_id]['bean_items']:\n if bean_item['roast'] == roast and bean_item['grind'] == grind:\n if quantity > 0:\n bean_item['qty'] = quantity\n messages.success(request,\n (f'Updated {roast.upper()} roasted {grind.upper()} ground '\n f'{product.name} quantity to {quantity}'))\n else:\n # if qty is zero remove the bean item from the bag\n del bag[item_id]['bean_items'][counter]\n # if the bean items are empty the item is also removed\n if len(bag[item_id]['bean_items']) == 0:\n bag.pop(item_id)\n messages.success(request,\n (f'Removed {roast.upper()} roasted {grind.upper()} ground '\n f'{product.name} from your bag'))\n # get out\n break\n # move to the next item\n counter =+ 1\n else:\n if quantity > 0:\n bag[item_id] = quantity\n messages.success(request, (f'Updated {product.name} ' f'quantity to {bag[item_id]}'))\n else:\n bag.pop(item_id)\n messages.success(request, (f'Updated {product.name} from your bag'))\n\n request.session['bag'] = bag\n return redirect(reverse('view_bag'))\n\n\ndef remove_from_bag(request, item_id):\n \"\"\"Remove the item from the shopping bag\"\"\"\n\n try:\n product = get_object_or_404(Product, pk=item_id)\n roast = None\n grind = None\n if 'roast' in request.POST:\n roast = request.POST['roast']\n if 'grind' in request.POST:\n grind = request.POST['grind']\n\n bag = request.session.get('bag', {})\n\n if roast or grind:\n counter = 0\n # loop through the items and find the matching bean_item\n for bean_item in bag[item_id]['bean_items']:\n if bean_item['roast'] == roast and bean_item['grind'] == grind:\n # remove the bean item from the bag\n del bag[item_id]['bean_items'][counter]\n # if the bean items are empty the item is also removed\n if len(bag[item_id]['bean_items']) == 0:\n bag.pop(item_id)\n messages.success(request,\n (f'Removed {roast.upper()} roasted {grind.upper()} ground '\n f'{product.name} from your bag'))\n # and exit\n break\n counter += 1\n else:\n bag.pop(item_id)\n messages.success(request, (f'Removed {product.name} from your bag'))\n\n request.session['bag'] = bag\n return HttpResponse(status=200)\n\n except Exception as exception:\n messages.error(request, f'Error removing item: {exception}')\n return 
HttpResponse(status=500,)","repo_name":"Code-Institute-Submissions/Morpheus-23-Beans_AugResub","sub_path":"bag/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19084552612","text":"import numpy as np\nimport os\nimport pp_dursim\nimport volat_calc\nimport scipy.constants as si\nfrom water_calc import water_calc\n\n# define function\ndef cham_up(sumt, Pnow, \n\tlight_time_cnt, tnew, nuc_ad, nucv1, nucv2, nucv3, \n\tnew_part_sum1, update_count,\n\tinjectt, gasinj_cnt, inj_indx, \n\tCt, pmode, pconc, pconct, seedt_cnt, num_comp, y0, y, N_perbin0, \n\tmean_rad, corei, seedx, seed_name, lowsize, uppsize, num_sb, MV, rad0, radn, std, \n\tH2Oi, rbou, infx_cnt, Cfactor, diff_vol, \n\tDStar_org, tempt_cnt, RHt_cnt, nuci, nuc_comp, y_mw, \n\ttemp_now, gpp_stab, t00, x, pcont, pcontf, Cinfl_now, surfT, act_coeff, \n\ttot_in_res, Compti, self, vol_Comp, volP):\n\n\t# inputs: ------------------------------------------------\n\t# sumt - cumulative time through simulation (s)\n\t# self.TEMP - temperature in chamber (K)\n\t# self.tempt - times that temperatures reached (s)\n\t# Pnow - pressure in chamber (Pa)\n\t# self.light_stat - status of lights\n\t# self.light_time - times that light attain status (s)\n\t# light_time_cnt - light status counter\n\t# self.light_ad - marker for whether to change time interval \n\t#\tin response to changing natural light intensity\n\t# tnew - time interval between chamber updates (s)\n\t# nuc_ad - flag for whether user wants time step adapted \n\t# to nucleation\n\t# nucv1 - nucleation parameter one\n\t# nucv2 - nucleation parameter two\n\t# nucv3 - nucleation parameter three\n\t# new_part_sum1 - total number concentration of new \n\t#\tparticles so far (#/cm3 (air))\n\t# self.update_stp - time interval between operator-split \n\t#\tupdates (s)\n\t# update_count - count since operator-split last \n\t#\tupdated (s)\n\t# self.lat - latitude (degrees)\n\t# self.lon - longitude (degrees)\n\t# self.dayOfYear - number of days since 31st December\n\t# self.photo_path - photochemistry parameter file\n\t# self.af_path - actinic flux file\n\t# injectt - time of instantaneous injections of \n\t#\tcomponents (s)\n\t# gasinj_cnt - count on injection times of component(s)\n\t# inj_indx - index of components being instantaneously injected after \n\t#\texperiment start\n\t# Ct - concentration(s) (ppb) of component(s) injected \n\t#\tinstantaneously after experiment start\n\t# pmode - whether particle number size distributions stated explicitly or by mode\n\t# pconc - concentration of injected particles (# particles/cm3 (air))\n\t# pconct - times of particle injection (s)\n\t# seedt_cnt - count on injections of particles\n\t# num_comp - number of components\n\t# y0 - concentration of components prior to integration (# molecules/cm3 (air))\n\t# y - variable concentration of components prior to integration (# molecules/cm3 (air))\n\t# N_perbin0 - concentration of particles (# particles/cm3 (air)) at start of time interval\n\t# mean_rad - mean radius for particle number size \n\t#\tdistribution (um)\n\t# corei - index of core component\n\t# seedx - mole ratio of non-water components comprising seed particles\n\t# seed_name - name(s) of component(s) comprising seed \n\t#\tparticles\n\t# lowsize - lower size bin boundary (um)\n\t# uppsize - upper size bin boundary (um)\n\t# num_sb - number of size bins (including wall if turned on)\n\t# MV - molar volume of 
components (cm3/mol)\n\t# rad0 - initial radius at size bin centres (um)\n\t# radn - current radius at size bin centres (um)\n\t# std - standard deviation for injected particle number size \n\t#\tdistributions\n\t# self.y_dens - component densities (kg/m3)\n\t# H2Oi - index of water\n\t# rbou - size bin radius bounds (um)\n\t# self.con_infl_t - times for constant influxes (s)\n\t# infx_cnt - count on constant influx occurrences\n\t# self.Cinfl - influx rate for components with constant influx (ppb/s)\n\t# self.wall_on - marker for whether wall is on\n\t# Cfactor - conversion factor from ppb to molecules/cm3 (air)\n\t# self.seedi - index of seed component(s)\n\t# diff_vol - diffusion volumes of components according to \n\t#\tFuller et al. (1969)\n\t# DStar_org - gas-phase diffusion coefficients of components (cm2/s)\n\t# self.RH - relative humidities (fraction 0-1)\n\t# self.RHt - times through experiment at which relative humidities reached (s)\n\t# tempt_cnt - count on temperatures\n\t# RHt_cnt - relative humidity counts\n\t# self.Pybel_objects - the pybel identifiers for components\n\t# nuci - index of nucleating component\n\t# nuc_comp - the nucleating component\n\t# y_mw - molar weight of components (g/mol)\n\t# temp_now - chamber temperature (K) prior to this update\n\t# self.Psat - saturation vapour pressures of components at the current \n\t#\tchamber temperature (# molecules/cm3)\n\t# gpp_stab - flag for whether to linearly interpolate any change \n\t# \tto chamber conditions (equals -1 if change needed)\n\t# t00 - the initial integration step on the current integration step (s)\n\t# x - starting sizes of particles (um)\n\t# pcont - flags for whether particle injection instantaneous or continuous\n\t# pcontf - whether current state of particle injection is continuous\n\t# Cinfl_now - influx rate of components with continuous influx (ppb/s)\n\t# surfT - surface tension of particles (g/s2 == mN/m == dyn/cm)\n\t# act_coeff - activity coefficient of components\n\t# self.seed_eq_wat - whether seed particles to be equilibrated with water prior to ODE solver\n\t# self.Vwat_inc - whether suppled seed particle volume contains equilibrated water\n\t# tot_in_res - count on total injected concentration of injected components (ug/m3)\n\t# Compti - index for total injection record for instantaneously injected components\n\t# self.cont_inf_reci - index for total injection record for continuously injected components\n\t# self.con_infl_indx - index for continuously injected components from all components\n\t# -----------------------------------------------------------------------\n\t\n\t# ensure N_perbin has a value\n\tN_perbin = np.zeros((N_perbin0.shape[0], N_perbin0.shape[1])) \n\tN_perbin[:] = N_perbin0[:] # particle number concentration (# particles/cm3)\n\n\t# check on dilution factor setting --------------------------\n\tself.dil_fac_cnt = sum(self.dil_fact <= (sumt))-1\n\n\t# update dilution factor\n\tself.dil_fac_now = self.dil_fac[self.dil_fac_cnt]\n\t\t\n\t# check on change of light setting --------------------------------------\n\n\t# begin by assuming no change to time interval required due to chamber \n\t# condition/nucleation\n\tbc_red = 0\n\t\n\tif ((len(self.light_time)) > 0):\n\t\n\t\t# whether lights on (>1) or off (0) during this step\n\t\tself.light_stat_now = self.light_stat[int(sum(self.light_time<=sumt)-1)]\n\t\t\n\t\t# check whether changes occur at start of this time step\n\t\tif (sumt == self.light_time[light_time_cnt] and light_time_cnt>-1):\n\t\t\t\n\t\t\tif 
(light_time_cnt<(len(self.light_stat)-1)):\n\t\t\t\tlight_time_cnt += 1 # keep count of light setting index\n\t\t\telse:\n\t\t\t\tlight_time_cnt = -1 # reached end\n\t\t\t# reset flag for time step reduction due to chamber condition\n\t\t\tbc_red = 0\n\t\t\t\t\n\t\t\n\t\t# check whether light on/off changes during proposed integration time step\n\t\tif (sumt+tnew > self.light_time[light_time_cnt] and light_time_cnt != -1):\n\t\t\t# if yes, then reset integration time step so that next step coincides \n\t\t\t# with change\n\t\t\ttnew = self.light_time[light_time_cnt]-sumt\n\t\t\tbc_red = 1 # flag for time step reduction due to boundary conditions\n\t\t\t\n\t\t# if reached final status of lights, then keep this status\n\t\tif (light_time_cnt == -1):\n\t\t\tself.light_stat_now = self.light_stat[light_time_cnt]\n \n\t# if lights are on during this step and lighting is natural, then check whether\n\t# proposed time step needs reducing to limit change to light intensity, if this\n\t# time interval adaptation is requested\n\t# if using natural light\n\tcwd = os.getcwd() # address of current working directory\n\n\tif (self.light_stat_now >= 1 and self.photo_path == str(cwd + '/PyCHAM/photofiles/MCMv3.2') and self.af_path == 'no' and self.light_ad == 1):\t\n\t\t# check time step required to limit change to rate of \n\t\t# MCM photochemical equation number 6, \n\t\t# which the unit test for\n\t\t# the zenith module shows to be most photosensitive (compared to\n\t\t# other photochemical equations)\n\t\timport zenith\n\t\t# photochemical rate now\n\t\tself.sumt = sumt\n\t\t(secxn, cosxn) = zenith.zenith(self)\n\t\tJn = 1.747e-1*cosxn**(0.155)*np.exp(-1.*0.125*secxn)\n\t\t\n\t\t# photochemical rate after proposed time step\n\t\tself.sumt += tnew # temporarily changed (reversed below)\n\t\t(secxt, cosxt) = zenith.zenith(self)\n\t\tself.sumt -= tnew # reverse the temporary change above\n\t\t\n\t\tJt = 1.747e-1*cosxt**(0.155)*np.exp(-1.*0.125*secxt)\n\t\t\n\t\t# iteratively reduce proposed time interval until photochemical\n\t\t# rate changes by an acceptable amount\n\t\twhile (abs(Jt-Jn) > 5.e-3):\n\t\t\ttnew = tnew*0.9\n\t\t\tself.sumt += tnew # temporarily changed (reversed below)\n\t\t\t# photochemical rate after proposed time step\n\t\t\t(secxt, cosxt) = zenith.zenith(self)\n\t\t\tself.sumt -= tnew # reverse the temporary change above\n\t\t\tJt = 1.747e-1*cosxt**(0.155)*np.exp(-1.*0.125*secxt)\n\t\t\tbc_red = 1\n\t\t\t\n\t# check on updates to temperature (K) --------------------------------------\t\n\tif (len(self.TEMP) > 1): # because a temperature must be given for experiment start\n\t\n\t\t# check whether changes occur at start of this time step\n\t\tif (sumt >= self.tempt[tempt_cnt] and tempt_cnt != -1):\n\n\t\t\t# new temperature (K)\n\t\t\tif (gpp_stab != -1): # if no linear interpolation required\n\t\t\t\n\t\t\t\ttemp_nown = self.TEMP[tempt_cnt] # new temperature (K)\n\t\t\t\tif (tempt_cnt < (len(self.tempt)-1)):\n\t\t\t\t\ttempt_cnt += 1 # keep count of temperature setting index\n\t\t\t\telse:\n\t\t\t\t\ttempt_cnt = -1 # reached end\n\t\t\t\tbc_red = 0 # reset flag for time step reduction due to boundary conditions\n\t\t\telse:\n\t\t\t\t# new temperature (K)\n\t\t\t\ttemp_nown = np.interp(tnew, [0, t00], [temp_now, self.TEMP[tempt_cnt]])\n\t\t\t\tbc_red = 1 # reset flag for time step reduction due to boundary conditions\n\t\t\t\n\t\t\t# update vapour pressure of water (log10(atm)),\n\t\t\t# but don't update gas-phase concentration of water, since\n\t\t\t# RH should be allowed to vary with 
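The natural-light check above uses the MCM photolysis parameterisation J = l*cos(x)**m*exp(-n*sec(x)) with l = 1.747e-1, m = 0.155, n = 0.125, and shrinks the step by 10 % until J changes by less than 5e-3 over the step. A standalone sketch of that loop; the toy zenith function is purely illustrative (in the record the values come from PyCHAM's zenith module):

import numpy as np

def j_rate(cosx, secx, l=1.747e-1, m=0.155, n=0.125):
    # MCM photolysis parameterisation for photochemical equation 6
    return l*cosx**m*np.exp(-n*secx)

def shrink_step(tnew, zenith_at, t0, tol=5.e-3):
    # reduce the proposed time step until the photolysis rate
    # changes by less than tol over the step
    Jn = j_rate(*zenith_at(t0))
    while abs(j_rate(*zenith_at(t0+tnew))-Jn) > tol:
        tnew = tnew*0.9
    return tnew

# toy zenith: returns (cos(x), sec(x)) with the angle growing linearly in time
toy = lambda t: (np.cos(1.e-4*t), 1./np.cos(1.e-4*t))
print(shrink_step(600., toy, t0=0.))  # prints 600.0 (no reduction needed here)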
temperature\n\t\t\t[_, Psat_water, _] = water_calc(temp_nown, self.RH[RHt_cnt], si.N_A)\n\n\t\t\t# update vapour pressures of all components (# molecules/cm3 and Pa), \n\t\t\t# ignore density output\n\t\t\t[self, _] = volat_calc.volat_calc(0, temp_nown, H2Oi, \n\t\t\t\t\t\t\tnum_comp, Psat_water, vol_Comp, volP, 0, corei, seed_name, \n\t\t\t\t\t\t\tpconc, 0, 0.0, [], 1, nuci, nuc_comp, self)\n\t\t\t\n\t\t\t# according to the ideal gas law, air pressure (Pa) inside chamber\n\t\t\t# is proportional to temperature, therefore pressure changes by \n\t\t\t# the same factor \n\t\t\tPnow = Pnow*(temp_nown/temp_now)\n\t\t\t\n\t\t\t# update ppb to molecules/cm3 conversion factor concentrations\n\t\t\t# total number of molecules in 1 cc air using ideal gas law. R has units cc.Pa/K.mol\n\t\t\tntot = Pnow*(si.N_A/((si.R*1.e6)*temp_nown))\n\t\t\t# one billionth of number of molecules in chamber unit volume\n\t\t\tCfactor = ntot*1.e-9 # ppb to molecules/cc conversion factor\n\t\t\t\n\t\t\t# dynamic viscosity of air (kg/m.s), eq. 4.54 of Jacobson 2005\n\t\t\tdyn_visc = 1.8325e-5*((416.16/(temp_nown+120.))*(temp_nown/296.16)**1.5)\n\t\n\t\t\tma = 28.966e-3 # molecular weight of air (kg/mol) (Eq. 16.17 Jacobson 2005)\n\t\t\t\n\t\t\t# air density (kg/m3 (air)), ideal gas law\n\t\t\trho_a = (Pnow*ma)/((si.R)*temp_nown)\n\t\t\t\t\t\t\n\t\t\t# update mean free path and thermal speed\n\t\t\t# mean thermal speed of each molecule (m/s) (11.151 Jacobson 2005)\n\t\t\t# note that we need the weight of one molecule, which is why y_mw is divided by\n\t\t\t# Avogadro's constant, and we need it in kg, which is why we multiply by 1e-3\n\t\t\ttherm_sp = ((8.*si.k*temp_nown)/(np.pi*(y_mw/si.N_A)*1.e-3))**0.5\n\t\t\t\n\t\t\t# mean free path (m) for each component (15.24 of Jacobson 2005)\n\t\t\t# molecular weight of air (28.966 g/mol taken from table 16.1 Jacobson 2005)\n\t\t\tmfp = (2.*dyn_visc/(rho_a*therm_sp)).reshape(-1, 1)\n\t\t\t\n\t\t\t# diffusion coefficient (m2/s) of components in gas phase (air), eq 4.1.4 of\n\t\t\t# the Taylor (1993) textbook \n\t\t\t# Multicomponent Mass Transfer, ISBN: 0-471-57417-1, note diffusion \n\t\t\t# volume for air (19.7) taken from Table 4.1 of Taylor (1993) and mw of \n\t\t\t# air converted to g/mol from kg/mol. This is a replication of the original method \t\t\t\n\t\t\t# from Fuller et al. 
(1969): doi.org/10.1021/j100845a020\n\t\t\tDStar_org = 1.013e-2*temp_nown**1.75*(((y_mw+ma*1.e3)/(y_mw*ma*1.e3))**0.5)/(Pnow*(diff_vol**(1./3.)+19.7**(1./3.))**2.)\n\t\t\t# convert to cm2/s\n\t\t\tDStar_org = DStar_org*1.e4\n\t\t\t\n\t\t\ttemp_now = temp_nown # update current temperature (K)\n\t\t\n\t\t# check whether temperature changes during proposed integration time step\n\t\tif (sumt+tnew > self.tempt[tempt_cnt] and tempt_cnt != -1 and gpp_stab != -1):\n\t\t\t# if yes, then reset integration time step so that next step coincides \n\t\t\t# with change\n\t\t\ttnew = self.tempt[tempt_cnt]-sumt\n\t\t\tbc_red = 1 # flag for time step reduction due to boundary conditions\n\t\t\n\tif (len(self.TEMP) == 1):\n\t\ttemp_now = self.TEMP[0] # temperature constant if only one value given\n\t\n\t# check on instantaneous injection of components ---------------------------------------\n\tif (len(injectt) > 0 and gasinj_cnt > -1): # if any injections occur\n\t\n\t\t# check whether changes occur at start of this time step\n\t\tif (sumt >= injectt[gasinj_cnt] and gasinj_cnt != -1):\n\t\t\n\t\t\tif (gpp_stab != -1): # if no linear interpolation required\n\t\t\t\tCt_gain = Ct[:, gasinj_cnt]\n\t\t\t\t\n\t\t\t\tif (gasinj_cnt < (Ct.shape[1]-1)):\n\t\t\t\t\tgasinj_cnt += 1 # update count on injections\n\t\t\t\telse:\n\t\t\t\t\tgasinj_cnt = -1 # reached end\n\t\t\t\tbc_red = 0 # reset flag for time step reduction due to boundary conditions\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t# loop through components with instantaneous injection\n\t\t\t\tinj_cntn = 0 # keep count on components\n\t\t\t\tCt_gain = np.zeros((len(inj_indx))) # empty results\n\t\t\t\tfor inj_indxi in inj_indx:\n\t\t\t\t\tCt_gain[inj_cntn] = np. interp(tnew, [0, t00], [y0[inj_indxi]/Cfactor, Ct[inj_cntn, gasinj_cnt]])\n\t\t\t\t\tinj_cntn += 1 # keep count on components\n\t\t\t\tbc_red = 1 # reset flag for time step reduction due to boundary conditions\n\t\t\n\t\t\t# record additional injection of components (ug/m3)\n\t\t\ttot_in_res[Compti] += (((Ct_gain*Cfactor-y[inj_indx])/si.N_A)*(y_mw[inj_indx].squeeze()))*1.e12\n\n\t\t\t# account for change in gas-phase concentration,\n\t\t\t# convert from ppb to molecules/cm3 (air)\n\t\t\ty[inj_indx] = Ct_gain*Cfactor\n\t\t\t\n\t\t# check whether changes occur during proposed integration time step\n\t\t# and that time step has not been forced to reduce due to unstable ode solver\n\t\tif (sumt+tnew > injectt[gasinj_cnt] and gasinj_cnt != -1 and gpp_stab != -1):\n\t\t\t# if yes, then reset integration time step so that next step coincides \n\t\t\t# with change\n\t\t\ttnew = injectt[gasinj_cnt]-sumt\n\t\t\tbc_red = 1 # flag for time step reduction due to boundary conditions\n\t\n\t# check on instantaneous change in relative humidity ---------------------------------------\n\tif (len(self.RHt) > 0 and RHt_cnt > -1): # if any injections occur\n\t\n\t\t# check whether changes occur at start of this time step\n\t\tif (sumt >= self.RHt[RHt_cnt] and RHt_cnt != -1):\n\t\t\n\t\t\tif (gpp_stab != -1): # if no linear interpolation required\n\t\t\t\tRHn = self.RH[RHt_cnt]\n\t\t\t\t\n\t\t\t\tif (RHt_cnt < (self.RHt.shape[0]-1)):\n\t\t\t\t\tRHt_cnt += 1 # update count on RH\n\t\t\t\telse:\n\t\t\t\t\tRHt_cnt = -1 # reached end\n\t\t\t\t# reset flag for time step reduction due to boundary conditions\n\t\t\t\tbc_red = 0\t\t\t\t\n\t\t\telse:\n\t\t\t\tRHn = np. 
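The gas-phase diffusion step above follows Fuller et al. (1969): D = 1.013e-2 * T**1.75 * ((M + M_air)/(M*M_air))**0.5 / (P * (Vd**(1/3) + 19.7**(1/3))**2), with T in K, P in Pa, molar masses in g/mol and 19.7 the diffusion volume of air. A one-component sketch using the record's constants (the example molar mass and diffusion volume are placeholders, not from the source):

import numpy as np

def fuller_diffusion(T, P, mw, diff_vol, ma=28.966, va=19.7):
    # binary diffusion coefficient of a component in air,
    # Fuller et al. (1969); T in K, P in Pa, mw and ma in g/mol
    d = 1.013e-2*T**1.75*(((mw+ma)/(mw*ma))**0.5)/(P*(diff_vol**(1./3.)+va**(1./3.))**2.)
    return d*1.e4  # convert m2/s to cm2/s, as in the record

# e.g. a ~100 g/mol organic with an assumed diffusion volume of 120
print(fuller_diffusion(298.15, 101325., 100., 120.))  # ~0.08 cm2/s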
interp(tnew, [0, t00], [self.RH[RHt_cnt-1], self.RH[RHt_cnt]])\n\t\t\t\tbc_red = 1 # reset flag for time step reduction due to boundary conditions\n\t\t\n\t\t\t# update vapour pressure of water (log10(atm)), and change \n\t\t\t# gas-phase concentration of water vapour since \n\t\t\t# RH stays as stated in the RH and RHt model variables\n\t\t\t[y[H2Oi], _, _] = water_calc(temp_now, RHn, si.N_A)\n\t\t\t\t\n\t\t# check whether changes occur during next proposed integration time step\n\t\t# and that time step has not been forced to reduce due to unstable ode solvers\n\t\tif ((sumt+tnew > self.RHt[RHt_cnt]) and (RHt_cnt != -1) and gpp_stab != -1):\n\t\t\t# if yes, then reset integration time step so that next step coincides \n\t\t\t# with change\n\t\t\ttnew = self.RHt[RHt_cnt]-sumt\n\t\t\tbc_red = 1 # flag for time step reduction due to boundary conditions\n\t\n\t# get whether next/current injection of seed is instantaneous or continuous\n\tpcontf = pcont[0, seedt_cnt]\n\t\n\t# check on injection of particles --------------------------------------\n\t# filler for fraction of new seed particles injected so far\n\tpconcn_frac = 0.\n\t\n\t# if influx occurs and we are not on the cham_up \n\t# call from the rec module (which has tnew=0)\n\t# note, if this particle influx section called \n\t# during the call to rec, then continuous influx is \n\t# cancelled when called later\n\tif ((sum(pconct[0, :]) > 0) and (seedt_cnt > -1) and (num_sb-self.wall_on > 0) and tnew > 0.):\n\t\n\t\t# in case particle influx repeated every 24 hours\n\t\tif (self.pconctf == 1 and sumt >= 24.*3.6e3):\n\t\t\tpinsumt = (sumt % (24.*3.6e3))\n\t\t\tif (sumt >= (24.*3.6e3) and (sumt % (24.*3.6e3)) == 0):\n\t\t\t\tpinsumt = 0\n\t\t\t# if seedt_cnt already been reset to zero and time \n\t\t\t# through simulation not yet caught up with reset \n\t\t\t# time, then prevent influx at reset time occurring now\n\t\telse:\n\t\t\tpinsumt = sumt\n\n\t\tif (seedt_cnt == 0):\n\t\t\t# if on first 24 hours\n\t\t\tif (sumt > 0. 
and sumt < (24.*3.6e3)):\n\t\t\t\tpinsumt = -1\n\t\t\t# if already past 24 hours\n\t\t\tif (sumt % (24.*3.6e3) > pconct[0, seedt_cnt]):\n\t\t\t\tpinsumt = -1\n\t\t\n\t\t# check whether changes occur at start of this time step\n\t\tif (pinsumt >= pconct[0, seedt_cnt]):\n\t\t\t# if no linear interpolation required, \n\t\t\t# or injection continuous\n\t\t\tif (gpp_stab != -1 or pcontf == 1): \t\t\t\t\n\t\t\t\tpconcn = pconc[:, seedt_cnt]\n\n\t\t\t\tif (pmode == 0): # if in modal mode\n\t\t\t\t\tmean_radn = mean_rad[:, seedt_cnt]\n\t\t\t\t\tstdn = std[:, seedt_cnt]\n\t\t\t\telse: # if number concentrations given per size bin\n\t\t\t\t\tmean_radn = mean_rad\n\t\t\t\t\tstdn = std\n\t\t\t\n\t\t\t# if linear interpolation required and \n\t\t\t# instantaneous injection of seed\n\t\t\tif (gpp_stab == -1 and pcont[0, seedt_cnt] == 0):\n\t\t\t\tpconcn = np.zeros((pconc.shape[0])) # empty results array\n\t\t\t\t# loop through size bins for interpolation since interpolation is one dimensional\n\t\t\t\tfor i in range(num_sb-self.wall_on):\n\t\t\t\t\tpconcn[i] = np.interp(tnew, [0, t00], [pconc[i, seedt_cnt-1], pconc[i, seedt_cnt]])\n\t\t\t\t# remember the fraction of the number concentration added so far\n\t\t\t\tpconcn_frac = pconcn/pconc[:, seedt_cnt]\n\t\t\t\tbc_red = 1 # reset flag for time step reduction due to boundary conditions\n\n\n\t\t\t# account for instantaneous change in \n\t\t\t# seed particles (continuous change dealt with below)\n\t\t\tif (pcontf == 0):\n\t\t\t\t[y[num_comp:num_comp*(num_sb-self.wall_on+1)], N_perbin, _, \n\t\t\t\t\t_] = pp_dursim.pp_dursim(y0[num_comp:num_comp*(num_sb-self.wall_on+1)], \n\t\t\t\t\tN_perbin0, mean_radn, pmode, pconcn, seedx, lowsize, \n\t\t\t\t\tuppsize, num_comp, (num_sb-self.wall_on), MV, rad0, radn, \n\t\t\t\t\tstdn, H2Oi, rbou, y_mw, surfT, self.TEMP[tempt_cnt], act_coeff, \n\t\t\t\t\tpcontf, y[H2Oi], self)\n\t\t\t\t\t\n\t\t\t\t# turn off flag for ongoing injection of particles\n\t\t\t\tself.pcont_ongoing = 0\n\t\t\n\t\t\t# account for new continuous change in seed particles\n\t\t\tif (pcontf == 1):\t\n\t\t\t\t# turn on flag for ongoing injection of particles\n\t\t\t\tself.pcont_ongoing = 1\n\t\t\t\n\t\t\t# move count on particle injections up by one\n\t\t\tif (seedt_cnt < (pconct.shape[1]-1)):\n\t\t\t\t\n\t\t\t\t# if repeating every 24 hours\n\t\t\t\tif (self.pconctf == 1):\n\t\t\t\t\tif (pconct[0, seedt_cnt+1] >= 24.*3.6e3):\n\t\t\t\t\t\tseedt_cnt = 0 # reset if necessary\n\t\t\t\t\telse:\n\t\t\t\t\t\tseedt_cnt += 1\n\n\t\t\t\tif (self.pconctf == 0):\n\t\t\t\t\tseedt_cnt += 1\n\n\t\t\telse:\n\t\t\t\t# if influx times treated explicitly\n\t\t\t\tif (self.pconctf == 0):\n\t\t\t\t\tseedt_cnt = -1 # reached end\n\t\t\t\t# if repeating every 24 hours\n\t\t\t\tif (self.pconctf == 1):\n\t\t\t\t\tseedt_cnt = 0 # reset\n\t\n\t\t# check whether changes occur during proposed \n\t\t# integration time step\n\t\t# and that time step has not been forced to \n\t\t# reduce due to unstable ode solvers\n\t\tif (pinsumt+tnew > pconct[0, seedt_cnt] and seedt_cnt!=-1 and gpp_stab != -1): \n\n\t\t\t# in case influxes repeated every 24 hours \n\t\t\t# and seedt_cnt has been reset\n\t\t\tif (self.pconctf == 1):\n\t\t\t\t# in case seedt_cnt has been reset and \n\t\t\t\t# new time interval is going into the next day\t\n\t\t\t\tif (seedt_cnt == 0 and (pinsumt+tnew) >= (24.*3.6e3)):\n\t\t\t\t\tif (sumt+tnew) % (24.*3.6e3) > pconct[0, seedt_cnt]:\n\t\t\t\t\t\ttnew -= ((sumt+tnew) % (24.*3.6e3)-pconct[0, seedt_cnt])\n\t\t\t\t\t\tbc_red = 1 # time step change flag\n\t\t\t\t# in 
case seedt_cnt hasn't been reset\n\t\t\t\tif (seedt_cnt != 0):\n\t\t\t\t\tif (sumt+tnew) % (24.*3.6e3) > pconct[0, seedt_cnt]:\n\t\t\t\t\t\ttnew = pconct[0, seedt_cnt]-(sumt % (24.*3.6e3))\n\t\t\t\n\t\t\t# if influxes treated explicitly\n\t\t\telse:\n\t\t\t\t# if yes, then reset integration time step \n\t\t\t\t# so that next step coincides \n\t\t\t\t# with change\n\t\t\t\ttnew = pconct[0, seedt_cnt]-sumt\n\t\t\t\tbc_red = 1 # flag for time step reduction due to boundary conditions\n\t\n\t# account for ongoing continuous influx of particles\n\t# account for new continuous change in seed particles\n\tif (self.pcont_ongoing == 1):\n\t\t\n\t\tif (seedt_cnt != -1):\n\t\t\tpconcn = pconc[:, seedt_cnt-1]\n\t\t\t\n\t\t\tif (pmode == 0): # if in modal mode\n\t\t\t\tmean_radn = mean_rad[:, seedt_cnt-1]\n\t\t\t\tstdn = std[:, seedt_cnt-1]\n\t\t\t\n\t\t\t# if number concentration per size bin \n\t\t\t# given explicitly\n\t\t\tif (pmode == 1): \t\t\t\t\n\t\t\t\tmean_radn = mean_rad\n\t\t\t\tstdn = std\t\t\n\t\t\n\t\t\t# injected seed particle number concentration \n\t\t\t# integrated over proposed \n\t\t\t# time step (# particles/cm3)\n\t\t\tpconcn = pconc[:, seedt_cnt-1]*tnew\n\t\t\n\t\tif (seedt_cnt == -1):\n\t\t\tpconcn = pconc[:, -1]\n\t\t\tmean_radn = mean_rad[:, -1]\n\t\t\tstdn = std[:, -1]\t\t\n\t\t\n\t\t\t# injected seed particle number concentration \n\t\t\t# integrated over proposed \n\t\t\t# time step (# particles/cm3)\n\t\t\tpconcn = pconc[:, -1]*tnew\n\t\t\n\t\t[y[num_comp:num_comp*(num_sb-self.wall_on+1)], N_perbin, _, \n\t\t_] = pp_dursim.pp_dursim(y0[num_comp:num_comp*(num_sb-self.wall_on+1)], \n\t\tN_perbin0, mean_radn, pmode, pconcn, seedx, lowsize, \n\t\tuppsize, num_comp, (num_sb-self.wall_on), MV, rad0, radn, \n\t\tstdn, H2Oi, rbou, y_mw, surfT, self.TEMP[tempt_cnt], act_coeff, \n\t\tpcontf, y[H2Oi], self)\n\t\t\n\t# ----------------------------------------------------------------------------------------------------------\n\t\n\t# check on continuous influx of gas-phase components ----------------------------------------------\n\tif (len(self.con_infl_t) > 0): # if influx occurs\n\n\t\t# set relevant time if loop involved\n\t\tif (self.con_infl_tf == 0):\n\t\t\tci_sumt = sumt\n\t\tif (self.con_infl_tf == 1):\n\t\t\tif (sumt >= 24.*3.6e3):\n\t\t\t\tci_sumt = 0. + sumt % (24.*3.6e3)\n\t\t\telse:\n\t\t\t\tci_sumt = sumt\n\t\t\n\t\t# in case influxes begin after simulation start create a zero array of correct shape\n\t\t# note that final condition (infx_cnt==0) means that this only activated if we're\n\t\t# really at the first supplied influx point (because cham_up in rec_prep could have)\n\t\t# moved infx_cnt up by 1\n\t\tif (sumt == 0. and self.con_infl_t[infx_cnt] != 0. 
and infx_cnt == 0):\n\t\t\tCinfl_now = np.zeros((self.con_infl_C.shape[0], 1))\n\t\t\n\t\t# if the final input for influxes reached\n\t\tif (infx_cnt == -1):\n\n\t\t\t# influx of components now, convert from ppb/s to # molecules/cm3/s (air)\n\t\t\tif ('ppb' in self.abun_unit):\n\t\t\t\tCinfl_now = np.float64(self.con_infl_C[:, infx_cnt]*self.Cfactor)\n\t\t\tif ('mol' in self.abun_unit):\n\t\t\t\tCinfl_now = np.float64(self.con_infl_C[:, infx_cnt])\n\n\t\t\t# ensure correct shape\n\t\t\tCinfl_now = np.array((Cinfl_now)).reshape(-1, 1)\n\n\t\t\tif (self.H2Oin == 1):\n\t\t\t\tif ('ppb' in self.abun_unit):\n\t\t\t\t\t# continuous influx rate of water now\n\t\t\t\t\tself.Cinfl_H2O_now = (self.con_infl_H2O[:, infx_cnt]*self.Cfactor)\n\t\t\t\tif ('mol' in self.abun_unit):\n\t\t\t\t\tself.Cinfl_H2O_now = self.con_infl_H2O[:, infx_cnt]\n\n\t\t\t\t# ensure correct shape\n\t\t\t\tself.Cinfl_H2O_now = np.float64(self.Cinfl_H2O_now.reshape(-1, 1))\n\t\t\t\n\t\t\t# record cumulative injection of components (ug/m3)\n\t\t\ttot_in_res[self.cont_inf_reci] += (((((Cinfl_now.squeeze())*(tnew))/si.N_A)*(y_mw[self.con_infl_indx].squeeze()))*1.e12).reshape(-1)\n\t\t\t\n\t\t# check whether changes occur at start of this time step\n\t\tif (ci_sumt == self.con_infl_t[infx_cnt] and (infx_cnt != -1)):\n\t\t\t\n\t\t\tif ('ppb' in self.abun_unit): # note that we assume units of # molecules/cm3/s\n\t\t\t\t# influx of components now, convert from ppb/s to # molecules/cm3/s (air)\n\t\t\t\tCinfl_now = np.float64(self.con_infl_C[:, infx_cnt]*Cfactor)\n\n\t\t\tif ('mol' in self.abun_unit):\n\t\t\t\tCinfl_now = np.float64(self.con_infl_C[:, infx_cnt])\n\t\t\t\n\t\t\t# ensure correct shape\n\t\t\tCinfl_now = np.array((Cinfl_now)).reshape(-1, 1)\n\n\t\t\tif (self.H2Oin == 1):\n\t\t\t\tif ('ppb' in self.abun_unit):\n\t\t\t\t\t# continuous influx rate of water now\n\t\t\t\t\tself.Cinfl_H2O_now = np.float64(self.con_infl_H2O[:, infx_cnt]*self.Cfactor)\n\t\t\t\tif ('mol' in self.abun_unit):\n\t\t\t\t\tself.Cinfl_H2O_now = np.float64(self.con_infl_H2O[:, infx_cnt])\n\n\t\t\t\t# ensure correct shape\n\t\t\t\tself.Cinfl_H2O_now = self.Cinfl_H2O_now.reshape(-1, 1)\n\t\t\t\n\t\t\t# record cumulative injection of components (ug/m3)\n\t\t\ttot_in_res[self.cont_inf_reci] += ((((((Cinfl_now.squeeze())*(tnew))/si.N_A)*(y_mw[self.con_infl_indx].squeeze()))*1.e12).reshape(-1))\n\n\t\t\t# update index counter for constant influxes - used in integrator below\n\t\t\tif (infx_cnt < (self.con_infl_C.shape[1]-1)):\n\t\t\t\tinfx_cnt += 1\n\t\t\telse:\n\t\t\t\tif (self.con_infl_tf == 1):\n\t\t\t\t\tinfx_cnt = 0\n\t\t\t\tif (self.con_infl_tf == 0):\n\t\t\t\t\tinfx_cnt = -1 # reached end\n\n\t\t\tbc_red = 0 # reset flag for time step reduction due to boundary conditions\n\t\t\n\t\t# check whether changes occur during proposed \n\t\t# integration time step\n\t\tif (ci_sumt+tnew > self.con_infl_t[infx_cnt] and (infx_cnt != -1)):\n\t\t\tif (self.con_infl_tf == 1 and infx_cnt == 0):\n\t\t\t\tif ((ci_sumt+tnew) % (24.*3.6e3) > self.con_infl_t[infx_cnt]):\n\t\t\t\t\ttnew = (24.*3.6e3+self.con_infl_t[infx_cnt])-ci_sumt\t\n\t\t\t\t# flag for time step reduction to accommodate\n\t\t\t\t# changing boundary conditions\n\t\t\t\tbc_red = 1\n\t\t\telse:\n\t\t\t\t# if yes, then reset integration time step \n\t\t\t\t# so that next step coincides \n\t\t\t\t# with change\n\t\t\t\ttnew = self.con_infl_t[infx_cnt]-ci_sumt\n\t\t\t\n\t\t\t\t# flag for time step reduction due to \n\t\t\t\t# boundary conditions\n\t\t\t\tbc_red = 1\t\t\n\telse: # if no continuous influxes, 
provide filler\n\t\tCinfl_now = np.zeros((1, 1))\n\t\n\t# check on nucleation ---------------------------------------------------------\n\t# if automatic time step adaption to nucleation requested, check whether number of new particles\n\t# exceeds 10 % of total number formed during nucleation event. Second part of condition is that\n\t# the specified nucleation event has not yet reached its defined finishing particle number\n\t# concentration (# particles/cm3 (air))\n\tif ((nuc_ad == 1) and (new_part_sum1 < nucv1*0.9) and ((num_sb-self.wall_on) > 0)):\n\t\n\t\t# the time step (s) needed to increase number concentration of nucleated particles by 10 %\n\t\tt_need = (0.1*nucv1+new_part_sum1)\n\t\tt_need = np.log(t_need/nucv1)\n\t\tt_need = np.log(t_need/nucv2)\n\t\tt_need = t_need*nucv3*-1.-sumt\n\t\n\t\tif (tnew > t_need): # if suggested time step exceeds this, then reduce to required time step \n\t\t\ttnew = t_need\n\t\t\tself.update_stp = t_need\n\t\t\tupdate_count = 0.\n\t\t\tbc_red = 1\n\t\t\t\n\t# nucleation check end -------------------------------------------------------------------------\n\t\n\t# check on new vapour pressure of HOM-RO2+MCM-RO2 accretion products ---------------------\n\t\n\t# convert y into components in rows and phases in columns\n\tif ('RO2_POOL' in self.comp_namelist):\n\t\n\t\ty_mat = y.reshape(num_comp, num_sb+1, order='F')\n\t\tif self.wall_on > 0:\n\t\t\ty_mat = y_mat[:, 0:-self.wall_on]\n\t\n\t\t# get the average oxygen and carbon number of the gas- and particle-phase RO2\n\t\tCnumav = sum(sum((self.Cnum[self.RO2_indices[:, 1], :])*(y_mat[self.RO2_indices[:, 1], :].sum(axis=1))/sum(sum(y_mat[self.RO2_indices[:, 1], :]))))\n\n\t\tOnumav = sum(sum((self.Onum[self.RO2_indices[:, 1], :])*(y_mat[self.RO2_indices[:, 1], :].sum(axis=1))/sum(sum(y_mat[self.RO2_indices[:, 1], :]))))\n\t\n\t\t# estimate vapour pressure (Pa) effect of the RO2 pool based on carbon and oxygen number\n\t\tRO2pool_effect_Pa = 10**(-0.12*Onumav + Cnumav*-0.22)*101325.\n\t\t\n\t\t# take effect on the HOM-RO2-MCM-RO2 accretion product, note that inside Psat_Pa_rec\n\t\t# is the estimated vapour pressure of the HOM-RO2 (Pa)\n\t\tself.Psat_Pa[:, self.RO2_POOL_APi] = self.Psat_Pa_rec[self.RO2_POOL_APi] + RO2pool_effect_Pa\n\t\t# convert to # molecules/cm3 (air) using ideal\n\t\t# gas law, R has units cm3.Pa/K.mol\n\t\tself.Psat[:, self.RO2_POOL_APi] = self.Psat_Pa[0, self.RO2_POOL_APi]*(si.N_A/((si.R*1.e6)*self.TEMP[tempt_cnt]))\n\t\t\n\t\t\n\t# end of check on new vapour pressure of HOM-RO2+MCM-RO2 accretion products -------------- \n\n\treturn(temp_now, Pnow, light_time_cnt, tnew, bc_red, update_count, \n\t\tCinfl_now, seedt_cnt, Cfactor, infx_cnt, gasinj_cnt, DStar_org, y, tempt_cnt, \n\t\tRHt_cnt, N_perbin, x, pconcn_frac, pcontf, tot_in_res, self)\n","repo_name":"simonom/PyCHAM","sub_path":"PyCHAM/cham_up.py","file_name":"cham_up.py","file_ext":"py","file_size_in_byte":28374,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"32"} +{"seq_id":"33532771259","text":"import argparse\nimport nose\n\nfrom rit.app.conf import settings\nfrom rit.core.db._sessions import dispose_all_db_connections, sessions\nfrom rit.core.environment.app import get_env_for_app\n\napp_env = get_env_for_app()\n\n\ndef setup_test_environment():\n dispose_all_db_connections()\n if app_env.cmd_dispatcher.is_registered('projectcustom-setup'):\n app_env.cmd_dispatcher.execute_command('projectcustom-setup')\n else:\n app_env.cmd_dispatcher.execute_command('migrations', 
'create_db')\n app_env.cmd_dispatcher.execute_command('migrations', 'apply')\n\n\ndef teardown_test_environment():\n dispose_all_db_connections()\n if app_env.cmd_dispatcher.is_registered('projectcustom-teardown'):\n app_env.cmd_dispatcher.execute_command('projectcustom-teardown')\n else:\n app_env.cmd_dispatcher.execute_command('migrations', 'drop_db')\n\n\nclass TestRunner(object):\n\n command_params = ('--rit-verbose', )\n\n def _prepare_nose_cargs(self, *args):\n nose_params = [arg for arg in args if arg not in self.command_params]\n nose_argv = ['nosetests'] + (list(nose_params) or [])\n if hasattr(settings, 'NOSE_ARGS'):\n nose_argv = nose_argv + settings.NOSE_ARGS\n return nose_argv\n\n def _parse_cargs(self, *params):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--rit-verbose',\n help='Turn on verbose output in tests',\n action='store_true',\n )\n parsed_args, _ = parser.parse_known_args(params)\n return parsed_args\n\n def _get_settings_to_be_updated(self, c_params):\n databases = settings.DATABASES.copy()\n for alias in [db_alias for db_alias in databases\n if not db_alias.endswith('_test')]:\n databases[alias] = databases[alias + '_test']\n sessions._db_uris = databases\n return {\n 'DATABASES': databases,\n 'TESTING': True,\n 'VERBOSE_LOGGING': bool(c_params.rit_verbose),\n }\n\n def _downgrade_settings(self):\n settings.pop_custom_settings()\n\n def __call__(self, *args):\n if settings.INST_TYPE != 'dev':\n raise RuntimeError('Impossible to run tests on production')\n c_params = self._parse_cargs(*args)\n with settings.push_custom_settings(\n self._get_settings_to_be_updated(c_params)\n ):\n nose_argv = self._prepare_nose_cargs(*args)\n setup_test_environment()\n nose.core.TestProgram(argv=nose_argv, exit=False)\n teardown_test_environment()\n","repo_name":"sergeyglazyrindev/ritcore","sub_path":"src/rit/core/testing/management/trunner.py","file_name":"trunner.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35880915472","text":"'''\nCreated on 10-mrt.-2014\n\n@author: Pieterjan Lambrecht \n'''\n\nfrom library import Library\nfrom fm_player import FMPlayer\n\nclass MainPlayer:\n library = None\n player = None\n \n def __init__(self): \n ' Set the frequency of the player '\n self.library = Library()\n self.library.scan() \n ' Start the player '\n self.player = FMPlayer()\n \n def play(self):\n ' Check if there were any votes on the site... '\n if self.library.hasVotes():\n ' Play the next top voted song... '\n self.player.play(self.library.getTopVoted())\n self.play() \n else:\n ' ... else just play some random song... 
'\n self.player.play(self.library.getRandomSong())\n self.play()","repo_name":"pielambr/PiFMVoting","sub_path":"Python player/player/main_player.py","file_name":"main_player.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"34262092306","text":"\"\"\"Analyze a shell history.\"\"\"\n\n# Third party modules\nimport click\n\n# First party modules\nfrom shell_history_analysis.analyze import main\n\n\n@click.command()\n@click.argument(\"filename\", type=click.Path(exists=True))\n@click.option(\"--shell\", type=click.Choice([\"zsh\", \"fish\", \"bash\"]))\n@click.option(\n \"--grouping\",\n type=click.Path(exists=True),\n help=\"Path to a YAML file which groups commands together\",\n)\ndef entry_point(filename: str, shell: str, grouping: str):\n main(filename, shell, grouping)\n","repo_name":"MartinThoma/shell-history-analysis","sub_path":"shell_history_analysis/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"21587713276","text":"# Module 2 - Create a Cryptocurrency\r\n\r\n# Datetime returns the exact date the block is mined \r\nimport datetime\r\n# Hash the blocks\r\nimport hashlib\r\n# Encode the blocks before hashing them\r\nimport json\r\n# Create an object of the Flask class, the web application itself\r\n# Jsonify returns the messages in Postman when we interact with our blockchain\r\nfrom flask import Flask, jsonify, request\r\n# requests==2.18.4 install: pip install requests\r\nimport requests \r\nfrom uuid import uuid4\r\nfrom urllib.parse import urlparse\r\n\r\n\r\n# Part 1 - Building a Blockchain\r\n\r\nclass Blockchain: \r\n # Takes the single argument self, which refers to the object we create\r\n def __init__(self):\r\n # Chain containing the blocks, stored as a list\r\n self.chain = []\r\n self.transactions = []\r\n # Creates the Genesis Block (First Block of the Blockchain)\r\n # Each Block will have its own proof\r\n # Second Argument is a key that each block will have (previous hash value)\r\n # But since it's the first, genesis block, it will not have any previous hash value.\r\n # Arbitrary value \r\n self.create_block(proof = 1, previous_hash = '0')\r\n self.nodes = set()\r\n # Create a new block with all the features in a blockchain and append this new mined block to the blockchain\r\n def create_block(self, proof, previous_hash):\r\n # Make a dictionary that will define each block in the blockchain with its essential keys: index of the block, timestamp, proof of the block, previous hash and transactions \r\n block = {'index' : len(self.chain) + 1 ,\r\n 'timestamp' : str(datetime.datetime.now()) ,\r\n 'proof' : proof,\r\n 'previous_hash' : previous_hash,\r\n 'transactions': self.transactions}\r\n # Empty the transactions list once they are recorded in the block\r\n self.transactions = []\r\n # Append the block to the chain \r\n self.chain.append(block)\r\n # Display the information of this block in Postman\r\n return block\r\n # Gets the previous block\r\n def get_previous_block (self):\r\n # Returns the last index of the chain\r\n return self.chain[-1]\r\n # First Argument, self (apply this proof of work method from the instance object that will be created)\r\n # Second Argument, previous proof: to define the problem that miners have to solve, the previous proof has to be there\r\n def proof_of_work(self, previous_proof):\r\n new_proof = 1\r\n check_proof = False \r\n # 
Introduce a while loop that increments new_proof until it is the right proof \r\n while check_proof is False:\r\n # Leading Zeros ID (define the problem) (more = harder)\r\n # EASY CHALLENGE (CAN MAKE IT HARDER)\r\n hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()\r\n if hash_operation[:4] == \"0000\":\r\n check_proof = True\r\n else:\r\n new_proof += 1\r\n return new_proof\r\n\r\n def hash(self, block):\r\n # Encodes block in the right SHA 256 Format\r\n encoded_block = json.dumps(block, sort_keys = True).encode()\r\n # Returns the cryptographic hash of our block\r\n return hashlib.sha256(encoded_block).hexdigest()\r\n \r\n # Checks if the chain is valid or not\r\n def is_chain_valid(self, chain):\r\n previous_block = chain[0]\r\n block_index = 1\r\n while block_index < len(chain):\r\n block = chain[block_index]\r\n if block['previous_hash'] != self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof = block['proof']\r\n hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()\r\n if hash_operation[:4] != '0000':\r\n return False \r\n previous_block = block\r\n block_index += 1 \r\n return True \r\n # Take care of our transactions\r\n def add_transaction(self, sender, receiver, amount):\r\n self.transactions.append({'sender' : sender,\r\n 'receiver' : receiver,\r\n 'amount': amount})\r\n previous_block = self.get_previous_block()\r\n return previous_block['index'] + 1\r\n \r\n def add_node(self, address):\r\n parsed_url = urlparse(address)\r\n # Since not a list, need to use add\r\n self.nodes.add(parsed_url.netloc)\r\n \r\n def replace_chain(self):\r\n network = self.nodes\r\n longest_chain = None\r\n max_length = len(self.chain)\r\n for node in network:\r\n response = requests.get(f'http://{node}/get_chain')\r\n if response.status_code == 200:\r\n length = response.json()['length']\r\n chain = response.json()['chain']\r\n if length > max_length and self.is_chain_valid(chain):\r\n max_length = length\r\n longest_chain = chain\r\n if longest_chain:\r\n self.chain = longest_chain\r\n return True\r\n return False\r\n\r\n# Part 2 - Mining our Blockchain\r\n\r\n# Create a web application \r\napp = Flask(__name__)\r\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\r\n# Create an address for the node on Port 5000 \r\n# UUID generates a random address\r\nnode_address = str(uuid4()).replace('-', '')\r\n\r\n\r\n# Create a Blockchain\r\nblockchain = Blockchain()\r\n\r\n# Use the route() decorator to tell Flask what URL should trigger our function\r\n# Mining a new Block\r\n@app.route('/mine_block', methods = ['GET'])\r\ndef mine_block():\r\n previous_block = blockchain.get_previous_block()\r\n previous_proof = previous_block['proof']\r\n proof = blockchain.proof_of_work(previous_proof)\r\n previous_hash = blockchain.hash(previous_block)\r\n blockchain.add_transaction(sender = node_address, receiver = 'Sakin', amount = \"10\")\r\n block = blockchain.create_block(proof, previous_hash)\r\n response = {'message': \"Congratulations, you just mined a block!\",\r\n 'index' : block['index'],\r\n 'timestamp' : block['timestamp'],\r\n 'proof' : block['proof'],\r\n 'previous_hash' : block['previous_hash'],\r\n 'transactions' : block['transactions']}\r\n return jsonify(response), 200 \r\n\r\n# Getting the Full Blockchain \r\n@app.route('/get_chain', methods=['GET'])\r\n# Display Chain\r\ndef get_chain():\r\n response = {'chain' : blockchain.chain,\r\n 'length' : len(blockchain.chain)}\r\n return jsonify(response), 200 
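\r\n\r\n# Illustrative helper, a minimal sketch for clarity: is_valid_proof is a hypothetical name and\r\n# is not called by any route below. It restates the acceptance test that proof_of_work() searches\r\n# for and that is_chain_valid() re-checks for every pair of consecutive blocks.\r\ndef is_valid_proof(proof, previous_proof):\r\n hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()\r\n # Each hex digit is uniform, so a '0000' prefix is found once per 16**4 = 65536 candidates on average\r\n return hash_operation[:4] == '0000'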
\r\n\r\n# Check if the blockchain is valid \r\n@app.route('/is_valid', methods=['GET'])\r\ndef is_valid():\r\n is_valid = blockchain.is_chain_valid(blockchain.chain)\r\n if is_valid:\r\n response = {'message': \"Blockchain is Valid\" }\r\n else:\r\n response = {'message' : \"Blockchain is not Valid\" }\r\n return jsonify(response), 200 \r\n \r\n# Adding a new transaction to the Blockchain\r\n@app.route('/add_transaction', methods=['POST'])\r\ndef add_transaction():\r\n json = request.get_json()\r\n transaction_keys = ['sender', 'receiver', 'amount']\r\n if not all (key in json for key in transaction_keys):\r\n return \"Some Elements of the Transactions are missing\" , 400\r\n # Takes values of keys\r\n index = blockchain.add_transaction(json['sender'], json['receiver'], json['amount'])\r\n response = {'message': f\"This transaction will be added to Block {index} \"}\r\n return jsonify(response), 201\r\n\r\n\r\n\r\n# Part 3 - Decentralizing our Blockchain\r\n\r\n# Connecting new Nodes \r\n@app.route('/connect_node', methods=['POST'])\r\n\r\ndef connect_node():\r\n json = request.get_json()\r\n nodes = json.get('node')\r\n if nodes is None: \r\n return \"No Node\", 400\r\n for node in nodes:\r\n blockchain.add_node(node)\r\n response = {'message': 'All the nodes are now connected. The Kincoin Blockchain now contains the following nodes' , \r\n 'total_nodes': list(blockchain.nodes)}\r\n return jsonify(response), 201\r\n\r\n# Replace the chain by the longest chain if needed\r\n@app.route('/replace_chain', methods=['GET'])\r\ndef replace_chain():\r\n is_chain_replaced = blockchain.replace_chain()\r\n if is_chain_replaced:\r\n response = {'message': \"Nodes had different chains so the chain was replaced by the longest chain\",\r\n 'new_chain' : blockchain.chain }\r\n else:\r\n response = {'message' : \"All good, Chain was the longest one.\",\r\n 'actual_chain' : blockchain.chain}\r\n return jsonify(response), 200\r\n\r\n# Running the Application\r\napp.run(host = '0.0.0.0', port = 5000)\r\n\r\n \r\n","repo_name":"CHaOSFrozen/CryptoCurrency101","sub_path":"Kincoin.py","file_name":"Kincoin.py","file_ext":"py","file_size_in_byte":8668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29361213408","text":"from justwatch import JustWatch\nfrom bs4 import BeautifulSoup\nimport requests\nimport random\n\n# APIs related functions\n\ndef netflix(title):\n just_watch = JustWatch(country='US')\n try:\n request = just_watch.search_for_item(query=title)\n offers = request[\"items\"][0][\"offers\"]\n for e in offers:\n if e[\"monetization_type\"] == \"flatrate\" and e[\"provider_id\"] == 8:\n return True\n return False\n except Exception as e:\n return None\n\ndef hbo(title):\n just_watch = JustWatch(country='US')\n try:\n request = just_watch.search_for_item(query=title)\n offers = request[\"items\"][0][\"offers\"]\n for e in offers:\n if e[\"monetization_type\"] == \"flatrate\":\n # check against both HBO provider ids, not the bitwise expression 118 | 27\n if e[\"provider_id\"] in (118, 27):\n return True\n return False\n except Exception as e:\n return None\n\ndef amazon(title):\n just_watch = JustWatch(country='US')\n try:\n request = just_watch.search_for_item(query=title)\n offers = request[\"items\"][0][\"offers\"]\n for e in offers:\n if e[\"monetization_type\"] == \"flatrate\":\n # check against both Amazon provider ids, not the bitwise expression 9 | 194\n if e[\"provider_id\"] in (9, 194):\n return True\n return False\n except Exception as e:\n return None\n\n# Web Scraping of Rotten Tomatoes to obtain the users' ratings\n\ndef rating(title):\n baseUrl = 
\"https://www.rottentomatoes.com/m/\"\n title = title.replace(\"The \", \"\")\n title = title.replace(\" \", \"_\") \n title = title.replace(\"'\", \"\")\n res = requests.get(baseUrl+title).text\n soup = BeautifulSoup(res, 'html.parser')\n try:\n rating = soup.select('.mop-ratings-wrap__percentage')[1].text.strip()[:-1]\n normRating = round(int(rating) * 0.01 * 5)\n except Exception as e:\n return None\n return normRating\n\n\n# Data Analysis functions\n\ndef whichGenre(genre):\n df[df[\"Genre\"].str.contains(genre)]\n\ndef whereisit(title):\n if df[(df[\"Title\"] == title) & (df[\"Netflix\"] == True)].iloc[0].get(\"Netflix\"):\n return \"You can stream this movie on Netflix.\"\n \n if df[(df[\"Title\"] == title) & (df[\"Netflix\"] == True)].iloc[0].get(\"HBO\"):\n return \"You can stream this movie on HBO.\"\n \n if df[(df[\"Title\"] == title) & (df[\"Netflix\"] == True)].iloc[0].get(\"Amazon Prime\"):\n return \"You can stream this movie on Amazon Prime.\"\n\ndef theRating(title):\n try:\n return df[(df[\"Title\"] == title) & (df[\"Netflix\"] == True)].iloc[0].get(\"Rating\")\n except Exception as e:\n return \"There is no available rating for this movie.\"\n\ndef recommendation(genre):\n availableMovies = (df[df[\"Netflix\"] | df[\"HBO\"] | df[\"Amazon Prime\"] == True])\n genreFilter = availableMovies[availableMovies[\"Genre\"].str.contains(genre)]\n bestMovies = (genreFilter[genreFilter[\"Rating\"] == 5])[\"Title\"].tolist()\n return \"Another good movie you may enjoy is: {}.\".format(random.choice(bestMovies))\n\n","repo_name":"ireneisdoomed/Pipeline-project","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27921517428","text":"from functools import reduce\n\n\nclass Solution:\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n https://leetcode.com/problems/letter-combinations-of-a-phone-number/discuss/8064/My-java-solution-with-FIFO-queue\n https://leetcode.com/problems/letter-combinations-of-a-phone-number/discuss/8097/My-iterative-sollution-very-simple-under-15-lines\n https://leetcode.com/problems/letter-combinations-of-a-phone-number/discuss/8070/One-line-python-solution\n http://www.runoob.com/python/python-func-reduce.html\n \"\"\"\n dir = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',\n '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n queue = [''] if digits else []\n for digit in digits:\n current_list = []\n for letter in dir[digit]:\n for combination in queue:\n current_list.append(combination + letter)\n queue = current_list\n return queue\n\n\nclass Solution_0:\n '''\n https://blog.csdn.net/caimouse/article/details/78129956\n '''\n\n def letterCombinations(self, digits):\n if '' == digits:\n return []\n kvmaps = {\n '2': 'abc',\n '3': 'def',\n '4': 'ghi',\n '5': 'jkl',\n '6': 'mno',\n '7': 'pqrs',\n '8': 'tuv',\n '9': 'wxyz'\n }\n return reduce(lambda acc, digit: [x + y for x in acc for y in kvmaps[digit]], digits, [''])\n\n\nclass Solution_1:\n '''\n https://blog.csdn.net/caimouse/article/details/78129956\n '''\n\n def letterCombinations(self, digits):\n map = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',\n '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n if len(digits) == 0:\n return []\n return [a+b for a in self.letterCombinations(digits[:-1])\n for b in self.letterCombinations(digits[-1])] or list(map[digits])\n\n# result = reduce(lambda x, y: x+y, [1, 2, 3, 4, 
5])\n# print(result)\n\n# l = ['13','55']\n# print(l+['a'])\n","repo_name":"lailianqi/LeetCodeByPython","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10743190969","text":"from bson.objectid import ObjectId\nfrom datetime import datetime\n\nfrom json import dumps\n\n# Define the format of the date and time in the string\n# buf_data = dict(data)\n# for key, value in data.items():\n# try:\n# if type(value) is str:\n\n# if model[key] == \"datetime\":\n# # if len(value) <= len(\"2023-01-16 00:00:00\"):\n# # data[key] = buf_data[key]+'.000000'\n# buf_data[key] = types[model[key]](buf_data[key], date_format)\n\n# elif value == \"None\":\n# buf_data[key] = None\n\n# else:\n# buf_data[key] = types[model[key]](value)\n# else:\n# buf_data[key] = str(value)\n\n# except:\n# del buf_data[key]\n# print('key: ', key)\n\n# return buf_data\n\ndate_format = \"%Y-%m-%d %H:%M:%S.%f\"\n\ntemp_model = {\n \"_id\": \"ObjectId\",\n \"date\": \"datetime\",\n \"place_sending\": \"str\",\n \"type\": \"str\",\n \"phone\": \"str\",\n \"code\": \"str\",\n \"description\": \"str\",\n \"count\": 'int',\n \"weight\": \"float\",\n \"space\": \"float\",\n \"density\": \"float\",\n \"place_delivery\": \"str\",\n \"packaging\": \"float\",\n \"delivery\": \"float\",\n \"other\": \"float\",\n \"insurance\": \"int\",\n \"unit_price\": \"float\",\n \"total\": \"float\",\n \"arrival_date\": \"datetime\",\n \"received_positions\": \"int\",\n \"log_colection\": \"list\",\n \"created_at\": \"datetime\",\n \"updated_at\": \"datetime\"\n}\n\ntypes = {\n \"ObjectId\": ObjectId,\n \"datetime\": datetime.strptime,\n \"str\": str,\n \"float\": float,\n \"int\": int,\n \"list\": list\n}\n\n\ndef is_jsonable(x):\n try:\n dumps(x)\n return True\n except (TypeError, OverflowError):\n return False\n\n\ndef is_convertable(s: str):\n try:\n num = int(s)\n return num\n except ValueError:\n pass\n\n try:\n num = float(s)\n return num\n except ValueError:\n pass\n\n if ObjectId.is_valid(s):\n return ObjectId(s)\n\n return s\n\n\ndef get_serialize_document(data: dict) -> dict:\n buf_data = dict(data)\n\n for key, value in buf_data.items():\n if not is_jsonable(value):\n buf_data[key] = str(value)\n\n if buf_data.get('log_collection') is not None:\n del buf_data['log_collection']\n\n return buf_data\n","repo_name":"GOSUKZ/hag_motor","sub_path":"app/iternal/serializers/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11158225512","text":"from pickle import FALSE, TRUE\nimport torch\n\nfrom tqdm import tqdm\nfrom tqdm import trange\nfrom time import sleep\n\nimport torch.nn as nn\nimport torch.optim as optim\nfrom lightunet import LightUnet\nfrom lightutils import (\n save_model,\n save_entire_model,\n load_model,\n seg_acc,\n check_accuracy,\n save_predictions_as_imgs,\n save_plots,\n)\nimport argparse\n\n# from albumentations.pytorch import ToTensorV2\nimport numpy as np\nfrom lightdata import JinglingDataset, transform\nfrom torch.utils.data import DataLoader, Subset\n# from sklearn.model_selection import train_test_split\nimport torchvision.transforms.functional as TF\nimport matplotlib.pyplot as plt\n# Hyperparameters: batch size, number of workers, image size, train_val_split, model\nBatch_size = 1\nNum_workers = 0\nImage_hight = 400\nImage_weight = 400\nPin_memory = 
True\nValid_split = 0.2\nModeluse = LightUnet\n# flexible hyper params: epochs, dataset, learning rate, load_model\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '-e',\n '--epochs',\n type = int,\n default = 5,\n help = 'Number of epochs to train the network'\n)\nparser.add_argument(\n '-t',\n '--troot',\n type = str,\n default = '/home/qiao/dev/giao/dataset/imgs/jinglingseg/images',\n help = 'Input the image dataset path'\n)\nparser.add_argument(\n '-m',\n '--mroot',\n type = str,\n default = '/home/qiao/dev/giao/dataset/imgs/jinglingseg/masks',\n help = 'Input the mask dataset path'\n)\nparser.add_argument(\n '-l',\n '--lr',\n type = np.float32,\n default = 1e-4,\n help = 'Learning rate for training'\n)\nparser.add_argument(\n '-load',\n '--load',\n default = None,\n help = 'loading the trained model for prediction'\n)\n\nargs = vars(parser.parse_args())\nNum_epochs = args['epochs']\nImg_dir = args['troot']\nMask_dir = args['mroot']\nLearning_rate = args['lr']\nLoad_model = args['load']\n\n# the device used for training\nDevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# load the model\nmodel = Modeluse(in_channels=3, out_channels=1)\nmodel.to(device = Device)\n# print the parameter numbers of the model\ntotal_params = sum(p.numel() for p in model.parameters())\nprint(model.eval())\nprint('#############################################################')\nprint(f'There are {total_params:,} total parameters in the model.\\n')\n# optimizer used for training\noptimizer = optim.Adam(model.parameters(), lr = Learning_rate)\n# loss function for training\nloss_fn = nn.BCELoss()\n# load dataset\ndata = JinglingDataset(img_dir = Img_dir,mask_dir = Mask_dir, transform = transform)\ndataset_size = len(data)\nprint(f\"Total number of images: {dataset_size}\")\nvalid_split = 0.2\nvalid_size = int(valid_split*dataset_size)\nindices = torch.randperm(len(data)).tolist()\ntrain_data = Subset(data, indices[:-valid_size])\nval_data = Subset(data, indices[-valid_size:])\nprint(f\"Total training images: {len(train_data)}\")\nprint(f\"Total valid_images: {len(val_data)}\")\n\nprint(f'\\nComputation device: {Device}\\n')\n\ntrain_loader = DataLoader(train_data, batch_size = Batch_size, \n num_workers = Num_workers, \n pin_memory = Pin_memory,\n shuffle = True)\nval_loader = DataLoader(val_data, batch_size = Batch_size, \n num_workers = Num_workers, \n pin_memory = Pin_memory,\n shuffle = True)\n\n# def modeltrans(input, target):\n# preds = model(input)n_epochs, n_steps = 5, 100\n# if preds.shape != target.shape:\n# preds = TF.resize(preds, size=target.shape[2:])\n# return preds\n\ndef fit(train_loader, model, optimizer, loss_fn, scaler):\n print('====> Fitting process')\n\n train_running_loss = 0.0\n train_running_acc = 0.0\n counter = 0\n for i, data in tqdm(enumerate(train_loader), total = len(train_loader)):\n counter += 1\n\n img, mask = data\n # .to() is not in-place for tensors, so the result must be assigned back\n img = img.to(device = Device)\n\n mask = mask.unsqueeze(1)\n mask = mask.float()\n mask = mask.to(device = Device)\n\n # forward\n with torch.cuda.amp.autocast():\n preds = model(img)\n if preds.shape != mask.shape:\n preds = TF.resize(preds, size=mask.shape[2:])\n\n loss = loss_fn(preds, mask)\n train_running_loss += loss.item()\n train_running_acc += seg_acc(preds, mask).sum().item()\n # backward\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update() \n\n n_epochs, n_steps = Num_epochs, len(train_data)\n with trange(1, n_epochs + 1, desc=\"All epochs\") as epochs:\n for epoch in epochs:\n with trange(1, n_steps + 
1, desc=\"Epoch {}/{}\".format(epoch, n_epochs)) as steps:\n for step in steps:\n epochs.set_postfix(foo=epoch * n_steps + step)\n steps.set_postfix(loss=loss.item(), baz=1 / step)\n sleep(0.01)\n\n # # update tqdm loop\n # tqdm(enumerate(train_loader)).set_postfix(loss = loss.item())\n # tqdm(enumerate(train_loader)).set_postfix(loss = loss.item())\n\n epoch_loss = train_running_loss / counter\n epoch_acc = 100. * train_running_acc\n return epoch_loss, epoch_acc\n\ndef valid(val_loader, model, loss_fn):\n print('====> Validation process')\n\n val_running_loss = 0.0\n val_running_acc = 0.0\n counter = 0\n for i, data in tqdm(enumerate(val_loader), total = len(val_loader)):\n counter += 1\n\n img, mask = data\n # .to() is not in-place for tensors, so the result must be assigned back\n img = img.to(device = Device)\n\n mask = mask.unsqueeze(1)\n mask = mask.float()\n mask = mask.to(device = Device)\n\n # forward\n with torch.cuda.amp.autocast():\n preds = model(img)\n if preds.shape != mask.shape:\n preds = TF.resize(preds, size = mask.shape[2:])\n\n val_loss = loss_fn(preds, mask)\n val_running_loss += val_loss.item()\n val_running_acc += seg_acc(preds, mask).sum().item()\n\n n_epochs, n_steps = Num_epochs, len(val_data)\n with trange(1, n_epochs + 1, desc=\"All epochs\") as epochs:\n for epoch in epochs:\n with trange(1, n_steps + 1, desc=\"Epoch {}/{}\".format(epoch, n_epochs)) as steps:\n for step in steps:\n epochs.set_postfix(foo=epoch * n_steps + step)\n steps.set_postfix(loss=val_loss.item(), baz=1 / step)\n sleep(0.01)\n\n # # update tqdm loop\n # tqdm(enumerate(train_loader)).set_postfix(loss = val_loss.item())\n\n val_epoch_loss = val_running_loss / counter\n val_epoch_acc = 100. * val_running_acc\n return val_epoch_loss, val_epoch_acc\n\ndef main():\n if Load_model is not None:\n pass\n # load_model(torch.load('Lightuent18S_1e5_e18.pth'), model)\n\n train_loss, val_loss = [], []\n train_acc, val_acc = [], []\n # check_accuracy(val_loader, model, device = Device)\n scaler = torch.cuda.amp.GradScaler()\n for epoch in range(Num_epochs):\n train_epoch_loss, train_epoch_acc = fit(train_loader, model,\n optimizer, loss_fn, scaler)\n val_epoch_loss, val_epoch_acc = valid(val_loader, model,loss_fn)\n\n train_loss.append(train_epoch_loss)\n val_loss.append(val_epoch_loss)\n train_acc.append(train_epoch_acc)\n val_acc.append(val_epoch_acc)\n\n # save entire model\n save_model(Num_epochs, model, optimizer, loss_fn)\n # check accuracy\n # check_accuracy(val_loader, model, device = Device)\n\n # print some examples to a folder\n # save_predictions_as_imgs(val_loader, \n # model, \n # folder = 'saved_imgs/', \n # device = Device)\n\n # plot loss and acc\n save_plots(train_acc, val_acc, train_loss, val_loss)\n\n # # save final model\n save_entire_model(Num_epochs, model, optimizer, loss_fn)\n\n print('\\n============> TEST PASS!!!\\n')\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n","repo_name":"ConcordiaNAVlab/forest_fire_detection_system","sub_path":"scripts/develop/semantic_segmentation_unet/lightweight/lighttrain.py","file_name":"lighttrain.py","file_ext":"py","file_size_in_byte":8045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8802487694","text":"class Solution:\n def longestValidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if s == \"\":\n return 0\n\n dp = [0 for i in range(len(s))]\n\n left = 0\n\n for i in range(len(s)):\n if s[i] == '(':\n left += 1\n else:\n if left > 0:\n left -= 1\n dp[i] = 2 + dp[i - 1]\n if i - dp[i] >= 0:\n dp[i] += dp[i - dp[i]]\n return max(dp)\n\n # 
Error 1: logic error: return max(dp) rather than dp[-1], because the last element of dp is not always the largest; the string may not end on a valid substring.\n\n","repo_name":"RioAraki/leetcode2020","sub_path":"leetcode_python/32_LongestValidParenthesis.py","file_name":"32_LongestValidParenthesis.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2042738267","text":"from __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\nimport logging\nfrom valhalla.proposals.models import Semester, TimeAllocation\nfrom valhalla.proposals.accounting import get_time_totals_from_pond\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef update_time_allocation(time_allocation_id):\n talloc = TimeAllocation.objects.get(pk=time_allocation_id)\n logger.info('Updating timeallocation for %s', talloc.proposal, extra={'tags': {'proposal': talloc.proposal.id}})\n std_total = get_time_totals_from_pond(talloc, talloc.semester.start, talloc.semester.end, too=False)\n too_total = get_time_totals_from_pond(talloc, talloc.semester.start, talloc.semester.end, too=True)\n talloc.std_time_used = std_total\n talloc.too_time_used = too_total\n talloc.save()\n\n\n@shared_task\ndef run_accounting(semesters=None):\n if not semesters:\n semesters = Semester.current_semesters()\n\n for semester in semesters:\n logger.info('Performing accounting for semester: %s', semester)\n for talloc in semester.timeallocation_set.all():\n update_time_allocation.delay(talloc.id)\n","repo_name":"LCOGT/valhalla","sub_path":"valhalla/proposals/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"44579509113","text":"import numpy as np\nfrom PIL import Image\nfrom typing import Union, Iterable\nfrom xml.etree.ElementTree import Element\n\nfrom ntm_classifier.extract import alt_str_format\n\n# Reminder: pdfminer6 is giving me the xmls\n\n\ndef png_buffer_from_filepath(filepath):\n with Image.open(filepath) as img:\n img = img.crop((0, 0, img.size[0], img.size[1]))\n return img\n\n\ndef tree_extract_xml_figures(xml_tree, page_number):\n page = xml_tree.getroot()[page_number]\n return page_extract_xml_figures(page)\n\n\ndef page_extract_xml_figures(page_xml):\n def gen():\n for element in page_xml:\n if element.tag == 'figure':\n # yield {'page':page_number, element.get('bbox')}\n yield element\n\n return list(gen())\n\n\ndef tree_get_textbox_locations_list(xml_tree, page_number):\n page = xml_tree[page_number]\n return page_get_textbox_locations_list(page)\n\n\ndef page_get_textbox_locations_list(page_xml, gap=0.1):\n def gen():\n for element in page_xml:\n if hasattr(element, 'text') and element.text.strip() != '':\n bbox = element.get('bbox', None)\n if bbox is not None:\n yield bbox\n return combine_adjacent_bboxes(gen(), gap=gap)\n\n\ndef combine_adjacent_bboxes(bbox_gen, gap=0.1):\n def gen():\n # use a list, not a tuple, so the right edge can be extended in place below\n prev = ['0', '0', '0', '0']\n for bbox in bbox_gen:\n current = bbox\n curr = current.split(',')\n if not len(curr) == 4:\n continue\n try:\n # evaluate eagerly so a bad coordinate actually raises ValueError\n [float(x) for x in curr]\n except ValueError:\n continue\n right_left_meet = (float(curr[0]) - float(prev[2]) <= gap)\n top_bottom_match = (curr[1] == prev[1] and curr[3] == prev[3])\n if right_left_meet and top_bottom_match:\n prev[2] = curr[2]\n else:\n yield ','.join(prev)\n prev = curr\n yield ','.join(prev)\n return list(gen())[1:]\n\n\n
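# A small worked example (hypothetical coordinates, for clarity) of the merging rule above:\n# boxes '10,700,200,712' and '200.05,700,380,712' share the same y-extent and have a\n# left-right gap of 0.05 <= gap, so they merge into the single box '10,700,380,712';\n# a box on another line, such as '10,650,200,662', fails top_bottom_match and is\n# emitted separately.\n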
def whiteout_page_text(\n page: Union[np.ndarray, Image.Image],\n page_xml: Element,\n invert_color=False,\n gap=0.1):\n\n as_array = isinstance(page, np.ndarray)\n page_bb = page_xml.get('bbox', \"0.000,0.000,595.320,841.920\")\n bboxes = page_get_textbox_locations_list(page_xml, gap=gap)\n for bbox in bboxes:\n page = whiteout_box(\n page,\n bbox,\n page_bb,\n as_array,\n invert_color=invert_color)\n\n return page\n\n\ndef whiteout_box(\n image: Union[np.ndarray, Image.Image],\n bbox: Union[str, tuple],\n page_bb: Union[str, tuple] = \"0.000,0.000,595.320,841.920\",\n as_array: Union[None, bool] = None,\n raise_error: bool = False,\n invert_color: bool = False):\n as_array = as_array or not isinstance(image, Image.Image)\n matrix = None\n if isinstance(image, Image.Image):\n matrix = np.asarray(image)\n elif isinstance(image, np.ndarray):\n matrix = image\n if not isinstance(matrix, np.ndarray):\n if raise_error:\n raise ValueError(\"Image passed to whiteout_box\"\n \" was not PIL Image or numpy array\")\n return image\n\n (x1, y1), (x2, y2) = alt_str_format(bbox)\n (_, _), (px, py) = alt_str_format(page_bb)\n\n width = matrix.shape[1]\n height = matrix.shape[0]\n\n x1 = max((0, int(round(width * x1 / px))))\n # y1 = max((0, int(round(height*y1/py))))\n x2 = min((width, int(round(width * x2 / px))))\n # y2 = min((height, int(round(height*y2/py))))\n # x2 = width-max((0, int(round(width*x1/px))))\n y1 = height - max((0, int(round(height * y1 / py))))\n # x1 = width-min((width, int(round(width*x2/px))))\n y2 = height - min((height, int(round(height * y2 / py))))\n\n # print(x1, x2, y1, x2)\n\n if invert_color:\n matrix[y2:y1, x1:x2] = 0\n else:\n matrix[y2:y1, x1:x2] = 255\n\n if as_array is True:\n return matrix\n return Image.fromarray(matrix)\n\n\ndef floatify_str(bbox: str):\n splits = bbox.split(',')[:4]\n # pad with zeros when fewer than four coordinates are given; min(0, ...) never padded\n splits = splits + ['0'] * max(0, 4 - len(splits))\n x1, y1, x2, y2 = splits\n\n def num(s: str):\n return float(s.strip())\n return (num(x1), num(y1)), (num(x2), num(y2))\n\n\ndef extract_page_image_bbox(\n image: Union[np.ndarray, Image.Image],\n bbox: Union[str, tuple],\n page_bb: str = \"0.000,0.000,595.320,841.920\",):\n\n (x1, y1), (x2, y2) = floatify_str(bbox)\n (_, _), (px, py) = floatify_str(page_bb)\n\n width = image.width\n height = image.height\n\n x1 = max(0, (width * x1 / px))\n x2 = min(width, (width * x2 / px))\n y1 = height - max(0, (height * y1 / py))\n y2 = height - min(height, (height * y2 / py))\n\n # Note: it is necessary to swap the order of the y coordinates\n return image.crop((x1, y2, x2, y1))\n\n\ndef extract_page_images_bboxes(\n image: Union[np.ndarray, Image.Image],\n bboxes: Iterable[str],\n page_bb: Union[str, tuple] = \"0.000,0.000,595.320,841.920\"):\n\n def gen():\n for bbox in bboxes:\n yield bbox, extract_page_image_bbox(image, bbox, page_bb)\n\n return {k: v for (k, v) in gen()}\n","repo_name":"araminty/ntm_classifier","sub_path":"ntm_classifier/image_from_document.py","file_name":"image_from_document.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11813264815","text":"import numpy\r\nimport pyaudio\r\nimport math\r\n\r\nclass ToneGenerator(object):\r\n\r\n def __init__(self, samplerate=44100, frames_per_buffer=4410):\r\n self.p = pyaudio.PyAudio()\r\n self.samplerate = samplerate\r\n self.frames_per_buffer = frames_per_buffer\r\n self.streamOpen = False\r\n\r\n def sinewave(self):\r\n if self.buffer_offset + self.frames_per_buffer - 1 > self.x_max:\r\n # We don't need a 
full buffer of audio so pad the end with 0's\r\n xs = numpy.arange(self.buffer_offset,\r\n self.x_max)\r\n tmp = self.amplitude * numpy.sin(xs * self.omega)\r\n out = numpy.append(tmp,\r\n numpy.zeros(self.frames_per_buffer - len(tmp)))\r\n else:\r\n xs = numpy.arange(self.buffer_offset,\r\n self.buffer_offset + self.frames_per_buffer)\r\n out = self.amplitude * numpy.sin(xs * self.omega)\r\n self.buffer_offset += self.frames_per_buffer\r\n return out\r\n\r\n def callback(self, in_data, frame_count, time_info, status):\r\n if self.buffer_offset < self.x_max:\r\n data = self.sinewave().astype(numpy.float32)\r\n return (data.tostring(), pyaudio.paContinue)\r\n else:\r\n return (None, pyaudio.paComplete)\r\n\r\n def is_playing(self):\r\n if self.stream.is_active():\r\n return True\r\n else:\r\n if self.streamOpen:\r\n self.stream.stop_stream()\r\n self.stream.close()\r\n self.streamOpen = False\r\n return False\r\n\r\n def play(self, frequency, duration, amplitude):\r\n self.omega = float(frequency) * (math.pi * 2) / self.samplerate\r\n self.amplitude = amplitude\r\n self.buffer_offset = 0\r\n self.streamOpen = True\r\n self.x_max = math.ceil(self.samplerate * duration) - 1\r\n self.stream = self.p.open(format=pyaudio.paFloat32,\r\n channels=1,\r\n rate=self.samplerate,\r\n output=True,\r\n frames_per_buffer=self.frames_per_buffer,\r\n stream_callback=self.callback)\r\n\r\n","repo_name":"Sklyvan/RainbowSorting","sub_path":"Sound_Generator.py","file_name":"Sound_Generator.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"32"} +{"seq_id":"11510454112","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 23 10:32:43 2022\n\n@author: antoinesedoh\n\"\"\"\n# Create a function that replaces every term on the diagonal of a square\n# table with the string \"diag\". 
\n\ndef remplacement_diag (tableau):\n for i in range(len(tableau)):\n for j in range (len(tableau)):\n if (i==j):\n tableau[i][j] = \"diag\"\n return tableau\n\n\n\ntab = (([[0,1,2,3],[4,5,6,7],[7,8,9,3],[7,8,9,0]]))\nremplacement_diag(tab)\nprint(remplacement_diag(tab))\n\n\n","repo_name":"StagiairesMIASHS/testing","sub_path":"Antoine/Exercice5.py","file_name":"Exercice5.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28257277759","text":"from django import forms\n\nfrom player_app.models import Player\n\n\nclass PlayerForm(forms.ModelForm):\n \"\"\"\n hobbies = forms.MultipleChoiceField(\n required=False,\n widget=forms.CheckboxSelectMultiple,\n choices=Player.HOBBIES_CHOICES,\n )\n \"\"\"\n class Meta:\n model = Player\n fields = \"__all__\"\n widgets = {\n 'name': forms.TextInput(attrs={'class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\",'placeholder': 'Enter the name of the player','maxlength': 10}),\n 'birthDate': forms.DateInput(attrs={'type': 'date','class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\"}),\n 'country': forms.TextInput(attrs={'class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\",'placeholder': 'Enter the country of the player'}),\n 'email': forms.EmailInput(attrs={'class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\",'placeholder': 'Enter the email of the player'}),\n 'tshirtNumber': forms.NumberInput(attrs={'class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\",'placeholder': 'Enter the tshirt number of the player'}),\n 'photo': forms.FileInput(attrs={'class': 'form-control'}),\n 'password': forms.PasswordInput(attrs={'class':\"shadow appearance-none border border-red-500 rounded w-full py-2 px-3 text-gray-700 mb-3 leading-tight focus:outline-none focus:shadow-outline\"}),\n #'gender':forms.Select(attrs={'class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\"}),\n 'gender':forms.RadioSelect(),\n 'hobbies': forms.CheckboxSelectMultiple(),\n 'description': forms.Textarea(attrs={'class': \"shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline\",'placeholder': 'Enter the description of the player'}),\n\n }\n labels = {\n 'name': 'Player Name',\n 'birthDate': 'Player BirthDate',\n 'country': 'Player Country',\n 'email': 'Player Email',\n 'photo': 'Player Photo',\n 'gender': 'Player Gender',\n 'description': 'Player Description',\n 'hobbies': 'Player Hobbies',\n }\n # help_texts = {\n # 'name': 'Enter the name of the player',\n # 'birthDate': 'Enter the birthdate of the player',\n # 'country': 'Enter the country of the player',\n # 'email': 'Enter the email of the player',\n # 'photo': 'Enter the photo of the player',\n # }\n error_messages = {\n 'name': {\n 'max_length': 'The player name must be less than 100 characters',\n 'min_length': 'The player name must be greater than 1 character',\n 'invalid': 'The player name is invalid',\n 'required': 'The player name is required',\n\n 'no_spaces': 'The player name must 
not contain any spaces',\n 'no_numbers': 'The player name must not contain any numbers',\n 'no_special_characters': 'The player name must not contain any special characters',\n },\n 'birthDate': {\n 'invalid': 'The birthdate is invalid',\n 'required': 'The birthdate is required',\n },\n\n }\n\n ","repo_name":"ChaoukiBayoudhi/django_forms_and_templates","sub_path":"player_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39363384008","text":"from death.post.inputgen_planJ import InputGenJ\nimport pandas as pd\nfrom collections import Counter\nimport tqdm\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom death.post.inputgen_planH import InputGenH, pad_collate\n\ndef get_death_code_proportion(ig):\n index_list=[]\n dic=ig.death_code_dict\n total_patients=len(ig.death_rep_person_id) # equal to the intersection\n patients_lookup={id: 0 for id in ig.death_rep_person_id}\n\n patient_list = []\n last_patient = None\n\n for index, row in tqdm.tqdm(ig.death.iterrows(), total=ig.death.shape[0]):\n rep_id=index[0]\n if rep_id in patients_lookup:\n if rep_id != last_patient:\n # new patient\n index_list+=patient_list\n patient_list = []\n last_patient=rep_id\n\n code = row['code']\n while code != \"\":\n idx=dic[code]\n patient_list.append(idx)\n code=code[:-1]\n patient_list=list(set(patient_list))\n index_list += patient_list\n\n # for row in tqdm.tqdm(series):\n # idx=dic[row]\n # index_list.append(idx)\n #\n counter=Counter(index_list)\n # code_proportion=list(counter.values())\n #\n # for i in range(len(code_proportion)):\n # code_proportion[i]/=total_patients\n\n prop=np.zeros((ig.output_dim-1))\n\n for key, value in counter.items():\n prop[key]+=value\n if value==0 or value<0:\n print(\"look into it.\")\n prop=prop/total_patients\n\n return prop\n\n# these are the debugging functions after seeing ROC is bigger than 1 for trivial predictions\n# average of ROC, not ROC of average\n\ndef ROC_3():\n \"\"\"\n Well, ROC_2() apparently does not take the structural codes into account.\n These things are tricky and easy to get wrong.\n Why was the ROC_2() formula still wrong?
\n :return:\n \"\"\"\n\n ig=InputGenJ(no_underlying=True, death_only=True)\n\n print(\"Generating prior\")\n code_proportion=get_death_code_proportion(ig)\n code_proportion=np.asarray(code_proportion)\n\n # actual positives is code proportion\n expected_true_positives=(code_proportion*code_proportion)\n\n setp=sum(expected_true_positives)\n scp=sum(code_proportion)\n sensitivity_average=expected_true_positives/code_proportion\n\n\n print(\"Expected true positives per patient\", setp) # 1.276\n print(\"Expected positives per patient\", scp) # 10.7094\n print(\"Expected sensitivity:\",np.mean(sensitivity_average)) # 0.02467\n # higher than actual\n\n # what's the expected specificity?\n negative_code_proportion=1-code_proportion\n expected_true_negatives=negative_code_proportion*negative_code_proportion\n specificity_average=expected_true_negatives/negative_code_proportion\n actual_negatives=sum(negative_code_proportion)\n true_negatives=sum(expected_true_negatives)\n\n # what is the Gini impurity compared to the AUC?\n print(\"Expected true negatives per patient\", true_negatives) # 413.85\n print(\"Expected negatives per patient\", actual_negatives) # 423.29\n print(\"Expected specificity:\",np.mean(specificity_average)) # 0.97532\n\n # what is the expected binary cross entropy with random guess?\n expected_BCE=-code_proportion*np.log(code_proportion)-negative_code_proportion*np.log(negative_code_proportion)\n\n print(np.mean(expected_BCE)) # 0.09295, which is close to the loss on the first validation before training\n\ndef ROC_2():\n \"\"\"\n Because codes have been merged, and we do not know the interaction among the merged codes,\n I have to get the ROC from the data itself.\n It's very fast to generate numpy arrays with the dataloader, so I have to count from the pandas tables instead.\n :return:\n \"\"\"\n\n ig=InputGenJ(no_underlying=True, death_only=True)\n series=ig.get_series(\"death\",\"code\")\n index_list=[]\n dic=ig.death_code_dict\n total_patients=len(ig.death_rep_person_id) # equal to the intersection\n patients_lookup={id: 0 for id in ig.death_rep_person_id}\n\n for index, row in tqdm.tqdm(ig.death.iterrows()):\n if index[0] in patients_lookup:\n code=row['code']\n idx=dic[code]\n index_list.append(idx)\n\n # for row in tqdm.tqdm(series):\n # idx=dic[row]\n # index_list.append(idx)\n\n counter=Counter(index_list)\n code_proportion=list(counter.values())\n\n for i in range(len(code_proportion)):\n code_proportion[i]/=total_patients\n\n expected_true_positives=[]\n for prop in code_proportion:\n # a code has a 30% chance of happening\n # 30% background guess\n # true positive expectation for one patient: 30%*30%\n expected_true_positives.append(prop*prop)\n\n # actual positives is code proportion\n setp=sum(expected_true_positives)\n scp=sum(code_proportion)\n print(\"Expected true positives per patient\", setp) # 0.653923\n print(\"Expected positives per patient\", scp) # 3.482510\n print(\"Expected sensitivity:\",setp/scp) # 0.187773\n # higher than actual\n\n # what's the expected specificity?\n negative_code_proportion=[]\n for i in range(len(code_proportion)):\n negative_code_proportion.append(1-code_proportion[i])\n\n expected_true_negatives=[]\n for prop in negative_code_proportion:\n # the rate of actual negative labels times the background prediction\n expected_true_negatives.append(prop*prop)\n actual_negatives=sum(negative_code_proportion)\n true_negatives=sum(expected_true_negatives)\n\n # what is the Gini impurity compared to the AUC?\n print(\"Expected true negatives per patient\", 
true_negatives) # 186.688\n print(\"Expected negatives per patient\", actual_negatives) # 189.517\n print(\"Expected specificity:\",true_negatives/actual_negatives) # 0.98507\n\n # what is the expected binary cross entropy with random guess?\n prop=np.asarray(code_proportion)\n info=np.log(prop)\n\n negat_prop=1-prop\n neginfo=np.log(negat_prop)\n\n expected_BCE=-prop*info-negat_prop*neginfo\n # 0.0673\n print(np.mean(expected_BCE))\n\n print(\"Really?\")\n print(\"Question: why is the ROC not 1? A coin toss should always give 1, right?\")\n\n\ndef ROC_1():\n \"\"\"\n What is the expected ROC given background prediction?\n We have 15667 patients in the training set.\n Calculate the average proportion of the code. See the chance of predicting by background.\n This is like Gini Impurity? We will see.\n :return:\n \"\"\"\n ig=InputGenJ(no_underlying=True, death_only=True)\n death_count=ig.death_code_count\n death_sort=ig.death_code_sort\n death_dict=ig.death_code_dict\n total_patients=ig.death.index.get_level_values('rep_person_id').nunique()\n\n code_proportion=list(death_count.values())\n\n for i in range(len(code_proportion)):\n code_proportion[i]/=total_patients\n\n expected_true_positives=[]\n for prop in code_proportion:\n # a code has a 30% chance of happening\n # 30% background guess\n # true positive expectation for one patient: 30%*30%\n expected_true_positives.append(prop*prop)\n\n # actual positives is code proportion\n setp=sum(expected_true_positives)\n scp=sum(code_proportion)\n print(\"Expected true positives per patient\", setp)\n print(\"Expected positives per patient\", scp)\n print(\"Expected sensitivity:\",setp/scp)\n # higher than actual\n\n # what's the expected specificity?\n negative_code_proportion=[]\n for i in range(len(code_proportion)):\n negative_code_proportion.append(1-code_proportion[i])\n\n expected_true_negatives=[]\n for prop in negative_code_proportion:\n # the rate of actual negative labels times the background prediction\n expected_true_negatives.append(prop*prop)\n actual_negatives=sum(negative_code_proportion)\n true_negatives=sum(expected_true_negatives)\n\n # what is the Gini impurity compared to the AUC?\n print(\"Expected true negatives per patient\", true_negatives)\n print(\"Expected negatives per patient\", actual_negatives)\n print(\"Expected specificity:\",true_negatives/actual_negatives)\n\n\nif __name__ == '__main__':\n ROC_3()\n","repo_name":"phimachine/mayoehr","sub_path":"death/analysis/expectedroc.py","file_name":"expectedroc.py","file_ext":"py","file_size_in_byte":8178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34978753855","text":"#!/usr/bin/python3\n\"\"\"\nTest suite for Review class\n\"\"\"\nimport unittest\nfrom models.base_model import BaseModel\nfrom models.review import Review\n\n\nclass TestReviewModel(unittest.TestCase):\n def setUp(self):\n self.review = Review()\n self.review.user_id = \"user123\"\n self.review.place_id = \"place456\"\n self.review.text = \"A wonderful experience!\"\n\n def test_class_exists(self):\n self.assertEqual(str(type(self.review)), \"<class 'models.review.Review'>\")\n\n def test_inheritance_review_model(self):\n self.assertIsInstance(self.review, Review)\n self.assertEqual(type(self.review), Review)\n self.assertEqual(issubclass(self.review.__class__, BaseModel), True)\n\n # Add more test cases based on the attributes and methods of your Review class\n\n\nif __name__ == '__main__':\n 
unittest.main()\n\n","repo_name":"mouad-suuu/AirBnB_clone","sub_path":"tests/test_models/test_model_review.py","file_name":"test_model_review.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27833861094","text":"# You will be receiving to-do notes until you get the command \"End\". The notes will be in the format: \"{importance}-{note}\". \r\n# Return a list containing all to-do notes sorted by importance in ascending order. \r\n# The importance value will always be an integer between 1 and 10 (inclusive).\r\n\r\n\r\ntodo = [\"\" for i in range(11)]\r\ncommand = input()\r\n\r\nwhile command != 'End':\r\n xplode = command.split(\"-\")\r\n priority = int(xplode[0])\r\n task = xplode[1]\r\n todo[priority] = task\r\n command = input()\r\nfinal_todo = [task for task in todo if task != \"\"]\r\nprint(final_todo)\r\n","repo_name":"MiroSultanov/Programing_Fundamentals_Python","sub_path":"lists_advanced/To_do_list.py","file_name":"To_do_list.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33776644150","text":"import pygame as pg\n\nimport ErrorEvent\nimport imageFunction, fatherFunction, applicationFunction, objectFunction\n\nprint('Screen library imported')\n\nclass Screen:\n\n def update(self):\n if self.mustUpdate :\n for object in self.object.handler.objects.values() :\n if object.screen.mustUpdate :\n object.screen.update()\n\n self.updateBlitRect() ###- precaution\n self.updateBlitList()\n self.reset()\n self.blitText()\n self.surface.blits(self.blitList)\n self.didUpdate()\n\n def __init__(self,object,color):\n '''\n It blits all objects on the screen.surface'''\n\n self.object = object\n if not color :\n color = objectFunction.Attribute.NO_IMAGE_COLOR\n self.color = color\n # self.object.handler = None\n\n self.surface = self.newSurface()\n self.blitRect = self.getBlitRect()\n self.blitList = []\n\n self.mustUpdateNextFrame()\n\n def reset(self):\n if fatherFunction.isNotAplication(self.object) :\n self.surface = self.object.handler.originalSurface.copy()\n\n def newSurface(self):\n if fatherFunction.isNotAplication(self.object) :\n if self.object.noImage :\n return imageFunction.newAlphaSurface(self.object,\n color = self.color\n )\n else :\n return imageFunction.newImageSurface(self.object)\n else :\n return imageFunction.newDisplay(self.object.size)\n\n def getBlitRect(self):\n return pg.Rect(\n 0,\n 0,\n self.object.size[0],\n self.object.size[1]\n )\n\n def getBlitList(self):\n return [\n (object.screen.surface,object.rect)\n for object in self.object.handler.objects.values()\n if self.blitRect.colliderect(object.rect) and object.visible\n ]\n\n def mustUpdateNextFrame(self):\n self.mustUpdate = True\n self.fatherMustUpdateNextFrame(self.object)\n\n def fatherMustUpdateNextFrame(self,object) :\n if fatherFunction.isNotAplication(object) and not object.father.screen.mustUpdate :\n object.father.screen.mustUpdateNextFrame()\n\n def didUpdate(self):\n self.mustUpdate = False\n\n def updateBlitRect(self):\n self.blitRect = self.getBlitRect()\n\n def updateBlitList(self):\n self.blitList = self.getBlitList()\n\n def blitText(self):\n if self.object.textList :\n for textIndex in range(len(self.object.textList)) :\n self.surface.blit(\n self.object.textList[textIndex],\n self.object.textPositionList[textIndex],\n )\n\n def revealObjects(self,objectNames):\n if objectNames :\n for 
objectName in objectNames :\n if objectName in self.object.handler.objects :\n self.object.handler.objects[objectName].visible = True\n else :\n ErrorEvent.ErrorEvent(None,\n message = f'{objectName} not found in {self.object.name}.handler.objects'\n )\n self.object.screen.mustUpdateNextFrame()\n\n def hideAllObjects(self):\n for object in self.object.handler.objects.values() :\n object.visible = False\n self.object.screen.mustUpdateNextFrame()\n\n def remove(self):\n imageFunction.removeObjectImageAndSurface(self.object)\n","repo_name":"SamuelJansen/Application","sub_path":"api/src/domain/input_output/Screen.py","file_name":"Screen.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29964066617","text":"\ndef how_bad(n):\n flag = True\n lst = []\n number = bin(n).count(\"1\")\n \n for foo in range(2, number // 2 + 1):\n if number % foo == 0:\n flag = False\n break\n if number % 2 == 0:\n lst.append(\"Evil\")\n else:\n lst.append(\"Odious\")\n if flag and number >= 2:\n lst.append(\"Pernicious\")\n return lst\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"WixXhsdqcNHe3vTn3_7.py","file_name":"WixXhsdqcNHe3vTn3_7.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29690972436","text":"from typing import List, TypeVar, Optional, Callable, Union\nfrom types import FunctionType\nimport asyncio\n\nT = TypeVar('T')\n\nclass RenderedPromptSection:\n def __init__(self, output: T, length: int, tooLong: bool):\n self.output = output\n self.length = length\n self.tooLong = tooLong\n\nclass PromptSectionLayout:\n def __init__(self, section: 'PromptSection', layout = None):\n self.section = section\n self.layout = layout\n\nclass PromptSection:\n def __init__(self, sections, tokens: int, required: bool, separator: str):\n self.sections = sections\n self.required = required\n self.tokens = tokens\n self.separator = separator\n\nclass LayoutEngine(PromptSection):\n def __init__(self, sections: List[PromptSection], tokens: int, required: bool, separator: str):\n super().__init__(sections, tokens, required, separator)\n\n def renderAsText(self, memory, functions, tokenizer, maxTokens):\n layout = []\n self.addSectionsToLayout(self.sections, layout)\n\n remaining = self.layoutSections(\n layout,\n maxTokens,\n lambda section: section.renderAsText(memory, functions, tokenizer, maxTokens),\n lambda section, remaining: section.renderAsText(memory, functions, tokenizer, remaining),\n True,\n tokenizer\n )\n\n output = [section.layout.output for section in layout if section.layout]\n text = self.separator.join(output)\n return RenderedPromptSection(text, len(tokenizer.encode(text)), remaining < 0)\n\n def renderAsMessages(self, memory: 'PromptMemory', functions: 'PromptFunctions', tokenizer: 'Tokenizer', maxTokens: int) -> RenderedPromptSection:\n\n layout = []\n self.addSectionsToLayout(self.sections, layout)\n\n remaining = self.layoutSections(\n layout,\n maxTokens,\n lambda section: section.renderAsMessages(memory, functions, tokenizer, maxTokens),\n lambda section, remaining: section.renderAsMessages(memory, functions, tokenizer, remaining)\n )\n\n output = [message for section in layout if section.layout for message in section.layout.output]\n return RenderedPromptSection(output, self.getLayoutLength(layout), remaining < 0)\n\n def addSectionsToLayout(self, sections: List[PromptSection], 
layout: List):\n for section in sections:\n if isinstance(section, LayoutEngine):\n self.addSectionsToLayout(section.sections, layout)\n else:\n layout.append(PromptSectionLayout(section))\n\n def layoutSections(self, layout, maxTokens, cbFixed, cbProportional, textLayout=False, tokenizer=None):\n self.layoutFixedSections(layout, cbFixed)\n\n remaining = maxTokens - self.getLayoutLength(layout, textLayout, tokenizer)\n while remaining < 0 and self.dropLastOptionalSection(layout):\n remaining = maxTokens - self.getLayoutLength(layout, textLayout, tokenizer)\n\n if self.needsMoreLayout(layout) and remaining > 0:\n self.layoutProportionalSections(layout, lambda section: cbProportional(section, remaining))\n\n remaining = maxTokens - self.getLayoutLength(layout, textLayout, tokenizer)\n while remaining < 0 and self.dropLastOptionalSection(layout):\n remaining = maxTokens - self.getLayoutLength(layout, textLayout, tokenizer)\n\n return remaining\n\n def layoutFixedSections(self, layout, callback):\n\n def process_section(section):\n output = callback(section.section)\n setattr(section, 'layout', output)\n\n tasks = [process_section(section) for section in layout if section.section.tokens < 0 or section.section.tokens > 1.0]\n #promises = [callback(section.section).then(lambda output: setattr(section, 'layout', output)) for section in layout if section.section.tokens < 0 or section.section.tokens > 1.0]\n\n\n def layoutProportionalSections(self, layout, callback):\n def process_section(section):\n output = callback(section.section)\n setattr(section, 'layout', output)\n\n tasks = [process_section(section) for section in layout if 0.0 <= section.section.tokens <= 1.0]\n\n def getLayoutLength(self, layout, textLayout=False, tokenizer=None) -> int:\n if textLayout and tokenizer:\n output = [section.layout.output for section in layout if section.layout]\n return len(tokenizer.encode(self.separator.join(output)))\n else:\n return sum(section.layout.length for section in layout if section.layout)\n\n def dropLastOptionalSection(self, layout) -> bool:\n for i in range(len(layout) - 1, -1, -1):\n if not layout[i].section.required:\n layout.pop(i)\n return True\n return False\n\n def needsMoreLayout(self, layout) -> bool:\n return any(not section.layout for section in layout)\n","repo_name":"Stevenic/promptrix-py","sub_path":"src/promptrix/LayoutEngine.py","file_name":"LayoutEngine.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"17043716954","text":"from cs50 import get_string\n\ncard_number = get_string(\"Number: \")\n\n# American Express uses 15-digit numbers\n# MasterCard uses 16-digit numbers\n# Visa uses 13- and 16-digit numbers\ncard_number_length = [13, 15, 16]\n\n# All American Express numbers start with 34 or 37\n# Most MasterCard numbers start with 51, 52, 53, 54, or 55\n# All Visa numbers start with 4\namex_start_digits = [34, 37]\nmastercard_start_digits = [51, 52, 53, 54, 55]\nvisa_start_digits = [4]\n\n\ndef check_length(card_number):\n\n length = len(card_number)\n\n if length in card_number_length:\n is_valid_length = True\n else:\n is_valid_length = False\n\n return is_valid_length\n\n\ndef check_card_type(card_number):\n\n card_type = \"\"\n\n if int(card_number[0:2]) in amex_start_digits:\n card_type = \"AMEX\"\n elif int(card_number[0:2]) in mastercard_start_digits:\n card_type = \"MASTERCARD\"\n elif int(card_number[0:1]) in visa_start_digits:\n card_type = \"VISA\"\n\n return 
card_type\n\n\ndef check_checksum(card_number):\n\n length = len(card_number)\n odd = []\n even = []\n even_products = []\n even_digits = []\n odd_digits = []\n\n for i in range(-1, -(length + 1), -2):\n odd.append(i)\n\n for i in range(-2, -(length + 1), -2):\n even.append(i)\n\n for i in even:\n even_products.append(int(card_number[i]) * 2)\n\n for i in range(len(even_products)):\n if even_products[i] >= 10:\n even_digits.append(1)\n even_digits.append(even_products[i] % 10)\n else:\n even_digits.append(even_products[i])\n\n for i in odd:\n odd_digits.append(int(card_number[i]))\n\n sum_even = sum(even_digits)\n sum_odd = sum(odd_digits)\n sum_all = sum_even + sum_odd\n\n checksum = sum_all % 10\n\n if checksum == 0:\n is_valid_checksum = True\n else:\n is_valid_checksum = False\n\n return is_valid_checksum\n\n\ndef validate_card(is_valid_length, card_type, is_valid_checksum):\n\n if is_valid_length == True and is_valid_checksum == True:\n card_validation = card_type\n else:\n card_validation = \"INVALID\"\n\n return card_validation\n\n\nis_valid_length = check_length(card_number)\ncard_type = check_card_type(card_number)\nis_valid_checksum = check_checksum(card_number)\ncard_validation = validate_card(is_valid_length, card_type, is_valid_checksum)\n\nprint(card_validation)\n","repo_name":"ZaraTam/harvard-cs50","sub_path":"problem_set_6/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"46364695432","text":"from Engine.Components.Component import *\n\nclass InfiniteScreen(Component):\n def __init__(self) -> None:\n super().__init__(\"InfiniteScreen\")\n\n def start(self):\n self.tr = self.entity.transform\n self.sprite = self.getComponent(\"SpriteRenderer\").sprite\n self.width = self.getApplication().width\n self.height = self.getApplication().height\n\n def update(self):\n if self.getInputManager().getKeyDown(KeyCode.ESCAPE):\n self.getSceneManager().pushScene(1)\n\n\n x, y = self.tr.position\n\n if x > self.width + self.sprite.width/2: # Right side\n x = -self.sprite.width/2 \n\n elif x < -self.sprite.width/2: # Left side\n x = self.width + self.sprite.width/2 \n\n if y > self.height + self.sprite.height/2: # Top side\n y = -self.sprite.height/2 \n\n elif y < -self.sprite.height/2 : # Bottom side\n y = self.height + self.sprite.height/2 \n\n self.tr.setPosition(x,y)\n","repo_name":"r0xANDt0l/Python-Games","sub_path":"GUI/EntityComponent/Games/GameTest/Components/InfiniteScreen.py","file_name":"InfiniteScreen.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72308392091","text":"n, k = map(int, input().split())\n\na = []\nfor i in range(n):\n a.append(int(input()))\n\ncoin = 0\nfor i in reversed(range(n)):\n coin += k // a[i]\n k = k % a[i]\nprint(coin)\n","repo_name":"hyunsu4020/baekjoon-Algorithm-python-","sub_path":"11047.py","file_name":"11047.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35261529345","text":"import serial\nimport csv\nfrom numpy import mean, exp\nfrom time import clock,sleep\nfrom pylab import plot, show, figure\n\nser = serial.Serial('com9', 9600, timeout=0)\n\n\ndef conversion(l, F): # convertit liste en csv\n\n file = open(F, 'w', )\n\n ecriture = csv.writer(file, dialect='excel', delimiter=';')\n 
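# Editor's note (sketch, not part of the original acquisition script): the
# writer loop below iterates over range(len(l[0]) - 1), which silently drops
# the last (mass, volts) sample, and opening the file without newline=''
# makes csv.writer emit blank rows on Windows. A hedged rewrite:
#
#     with open(F, 'w', newline='') as f:
#         w = csv.writer(f, dialect='excel', delimiter=';')
#         w.writerow(['Masse', 'Volts'])
#         w.writerows(zip(l[0], l[1]))  # zip stops at the shorter column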
ecriture.writerow(['Masse', 'Volts'])\n for i in range(len(l[0]) - 1):\n ecriture.writerow([l[0][i], l[1][i]])\n file.close()\n\n\ndef acquisition(pas):\n R1 = []\n R2 = []\n t = 0\n\n while t < pas:\n i = 0\n t1 = clock()\n b = ser.readline().decode('Latin-1')\n\n if b != '':\n # print(b)\n B = b.split(';')\n # print(B)\n try:\n R1.append(int(B[0]))\n R2.append(int(B[1]))\n t2 = clock()\n t = t + t2 - t1\n\n except:\n i += 1\n\n # print(i)\n # print(R1,len(R1))\n # print(R2,len(R2))\n # print(mean(R1), mean(R2))\n # print(leplusfrequent(R1), leplusfrequent(R2))\n return [mean(R1), mean(R2)]\n\n\ndef prog():\n while True:\n Tmax = int(input(\"durée acquisition : \"))\n pas = 0.01\n t = 0\n T = []\n R1m, R2m = [], []\n while t < Tmax:\n t1 = clock()\n R = acquisition(pas)\n t2 = clock()\n t = t + t2 - t1\n T.append(t)\n R1m.append(R[0])\n\n R2m.append(R[1])\n\n plot(T, R1m, ':r', T, R2m, ':g')\n show()\n\n\ndef capteur1(v):\n f = 0.00993 * exp(0.02513 * v)\n return f\n\n\ndef capteur2(v):\n f = 0.04857 * exp(0.01578 * v)\n return f\n\n\ndef force():\n while True:\n try:\n valeur = acquisition(0.01)\n f1, f2 = capteur1(valeur[0]), capteur2(valeur[1])\n print(\"force capteur 1 : \", f1)\n print(\"force capteur 2 : \", f2)\n except KeyboardInterrupt:\n print(\"fin programme\")\n\n#prog()\n#force()\nwhile True :\n print(acquisition(0.01))\n sleep(1)","repo_name":"Neirda8282/Tipe","sub_path":"psoc force/lectureforce.py","file_name":"lectureforce.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4258558522","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Model input and output\r\nx_ = tf.placeholder(tf.float32,[None,1])\r\ny_ = tf.placeholder(tf.float32,[None,1])\r\n# define parameters and linear model\r\ndef new_layer(input_data, in_dim, hidden_units):\r\n global W, b\r\n W = tf.Variable(tf.random_normal([in_dim, hidden_units]))\r\n b = tf.Variable(tf.random_normal([1,hidden_units]))\r\n Y = tf.matmul(input_data, W) + b\r\n return Y\r\n\r\n# activation function and hypothesis set\r\nhidden_layer1 = tf.nn.tanh(new_layer(x_, 1, 10))\r\n##hidden_layer2 = tf.nn.relu(new_layer(hidden_layer1,4,8))\r\npredictions = new_layer(hidden_layer1 ,10 , 1)\r\n# loss\r\nloss = tf.reduce_mean(tf.square(y_ - predictions))\r\n# optimizer\r\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\r\n# training data\r\nx_train = np.linspace(-1,1,200)[:,np.newaxis]\r\nnoise = np.random.normal(0,0.09,x_train.shape)\r\ny_train = np.power(x_train,4) - 2*np.power(x_train,3) +noise\r\n# show training data\r\nfig,ax = plt.subplots()\r\nfig.set_tight_layout(True)\r\nax.scatter(x_train,y_train)\r\n# training loop\r\nsess = tf.Session()\r\nsess.run(tf.global_variables_initializer())\r\nfor i in range(1000):\r\n sess.run(train, {x_:x_train,y_:y_train})\r\n if i %50==0:\r\n try:\r\n ax.lines.remove(lines[0])\r\n except:\r\n pass\r\n print(sess.run(loss, {x_:x_train, y_:y_train}))\r\n # show training line #use color=(R,G,B)\r\n lines = ax.plot(x_train, sess.run(predictions, {x_:x_train}),color=(0.8,0.7,0.15),lw = 3)\r\n plt.pause(0.08)\r\n\r\nplt.ion()\r\nplt.draw()\r\n# evaluate training accuracy\r\ncurr_W, curr_b, curr_loss, = sess.run([W, b, loss], {x_:x_train, y_:y_train})\r\nprint(\"W: %s \\n b: %s \\n loss: %s\"%(curr_W, curr_b, curr_loss))\r\n","repo_name":"KbWen/Python_ML","sub_path":"Tensorflow_Polynomial Regressioni.py","file_name":"Tensorflow_Polynomial 
Regressioni.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14409628510","text":"from ppadb.client import Client\nimport os\nimport cv2 as cv\nimport keyboard\nimport numpy as np\n\ndef connect_device():\n adb = Client(host='127.0.0.1',port=5037)\n devices = adb.devices()\n if len(devices) == 0:\n print(\"No Devices Attached\")\n quit()\n else:\n print(\"Device Connected\")\n return devices[0]\n\ndef take_screenshot(device):\n image = device.screencap()\n with open('screen.png', 'wb') as f:\n f.write(image)\n\ndef take_screenshot(device, screen_name):\n image = device.screencap()\n with open(screen_name, 'wb') as f:\n f.write(image)\n\ndef isPower8(screen_path):\n img = cv.imread(screen_path,0)\n template = cv.imread('move_8-10.png', 0)\n w, h = template.shape[::-1]\n res = cv.matchTemplate(img, template, cv.TM_CCOEFF_NORMED)\n threshold = 0.8\n return np.any(res >= threshold)\n\n\ndef isPower0(screen_path):\n img = cv.imread(screen_path,0)\n template = cv.imread('move_0-10.png', 0)\n w, h = template.shape[::-1]\n res = cv.matchTemplate(img, template, cv.TM_CCOEFF_NORMED)\n threshold = 0.8\n return np.any(res >= threshold)\n\ndef main():\n img = cv.imread('screen.png',0)\n template = cv.imread('move_0-10.png', 0)\n w, h = template.shape[::-1]\n res = cv.matchTemplate(img, template, cv.TM_CCOEFF_NORMED)\n threshold = 0.8\n loc = np.where(res >= threshold)\n for pt in zip(*loc[::-1]):\n cv.rectangle(img, pt, (pt[0] + w, pt[1] + h), (255, 0, 255), 2)\n cv.imwrite('res.png', img)\n\n print(\"is Power 8? \", isPower8('screen.png'))\n print(\"is Power 0? \", isPower0('screen.png'))\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n","repo_name":"petersonchan/battlefunction","sub_path":"venv/Scripts/test_playground.py","file_name":"test_playground.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9541731882","text":"from gurobipy import GRB, Model, quicksum\nfrom process_data import *\n\nmodel = Model()\nmodel.setParam('TimeLimit',1800)\n\nM = range(1, 5 + 1) #Materiales\nI = range(1, 7 + 1) #Regiones\nD = range(1, 3 + 1) #Dimensiones\nT = range(1, 50 + 1) #Años\nT_2 = range(0, 50 + 1)\nN = 999999999 #Big 'M' \n\n\n# Definición de parámetros a partir de data\n\ndemanda = {(i, t): demanda_anual()[i,t] for i in I for t in T} \ncosto_m = {(m, i, d, t): costo_mantencion()[m,i ,d, t] for m in M for i in I for d in D for t in T}\ncosto_i = {(m, i, d, t): costo_instalacion()[m,i,d,t] for m in M for i in I for d in D for t in T}\ncapacidad_max ={(m, i, d): capacidad_maxima()[m,i,d] for m in M for i in I for d in D}\nhumedad = {(i, t): humedad_anual()[i,t] for i in I for t in T}\neficiencia = {m: eficiencia_material()[m] for m in M}\nsuperficie = {i: superficie_efectiva()[i] for i in I}\narea = {d: area_ocupada()[d] for d in D}\nper = {i: plazo_mantenimiento()[i] for i in I}\ngamma = {(m, i, d): 584000 for m in M for i in I for d in D}\n\n# Variables\n\nx = model.addVars(M, I, D, T_2, vtype = GRB.INTEGER, name = \"x_midt\")\ny = model.addVars(M, I, D, T, vtype = GRB.INTEGER, name = \"y_midt\")\nw = model.addVars(I, T, vtype = GRB.CONTINUOUS, name = \"w_it\") \nz = model.addVars(M, I, D, T, vtype = GRB.BINARY, name = \"z_midt\") \nq = model.addVars(M, I, D, T, vtype = GRB.BINARY, name = \"q_midt\") \n\n# Restricciones\n\nmodel.addConstrs((w[i, t - 1] + quicksum(eficiencia[m] * humedad[i, t] * 
capacidad_max[m, i, d] * x[m, i, d, t] for d in D for m in M) == demanda[i, t] + w[i, t] for i in I for t in range(2, 51)), name = \"R1.1\")\n\nmodel.addConstrs((x[m,i,d,t] <= x[m,i,d,t+1] for m in M for i in I for d in D for t in range(1,50)), name= 'r_bonus')\n\nmodel.addConstrs(x[m,i,d, 0] == 0 for m in M for i in I for d in D)\n\nmodel.addConstrs((quicksum(eficiencia[m] * humedad[i, 1] * capacidad_max[m, i, d] * x[m, i, d, 1] for d in D for m in M) == demanda[i, 1] + w[i, 1] for i in I), name = \"R1.2\")\n\nmodel.addConstrs((y[m, i, d, t] <= N * q[m, i, d, t] for m in M for i in I for t in T for d in D), name = \"R3.1\")\n\nmodel.addConstrs((y[m, i, d, t] <= x[m, i, d, t] for i in I for m in M for d in D for t in T), name = \"R3.3\")\n\nmodel.addConstrs((y[m, i, d, t] >= x[m, i, d, t] - N * (1 - q[m, i, d, t]) for i in I for m in M for d in D for t in T), name = \"R3.4\")\n\nmodel.addConstrs((gamma[m, i, d] + z[m, i, d, t] * N >= quicksum(eficiencia[m] * humedad[i, r] * capacidad_max[m, i, d] * x[m, i, d, r] for r in range(max(3, t), min(51, t + s + 1))) for i in I for m in M for d in D for t in range(3, 51) for s in range(2, max(2, per[i]))), name=\"R4.1\")\n\nmodel.addConstrs((z[m, i, d, t] <= q[m, i, d, t] for i in I for m in M for d in D for t in T), name = \"R5\")\n\nmodel.addConstrs((quicksum(q[m, i, d, t - per[i] + j] for j in range(3, per[i] + 1)) >= 1 for m in M for i in I for d in D for t in range(max(3, per[i]), 51)), name=\"R6.1\")\n\nmodel.addConstrs((quicksum(area[d] * x[m, i, d, t] for m in M for d in D) <= superficie[i] for i in I for t in T), name = \"R8\")\n\n# Agregar restricciones al modelo\n\nmodel.update()\n\nfuncion_objetivo = quicksum(costo_m[m, i ,d , t_1] * y[m, i, d, t_1] + costo_i[m , i, d, t_1] * (x[m, i , d , t] - x[m, i, d, t - 1]) for m in M for i in I for d in D for t_1 in T for t in range(1,51))\n\nmodel.setObjective(funcion_objetivo, GRB.MINIMIZE) # Colocar la FO m.optimize()\n\n# Optimizar\n\nmodel.optimize()\n\n\nprint(f\"El valor minimo en UF para la realizacion del proyecto es: {model.ObjVal}\")\n\n\n# Guardar archivos y definir para gráficos \n\nvalor_objetivo = model.ObjVal\n\ndict_m = {\n 1: \"Rachel 35%\",\n 2: \"Rachel 50%\",\n 3: \"Tela quirurgica\",\n 4: \"Costal de fique\",\n 5: \"Guata\"}\n\ndict_d = {\n 1: 48,\n 2: 100,\n 3: 150}\n\ndict_i = {\n 1: \"Atacama\",\n 2: \"Coquimbo\",\n 3: \"Valparaiso\",\n 4: \"Metropolitana\",\n 5: \"Ohiggins\",\n 6: \"Maule\",\n 7: \"Nuble\"}\nprint(\"\")\n\n\ndemanda = demanta_total()\nprecio_peso = (model.ObjVal / demanda)* 36081\n\natrapanieblas_region = []\nagua_almacenada = []\natrapanieblas_mantenidos = []\natrapanieblas_material = []\natrapanieblas_dimension = []\n\ntotales = 0\nfor i in I:\n cantidad = sum(x[m,i,d,50].x for m in M for d in D)\n totales += cantidad\n\n\nfor i in I:\n cantidad = sum(x[m,i,d,50].x for m in M for d in D)\n region = dict_i[i]\n fila = [region, int(cantidad)]\n atrapanieblas_region.append(fila)\n\nfor t in T:\n cantidad = sum(w[i, t].x for i in I)\n año = t \n fila = [año + 2024 , int(cantidad)]\n agua_almacenada.append(fila)\n\nfor t in T:\n cantidad_mantenida = sum(y[m,i,d,t].x for m in M for i in I for d in D)\n año = t\n fila = [año + 2024, int(cantidad_mantenida)]\n atrapanieblas_mantenidos.append(fila)\n\nfor i in I:\n for m in M:\n cantidad = sum(x[m,i,d,50].x for d in D)\n region = dict_i[i]\n fila = [region, dict_m[m], int(cantidad)]\n atrapanieblas_material.append(fila)\n\nfor i in I:\n for d in D:\n cantidad = sum(x[m,i,d,50].x for m in M)\n region = 
dict_i[i]\n fila = [region, dict_d[d], int(cantidad)]\n atrapanieblas_dimension.append(fila)\n\n\nwith open('resultados_especificos/w[i,t].txt', 'w') as archivo:\n for i in I:\n for t in T:\n if w[i, t].x != 0:\n archivo.write(f\"Se almacenan {int(w[i, t].x)} litros de agua en la region de {dict_i[i]} en {t + 2024} \\n\")\n archivo.write(\"\")\n\nwith open('resultados_especificos/x[m,i,d,t].txt', 'w') as archivo:\n for i in I:\n for t in T:\n for d in D:\n for m in M:\n if x[m, i, d, t].x != 0:\n archivo.write(f\"Existen {int(x[m, i, d, t].x)} atrapanieblas de material {dict_m[m]} y dimension {dict_d[d]} m^2, en la region de {dict_i[i]} a finales de {t + 2024}\\n\")\n archivo.write(\"\")\n\nwith open('resultados_especificos/y[m,i,d,t].txt', 'w') as archivo:\n for i in I:\n for t in T:\n for d in D:\n for m in M:\n if y[m, i, d, t].x != 0:\n archivo.write(f\"{int(y[m, i, d, t].x)} atrapanieblas de material {dict_m[m]} y dimension {dict_d[d]} m^2, en la region de {dict_i[i]} reciben mantencion en {t + 2024}\\n\")\n archivo.write(\"\")\n\nwith open('resultados_especificos/z[m,i,d,t].txt', 'w') as archivo:\n for i in I:\n for t in T:\n for d in D:\n for m in M:\n if z[m, i, d, t].x != 0:\n archivo.write(f\"{int(z[m, i, d, t].x)} atrapanieblas de material {dict_m[m]} y dimension {dict_d[d]} m^2, en la region de {dict_i[i]} superan límite de obtención en {t + 2024}\\n\")\n archivo.write(\"\")\n\nwith open('resultados_especificos/q[m,i,d,t].txt', 'w') as archivo:\n for i in I:\n for t in T:\n for d in D:\n for m in M:\n if q[m, i, d, t].x != 0:\n archivo.write(f\"Se debe hacer mantencion a los atrapanieblas de material {dict_m[m]} y dimension {dict_d[d]} m^2, en la region de {dict_i[i]} en {t + 2024}\\n\")\n archivo.write(\"\")\n\n\n \n","repo_name":"PipeXtz/Proyecto_Atrapanieblas_Grupo63","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73410011930","text":"from .music import get, search\nfrom src.tools.utils import checknumber\nimport nonebot\nimport sys\n\nfrom nonebot import on_command\nfrom nonebot.adapters import Message\nfrom nonebot.params import CommandArg, Arg\nfrom nonebot.adapters.onebot.v11 import MessageSegment as ms\nfrom nonebot.adapters.onebot.v11 import GroupMessageEvent\nfrom nonebot.typing import T_State\n\nTOOLS = nonebot.get_driver().config.tools_path\nsys.path.append(TOOLS)\n\n\n'''\n搜歌可查询歌曲,点歌直接根据歌曲名和作者(若有)推出歌曲。\n\n数据来源:\n@网易云音乐\n@QQ音乐\n'''\n\nsearch_music = on_command(\"search_music\", aliases={\"搜歌\"}, priority=5)\n\n\n@search_music.handle()\nasync def _(state: T_State, event: GroupMessageEvent, args: Message = CommandArg()):\n data = args.extract_plain_text().split(\" \")\n if len(data) != 2:\n await search_music.finish(\"唔……参数不正确哦,只能有2个参数~\")\n platform = data[0]\n song = data[1]\n info = await search(platform, song)\n if info == \"404\":\n await search_music.finish(\"唔……没有找到您要的音乐哦~\")\n songs = info[0]\n id = info[1]\n platform = info[2]\n state[\"id\"] = id\n state[\"platform\"] = platform\n msg = \"\"\n for i in range(len(songs)):\n msg = msg + f\"\\n{i}.\" + songs[i]\n await search_music.send(msg[1:])\n return\n\n\n@search_music.got(\"num\", prompt=\"输入序号即可搜索搜索歌曲,其他内容则无视~\")\nasync def __(state: T_State, num: Message = Arg()):\n num = num.extract_plain_text()\n if checknumber(num):\n id = state[\"id\"]\n platform = state[\"platform\"]\n num = int(num)\n song = id[num]\n if platform == 1:\n p = \"qq\"\n else:\n p = 
\"163\"\n msg = ms.music(p, song)\n await search_music.finish(msg)\n else:\n return\n\nget_music = on_command(\"get_music\", aliases={\"点歌\"}, priority=5)\n\n\n@get_music.handle()\nasync def _(event: GroupMessageEvent, args: Message = CommandArg()):\n data = args.extract_plain_text().split(\" \")\n if len(data) not in [2, 3]:\n await get_music.finish(\"唔……参数只能有2或3个哦~\")\n singer = None\n if len(data) == 3:\n singer = data[2]\n platform = data[0]\n song_name = data[1]\n info = await get(platform, song_name, singer)\n if info == \"404\":\n await get_music.finish(\"唔……没有找到您要的歌曲~\")\n platform = info[2]\n if platform == 1:\n p = \"qq\"\n else:\n p = \"163\"\n msg = ms.music(p, info[1])\n await get_music.finish(msg)\n","repo_name":"codethink-cn/Inkar-Suki","sub_path":"src/plugins/music/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"26807144483","text":"\nimport json\n\n\ninit_string_list=[]\n\nfinal_string_list=[]\n\n\n# def clean_string(string):\n# fake_str=''\n# cleaned=[]\n \n# first_idx=0\n# last_idx=len(string)-1\n# exclude_list=[first_idx,last_idx]\n \n# for i in range(len(string)):\n# if i not in exclude_list:\n# elem=string[i]\n# cleaned.append(elem)\n \n# for i in cleaned:\n# fake_str=fake_str+i\n \n\n\n \n# return fake_str\n\nimport os\nflg=1\ndef conv_tostring(dicti,flg):\n \n bolbol=str(dicti)\n \n if flg==1:\n bolbol=bolbol +','\n \n bolbol=bolbol.replace(\"'\",\"\\\"\")\n print(bolbol)\n return bolbol\n \n \nwrite_file='Desktop/StairRampFinal/Stair_ramp_final.json' \n\n\n\npath='Desktop/StairRampFinal/'\n###Appending all json files as strings in a list>>\nfor x,y,filenames in os.walk(path):\n for name in filenames:\n if name.endswith('.json'):\n my_file=path+name\n print('file is::',my_file)\n with open(my_file) as my_json_file:\n data=json.load(my_json_file)\n data=data[0]\n init_string_list.append(data)\n \nfor idx,elem in enumerate(init_string_list):\n \n if idx==len(init_string_list)-1:\n flg=0\n else:\n flg=1\n \n \n final=conv_tostring(elem,flg)\n \n final_string_list.append(final)\n \nwith open(write_file,'w') as write_f:\n \n write_f.write('[')\n \n for string in final_string_list:\n \n write_f.write(string)\n write_f.write(']')\n \n \n \n \n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n \n\n \n \n\n\n \n \n \n\n\n\n","repo_name":"Tarek-pwd/IOS-wheelchair","sub_path":"jsonappending.py","file_name":"jsonappending.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25639047137","text":"n, k = map(int, input().split())\ncircle = list(range(1, n + 1))\n\nresult = []\nindex = 0\nwhile circle:\n index = (index + k - 1) % len(circle)\n result.append(str(circle.pop(index)))\n\nprint(\"<%s>\" %(\", \".join(result)))\n","repo_name":"yaezzin/Algorithm","sub_path":"Python/data_structure/1158.py","file_name":"1158.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15091871685","text":"from dataframe import Data\nfrom analysis import Analyse\nfrom collections import defaultdict, OrderedDict\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport config\n\nclass Visuals:\n \"\"\"Creates visual representation of analysed chats\"\"\"\n # COLORS = ['#0D2149', '#C1292E'] # DARKER\n COLORS = ['#235789', '#F03A47'] # LIGHTER\n man 
= config.config['MAN']\n woman = config.config['WOMAN']\n\n def __init__(self, analysed_data):\n self.analysed_data = analysed_data\n self.dictionary = self.analysed_data.collect()\n\n ## REMAINING IN DICTIONARY\n # 'Average Word Length': self.average_word_length(),\n # 'Average Character Length': self.average_character_length()\n\n def messages_by_day(self):\n dayofweek = self.dictionary['Day of Week']\n arr = np.arange(7)\n man_time, woman_time = [], []\n for each in dayofweek.values():\n man_time.append(each[self.man])\n woman_time.append(each[self.woman])\n \n p1 = plt.bar(arr, man_time, color=self.COLORS[0])\n p2 = plt.bar(arr, woman_time, bottom=man_time, color=self.COLORS[1])\n plt.xticks(np.arange(7) ,dayofweek.keys(), fontsize=7)\n plt.xlabel('Days of the Week', fontsize=7)\n plt.ylabel('Number of Messages', fontsize=7)\n plt.legend((p1[0], p2[0]), ('Boyfriend', 'Girlfriend'))\n # plt.savefig('report/messages_by_day.png', bbox_inches='tight')\n plt.show()\n\n def messages_by_time(self):\n timeofday = self.dictionary['Time of Day']\n arr = np.arange(24)\n man_time, woman_time = [], []\n for each in timeofday.values():\n man_time.append(each[self.man])\n woman_time.append(each[self.woman])\n\n p1 = plt.bar(arr, man_time, color=self.COLORS[0])\n p2 = plt.bar(arr, woman_time, bottom=man_time, color=self.COLORS[1])\n plt.xticks(np.arange(24) ,timeofday.keys(), fontsize=7)\n plt.xlabel('Hours of the Day', fontsize=7)\n plt.ylabel('Number of Messages', fontsize=7)\n plt.legend((p1[0], p2[0]), ('Boyfriend', 'Girlfriend'))\n # plt.savefig('report/messages_by_time.png', bbox_inches='tight')\n plt.show()\n\n def messages_by_month(self):\n month = self.dictionary['Messages per Month']\n print('Feature Data Acquired.\\nStarting plot')\n arr = np.arange(12)\n\n man_time, woman_time = [], []\n for each in month.values():\n man_time.append(each[self.man])\n woman_time.append(each[self.woman])\n\n p1 = plt.bar(arr, man_time, color=self.COLORS[0])\n p2 = plt.bar(arr, woman_time, bottom=man_time, color=self.COLORS[1])\n plt.xticks(arr, month.keys(), fontsize=7)\n plt.xlabel('Messages by Month', fontsize=7)\n plt.legend((p1[0], p2[0]), ('Boyfriend', 'Girlfriend'))\n # plt.savefig('report/messages_by_month.png', bbox_inches='tight') \n plt.show()\n\n def messages_from_each(self):\n from_each = self.dictionary['Messages from each']\n from_man, from_woman = from_each[self.man], from_each[self.woman]\n plt.figure(figsize=(6,6))\n plt.pie([from_man, from_woman], labels=['Boyfriend', 'Girlfriend'],\n colors=self.COLORS, autopct=lambda p: '{:.2f}% ({:.0f})'.format(p,\n p*sum(from_each.values())/100))\n\n centre_circle = plt.Circle((0,0),0.75,color='black', fc='white')\n fig = plt.gcf()\n fig.gca().add_artist(centre_circle)\n\n plt.title('Messages Sent By Each Person')\n plt.legend()\n # plt.savefig('report/messages_from_each.png', bbox_inches='tight')\n plt.show()\n\n def words_from_each(self):\n from_each = self.dictionary['Words from each']\n from_man, from_woman = from_each[self.man], from_each[self.woman]\n plt.figure(figsize=(6,6))\n plt.pie([from_man, from_woman], labels=['Boyfriend', 'Girlfriend'],\n colors=self.COLORS, autopct=lambda p: '{:.2f}% ({:.0f})'.format(p, \n p*sum(from_each.values())/100))\n\n centre_circle = plt.Circle((0,0),0.75,color='black', fc='white',linewidth=0.25)\n fig = plt.gcf()\n fig.gca().add_artist(centre_circle)\n \n plt.title('Words Written By Each Person')\n plt.legend()\n # plt.savefig('report/words_from_each.png', bbox_inches='tight')\n plt.show()\n\n def 
top_words(self):\n words = self.dictionary['Top Words']\n plt.bar(words.words, words.frequency, color=self.COLORS[0])\n plt.title('Most Used Words')\n # plt.savefig('report/top_words.png', bbox_inches='tight')\n plt.show()\n\nif __name__ == '__main__':\n\n data = Data(config.config['FILE_NAME'])\n df = data.parse_file()\n\n analysed_data = Analyse(df)\n visualize = Visuals(analysed_data)\n\n ## CALL FUNCTIONS TO GENERATE IMAGE\n # visualize.messages_by_day()\n # visualize.messages_by_time()\n # visualize.messages_by_month()\n # visualize.messages_from_each()\n # visualize.words_from_each()\n # visualize.top_words()\n # print(visualize.dictionary)","repo_name":"adityamonga/chat_analysis","sub_path":"visuals.py","file_name":"visuals.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3002524891","text":"# recstr.py\n# performs recursive functions on two strings entered by user\n# Rachael Schwartz\n\ndef main():\n \n def rev(s):\n if (s == \"\"):\n return s\n else:\n f = (len(s) - 1)\n return s[f] + rev(s[0:f])\n \n def pal(s):\n if (len(s) < 2):\n return True\n else:\n f = (len(s) - 1)\n if (s[0] == s[f]):\n return pal(s[1:f])\n else:\n return False\n \n def subseq(s,t):\n if (len(t) == 0):\n return True\n elif (len(t) > len(s)):\n return False\n else:\n if (t[0] == s[0]):\n return subseq(s[1:],t[1:])\n else:\n return subseq(s[1:],t)\n \n def printAns(s,t,R,P,S):\n print('The string \"', s, '\" backwards is \"', R, '\".', sep='')\n if (P == True):\n print('The string \"', s, '\" is a palindrome.', sep='')\n else:\n print('The string \"', s, '\" is not a palindrome.', sep='')\n if (S == True):\n print('The string \"', t, '\" is a subsequence of \"', s, '\".', sep='')\n else:\n print('The string \"', t, '\" is not a subsequence of \"', s, '\".', sep='')\n \n s = input(\"Please enter a string: \")\n t = input(\"Please enter another string: \")\n R = rev(s)\n P = pal(s)\n S = subseq(s,t)\n printAns(s,t,R,P,S)\n \n \n \nmain()\n","repo_name":"raeschwartz24/Python_programs","sub_path":"recursiveStrings.py","file_name":"recursiveStrings.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73083878172","text":"import tkinter\r\nfrom tkinter import ttk\r\n\r\n\r\ndef main():\r\n window = tkinter.Tk()\r\n window.title('Elige tu lenguaje favorito') # Nombre de la ventana\r\n window.resizable(True, True) # Usuario puede redimensionar\r\n window.geometry(\"300x400\") # Tamaño inicial de la ventana\r\n\r\n diccionario_lenguajes = {\r\n 'java': 'Lenguaje orientado a objetos, de propósito general, tipado y de sintaxis clara',\r\n 'python': 'Lenguaje POO de muy alto nivel y sintaxis muy concisa, fuerte en Data Science',\r\n 'javascript': 'Lenguaje orientado a dinamizar páginas web directamente interpretable por browsers',\r\n 'c++': 'Lenguaje multiparadigma que extendió el lenguaje C permitiendo manipular objetos',\r\n 'ruby': 'Lenguaje de programación orientado a objetos con una sintaxis muy atractiva',\r\n 'php': 'Lenguaje especialmente utilizado en el desarrollo web y de servidores back-end'\r\n }\r\n\r\n seleccion = tkinter.StringVar()\r\n\r\n def set_descripcion():\r\n area_descripcion.delete(1.0, \"end\")\r\n area_descripcion.insert(1.0, diccionario_lenguajes[seleccion.get()])\r\n\r\n def clear_descripcion():\r\n area_descripcion.delete(1.0, \"end\")\r\n seleccion.set(None) # Resetea el 
StringVar\r\n\r\n def salir():\r\n window.quit()\r\n\r\n label_pregunta = ttk.Label(text='¿Cuál es tu lenguaje de programación favorito?')\r\n radiobutton1 = ttk.Radiobutton(\r\n window, text='Java', value='java', variable=seleccion, command=set_descripcion)\r\n radiobutton2 = ttk.Radiobutton(\r\n window, text='Python', value='python', variable=seleccion, command=set_descripcion)\r\n radiobutton3 = ttk.Radiobutton(\r\n window, text='JavaScript', value='javascript', variable=seleccion, command=set_descripcion)\r\n radiobutton4 = ttk.Radiobutton(\r\n window, text='C++', value='c++', variable=seleccion, command=set_descripcion)\r\n radiobutton5 = ttk.Radiobutton(\r\n window, text='Ruby', value='ruby', variable=seleccion, command=set_descripcion)\r\n radiobutton6 = ttk.Radiobutton(\r\n window, text='PHP', value='php', variable=seleccion, command=set_descripcion)\r\n area_descripcion = tkinter.Text(\r\n window, height=4, width=34, wrap='word')\r\n boton_reinicio = tkinter.Button(\r\n window, text='Reiniciar', width=18, command=clear_descripcion)\r\n boton_salir = tkinter.Button(\r\n window, text='Salir', width=18, command=salir)\r\n\r\n label_pregunta.pack(pady=10)\r\n radiobutton1.pack(fill='x', padx=30, pady=6)\r\n radiobutton2.pack(fill='x', padx=30, pady=6)\r\n radiobutton3.pack(fill='x', padx=30, pady=6)\r\n radiobutton4.pack(fill='x', padx=30, pady=6)\r\n radiobutton5.pack(fill='x', padx=30, pady=6)\r\n radiobutton6.pack(fill='x', padx=30, pady=6)\r\n area_descripcion.pack(padx=10, pady=6)\r\n boton_reinicio.pack(padx=10, pady=6)\r\n boton_salir.pack(padx=10, pady=6)\r\n\r\n window.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"alejandroMAD/OB-Ejercicios","sub_path":"Python/radiobuttons.py","file_name":"radiobuttons.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6884291727","text":"from sregistry.logger import bot\nimport sys\nimport botocore\n\n\ndef search(self, query=None, args=None):\n \"\"\"query a s3 endpoint for an image based on a string\n\n EXAMPLE QUERIES:\n\n [empty] list all container collections\n vsoch/dinosaur look for containers with name vsoch/dinosaur\n\n \"\"\"\n\n if query is not None:\n return self._container_search(query)\n\n # Search collections across all fields\n return self._search_all()\n\n\n################################################################################\n# Search Helpers\n################################################################################\n\n\ndef search_all(self, quiet=False):\n \"\"\"a \"show all\" search that doesn't require a query\n\n Parameters\n ==========\n quiet: if quiet is True, we only are using the function to return\n rows of results.\n \"\"\"\n\n results = []\n\n for obj in self.bucket.objects.all():\n subsrc = obj.Object()\n\n # Metadata bug will capitalize all fields, workaround is to lowercase\n # https://github.com/boto/boto3/issues/1709\n try:\n metadata = dict((k.lower(), v) for k, v in subsrc.metadata.items())\n except botocore.exceptions.ClientError as e:\n bot.warning(\"Could not get metadata for {}: {}\".format(subsrc.key, str(e)))\n continue\n\n size = \"\"\n\n # MM-DD-YYYY\n datestr = \"%s-%s-%s\" % (\n obj.last_modified.month,\n obj.last_modified.day,\n obj.last_modified.year,\n )\n\n if \"sizemb\" in metadata:\n size = \"%sMB\" % metadata[\"sizemb\"]\n\n results.append([obj.key, datestr, size])\n\n if len(results) == 0:\n bot.info(\"No container collections 
found.\")\n sys.exit(1)\n\n if not quiet:\n bot.info(\"Containers\")\n bot.table(results)\n return results\n\n\ndef container_search(self, query, across_collections=False):\n \"\"\"search for a specific container. If across collections is False,\n the query is parsed as a full container name and a specific container\n is returned. If across_collections is True, the container is searched\n for across collections. If across collections is True, details are\n not shown\"\"\"\n\n results = self._search_all(quiet=True)\n matches = []\n for result in results:\n # This is the container name\n if query in result[0]:\n matches.append(result)\n\n if len(matches) > 0:\n bot.info(\"Containers %s\" % query)\n bot.table(matches)\n else:\n bot.info(\"No matches for %s found.\" % query)\n\n return matches\n","repo_name":"singularityhub/sregistry-cli","sub_path":"sregistry/main/s3/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"30516754022","text":"from sklearn.ensemble import BaggingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import cross_val_score\n\nimport pandas as pd\nimport pylab\nimport numpy as np\nimport re\n\ndef bagging( x, y, n_estimators ):\n\tbagger = BaggingClassifier( DecisionTreeClassifier(max_depth=20, min_samples_leaf=1), n_estimators=n_estimators, max_samples=0.5, max_features=0.5)\n\treturn bagger\n\n\t\ndef boosting( x, y, n_estimators ):\n\tbooster = AdaBoostClassifier( n_estimators=n_estimators, learning_rate=0.1 )\n\tbooster.fit( x, y)\n\treturn booster\n\nclass TrainData:\n\t\"\"\"A convenient way to grab data without saving them to disk.\"\"\"\n\turls = (\n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/thyroid_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/echocardiogram_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/breast-cancer_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/congressional-voting_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/conn-bench-sonar-mines-rocks_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/cylinder-bands_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/echocardiogram_train.csv\", \n\t\t\"https://github.com/gditzler/UA-ECE-523-Sp2018/raw/master/data/haberman-survival_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/hayes-roth_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/heart-hungarian_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/hill-valley_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/horse-colic_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/ionosphere_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/image-segmentation_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/mammographic_train.csv\", \n\t\t\"https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/monks-1_train.csv\", 
\n\t)\n\tcache = None\n\tdef __getitem__( self, key ):\n\t\tif self.cache is None:\n\t\t\tself.cache = {}\n\t\t\t\n\t\tif key in self.cache:\n\t\t\tdata = self.cache[key][:]\n\n\t\telse:\n\t\t\tdata = pd.read_csv( self.urls[key], header=None ).as_matrix()\n\t\t\tself.cache[key] = data[:]\n\t\t\t\n\n\t\treturn data\n\n\tdef __iter__(self):\n\t\tfor ii in range( len( self.urls ) ):\n\t\t\tyield self.__getitem__(ii)\n\n\n\tdef __call__( self ):\n\t\t\"\"\"Cache the data\"\"\"\n\t\tfor data in self.__iter__():\n\t\t\tpass\n\t\t\n\tdef xy( self, key ):\n\t\tdata = self.__getitem__( key )\n\t\tx, y = data[:, :-1], data[:, -1]\n\t\treturn x, y\n\n\tdef __len__(self):\n\t\treturn len(self.urls)\n\n\nclass TestData( TrainData ):\n\t\"In case we want the test data\"\n\tdef __init__( self ):\n\t\tself.urls = [ url.replace(\"_train\", \"_test\") for url in self.urls ]\n\n\n\n\t\n\t\n\ndef classify( traindata=None, testdata=None, classifier_name='bagging' ):\n\n\t\"\"\"Most of the work is done here...\n\n\t\tThis funciton Fits ensemble classifiers for 15 different data sets\n\t\tdefined in traindata. The number of classifiers\n\t\tis set by range_estimators (2 to 50). I Use cross_val_score\n\t\tto score the data and then average the score for each\n\t\tn_estimators. You should end up with a dataframe\n\t\tindexed by n_estimators with one column--'err'. \n\n\t\tEnjoy!\n\t\"\"\"\n\n\tif traindata is None:\n\t\ttraindata = TrainData()\n\tif testdata is None:\n\t\ttestdata = TestData()\n\n\t#we only want to do one of two things\n\tassert( classifier_name in ['bagging', 'boosting'] )\n\t\n\t\n\tif classifier_name == 'bagging':\n\t\tclf_funct = bagging\n\telse:\n\t\tclf_funct = boosting\n\t\n\t#initialize the output dataframe\n\toutput = pd.DataFrame(columns=('dataset', 'n_estimators', 'err') )\n\t\n\t#how many estimators\n\trange_estimators = range( 2, 50)\n\tcounter = 0\n\n\t\n\tfor ii in range( len( traindata ) ):\n\t#iterate through 15 datasets\n\n\t\tfor n_estimators in range_estimators:\n\t\t#iterated throught n_estimators\n\n\t\t\tx, y = traindata.xy( ii )\n\t\t\t#setup the classifier\n\t\t\tclf = clf_funct( x, y, n_estimators )\n\t\t\t\n\t\t\t#score it. \n\t\t\tscores = cross_val_score( clf, x, y )\n\t\t\t#grab the err\n\t\t\terr = 1-scores.mean()\n\t\t\t\n\t\t\t#populate the output dataframe\n\t\t\toutput.loc[counter] = ( traindata.urls[ii].split('/')[-1], n_estimators, err )\n\t\t\tcounter+=1\n\n\t#Too much to plot each dataset. Lets average there err together. \n\tavg_output = pd.DataFrame(columns=( 'err', ), index=range_estimators )\n\tfor n_estimators in range_estimators:\n\t\tavg_output.loc[n_estimators] = output[output.n_estimators==n_estimators].err.mean()\n\n\treturn avg_output\n\t\n\n\t\n\t\ndef main(df=None):\n\t\"\"\" Call classifier and plot the result or if df is not none\n\t\tplot the df. \"\"\"\n\n\t#train and score the ensemble. \n\tif df == None:\n\t\tdf=pd.DataFrame( columns=('boosting', 'bagging') )\n\t\n\t\tfor method in (\"bagging\", 'boosting'):\n\t\t\tdf[method]=classify(classifier_name=method).err\n\telse:\n\t\tdf=pd.read_pickle(df)\n\n\n\t#plot the data. 
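	# Editor's note (style sketch): `if df == None` above relies on value
	# equality; the idiomatic identity test for a None default is
	#
	#     if df is None:
	#         df = pd.DataFrame(columns=('boosting', 'bagging'))
	#
	# The block below then draws the averaged error curves of both ensembles.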
\n\tfig=pylab.figure()\n\tax = fig.add_subplot(111)\n\tax.set_title(\"Boosting Vs Bagging\")\n\tax.set_xlabel(\"Number of Estimators\")\n\tax.set_ylabel(\"Average error of All Datasets\")\n\tax.plot(df.boosting, label='Boosting')\n\tax.plot(df.bagging, label='Bagging')\n\t\n\tax.legend()\n\tpylab.savefig(\"ensembles.png\")\n\tdf.to_pickle('ensembles.pkl')\n\treturn df\t\n\t\n\t\t\n\t\n\n","repo_name":"srswinde/ece523_hwk4","sub_path":"ensembles.py","file_name":"ensembles.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26259608617","text":"#!/usr/bin/python3\n\"\"\"Module that queries the reddit API to fetch comments recursively\n\"\"\"\n\nimport requests\n\n\ndef recurse(subreddit, hot_list=None, after=None):\n \"\"\"Recursive function that returns a list of subreddit's\n hot topics' titles\n Args: subreddit - subreddit to be queried\n hot_list[]- list to returned\n \"\"\"\n if hot_list is None:\n hot_list = []\n\n base_url = \"https://www.reddit.com/r/{}/hot.json\".format(subreddit)\n user_agent = \"0x16. API advanced\"\n headers = {\"User-Agent\": user_agent}\n\n response = requests.get(base_url, headers=headers, allow_redirects=False)\n\n if response.status_code == 200:\n data = response.json()\n posts = data[\"data\"][\"children\"]\n hot_list.extend([post[\"data\"][\"title\"] for post in posts])\n\n after = data[\"data\"][\"after\"]\n if after is not None:\n recurse(subreddit, hot_list, after)\n else:\n return hot_list\n\n else:\n return None\n","repo_name":"Markkimotho/alx-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8885194335","text":"import os\r\nimport time\r\nimport numpy as np\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom tensorflow import keras\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\nimport sys\r\nimport re\r\nfrom scapy.all import *\r\nfrom memory_profiler import profile\r\nfrom UserSetUp import *\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom pprint import pprint\r\nfrom tqdm import tqdm\r\n\r\n\r\nRGBpixels = []\r\nGraypixels =[]\r\n\r\ndef extendbyte(start, end,bi):\r\n return int((int(bi[start:end],2)/8) * 2**8)\r\n\r\ndef gray2rgb(byte):\r\n bi = format(byte,'08b').replace(\"0b\", \"\")\r\n r = extendbyte(0,3,bi)\r\n g = extendbyte(2,5,bi)\r\n b = extendbyte(7,8,bi)\r\n return([r,g,b])\r\n\r\ndef createArray(fileName):\r\n Graypixels =[]\r\n RGBpixels =[]\r\n with open(fileName, 'rb') as fp:\r\n for byte_s in fp:\r\n for byte in byte_s:\r\n byte = byte + 1\r\n if byte > 255: \r\n byte = 255;\r\n Graypixels.append(byte) \r\n RGBpixels.append(gray2rgb(byte ))\r\n if len(Graypixels)>=max_Pixels:\r\n print('Failed to Convert, too big')\r\n return(0,0)\r\n return(Graypixels, RGBpixels)\r\n\r\n\r\ndef paddArraystoMatrix(gray, colour):\r\n #print(len(gray))\r\n #print(len(colour))\r\n rgb_padded =[]\r\n gray_padded = []\r\n rgb_padded =list(colour)\r\n gray_padded = list(gray)\r\n if len(gray) 0:\r\n timefiles = [os.path.join(folder_to_watch, f) for f in files if f.endswith('.json')]\r\n timefiles.sort(key=os.path.getmtime)\r\n new_file = os.path.basename(timefiles[-1]) # Get the newest file\r\n if '.json' in new_file:\r\n # Load the JSON file and convert it to images\r\n json_path = os.path.join(folder_to_watch, new_file)\r\n 
PCAP_Name = os.path.splitext(new_file)[0]\r\n colour_image, gray_image = JSON2_Image(json_path, PCAP_Name)\r\n \r\n # Pass the images through the model and make a prediction\r\n colour_img_arr = np.array(colour_image)\r\n gray_img_arr = np.array(gray_image)\r\n colour_img_arr = colour_img_arr # Rescale the image\r\n gray_img_arr = gray_img_arr # Rescale the image\r\n colour_preds = predict_image(colour_img_arr)\r\n #gray_preds = predict_image(gray_img_arr)\r\n preds = colour_preds\r\n pred_class = np.argmax(preds)\r\n predictions.append(pred_class)\r\n \r\n # Raise a warning if the predicted class is 0 or 1\r\n if len(set(predictions)) == 1 and len(predictions) == 2:\r\n if predictions[0] == 0:\r\n print(\"Botnet Detected\")\r\n show_warning(\"Botnet Detected\")\r\n elif predictions[0] == 1:\r\n print(\"Exploitation Detected\")\r\n show_warning(\"Exploitation in Progress\")\r\n \r\n # Reset the prediction buffer after 3 predictions\r\n if len(predictions) == 2:\r\n predictions = []\r\n","repo_name":"Lewisc321/Masters-Code","sub_path":"Model_Runner/Classify_Decoy_logs.py","file_name":"Classify_Decoy_logs.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27206396977","text":"from flamb.autograd import Variable, no_grad\nfrom flamb.tensor import *\nfrom flamb import functional\nfrom flamb import nn\n\nenviron = {\"is_grad_enabled\": True}\n\n__all__ = [\n \"Variable\",\n \"Tensor\",\n \"zeros\",\n \"ones\",\n \"rand\",\n \"to_tensor\",\n \"concatenate\",\n \"dot\",\n \"matmul\",\n \"no_grad\",\n \"environ\",\n \"functional\",\n \"nn\",\n]\n","repo_name":"yanisadel/Flamb","sub_path":"flamb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70040319773","text":"INSTALLED_APPS = (\n 'django.contrib.sites'\n ,'django.contrib.auth'\n ,'django.contrib.admindocs'\n ,'django.contrib.contenttypes'\n ,'django.contrib.comments'\n ,'django.contrib.sessions'\n ,'django.contrib.sitemaps'\n ,'registration'\n ,'keyedcache'\n ,'livesettings'\n ,'l10n'\n ,'sorl.thumbnail'\n ,'tax'\n ,'tax.modules.no'\n ,'tax.modules.area'\n ,'tax.modules.percent'\n ,'shipping'\n ,'product'\n ,'product.modules.configurable'\n ,'payment'\n ,'payment.modules.dummy'\n ,'payment.modules.giftcertificate'\n ,'satchmo_utils'\n ,'app_plugins'\n )\n\n#TEMPLATE_DIRS = (os.path.join(DIRNAME, \"templates\"))\n#TEMPLATE_CONTEXT_PROCESSORS = ('satchmo_store.shop.context_processors.settings',\n# 'django.core.context_processors.auth',\n# )\n\nREGISTER_FORM = \"registration.forms.RegistrationForm\"\n\n","repo_name":"francisl/vcms","sub_path":"modules/store_custom_satchmo/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32438334090","text":"import collections\r\nprint(\"Order Dictionary\")\r\nd1=collections.OrderedDict()\r\nd1['a']='SAS'\r\nd1['d']='Python'\r\nd1['b']='CProgram'\r\nfor k,v in d1.items():\r\n print(k,\":\",v)\r\nprint(\"Sorted Order Dictionary\")\r\ndict=collections.OrderedDict(sorted(d1.items()))\r\nfor k,v in dict.items():\r\n 
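# Editor's aside: since Python 3.7 the built-in dict preserves insertion
# order, so collections.OrderedDict is mainly useful for its extras
# (move_to_end, order-sensitive equality). This sorted printout can be
# written against a plain dict, and rebinding the built-in name `dict` (as
# this script does) is best avoided:
#
#     for k in sorted(d1):
#         print(k, ":", d1[k])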
print(k,\":\",v)\r\n","repo_name":"Onlinetrainingtech/PythonDSBatch","sub_path":"edu7.py","file_name":"edu7.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8147792799","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 12 11:52:39 2020\n\n@author: jruiz\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#Vamos a generar un ensamble que provienen de 2 gaussianas con diferente covarianza.\n\n\ncov1=np.array([[1.0,0.7],[0.7,1.0]])\ncov2=np.array([[1.0,-0.7],[-0.7,1.0]])\n\nmean1=np.array([2,0])\nmean2=np.array([-2,0])\n\nsamplesize = 20\n\nx1 = np.random.multivariate_normal(mean1,cov1,samplesize)\nx2= np.random.multivariate_normal(mean2,cov2,samplesize)\n\n\nx=np.concatenate( (x1,x2), axis=0)\n\n\n\n\n\nplt.plot(x1[:,0],x1[:,1],'o');plt.plot(x2[:,0],x2[:,1],'ro')\nplt.plot(x[:,0],x[:,1],'o')","repo_name":"gustfrontar/DABA","sub_path":"Lorenz_63/untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"39994081315","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot(labels, predictions, filename=None):\n conf_arr = tf.confusion_matrix(\n labels,\n predictions,\n num_classes=11\n ).eval()\n\n norm_conf = []\n for i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j)/float(a + 1e-5))\n norm_conf.append(tmp_arr)\n\n fig = plt.figure()\n plt.clf()\n ax = fig.add_subplot(111)\n ax.set_aspect(1)\n res = ax.imshow(np.array(norm_conf), cmap=plt.cm.Reds, \n interpolation='nearest')\n\n width, height = conf_arr.shape\n\n for x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(x, y), \n horizontalalignment='center',\n verticalalignment='center')\n\n cb = fig.colorbar(res)\n plt.xticks(range(width), '123')\n plt.yticks(range(height), '123')\n plt.xlabel('Ground truth labels')\n plt.ylabel('Predicted labels')\n # plt.savefig('confusion_matrix.png', format='png')\n plt.show()\n\n","repo_name":"ishansomshekar/nli_classifier","sub_path":"utils/confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18850735295","text":"\"\"\"\nCreated on 2016-11-29\n\n@author: Peer Springstübe\n\"\"\"\nimport numpy as np\nimport scipy.ndimage\nimport tensorflow as tf\nfrom functools import partial\nfrom matplotlib import pyplot as plt\n\nfrom .util import loc2xgeo, xgeo2loc\nfrom .Pursuer import Pursuer\nfrom ..Rect import Rect\nfrom concurrent import futures\nimport time\nimport multiprocessing\n\navgs = []\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass SwarmPursuer(Pursuer):\n\n def __init__(self):\n self.dtype = tf.float32\n #self.thread_executor = futures.ProcessPoolExecutor(max_workers=workers)\n self.thread_executor = None\n self.tracker = None\n self.configuration = None\n self.particle_count = None\n self.particle_scale_factor = None\n self.target_lower_limit = None\n self.target_punish_low = None\n self.target_punish_outside = None\n self.worker_count = 1\n self.thread_executor = None\n self.np_random = None\n self.initial_location = None\n\n def configure(self, configuration):\n self.configuration = configuration\n pconf = 
configuration['pursuer']\n self.particle_count = pconf['particle_count']\n self.particle_scale_factor = pconf['particle_scale_factor'] if 'particle_scale_factor' in pconf else 1.0\n self.target_lower_limit = float(pconf['target_lower_limit'])\n self.target_punish_low = float(pconf['target_punish_low'])\n self.target_punish_outside = float(pconf['target_punish_outside'])\n available_cpus = multiprocessing.cpu_count()\n self.worker_count = min(configuration['max_cpus'], available_cpus * 2) if 'max_cpus' in configuration else available_cpus\n logger.info(\"Spawning {} workers.\".format(self.worker_count))\n self.thread_executor = futures.ThreadPoolExecutor(max_workers=self.worker_count)\n self.np_random = configuration['np_random']\n\n def set_initial_position(self, pos):\n self.initial_location = pos\n\n def setup(self, tracker):\n self.tracker = tracker\n\n def generate_geo_particles(self, geo, img_size, lost):\n if lost == 0:\n num_particles = self.particle_count\n #spread = 0.4\n spread = 1.0\n elif lost == 1:\n num_particles = self.particle_count * 2\n # spread = 2.0\n spread = 5.0\n else:\n raise ValueError(\"Invalid value for lost: {}\".format(lost))\n\n # geo = loc2affgeo(loc)\n geos = np.tile(geo, (num_particles, 1)).T\n# r = self.np_random.randn(4, num_particles)\n# r *= 0.2\n r1 = self.np_random.randn(2, num_particles) * spread\n r2 = self.np_random.randint(-1, 2, (2, num_particles))\n r = np.concatenate((r1, r2))\n #f = np.tile([10, 10, .01, .01], (num_particles, 1)).T\n# f = np.tile([10, 10, 0.004, 0], (num_particles, 1)).T\n f = np.tile([10, 10, 0, 0], (num_particles, 1)).T\n rn = np.multiply(r, f)\n\n # geos += rn\n #\n if False:\n geos[2, geos[2, :] < 0.05] = 0.05\n geos[2, geos[2, :] > 0.95] = 0.95\n geos[3, geos[3, :] < 0.10] = 0.10\n geos[3, geos[3, :] > 10.0] = 10.0\n w = img_size[0]\n h = img_size[1]\n geos[0, geos[0, :] < (0.05 * w)] = 0.05 * w\n geos[0, geos[0, :] > (0.95 * w)] = 0.95 * w\n geos[1, geos[1, :] < (0.05 * h)] = 0.05 * h\n geos[1, geos[1, :] > (0.95 * h)] = 0.95 * h\n\n return (geos + rn).T\n\n def generate_particles(self, loc, img_size, lost):\n geo = loc2xgeo(loc)\n geos = self.generate_geo_particles(geo, img_size, lost)\n locs = [Rect(xgeo2loc(g)) for g in geos]\n # add previous position, to make sure there is at least one valid\n # position:\n locs.append(loc)\n return locs\n\n def upscale_mask(self, mask, roi, image_size):\n # scale prediction mask up to size of roi (not of sroi!):\n relation = roi.width / self.mask_size[0], roi.height / self.mask_size[1]\n #roi_mask = scipy.ndimage.zoom(mask.reshape(self.mask_size), relation)\n roi_mask = mask.reshape((mask.shape[1], mask.shape[2]))\n # crop low values\n roi_mask[roi_mask < self.target_lower_limit] = self.target_punish_low\n # put mask in capture image mask:\n img_mask = np.full((round(image_size[0] / relation[0]),\n round(image_size[1] / relation[1])), self.target_punish_outside)\n y0 = round(roi.top / relation[1])\n x0 = round(roi.left / relation[0])\n y1 = y0 + roi_mask.shape[1]\n x1 = x0 + roi_mask.shape[0]\n img_mask[y0: y1, x0: x1] = roi_mask\n return img_mask, relation\n\n def position_quality(self, pos, roi, image_mask_sum, inner_sum, scale_factor):\n #logger.info(\"QUALI: %s, %s\", image_mask.shape, pos)\n # too small?\n # p1 = time.time()\n scale_factor_squared = scale_factor[0] *scale_factor[1]\n if pos.width < 8 or pos.height < 8:\n return -1e12\n # outside roi?\n if pos.left < roi.left or pos.top < roi.top or pos.right > roi.right or pos.bottom > roi.bottom:\n return -1e12\n # p2 = 
time.time()\n \"\"\"inner = img_mask[\n int(pos.top):int(pos.bottom - 1),\n int(pos.left):int(pos.right - 1)].sum()\"\"\"\n inner = inner_sum\n\n # p3 = time.time()\n inner_fill = inner / (pos.pixel_count() / scale_factor_squared)\n outer = image_mask_sum - inner\n\n outer_fill = outer / max((roi.pixel_count() - pos.pixel_count()) / scale_factor_squared, 1)\n # p4 = time.time()\n # p5 = time.time()\n\n \"\"\"total = p4 - p1\n t1 = p2 - p1\n t2 = p3 - p2\n t3 = p4 - p3\n # t4 = p5 - p4\n\n print(\"part1: {:.2} ({:.2}); part2: {:.2} ({:.2}); part3: {:.2} ({:.2});\".format(t1, t1 / total, t2,\n t2 / total, t3, t3 / total))\"\"\"\n\n # for dynamic rescaling, pass 'punish_low=True' to calculate_sum and use 'inner' as quality\n if self.particle_scale_factor == 1.0: # no scaling\n quality = max(inner_fill - outer_fill, 0.0)\n else:\n quality = max(inner, 0.0)\n # print(\"quality: {}\".format(quality))\n return quality\n\n def pursue(self, state, frame, lost=0):\n #ps = [time.time()] # 0\n logger.info(\"Predicting position for frame %s, Lost: %d\", frame, lost)\n # TODO: not here...\n self.mask_size = self.configuration['mask_size']\n #\n mask = frame.prediction_mask.copy()\n\n #ps.append(time.time()) # 1\n\n mask[mask < self.target_lower_limit] = self.target_punish_low\n mask[mask < 0.0] = 0.0\n\n #print(\"a\", mask.max(), mask.min(), np.average(mask))\n img_size = [frame.size[1], frame.size[0]]\n\n #ps.append(time.time()) # 2\n img_mask, scale_factor = self.upscale_mask(mask, frame.roi, img_size)\n #print(\"a\", img_mask.max(), img_mask.min(), np.average(img_mask))\n frame.image_mask = img_mask\n\n #ps.append(time.time()) # 3\n\n locs = self.generate_particles(\n frame.previous_position, frame.size, lost)\n #total = np.sum(img_mask)\n #total_max = np.sum(img_mask[img_mask > 0])\n# total_max = np.sum(np.abs(img_mask))\n\n #ps.append(time.time()) # 4\n img_mask_sum = img_mask.sum()\n #ps.append(time.time()) # 5\n total_max = 1\n \"\"\"func = partial(position_quality_helper, img_mask, frame.roi, total_max, img_mask_sum)\n p4 = time.time()\n quals = list(self.thread_executor.map(func, locs))\"\"\"\n\n if False and self.particle_scale_factor != 1.0:\n scaled_locs = []\n for loc in locs:\n width_difference = int(loc.width * self.particle_scale_factor)\n height_difference = int(loc.height * self.particle_scale_factor)\n\n new_width = loc.width + width_difference\n new_height = loc.height + height_difference\n new_x = loc.x - (width_difference / 2)\n new_y = loc.y - (height_difference / 2)\n scaled_locs.append(Rect(new_x, new_y, new_width, new_height))\n\n new_width = loc.width - width_difference\n new_height = loc.height - height_difference\n new_x = loc.x + (width_difference / 2)\n new_y = loc.y + (height_difference / 2)\n scaled_locs.append(Rect(new_x, new_y, new_width, new_height))\n\n width_difference = self.initial_location.width - loc.width\n height_difference = self.initial_location.height - loc.height\n new_width = loc.width + width_difference\n new_height = loc.height + height_difference\n new_x = loc.x - (width_difference / 2)\n new_y = loc.y - (height_difference / 2)\n scaled_locs.append(Rect(new_x, new_y, new_width, new_height))\n\n locs.extend(scaled_locs)\n\n # if scaling is enabled, punish pixels with low feature rating\n punish_low = self.particle_scale_factor != 1.0\n slices = [img_mask[round(pos.top / scale_factor[1]):round((pos.bottom - 1) / scale_factor[1]),\n round(pos.left / scale_factor[0]):round((pos.right - 1) / scale_factor[0])] for pos in locs]\n\n #ps.append(time.time()) # 
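        # Editor's aside (performance note, an assumption rather than a
        # claim about the author's intent): mapping np.sum over the mask
        # slices through a ThreadPoolExecutor can help because NumPy releases
        # the GIL inside large reductions, but for the small per-particle
        # slices here the dispatch overhead may dominate; a plain
        # comprehension is the simple fallback:
        #
        #     sums = [s.sum() for s in slices]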
6\n\n sums = list(self.thread_executor.map(np.sum, slices))\n #ps.append(time.time()) # 7\n\n quals = [self.position_quality(pos, frame.roi, img_mask_sum, inner_sum, scale_factor) / total_max\n for pos, inner_sum in zip(locs, sums)]\n\n #ps.append(time.time()) # 8\n\n best_arg = np.argmax(quals)\n frame.predicted_position = Rect(locs[best_arg])\n # quality of prediction needs to be absolute, so we normalise it with\n # the \"perfect\" value this prediction would have:\n perfect_quality = 1\n #print(quals[best_arg], perfect_quality)\n frame.prediction_quality = max(\n 0.0, min(1.0, quals[best_arg] / perfect_quality))\n logger.info(\"Prediction: %s, quality: %f\",\n frame.predicted_position, frame.prediction_quality)\n\n #ps.append(time.time()) # 9\n\n # i = i+1 - i\n\n #ts = [p1 - p0 for p0,p1 in zip(ps[:-1], ps[1:])]\n\n #total = ps[-1] - ps[0]\n #log = \"\"\n #for n, t in enumerate(ts):\n # log += \"; part{}: {:.2} ({:.2})\".format(n, t, t / total)\n #print(log[2:])\n\n return frame.predicted_position\n\n\n @staticmethod\n def calculate_sum(mat, punish_low=False):\n if punish_low:\n return np.multiply(mat - 0.2, 3, where=(mat < 0)).sum()\n else:\n return mat.sum()\n","repo_name":"kratenko/HIOB","sub_path":"core/pursuing/SwarmPursuer.py","file_name":"SwarmPursuer.py","file_ext":"py","file_size_in_byte":10875,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"13789484153","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom cnvd_spider.items import CnvdSpiderItem\nimport re\nfrom scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule, Spider\nimport time\nimport random\nfrom datetime import date\nfrom scrapy import FormRequest\nfrom scrapy.http import Request, HtmlResponse\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport ast\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\nclass ExampleSpider(CrawlSpider):\n name = \"myspider\"\n count = 0\n cookies = {}\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'\n }\n rules = (\n Rule(LinkExtractor(allow=r\"www.cnvd.org.cn/flaw/show/*\", unique=True),\n callback=\"parse_news\", follow=True),\n )\n\n allowed_domains = [\"www.cnvd.org.cn\"]\n # start_urls = ['https://www.cnvd.org.cn/flaw/list.htm?max=20&offset=2050']\n\n def start_requests(self):\n self.cookies = self.get_cnvd_cookies()\n # , meta={'cookiejar': 1}\n yield scrapy.Request(url='https://www.cnvd.org.cn/flaw/list.htm?max=20&offset=2050', headers=self.headers, cookies=self.cookies)\n\n def _build_request(self, rule, link):\n r = Request(url=link.url, headers=self.headers, cookies=self.cookies,meta={'dont_merge_cookies': True},\n callback=self._response_downloaded)\n # r.meta['dont_merge_cookies']=True\n r.meta.update(rule=rule, link_text=link.text)\n return r\n\n def parse_news(self, response):\n response.headers = {}\n item = CnvdSpiderItem()\n time.sleep(random.randint(2, 3))\n self.count += 1\n print(self.count)\n print(self.cookies)\n if (self.count == 3):\n self.cookies = self.get_cnvd_cookies()\n self.count = 0\n # self.cookies = self.get_cnvd_cookies()\n self.get_id(response, item)\n self.get_url(response, item)\n self.get_date(response, item)\n self.get_level(response, item)\n self.get_cve_id(response, item)\n time.sleep(random.randint(1, 2))\n self.get_name(response, item)\n 
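        # Editor's note: parse_news refreshes the Selenium-harvested cookies
        # after every third detail page (self.count == 3) to keep the
        # anti-crawl session alive; each get_* helper that follows fills one
        # item field from an XPath query against the vulnerability page.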
self.get_products(response, item)\n self.get_detail(response, item)\n self.get_types(response, item)\n self.get_refer_url(response, item)\n self.get_method(response, item)\n return item\n\n def get_cnvd_cookies(self):\n chrome_options = Options()\n # 加上下面两行,解决报错\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n # chrome_options.add_argument('--headless')\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.get(\"https://www.cnvd.org.cn/flaw/list.htm?max=20&offset=2550\")\n cj = driver.get_cookies()\n cookie = ''\n for c in cj:\n cookie += \"'\"+c['name'] + \"':'\" + c['value'] + \"',\"\n cookie = ast.literal_eval('{'+cookie+'}')\n driver.quit()\n return cookie\n\n def get_url(self, response, item):\n time.sleep(random.randint(1, 2))\n item['cnvd_url'] = response.url\n\n def get_name(self, response, item):\n name = response.xpath(\n \"//h1/text()\").extract()\n # print(\"\\n=======\"+response.meta['cookiejar']+\"================\\n\")\n if name:\n item['cnvd_name'] = name[0].strip()\n\n # cnvd_id\n\n def get_id(self, response, item):\n item[\"cnvd_id\"] = response.xpath(\n \"//td[text()='CNVD-ID']/following-sibling::td[1]/text()\").extract()\n if item[\"cnvd_id\"]:\n item[\"cnvd_id\"] = \"\".join(\n [i.strip() for i in item[\"cnvd_id\"]])\n else:\n item[\"cnvd_id\"] = 'Null'\n\n # 发布日期\n\n def get_date(self, response, item):\n item[\"cnvd_date\"] = response.xpath(\n \"//div[@class='tableDiv']/table[@class='gg_detail']//tr[2]/td[2]/text()\").extract()\n if item[\"cnvd_date\"]:\n item[\"cnvd_date\"] = \"\".join(\n [i.strip() for i in item[\"cnvd_date\"]]).replace('-', '')\n item[\"cnvd_date\"] = self.convertstringtodate(item[\"cnvd_date\"])\n else:\n item[\"cnvd_date\"] = '2000-01-01'.replace('-', '')\n item[\"cnvd_date\"] = self.convertstringtodate(item[\"cnvd_date\"])\n\n # 危害级别\n\n def get_level(self, response, item):\n item[\"cnvd_level\"] = response.xpath(\n \"//td[text()='危害级别']/following-sibling::td[1]//text()\").extract()\n if item[\"cnvd_level\"]:\n item[\"cnvd_level\"] = \"\".join(\n [i.replace(\"(\", \"\").replace(\")\", \"\").strip() for i in item[\"cnvd_level\"]])\n else:\n item[\"cnvd_level\"] = 'Null'\n\n # 影响产品\n\n def get_products(self, response, item):\n item[\"cnvd_products\"] = response.xpath(\n \"//table[@class='gg_detail']//tr[td[1]='影响产品']/td[2]/text()\").extract()\n if item[\"cnvd_products\"]:\n item[\"cnvd_products\"] = \";\".join(\n [i.strip() for i in item[\"cnvd_products\"]])\n else:\n item[\"cnvd_products\"] = 'Null'\n\n # cve_id\n\n def get_cve_id(self, response, item):\n item[\"cnvd_cve_id\"] = response.xpath(\n \"//td[text()='CVE ID']/following-sibling::td[1]//text()\").extract()\n if item[\"cnvd_cve_id\"]:\n item[\"cnvd_cve_id\"] = \"\".join(\n [i.strip() for i in item[\"cnvd_cve_id\"]])\n else:\n item[\"cnvd_cve_id\"] = 'Null'\n\n # 漏洞描述\n\n def get_detail(self, response, item):\n item[\"cnvd_detail\"] = response.xpath(\n \"//td[text()='漏洞描述']/following-sibling::td[1]//text()\").extract()\n if item[\"cnvd_detail\"]:\n item[\"cnvd_detail\"] = \"\".join(\n [i.strip() for i in item[\"cnvd_detail\"]]).replace(\"\\u200b\", \"\")\n else:\n item[\"cnvd_detail\"] = 'Null'\n # 漏洞类型\n\n def get_types(self, response, item):\n\n types = response.xpath(\n \"//table[@class='gg_detail']//tr[td[1]='漏洞类型']/td[2]/text()\").extract()\n\n if types:\n item['cnvd_types'] = types[0].strip()\n\n # 参考链接\n def get_refer_url(self, response, item):\n item[\"cnvd_refer_url\"] = response.xpath(\n 
\"//td[text()='参考链接']/following-sibling::td[1]/a/@href\").extract()\n if item[\"cnvd_refer_url\"]:\n item[\"cnvd_refer_url\"] = item[\"cnvd_refer_url\"][0].replace(\n '\\r', '')\n else:\n item[\"cnvd_refer_url\"] = 'Null'\n\n # 漏洞解决方案\n\n def get_method(self, response, item):\n item[\"cnvd_method\"] = response.xpath(\n \"//td[text()='漏洞解决方案']/following-sibling::td[1]//text()\").extract()\n if item[\"cnvd_method\"]:\n item[\"cnvd_method\"] = \"\".join(\n [i.strip() for i in item[\"cnvd_method\"]])\n else:\n item[\"cnvd_method\"] = 'Null'\n\n def convertstringtodate(self, stringtime):\n \"把字符串类型转换为date类型\"\n # 把数据里的时间格式替换成数据库需要的格式。日期格式,便于后期提取数据,\n if stringtime[0:2] == \"20\":\n year = stringtime[0:4]\n month = stringtime[4:6]\n day = stringtime[6:8]\n if day == \"\":\n day = \"01\"\n begintime = date(int(year), int(month), int(day))\n return begintime\n else:\n year = \"20\" + stringtime[0:2]\n month = stringtime[2:4]\n day = stringtime[4:6]\n\n begintime = date(int(year), int(month), int(day))\n return begintime\n","repo_name":"RyQcan/cnvd_spider","sub_path":"cnvd_spider/spiders/myspider.py","file_name":"myspider.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"7561066051","text":"def main():\r\n # set up dictionary\r\n dt01 = { 1: 'one',\r\n 2: 'two',\r\n 3: 'three',\r\n 4: 'four'}\r\n print('Dictionary example')\r\n print(dt01)\r\n v01 = dt01[2]\r\n print('Value for 2 is: ',v01)\r\n a=4\r\n v02 = dt01[a]\r\n print('Value for ', a, ' is: ',v02)\r\n print('-----------')\r\n for v88 in dt01:\r\n print(v88, ' - ',dt01[v88])\r\nmain()\r\n","repo_name":"nonushalini1210/python","sub_path":"Programs/Lab- assignments/P13/Python Dictionary/PGMS Dictionary/dt03.py","file_name":"dt03.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4421949126","text":"\"\"\"Multi-Processing Utilities.\"\"\"\nimport torch.multiprocessing as mp\n\n\ndef run_parallel_returns(\n function, args_list, num_cpu=None, max_process_time=300, max_timeouts=4\n):\n \"\"\"Run a function in parallel and gather the return value of the function.\n\n Parameters\n ----------\n function: callable.\n Function ot execute in parallel.\n args_list: List[Tuple]\n List of tuples of arguments to pass to the function.\n num_cpu: int, optional.\n Number of cpus to run in parallel the code.\n max_process_time: int, optional.\n Maximum number of seconds to run each process.\n max_timeouts: int, optional.\n Maximum number of timeouts tolerated before it raises an Error.\n\n Returns\n -------\n results: List[Any]\n It returns a list of all the return values of the function in parallel.\n You are in charge of the gathering.\n\n \"\"\"\n # Base case\n if max_timeouts == 0:\n return None\n num_cpu = mp.cpu_count() if num_cpu is None else num_cpu\n\n if len(args_list) == 1:\n results = [function(*args_list[0])]\n elif num_cpu <= 1:\n results = []\n for args in args_list:\n results.append(function(*args))\n else:\n pool = mp.Pool(processes=num_cpu, maxtasksperchild=1)\n parallel_runs = [pool.apply_async(function, args=args) for args in args_list]\n try:\n results = [p.get(timeout=max_process_time) for p in parallel_runs]\n except Exception as e:\n print(str(e))\n print(\"Timeout Error raised... 
Trying again\")\n            pool.close()\n            pool.terminate()\n            pool.join()\n            return run_parallel_returns(\n                function, args_list, num_cpu, max_process_time, max_timeouts - 1\n            )\n\n        pool.close()\n        pool.terminate()\n        pool.join()\n\n    return results\n\n\ndef modify_parallel(function, args_list, num_cpu=None):\n    \"\"\"Run a function that mutates a variable inside the args_list parallel.\n\n    Parameters\n    ----------\n    function: callable.\n        Function ot execute in parallel.\n    args_list: List[Tuple]\n        List of tuples of arguments to pass to the function.\n    num_cpu: int, optional.\n        Number of cpus to run in parallel the code.\n\n    Returns\n    -------\n    None.\n\n    Notes\n    -----\n    Remember to call tensor.share_memory_() or module.share_memory() before calling this\n    function.\n    \"\"\"\n    num_cpu = mp.cpu_count() if num_cpu is None else num_cpu\n    num_calls = len(args_list)\n    if num_calls == 1:\n        function(*args_list[0])\n    else:\n        processes = []\n        for rank in range(num_calls):\n            p = mp.Process(target=function, args=(*args_list[rank],))\n            p.start()\n            processes.append(p)\n\n        for p in processes:\n            p.join()\n","repo_name":"sebascuri/rllib","sub_path":"rllib/util/multiprocessing.py","file_name":"multiprocessing.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"29971730425","text":"# y = wx + b\r\nimport tensorflow as tf\r\ntf.set_random_seed(77) \r\n\r\n#1. 데이터 \r\nx_train = [1,2,3]\r\ny_train = [1,2,3]\r\n\r\n# w = tf.Variable(1, dtype = tf.float32) \r\n# b = tf.Variable(1, dtype = tf.float32) \r\nw = tf.Variable(tf.random_normal([1], dtype = tf.float32))\r\nb = tf.Variable(tf.random_normal([1], dtype = tf.float32))\r\n\r\n# sess = tf.compat.v1.Session()\r\n# sess.run(tf.global_variables_initializer())\r\n# print(sess.run(w)) # [1.014144]\r\n\r\n#2. 모델구성\r\nhypothesis = x_train * w + b # y = wx + b \r\n\r\n#3-1. 컴파일\r\nloss = tf.reduce_mean(tf.square(hypothesis - y_train)) \r\n \r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) \r\ntrain = optimizer.minimize(loss) \r\n\r\n#3-2. 
훈련\r\nsess = tf.compat.v1.Session()\r\nsess.run(tf.global_variables_initializer())\r\n\r\nfor step in range(2001):\r\n    sess.run(train) \r\n    if step % 20 == 0:\r\n        print(step, sess.run(loss), sess.run(w), sess.run(b)) \r\n        \r\nsess.close() ","repo_name":"NamKungJiHee/Study","sub_path":"tf114/tf07_Linear3_random.py","file_name":"tf07_Linear3_random.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6340327909","text":"# favorite languages dictionary\r\n\r\nfavorite_languages = {\r\n    'jen': \"python\",\r\n    'sarah': \"\",\r\n    'edward': \"C++\",\r\n    'phil': \"Rust\",\r\n    'kaleb': \"\",\r\n}\r\n\r\n# loop thruogh the list to check if the user has taken the favoite languages poll\r\n# if user has a stored value for the language poll, print, \"thank you for participating\"\r\n# if user has no vlaue stored for the langauge poll, print, \"take the fuckin poll you fat fcuk\"\r\nfor result in favorite_languages.values():\r\n    if result == \"\":\r\n        print(\"take the fuckin poll you fat fcuk\")\r\n    else:\r\n        print(\"thanks for taking the survery!\")","repo_name":"Triston-Riddle/Python_Crash_Course_2nd","sub_path":"Chapter_6/6.6_polling.py","file_name":"6.6_polling.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32240740223","text":"# x: a token (e.g., character, subword, word).\n# X: a sequence of tokens.\n# X_set: a set of sequences.\n# xs: a concatnation of sets.\n# y: a number for an example\n# ys: numbers for examples\nimport math\nfrom msnc.util import Util\n\n\nclass Dataset():\n\n    def __init__(self, examples, x_to_index=None, isregression=False):\n        self.util = Util()\n        self.pad_index = self.util.PAD_INDEX\n        self.unk_index = self.util.UNK_INDEX\n\n        X_sets = [[example['Xs'][i] for example in examples]\n                  for i in range(len(examples[0]['Xs']))]\n\n        self.x_to_index = x_to_index\n        if x_to_index is None:\n            self.x_to_index = []\n            for i in range(len(examples[0]['Xs'])):\n                xs = [x for X in X_sets[i] for x in X]\n                self.x_to_index.append(self._make_index(xs))\n\n        self.Xs = []\n        self.raw_Xs = [] # for debug\n        for i in range(len(examples[0]['Xs'])):\n            self.Xs.append(self._degitize(X_sets[i], self.x_to_index[i]))\n            self.raw_Xs.append(X_sets[i])\n\n        # indices\n        self.indices = [example['index'] for example in examples]\n\n        if isregression:\n            self.ys = [math.log10(example['y']) for example in examples]\n        else:\n            self.ys = [example['y'] for example in examples]\n\n    def _make_index(self, xs):\n        x_to_index = {'<pad>': self.pad_index, '<unk>': self.unk_index}\n        for x in xs:\n            if x not in x_to_index:\n                x_to_index[x] = len(x_to_index)\n        return x_to_index\n\n    def _get_index(self, x, x_to_index):\n        if x not in x_to_index:\n            return x_to_index['<unk>']\n        return x_to_index[x]\n\n    def _degitize(self, X_set, x_to_index):\n        X = []\n        for _X in X_set:\n            _X = [self._get_index(x, x_to_index) for x in _X]\n            X.append(_X)\n        return X\n\n    def split(self, batch_size):\n        example_num = len(self.Xs[0])\n        batch_num = int(example_num / batch_size)\n        batches = [[] for _ in range(batch_num)]\n        for X_set in self.Xs:\n            self._append(batches, X_set, batch_size)\n        self._append(batches, self.ys, batch_size)\n        return batches\n\n    def _append(self, batches, Z_set, batch_size): # Z_set is X_set or ys\n        for i in range(len(batches)):\n            start = batch_size * i\n            end = batch_size * (i + 1)\n            
batches[i].append(Z_set[start:end])\n","repo_name":"jun-harashima/msnc","sub_path":"msnc/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73546034330","text":"import pygame\nimport os\n\npygame.init() #초기화\n\n#화면 크기 설정\nscreen_width = 640 #가로\nscreen_height = 480 #세로\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n#화면 타이틀 설정\npygame.display.set_caption(\"Nado pang\") # 게임 타이틀\n\n# fps\nclock = pygame.time.Clock()\n\n#여기까지는 초기화 부분 반드시 필요\n\n#1\n#배경 이미지 불러오기\n\ncurrent_path = os.path.dirname(__file__) #현재 파일의 위치 반환\nimage_path = os.path.join(current_path, \"images\") #images 폴더 위치 반환\n\n#배경그리기\nbackground = pygame.image.load(os.path.join(image_path, \"background.png\"))\n\n# 스테이지 만들기\nstage = pygame.image.load(os.path.join(image_path, \"stage.png\"))\nstage_size = stage.get_rect().size\nstage_height = stage_size[1] #스테이지 높이 위에 캐릭터\n\n# 캐릭터 만들기\ncharacter = pygame.image.load(os.path.join(image_path, \"character.png\"))\ncharacter_size = character.get_rect().size\ncharacter_width = character_size[0]\ncharacter_height = character_size[1]\ncharacter_x_pos = (screen_width / 2) - (character_width/2)\ncharacter_y_pos = screen_height - character_height - stage_height\n# 이동방향\ncharacter_to_x = 0\n# speed\ncharacter_speed = 5\n\n# 무기\nweapon = pygame.image.load(os.path.join(image_path, \"weapon.png\"))\nweapon_size = weapon.get_rect().size\nweapon_width = weapon_size[0]\n\n#무기는 여러발\nweapons = []\n\n#무기 스피드\nweapon_speed = 10\n\n#공만들기 (4개)\nball_images = [\n pygame.image.load(os.path.join(image_path, \"balloon1.png\")),\n pygame.image.load(os.path.join(image_path, \"balloon2.png\")),\n pygame.image.load(os.path.join(image_path, \"balloon3.png\")),\n pygame.image.load(os.path.join(image_path, \"balloon4.png\")),\n ]\n\n\n\n# 공 크기 스피드\nball_speed_y = [-18, -15, -12, -9] # index 0,1,2,3 해당하는 값\n\n# 공들\nballs = []\n# 최초발생 공\nballs.append({\n \"pos_x\": 50, #공의 x좌표\n \"pos_y\": 50,\n \"img_idx\" : 0, \n \"to_x\": 3,\n \"to_y\": -6,\n \"init_spd_y\": ball_speed_y[0]\n})\n\nweapon_to_remove = -1\nball_to_remove = -1\n\n#폰트 정의\ngame_font = pygame.font.Font(None, 40)\ntotal_time = 100\nstart_ticks = pygame.time.get_ticks()\n\n\n\ngame_result = \"GAME OVER\"\n\n#이벤트 루프\nrunning = True # 게임이 진행중인가\nwhile running:\n dt = clock.tick(30) #게임화면의 초당 프레임 설정\n\n# 이벤트 처리\n for event in pygame.event.get(): #이벤트가 발생하였는가?\n if event.type == pygame.QUIT: # 창이 닫히는 이벤트가 발생하였는가?\n running = False #게임이 진행중이 아님\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n character_to_x -= character_speed\n elif event.key == pygame.K_RIGHT:\n character_to_x += character_speed\n elif event.key == pygame.K_SPACE:\n weapon_x_pos = character_x_pos + (character_width / 2) - (weapon_width / 2)\n weapon_y_pos = character_y_pos\n weapons.append([weapon_x_pos, weapon_y_pos])\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n character_to_x = 0\n\n\n #캐릭터 위치 정위\n character_x_pos += character_to_x\n if character_x_pos < 0:\n character_x_pos = 0\n\n elif character_x_pos > screen_width - character_width:\n character_x_pos = screen_width - character_width\n\n #무기 위치 조정\\\n #100 ,200 -> 180, 160, 140...\n\n\n weapons = [ [w[0], w[1] - weapon_speed] for w in weapons]\n\n # 천장 없에기\n weapons = [ [w[0], w[1]] for w in weapons if w[1] > 0]\n # 공 위치 정의\n\n for ball_idx, ball_val in enumerate(balls):\n ball_pos_x = 
ball_val[\"pos_x\"]\n ball_pos_y = ball_val[\"pos_y\"]\n\n ball_img_idx = ball_val[\"img_idx\"]\n\n ball_size = ball_images[ball_img_idx].get_rect().size\n ball_width = ball_size[0]\n ball_height = ball_size[1]\n\n # 가로벽에 닿았을때의 공처리 법\n\n if ball_pos_x < 0 or ball_pos_x > screen_width - ball_width:\n ball_val[\"to_x\"] = ball_val[\"to_x\"] * -1\n\n #세로 위치\n #스테이지 튕기는 처리\n if ball_pos_y >= screen_height - stage_height - ball_height:\n ball_val[\"to_y\"] = ball_val[\"init_spd_y\"]\n\n else:\n ball_val[\"to_y\"] += 0.5\n \n ball_val[\"pos_x\"] += ball_val[\"to_x\"]\n ball_val[\"pos_y\"] += ball_val[\"to_y\"]\n\n\n #충돌 처리\n\n #캐릭터 rect 정보 업데이트\n character_rect = character.get_rect()\n character_rect.left = character_x_pos\n character_rect.top = character_y_pos\n\n for ball_idx, ball_val in enumerate(balls):\n ball_pos_x = ball_val[\"pos_x\"]\n ball_pos_y = ball_val[\"pos_y\"]\n ball_img_idx = ball_val[\"img_idx\"]\n \n #공 rect 정보 업데이트\n ball_rect = ball_images[ball_img_idx].get_rect()\n ball_rect.left = ball_pos_x\n ball_rect.top = ball_pos_y\n \n if character_rect.colliderect(ball_rect):\n running = False\n break\n\n #공과 무기들 충돌 처리\n for weapon_idx, weapon_val in enumerate(weapons):\n weapon_pos_x = weapon_val[0]\n weapon_pos_y = weapon_val[1]\n\n #무기 rect 정보\n weapon_rect = weapon.get_rect()\n weapon_rect.left = weapon_pos_x\n weapon_rect.top = weapon_pos_y\n\n #충돌 체크\n if weapon_rect.colliderect(ball_rect):\n weapon_to_remove = weapon_idx\n ball_to_remove = ball_idx\n \n if ball_img_idx < 3:\n #현재 공크기 정보\n ball_width = ball_rect.size[0]\n ball_height = ball_rect.size[1]\n\n #나누어진 공 정보\n small_ball_rect = ball_images[ball_img_idx + 1].get_rect()\n small_ball_width = small_ball_rect.size[0]\n small_ball_height = small_ball_rect.size[1]\n \n balls.append({\n \"pos_x\": ball_pos_x + (ball_width /2 ) - (small_ball_width / 2), #공의 x좌표\n \"pos_y\": ball_pos_y + (ball_height /2 ) - (small_ball_height / 2),\n \"img_idx\" : ball_img_idx + 1, \n \"to_x\": -3,\n \"to_y\": -6,\n \"init_spd_y\": ball_speed_y[ball_img_idx + 1]})\n\n balls.append({\n \"pos_x\": ball_pos_x + (ball_width /2 ) - (small_ball_width / 2), #공의 x좌표\n \"pos_y\": ball_pos_y + (ball_height /2 ) - (small_ball_height / 2),\n \"img_idx\" : ball_img_idx + 1, \n \"to_x\": 3,\n \"to_y\": -6,\n \"init_spd_y\": ball_speed_y[ball_img_idx + 1]})\n break\n else:\n continue\n break\n\n\n #충돌된 공 무기 지우기\n\n if ball_to_remove > -1:\n del balls[ball_to_remove]\n ball_to_remove = -1\n\n if weapon_to_remove > -1:\n del weapons[weapon_to_remove]\n weapon_to_remove = -1\n\n # 모든 공 없애기\n if len(balls) == 0:\n game_result = \"MISSION COMPLETE\"\n running = False\n\n\n\n\n# 화면 송출\n screen.blit(background, (0,0))\n\n for weapon_x_pos, weapon_y_pos in weapons:\n screen.blit(weapon, ( weapon_x_pos, weapon_y_pos))\n\n for idx, val in enumerate(balls):\n ball_pos_x = val[\"pos_x\"]\n ball_pos_y = val[\"pos_y\"]\n ball_img_idx = val[\"img_idx\"]\n screen.blit(ball_images[ball_img_idx],(ball_pos_x, ball_pos_y))\n\n screen.blit(stage, (0, screen_height - stage_height))\n screen.blit(character, (character_x_pos, character_y_pos))\n\n #경과 시간 계산\n elapsed_time = (pygame.time.get_ticks() - start_ticks) / 1000\n timer = game_font.render(\"Time : {}\".format(int(total_time - elapsed_time)), True, (255, 255, 255))\n screen.blit(timer, (10, 10))\n\n #시간 초과\n if total_time - elapsed_time <= 0:\n game_result = \"TIME OVER\"\n running = False\n\n\n pygame.display.update() # 게임화면을 다시 그리기\n\nmsg = game_font.render(game_result, True, (255, 255, 0))\nmsg_rect = 
msg.get_rect(center=(int(screen_width / 2), int(screen_height / 2)))\nscreen.blit(msg, msg_rect)\npygame.display.update()\n\npygame.time.delay(2000)\n# pygame 종료\npygame.quit()\n\n\n","repo_name":"epter/python","sub_path":"game_frame1.py","file_name":"game_frame1.py","file_ext":"py","file_size_in_byte":8604,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9947725600","text":"from collections import defaultdict\n\ndef dfs(node , curr_color , color_array ,adj_list):\n color_array[node] = curr_color\n\n for neibhour in adj_list[node]:\n if color_array[neibhour] == curr_color:\n return False\n elif not dfs(neibhour, 1-curr_color,color_array,adj_list):\n return False\n return True\n\n\nwhile True:\n n = int(input())\n e = int(input())\n if n == 0:\n break\n adj_list = defaultdict(set)\n color = [-1]*n\n\n for i in range(e):\n node1,node2 = list(map(int,input().split()))\n adj_list[node1 - 1].add(node2 - 1)\n adj_list[node2 - 1].add(node1 - 1 )\n\n flag = True\n for key in adj_list:\n if color[key] == -1:\n if not dfs(key,0,color,adj_list):\n flag = False\n break\n \n if flag :\n print('BICOLOURABLE.')\n else:\n print('NOT BICOLOURABLE.')\n","repo_name":"Gizaw-Agodo/A2sV","sub_path":"Eolymp/bi-coloring.py","file_name":"bi-coloring.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"24580182438","text":"import db\nimport geopy\nfrom geopy.distance import distance\nimport pandas as pd\n\n\ndef form_answer():\n answer = {}\n mas1 = []\n mas2 = []\n mas3 = []\n dct_stations = db.get_dict_stations()\n dct_apartments = db.get_dict_apartments()\n for key_apartment, value_apartment in dct_apartments.items():\n start = geopy.Point(value_apartment[0], value_apartment[1])\n d = geopy.distance.distance(kilometers=1.2)\n north = d.destination(point=start, bearing=0) # Сдвиг на 1.2 км на север\n east = d.destination(point=start, bearing=90) # Сдвиг на 1.2 км на восток\n south = d.destination(point=start, bearing=180) # Сдвиг на 1.2 км на юг\n west = d.destination(point=start, bearing=270) # Сдвиг на 1.2 км на запад\n north_coord = north[0] # Получение самой северной долготы для квадрата\n east_coord = east[1] # Получение самой восточной широты для квадрата\n south_coord = south[0] # Получение самой южной долготы для квадрата\n west_coord = west[1] # Получение самой западной широты для квадрата\n for key_station, value_station in dct_stations.items():\n longitude = value_station[0]\n latitude = value_station[1]\n\n # Проверка того, что остановка лежит в квадрате 2.4 км * 2.4 км\n if south_coord < longitude < north_coord and west_coord < latitude < east_coord:\n dist = distance((longitude, latitude), start).m\n if dist <= 1000:\n mas1.append(key_apartment)\n mas2.append(key_station)\n mas3.append(dist)\n x = zip(mas1, mas2, mas3)\n xs = sorted(x, key=lambda tup: (tup[0], tup[2])) # Сортировка по удалённости, а затем по названию ЖК\n answer['Название ЖК'] = [x[0] for x in xs]\n answer['Название Остановки'] = [x[1] for x in xs]\n answer['Расстояние до остановки'] = [x[2] for x in xs]\n z = pd.DataFrame(answer)\n z.to_excel(\"final_table.xlsx\", index=False)\n\n\n","repo_name":"ziyazetdinoff/test_task","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72842549531","text":"\"\"\"\n\n\"\"\"\n\nimport 
sys\nimport psycopg2\nfrom configparser import ConfigParser\n\n\ndef FormatRow(Cn, Row, COLSP):\n \"\"\"\n \"\"\"\n fRow = \"\"\n for i, c in enumerate(Row):\n sc = str(c)\n lcn = len(Cn[i])\n sc = sc[ 0 : min(len(sc), lcn+COLSP-2) ] \n fRow += sc + \" \"*(COLSP+lcn-len(sc))\n return fRow\n\n\ndef Display(ColName, Recs):\n \"\"\"\n \n \"\"\"\n NCols = len( ColName )\n if NCols == 0:\n return\n\n Nr = len(Recs)\n A = 'y'\n if Nr>50:\n A = input(\"There are {} records. Show? (y/n):\".format(Nr) )\n if A.lower()=='y':\n H=\"\" \n COLSP = int( 80 /(NCols+1) )\n for i, cn in enumerate(ColName):\n H += ( cn + COLSP*\" \" ) \n print(H)\n for i,Row in enumerate(Recs):\n R = FormatRow(ColName, Row, COLSP)\n print(R)\n #if i>0 and ((i+1)%10)==0:\n # C=input(\"Continue?:\")\n # if C!=\"y\":\n # break\n return\n\n\ndef ReadCfg(filename='database.cfg', section='connection'):\n \"\"\"\n \n \"\"\"\n \n # create a parser\n parser = ConfigParser()\n # read config file\n parser.read(filename)\n # get section\n db = {}\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n db[param[0]] = param[1]\n else:\n raise Exception( 'Section {0} not found in the {1} file'.format(section, filename) )\n if db[\"ps\"]==\"\":\n db[\"ps\"] = input(\"Password:\")\n return db\n\n\ndef Connect(ConInfo):\n \"\"\" Connect\n\n \"\"\"\n\n DbCon = psycopg2.connect(host=ConInfo[\"host\"], \n port = ConInfo[\"port\"],\n database = ConInfo[\"db\"], \n user = ConInfo[\"us\"], \n password = ConInfo[\"ps\"] )\n Cursor = DbCon.cursor()\n return DbCon, Cursor\n\n\ndef Version(Cur):\n \"\"\" Version\n \n \"\"\"\n\n # execute a statement\n Cur.execute('SELECT version()')\n # display the PostgreSQL database server version\n db_version = Cur.fetchone()\n print(\"PostgreSQL version: \" + db_version[0] )\n return\n\n\ndef PopulateDb(DbCon, DbCur, SqlScript):\n try:\n sqlfile = open(SqlScript, 'r')\n DbCur.execute( sqlfile.read() )\n DbCon.commit()\n sqlfile.close()\n except psycopg2.errors.DuplicateTable as SErr:\n print(\"ERR:\", SErr)\n return\n\n\ndef DropTable(DbCur, Table):\n pass\n\n\ndef Query(DbCon, DbCur, Q):\n \"\"\" Query\n \n \"\"\"\n ColNames = []\n try:\n Res=\"\"\n DbCur.execute(Q)\n Res = DbCur.fetchall()\n ColNames = [Desc[0] for Desc in DbCur.description]\n except Exception as Err:\n err_type, err_obj, traceback = sys.exc_info()\n line_num = traceback.tb_lineno\n print (\"psycopg2 ERROR:\" + str(Err) + \"Line number:\", line_num)\n print (\"psycopg2 traceback:\", traceback, \"-- type:\", err_type)\n print (\"extensions.Diagnostics:\", Err.diag)\n print (\"pgerror:\", Err.pgerror)\n print (\"pgcode:\", Err.pgcode, \"\\n\")\n DbCon.rollback()\n return ColNames, Res\n\n\ndef Get_Query(Fq):\n \"\"\" Get_Query\n \n\n \"\"\"\n\n Q = \"\"\n EoF = False\n Ok = False\n while True:\n l = Fq.readline()\n if (\"--\" in l) :\n # skip line\n continue\n elif l==\"\":\n EoF=True\n break\n else:\n Q += l\n if \";\" in Q:\n Ok = True\n break\n\n return EoF, Ok, Q\n","repo_name":"xtianhb/sql","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28035100076","text":"import pandas as pd\nimport numpy as np\n\nfrom learntools.core import *\n\nsf_permits = pd.read_csv(\"../input/building-permit-applications-data/Building_Permits.csv\")\nnp.random.seed(0) \n\nmissing_values_count = sf_permits.isnull().sum()\ntotal_cells = np.product(sf_permits.shape)\ntotal_missing = 
missing_values_count.sum()\npercent_missing = (total_missing/total_cells) * 100\n\nsf_permits_with_na_dropped = sf_permits.dropna(axis=1)\n\ncols_in_original_dataset = sf_permits.shape[1]\ncols_in_na_dropped = sf_permits_with_na_dropped.shape[1]\ndropped_columns = cols_in_original_dataset - cols_in_na_dropped\n\nclass TakeFirstLook(ThoughtExperiment):\n _hint = \"Use `sf_permits.head()` to view the first five rows of the data.\"\n _solution = ('The first five rows of the data does show that several columns have '\n 'missing values. You can see this in the \"Street Number Suffix\", \"Proposed Construction Type\" ' \n 'and \"Site Permit\" columns, among others.')\n \nclass PercentMissingValues(EqualityCheckProblem):\n _var = 'percent_missing'\n _expected = percent_missing\n _hint = (\"You can begin by getting the number of missing entries in each column \"\n \"with `missing_values_count = sf_permits.isnull().sum()`.\")\n _solution = CS(\n\"\"\"# get the number of missing data points per column\nmissing_values_count = sf_permits.isnull().sum()\n\n# how many total missing values do we have?\ntotal_cells = np.product(sf_permits.shape)\ntotal_missing = missing_values_count.sum()\n\n# percent of data that is missing\npercent_missing = (total_missing/total_cells) * 100\n\"\"\")\n\nclass WhyDataMissing(ThoughtExperiment):\n _hint = (\"Do all addresses generally have a street number suffix? Do all addresses generally have a zipcode?\")\n _solution = ('If a value in the \"Street Number Suffix\" column is missing, it is likely because it does not exist. '\n 'If a value in the \"Zipcode\" column is missing, it was not recorded.')\n \nclass DropMissingRows(ThoughtExperiment):\n _hint = (\"Use `sf_permits.dropna()` to drop all missing rows.\")\n _solution = (\"There are no rows remaining in the dataset!\")\n \nclass DropMissingCols(EqualityCheckProblem):\n _vars = ['sf_permits_with_na_dropped', 'dropped_columns']\n _expected = [sf_permits_with_na_dropped, dropped_columns]\n _hint = (\"You can begin by getting the dropping all columns with missing values \"\n \"with `sf_permits.dropna(axis=1)`.\")\n _solution = CS(\n\"\"\"# remove all columns with at least one missing value\nsf_permits_with_na_dropped = sf_permits.dropna(axis=1)\n\n# calculate number of dropped columns\ncols_in_original_dataset = sf_permits.shape[1]\ncols_in_na_dropped = sf_permits_with_na_dropped.shape[1]\ndropped_columns = cols_in_original_dataset - cols_in_na_dropped\n\"\"\")\n \nclass ImputeAutomatically(EqualityCheckProblem):\n _var = 'sf_permits_with_na_imputed'\n _expected = sf_permits.fillna(method='bfill', axis=0).fillna(0)\n _hint = (\"Use the `.fillna()` method twice.\")\n _solution = CS(\n\"\"\"sf_permits_with_na_imputed = sf_permits.fillna(method='bfill', axis=0).fillna(0)\n\"\"\")\n \nqvars = bind_exercises(globals(), [\n TakeFirstLook,\n PercentMissingValues,\n WhyDataMissing,\n DropMissingRows,\n DropMissingCols,\n ImputeAutomatically\n ],\n var_format='q{n}',\n )\n__all__ = list(qvars)\n","repo_name":"Kaggle/learntools","sub_path":"learntools/data_cleaning/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":419,"dataset":"github-code","pt":"32"} +{"seq_id":"33090669937","text":"#!/usr/bin/python3\n# coding: utf-8\nfrom gensim.models import word2vec\nimport logging, os\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) # 官方推荐格式\n##################################################################\n## 
MySentences\n# 将输入视为 Python 的内置列表很简单, 但是在输入很大时会占用大量的内存.\n# 所以 Gensim 只要求输入按顺序提供句子, 并不将这些句子存储在内存, 然后 Gensim 可以加载一个句子, 处理该句子, 然后加载下一个句子\n# 例如, 如果输入分布在硬盘上的多个文件中, 文件的每一行是一个句子, 那么可以逐个文件, 逐行的处理输入:\nclass MySentences(object):\n    def __init__(self, path): self.path = path # 传进来的可能是 文件 或者 目录\n    def __iter__(self): # a memory-friendly iterator\n        if os.path.isdir(self.path): # 如果传进来的是目录\n            for fname in os.listdir(self.path):\n                for line in open(os.path.join(self.path, fname)):\n                    yield line.split()\n        else: # 如果传进来的是文件\n            for line in open(self.path):\n                yield line.split()\n# Google 之前公开了 20000 条左右的语法与语义化训练样本 questions-words.txt, 每一条遵循 A is to B as C is to D 这个格式\n# wc questions-words.txt # 19558 78204 603955 questions-words.txt; 19558 行, 78204 个单词, 603955 字节\nsentences = MySentences('./tmp_dataset/questions-words_Word2Vec-accuracy-test.txt')\nprint(len(list(sentences))) # 19558 行\nprint(list(sentences)[0]) # [':', 'capital-common-countries']\n##################################################################\n## 训练模型\nsentences = MySentences('./tmp_dataset/questions-words_Word2Vec-accuracy-test.txt') # 重新打开, 因为上面已经移动了指针\nmodel = word2vec.Word2Vec(sentences, size=256, window=10, min_count=10, sg=1, hs=1, iter=10, workers=25)\n# 如果需要对文件中的单词做其他处理, 比如转换为 unicode, 转换大小写, 删除数字, 抽取命名实体等, 所有这些都可以在 MySentence 迭代器中进行处理.\n","repo_name":"HCShi/jShellscript","sub_path":"bin/template/src/jptgensim/l3_mysentences-yeild-读入文件.py","file_name":"l3_mysentences-yeild-读入文件.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39181063885","text":"\"\"\"\nCommunication statistics script using data from multiple applications.\nThis script reads all communication matrices in a given directory tree.\nFor each communication matrix, it computes all communication statistics\nusing the code available at https://github.com/llpilla/communication-statistics .\nIt organizes its results in two categories ('comm' & 'page') depending on the\nnature of the communication matrix.\n\"\"\"\n\nimport pandas as pd # for dataframes\nfrom os import walk # to find files in the directory hierarchy\nfrom communicationstatistics.commstats import CommunicationStatistics\n\nfilepath = 'input-ml-explo-matrices' # directory containing all comm matrices\n\nmyfile = 'input-ml-explo-matrices/__cere__blackscholes_m4__Z9bs_threadPv_368/conf1/trace/comm_mat_numalize__cere__blackscholes_m4__Z9bs_threadPv_368_0_16.csv'\n\ndef extract_app_and_conf(dirpath):\n    \"\"\"\n    Format of 'dirpath':\n    {filepath}/{application}/conf[123]/trace\n    \"\"\"\n    parts = dirpath.split('/')\n    return(parts[1], parts[2])\n\n\ndef extract_type_and_threads(filename):\n    \"\"\"\n    Format of a filename:\n    [comm|page]_mat_numalize{application}_0_[16|32|64].csv\n    \"\"\"\n    parts = filename.split('_')\n    return(parts[0], parts[-1][0:2])\n\n\ndef main():\n    # Starts the dataframes to be used\n    df_columns = ['application', 'conf', 'threads', 'CH', 'CHv2', 'CA',\n                  'CB', 'CBv2', 'CC', 'NBC', 'SP(16)']\n    page_df = pd.DataFrame(columns=df_columns)\n    comm_df = pd.DataFrame(columns=df_columns)\n    dfs = {'page': page_df, 'comm': comm_df}\n\n    # Iterates over the directory and sub-directories\n    for (dirpath, dirnames, filenames) in walk(filepath):\n        # If there are no files in this directory, skips to the next\n        if not filenames:\n            continue\n        # Gets the application name and the configuration number\n        application, conf = extract_app_and_conf(dirpath)\n        # Iterates over files\n        for csvfile in filenames:\n            # Gets the 
type of the matrix and the number of threads used\n matrix_type, threads = extract_type_and_threads(csvfile)\n # Organizes the complete filepath\n completepath = (dirpath + '/' + csvfile)\n print(completepath)\n # Opens the file to compute metrics\n stats = CommunicationStatistics(completepath)\n # Computes metrics\n dfs[matrix_type] = dfs[matrix_type].append({\n 'application': application,\n 'conf': conf,\n 'threads': threads,\n 'CH': stats.ch(),\n 'CHv2': stats.ch_v2(),\n 'CA': stats.ca(),\n 'CB': stats.cb(),\n 'CBv2': stats.cb_v2(),\n 'CC': stats.cc(),\n 'NBC': stats.nbc(),\n 'SP(16)': stats.sp(16)\n }, ignore_index=True)\n\n # Saves the dataframes as CSV files\n dfs['comm'].to_csv('statistics-comm.csv', sep=',', index=False)\n dfs['page'].to_csv('statistics-page.csv', sep=',', index=False)\n\nif __name__ == '__main__':\n main()\n","repo_name":"gateauaulait/numa-prefetch-perf-energy-size-opt","sub_path":"communication-matrix/generate_comm_metrics.py","file_name":"generate_comm_metrics.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37844279227","text":"import sys\nsys.path.append(r'/Users/zwalters/pythonscripts')\nfrom makefigs import *\n\n\n\ndef fouriermat(ftinpvec):\n nvec=len(ftinpvec)\n retmat=zeros((nvec,nvec))*0j\n for j in range(nvec):\n retmat[:,j]=roll(ftinpvec,-int(floor(nvec/2))+j)\n# for i,j in product(range(nvec),range(nvec)):\n## k=i-j\n## if((k>=-nvec/2) and (k0:\n for server in servers:\n if self.ip == server[\"address\"]:\n if \"username\" in server:\n user = server[\"username\"]\n if \"password\" in server:\n password = server[\"password\"]\n break\n server = \"rtsp://{}:{}@{}\".format(user,password,self.ip)\n log.info(\"Got server {}\".format(server))\n interval = interval*1000 # Convert interval in seconds to milliseconds\n path = p.expandvars(path) # In case of environment variables\n if not p.isdir(path):\n system(\"mkdir -p {}\".format(path))\n chdir(path)\n \n log.info(\"Connecting to {}\".format(server))\n vid = VideoCapture(server)\n while True:\n # Read frame\n ret,frame = vid.read()\n if not ret:\n # Something wrong with video stream\n # Check if `self.ip` is reachable\n if lan_scan.scan(self.ip):\n vid = VideoCapture(server)\n log.error(\"Bad frame. Restarted capture.\".format(self.ip))\n continue\n else:\n log.error(\"Server went offline. 
Closing capture.\")\n self.available.take(self.ip)\n return\n\n # Build file name\n t = datetime.now()\n path = \"{}/{}/{}\".format(t.year,t.month,t.day)\n if not p.isdir(path):\n system(\"mkdir -p {}\".format(path))\n fname = \"{}/{}_{}.{}.{}.jpg\".format(path,self.ip,t.hour,t.minute,t.second)\n # Write frame\n self.write_frame(frame,fname)\n frame_h,frame_w,_ = frame.shape\n log.debug(\"Wrote {}x{} (50%) frame to {}\".format(frame_w,frame_h,fname))\n # Wait `interval` milliseconds\n waitKey(interval)\n log.info(\"Exiting thread\".format(self.ip))\n\n # Write `frame` as a JPG image\n def write_frame(self,frame,fname,compression=50):\n imwrite(fname, frame, [IMWRITE_JPEG_QUALITY, compression])\n\n# Initializes logging\n# Logs are written to `$CWD/camera_service.log`\ndef set_logging():\n logFormatter = log.Formatter(\"[%(asctime)s] %(threadName)s - %(message)s\")\n rootLogger = log.getLogger()\n t = datetime.now()\n t_string = \"{}-{}-{}_{}:{}:{}\".format(t.year,t.month,t.day,t.hour,t.minute,t.second)\n fileHandler = log.FileHandler(\"camera_service_{}.log\".format(t_string))\n fileHandler.setFormatter(logFormatter)\n rootLogger.addHandler(fileHandler)\n rootLogger.setLevel(log.INFO)\n\ndef parse_json(fname=\"./config.json\"):\n f = open(fname)\n j = load(f)\n ret = []\n for k in j:\n log.debug(\"Read parameter '{}:{}'\".format(k,j[k]))\n ret.append(j[k])\n f.close()\n return ret\n\ndef scan(available):\n log.info(\"Scanning...\")\n ret = lan_scan.lan_scan()\n log.info(\"Available: {}\".format(ret))\n log.info(\"var available: {}\".format(available.snapshot()))\n for ip in ret:\n if not available.contains(ip):\n available.put(ip)\n cam = cameraThread(ip,available)\n cam.start()\n\n# Main function\ndef main():\n set_logging()\n log.info(\"Service starting...\")\n scan_cooldown = 30*60 # 30 minutes\n available = SafeList() # list of IP addresses\n _,_,_,_,servers = parse_json()\n for server in servers:\n ip = server[\"address\"]\n log.info(\"Starting server {} from config.json...\".format(ip))\n cam = cameraThread(ip,available)\n cam.start()\n while True:\n scan(available)\n sleep(scan_cooldown)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ricardocchaves/save_rtsp_camera","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74474416092","text":"import numpy as np\nimport numpy.polynomial.polynomial as npol\nfrom numpy import cos,sin,sqrt,pi,arctan,log\nimport matplotlib.pyplot as plt\nimport scipy.interpolate as interpol\nimport scipy.linalg as la\nfrom scipy.integrate import quad\nfrom scipy.interpolate import CubicSpline, pade, CubicSpline\n\n\ndef chebysevAB(a,b,grado):\n poly = []\n def BaseChebysev(n): # Calcula la base ortogonalizada de grado n\n if n == 0:\n return np.array([1.])\n else:\n T = [np.array([1.]),np.array([1.,0])] \n while len(T) < n+1:\n T.append(np.polysub(np.polymul(np.array([2.,0]),T[-1]),T[-2]))\n return np.array(T,dtype=object)[n]\n\n for i in range(grado+1):\n poly.append(npol.polyfromroots(a+((np.roots(BaseChebysev(i))+1)*(b-a))/2)[::-1])\n return poly\n\ndef legendreAB(a,b,grado):\n poly = []\n def BaseLegendre(n): # Calcula la base ortogonalizada de grado n\n if n == 0:\n return np.array([1])\n if n == 1:\n return np.array([1,0])\n return np.polyadd(((2*(n-1)+1)/(n))*np.polymul(np.array([1,0]),BaseLegendre(n-1)),(-(n-1))/(n)*BaseLegendre(n-2))\n \n for i in range(grado+1):\n 
poly.append(npol.polyfromroots(a+((np.roots(BaseLegendre(i))+1)*(b-a))/2)[::-1])\n return poly\n\nbaseCheb = chebysevAB(3, 6, 3)\nbaseLege = legendreAB(3, 6, 3)\nprint('Chebysev:')\nfor i in baseCheb:\n print(np.poly1d(i))\nprint('Legendre:')\nfor i in baseLege:\n print(np.poly1d(i))","repo_name":"vmr48-ua/fisicaua","sub_path":"primero/métodos/P4/ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"23055199917","text":"from pandas import read_csv\n\npathfiles = \"data\"\n\n\ndef sort_csv_data_by_timestamp(file: str) -> None:\n df = read_csv(pathfiles + \"/\" + file)\n df = df.sort_values(by=\"timestamp\", ascending=True)\n sorted_filename = file.replace(\".csv\", \"\") + \"_sorted.csv\"\n df.to_csv(pathfiles + \"/\" + sorted_filename, index=False)\n\n\nsort_csv_data_by_timestamp(\"prices.csv\")\nsort_csv_data_by_timestamp(\"dataset 2.csv\")\n","repo_name":"Andrea-Valentini/dashboard_microservice","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16386862074","text":"mvlog = '../logs/mvlog.txt'\nprog = '../logs/prog.txt'\nvaultserver = 'http://projectmagpy.tk/myvault/appvault.php?link=true&u='\nsavelogloc = 'C://savedlog.txt'\nfileloc = '../files/'\ntexttop = '../data.txt'\ntasklog = '../logs/tasklog.txt'\nmvloc = '../files/'\ntaskdbloc = 'tasksdb.db'\nsearch_algo = 'rec'\n","repo_name":"projectmagpy/MagPy","sub_path":"bin/core/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"72702135132","text":"# coding: utf-8\n\nimport json\nimport re\nimport random\nimport numpy as np\nfrom sklearn.svm import SVC\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nimport jieba\nfrom bert_serving.client import BertClient\n\n\ndef get_stories(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as story_fd:\n return story_fd.readlines()\n\n\ndef get_sentences(story):\n sentences = list()\n for sentence in cut_sent(story):\n sentence = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\", \"\", sentence)\n cutted_sentence = list(jieba.cut(sentence))\n if len(cutted_sentence) < 1:\n cutted_sentence = [\"None\"]\n sentences += [cutted_sentence]\n return sentences\n\n\ndef cut_sent(para):\n para = re.sub('([。!?\\?])([^”])', r\"\\1\\n\\2\", para)\n para = re.sub('(\\.{6})([^”])', r\"\\1\\n\\2\", para)\n para = re.sub('(\\…{2})([^”])', r\"\\1\\n\\2\", para)\n para = re.sub('(”)', '”\\n', para)\n para = para.rstrip()\n return para.split(\"\\n\")\n\n\ndef get_vector(word_list):\n result = bc.encode([word_list], is_tokenized=True)\n return result\n\n\ndef sentence_process(sentence):\n sentence_vector = get_vector(sentence)\n return sentence_vector[0]\n\n\ndef process(story):\n story_vector_list = list()\n _real_data = list()\n for sentence in get_sentences(story):\n sentence_vector = sentence_process(sentence)\n story_vector_list += [sentence_vector]\n\n for index in range(1, len(story_vector_list)):\n new_vector = list(story_vector_list[index - 1]) + list(story_vector_list[index])\n _real_data += [new_vector]\n\n return story_vector_list, _real_data\n\n\nif __name__ == \"__main__\":\n bc = BertClient()\n stories = 
get_stories(\"es_stories.txt\")\n\n vector_list = list()\n real_data = list()\n fake_data = list()\n\n for story in stories:\n story = json.loads(story)\n story_vector_list, _real_data = process(story[\"content\"])\n vector_list += story_vector_list\n real_data += _real_data\n\n for i in range(len(real_data)):\n a, b = random.sample(vector_list, 2)\n new_vector = list(a) + list(b)\n fake_data += [new_vector]\n\n test_data = real_data + fake_data\n\n\n X = np.array(test_data)\n y = np.array([1]*len(real_data) + [0]*len(fake_data))\n\n clf = SVC(gamma='auto')\n clf.fit(X, y)\n\n y_pred = list()\n\n for test in X:\n result = clf.predict(test.reshape(1, -1))\n y_pred += [result]\n\n print(accuracy_score(y, y_pred))\n tn, fp, fn, tp = confusion_matrix(y, y_pred).ravel()\n print(tn, fp, fn, tp)\n","repo_name":"Elfsong/WintersWrath","sub_path":"python/demo_new.py","file_name":"demo_new.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"20125792431","text":"from flask import Blueprint, render_template, request, g\nfrom sqlalchemy import desc\n\nfrom configs.config import ADMIN_PER_PAGE\nfrom www.accounts.models import User, Profile\nfrom www.commons.required import admin_required\nfrom www.commons.utils import c_orm_id\nfrom www.ecomm.carts.models import Cart\nfrom www.ecomm.orders.models import Order, OrderTransaction, CancelPayOrder\nfrom www.ecomm.products.forms import ShopForm, ProductForm\nfrom www.ecomm.products.models import Shop, Product, ShopSubscriber, ProductCategory, ProductOption, ProductReview, ProductQuestion, ProductVoter\nfrom www.ecomm.promotions.models import Coupon, Point\n\nNAME = 'admin_ecomms'\nadmin_ecomm = Blueprint(NAME, __name__, url_prefix='/admin/ecomms')\n\n\n@admin_ecomm.route('/shop/list', methods=['GET'])\n@admin_required\ndef shop_list():\n shop_query = Shop.query.order_by(desc(Shop.id))\n page = request.args.get('page', type=int, default=1)\n pagination = shop_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n shops = pagination.items\n return render_template('ecomm/admin/shops/list.html',\n shops=shops,\n pagination=pagination)\n\n\n@admin_ecomm.route('/shop/create', methods=['GET'])\n@admin_required\ndef shop_create():\n form = ShopForm()\n user_objs = User.query.all()\n return render_template('ecomm/admin/shops/create.html',\n form=form,\n users=user_objs,\n )\n\n\n@admin_ecomm.route('/shop/<_id>/change', methods=['GET'])\n@admin_required\ndef shop_change(_id):\n form = ShopForm()\n user_objs = User.query.all()\n target_shop = Shop.query.filter_by(id=_id).first()\n target_categories = ProductCategory.query.filter_by(shop_id=target_shop.id).all()\n target_user = User.query.filter_by(id=target_shop.user_id).first()\n profile_obj = Profile.query.filter_by(user_id=target_shop.user_id).first()\n products_query = Product.query.order_by(desc(Product.created_at)).filter_by(shop_id=target_shop.id)\n page = request.args.get('page', type=int, default=1)\n pagination = products_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n product_objs = pagination.items\n return render_template('ecomm/admin/shops/change.html',\n form=form,\n users=user_objs,\n target_user=target_user,\n target_profile=profile_obj,\n target_shop=target_shop,\n target_categories=target_categories,\n target_products=product_objs,\n orm_id=target_shop.orm_id,\n )\n\n\n@admin_ecomm.route('/shop/subscription/shop/list', methods=['GET'])\n@admin_required\ndef 
subscription_shop_list():\n shops_query = Shop.query.order_by(desc(Shop.id))\n page = request.args.get('page', type=int, default=1)\n pagination = shops_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n shops = pagination.items\n users_all = User.query.all()\n return render_template('ecomm/admin/shops/subscription/shops.html',\n shops=shops,\n users_all=users_all,\n pagination=pagination)\n\n\n@admin_ecomm.route('/shop/<_id>/subscriber/list', methods=['GET'])\n@admin_required\ndef shop_subscriber_list(_id):\n users_all = User.query.all()\n target_shop = Shop.query.filter_by(id=_id).first()\n target_user = User.query.filter_by(id=target_shop.user_id).first()\n subscriber_query = ShopSubscriber.query.order_by(desc(ShopSubscriber.user_id)).filter_by(shop_id=target_shop.id)\n\n page = request.args.get('page', type=int, default=1)\n pagination = subscriber_query.paginate(page=page,\n per_page=ADMIN_PER_PAGE,\n error_out=False)\n _subscribers = pagination.items # AcSubscriber\n subscribers = list() # User\n for s in _subscribers:\n user = User.query.filter_by(id=s.user_id).first()\n subscribers.append(user)\n return render_template('ecomm/admin/shops/subscription/subscribers.html',\n users=users_all,\n target_user=target_user,\n target_shop=target_shop,\n _subscribers=_subscribers,\n subscribers=subscribers,\n pagination=pagination)\n\n\n@admin_ecomm.route('/product/list', methods=['GET'])\n@admin_required\ndef product_list():\n product_query = Product.query.order_by(desc(Product.id))\n page = request.args.get('page', type=int, default=1)\n pagination = product_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n products = pagination.items\n return render_template('ecomm/admin/products/list.html',\n products=products,\n pagination=pagination)\n\n\n@admin_ecomm.route('/product/create', methods=['GET'])\n@admin_required\ndef product_create():\n form = ProductForm()\n user_objs = User.query.all()\n products_all = Product.query.all()\n orm_id = c_orm_id(products_all, g.user)\n return render_template('ecomm/admin/products/create.html',\n form=form,\n users=user_objs,\n orm_id=orm_id)\n\n\n@admin_ecomm.route('/product/<_id>/change', methods=['GET'])\n@admin_required\ndef product_change(_id):\n form = ProductForm()\n user_objs = User.query.all()\n\n target_product = Product.query.filter_by(id=_id).first()\n target_options = ProductOption.query.filter_by(product_id=target_product.id).all()\n\n target_shop = Shop.query.filter_by(id=target_product.shop_id).first()\n target_category = ProductCategory.query.filter_by(id=target_product.pc_id).first()\n categories = ProductCategory.query.filter_by(shop_id=target_product.shop_id).all()\n\n target_user = User.query.filter_by(id=target_product.user_id).first()\n profile_obj = Profile.query.filter_by(user_id=target_product.user_id).first()\n\n reviews_query = ProductReview.query.order_by(desc(ProductReview.created_at)).filter_by(product_id=target_product.id)\n page = request.args.get('page', type=int, default=1)\n pagination = reviews_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n review_objs = pagination.items\n\n questions_query = ProductQuestion.query.order_by(desc(ProductQuestion.created_at)).filter_by(product_id=target_product.id)\n page_2 = request.args.get('page_2', type=int, default=1)\n pagination_2 = questions_query.paginate(page=page_2, per_page=ADMIN_PER_PAGE, error_out=False)\n question_objs = pagination_2.items\n return render_template('ecomm/admin/products/change.html',\n form=form,\n 
users=user_objs,\n target_product=target_product,\n target_options=target_options,\n target_shop=target_shop,\n target_category=target_category,\n categories=categories,\n target_user=target_user,\n pagination=pagination,\n review_objs=review_objs,\n pagination_2=pagination_2,\n question_objs=question_objs)\n\n\n@admin_ecomm.route('/product/vote/product/list', methods=['GET'])\n@admin_required\ndef vote_product_list():\n product_query = Product.query.order_by(desc(Product.id))\n page = request.args.get('page', type=int, default=1)\n pagination = product_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n products = pagination.items\n users_all = User.query.all()\n return render_template('ecomm/admin/products/vote/products.html',\n products=products,\n users_all=users_all,\n pagination=pagination)\n\n\n@admin_ecomm.route('/product/<_id>/voter/list', methods=['GET'])\n@admin_required\ndef product_voter_list(_id):\n users_all = User.query.all()\n target_product = Product.query.filter_by(id=_id).first()\n target_user = User.query.filter_by(id=target_product.user_id).first()\n voter_query = ProductVoter.query.order_by(desc(ProductVoter.user_id)).filter_by(product_id=target_product.id)\n\n page = request.args.get('page', type=int, default=1)\n pagination = voter_query.paginate(page=page,\n per_page=ADMIN_PER_PAGE,\n error_out=False)\n _voters = pagination.items # ArticleVoter\n voters = list() # User\n for v in _voters:\n user = User.query.filter_by(id=v.user_id).first()\n voters.append(user)\n return render_template('ecomm/admin/products/vote/voters.html',\n users=users_all,\n target_user=target_user,\n target_product=target_product,\n _voters=_voters,\n voters=voters,\n pagination=pagination)\n\n\n@admin_ecomm.route('/product/review/list', methods=['GET'])\n@admin_required\ndef product_review_list():\n review_query = ProductReview.query.order_by(desc(ProductReview.id))\n page = request.args.get('page', type=int, default=1)\n pagination = review_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n reviews = pagination.items\n return render_template('ecomm/admin/products/reviews/list.html',\n reviews=reviews,\n pagination=pagination\n )\n\n\n@admin_ecomm.route('/product/review/<_id>/change', methods=['GET'])\n@admin_required\ndef product_review_change(_id):\n target_review = ProductReview.query.filter_by(id=_id).first()\n target_product = Product.query.filter_by(id=target_review.product_id).first()\n target_user = User.query.filter_by(id=target_review.user_id).first()\n users_all = User.query.all()\n return render_template('ecomm/admin/products/reviews/change.html',\n users=users_all,\n target_review=target_review,\n target_user=target_user,\n target_product=target_product)\n\n\n@admin_ecomm.route('/product/review/create', methods=['GET'])\n@admin_required\ndef product_review_create():\n return render_template('ecomm/admin/products/reviews/create.html')\n\n\n@admin_ecomm.route('/product/question/list', methods=['GET'])\n@admin_required\ndef product_question_list():\n return render_template('ecomm/admin/products/qandas/questions/list.html')\n\n\n@admin_ecomm.route('/product/question/change', methods=['GET'])\n@admin_required\ndef product_question_change():\n return render_template('ecomm/admin/products/qandas/questions/change.html')\n\n\n@admin_ecomm.route('/product/question/create', methods=['GET'])\n@admin_required\ndef product_question_create():\n return 
render_template('ecomm/admin/products/qandas/questions/create.html')\n\n\n@admin_ecomm.route('/product/answer/list', methods=['GET'])\n@admin_required\ndef product_answer_list():\n return render_template('ecomm/admin/products/qandas/answers/list.html')\n\n\n@admin_ecomm.route('/product/answer/change', methods=['GET'])\n@admin_required\ndef product_answer_change():\n return render_template('ecomm/admin/products/qandas/answers/change.html')\n\n\n@admin_ecomm.route('/product/answer/create', methods=['GET'])\n@admin_required\ndef product_answer_create():\n return render_template('ecomm/admin/products/qandas/answers/create.html')\n\n\n@admin_ecomm.route('/order/list', methods=['GET'])\n@admin_required\ndef order_list():\n order_query = Order.query.order_by(desc(Order.id))\n page = request.args.get('page', type=int, default=1)\n pagination = order_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n orders = pagination.items\n return render_template('ecomm/admin/orders/list.html',\n orders=orders,\n pagination=pagination)\n\n\n@admin_ecomm.route('/cart/list', methods=['GET'])\n@admin_required\ndef cart_list():\n cart_query = Cart.query.order_by(desc(Cart.id))\n page = request.args.get('page', type=int, default=1)\n pagination = cart_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n carts = pagination.items\n return render_template('ecomm/admin/carts/list.html',\n carts=carts,\n pagination=pagination)\n\n\n@admin_ecomm.route('/order/transaction/list', methods=['GET'])\n@admin_required\ndef order_transaction_list():\n order_transaction_query = OrderTransaction.query.order_by(desc(OrderTransaction.id))\n page = request.args.get('page', type=int, default=1)\n pagination = order_transaction_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n order_transactions = pagination.items\n return render_template('ecomm/admin/orders/transaction_list.html',\n order_transactions=order_transactions,\n pagination=pagination)\n\n\n@admin_ecomm.route('/order/cancel/pay/list', methods=['GET'])\n@admin_required\ndef order_cancel_pay_list():\n order_cancel_pay_query = CancelPayOrder.query.order_by(desc(CancelPayOrder.id))\n page = request.args.get('page', type=int, default=1)\n pagination = order_cancel_pay_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n order_cancel_pays = pagination.items\n return render_template('ecomm/admin/orders/cancel_pay_list.html',\n order_cancel_pays=order_cancel_pays,\n pagination=pagination)\n\n\n@admin_ecomm.route('/coupon/list', methods=['GET'])\n@admin_required\ndef coupon_list():\n coupon_query = Coupon.query.order_by(desc(Coupon.id))\n page = request.args.get('page', type=int, default=1)\n pagination = coupon_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n coupons = pagination.items\n return render_template('ecomm/admin/promotions/coupons/list.html',\n coupons=coupons,\n pagination=pagination)\n\n\n@admin_ecomm.route('/point/list', methods=['GET'])\n@admin_required\ndef point_list():\n point_query = Point.query.order_by(desc(Point.id))\n page = request.args.get('page', type=int, default=1)\n pagination = point_query.paginate(page=page, per_page=ADMIN_PER_PAGE, error_out=False)\n points = pagination.items\n return render_template('ecomm/admin/promotions/points/list.html',\n points=points,\n 
pagination=pagination)\n","repo_name":"moljin/flapro_1.4","sub_path":"www/ecomm/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8176191039","text":"from State_Ground_Truth.banking.bank import Bank\nfrom State_Ground_Truth.banking.controller import BankController\nfrom State_Ground_Truth.banking.commands import Deposit, Withdrawal, Transfer, Batch\n\n\ndef main() -> None:\n bank = Bank()\n\n controller = BankController()\n\n # create accounts\n account1 = bank.create_account(\"Fergus\")\n account2 = bank.create_account(\"Paul\")\n account3 = bank.create_account(\"Simon\")\n\n controller.execute(Deposit(account1, 100000))\n\n controller.execute(\n Batch(\n commands=[\n Deposit(account2, 100000),\n Deposit(account3, 100000),\n ]\n )\n )\n controller.undo()\n controller.undo()\n controller.redo()\n controller.redo()\n\n # transfer\n controller.execute(Transfer(from_account=account2, to_account=account1, amount=50000))\n\n # withdrawal\n controller.execute(Withdrawal(account1, 150000))\n controller.undo()\n\n print(bank)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Quickmotions/Personal_Studying","sub_path":"Command Design Pattern/State_Ground_Truth/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10385084629","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom .models import Todo\n\n# Create your views here.\n\ndef todo(request):\n if request.method == 'POST':\n text2 = request.POST['text']\n todo = Todo()\n todo.text = text2\n todo.save()\n text = Todo.objects.all()\n context = {'text':text}\n return render(request,'todo/index.html',context)\n\n else:\n text = Todo.objects.all()\n context = {'text':text}\n return render(request,'todo/index.html',context) \n\n\ndef addComplete(request,t_id):\n print(\"hello \",t_id)\n todo = Todo.objects.get(pk=t_id)\n todo.complete = True\n todo.save()\n return redirect('todo')\n\n\ndef delComplete(request):\n complete_todo = Todo.objects.filter(complete = True)\n complete_todo.delete()\n return redirect('todo')\n\n\ndef delAll(request):\n deleteToDo = Todo.objects.all()\n deleteToDo.delete()\n return redirect(\"todo\")\n \n","repo_name":"Ishwor007/Todo","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16497668468","text":"#! 
/usr/bin/python\n\"\"\"\nCode to split html tables to make one table per row\nUsed for kindle where large tables are partially omitted\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('file', metavar='File', help='path of html file')\nargs = parser.parse_args()\n \nf = open(args.file)\narr = f.read()\nf.close()\nsoup = BeautifulSoup(arr)\ntl = soup.find_all('table')\nfor t in tl:\n tr = t.find_all('tr')\n temp = []\n for row in tr[1:]:\n temp.append(row.extract())\n temp.reverse()\n for row in temp:\n st = BeautifulSoup(str(t))\n l = st.find_all('caption')\n for c in l:\n c.extract()\n st.tr.replaceWith(row)\n t.insert_after(st.table)\nf = open(args.file, 'w')\nf.write(str(soup))\nf.close()\n","repo_name":"kulkarniniraj/python-codes","sub_path":"html-multi-table.py","file_name":"html-multi-table.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37117457188","text":"import pygame as pg\nimport library as lib\n\nif __name__ == '__main__':\n pg.init()\n run = True\n size = 1\n scale = []\n window = lib.new_window(\"Scale vectors\")\n while run:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n run = False\n if event.type == pg.MOUSEBUTTONDOWN:\n scale = pg.mouse.get_pos()\n lib.vectors(window, lib.cts.Origin, scale)\n scale = lib.cartesian_into_screen(scale)\n if event.type == pg.KEYDOWN:\n size += 1\n scale = lib.scale(scale, size)\n lib.vectors(window, lib.cts.Origin, lib.screen_into_cartesian(scale))\n lib.cartesian_plane(window, lib.cts.Origin)\n lib.flip()\n pg.quit()\n","repo_name":"AndresMpa/graphic_computing","sub_path":"clase_3_3.py","file_name":"clase_3_3.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26942389711","text":"import unittest\n\nfrom m_mock.m_random import m_name\nfrom m_mock.test_case.common_utils import execute\n\n\nclass TestName(unittest.TestCase):\n def test_name(self):\n execute(\"\"\"@clast()\"\"\")\n execute(\"\"\"@cfirst()\"\"\")\n execute(\"\"\"@cname()\"\"\")\n execute(\"\"\"@cname(3)\"\"\")\n execute(\"\"\"@last()\"\"\")\n execute(\"\"\"@first()\"\"\")\n execute(\"\"\"@name()\"\"\")\n execute(\"\"\"@name(True)\"\"\")\n print(m_name.cfirst())\n print(m_name.clast())\n print(m_name.cname())\n print(m_name.first())\n print(m_name.last())\n print(m_name.name())\n print(m_name.name(True))\n\n def test_name2(self):\n for i in range(1000):\n i = m_name.cname()\n assert not '\\n' in i\n print(i)\n","repo_name":"Franciz008/m_mock","sub_path":"m_mock/test_case/test_name.py","file_name":"test_name.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26155150453","text":"\nimport unittest\nfrom src.math_oper import MathOper\nfrom numpy import *\nfrom src.data_manager import DataManager\nfrom scipy.interpolate import UnivariateSpline\nfrom src.normalizer import Normalizer\nfrom src.norm_type import NormType\nimport matplotlib.pyplot as plt\n\nclass MathOperTest(unittest.TestCase):\n\n def setUp(self):\n url = '../resources/50k.txt'\n data = DataManager.load_data(url, False, True, ', ')\n data = array(data, dtype='object')\n\n # filter\n no_item_sign = '?'\n data = DataManager.data_filter(data, no_item_sign)\n N = 1000\n inputs = data[0:N, 0:14]\n self.test_data = inputs\n\n \"\"\"\n def 
test_get_prop_data(self):\n        test_data = array(self.test_data)\n        test = MathOper.get_prop_data(test_data)\n        print(test)\n    \"\"\"\n\n    def test_get_norm_prob(self):\n        # norm_test_data = Normalizer.normalize(self.test_data, NormType.min_max_norm, [2])\n        norm_test_data = self.test_data[:, 2]\n        # data_probs = MathOper.get_norm_prob(norm_test_data)\n        test_data_mean = mean(norm_test_data)\n        test_data_std = std(norm_test_data)\n        test_data_prob_dist = random.normal(test_data_mean, test_data_std, 1000)\n        plt.hist(test_data_prob_dist, 50)\n        plt.show()\n\n\n    \"\"\"\n    def test_prop_hist(self):\n        n = len(self.test_data)\n        p, x = histogram(self.test_data, bins=5) # bin it into n = N/10 bins\n        x = x[:-1] + (x[1] - x[0]) / 2 # convert bin edges to centers\n        f = UnivariateSpline(x, p, s=5)\n        plt.plot(x, f(x))\n        plt.show()\n    \"\"\"\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"maciejbihun9/machine_learning_in_action","sub_path":"test/math_oper_test.py","file_name":"math_oper_test.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33717127895","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2021/1/25 13:04\n# @Author : yuwenli\n\n\nfrom openpyxl import Workbook, load_workbook\nimport random\n\nwb = Workbook()\nws = wb.active\ndest_filename = \"test.xlsx\"\nsheet_name = \"test1\"\nws.title = sheet_name\nws['A1'] = \"TEST01\"\nwb.save(dest_filename)\n\nwb = load_workbook(dest_filename)\nws = wb[sheet_name]\n# print(ws['A1'])\n\nwb.create_sheet(\"test02\")\n\nws2 = wb[\"test02\"]\nfor i in range(1, 3):\n    for j in range(1, 5):\n        ws2.cell(row=i, column=j, value=random.randint(1, 50))\n# read cell values row by row\nfor row in ws2.rows:\n    for cell in row:\n        print(cell.value)\n# read cell values column by column\nfor column in ws2.columns:\n    for cell in column:\n        print(cell.value)\n\nprint(\"最大的行:%d \" % ws2.max_row)\nprint(\"最大的列:%d \" % ws2.max_column)\n# row 2, returned as a tuple\n# print(ws2[2])\n# print(tuple(ws2.rows))\nprint(ws2['A1':'B2'])\n# the slice is returned as a tuple\nprint(type(ws2['A1':'B2']))\n\nfor i in range(1, 10):\n    for j in range(1, 5):\n        ws2.cell(row=i, column=j).value = random.randint(1, 50)\n\nwb.save(dest_filename)\n","repo_name":"yuwenli/hogwarts_yuwenli","sub_path":"hogwarts_practices/excute_data/excelPractices.py","file_name":"excelPractices.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"31578532084","text":"\n\ndef list_grammer():\n    # initialize list with 0~9\n    b = [i for i in range(10)]\n    print(b) #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n    b.append(10)\n    print(b) #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n    b.insert(10, 11)\n    print(b) #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 10]\n\n    del b[10]\n    print(b) #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n    b.pop(10)\n    print(b) #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\ndef set_grammer():\n    li = [\"12\", \"12\", \"123\", \"1\", \"2\", \"2\", \"3\"]\n    s = list(set(li))\n    print(s)\n\n\ndef map_grammer():\n    li = [i for i in range(5)]\n    print(li)\n\n    def func(x):\n        return x*2\n\n    result = list(map(func, li))\n    print(result)\n\n\ndef dict_grammer():\n    dict = {\n        \"a\": 1,\n        \"b\": 2,\n        \"c\": 3,\n        \"d\": 4,\n        \"e\": 5\n    }\n\n    print(dict.get(\"f\"))\n    print(dict.keys())\n    print(dict.values())\n\n\ndef sort_grammer():\n    students = [\n        ('hong', 3.9, 2016303),\n        ('kim', 3.0, 2016302),\n        ('choi', 4.3, 2016301),\n    ]\n    sorted_list = sorted(students, key=lambda student:student[1], reverse=True)\n    print(sorted_list)\n\nif 
__name__ == \"__main__\":\n sort_grammer()","repo_name":"be-lgreen/algorithm","sub_path":"python/grammer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73435787291","text":"\nnums = [2,7,11,15]\ntarget = 9\n\ntable = {}\n\nfor i in range(len(nums)):\n complement = target - nums[i]\n if complement in table.keys():\n secondIndex = nums.index(complement)\n if i != secondIndex:\n print(sorted([i,secondIndex]))\n table.update({nums[i]:i}) \n\n\n","repo_name":"cullinap/dev","sub_path":"practice/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35716198710","text":"f = open(\"4/input.txt\")\n\noverlap = 0\n\nfor line in f:\n line = line.rstrip()\n bounds_1, bounds_2 = line.split(\",\")\n bounds_1 = tuple(map(int, bounds_1.split(\"-\")))\n bounds_2 = tuple(map(int, bounds_2.split(\"-\")))\n\n range_1 = range(bounds_1[0], bounds_1[1] + 1)\n range_2 = range(bounds_2[0], bounds_2[1] + 1)\n\n if set(range_1) & set(range_2):\n overlap += 1\n\nprint(overlap)\n","repo_name":"goedwig/advent-of-code-2022","sub_path":"04/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22108102025","text":"from flask import Flask, render_template\n\nimport database\n\napp = Flask(__name__)\n\ndata = [\n {\n 'name': 'aakriti',\n 'age': 25\n },\n {\n 'name': 'abc',\n 'age': 30\n },\n {\n 'name': 'mno',\n 'age': 45\n },\n {\n 'name': 'xyz',\n 'age': 45\n },\n]\n\ndata1 = database\nprint(data)\n\n\n@app.route(\"/\")\ndef hello_world():\n # return \"

Hello, World!

\"\n return render_template('home.html', data=data)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"aakriti04/python-web-test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30945109616","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom collections import *\ndef checkIfPilable(arr):\n #start with a none pile element and we just pop towards the prev pile\n prev_pile = None\n #check if the left most or the right most is greater and then check if that is greater than the prev_pile if it is return no. Until we have no element left to pop\n while arr:\n if arr[-1] > arr[0]:\n prev_pile = arr.pop()\n else:\n prev_pile = arr.popleft()\n if len(arr) == 0:\n return \"Yes\"\n if arr[-1] > prev_pile or arr[0] > prev_pile:\n return \"No\"\n \n\nfor i in range(int(input())):\n number_of_blocks = int(input())\n blocks = deque(map(int, input().split()))\n print(checkIfPilable(blocks))\n","repo_name":"primequantuM4/competitive_programming","sub_path":"PilingUp.py","file_name":"PilingUp.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3117449844","text":"import logging\nimport pathlib\nimport threading\nfrom typing import Optional\n\nimport psycopg\nfrom psycopg.conninfo import make_conninfo\nimport psycopg.errors\nimport pytest\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\n\n\n@pytest.fixture(scope=\"session\")\ndef datadir() -> pathlib.Path:\n return pathlib.Path(__file__).parent / \"data\"\n\n\n@pytest.fixture\ndef execute(postgresql):\n \"\"\"Create a thread and return an execute() function that will run SQL queries in that\n thread.\n \"\"\"\n threads_and_cnx = []\n\n def execute(\n query: str,\n commit: bool = False,\n autocommit: bool = False,\n dbname: Optional[str] = None,\n ) -> None:\n dsn, kwargs = postgresql.info.dsn, {}\n if dbname:\n kwargs[\"dbname\"] = dbname\n conn = psycopg.connect(make_conninfo(dsn, **kwargs))\n conn.autocommit = autocommit\n\n def _execute() -> None:\n LOGGER.info(\n \"running query %s (commit=%s, autocommit=%s) using connection <%s>\",\n query,\n commit,\n autocommit,\n id(conn),\n )\n with conn.cursor() as c:\n try:\n c.execute(query)\n except (\n psycopg.errors.AdminShutdown,\n psycopg.errors.QueryCanceled,\n ):\n return\n if not autocommit and commit:\n conn.commit()\n LOGGER.info(\"query %s finished\", query)\n\n thread = threading.Thread(target=_execute, daemon=True)\n thread.start()\n threads_and_cnx.append((thread, conn))\n\n yield execute\n\n for thread, conn in threads_and_cnx:\n thread.join(timeout=2)\n LOGGER.info(\"closing connection <%s>\", id(conn))\n conn.close()\n","repo_name":"cmattos58/pg_activity","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"72329541852","text":"'''L'objectif de ce projet est d'aider une entreprise (un salon de coiffure) à planifier son fonctionnement pour le reste du mois. 
To do this, we walk through the lists of data collected over the last two weeks and compute a few key figures.\r\n\r\n#Here are three lists:\r\nhairstyles: the names of the cuts offered at Carly's Clippers\r\nprices: the price of each hairstyle in the hairstyles list\r\nlast_week: the number of each hairstyle in hairstyles that was purchased last week.'''\r\n \r\nhairstyles = [\"bouffant\", \"pixie\", \"dreadlocks\", \"crew\", \"bowl\", \"bob\", \"mohawk\", \"flattop\"]\r\n\r\nprices = [30, 25, 40, 20, 20, 35, 50, 35]\r\n\r\nlast_week = [2, 3, 5, 8, 4, 4, 6, 2]\r\n\r\ntotal_price = 0\r\n\r\nfor price in prices:\r\n    total_price = total_price + price\r\n\r\n# the averages and totals are computed once, after the loops, not on every iteration\r\naverage_price = total_price / len(prices)\r\nprint(\"Average price: \" + str(average_price))\r\n\r\nnew_prices = [price - 5 for price in prices]\r\nprint(new_prices)\r\n\r\ntotal_revenue = 0\r\n\r\nfor i in range(len(hairstyles)):\r\n    total_revenue += prices[i] * last_week[i]\r\n\r\nprint(\"Total revenue: \" + str(total_revenue))\r\n\r\naverage_daily_revenue = total_revenue / 7\r\nprint(average_daily_revenue)\r\n\r\ncuts_under_30 = [hairstyles[i] for i in range(len(hairstyles)) if new_prices[i] < 30]\r\nprint(cuts_under_30)\r\n","repo_name":"Dayeneriss/Carly-s-Clippers","sub_path":"clipper art.py","file_name":"clipper art.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18454296550","text":"from py2neo import *\r\nimport pandas as pd\r\nimport difflib\r\n\r\ndef insert_cve(cve_read):#86348\r\n    cve_list=[]\r\n    length=len(cve_read['cveID'])\r\n    for i in range(length):\r\n        cveid=cve_read['cveID'][i]\r\n        cve_list.append(cveid)\r\n\r\n    cve_list=list(set(cve_list))\r\n    # import into neo4j\r\n    i = 0\r\n    for cveid in cve_list:\r\n        centence = \"CREATE (n:CVE {name: '\" + cveid + \"'})\"\r\n        cypher = centence\r\n        cursor = graph.run(cypher)\r\n        i=i+1\r\n        print(i)\r\n\r\ndef insert_affect_product(cve_read):#43838\r\n    product_list=[]\r\n    length=len(cve_read['cpe23Uri'])\r\n    k=0\r\n    for i in range(length):\r\n        product=cve_read['cpe23Uri'][i].split(':')[4]\r\n        product=product.replace('/','')\r\n        product=product.replace('\\\\','_')\r\n        product=product.replace(\"'\",'')\r\n        versionstart = str(cve_read['versionStartIncluding'][i])\r\n        versionstart = versionstart.replace('\\\\', '')\r\n        versionend=str(cve_read['versionEndExcluding'][i])\r\n        versionend=versionend.replace('\\\\',',')\r\n        concat_string=product+':'+versionstart+':'+versionend\r\n        product_list.append(concat_string)\r\n    product_list = list(set(product_list))\r\n    # print(product_list)\r\n\r\n    # import into neo4j\r\n    for info in product_list:\r\n        product=info.split(':')[0]\r\n        versionstart=info.split(':')[1]\r\n        versionend=info.split(':')[2]\r\n        # print(product,versionstart,versionend)\r\n        centence = \"CREATE (n:Product {name: '\" + product + \"',versionStartIncluding: '\" + versionstart + \"',versionEndExcluding: '\"+versionend+\"'})\"\r\n        cypher = centence\r\n        cursor = graph.run(cypher)\r\n        k=k+1\r\n        print(k)\r\n\r\ndef insert_node(csv_read):#55057\r\n    concat_list = []\r\n    length = len(csv_read['package_name'])\r\n    # deduplicate before importing the nodes\r\n    for i in range(length):\r\n        concat_string = csv_read['package_name'][i] + ',' + csv_read['package_version'][i]\r\n        # concat_string=concat_string.replace('-','_')\r\n        concat_list.append(concat_string)\r\n    concat_list = list(set(concat_list))\r\n    # import into neo4j\r\n    i=0\r\n    for content in concat_list:\r\n        package_name = content.split(',')[0]\r\n        package_version = content.split(',')[1]\r\n        print(package_name, package_version)\r\n        centence = \"CREATE (n:Package {name: '\" + package_name + \"',version: '\" + package_version + \"'})\"\r\n        cypher = centence\r\n        cursor = graph.run(cypher)\r\n        i=i+1\r\n        print(i)\r\n\r\ndef insert_relationship(csv_read):#266287\r\n    length = len(csv_read['package_name'])\r\n    k=0\r\n    for i in range(length):\r\n        package_name=csv_read['package_name'][i]\r\n        depend_name=csv_read['depend_name'][i]\r\n        depend_name=depend_name.strip()\r\n        depend_version=csv_read['depend_version'][i]\r\n        # print(package_name,depend_name,depend_version)\r\n        centence='MATCH (a:Package), (b:Package) WHERE a.name = \"'+package_name+'\" AND b.name = \"'+depend_name+'\" CREATE (a)-[r:DEPEND{start:\"'+package_name+'\",end:\"'+depend_name+'\",relation:\"depend\"}]->(b)'\r\n        # print(relation_string)\r\n        cypher = centence\r\n        cursor = graph.run(cypher)\r\n        k=k+1\r\n        print(k)\r\n\r\ndef insert_cve_to_product(cve_read):#170000+\r\n    cve_product_list=[]\r\n    length = len(cve_read['cveID'])\r\n    k=0\r\n    for i in range(length):\r\n        cveid=cve_read['cveID'][i]\r\n        product = cve_read['cpe23Uri'][i].split(':')[4]\r\n        product = product.replace('/', '')\r\n        product = product.replace('\\\\', '_')\r\n        product = product.replace(\"'\", '')\r\n        versionstart = str(cve_read['versionStartIncluding'][i])\r\n        versionstart = versionstart.replace('\\\\', '')\r\n        versionend = str(cve_read['versionEndExcluding'][i])\r\n        versionend = versionend.replace('\\\\', ',')\r\n        concat_string =cveid+':'+ product + ':' + versionstart + ':' + versionend\r\n        cve_product_list.append(concat_string)\r\n    cve_product_list = list(set(cve_product_list))\r\n\r\n    # import the CVE-to-affected-product relationships into neo4j\r\n    for info in cve_product_list:\r\n        cveid=info.split(':')[0]\r\n        product=info.split(':')[1]\r\n        versionstart=info.split(':')[2]\r\n        versionend=info.split(':')[3]\r\n        # print(cveid,product)\r\n        centence=\"MATCH (a:CVE), (b:Product) WHERE a.name = '\"+cveid+\"' AND b.name = '\"+product+\"' AND b.versionStartIncluding='\"+versionstart+\"' AND b.versionEndExcluding='\"+versionend+\"' CREATE (a)-[r:AFFECT{start:'\"+cveid+\"',end:'\"+product+\"',relation:'affect'}]->(b)\"\r\n        cypher = centence\r\n        cursor = graph.run(cypher)\r\n        k=k+1\r\n        print(k)\r\n\r\ndef insert_product_to_node(cve_read,csv_read):# stalled at 945; that felt like enough, so it was not rerun\r\n    package_name_list = []\r\n    product_list = []\r\n    for cpe23Uri in cve_read['cpe23Uri']:\r\n        product=cpe23Uri.split(':')[4]\r\n        product = product.replace('/', '')\r\n        product = product.replace('\\\\', '_')\r\n        product = product.replace(\"'\", '')\r\n        product_list.append(product)\r\n    product_list=list(set(product_list))\r\n\r\n    for package_name in csv_read['package_name']:\r\n        package_name_list.append(package_name)\r\n    package_name_list=list(set(package_name_list))\r\n\r\n    # fuzzy string matching with difflib's get_close_matches; a cutoff of 0.9 gives the best accuracy, while 0.8 returns more useful matches; 0.9 is used here for accuracy\r\n    # named-entity recognition could be used instead\r\n    i=0\r\n    for product in product_list:\r\n        result_list = difflib.get_close_matches(product, package_name_list, 10, 0.9)\r\n        if len(result_list) > 0:\r\n            # print(product, result_list, len(result_list))\r\n            for result in result_list:\r\n                # import the affected-product-to-package-name relationships into neo4j\r\n                centence = \"MATCH (a:Product), (b:Package) WHERE a.name = '\" + product + \"' AND b.name = '\" + result + \"' CREATE (a)-[r:LINK{start:'\"+product+\"',end:'\"+result+\"',relation:'link'}]->(b)\"\r\n                # print(centence)\r\n                cypher = centence\r\n                cursor=graph.run(cypher)\r\n        i=i+1\r\n        print(i)\r\n\r\nif __name__==\"__main__\":\r\n    # package_name_list = []\r\n    graph=Graph('http://localhost:7474/',user='#your username#',password='#your password#')\r\n    df_package = pd.read_csv('./package.csv')\r\n    df_cve = pd.read_csv('./cve.csv')\r\n    # insert_cve(df_cve)\r\n    # insert_affect_product(df_cve)\r\n    # insert_node(df_package)\r\n    # insert_cve_to_product(df_cve)\r\n    # insert_relationship(df_package)\r\n    # insert_product_to_node(df_cve,df_package)\r\n","repo_name":"knighttt7/csv_and_package_to_neo4j","sub_path":"import_csv_to_neo4j.py","file_name":"import_csv_to_neo4j.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40392392482","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\n\nfrom cryptography.fernet import Fernet\n\n# path to search file\nsecretphrase = \"group13\"\nunlock_phrase = input(\"Enter The password to Unlock the Files\")\n\nif unlock_phrase == secretphrase:\n    path = '**/*.txt'\n    inc = 1\n    files = []\n    for file in glob.glob(path, recursive=True):\n        print(file)\n        if file == \"virus.py\" or file == \"thekey.key\" or file == \"decrypt.py\":\n            continue\n        files.append(file)\n        #if os.path.isfile(file):\n        print(inc)\n        inc = inc+1\n\n    print(files)\n\n    # key = Fernet.generate_key() // This is already Defined on the Virus\n    with open(\"thekey.key\" , \"rb\" ) as key:\n        secretkey = key.read()\n    # print(key)\n    \n    for file in files:\n        with open(file, \"rb\") as thefile:\n            contents = thefile.read()\n        contents_decrypted = Fernet(secretkey).decrypt(contents)\n        with open(file, \"wb\") as thefile:\n            thefile.write(contents_decrypted)","repo_name":"NikhilMakkena/Ransomware","sub_path":"decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4257393752","text":"import numpy as np\nfrom random import shuffle\nfrom Exemple import *\n\n\nclass NeuralNetwork:\n    def __init__(self, learning_rate, layers):\n        self.learning_rate = learning_rate\n        self.layers = layers\n        self.size = len(layers)\n        self.weights = []\n        self.bias = []\n        for i in range(0, self.size - 1):\n            self.weights.append(np.random.randn(layers[i + 1], layers[i]) / np.sqrt(layers[i + 1]))\n        for j in range(1, self.size):\n            self.bias.append(np.random.randn(layers[j], 1))\n        self.errors = []\n        self.a_s = []\n        self.z_s = []\n        self.a = []\n        self.z = []\n\n    @staticmethod\n    def sigmoid(z):\n        return 1 / (1 + np.exp(-z))\n\n    @staticmethod\n    def sigmoid_prime(z):\n        return np.exp(-z) / ((1 + np.exp(-z)) ** 2)\n\n    @staticmethod\n    def to_vector(index):\n        e = np.zeros((10, 1))\n        e[index] = 1.0\n        return e\n\n    @staticmethod\n    def find_output(vector):\n        return np.argmax(vector)\n\n    def propagate(self, x):\n        \"\"\"Method used to pass an input x through the neural network and return its output\"\"\"\n        self.clear()\n        self.a.append(x)\n\n        for i in range(0, self.size - 1):\n            self.z.append(np.dot(self.weights[i], self.a[i]) + self.bias[i])\n            self.a.append(self.sigmoid(self.z[i]))\n\n        self.z_s.append(self.z.copy())\n        retour = self.a.copy()\n        del self.a[-1]\n\n        self.a_s.append(self.a.copy())\n        return retour[-1]\n\n    def back_propagation(self, x, y):\n        \"\"\"Calculate the error vectors for a given example\"\"\"\n\n        error = [0] * (self.size - 1)\n        y_hat = self.propagate(x)\n        error[-1] = np.multiply(-(y - y_hat), self.sigmoid_prime(self.z[-1]))\n        for i in range(1, self.size - 1):\n            error[-(i + 1)] = np.dot(self.weights[-i].T, error[-i]) * self.sigmoid_prime(self.z[-(i + 
1)])\n\n self.errors.append(error)\n self.a.clear()\n self.z.clear()\n\n def gradient_descent(self, m):\n \"\"\"Updates the bias and weights using the errors vectors of all\n exemples that had been through backpropagation\"\"\"\n for a in range(0, self.size - 1):\n sum = self.errors[0][a]\n for i in range(1, len(self.errors)):\n sum = sum + self.errors[i][a]\n sum = sum * self.learning_rate / m\n self.bias[a] = self.bias[a] - sum\n\n for b in range(0, self.size - 1):\n sum = np.dot(self.errors[0][b], np.transpose(self.a_s[0][b]))\n for i in range(1, len(self.errors)):\n sum = sum + np.dot(self.errors[i][b], np.transpose(self.a_s[i][b]))\n sum = sum * self.learning_rate / m\n self.weights[b] = self.weights[b] - sum\n\n self.clear()\n\n def clear(self):\n \"\"\"Clears the errors, as, and zs after a gradient descent\"\"\"\n self.errors.clear()\n self.a_s.clear()\n self.z_s.clear()\n self.a.clear()\n self.z.clear()\n\n def train(self, exemples_input, exemples_output, epoch_number, batch_size, test_input, test_output):\n exemples = []\n for o in range(0, len(exemples_input)):\n exemples.append(Exemple(exemples_input[o], exemples_output[o]))\n\n for j in range(0, epoch_number):\n h = 0\n shuffle(exemples)\n\n while h < len(exemples) - batch_size:\n\n for g in range(h, h + batch_size):\n image = np.array(exemples[g].ex_input)\n self.back_propagation(np.reshape(image, (self.layers[0], 1)),\n self.to_vector(exemples[g].ex_output))\n\n self.gradient_descent(batch_size)\n self.clear()\n h = h + batch_size\n self.test(test_input, test_output, j)\n\n def test(self, test_input, test_output, batch_number):\n count = 0\n for v in range(0, len(test_input)):\n\n \"\"\"Testing phase\"\"\"\n image = np.array(test_input[v])\n\n if self.find_output(self.propagate(np.reshape(image, (self.layers[0], 1)))) == test_output[v]:\n count = count + 1\n\n print(\n \"Epoch n°\" + str(batch_number + 1) + \" --> Réussis : \" + str(count) + \"/10000 Making an accuracy of \" + str(\n count * 100 / len(test_input)) + \"%\")\n","repo_name":"JulienMalka/NeuralNetwork","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"20352919977","text":"def convert_all_elements_to_int(input_list):\n result = input_list\n for idx in range(0, len(input_list)):\n result[idx] = [int(i) for i in input_list[idx]]\n\n return result\n\ndef is_low_point(input, x, y):\n lowest_point = 9\n if x > 0:\n if (input[x-1][y] < lowest_point):\n lowest_point = input[x-1][y]\n if x < (len(input)-1):\n if (input[x+1][y] < lowest_point):\n lowest_point = input[x+1][y]\n if y > 0:\n if (input[x][y-1] < lowest_point):\n lowest_point = input[x][y-1]\n if y < (len(input[0])-1):\n if (input[x][y+1] < lowest_point):\n lowest_point = input[x][y+1]\n\n if (input[x][y] < lowest_point):\n return True\n else:\n return False\n\ndef compute_total_risk(input):\n total_risk = 0\n for x in range(0, len(input)):\n for y in range(0, len(input[x])):\n if (is_low_point(input, x, y)):\n total_risk += input[x][y] + 1\n\n return total_risk\n\n\nwith open(\"input9\") as f:\n input = f.read().splitlines()\n\ninput = convert_all_elements_to_int(input)\n\nresult = compute_total_risk(input)\n\nprint('The answer is: ', 
result)\n\n","repo_name":"matsrosbach/adventofcode21","sub_path":"day9/smoke_basin1.py","file_name":"smoke_basin1.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43123958189","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom .models import Nachricht, Fach, LernSet, LernKarte, Progress\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(max_length=50, label='Username', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left;',\n 'placeholder': 'Dein Username'\n }\n ))\n password = forms.CharField(min_length=6, max_length=50, label='Passwort', widget=forms.PasswordInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left;',\n 'placeholder': 'Dein Passwort'\n }\n ))\n\n email = forms.EmailField(label='Email', widget=forms.EmailInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left;',\n 'placeholder': 'Deine Email'\n }\n ))\n\n def clean_username(self):\n username = self.cleaned_data['username']\n if User.objects.filter(username__iexact=username).exists():\n raise forms.ValidationError('Dieser Username existiert bereits!')\n return username\n\n def clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email__iexact=email).exists():\n raise forms.ValidationError('Diese Email existiert bereits!')\n return email\n\nclass LoginForm(forms.Form):\n username = forms.CharField(max_length=50, label='Username', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left;',\n 'placeholder': 'Dein Username'\n }\n ))\n\n password = forms.CharField(min_length=6, max_length=50, label='Passwort', widget=forms.PasswordInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left;',\n 'placeholder': 'Dein Passwort'\n }\n ))\n\nclass FachForm(forms.ModelForm):\n name = forms.CharField(label='', empty_value='hi', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left; margin-right: 1rem;',\n 'placeholder': 'Fachname: z.B. Englisch',\n 'maxlength': '30',\n }\n ))\n descirption = forms.CharField(required=False, label='', empty_value='', widget=forms.Textarea(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left;resize: none; margin-top:1rem',\n 'placeholder': 'Beschreibung: z.B. Mein Wortschatz im Englisch in der FMS',\n 'maxlength': '255',\n }\n ))\n class Meta:\n model = Fach\n fields =['name', 'descirption']\n\nclass SetForm(forms.ModelForm):\n name = forms.CharField(label='', empty_value='Hallo', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left; margin-top:1rem;',\n 'placeholder': 'Lernset-Name: z.B. Unit 2',\n 'maxlength': '50'\n }\n ))\n descirption = forms.CharField(required=False, label='', empty_value='', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%;float:left; margin-top:1rem;',\n 'placeholder': 'Beschreibung: z.B. 
Für Test von Unit 2',\n 'maxlength': '255'\n }\n ))\n\n class Meta:\n model = LernSet\n fields =['name', 'descirption']\n\nclass CardForm(forms.ModelForm):\n txt_front = forms.CharField(label='', empty_value='Hallo', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%; vertical-align: middle; margin: 5px 10px 5px 0; padding: 10px; background-color: #fff; border: 1px solid #ddd;display: inline;',\n 'placeholder': 'Übersetzung/Wort z.B. Stuhl',\n 'maxlength': '100'\n }\n ))\n txt_back = forms.CharField(label='', empty_value='Hallo', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%; vertical-align: middle; margin: 5px 10px 5px 0; padding: 10px; background-color: #fff; border: 1px solid #ddd;display: inline;',\n 'placeholder': 'Übersetzung/Wort z.B. chair',\n 'maxlength': '100'\n }\n ))\n donkey_bridge = forms.CharField(required=False, label='', empty_value='', widget=forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'style': 'width: 100%; vertical-align: middle; margin: 5px 10px 5px 0; padding: 10px; background-color: #fff; border: 1px solid #ddd;display: inline;',\n 'placeholder': 'eine Eselsbrücke / Abstand:keine Eselsbrücke',\n 'maxlength': '80'\n }\n ))\n class Meta:\n model = LernKarte\n fields =['txt_front', 'txt_back', 'donkey_bridge']","repo_name":"EliasEugiii/Metis-Using-Django","sub_path":"web/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70507986013","text":"import csv\nimport matplotlib.pyplot as plt\nimport os\nimport sys\n\nwalk_dir = sys.argv[1]\nhost = 'stackexchange.com' #Add your host here\nrun_once = True\n\n\ndef getColumn(filename, column, host):\n\tresults = csv.reader(open(filename), dialect='excel')\n\treq = []\n\tfor result in results:\n\t\tif host in result[0] and result[0] is not None:\n\t\t\treq.append(result[column])\n\treturn req\n\nfor root,subdirs,files in os.walk(walk_dir):\n\tfor filename in files:\n\t\tfile_path = os.path.join(root,filename)\n\t\twith open(file_path,'r') as f:\n\t\t\tif run_once:\n\t\t\t\tframe_size = getColumn(file_path,1,host)\n\t\t\t\tplt.xlabel('Frame size')\n\t\t\t\tplt.ylabel('Average RTT (ms)')\n\t\t\t\tplt.title('Plot For ' + host)\n\t\t\ttime = getColumn(file_path,6,host)\n\t\t\tprint(time[0])\n\t\t\trun_once = False\n\t\t\trtt_Avg = getColumn(file_path,3,host)\n\t\t\tplt.plot(frame_size,rtt_Avg,linewidth = 3, linestyle = 'dashed',marker='o',label=time[0][12:])\n\t\t\tplt.legend(loc='upper left')\nplt.savefig(host[:-4])\nplt.clf()\n\n\n \n\t \n\n","repo_name":"Arkadeep-sophoIITG/CS349_NetworksLab_IITG","sub_path":"Assignment 1/scripts/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25986681215","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom quafu import QuantumCircuit, Task, simulate\n\ndef iqft(qc, n):\n \"\"\"\n inverse quantum Fourier transform, apply the adjoint operator\n in the original quantum Fourier transform circuit in reverse order.\n \"\"\"\n\n for qubit in range(n // 2):\n qc.swap(qubit, n - qubit - 1) # swap before rotate\n for i in range(n):\n for k in range(i):\n qc.cp(n - k - 1, n - 1 - i, -np.pi * (2 ** (k - i))) # in reverse order\n qc.h(n - i - 1)\n return qc\n\n\ndef mix(qc, n):\n \"\"\"\n mix into the uniform superposition state\n \"\"\"\n for 
qubit in range(n):\n qc.h(qubit)\n return qc\n\n\ndef pe_test(): # just for test\n qc2 = QuantumCircuit(6)\n qc2.x(5)\n mix(qc2, 5)\n angle = 2 * np.pi / 3\n repetitions = 1\n for help_qubit in range(5):\n for i in range(repetitions):\n qc2.cp(help_qubit, 5, angle)\n repetitions *= 2\n qc2.barrier([0, 1, 2, 3, 4, 5])\n iqft(qc2, 5)\n qc2.barrier([0, 1, 2, 3, 4, 5])\n qc2.measure([0, 1, 2, 3, 4])\n qc2.draw_circuit(width=0)\n simu_res2 = simulate(qc2, output='probabilities')\n simu_res2.plot_probabilities()\n plt.show()\n\n\ndef controlledu(qc, a=7):\n for help_qubit in range(9):\n ctrl=8-help_qubit\n for iteration in range(2 ** help_qubit):\n if a in [2, 13]:\n qc.fredkin(ctrl, 9, 10)\n qc.fredkin(ctrl, 10, 11)\n qc.fredkin(ctrl, 11, 12)\n if a in [7, 8]:\n qc.fredkin(ctrl, 11, 12)\n qc.fredkin(ctrl, 10, 11)\n qc.fredkin(ctrl, 9, 10)\n if a in [4, 11]:\n qc.fredkin(ctrl, 9, 11)\n qc.fredkin(ctrl, 10, 12)\n if a in [7, 11, 13]:\n for q in range(4):\n qc.cx(ctrl, 9 + q)\n\n\ndef controlledu_test(): ##just for test\n qct = QuantumCircuit(13)\n for z in range(9):\n qct.x(z)\n qct.x(12)\n controlledu(qct)\n qct.measure([9, 10, 11, 12])\n qct.draw_circuit(width=1)\n simu_res_test = simulate(qct, output='probabilities')\n simu_res_test.plot_probabilities(full=False)\n plt.show()\n\nN = 15\na = 7\nx = 7\nn = 13\n# pe_test()\n#controlledu_test()\nqpe = QuantumCircuit(n)\nqpe.x(n - 1) # prepare |1> state in the second register\nmix(qpe, n - 4)\ncontrolledu(qpe) # controlled U\niqft(qpe, n - 4)\nqpe.measure(list(range(n - 4)))\nqpe.draw_circuit(width=1)\nsimu_res = simulate(qpe, output='state_vector')\nprint(simu_res.state_vector)\nplt.show()\n","repo_name":"ScQ-Cloud/quafu-tutorial","sub_path":"algorithm/Shor algorithm/Shor's_algorithm/pyquafu_Shor.py","file_name":"pyquafu_Shor.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"3455843601","text":"import os\nfrom keras.models import Sequential\nfrom keras.layers import Dense, GRU, Activation, LSTM, TimeDistributed\n#from keras.optimizers import SGD\n#import random\n#import math \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nimport pydot\nimport graphviz\nfrom keras.utils.visualize_util import plot\n\ndirName = os.path.dirname(os.path.realpath(__file__)).replace(\"\\\\\", \"/\")\n\nos.system('cls')\n\nLOW = 0\nHIGH = 127\nL = 8\nE = 100\nNUM_TRAIN_INPUTS = 100\nNUM_TOTAL_INPUTS = 128\nraw = list(range(LOW, HIGH + 1))\ns = len(raw)\nX1 = []\nX2 = []\nfor i in range(0, NUM_TOTAL_INPUTS):\n\tX1.append(raw[i * 7 % s ])\n\tX2.append(raw[i * 11 % s ])\n\n# X1 = list(range(LOW, HIGH + 1))\n# random.shuffle(X1)\n# X2 = list(range(LOW, HIGH + 1))\n# random.shuffle(X2)\nY = [x1 + x2 for x1, x2 in zip(X1, X2)]\n\n\ndef base(n, b):\n\t\"\"\"Base-b representation of number n\"\"\"\n\tr = n // b\n\tif r == 0:\n\t\treturn [n]\n\telse: \n\t\trest = base(r, b)\n\t\trest.append(n % b)\n\t\treturn rest\n\ndef padding(l):\n\t\"\"\"Pads given list to have a size L\"\"\"\n\treturn (L*[0] + l)[-L:]\n\nX1base = list(map(lambda i: padding(base(i, 2)), X1))\nX2base = list(map(lambda i: padding(base(i, 2)), X2))\nYbase = list(map(lambda i: padding(base(i, 2)), Y))\n\nX = []\nfor (x1, x2) in zip(X1base, X2base):\n\tfor i in range(0, len(x1)):\n\t\tX.append([x1[i], x2[i]]) \n\nX = np.array(X).reshape((-1, L, 2))\nY = np.array(Ybase).reshape((-1, L, 1))\n\n\nprint('Build model...')\nmodel = Sequential()\nmodel.add(LSTM(output_dim = 
2*L, input_length = L, input_dim = 2, return_sequences = True, activation = 'linear', name=\"analyzer\"))\nmodel.add(Dense(L, activation='linear'))\nmodel.add(TimeDistributed(Dense(2, activation='sigmoid')))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.add(TimeDistributed(Dense(1, activation='sigmoid')))\nmodel.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['fbeta_score']) \nmodel.summary()\nplot(model, to_file= dirName + '/model_diagram.png')\nhistory = model.fit(X[:NUM_TRAIN_INPUTS], Y[:NUM_TRAIN_INPUTS], nb_epoch=E, batch_size=8, verbose= 0)\n\n\n\n# print('prediction', X[NUM_TRAIN_INPUTS:], model.predict(X[NUM_TRAIN_INPUTS:]))\n\nfor layer in model.layers:\n\tprint(layer.name, 'input shape', layer.input_shape, 'output shape', layer.output_shape)\n\tprint(layer.get_config())\n\nplt.plot(list(range(1, E+1)), history.history['fbeta_score'], 'k', color='green')\nplt.plot(list(range(1, E+1)), history.history['loss'], 'k', color='blue')\nplt.xlabel('Epoch')\nplt.title('Binary addition')\n\nloss_line = mlines.Line2D([], [], color='blue', label='loss')\nfscore_line = mlines.Line2D([], [], color='green', label='F1 score')\nplt.legend(handles=[loss_line, fscore_line])\n\nplt.show()\n","repo_name":"veontomo/TFStudy","sub_path":"BinaryAddition.py","file_name":"BinaryAddition.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37535843738","text":"\nimport lmdb\nimport sys\nsys.path.append('..')\n\nfrom config import config\nmyconfig = config()\n\ndef create_lmdb(path='./lmdb_emb',map_size=21474836480):\n    env = lmdb.open(path,map_size=map_size)\n    return env\n\ndef insert(env, key, value):\n    txn = env.begin(write=True)\n    # lmdb keys and values must be bytes, not str\n    txn.put(str(key).encode('utf-8'), value.encode('utf-8'))\n    txn.commit()\n\ndef main(path='./lmdb_emb'):\n    env = create_lmdb(path=path)\n    count = 0\n    with open(myconfig.embedding_path) as f:\n        for line in f:\n            key,*value = line.replace('\\n','').split(' ')\n            if len(value) == 1:\n                print('word num:%s' % key)\n                continue\n            value = ' '.join(value)\n            print(key,value)\n            insert(env, key, value)\n            count += 1\n            # progress marker every 10000 entries\n            if count % 10000 == 0: print(count)\n    env.close()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"currylym/entity_linking","sub_path":"preprocess/build_lmdb.py","file_name":"build_lmdb.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"32"} +{"seq_id":"12407063391","text":"green_latitudes = []\ngreen_longitudes = []\n\nwith open(\"Green.csv\") as greenFile:\n    for line in greenFile.readlines():\n        green_latitude, green_longitude = line.split(';')\n        green_latitudes.append(float(green_latitude))\n        green_longitudes.append(float(green_longitude))\n\nyellow_latitudes = []\nyellow_longitudes = []\n\nwith open(\"Yellow.csv\") as yellowFile:\n    for line in yellowFile.readlines():\n        yellow_latitude, yellow_longitude = line.split(';')\n        yellow_latitudes.append(float(yellow_latitude))\n        yellow_longitudes.append(float(yellow_longitude))\n\nred_latitudes = []\nred_longitudes = []\n\nwith open(\"Red.csv\") as redFile:\n    for line in redFile.readlines():\n        red_latitude, red_longitude = line.split(';')\n        red_latitudes.append(float(red_latitude))\n        red_longitudes.append(float(red_longitude))\n\nimport sqlite3\n\nconn = sqlite3.connect(\"base.db\")\ncur = conn.cursor()\n\nfor i in range(len(green_latitudes)):\n    cur.execute(\"INSERT INTO HOUSES VALUES(NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", (\"Unknown\", \"Unknown\", 
\"None\", green_longitudes[i], green_latitudes[i], \"\", \"Green\", \"1.0\", \"NA\", \"NA\"))\n\nfor i in range(len(yellow_latitudes)):\n cur.execute(\"INSERT INTO HOUSES VALUES(NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", (\"Unknown\", \"Unknown\", \"None\", yellow_longitudes[i], yellow_latitudes[i], \"\", \"Yellow\", \"1.0\", \"NA\", \"NA\"))\n\nfor i in range(len(red_latitudes)):\n cur.execute(\"INSERT INTO HOUSES VALUES(NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", (\"Unknown\", \"Unknown\", \"None\", red_longitudes[i], red_latitudes[i], \"\", \"Red\", \"1.0\", \"NA\", \"NA\"))\n\nconn.commit()","repo_name":"flaviens/LDRM","sub_path":"populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34552450818","text":"import collections as col\n\nimport joblib as jl\nimport nems.db as nd\nimport numpy as np\n\nfrom src.metrics import prm_dispersion as cdisp\nfrom src.data import epochs as cep\nimport nems.modelspec as ms\nimport nems.xforms as xforms\nimport matplotlib.pyplot as plt\nfrom src.visualization import fancy_plots as cplt\n\n'''\nA more resent version of this scrip is availabe at\n/home/mateo/code/context_probe_analysis/181129_resp_pred_disp_comp_v2.py\n\nthe end word of this script is that\n'''\n\n\nbatch = 310\nresults_file = nd.get_results_file(batch)\n\nall_models = results_file.modelname.unique().tolist()\nresult_paths = results_file.modelpath.tolist()\nmod_modelnames = [ss.replace('-', '_') for ss in all_models]\n\nmodels_shortname = {'wc.2x2.c_fir.2x15_lvl.1_dexp.1': 'LN',\n 'wc.2x2.c_stp.2_fir.2x15_lvl.1_dexp.1': 'STP',\n 'wc.2x2.c_fir.2x15_lvl.1_stategain.18_dexp.1': 'pop',\n 'wc.2x2.c_stp.2_fir.2x15_lvl.1_stategain.18_dexp.1': 'STP_pop'}\n\nall_cells = nd.get_batch_cells(batch=310).cellid.tolist()\n\ngoodcell = 'BRT037b-39-1'\nbest_model = 'wc.2x2.c-stp.2-fir.2x15-lvl.1-stategain.18-dexp.1'\ntest_path = '/auto/data/nems_db/results/310/BRT037b-39-1/BRT037b-39-1.wc.2x2.c_stp.2_fir.2x15_lvl.1_stategain.18_dexp.1.fit_basic.2018-11-14T093820/'\n\nrerun = False\n# using single cell recording predictions, generates a population recording\nif rerun == True:\n pop_rec_dict = col.defaultdict()\n for model in mod_modelnames:\n\n cell_resp_dict = dict()\n cell_pred_dict = col.defaultdict()\n\n for cellid in all_cells:\n filepath = [ff for ff in result_paths if cellid in ff and model in ff][0]\n\n # use modelsepcs to predict the response of resp\n xfspec, ctx = xforms.load_analysis(filepath=filepath, eval_model=True, only=None)\n modelspecs = ctx['modelspecs'][0]\n cellid = modelspecs[0]['meta']['cellid']\n modelname = modelspecs[0]['meta']['modelname']\n rec = ctx['rec'].copy()\n rec = ms.evaluate(rec, modelspecs) # recording containing signal for resp and pred\n cell_resp_dict.update(rec['resp']._data)\n cell_pred_dict[cellid] = rec['pred']._data\n\n # create a new population recording. pull stim from last single cell, create signal from meta form last resp signal and\n # stacked data for all cells. 
modify signal metadata to be consistent with the previous\n        pop_resp = rec['resp']._modified_copy(data=cell_resp_dict,\n                                              chans=list(cell_resp_dict.keys()),\n                                              nchans=len(list(cell_resp_dict.keys())))\n\n        stack_data = np.concatenate(list(cell_pred_dict.values()), axis=0)\n        pop_pred = rec['pred']._modified_copy(data=stack_data,\n                                              chans=list(cell_pred_dict.keys()),\n                                              nchans=len(list(cell_pred_dict.keys())))\n\n        pop_rec = rec.copy()\n\n        pop_rec['resp'] = pop_resp\n        pop_rec['pred'] = pop_pred\n        del pop_rec.signals['state']\n        del pop_rec.signals['state_raw']\n\n        # stores recording in a dictionary, renaming with a shorter model name\n        pop_rec_dict[model] = pop_rec\n    jl.dump(pop_rec_dict, '/home/mateo/code/context_probe_analysis/pickles/pop_predictions')\n\nelse:\n    pop_rec_dict = jl.load('/home/mateo/code/context_probe_analysis/pickles/pop_predictions')\n# renames if necessary\npop_rec_dict = {models_shortname[key]:val for key, val in pop_rec_dict.items()}\n\n################################\n# compare the dispersion for the actual response and the predicted response for all models\n# format epochs for all recordings\nformated = {key: cep.set_recording_subepochs(rec) for key, rec in pop_rec_dict.items()}\n\n# some PSTH plotting to visually assess the quality of the different model fits.\ncells = ['BRT037b-39-1']\n# cells = 'all'\ncells = ['BRT037b-31-1', 'BRT037b-33-3', 'BRT037b-36-3', 'BRT037b-38-1', 'BRT037b-39-1', 'BRT037b-46-1'] # best cells\nepoch = r'\\AC\\d_P4' # best stimulus\n\nfor key, rec in formated.items():\n    fig, axes = cplt.hybrid(rec['pred'], epoch_names=epoch, channels=cells, start=3, end=6, sub_types=[False, True, False])\n    fig.suptitle(key)\nfig, axes = cplt.hybrid(formated['LN']['resp'], epoch_names=epoch, channels=cells, start=3, end=6, sub_types=[True, True, False])\nfig.suptitle('resp')\n\n# calculates the dispersion for each prediction and one response\ndispersions = {modelname:\n                   cdisp.signal_all_context_sigdif(rec['pred'], channels='all', probes=(1, 2, 3, 4),\n                                                   dimensions='population', sign_fs=100, window=1, rolling=True,\n                                                   type='Euclidean', recache=False,\n                                                   signal_name='181115-{}'.format(modelname),\n                                                   value='metric')[0]\n               for modelname, rec in formated.items()}\n\ndispersions['resp'] = cdisp.signal_all_context_sigdif(formated['LN']['resp'], channels='all', probes=(1, 2, 3, 4),\n                                                      dimensions='population', sign_fs=100, window=1, rolling=True,\n                                                      type='Euclidean', recache=False,\n                                                      signal_name='181115-{}'.format('resp'),\n                                                      value='metric')[0]\n# calculates the p-value of the dispersion for each prediction and one response\npvals = {modelname:\n             cdisp.signal_all_context_sigdif(rec['pred'], channels='all', probes=(1, 2, 3, 4),\n                                             dimensions='population', sign_fs=100, window=1, rolling=True,\n                                             type='Euclidean', recache=False,\n                                             signal_name='181115-{}'.format(modelname),\n                                             value='pvalue')[0]\n         for modelname, rec in formated.items()}\n\npvals['resp'], stim_names = cdisp.signal_all_context_sigdif(formated['LN']['resp'], channels='all', probes=(1, 2, 3, 4),\n                                                            dimensions='population', sign_fs=100, window=1, rolling=True,\n                                                            type='Euclidean', recache=False,\n                                                            signal_name='181115-{}'.format('resp'),\n                                                            value='pvalue')\n\n\n\n\n# plots the dispersion over time\nfig, axes = plt.subplots(3,2)\naxes = np.ravel(axes)\nfor ii, (key, value) in enumerate(dispersions.items()):\n    ax = axes[ii]\n    ax.imshow(value, aspect='auto', origin='lower')\n    ax.set_title(key)\n    ax.set_yticks([0,1,2,3], minor=False)\n    ax.set_yticklabels(stim_names)\n    ax.axvline(300, 
color='red')\nfig.suptitle('euclidean distance over time for each probe')\n\n# plot difference pval over time\nfig, axes = plt.subplots(3,2)\naxes = np.ravel(axes)\nfor ii, (key, value) in enumerate(pvals.items()):\n    ax = axes[ii]\n    ax.imshow(value, aspect='auto', origin='lower')\n    ax.set_title(key)\n    ax.set_yticks([0,1,2,3], minor=False)\n    ax.set_yticklabels(stim_names)\n    ax.axvline(300, color='red')\nfig.suptitle('pvalue (of euclidean) over time for each probe')\n\n# plots significant difference over time\nsignificnaces = {key: cdisp._significance_criterion(val, axis=1, window=1, threshold=0.05, comp='<=')\n                 for key, val in pvals.items()}\nfig, axes = plt.subplots(3,2)\naxes = np.ravel(axes)\nfor ii, (key, value) in enumerate(significnaces.items()):\n    ax = axes[ii]\n    ax.imshow(value, aspect='auto', origin='lower', cmap='binary')\n    ax.set_title(key)\n    ax.set_yticks([0,1,2,3], minor=False)\n    ax.set_yticklabels(stim_names)\n    ax.axvline(300, color='red')\nfig.suptitle('significant difference over time for each probe')\n\n# plots all signals (resp, and model predictions) side by side. Collapses all different probes by the mean\n\ncollapsed = {key: np.nanmean(val, axis=0) for key, val in dispersions.items()}\n# fits an exponential decay. Todo make it work\nfitted = {key: cdisp.disp_exp_decay(val, start=300, prior=1, axis=None)[0] for key, val in collapsed.items()}\n\nfig, ax = plt.subplots()\nfor ii, (key, val) in enumerate(collapsed.items()):\n    color = 'C{}'.format(ii)\n    ax.plot(val, label=key, color=color)\n    ax.plot(dispersions[key].T, color=color, alpha=0.2)\n    ax.plot(fitted[key], color=color)\nax.axvline(300, color='black')\nax.legend()\n","repo_name":"Mateo-Lopez-Espejo/context_probe_analysis","sub_path":"scripts/1_euclidean/181114_resp_pred_disp_comparison.py","file_name":"181114_resp_pred_disp_comparison.py","file_ext":"py","file_size_in_byte":8697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8975293201","text":"import pika\r\nimport json\r\nimport logging\r\nfrom pika.adapters import tornado_connection\r\n\r\n\r\nclass PikaClient:\r\n    def __init__(self, io_loop):\r\n        self.io_loop = io_loop\r\n        self.connected = False\r\n        self.connecting = False\r\n        self.connection = None\r\n        self.channel = None\r\n        self.event_listeners = set()\r\n\r\n    def connect(self):\r\n        if self.connecting:\r\n            return\r\n        self.connecting = True\r\n        cred = pika.PlainCredentials('root', '123')\r\n        param = pika.ConnectionParameters(host=\"127.0.0.1\", credentials=cred)\r\n        self.connection = tornado_connection.TornadoConnection(param, custom_ioloop = self.io_loop, on_open_callback = self.on_connected)\r\n        self.connection.add_on_open_error_callback(self.error)\r\n        self.connection.add_on_close_callback(self.on_closed)\r\n\r\n    def error(self, conn):\r\n        logging.error('socket error', conn)\r\n        pass\r\n\r\n    def on_connected(self, conn):\r\n        logging.error('hehe, connected')\r\n        self.connected = True\r\n        self.connection = conn\r\n        self.connection.channel(channel_number = 1, on_open_callback = self.on_channel_open)\r\n\r\n    def on_channel_open(self, channel):\r\n        self.channel = channel\r\n\r\n    def on_closed(self, conn, c):\r\n        logging.error('pika close!')\r\n        self.io_loop.stop()\r\n\r\n\r\nclass ConnPikaClient(object):\r\n    def __init__(self):\r\n        self.queue_name = \"queue-%s\" % (id(self),)\r\n\r\n        # Default values\r\n        self.connected = False\r\n        self.connecting = False\r\n        self.connection = None\r\n        self.channel = None\r\n        self.io_loop = None\r\n        
self.exchange = None\r\n\r\n        # Websocket object.\r\n        self.websocket = None\r\n\r\n    def setup_exchange(self):\r\n        self.channel.exchange_declare(\r\n            callback=self.on_exchange_declared,\r\n            exchange=self.exchange,\r\n            exchange_type='fanout',\r\n        )\r\n\r\n    def on_exchange_declared(self, unused_frame):\r\n        logging.error('PikaClient: Exchange Declared, Declaring Queue')\r\n        self.channel.queue_declare(auto_delete=True,\r\n                                   queue=self.queue_name,\r\n                                   durable=False,\r\n                                   exclusive=True,\r\n                                   callback=self.on_queue_declared)\r\n\r\n    def on_queue_declared(self, frame):\r\n        logging.error('PikaClient: Queue Declared, Binding Queue')\r\n        self.channel.queue_bind(exchange=self.exchange,\r\n                                queue=self.queue_name,\r\n                                routing_key='',\r\n                                callback=self.on_queue_bound)\r\n\r\n    def on_queue_bound(self, frame):\r\n        logging.error('PikaClient: Queue Bound, Issuing Basic Consume')\r\n        self.ctag = self.channel.basic_consume(on_message_callback=self.on_pika_message,\r\n                                               queue=self.queue_name)\r\n\r\n    def on_pika_message(self, channel, method, header, body):\r\n        logging.error('PikaClient: Message received, delivery tag #%i' % \\\r\n                      method.delivery_tag)\r\n        self.websocket.write_message(body)\r\n\r\n    def on_basic_cancel(self):\r\n        logging.error('PikaClient: Basic Cancel Ok')\r\n        self.channel.queue_delete(queue=self.queue_name)\r\n\r\n    def on_closed(self, connection):\r\n        self.io_loop.stop()\r\n\r\n    def sample_message(self, exchange, message):\r\n        properties = pika.BasicProperties(content_type=\"text/plain\", delivery_mode=1)\r\n        self.channel.basic_publish(exchange=exchange,\r\n                                   routing_key='',\r\n                                   body=message,\r\n                                   properties=properties)\r\n\r\n\r\n\r\n","repo_name":"yinpengdt/Chatroom","sub_path":"client/rabbitmq.py","file_name":"rabbitmq.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34044140259","text":"# -*- coding: utf-8 -*-\nfrom math import ceil\n\nimport gym\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch import optim\n\nfrom env import SkipframeWrapper\n\n\nclass EpisodeData(object):\n\n    def __init__(self):\n        self.fields = [\n            'states', 'actions', 'rewards', 'dones', 'log_probs', 'next_states'\n        ]\n        for f in self.fields:\n            setattr(self, f, [])\n        self.total_rewards = 0\n\n    def add_record(self,\n                   state,\n                   action,\n                   reward,\n                   done,\n                   log_prob=None,\n                   next_state=None):\n        self.states.append(state)\n        self.actions.append(action)\n        self.log_probs.append(log_prob)\n        self.dones.append(done)\n        self.rewards.append(reward)\n        self.next_states.append(next_state)\n        self.total_rewards += reward\n\n    def get_states(self):\n        return np.array(self.states)\n\n    def get_actions(self):\n        return np.array(self.actions)\n\n    def steps(self):\n        return len(self.states)\n\n    def calc_qs(self, pre_model, gamma):\n        next_states = torch.tensor(np.array(self.next_states)).float()\n        next_qs = pre_model(next_states).max(dim=-1).values\n        masks = torch.tensor(np.array(self.dones) == 0)\n\n        rewards = torch.tensor(np.array(self.rewards)).view(-1)\n        qs = rewards + gamma * next_qs * masks\n\n        return qs.detach().float()\n\n\nclass DQN(object):\n\n    def __init__(self,\n                 env,\n                 model,\n                 lr=1e-5,\n                 optimizer='adam',\n                 device='cpu',\n                 deterministic=False,\n                 gamma=0.95,\n                 n_replays=4,\n                 batch_size=200,\n                 model_kwargs=None,\n                 exploring=None,\n                 n_trained_times=1,\n                 n_buffers=32,\n                 model_prefix=\"dqn\"):\n        self.env = env\n        self.model = model\n        self.lr = lr\n        self.optimizer = optimizer\n        self.device = device\n        self.deterministic = 
deterministic\n self.gamma = gamma\n self.n_replays = n_replays\n self.batch_size = batch_size\n self.model_kwargs = model_kwargs\n if optimizer == 'adam':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n elif optimizer == 'sgd':\n self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr)\n\n self.exploring = exploring\n self.n_trained_times = n_trained_times\n\n if self.model_kwargs:\n self.pre_model = self.model.__class__(**self.model_kwargs)\n else:\n self.pre_model = self.model.__class__()\n\n self.data_buffer = []\n self.n_buffers = n_buffers\n self.model_prefix = model_prefix\n\n self.copy_model()\n\n def gen_epoch_data(self, n_steps=1024, exploring=0., done_penalty=0):\n state = self.env.reset()\n done = False\n epoch_data = EpisodeData()\n\n self.model.eval()\n steps = 0\n\n for _ in range(n_steps):\n steps += 1\n\n qs = self.model(torch.tensor(state[np.newaxis, :]).float())\n\n if exploring and np.random.rand() <= exploring:\n action = self.env.action_space.sample()\n else:\n action = qs[0].argmax().item()\n\n next_state, reward, done, _ = self.env.step(int(action))\n if done and done_penalty:\n reward -= done_penalty\n\n epoch_data.add_record(state,\n action,\n reward,\n 1 if done else 0,\n next_state=next_state)\n state = next_state\n\n if done:\n state = self.env.reset()\n\n return epoch_data\n\n def get_exploring(self, need_exploring=False, mexp=0.1):\n if need_exploring:\n return max(mexp, self.n_trained_times**(-0.5))\n if isinstance(self.exploring, float):\n return self.exploring\n elif self.exploring == 'quadratic_decrease':\n return max(0.01, self.n_trained_times**(-0.5))\n\n return 0.01\n\n def copy_model(self):\n self.pre_model.load_state_dict(self.model.state_dict())\n self.pre_model.eval()\n\n def train(self, epoch_data):\n total_loss = 0.\n qs = epoch_data.calc_qs(self.pre_model, gamma=0.95).to(self.device)\n states = torch.tensor(epoch_data.get_states()).float().to(self.device)\n actions = torch.tensor(epoch_data.get_actions()[:, np.newaxis]).to(\n self.device)\n\n n_batches = ceil(len(epoch_data.states) / self.batch_size)\n indices = torch.randperm(len(epoch_data.states)).to(self.device)\n for b in range(n_batches):\n batch_indices = indices[b * self.batch_size:(b + 1) *\n self.batch_size]\n batch_states = states[batch_indices]\n batch_actions = actions[batch_indices]\n batch_qs = qs[batch_indices]\n\n qs_pred = self.model(batch_states).gather(1,\n batch_actions).view(-1)\n loss_func = nn.MSELoss()\n loss = loss_func(batch_qs, qs_pred)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n total_loss += loss.item()\n\n return total_loss / n_batches\n\n def learning(self, n_epoches=100, n_steps=1024):\n self.model.train()\n\n max_reward = -10000.\n decay_reward = 0\n decay = 0.95\n\n for n in range(n_epoches):\n # generate new data\n new_data = self.gen_epoch_data(n_steps=n_steps,\n exploring=self.get_exploring()\n if not self.deterministic else 0.)\n self.data_buffer.insert(0, new_data)\n if len(self.data_buffer) > self.n_buffers:\n self.data_buffer = self.data_buffer[:self.n_buffers]\n\n # training\n for data in self.data_buffer[::-1]:\n loss = self.train(data)\n\n # update static model\n self.copy_model()\n\n # show training information\n decay_reward = new_data.total_rewards if decay_reward == 0 else (\n decay_reward * decay + new_data.total_rewards * (1 - decay))\n\n if max_reward < decay_reward:\n max_reward = decay_reward\n torch.save(self.model.state_dict(),\n f'./models/{self.model_prefix}-success-v{n}.pt')\n\n if 
n % 10 == 0:\n print(\n f'round: {n:>3d} | loss: {loss:>5.3f} | '\n f'pre reward: {decay_reward:>5.2f}',\n flush=True)\n\n\nclass ModuleInitMixin:\n\n def _initialize_weights(self):\n for module in self.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.normal_(module.weight, 0, 0.05)\n nn.init.normal_(module.bias, 0, 0.1)\n\n\nclass DARModel(ModuleInitMixin, nn.Module):\n\n def __init__(self, device='cpu') -> None:\n super().__init__()\n self.fc = nn.Sequential(\n nn.Linear(128, 128),\n nn.ReLU(),\n nn.Linear(128, 128),\n nn.ReLU(),\n nn.Linear(128, 64),\n nn.ReLU(),\n nn.Linear(64, 6),\n )\n self.device = device\n self._initialize_weights()\n\n def forward(self, x):\n if isinstance(x, np.ndarray):\n x = torch.tensor(x).float()\n\n x = x.to(self.device)\n return self.fc(x)\n\n\nif __name__ == '__main__':\n\n device = 'cpu' if not torch.cuda.is_available() else 'cuda'\n env = SkipframeWrapper(env=gym.make('DemonAttack-ram-v0'), n_max_nops=60)\n model = DARModel(device)\n dqn = DQN(env=env,\n model=model,\n exploring='quadratic_decrease',\n device=device,\n lr=5e-4,\n gamma=0.95,\n model_prefix='dqndar',\n batch_size=25)\n\n model = model.to(device)\n\n import os\n try:\n model_base = './models/'\n if not os.path.exists(model_base):\n os.makedirs(model_base)\n\n dqn.learning(11, 256)\n\n torch.save(model.state_dict(), './models/dqn-dar-train-final.pt')\n except Exception as e:\n torch.save(model.state_dict(), './models/dqn-dar-train-except.pt')\n raise\n","repo_name":"Wenbing-Yao/paper-sharing","sub_path":"reinforcement-learning/dqn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"7409032629","text":"#!/usr/bin/env python\n\n\"\"\"\nAuthor: Nick Russo\nPurpose: Adds new network objects to the FTD sandbox.\nCheck out the API explorer at \"https:///#/api-explorer\"\n\"\"\"\n\nimport json\nimport requests\nfrom auth_token import get_token\n\n\ndef main():\n \"\"\"\n Execution begins here.\n \"\"\"\n\n # The FTD sandbox uses a self-signed cert at present, so let's ignore any\n # obvious security warnings for now.\n requests.packages.urllib3.disable_warnings()\n\n # The API path below is what the DevNet sandbox uses for API testing,\n # which may change in the future. Be sure to check the IP address as\n # I suspect this changes frequently. See here for more details:\n # https://developer.cisco.com/firepower/\n api_path = \"https://10.10.20.65/api/fdm/latest\"\n token = get_token(api_path)\n\n # To authenticate, we issue a POST request with our username/password\n # as a JSON body to obtain a bearer token in response.\n post_headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\",\n }\n\n # Load the new network objects to be added as Python objects\n with open(\"json_state/new_netobjs.json\", \"r\") as handle:\n network_objs = json.load(handle)\n\n # Loop over network objects and issue a POST request for each\n # network object to add. Raise HTTPError if anything fails\n for net in network_objs:\n post_resp = requests.post(\n f\"{api_path}/object/networks\",\n headers=post_headers,\n json=net,\n verify=False,\n )\n\n # Print object details if success or error fails if failed. 
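The DQN record above delegates its regression targets to `epoch_data.calc_qs(self.pre_model, ...)`, whose body is not shown (and which is called with a hard-coded `gamma=0.95` rather than `self.gamma`). A minimal sketch of the standard TD target it presumably computes; the tensor names and shapes here are assumptions, not the actual `EpisodeData` API:

```python
import torch

def td_targets(rewards, dones, next_states, target_model, gamma=0.95):
    # q = r + gamma * (1 - done) * max_a Q_target(s'); the target network is
    # kept frozen, which is why the class maintains a separate pre_model copy.
    with torch.no_grad():
        next_q = target_model(next_states).max(dim=1).values
    return rewards + gamma * (1.0 - dones) * next_q
```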
We\n        # don't want to call raise_for_status(), which halts the entire process\n        # even if a single element fails\n        if post_resp.ok:\n            net_json = post_resp.json()\n            print(f\"Added {net_json['name']} network object at {net_json['id']}\")\n        else:\n            print(f\"Couldn't add {net['name']} network object\")\n            print(f\" Details: {post_resp.status_code} / {post_resp.reason}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"nickrusso42518/pluralsight","sub_path":"devcor2/m5/ftd/add_netobjs.py","file_name":"add_netobjs.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"4527644505","text":"from django.contrib.auth.decorators import login_required\nfrom .forms import *\nfrom django.http import HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.shortcuts import render\nfrom .models import *\nfrom django.conf import settings\nimport oauth2 as oauth\nimport urllib.parse\nimport twitter\nfrom django.views.generic import CreateView\n\n@login_required\ndef home(request):\n\tcurrent_user = request.user\n\n\tform = ThreadForm(request.POST)\n\tif not request.user.post_permissions:\n\t\treturn render(request, 'core/permissions.html')\n\n\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\ttext = form.cleaned_data['text']\n\t\t\tnew_thread = Thread(text=text)\n\t\t\tif 'save' not in request.POST:\n\t\t\t\tposttweets(current_user, text)\n\t\t\tnew_thread.save()\n\t\t\tcurrent_user.threads.add(new_thread)\n\n\t\t\treturn HttpResponseRedirect('/', RequestContext(request))\n\telse:\n\t\tform = ThreadForm()\n\n\n\n\treturn render(request, 'core/home.html', {'form': form})\n\nclass ThreadCreateView(CreateView):\n\tmodel = Thread\n\tfields = ('text',)\n\n\n@login_required\ndef threeleggedauth1(request):\n\trequest_token_url = 'https://api.twitter.com/oauth/request_token'\n\tauthorize_url = 'https://api.twitter.com/oauth/authorize'\n\tcallback = settings.BASE_URL + \"posting-approved\"\n\n\tconsumer = oauth.Consumer(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)\n\tclient = oauth.Client(consumer)\n\n\tresp, content = client.request(request_token_url, \"POST\", body=urllib.parse.urlencode({'oauth_callback': callback}))\n\n\trequest_token = dict(urllib.parse.parse_qsl(content.decode(\"utf-8\")))\n\trequest.user.request_token = request_token['oauth_token']\n\trequest.user.request_token_secret = request_token['oauth_token_secret']\n\trequest.user.save()\n\n\tuser_auth_url = \"{0}?oauth_token={1}\".format(authorize_url, request_token['oauth_token'])\n\n\treturn HttpResponseRedirect(user_auth_url)\n\n@login_required\ndef threeleggedauth2(request):\n\n\toauth_verifier = request.GET.get('oauth_verifier')\n\taccess_token_url = 'https://api.twitter.com/oauth/access_token'\n\n\trequest_token = request.user.request_token\n\trequest_token_secret = request.user.request_token_secret\n\tconsumer = oauth.Consumer(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)\n\ttoken = oauth.Token(request_token, request_token_secret)\n\ttoken.set_verifier(oauth_verifier)\n\tclient = oauth.Client(consumer, token)\n\n\tresp, content = client.request(access_token_url, \"POST\")\n\taccess_token = dict(urllib.parse.parse_qsl(content.decode(\"utf-8\")))\n\n\trequest.user.access_token = access_token['oauth_token']\n\trequest.user.access_token_secret = access_token['oauth_token_secret']\n\trequest.user.post_permissions = True\n\trequest.user.save()\n\n\treturn 
HttpResponseRedirect('/', RequestContext(request))\n\n\n\n\n\ndef posttweets(user, text):\n\tmessages = []\n\tapi = twitter.Api(consumer_key=settings.TWITTER_CONSUMER_KEY,\n\t\t\t\t\t consumer_secret=settings.TWITTER_CONSUMER_SECRET,\n\t\t\t\t\t access_token_key=user.access_token,\n\t\t\t\t\t access_token_secret=user.access_token_secret)\n\tif len(text) > 280:\n\t\tlast_id = -1\n\t\twhile 280 < len(text):\n\t\t\tif(280 >= len(text)):\n\t\t\t\tbreak\n\n\t\t\tend = text[277:281]\n\t\t\tresp = api.PostUpdate(text[:277] + '...', in_reply_to_status_id=last_id)\n\t\t\tlast_id = resp.id\n\t\t\ttext = end + text[281:]\n\n\t\tapi.PostUpdate(text, in_reply_to_status_id=last_id)\n\telse:\n\t\tapi.PostUpdate(text)\n\n\treturn messages\n","repo_name":"JWatkins20/twitter_thread","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21046286314","text":"from collections import OrderedDict\nimport json\nimport requests\nimport traceback\nimport urllib\n\nfrom django.conf import settings\n\nimport serpng.common.abtest\nimport serpng.jobs.services.search.search_result\nimport serpng.jobs.services.search.search_result_sj # For SJ Ads A/B test.\nimport serpng.lib.Cookie\nimport serpng.lib.exceptions\nimport serpng.lib.http_utils\nimport serpng.lib.logging_utils\nimport serpng.lib.speed_logging_utils\n\n\ndef _get_bridge_response(bridge_url, headers):\n \"\"\"Return bridge response.\"\"\"\n return requests.get(bridge_url, headers=headers, allow_redirects=False, timeout=settings.BRIDGE_TIMEOUT_IN_SECONDS)\n\n\ndef search(request, query):\n # pylint: disable=R0912\n \"\"\"\n Makes call to php-platform bridge to request search results for passed-in query.\n\n Args:\n request: Django request object\n query: request params dictionary from url\n\n Returns:\n A tuple of bridge response headers, result (a SearchResult object), and\n user_data (a UserData object).\n\n Raises:\n serpng.lib.exceptions.NoQueryTermsError: when the passed-in query is empty.\n requests.exceptions.HTTPError: when there is an error getting a response from\n the PHP bridge.\n serpng.lib.exceptions.PHPBridgeError: when there is an error decoding the JSON\n response from the PHP bridge.\n serpng.lib.exceptions.BadSearcherResultsError: when the JSON response from the\n PHP bridge indicates that the search result is bad, or if there are no filters\n in the search result, or if there are no jobs in the search result.\n \"\"\"\n if not query:\n # No query terms; don't bother connecting to PHP bridge.\n raise serpng.lib.exceptions.NoQueryTermsError()\n\n bridge_search_query = query.get_query_path(True)\n bridge_query_param_string = '&'.join('%s=%s' % (k, urllib.quote(v, '')) for k, v in request.GET.items())\n\n bridge_url = 'http://%s/a/jobs/list/%s?%s' % (settings.BRIDGE_HOSTNAME, bridge_search_query, bridge_query_param_string)\n bridge_request_headers = serpng.lib.http_utils.get_http_headers(request)\n\n # Build bridge hostname based on language code.\n base_language = request.language_code.get_base_language()\n country_code = request.language_code.get_country_code()\n language_prefix = '' if base_language == 'en' else '-' + base_language\n country_suffix = 'com' if country_code == 'us' else country_code\n bridge_hostname = 'internal%s.simplyhired.%s' % (language_prefix, country_suffix)\n\n bridge_request_headers['host'] = bridge_hostname\n bridge_request_headers['accept-encoding'] = 'gzip'\n\n # set 
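The chunking loop in `posttweets()` above splits long text at 277 characters, appends a `...` marker, and threads each chunk as a reply to the previous tweet. The same rule as a standalone helper (the 280/277 limits are the record's own assumptions, not a documented `twitter.Api` contract):

```python
def split_thread(text, limit=280, head=277):
    # Returns the tweet bodies posttweets() would send, in posting order.
    chunks = []
    while len(text) > limit:
        chunks.append(text[:head] + '...')
        text = text[head:]
    chunks.append(text)
    return chunks

assert split_thread('a' * 300) == ['a' * 277 + '...', 'a' * 23]
```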
original request URI to header and ask to be redirected to new serp url format\n if(hasattr(request, 'original_request_uri')):\n bridge_request_headers['x-serpng-original-request-uri'] = request.original_request_uri\n\n # If the A/B framework has updated the \"shab\" cookie in any way, we now\n # need to replace it with the new value in the bridge request headers.\n #\n shab_cookie = request.abtest_manager.get_cookie_morsel_for_bridge()\n if shab_cookie:\n cookie_header = bridge_request_headers.get('cookie', None)\n bridge_request_cookies = serpng.lib.Cookie.SimpleCookie(cookie_header)\n bridge_request_cookies['shab'] = shab_cookie.value\n bridge_request_headers['cookie'] = '; '.join('%s=%s' % (k, v.value) for k, v in bridge_request_cookies.items())\n\n # Log bridge being sent to PHP bridge\n serpng.lib.logging_utils.log(\n module_name=__name__,\n log_level=\"DEBUG\",\n log_msg=\"search bridge sent to PHP bridge\",\n log_dict=OrderedDict([('bridge-url', bridge_url)])\n )\n\n try:\n serpng.lib.speed_logging_utils.mark_php_bridge_begins(request)\n bridge_response = _get_bridge_response(bridge_url=bridge_url, headers=bridge_request_headers)\n serpng.lib.speed_logging_utils.mark_php_bridge_ends(request)\n except requests.exceptions.HTTPError:\n # PHP bridge HTTP connection failed\n raise serpng.lib.exceptions.PHPBridgeError(\n error_msg=\"PHP bridge connection error: \",\n error_info_dict=OrderedDict([('bridge-url', bridge_url)]),\n error_traceback=traceback.format_exc()\n )\n\n # If the response contains the A/B test cookie (i.e., the 'shab' cookie),\n # then we need to use it to update the A/B Test Manager before removing it.\n #\n # We use the Python Cookie module here to parse the cookies since the\n # requests module doesn't seem to do it correctly -- it loses some cookies,\n # meaning that the bridge_response.cookies dictionary doesn't have all the\n # cookies we expect.\n #\n # Unfortunately, even the Cookie module doesn't parse our cookies\n # perfectly-- in in some cases, the parsed cookie properties end up with\n # commas at the end of the 'domain' and 'path', so we need to manually clean\n # those properties up.\n #\n if 'set-cookie' in bridge_response.headers:\n cookies = serpng.lib.Cookie.SimpleCookie(bridge_response.headers['set-cookie'])\n for morsel in cookies.values():\n if morsel.key == 'shab':\n request.abtest_manager.reload_cookie(morsel.value, True)\n del cookies['shab']\n continue\n\n if morsel['domain'].endswith(','):\n morsel['domain'] = morsel['domain'][:-1]\n\n if morsel['path'].endswith(','):\n morsel['path'] = morsel['path'][:-1]\n\n bridge_response.headers['set-cookie'] = ', '.join(m.OutputString() for m in cookies.values())\n\n # Handle redirect requests from the bridge.\n if bridge_response.status_code == 301:\n raise serpng.lib.exceptions.Http301(bridge_response.headers['Location'], bridge_response.headers)\n elif bridge_response.status_code == 302:\n raise serpng.lib.exceptions.Http302(bridge_response.headers['Location'], bridge_response.headers)\n\n # Parse bridge response.\n try:\n json_response = json.loads(bridge_response.text, object_pairs_hook=OrderedDict)\n except:\n # JSON string decode failed\n raise serpng.lib.exceptions.PHPBridgeError(\n error_msg=\"PHP bridge JSON response decode error\",\n error_info_dict=OrderedDict([('bridge-url', bridge_url),\n ('bridge-response', bridge_response.text)]),\n error_traceback=traceback.format_exc()\n )\n\n search_result_json = json_response.get('search_result', {})\n\n ### For SJ Ads A/B test. 
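The trailing-comma cleanup above exists because multiple `Set-Cookie` headers are joined with commas before parsing. A self-contained sketch of that repair step using the standard-library `http.cookies` (the project's `serpng.lib.Cookie` wrapper is assumed to behave the same way):

```python
from http.cookies import SimpleCookie

def clean_joined_cookies(set_cookie_header):
    # Parse a comma-joined Set-Cookie header, then strip the stray commas
    # that can be left on each morsel's 'domain' and 'path' attributes.
    cookies = SimpleCookie(set_cookie_header)
    for morsel in cookies.values():
        for attr in ('domain', 'path'):
            if morsel[attr].endswith(','):
                morsel[attr] = morsel[attr][:-1]
    return ', '.join(m.OutputString() for m in cookies.values())
```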
###\n search_result_sj_json = json_response.get('search_result_sj', None)\n ############################\n\n if not search_result_json.get('results_good') and not 'primary_parametric_fields' in search_result_json:\n raise serpng.lib.exceptions.BadSearcherResultsError(\n error_msg=\"Bad results from searcher\",\n error_info_dict=OrderedDict([('bridge-url', bridge_url)]),\n search_result=serpng.jobs.services.search.search_result.BadSearchResult(\n search_result_json.get('google_adsense_keywords')),\n bridge_response=bridge_response,\n result_error_code=search_result_json.get('error_code', 'no-error-code'),\n result_error_title=search_result_json.get('error_title'),\n result_error_subtitle=search_result_json.get('error_subtitle'),\n result_error_text=search_result_json.get('error_text'),\n search_title=(\n '' if not search_result_json.get('search_title')\n else (search_result_json.get('search_title', '').title() or '').replace('Jobs In', 'Jobs in')),\n page_title=(\n 'Jobs' if not search_result_json.get('page_title')\n else search_result_json.get('page_title').replace('Jobs - ', 'Jobs in '))\n )\n else:\n # We got either:\n # - good search results - obtain data from the JSON object and construct a\n # full SearchResult object with the data.\n # - empty search result - good input data, but no jobs found from searcher\n # full SearchResult object with empty job\n\n # Construct user data from JSON object obtained from the bridge.\n user_data = serpng.jobs.services.search.user_data.UserData(json_response)\n result = serpng.jobs.services.search.search_result.SearchResult(request, search_result_json, bridge_search_query)\n \n ### For SJ Ads A/B test. ###\n result_sj = serpng.jobs.services.search.search_result_sj.SearchResultSJ(search_result_sj_json, bridge_search_query)\n ############################\n\n if result.total_job_count > 0:\n # Log return value result\n serpng.lib.logging_utils.log(\n module_name=__name__,\n log_level=\"DEBUG\",\n log_msg=\"search result from PHP bridge\",\n log_dict=OrderedDict([('search-title', result.title),\n ('num-results', result.total_job_count)])\n )\n else:\n raise serpng.lib.exceptions.BadSearcherResultsError(\n error_msg=\"Bad results from searcher\",\n error_info_dict=OrderedDict([('bridge-url', bridge_url)]),\n search_result=result,\n bridge_response=bridge_response,\n result_error_code=search_result_json.get('error_code', 'no-error-code'),\n result_error_title=search_result_json.get('error_title'),\n result_error_subtitle=search_result_json.get('error_subtitle'),\n result_error_text=search_result_json.get('error_text'),\n search_title=('' if not search_result_json.get('search_title')\n else (search_result_json.get('search_title', '') or '').replace('jobs in', 'jobs -')),\n page_title='Jobs' if not search_result_json.get('page_title')\n else search_result_json.get('page_title')\n )\n\n # result_sj added for SJ Ads A/B test.\n return (bridge_response.headers, result, result_sj, user_data)\n","repo_name":"alyago/django-web","sub_path":"web-serpng/code/serpng/jobs/services/search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":10044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18173180611","text":"\"\"\"\nFunctionality for visualizing overlay computations. 
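The search-service record above decodes the bridge response with `object_pairs_hook=OrderedDict`, pinning down key order regardless of Python version; a two-line illustration:

```python
import json
from collections import OrderedDict

doc = json.loads('{"b": 1, "a": 2}', object_pairs_hook=OrderedDict)
assert list(doc) == ['b', 'a']  # insertion order is preserved explicitly
```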
Probably the best way to understand what\nis going on during an overlay computation.\n\"\"\"\n\nimport itertools\nfrom typing import Iterator, List, NamedTuple, Optional, Tuple, cast\n\nimport more_itertools\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.axes import Axes\n\nfrom gance.data_into_network_visualization import visualization_common\nfrom gance.gance_types import ImageSourceType\nfrom gance.logger_common import LOGGER\n\n\nclass OverlayContext(NamedTuple):\n \"\"\"\n Component parts of an overlay computation that lead to the decision to write the overlay\n or not.\n \"\"\"\n\n # If the overlay should be written\n overlay_written: bool = False\n\n # The following are params considered when computing the overlay.\n\n # How visually similar the foreground and background are.\n image_perceptual_hash_distance: Optional[float] = None\n\n # How visually similar the bounding box regions of the foreground and background are.\n bbox_perceptual_hash_distance: Optional[float] = None\n\n # For eye-tracking, how close the eye bounding boxes in the fore/background images\n # are.\n bbox_distance: Optional[float] = None\n\n\nclass VisualizeOverlayThresholds(NamedTuple):\n \"\"\"\n Set of threshold values to label on an overlay.\n \"\"\"\n\n phash_line: float\n bbox_distance_line: float\n\n\nclass YValues(NamedTuple):\n \"\"\"\n Defines a scatter plot on an axes.\n \"\"\"\n\n values: List[float]\n color: str\n label: str\n\n\ndef _setup_axis(\n axis: Axes,\n x_values: np.ndarray,\n y_values: List[YValues],\n title: str,\n horizontal_line_location: Optional[float],\n visualize_all_points: bool,\n y_label: str = \"Values\",\n x_label: str = \"Frame #\",\n) -> Tuple[float, float]:\n \"\"\"\n Helper function to set up axes for plotting.\n :param axis: To configure.\n :param x_values: X values of sub-scatter plots.\n :param y_values: List of y values, and data about the values to plot.\n :param title: Axes title.\n :param horizontal_line_location: If given, a horizontal line will be drawn at this location.\n :param visualize_all_points: If given, y axis will be set to show all points on each subplot.\n If false, only +/- 2 standard deviations from the mean will be visualized.\n :param y_label: Y label of axes.\n :param x_label: X label of axes.\n :return: Tuple, (min, max) values given in `y_values`.\n \"\"\"\n\n for values in y_values:\n axis.scatter(\n x_values,\n values.values,\n color=values.color,\n label=values.label,\n )\n\n axis.set_title(title)\n axis.set_ylabel(y_label)\n axis.set_xlabel(x_label)\n axis.grid()\n axis.legend(loc=\"upper right\")\n\n all_y_values = list(\n filter(None, itertools.chain.from_iterable(values.values for values in y_values))\n )\n\n if all_y_values:\n if visualize_all_points:\n axis_min = min(all_y_values) - 5\n axis_max = max(all_y_values) + 5\n else:\n mean = np.mean(all_y_values)\n std = np.std(all_y_values)\n axis_min = mean - 2 * std\n axis_max = mean + 2 * std\n else:\n axis_min = -5\n axis_max = 5\n\n axis.set_ylim(axis_min, axis_max)\n\n axis.hlines(\n y=horizontal_line_location,\n xmin=min(x_values) - 5,\n xmax=max(x_values) + 5,\n linestyles=\"dotted\",\n color=\"purple\",\n )\n\n return axis_min, axis_max\n\n\ndef visualize_overlay_computation( # pylint: disable=too-many-locals\n overlay: Iterator[OverlayContext],\n frames_per_context: int,\n video_square_side_length: Optional[int],\n horizontal_lines: Optional[VisualizeOverlayThresholds] = None,\n visualize_all_points: bool = True,\n) -> ImageSourceType:\n \"\"\"\n Consumes 
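`_setup_axis()` above chooses its y-limits in one of two ways; the same rule isolated from the plotting code, with the record's ±5 padding:

```python
import numpy as np

def axis_limits(values, visualize_all_points, pad=5.0):
    # Either show every point with a small pad, or clamp the view to
    # mean +/- 2 standard deviations so outliers don't flatten the plot.
    values = [v for v in values if v is not None]
    if not values:
        return -pad, pad
    if visualize_all_points:
        return min(values) - pad, max(values) + pad
    mean, std = np.mean(values), np.std(values)
    return mean - 2 * std, mean + 2 * std
```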
the contexts from an overlay computation and produces a visualization of the\n component parts.\n :param overlay: To write visualize.\n :param frames_per_context: The number of adjacent frames to visualize in each frame.\n :param video_square_side_length: Video is composed of a 3x2 grid of square sub-videos,\n each with a side length of this many pixels.\n :param horizontal_lines: Labeled lines to help understand computations.\n :param visualize_all_points: If True, the y axis of each subplot will be stretched such that\n all points in the underlying will be visualized. If false, the y axis will be set to +/- 2\n standard deviations from the mean.\n :return: The frames of the visualization.\n \"\"\"\n\n fig = visualization_common.standard_matplotlib_figure()\n\n hash_axis, bbox_distance_axis = fig.subplots(nrows=2, ncols=1)\n\n frame_count = itertools.count()\n\n for group_of_frames in more_itertools.grouper(overlay, frames_per_context):\n\n current: Iterator[OverlayContext] = filter(None, group_of_frames)\n\n # When we unzip here, the left side of the equation are all lists!\n # So it's okay to iterate over them more than once.\n (\n flags,\n bbox_perceptual_hash_distances,\n image_perceptual_hash_distances,\n bounding_box_distances,\n ) = zip(*current)\n\n num_frames = len(flags)\n x_axis = np.arange(num_frames)\n\n hash_axis_min, hash_axis_max = _setup_axis(\n axis=hash_axis,\n x_values=x_axis,\n y_values=[\n YValues(\n values=cast(List[float], bbox_perceptual_hash_distances),\n color=\"red\",\n label=\"Bounding Boxes\",\n ),\n YValues(\n values=cast(List[float], image_perceptual_hash_distances),\n color=\"blue\",\n label=\"Complete Image\",\n ),\n ],\n title=\"Overlay Discriminator (Image Hashing)\",\n horizontal_line_location=horizontal_lines.phash_line if horizontal_lines else None,\n visualize_all_points=visualize_all_points,\n )\n\n bbox_axis_min, bbox_axis_max = _setup_axis(\n axis=bbox_distance_axis,\n x_values=x_axis,\n y_values=[\n YValues(\n values=cast(List[float], bounding_box_distances),\n color=\"green\",\n label=\"Bounding Box Distance\",\n )\n ],\n title=\"Overlay Discriminator (Face Tracking)\",\n horizontal_line_location=horizontal_lines.bbox_distance_line\n if horizontal_lines\n else None,\n visualize_all_points=visualize_all_points,\n )\n\n plt.tight_layout()\n\n video_half_resolution = (video_square_side_length, video_square_side_length)\n\n for inter_group_index, flag in enumerate(flags):\n\n LOGGER.info(f\"Visualizing overlay for frame #{next(frame_count)}\")\n\n line_color = \"green\" if flag else \"red\"\n\n hash_line = hash_axis.vlines(\n x=inter_group_index, ymin=hash_axis_min, ymax=hash_axis_max, color=line_color\n )\n\n bbox_line = bbox_distance_axis.vlines(\n x=inter_group_index, ymin=bbox_axis_min, ymax=bbox_axis_max, color=line_color\n )\n\n yield visualization_common.render_current_matplotlib_frame(\n fig=fig, resolution=video_half_resolution\n )\n\n hash_line.remove()\n bbox_line.remove()\n\n for axes in fig.axes:\n axes.clear()\n","repo_name":"esologic/GANce","sub_path":"gance/overlay/overlay_visualization.py","file_name":"overlay_visualization.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"18212949664","text":"\"\"\"added genres column to venue table\n\nRevision ID: 539ad7561b1b\nRevises: 48fd91c530be\nCreate Date: 2020-05-02 14:52:40.190762\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by 
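The frame loop above leans on `more_itertools.grouper` padding the final group with `None`, which is exactly what the `filter(None, group_of_frames)` guard strips back out:

```python
import more_itertools

groups = list(more_itertools.grouper(range(5), 2))
assert groups == [(0, 1), (2, 3), (4, None)]  # the trailing None pads the last group
```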
Alembic.\nrevision = '539ad7561b1b'\ndown_revision = '48fd91c530be'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Venue', sa.Column('genres', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('Venue', 'genres')\n # ### end Alembic commands ###\n","repo_name":"rukantabula/Sqlalchemy_project","sub_path":"migrations/versions/539ad7561b1b_added_genres_column_to_venue_table.py","file_name":"539ad7561b1b_added_genres_column_to_venue_table.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30817821602","text":"from itertools import permutations\r\n\r\n\r\ndef checkInclusion(s1, s2):\r\n if s1 in s2:\r\n return True\r\n\r\n if len(s1) > len(s2):\r\n return False\r\n\r\n perms = [\"\".join(tuples) for tuples in permutations(s1, len(s1))]\r\n\r\n\r\n print(perms)\r\n for options in perms:\r\n if options in s2:\r\n return True\r\n \r\n return False\r\n\r\nprint(checkInclusion(\"trin\", \"dinitrophenylhydrazinetrinitrophenylmethylnitramine\"))\r\n\r\n\r\n","repo_name":"GazPrash/Leetcode_QS","sub_path":"permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28575641950","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.utils import np_utils\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import cross_validation, metrics\nimport json\nimport numpy\n\n# max_words = 67395 # 入力単語数\nnb_classes = 3 # 3カテゴリを分類\n\nbatch_size = 64 # 1回の学習で利用するサンプル数(バッチ処理)\nnb_epoch = 20 # サンプルの読み込み回数\n\n\n# MLPのモデルを生成\ndef build_model():\n global max_words\n model = Sequential() # レイヤーの線形スタック\n # .addでレイヤーの積み重ね\n model.add(Dense(512, input_shape=(max_words,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n return model\n\n# データを読み込み\ndata = json.load(open(\"./text/data.json\"))\n# print('data: {}'.format(data))\n# data = json.load(open(\"./newstext/data.json\"))\nX = numpy.array(data[\"X\"]) # テキストを表すデータ\nY = numpy.array(data[\"Y\"]) # カテゴリデータ\nmax_words = len(X[0])\n# print('X: {}, Y: {}'.format(X, Y))\n# 学習\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y)\n# テキストの訓練データとテストデータ, カテゴリデータの訓練データとテストデータ\nY_train = np_utils.to_categorical(Y_train, nb_classes)\nprint(len(X_train), len(Y_train))\nmodel = KerasClassifier(\n build_fn=build_model,\n nb_epoch=20,\n batch_size=batch_size\n)\n# print('X_train: {}, Y_train: {}'.format(X_train, Y_train))\nmodel.fit(X_train, Y_train)\n\n# 予測\ny = model.predict(X_test)\nac_score = metrics.accuracy_score(Y_test, y)\ncl_report = metrics.classification_report(Y_test, y)\nprint(\" 正解率=\", ac_score)\nprint(\" レポート=\\n\", cl_report)\n","repo_name":"MATOBAD/keras-text-classifier","sub_path":"mlp3_text_classify.py","file_name":"mlp3_text_classify.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14644869100","text":"import os\nfrom dotenv 
import load_dotenv\nimport paho.mqtt.client as mqtt\nimport numpy as np\nimport time\nfrom dataclasses import dataclass\nfrom enum import Enum\n\nload_dotenv()\n\n# Constants for the motor's operation\nBOX_ID = os.getenv(\"BOX_ID\")\nLOOP_INTERVAL = float(os.getenv(\"MOTOR_LOOP_INTERVAL\"))\t\t# seconds\nMIN_SPEED = int(os.getenv(\"MOTOR_MIN_SPEED\"))\nMAX_SPEED = int(os.getenv(\"MOTOR_MAX_SPEED\"))\nMAX_ACCELERATION = int(os.getenv(\"MOTOR_MAX_ACCELERATION\"))\t# maximum change of the motor speed in one control loop\n\nclass Mode(Enum):\n\tCONTINUOUS = 0\n\tDIFFERENTIAL = 1\n\tSAFE = 99\n\nclass MotorStatus:\n\tdef __init__(self):\n\t\tself.running: bool = False\n\t\tself.speed: int = 0\n\nclass ContinuousSettings:\n\tdef __init__(self):\n\t\tself.level: int = MIN_SPEED\n\nclass DifferentialSettings:\n\tdef __init__(self):\n\t\tself.min: int = int(os.getenv(\"MOTOR_SAFE_MIN\"))\n\t\tself.max: int = int(os.getenv(\"MOTOR_SAFE_MAX\"))\n\t\tself.period: float = float(os.getenv(\"MOTOR_SAFE_PERIOD\"))\n\t\tself.ratio: float = float(os.getenv(\"MOTOR_SAFE_RATIO\"))\n\nclass MotorConfig:\n\tdef __init__(self):\n\t\tself.mode: Mode = Mode(Mode.DIFFERENTIAL.value)\n\t\tself.continuous: ContinuousSettings = ContinuousSettings()\n\t\tself.differential: DifferentialSettings = DifferentialSettings()\n\t\tself.safe: DifferentialSettings = DifferentialSettings()\t# same as the differential mode but with default settings\n\n# Parameters for the motor\nstatus = MotorStatus()\nconfig = MotorConfig()\npreviousSpeed = MIN_SPEED\n\n# Constants for the MQTT broker\nMQTT_BROKER_HOST = os.getenv(\"MQTT_BROKER_HOST\")\nMQTT_BROKER_PORT = int(os.getenv(\"MQTT_BROKER_PORT\"))\nMQTT_TOPIC_MOTOR_SPEED_OUT = BOX_ID + \"/motor/speed/out/\"\nMQTT_TOPIC_MOTOR_CONFIG_IN = BOX_ID + \"/motor/config/in/\"\nMQTT_TOPIC_MOTOR_CONFIG_OUT = BOX_ID + \"/motor/config/out/\"\nMQTT_TOPIC_MOTOR_COMMAND_IN = BOX_ID + \"/motor/command/in/\"\nMQTT_TOPIC_MOTOR_COMMAND_OUT = BOX_ID + \"/motor/command/out/\"\n\nclient = mqtt.Client()\n\n\"\"\"\nCallback function called when the connection to the MQTT broker is established.\n\"\"\"\ndef onConnect(client, userdata, flags, rc):\n if rc == 0:\n print(f\"Connected to MQTT Broker at: {MQTT_BROKER_HOST}\")\n else:\n print(\"Connection to MQTT Broker failed\")\n\n\"\"\"\nCalled when an MQTT message is received on the motor command topic.\nParses the message and starts or stops the motor accordingly.\n\"\"\"\ndef onMotorCommand(client, userdata, msg):\n\tfields = msg.payload.decode().split(\",\")\n\tif fields[0] == \"1\":\n\t\tsetMotorRunning(1)\n\telif fields[0] == \"0\":\n\t\tsetMotorRunning(0)\n\n\"\"\"\nCalled when an MQTT message is received on the motor config topic.\nThe message is validated, and if validation passes, the new values are stored to the motor's config.\n\"\"\"\ndef onMotorConfig(client, userdata, msg):\n\ttry:\n\t\tnewConfig = validateConfig(msg.payload.decode())\n\texcept ValueError as e:\n\t\tprint(\"Invalid configuration values: \" + str(e))\n\t\treturn\n\texcept Exception as e:\n\t\tprint(\"An unexpected error occurred: \" + str(e))\n\t\treturn\n\t\n\tglobal config\n\tconfig.mode = newConfig[0]\n\tconfig.continuous.level = newConfig[1]\n\tconfig.differential.min = newConfig[2]\n\tconfig.differential.max = newConfig[3]\n\tconfig.differential.period = newConfig[4]\n\tconfig.differential.ratio = newConfig[5]\n\tprint(\"New motor config stored\")\n\n\"\"\"\nTakes a string and checks if it's a correctly formatted list of comma separated values for the motor's config.\nIf the 
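One detail worth noting about the `validateConfig()` helper here: `Mode(int(fields[0]))` already raises `ValueError` for anything other than 0, 1, or 99, which is what makes the commented-out explicit mode check redundant. Values below are illustrative only:

```python
from enum import Enum

class Mode(Enum):
    CONTINUOUS = 0
    DIFFERENTIAL = 1
    SAFE = 99

assert Mode(99) is Mode.SAFE
try:
    Mode(2)                      # not a member
except ValueError as e:
    print(e)                     # "2 is not a valid Mode"
```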
string and the values it contains are valid, it returns an array of those values cast into correct data types.\nOtherwise an exception is thrown.\n\"\"\"\ndef validateConfig(commaSeparatedValues) -> list:\n\ttry:\n\t\tfields = commaSeparatedValues.split(\",\")\n\t\tconfig = [\n\t\t\tMode(int(fields[0])),\n\t\t\tint(fields[1]),\n\t\t\tint(fields[2]),\n\t\t\tint(fields[3]),\n\t\t\tround(float(fields[4]), 2),\n\t\t\tround(float(fields[5]), 2)\n\t\t]\n\texcept ValueError as e:\n\t\traise ValueError(e)\n\texcept Exception as e:\n\t\traise Exception(e)\n\t\n\t#if not (config[0] == 0 or config[0] == 1 or config[0] == 99):\n\t\t#raise ValueError(f\"Invalid mode: {config[0]}\")\n\tif config[1] < MIN_SPEED or config[1] > MAX_SPEED:\n\t\traise ValueError(f\"Invalid continuous level: {config[0]}\")\n\telif config[2] < MIN_SPEED or config[2] > MAX_SPEED:\n\t\traise ValueError(f\"Invalid minimum differential level: {config[0]}\")\n\telif config[3] < MIN_SPEED or config[3] > MAX_SPEED:\n\t\traise ValueError(f\"Invalid maximum differential level: {config[0]}\")\n\telif config[4] <= 0.0:\n\t\traise ValueError(f\"Non-positive differential period: {config[0]}\")\n\telif config[5] < 0.0 or config[5] > 1.0:\n\t\traise ValueError(f\"Invalid differential ratio: {config[0]}\")\n\t\n\treturn config\n\n\"\"\"\nSets the motor active or inactive.\n\"\"\"\ndef setMotorRunning(run):\n\tglobal status\n\tif run == 1 and not status.running:\n\t\tprint(\"Motor started\")\n\t\tstatus.running = True\n\telif run == 0 and status.running:\n\t\tprint(\"Motor stopped\")\n\t\tstatus.running = False\n\n\"\"\"\nStays in a loop, and while the motor is running, sends a value to the MQTT broker.\n\"\"\"\ndef controlLoop():\n\tglobal status\n\tglobal previousSpeed\n\ttry:\n\t\twhile True:\n\t\t\tif status.running:\n\t\t\t\tstatus.speed = limitAcceleration(getMotorSpeed(config.mode), previousSpeed)\n\t\t\t\tpreviousSpeed = status.speed\n\t\t\t\tprintMotorSpeed()\n\n\t\t\t\tclient.publish(MQTT_TOPIC_MOTOR_SPEED_OUT, str(status.speed))\n\t\t\t\tclient.publish(MQTT_TOPIC_MOTOR_CONFIG_OUT, getMotorConfigMQTTString())\n\t\t\t\tclient.publish(MQTT_TOPIC_MOTOR_COMMAND_OUT, getMotorStatusMQTTString())\n\n\t\t\t# The value is sent once every interval, sleep in between\n\t\t\ttime.sleep(LOOP_INTERVAL)\n\texcept KeyboardInterrupt:\n\t\tclient.disconnect()\n\texcept Exception as e:\n\t\tprint(\"An unexpected error occurred: \" + str(e))\n\n\"\"\"\nLimits the amount of change in the given input speed.\nThe returned value can differ from the input at most by MAX_ACCELERATION.\n\"\"\"\ndef limitAcceleration(speed: int, previousSpeed: int) -> int:\n\tif speed > previousSpeed + MAX_ACCELERATION:\n\t\treturn previousSpeed + MAX_ACCELERATION\n\telif speed < previousSpeed - MAX_ACCELERATION:\n\t\treturn previousSpeed - MAX_ACCELERATION\n\telse:\n\t\treturn speed\n\n\"\"\"\nSelects the motor's speed value based on its operating mode.\n\"\"\"\ndef getMotorSpeed(mode: Mode):\n\tglobal config\n\tif mode is Mode.CONTINUOUS:\n\t\treturn getContinuousSpeed(config.continuous)\n\telif mode is Mode.DIFFERENTIAL:\n\t\treturn getDifferentialSpeed(config.differential)\n\telif mode is Mode.SAFE:\n\t\treturn getDifferentialSpeed(config.safe)\n\n\"\"\"\nVisualizes the motor's speed on the command line by drawing a graph, with min, max and speed values visible.\n\"\"\"\ndef printMotorSpeed():\n\tglobal config\n\tglobal status\n\tTOTAL_WIDTH = 40\n\t\n\tpositionPercentage = (status.speed - MIN_SPEED) / (MAX_SPEED - MIN_SPEED)\n\tlowerPadding = round(TOTAL_WIDTH * 
positionPercentage)\n\tupperPadding = TOTAL_WIDTH - lowerPadding\n\n\tprint(f\"{MIN_SPEED} |{' ' * lowerPadding}+{' ' * upperPadding}| {MAX_SPEED} [{status.speed}]\")\n\n\"\"\"\nReturns the motor's speed in continuous mode.\n\"\"\"\ndef getContinuousSpeed(settings: ContinuousSettings) -> int:\n\treturn settings.level\n\n\"\"\"\nReturns the motor's speed in differential mode.\n\"\"\"\ndef getDifferentialSpeed(settings: DifferentialSettings) -> int:\n\ttimeWithinPeriod = time.time() % settings.period\n\trisingDuration = settings.period * settings.ratio\n\tfallingDuration = settings.period - risingDuration\n\n\tif timeWithinPeriod < risingDuration:\n\t\t# Rising, use 1st quarter of the unit circle\n\t\tif risingDuration == 0:\n\t\t\tsinValue = 0\n\t\telse:\n\t\t\tsinValue = np.sin(np.pi * 0.5 * timeWithinPeriod / risingDuration)\n\telse:\n\t\t# Falling, use 3rd quarter of the unit circle\n\t\tif fallingDuration == 0:\n\t\t\tsinValue = 0\n\t\telse:\n\t\t\tsinValue = np.sin(np.pi + np.pi * 0.5 * (timeWithinPeriod - risingDuration) / fallingDuration) + 1.0\n\t\n\t# Scale the value from 0..1 -> differentialMin..differentialMax\n\tscaledValue = settings.min + sinValue * (settings.max - settings.min)\n\treturn int(scaledValue)\n\n\"\"\"\nReturns the motor's status as a string formatted for MQTT channel.\n\"\"\"\ndef getMotorStatusMQTTString() -> str:\n\tglobal status\n\treturn str(int(status.running)) + \",\" + str(status.speed)\n\n\"\"\"\nReturns the motor's config as a string formatted for MQTT channel.\n\"\"\"\ndef getMotorConfigMQTTString() -> str:\n\tglobal config\n\treturn str(int(config.mode.value)) + \",\" + str(config.continuous.level) + \",\" + str(config.differential.min) + \",\" + str(config.differential.max) + \",\" + str(config.differential.period) + \",\" + str(config.differential.ratio)\n\n\"\"\"\nMain function: establishes connection to the MQTT broker,\nsubscribes to the motor config and command topics and goes to the control loop.\n\"\"\"\nif __name__ == \"__main__\":\n\tclient.on_connect = onConnect\n\tclient.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT, 0)\n\n\tclient.subscribe([\n\t\t(MQTT_TOPIC_MOTOR_CONFIG_IN, 1),\n\t\t(MQTT_TOPIC_MOTOR_COMMAND_IN, 1)\n\t])\n\tclient.message_callback_add(MQTT_TOPIC_MOTOR_CONFIG_IN, onMotorConfig)\n\tclient.message_callback_add(MQTT_TOPIC_MOTOR_COMMAND_IN, onMotorCommand)\n\n\tclient.loop_start()\n\tcontrolLoop()\n","repo_name":"Crypdot/iot_security","sub_path":"motorController.py","file_name":"motorController.py","file_ext":"py","file_size_in_byte":8849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11437111440","text":"from .common import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# DBの設定\nDATABASES = {\n 'default': {\n # DB名\n 'ENGINE': 'django.db.backends.sqlite3',\n # DBファイルの保存名\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n","repo_name":"NotReady/MicroBlog","sub_path":"microblog/microblog/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35464654030","text":"from time import sleep\nfrom bob.store.Store import Store\nimport logging\nfrom time import time\nimport os\nimport sys\nimport traceback\nfrom threading import Thread\n\n# states\nFAILED = -2\nINIT = -1\nSTOPPED = 0\nSTOPPING = 1\nFORCE_STOPPING = 2\nSTARTING = 3\nRUNNING = 4\n\n\nSTATES = {\n INIT: \"init\",\n STOPPED: 
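`getDifferentialSpeed()` in the motor-controller record above eases the speed with two quarter-turns of a sine wave: the rising fraction of each period maps onto sin over [0, π/2] (0 → 1), and the falling remainder onto sin over [π, 3π/2] shifted up by one (1 → 0). The curve isolated from the MQTT plumbing:

```python
import numpy as np

def eased_fraction(t_in_period, period, ratio):
    # Returns a 0..1 value over one period: rise for period*ratio, then fall.
    rising = period * ratio
    if t_in_period < rising:
        return float(np.sin(np.pi * 0.5 * t_in_period / rising)) if rising else 0.0
    falling = period - rising
    if falling == 0:
        return 0.0
    return float(np.sin(np.pi + np.pi * 0.5 * (t_in_period - rising) / falling)) + 1.0
```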
\"stopped\",\n STOPPING: \"stopping\",\n FORCE_STOPPING: \"force_stopping\",\n STARTING: \"starting\",\n RUNNING: \"running\",\n FAILED: \"failed\"\n}\n\nCOMMANDS = \"__AMI__.commands\" # commands for the AMI\nHISTORY = \"__AMI__.states\" # history of AMI state\nLOGS = \"__AMI__.logs\" # append logs\n\n\nclass AMI(object):\n \"\"\"\n Abstract Processing node\n\n External API:\n * start()\n * stop()\n\n Life cycle:\n * start()\n * -> status=STARTING -> onStart() -> status=STARTED -> onInterval()\n * stop() # Can only be called from this instance\n * -> status=STOPPING -> onStop() -> status=STOPPED\n\n Overwrite any/all of the following:\n * __init__(): call super and accept arguments\n * onStart(): initialization\n * onStop(): cleanup\n * onInterval(): execute every x ms\n\n onInterval():\n in onStart() or __init__() if the self.interval is not defined, then\n this method will only be executed once. Otherwise, this method will be\n excecuted every [self.interval] milliseconds.\n\n Store: self.store is a substore\n \"\"\"\n\n def __init__(self, tag=None, store=None, interval=None,\n enable_hub=True, is_thread=False, is_daemon=False,\n **args):\n self.TAG = tag or ''\n self.ID = str(hash(time()))[-5:]\n self.TAG += self.ID\n self.HISTORY_PATH = HISTORY + self.ID\n self.store = store if store else Store()\n self.interval = interval\n self.logger = logging.getLogger(str(self))\n from bob.ami.Hub import Hub\n self.hub = Hub(store=self.store) if enable_hub else None\n self.is_thread = is_thread\n self.is_daemon = is_daemon\n self.setState(INIT)\n self._thread = None\n\n def onStart(self):\n pass\n\n def onInterval(self):\n raise NotImplementedError()\n\n def onStop(self):\n pass\n\n def stop(self):\n \"\"\" Stops the AMI completely, should not be overwritten\n\n An external call to this method will not stop this AMI.\n \"\"\"\n if self.getState() not in [RUNNING, FORCE_STOPPING, FAILED]:\n self.logger.debug(\"Can not stop [%s]\" % self.getState())\n return\n try:\n self.setState(STOPPING)\n self.onStop() # TODO: add timeout, call self.onStop() in thread\n self.setState(STOPPED)\n if not self.is_thread:\n os._exit(0) # exit without error\n except Exception:\n self.logger.error(\"Exception while running onStop()\")\n print(sys.exc_info()) # push onto Store on top of STDOUT\n print(traceback.print_exc())\n if not self.is_thread:\n os._exit(1)\n\n def start(self):\n\n if self.is_thread:\n \"\"\" Starts this AMI in a thread with shared memory \"\"\"\n self._thread = Thread(target=self.__start, name=self.TAG)\n self._thread.daemon = self.is_daemon\n self._thread.start()\n else:\n \"\"\" Starts this AMI in a new process and\n returns the unique node_id \"\"\"\n pid = os.fork()\n if pid:\n # parent process\n return self.TAG\n else:\n # AMI process\n self.__start()\n\n def __start(self):\n self.setState(STARTING)\n if self.hub is not None:\n self.hub.store = self.store\n self.hub.start()\n self.onStart()\n if self.getState() == STARTING:\n self.setState(RUNNING)\n try:\n while self.getState() == RUNNING:\n self.onInterval()\n if self.interval:\n # at interval execution\n sleep(self.interval or 0)\n else:\n # single execution\n break\n except Exception:\n (_, err, trace) = sys.exc_info()\n stack_trace = traceback.extract_tb(trace)\n self.logger.error(\"AMI failed: %s: %s\", err, stack_trace)\n self.setState(FAILED)\n\n self.stop()\n\n def setState(self, newState):\n if newState not in STATES:\n raise ValueError(\"Invalid state [%s]\" % newState)\n self.store.add(self.HISTORY_PATH, newState)\n 
self.logger.debug(\"State change to [%s]\", STATES[newState])\n\n def getState(self):\n return self.store.getLasts(self.HISTORY_PATH, 1)[0]\n\n def __repr__(self):\n return \"[AMI.%s.%s]\" % (self.__class__.__name__, self.TAG)\n\n def fail(self, *args):\n self.logger.error(*args)\n self.setState(FAILED)\n","repo_name":"intergalactic-software/bob","sub_path":"bob/ami/AMI.py","file_name":"AMI.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24266732789","text":"# -*- coding: utf-8 -*-\nfrom lib.tools import Tools\nimport paramiko\nimport json\n\n\nclass ModelClass(object):\n def __init__(self, mylog):\n self.mylog = mylog\n\n def checkhostname(self, ssh, hostname, cmd):\n result = self.execcommand(ssh, cmd)\n cmdhostname = result[1].split(\"\\n\")[-2].strip()\n if cmdhostname == hostname:\n return True, cmdhostname\n else:\n return False, cmdhostname\n\n def execcommand(self, ssh, command, stdinfo=None, timeout=3):\n if stdinfo is None:\n stdinfo = []\n try:\n stdin, stdout, stderr = ssh.exec_command(\"echo $LANG\")\n langset = stdout.readlines()[0].replace(\"\\n\", \"\").split(\".\")[1]\n except paramiko.ssh_exception.SSHException:\n self.mylog.info(\"LANG获取失败\")\n raise Exception(\"LANG获取失败\")\n try:\n stdin, stdout, stderr = ssh.exec_command(command, timeout=timeout)\n # self.mylog.info(command)\n for info in stdinfo:\n stdin.write(info+\"\\n\")\n self.mylog.info(info)\n if stderr.readable():\n err = stderr.read()\n err = err.decode(langset)\n if stdout.readable():\n out = stdout.read()\n out = out.decode(langset)\n # self.mylog.info(\"命令out:\"+\"\".join(out))\n except paramiko.ssh_exception.SSHException:\n self.mylog.info(\"命令执行失败:\"+command)\n return [False, \"命令执行失败:\"+command]\n except Exception as e:\n self.mylog.info(e)\n self.mylog.info(\"命令执行失败:\"+command)\n return [False, e]\n if len(err) != 0:\n self.mylog.info(\"命令err:\" + \"\".join(err))\n return [False, \"\".join(err)]\n return [True, out]\n\n def action(self, ssh, hostname, param, hostparam=None):\n\n if param is None or \"cmd\" not in param.keys():\n param = {\"cmd\": \"hostname\"}\n rz, cmdhostname = self.checkhostname(ssh, hostname, param[\"cmd\"])\n if rz:\n self.mylog.info(\"主机名检查--成功\")\n else:\n self.mylog.error(f\"主机名检查--失败 cfg:{hostname} result:{cmdhostname}\")\n","repo_name":"caoyuanbaiyang/Tansible","sub_path":"model/CheckHostname/CheckHostname.py","file_name":"CheckHostname.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"1103188609","text":"\"\"\"\nhttps://leetcode.com/problems/longest-palindromic-substring/\n\nshould be using dynamic programming and recursion\nbabad\n\nshould have a map of the duplicated elements\nso we can consider a palindrome to be something like\nmapping\ncbabc => left == right => mapping[left] = right\nmoving on to b\nc != a hence no change\nmove on to a\n\ncomplexity of this is On2\n================\n\"\"\"\n# https://leetcode.com/problems/longest-palindromic-substring/\n\nfrom functools import lru_cache\n\nclass Solution(object):\n @lru_cache()\n def is_palindrome(self, a):\n \"\"\"check for palindromicity\n\n Args:\n a (str): the input string\n\n Returns:\n bool\n \"\"\"\n if len(a) == 1 or len(a) == 0:\n return True\n if a[0] == a[-1]:\n return self.is_palindrome(a[1:-1])\n\n def longestPalindrome(self, a):\n \"\"\"find the largest palindrome size\n\n Args:\n a (str): the 
input string\n\n        Returns:\n            int: the maximum size\n        \"\"\"\n        if len(a) == 0:\n            return \"\"\n        max_palindrome_size = 0\n        sol = None\n        for i in range(len(a)):\n            #__import__('pdb').set_trace()\n            span = 0\n            # even case\n            if i+1 < len(a) and a[i] == a[i+1]:\n                while i >= span and i + span + 2 <= len(a):\n                    word = a[i-span:i+span+2]\n                    if self.is_palindrome(word) is True:\n                        max_palindrome_size = max(max_palindrome_size, len(word))\n                        if max_palindrome_size == len(word):\n                            sol = word\n                    span += 1\n            else:\n                # odd case\n                while i >= span and i + span + 1 <= len(a):\n                    word = a[i-span:i+span+1]\n                    #print(word)\n                    if self.is_palindrome(word) is True:\n                        max_palindrome_size = max(max_palindrome_size, len(word))\n                        if max_palindrome_size == len(word):\n                            sol = word\n                    span += 1\n        return sol\n\nprint(Solution().longestPalindrome(\"babad\"))\nprint(\"%\"*10)\nprint(Solution().longestPalindrome(\"cbbd\"))\n","repo_name":"infinite-Joy/programming-languages","sub_path":"python-projects/algo_and_ds/max_palindrome.py","file_name":"max_palindrome.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"74333087451","text":"from config import FLAGS\nfrom utils_siamese import get_flags\nfrom utils import load_data\nfrom exp import plot_preck, plot_single_number_metric, draw_emb_hist_heat, draw_ranking\nfrom dist_sim_calculator import get_gs_ds_mat\nfrom results import load_results_as_dict, load_result\nfrom dist_sim_kernel import create_ds_kernel\nfrom dist_sim import unnormalized_dist_sim\nimport numpy as np\nfrom collections import OrderedDict\nfrom os.path import join\n\n\nclass Eval(object):\n    def __init__(self, data, dist_sim_calculator, need_val=True):\n        self.test_row_gs = load_data(FLAGS.dataset_val_test, train=False).graphs\n        self.test_col_gs = load_data(FLAGS.dataset_val_test, train=True).graphs\n        self.baseline_models = []\n        self.baseline_results_dict = load_results_as_dict(\n            FLAGS.dataset_val_test, self.baseline_models,\n            self.test_row_gs, self.test_col_gs, col_graphs_list=None,\n            ds_mat=None, ds_mat_normed=False, # load its own thing from disk\n            sim_or_dist=self._ds_metric_is_dist_or_sim(),\n            ds_metric=FLAGS.ds_metric, time_mat=None)\n        if need_val:\n            val_gs1, val_gs2 = self.get_val_gs_as_tuple(data)\n            self.val_row_gs = self._to_nxgraph_list(val_gs1)\n            self.val_col_gs = self._to_nxgraph_list(val_gs2)\n            true_val_ds_mat = self._get_true_dist_sim_mat_for_val(\n                data, dist_sim_calculator)\n            self.true_val_result = load_result(\n                FLAGS.dataset_val_test, FLAGS.ds_algo,\n                self.val_row_gs, self.val_col_gs, col_graphs_list=None,\n                ds_mat=true_val_ds_mat, ds_mat_normed=False, # provide true ds mat\n                sim_or_dist=self._ds_metric_is_dist_or_sim(),\n                ds_metric=FLAGS.ds_metric, time_mat=None)\n        self.true_test_result = load_result(\n            FLAGS.dataset_val_test, FLAGS.ds_algo,\n            self.test_row_gs, self.test_col_gs, col_graphs_list=None,\n            ds_mat=None, ds_mat_normed=False, # load its own thing from disk\n            sim_or_dist=self._ds_metric_is_dist_or_sim(),\n            ds_metric=FLAGS.ds_metric, time_mat=None)\n        self.norms = [FLAGS.ds_norm]\n\n    def get_val_gs_as_tuple(self, data):\n        return data.val_gs, data.train_gs\n\n    def get_test_gs_as_tuple(self, data):\n        return data.test_gs, data.train_gs + data.val_gs\n\n    def get_true_dist_sim(self, i, j, val_or_test, model):\n        if val_or_test == 'val':\n            r = self.true_val_result\n        else:\n            assert (val_or_test == 'test')\n            r = self.true_test_result\n        return model.get_true_dist_sim(i, j, r)\n\n    def eval_for_val(self, ds_mat, loss_list, time_list, 
metrics):\n assert (ds_mat is not None)\n models = [FLAGS.model]\n pred_r = load_result(\n FLAGS.dataset_val_test, FLAGS.model,\n self.val_row_gs, self.val_col_gs, col_graphs_list=None,\n ds_mat=ds_mat, ds_mat_normed=FLAGS.ds_norm, # provide pred ds mat\n sim_or_dist=FLAGS.pred_sim_dist,\n ds_metric=FLAGS.ds_metric, time_mat=time_list)\n rs = {FLAGS.model: pred_r, FLAGS.ds_algo: self.true_val_result}\n results = self._eval(models, rs, self.true_val_result,\n metrics, False, loss_list=loss_list)\n rtn = OrderedDict()\n li = []\n for metric, num in results.items():\n if not 'loss' in metric:\n num = num[FLAGS.model]\n results[metric] = num\n metric = 'val_' + self._remove_norm_from_str(metric)\n rtn[metric] = num\n s = '{}={:.5f}'.format(metric, num)\n li.append(s)\n return rtn, ' '.join(li)\n\n def eval_for_test(self, ds_mat, metrics, saver, loss_list=None, time_list=None,\n node_embs_dict=None, graph_embs_mat=None, attentions=None,\n model=None, data=None, slt_collec=None):\n assert (ds_mat is not None)\n models = []\n extra_dir = saver.get_log_dir()\n assert (slt_collec is None)\n row_gs, col_gs, col_graphs_list, models, rs, true_r = \\\n self._prepare_regular_results()\n models += [FLAGS.model]\n pred_r = load_result(\n FLAGS.dataset_val_test, FLAGS.model,\n row_gs, col_gs, col_graphs_list=col_graphs_list,\n ds_mat=ds_mat, ds_mat_normed=FLAGS.ds_norm, # provide pred ds mat\n sim_or_dist=FLAGS.pred_sim_dist,\n ds_metric=FLAGS.ds_metric, time_mat=time_list)\n rs.update({FLAGS.model: pred_r})\n return self._eval(models, rs, true_r,\n metrics, FLAGS.plot_results, loss_list,\n node_embs_dict, graph_embs_mat, attentions,\n extra_dir, model, data)\n\n def _prepare_slt_results(self, slt_collec, time_list, extra_dir):\n assert (slt_collec is not None)\n row_gs = self._to_nxgraph_list(slt_collec.row_gs)\n col_gs = None\n col_graphs_list = [self._to_nxgraph_list(li) for li in slt_collec.col_gs_list]\n extra_dir = join(extra_dir, slt_collec.short_name)\n true_r = load_result(\n FLAGS.dataset_val_test, 'decoy_true_result',\n row_gs, col_gs, col_graphs_list=col_graphs_list,\n ds_mat=slt_collec.true_ds_mat, ds_mat_normed=FLAGS.ds_norm,\n sim_or_dist=FLAGS.pred_sim_dist,\n ds_metric=FLAGS.ds_metric, time_mat=time_list)\n rs = {FLAGS.ds_algo: true_r}\n return row_gs, col_gs, col_graphs_list, extra_dir, rs, true_r\n\n def _prepare_regular_results(self):\n row_gs = self.test_row_gs\n col_gs = self.test_col_gs\n col_graphs_list = None\n models = ([FLAGS.ds_algo] + self.baseline_models)\n true_r = self.true_test_result\n rs = {FLAGS.ds_algo: true_r}\n if FLAGS.plot_results:\n rs.update(self.baseline_results_dict)\n return row_gs, col_gs, col_graphs_list, models, rs, true_r\n\n def _eval(self, models, rs, true_r, metrics, plot, loss_list=None,\n node_embs_dict=None, graph_embs_mat=None, attentions=None,\n extra_dir=None, model=None, data=None):\n rtn = OrderedDict()\n for metric in metrics:\n if metric == 'mrr' or metric == 'mse' or metric == 'dev' \\\n or metric == 'time' or 'acc' in metric \\\n or metric == 'kendalls_tau' or metric == 'spearmans_rho':\n d = plot_single_number_metric(\n FLAGS.dataset_val_test, FLAGS.ds_metric, models, rs, true_r, metric,\n self.norms,\n ds_kernel=create_ds_kernel(get_flags('ds_kernel'),\n yeta=get_flags('yeta'),\n scale=get_flags('scale'))\n if get_flags('ds_kernel') else None,\n thresh_poss=[get_flags('thresh_val_test_pos')],\n thresh_negs=[get_flags('thresh_val_test_neg')],\n thresh_poss_sim=[0.5],\n thresh_negs_sim=[0.5],\n plot_results=plot, extra_dir=extra_dir)\n 
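`eval_for_val()` above flattens its metric dict into `val_`-prefixed keys plus one space-joined log line; a compact restatement with made-up numbers:

```python
from collections import OrderedDict

results = OrderedDict([('mse_norm', 0.00123), ('mrr_norm', 0.42)])
line = ' '.join('val_{}={:.5f}'.format(k.replace('_norm', ''), v)
                for k, v in results.items())
assert line == 'val_mse=0.00123 val_mrr=0.42000'
```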
rtn.update(d)\n elif metric == 'draw_heat_hist':\n if node_embs_dict is not None:\n draw_emb_hist_heat(\n FLAGS.dataset_val_test,\n node_embs_dict,\n true_result=true_r,\n ds_norm=FLAGS.ds_norm,\n extra_dir=extra_dir + '/mne',\n plot_max_num=FLAGS.plot_max_num)\n elif metric == 'ranking':\n em = self._get_node_mappings(data)\n draw_ranking(\n FLAGS.dataset_val_test, FLAGS.ds_metric, true_r, rs[FLAGS.model],\n node_feat_name=FLAGS.node_feat_name,\n model_name=FLAGS.model_name,\n plot_node_ids=FLAGS.dataset_val_test != 'webeasy' and em and not FLAGS.supersource,\n plot_gids=False,\n ds_norm=FLAGS.ds_norm,\n existing_mappings=em,\n extra_dir=extra_dir + '/ranking',\n plot_max_num=FLAGS.plot_max_num)\n elif metric == 'attention':\n if attentions is not None:\n draw_attention(\n FLAGS.dataset_val_test, true_r, attentions,\n extra_dir=extra_dir + '/attention',\n plot_max_num=FLAGS.plot_max_num)\n elif 'prec@k' in metric:\n d = plot_preck(\n FLAGS.dataset_val_test, FLAGS.ds_metric, models, rs, true_r, metric,\n self.norms, plot, extra_dir=extra_dir)\n rtn.update(d)\n elif metric == 'loss':\n rtn.update({metric: np.mean(loss_list)})\n elif metric == 'train_pair_ged_vis':\n if FLAGS.ds_metric == 'ged':\n plot_dist_hist(FLAGS.ds_metric, [FLAGS.dataset_val_test],\n [self._transform_train_pairs_to_dist_list(model)],\n extra_dir)\n elif metric == 'graph_classification':\n if graph_embs_mat is not None:\n results_dict = graph_classification(\n FLAGS.dataset_val_test, graph_embs_mat,\n FLAGS.ds_metric,\n extra_dir=extra_dir)\n rtn.update(results_dict)\n else:\n raise RuntimeError('Unknown metric {}'.format(metric))\n return rtn\n\n def _get_true_dist_sim_mat_for_val(self, data, dist_sim_calculator):\n gs1, gs2 = self.get_val_gs_as_tuple(data)\n gs1 = self._to_nxgraph_list(gs1)\n gs2 = self._to_nxgraph_list(gs2)\n return get_gs_ds_mat(gs1, gs2, dist_sim_calculator, 'val', 'train',\n FLAGS.dataset_val_test, FLAGS.ds_metric,\n FLAGS.ds_algo, norm=False, # load the raw dist/sim\n dec_gsize=FLAGS.supersource)\n\n def _to_nxgraph_list(self, gs):\n return [g.nxgraph for g in gs]\n\n def _remove_norm_from_str(self, s):\n return s.replace('_norm', '').replace('_nonorm', '')\n\n def _transform_train_pairs_to_dist_list(self, model):\n mn = model.__class__.__name__\n assert (mn == 'SiameseRegressionModel')\n rtn = []\n for g1, g2, s_or_d in model.train_triples.li:\n if FLAGS.supply_sim_dist == 'sim':\n d = model.ds_kernel.sim_to_dist_np(s_or_d)\n else:\n d = s_or_d\n if FLAGS.ds_norm:\n orig_d = d\n g1nx, g2nx = g1.nxgraph, g2.nxgraph\n g1size, g2size = g1nx.number_of_nodes(), g2nx.number_of_nodes()\n if FLAGS.supersource: # supersource changes #nodes, NOT ged/mcs\n g1size -= 1\n g2size -= 1\n d = unnormalized_dist_sim(\n d, g1nx, g2nx, dec_gsize=FLAGS.supersource)\n if not abs(d - round(d)) < 1e-6:\n raise RuntimeError(\n 'Wrong train pair: g1 gid {}, g2 gid {}, '\n 'g1 size {}, g2 size {}, '\n 'actual g1 size {}, actual g2 size {}, '\n 'd {}, round(d) {}, '\n 'orig_d {} '.format(\n g1nx.graph['gid'], g2nx.graph['gid'],\n g1nx.number_of_nodes(),\n g2nx.number_of_nodes(),\n g1size,\n g2size,\n d, round(d), orig_d))\n d = round(d)\n if d == np.inf:\n d = 999 # TODO: hacky; need to deal with ds_norm=False --> exp(big)=0\n else:\n d = int(d)\n rtn.append(d)\n return rtn\n\n def _get_node_mappings(self, data):\n # [train + val ... 
test]\n if data and hasattr(data.train_gs[0], 'mapping'):\n return [g.mapping for g in data.train_gs + data.val_gs + data.test_gs]\n else:\n return None\n\n def _ds_metric_is_dist_or_sim(self):\n if FLAGS.ds_metric == 'ged':\n return 'dist'\n else:\n assert FLAGS.ds_metric == 'mcs'\n return 'sim'\n","repo_name":"yunshengb/GraphSim","sub_path":"model/Siamese/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":12503,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"32"} +{"seq_id":"72931930012","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport requests\nfrom requests.utils import dict_from_cookiejar\nfrom lxml import etree\nfrom hit.ids.login import idslogin\nfrom hit.exceptions import LoginFailed\nimport json\nimport re\nimport random\nimport datetime\nimport argparse\nimport sys\nimport urllib\nfrom _datetime import date\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr\nfrom email.header import Header\n\nparser = argparse.ArgumentParser(description='HIT出校申请')\nparser.add_argument('username', help='统一身份认证登录用户名(学号)')\nparser.add_argument('password', help='统一身份认证登录密码')\nparser.add_argument('-k', '--api_key', help='Server酱的SCKEY,或是电邮密码/Key')\nparser.add_argument('-m', '--mail_to', help='电邮信息,格式\"服务器[:端口[U]]:用户名\"')\n\n\ndef print_log(msg: str) -> None:\n print(f'[{datetime.datetime.now()}] {msg}')\n\n\ndef get_application_info(session: requests.Session, module_id: str) -> dict:\n with open('post_data.jsonc', 'r', encoding='utf-8') as jsonfile:\n jsondata = ''.join(\n line for line in jsonfile if not line.startswith('//'))\n model = json.loads(re.sub(\"//.*\", \"\", jsondata, flags=re.MULTILINE))\n\n with open('reasons.json', 'r', encoding='utf-8') as reasons_file:\n reasons = json.load(reasons_file)\n # 随机出校理由\n model['cxly'] = random.choice(reasons)\n model['id'] = module_id\n # 日期为第二天\n model['rq'] = (datetime.date.today() +\n datetime.timedelta(days=1)).isoformat()\n application_info = {\n 'info': json.dumps({'model': model})\n }\n print_log('生成申请信息成功')\n return application_info\n\n\ndef main(args):\n print_log('尝试登录...')\n lose_count = 0\n session = None\n while lose_count < 10 and session == None:\n try:\n session = idslogin(args.username, args.password)\n break\n except LoginFailed as e:\n print_log(f'登录失败:{e}')\n lose_count += 1\n if lose_count == 10:\n return False, '登录失败'\n\n session.headers.update({\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 10; Redmi K30) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.110 Mobile Safari/537.36'\n })\n r = session.get('https://xg.hit.edu.cn/zhxy-xgzs/xg_mobile/shsj/common')\n r = session.post(\n 'https://xg.hit.edu.cn/zhxy-xgzs/xg_mobile/xsCxsq/getCxsq',\n data={'info': '{\"id\": \"id\"}'})\n\n response_txt = json.loads(r.text)\n\n if not r.ok or not response_txt['isSuccess']:\n print_log(\n f'无法获取出校申请信息, 响应原文: {response_txt}')\n return False, '无法获取出校申请信息'\n module = response_txt['module']['id']\n if not module:\n print_log('未获取申请信息!')\n return False, '未找到申请出校入口'\n\n application_info = get_application_info(session, module)\n save_url = 'https://xg.hit.edu.cn/zhxy-xgzs/xg_mobile/xsCxsq/saveCxsq'\n response = session.post(save_url, data=application_info)\n print_log(f'POST {save_url} {response.status_code}')\n # print_log(response.text)\n response = response.json()\n msg = response['msg']\n res_msg = '提交成功' if response['isSuccess'] else f'提交失败;{msg}'\n return response['isSuccess'], res_msg\n\n\nif __name__ == '__main__':\n args = 
parser.parse_args()\n is_successful, msg = main(args)\n print_log(msg)\n if args.api_key:\n report_msg = \"\" # 生成上报报告\n if is_successful:\n report_msg = f\"明天的出校申请成功!{datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')}\"\n else:\n report_msg = f\"明天的出校申请失败,原因:{msg}{datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')}\"\n\n if args.mail_to:\n mail_info = args.mail_to.split(':')\n mail_addr = mail_info[-1]\n\n msg = MIMEText(report_msg, 'plain', 'utf-8')\n msg['Subject'] = Header(report_msg, 'utf-8')\n msg['From'] = 'AUTO_REPORT_BOT'\n msg['To'] = mail_addr\n print_log('尝试发送邮件...')\n\n host = mail_info[0]\n unsafe = False\n if len(mail_info) == 3 and mail_info[1][-1] == 'U':\n unsafe = True\n mail_info[1] = mail_info[1][:-1]\n try:\n if unsafe:\n s = smtplib.SMTP(host=host) if len(mail_info) == 2 else smtplib.SMTP(\n host=host, port=int(mail_info[1]))\n s.login(mail_addr, args.api_key)\n print_log('邮件服务器连接成功')\n s.ehlo_or_helo_if_needed()\n s.sendmail(mail_addr, mail_addr, msg.as_string())\n s.quit()\n print_log('邮件发送成功!')\n else:\n s = smtplib.SMTP_SSL(host=host) if len(mail_info) == 2 else smtplib.SMTP_SSL(\n host=host, port=int(mail_info[1]))\n s.ehlo(host)\n s.starttls()\n s.login(mail_addr, args.api_key)\n print_log('邮件服务器连接成功')\n s.sendmail(mail_addr, mail_addr, msg.as_string())\n s.quit()\n print_log('邮件发送成功!')\n except Exception as e:\n print_log('邮件发送失败。')\n print_log(e)\n\n else:\n print_log('发送微信提醒...')\n requests.get(\n f\"https://sc.ftqq.com/{args.api_key}.send?text={report_msg}\")\n","repo_name":"FerdinandSukhoi/PAFCA","sub_path":"pafca.py","file_name":"pafca.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"9596930775","text":"import sys\nsys.stdin = open('input.txt')\n\n\nfrom collections import deque\n\nT = int(input())\nfor _ in range(T):\n p = input()\n n = int(input())\n x = input()[1:-1].split(',')\n\n xs = deque(x)\n if n == 0:\n xs = deque()\n\n rev = 0\n for i in p:\n if i == 'R':\n rev += 1\n elif i == 'D':\n if len(xs) == 0:\n print('error')\n break\n else:\n if rev % 2:\n xs.pop()\n else:\n xs.popleft()\n\n else:\n if rev % 2:\n xs.reverse()\n print('['+','.join(xs)+']')","repo_name":"dw3624/algorithm_practice","sub_path":"백준/5430_AC/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37376652232","text":"import cv2\nimport numpy as np\n\nlogo = cv2.imread('logo.jpg') # 读取 OpenCV 的 logo\nsize = logo.shape # 读取 logo 的长宽尺寸\n\nimg = np.zeros((360,480,3), dtype='uint8') # 产生一张 480x360 背景全黑的图\nimg[0:360, 0:480] = '255' # 将图片变成白色 ( 配合 logo 是白色底 )\nimg[0:size[0], 0:size[1]] = logo # 将图片的指定区域,换成 logo 的图案\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 产生一张灰阶的图片作为遮罩使用\nret, mask1 = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY_INV) # 使用二值化的方法,产生黑白遮罩图片\nlogo = cv2.bitwise_and(img, img, mask = mask1 ) # logo 套用遮罩\n\nbg = cv2.imread('meme.jpg') # 读取底图\nret, mask2 = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY) # 使用二值化的方法,产生黑白遮罩图片\nbg = cv2.bitwise_and(bg, bg, mask = mask2 ) # 底图套用遮罩\n\noutput = cv2.add(bg, logo) # 使用 add 方法将底图和 logo 合并\ncv2.imshow('oxxostudio', 
output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"oxxostudio/book-code","sub_path":"opencv/ch07/code11.py","file_name":"code11.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"17637779327","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nfrom OperationWidget.OperationWidgetUpdater import OperationWidgetUpdater\nfrom FontStretchingWidget import FontStretchingLabel, FontStretchingButton\nfrom CoreConfig.ConfigDictionary import ConfigDictionary\nfrom CoreConfig.ResourceGetter import ResourceGetter\nfrom LogWriter import LogWriter\nfrom PublicData.QueryDataHandler import QueryDataHandler\nfrom CoreConfig.DefaultValues import DefaultQrConfigPathGetter\n# from ThermalXmlGenerators.StandardThermalXmlGenerator import StandardThermalXmlGenerator\n# from ModuleControllingCommand.ThermalPrinter import Print\nfrom datetime import datetime\nimport requests\nimport json\nimport string\nimport random\nimport hashlib\n\n\n# XmlGenerator = {\n# \"standard\": StandardThermalXmlGenerator\n# }\n\n\nclass IndexWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n layout = QtWidgets.QHBoxLayout()\n self.work_widget = TextShowingLabel()\n self.control_widget = ControlWidget(self.work_widget.rest)\n\n layout.addWidget(self.work_widget, stretch=70)\n layout.addWidget(self.control_widget, stretch=30)\n self.setLayout(layout)\n\n def load(self):\n self.control_widget.hint_label.setText(\"\")\n\n\nclass TextShowingLabel(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.setObjectName(\"index_widget_text_label\")\n\n # layout = QtWidgets.QGridLayout()\n layout = QtWidgets.QVBoxLayout()\n\n self.cur_button = QrButton(\"啟動\", \"Start\", \"\")\n\n self.scrollArea = QtWidgets.QScrollArea(self)\n self.scrollArea.setWidgetResizable(True)\n self.scrollAreaWidgetContents = QtWidgets.QWidget(self.scrollArea)\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 380, 247))\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.scrollArea.setObjectName(\"pay_widget_scroll\")\n self.scrollArea.setStyleSheet(\"background-color:transparent;border:0;\")\n layout.addWidget(self.scrollArea, stretch=70)\n layout.addWidget(self.cur_button, stretch=30)\n\n self.parking_detail_label = ParkingDetailLabel(self.scrollAreaWidgetContents)\n self.verticalLayoutScroll = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)\n self.verticalLayoutScroll.addWidget(self.parking_detail_label)\n\n QueryDataHandler.parking_detail_label = self.parking_detail_label.add_text\n # layout.addWidget(self.cur_button)\n\n # with open(DefaultQrConfigPathGetter().execute(),'r') as load_f:\n # load_dict = json.load(load_f)\n #\n # count = 0\n # for qr_item in load_dict['qr_list']:\n # cur_button = QrButton(qr_item['rule_name'], qr_item['rule_id'], info_shower)\n # layout.addWidget(cur_button, count/2, count%2, 1, 1)\n # count += 1\n\n self.setLayout(layout)\n\n def rest(self):\n self.cur_button.init_button()\n\n\nclass ParkingDetailLabel(QtWidgets.QLabel):\n def __init__(self, scrollAreaWidgetContents):\n super().__init__(scrollAreaWidgetContents)\n self.setObjectName(\"pay_widget_parking_detail_label\")\n self.setStyleSheet(\n \"\"\"\n QLabel#pay_widget_parking_detail_label\n {\n color: black;\n };\n \"\"\"\n )\n self.setWordWrap(True)\n self.setAlignment(QtCore.Qt.AlignLeft)\n 
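# 微軟正黑體 (Microsoft JhengHei), bold 14pt: presumably chosen so the Chinese status messages render cleanly\n        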
self.setFont(QtGui.QFont(\"微軟正黑體\", 14, QtGui.QFont.Bold))\n self.all_message = ''\n self.load()\n\n def load(self):\n self.setText(self.all_message)\n\n def add_text(self, message):\n if self.all_message == '':\n self.setText(self.all_message)\n self.all_message = '{}\\n{}'.format(message, self.all_message)\n self.setText(self.all_message)\n\n\nclass QrButton(FontStretchingButton):\n def __init__(self, rule_name, rule_id, info_shower):\n super().__init__(resize_ratio=0.3)\n # QueryDataHandler.process_status = False\n self.info_shower = info_shower\n self.rule_name = rule_name\n self.rule_id = rule_id\n self.setObjectName(\"main_widget_button\")\n # self.setStyleSheet(\n # \"\"\"\n # QPushButton#main_widget_button\n # {{\n # color: white;\n # border-image: url(\\\"{}\\\");\n # }}\n # \"\"\".format(ResourceGetter.get_resource(\"Base\", \"confirm_button.png\"))\n # )\n self.init_button()\n\n self.clicked.connect(lambda: self.switch_button())\n\n def init_button(self):\n QueryDataHandler.process_status = False\n self.setText(\"開始\")\n\n def switch_button(self):\n if QueryDataHandler.process_status is False:\n self.setText(\"暫停\")\n QueryDataHandler.process_status = True\n else:\n self.setText(\"開始\")\n QueryDataHandler.process_status = False\n # self.info_shower.setText(\"列印中 .... \")\n # random_string = ''.join(random.choice(string.ascii_uppercase) for x in range(32))\n # expiry_date = datetime.now().strftime(\"%Y%m%d\")+\"235959\"\n # data_list = [\n # random_string,\n # ConfigDictionary.config_dict['shop_id'],\n # ConfigDictionary.config_dict['lot_id'],\n # self.rule_id,\n # expiry_date\n # ]\n # data = \",\".join(data_list)\n # plain_text = data+ConfigDictionary.config_dict['api_key']\n #\n # sha = hashlib.sha256()\n # sha.update(plain_text.encode('utf-8'))\n # hashed_text = sha.hexdigest()\n # qr_content = \"Off:\"+data+\",\"+hashed_text[:8]+hashed_text[-8:]\n #\n # qr_data = {\n # 'expiry_date': expiry_date,\n # 'qr_content': qr_content,\n # 'rule_name': self.rule_name\n # }\n #\n # thermal_xml_file = self.generate_thermal_xml(qr_data)\n # LogWriter().write_log(\"generated thermal xml is '{}'\".format(thermal_xml_file))\n # try:\n # # Print(Print.thermal_socket_client, None, thermal_xml_file).execute()\n # self.info_shower.setText(\"列印完成\")\n # except Exception as e:\n # LogWriter().write_log(\"Thermal printer exception {}\".format(e))\n # self.info_shower.setText(\"印表機連線異常\")\n\n # def generate_thermal_xml(self, qr_data):\n # target = ConfigDictionary.config_dict['qr_format']\n # thermal_xml_file = XmlGenerator[target]().execute(qr_data)\n # return thermal_xml_file\n\n\nclass ControlWidget(QtWidgets.QWidget):\n first_time = True\n\n def __init__(self, rest_work_widget):\n super().__init__()\n self.rest_work_widget = rest_work_widget\n layout = QtWidgets.QVBoxLayout()\n self.hint_label = self.__get_hint_label()\n self.button = self.__get_confirm_button()\n layout.addWidget(self.hint_label, stretch=70)\n layout.addWidget(self.button, stretch=30)\n\n self.setLayout(layout)\n\n def __get_hint_label(self):\n hint_label = QtWidgets.QLabel()\n hint_label.setText(\"\")\n hint_label.setAlignment(QtCore.Qt.AlignCenter)\n hint_label.setObjectName(\"main_widget_label\")\n hint_label.setStyleSheet(\"color: red;\")\n hint_label.setFont(QtGui.QFont(\"微軟正黑體\", 16))\n\n return hint_label\n\n def __get_confirm_button(self):\n button = FontStretchingButton(resize_ratio=0.3)\n button.setText(\"進入設定頁面\")\n button.setObjectName(\"main_widget_button\")\n # button.setStyleSheet(\n # \"\"\"\n # 
QPushButton#main_widget_button\n # {{\n # color: white;\n # border-image: url(\\\"{}\\\");\n # }}\n # \"\"\".format(ResourceGetter.get_resource(\"Base\", \"confirm_button.png\"))\n # )\n button.clicked.connect(lambda: self.query_operation())\n\n return button\n\n def query_operation(self):\n self.rest_work_widget()\n OperationWidgetUpdater().update_widget_by_command(\"next\")\n\n\n","repo_name":"dirtypig8/SimulateKey","sub_path":"OperationWidget/IndexWidget.py","file_name":"IndexWidget.py","file_ext":"py","file_size_in_byte":8179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18015573688","text":"# Dalam Python, magic method __oct__ adalah method khusus dalam Python \n# yang digunakan untuk mengimplementasikan representasi oktal (basis 8) dari sebuah objek.\n# method ini memungkinkan Anda untuk mendefinisikan perilaku objek ketika diubah menjadi \n# representasi oktal menggunakan fungsi bawaan oct().\n\n# method __oct__ harus mengembalikan representasi oktal dari objek dalam bentuk string dengan awalan \"0o\".\n# method ini dipanggil ketika Anda menggunakan fungsi oct() pada instance objek yang Anda buat.\n\n# Berikut adalah contoh penggunaan magic method __oct__ dalam sebuah kelas:\nclass NilaiOctal:\n def __init__(self, nilai):\n self.nilai = nilai\n\n def __oct__(self):\n return oct(self.nilai)\n\n# membuat objek NilaiOctal\nx = NilaiOctal(4)\nprint(x.__oct__())\n# Output:\n# 0o4\n\n# menggunakan fungsi bawaan oct()\nprint(oct(4))\n# Output:\n# 0o4\n\n# membuat objek NilaiOctal\ny = NilaiOctal(255)\nprint(y.__oct__())\n# Output:\n# 0o377\n\n# menggunakan fungsi bawaan oct()\nprint(oct(255))\n# Output:\n# 0o377","repo_name":"kobencry/python-dasar","sub_path":"Bagian4-OOP/magic-method/method_oct.py","file_name":"method_oct.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"id","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"14217441549","text":"from itertools import permutations\n\nN = int(input())\nxy = [] #? [(x1,y1), (x2,y2), ...] d1:家番号、d2:座標\nhouse = [i for i in range(N)]\nfor _ in range(N):\n x, y = map(int, input().split())\n xy.append((x,y))\n\n#! 先に家と家の距離を全パターン計算\ndis = [[0 for _ in range(N)] for _ in range(N)]\n\nfor i in range(N):\n for j in range(N):\n if i != j:\n a = xy[i]\n b = xy[j]\n dis[i][j] = ( (a[0]-b[0])**2 + (a[1]-b[1])**2)**(1/2)\n# print(dis)\ntotal = 0\ncount = 0\nfor keiro in permutations(house):\n for i in range(N-1):\n total += dis[keiro[i]][keiro[i+1]]\n count += 1\n\nprint(total/count)\n","repo_name":"tokuD/atcoder","sub_path":"abc/C/145.py","file_name":"145.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11952410440","text":"import numpy as np # Operations on arrays\n# http://wiki.scipy.org/Tentative_NumPy_Tutorial\n\nimport matplotlib as mpl\n# http://matplotlib.org/api/pyplot_summary.html\nfrom matplotlib import pyplot as pplt\nfrom matplotlib import patches as plt_patches\n\nimport msm_file as mf\n \nimport math, datetime, os\n\nimport config as c\n\nreqd_ymds = []; reqd_msms = []\npp_names = \"y! 
m!\" # indexes 0 to 1\npp_ix, pp_values = c.set_pp(pp_names) # Set up config info\nfor n,ix in enumerate(pp_ix):\n if ix == 0: # y (yyyymmdd) dates\n reqd_ymds = c.check_ymds(pp_values[n])\n elif ix == 1: # m (50xx) msm_ids\n reqd_msms = c.check_msm_ids(pp_values[n])\n else:\n exit()\nif len(reqd_ymds) == 0:\n reqd_ymds = [c.start_ymd]\nif len(reqd_msms) == 0:\n reqd_msms = [c.msm_id]\nprint(\"reqd_ymds = %s, reqd_msms = %s\\n\" % (reqd_ymds, reqd_msms))\nif len(reqd_ymds) > 1:\n print(\"tr-pkts-v-edges-per-depth.py only handles one ymd <<<\")\n exit()\nc.start_ymd = reqd_ymds[0]\n\nstart_time = c.start_time\nstart_ymd = c.start_ymd\n\ndef tbs_filter(tbs, mn_depth,mx_depth, mn_tr_pkts,mx_tr_pkts):\n stop_row = mx_depth+1\n #print(\"tbs_filter: mn %d, mx %s stop_row %d\" % (\n # mn_depth,mx_depth, stop_row))\n tbs_ftr_pkts = [ [] for j in range(mn_depth,mx_depth+1) ] # Filtered array\n if len(tbs.tb_tr_pkts) < mx_depth:\n stop_row = len(tbs.tb_tr_pkts)\n total_edges = 0\n for d in range(mn_depth,stop_row):\n row = np.array(tbs.tb_tr_pkts[d])\n tbs_ftr_pkts[d-mn_depth] = \\\n row[(row >= mn_tr_pkts) & (row <= mx_tr_pkts)]\n #print(\"d=%d, keep=%s\" % (d, tbs_ftr_pkts[d-mn_depth]))\n #print(\"d=%d, sum=%d\" % (d, tbs_ftr_pkts[d-mn_depth].sum()))\n total_edges += len(tbs_ftr_pkts[d-mn_depth])\n print(\">>> total_edges = %d\" % total_edges)\n return tbs_ftr_pkts\n\ndef plot_stacked(msm_objs, msm_dests, inner, bn): # Plots for timebin bn\n # msm_obj is an MsmStatsFile,\n # msm_obj.tbsa is an array of TbStats objects\n\n# Resistor colour codes 0-9, repeating ...\n# 1 2 3 4 5 6\n colours = ['brown', 'red', 'orange', 'yellow', 'green', 'blue',\n# 7 8 9 white 10 11 12 13\n 'violet', 'grey', 'navajowhite', 'black', 'brown', 'red', 'orange',\n# 14 15 16 17 18 19\n 'yellow', 'green', 'blue', 'violet', 'grey', 'navajowhite', \n# 20 21 22\n 'black', 'brown', 'red']\n if inner:\n which = \"Inner\"\n depths = (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\",\n \"11\", \"12\", \"13\", \"14\", \"15\", \"16\")\n colours = colours[:16]\n else:\n which = \"Outer\"\n depths = (\"17\", \"18\", \"19\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\",\n \"27\", \"28\", \"29\", \"30\", \"31\", \"32\")\n colours = colours[6:]\n\n# nc = len(colours) # Nbr of colours\n# print(\"nc = %d\" % nc)\n tpt = []; tlabels = [] # depths patch tuples and colours\n n_depths = len(depths)\n for t in range(n_depths):\n r = n_depths-1 - t\n tpt.append(plt_patches.Patch(color=colours[r]))\n tlabels.append(depths[r])\n\n print(\"@@@ Starting plot_stacked, which = %s . . .\" % which)\n if len(msm_objs) <= 3:\n rows = 1; cols = len(msm_objs)\n w = 7*cols/3; h = 3.4; stp = 11; tkp = 7; tp = 9\n elif len(msm_objs) <= 6:\n rows = 2; cols = 3\n w = 11.5; h = 9.0; stp = 16; tkp = 12; tp = 12\n else:\n print(\"Can't plot more than 6 msm_ids <<<\")\n fig, axes = pplt.subplots(rows, cols, figsize=(w,h)) # Inches (?)\n fig.legend(tpt, tlabels, \"center right\", title=\"Hops\\n to\\nDest\",\n bbox_to_anchor=(0.98,0.5), prop={\"size\":10}, handlelength=1)\n\n if len(msm_objs) < 6: # ?? 
\n pplt.subplots_adjust(left=0.155, bottom=None, right=None, top=0.9,\n wspace=0.3, hspace=0.85)\n else:\n #pplt.subplots_adjust(left=0.065, bottom=None, right=0.9, top=0.95,\n #pplt.subplots_adjust(left=0.065, bottom=None, right=None, top=0.9,\n pplt.subplots_adjust(left=0.065, bottom=None, right=None, top=0.91,\n wspace=0.4, hspace=0.3)\n \n fig.suptitle(\"%s traceroute packet histograms: %s, timebin %d\" % ( \\\n which, start_time.strftime(\"%A %Y-%m-%d\"), bn),\n fontsize=stp, horizontalalignment='center')\n\n for f in range(rows*cols):\n print(\"--- f = %d\" % f)\n r = int(f/cols); cl = f%cols \n if rows == 1:\n if cols == 1:\n xy1 = axes\n else:\n xy1 = axes[cl]\n else:\n xy1 = axes[r,cl]\n \n msm_obj = msm_objs[f]\n print(\"f = %i, msm = %d, bn = %d\" % (f, msm_obj.msm_id, bn))\n print(\" >>> len(tbas) = %d\" % len(msm_obj.tbsa))\n tbs = msm_obj.tbsa[bn]\n tb_mn_depth = msm_obj.tb_mn_depth\n\n msm_id = msm_obj.msm_id; dest = msm_dests[msm_id][0]\n title = \"%d: %s\" % (msm_id, dest)\n xy1.set_title(\"%s\" % title, fontsize=14) #? tp)\n \n xy1.set_yscale('log')\n xy1.tick_params(axis='y', labelsize=tkp) #x\n xy1.set_ylabel(\"tr packets\", fontsize=14, labelpad=-4)\n xy1.tick_params(axis='x', labelsize=tkp)\n xy1.set_xlabel(\"Edges\", fontsize=tkp, labelpad=1) #y\n\n if inner:\n tpa = tbs_filter(tbs, 0,15, 32,20000)\n trlo = 32; trhi = 1750\n xy1.set_ylim([trlo, trhi])\n xy1.set_yticks([20, 60, 180, 540, 1620])\n xy1.set_yticklabels(['20', '60', '180', '540', '1620'])\n ylo = math.log(trlo)/math.log(10) # For logspace() bins\n yhi = math.log(trhi)/math.log(10)\n\n edlo = 0.1; edhi = 100\n xy1.set_xticks([10, 30, 50, 70, 90])\n xy1.set_xlim([edlo, edhi])\n\n n, bins, patches = xy1.hist(\n #first_depths, stacked=True, orientation=u'horizontal',\n tpa, stacked=True, orientation=u'horizontal',\n bins=np.logspace(ylo, yhi, 40),\n color=colours, # linear scale\n alpha=0.8, linewidth=1, rwidth=0.8)\n\n else: # outer\n tpa = tbs_filter(tbs, 16,31, 1,120)\n trlo = 1; trhi = 90\n xy1.set_ylim([trlo, trhi])\n xy1.set_yticks([3, 9, 27, 81])\n xy1.set_yticklabels(['3', '9', '27', '81'])\n ylo = math.log(trlo)/math.log(10) # For logspace() bins\n yhi = math.log(trhi)/math.log(10)\n\n edlo = 10; edhi = 14000 ##10000 ##7000 ##6000 ##5400\n xy1.set_xlim([edlo, edhi])\n xy1.set_xscale('log')\n #xy1.set_xticks([20, 60, 180, 540, 1620])\n #xy1.set_xticklabels(['20', '60', '180', '540', '1620'])\n xy1.set_xticks([15, 60, 240, 1200, 7000])\n xy1.set_xticklabels(['15', '60', '240', '1200', '7000'])\n\n n, bins, patches = xy1.hist(\n tpa, stacked=True, orientation=u'horizontal',\n bins=np.logspace(ylo, yhi, 30),\n color=colours, # log=False\n alpha=0.8, linewidth=1, rwidth=0.8)\n\n #pplt.show()\n plot_fn = \"%s/%s-tr-pkts-v-edges-per-depth.svg\" % (start_ymd, which)\n pplt.savefig(plot_fn)\n return plot_fn\n\nmsm_objs = []\nbins_to_read = 1 # Only read stats for first bin\n#for msm_id in c.msm_nbrs:\n#print(\"--- msm_objs = %s <<<\" % msm_objs)\n#for msm_id in [5017]:\n#for msm_id in [5017, 5005, 5016]:\nfor msm_id in [5017, 5005, 5016, 5004, 5006, 5015]:\n fn = \"./\" + c.stats_fn(msm_id) # isfile expects full filename!\n print(\"msm_id = %d, fn = %s\" % (msm_id, fn))\n if os.path.isfile(fn):\n print(\" >>> %d: %s\" % (msm_id, fn))\n msm_objs.append(mf.MsmStatsFile(fn, 0, 0)) # Read bins 0 only\n #msm_objs.append(mf.MsmStatsFile(fn, 0, 2)) # Read bins 0,1,2\n else:\n print(\"No file %s\" % fn)\n print(\"len(msm_objs) = %i\" % len(msm_objs))\n\nif len(msm_objs) == 0:\n print(\"No stats files found <<<\")\n 
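# plotting would fail with an empty list, so bail out here\n    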
exit()\nprint(\"--- msm_objs = %s\" % msm_objs)\n\nplot_stacked(msm_objs, c.msm_dests, True, 0) # Inner plot for timebin 0\nplot_fn = plot_stacked(msm_objs, c.msm_dests, False, 0) # Outer plot for timebin 0\nprint(\">>> plot_fn = %s\" % plot_fn)\n#plot_stacked(msm_objs, c.msm_dests, 10) # Plot timebin 10 only\n\ndef run_cmd(cmd):\n output, rc = c.run_bash_commands(cmd)\n if rc != 0:\n print(output)\n return rc\n\nrt = run_cmd(\"python3 publishing/tweak-svg-headers.py %s\" % plot_fn)\nif rt != 0:\n print(\">>>>> tweak run failed!\"); exit()\n","repo_name":"nevil-brownlee/Atlas-graphs","sub_path":"publishing/trpkts-v-edges-per-depth.py","file_name":"trpkts-v-edges-per-depth.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37291911761","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\n\ndriver = webdriver.Chrome(executable_path=\"C:\\\\Users\\P0022990\\Desktop\\Personal\\chromedriver.exe\")\ndriver.get(\"https://rahulshettyacademy.com/angularpractice/\")\ndriver.maximize_window()\nwait = WebDriverWait(driver, 7)\nwait.until(expected_conditions.presence_of_element_located((By.XPATH, \"//label[text()='Name']\")))\nshopBtn = driver.find_element_by_link_text(\"Shop\")\nshopBtn.click()\naddBtn = driver.find_elements_by_xpath(\"//div[@class='card-footer']/button\")\naddBtn[1].click()\ncheckOutBtn = driver.find_element_by_xpath(\"//a[@class='nav-link btn btn-primary']\")\ncheckOutBtn.click()\nquantity = driver.find_elements_by_id(\"exampleInputEmail1\")\nprice = driver.find_elements_by_xpath(\"//tbody/tr[1]/td[3]/strong[1]\")\nquantity[0].is_selected()\nquantity[0].send_keys(\"2\")\ncheckOutBtnSumPage = driver.find_element_by_xpath(\"//button[@class='btn btn-success']\")\ncheckOutBtnSumPage.click()\ncountyInput = driver.find_element_by_id(\"country\")\ncountyInput.send_keys(\"fi\")\nwait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME, \"suggestions\")))\ncountryResults = driver.find_elements_by_xpath(\"//div[@class='suggestions']/ul/li/a\")\nfor i in countryResults:\n assert \"in\" or \"In\" in i.text\n \"\"\"\n assert \"in\" in i.text\n elif \"In\" in i.text:\n assert \"In\" in i.text\n else:\n print(\"country doesnt contain 'in'. 
the text is: \" + i.text)\n break\n\n\n#driver.quit()\n\"\"\"\n\n\n\n\n\n","repo_name":"itay1709/personalProjectSeleniumPytest","sub_path":"pactice.py","file_name":"pactice.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26351068433","text":"import requests\nfrom io import BytesIO\n\nfrom vk_api.utils import get_random_id\nimport vk_api\nfrom vk_api import VkUpload\nfrom vk_api.longpoll import VkLongPoll\nfrom vk_api.keyboard import VkKeyboard, VkKeyboardColor\nfrom vk_api.longpoll import VkEventType\n\n\nclass Bot:\n\n def __init__(self, key):\n self.authorize = vk_api.VkApi(token=key) # Авторизуемся в ВК для управления нашей группой, используя token\n self.longpoll = VkLongPoll(\n self.authorize) # Выбираем тип используемого API - Long Poll API, бывает еще Callback API\n self.upload = VkUpload(self.authorize) # Загрузчик изображений на сервер в ВК\n self.VkEventType = VkEventType\n\n @staticmethod\n def __get_keyboard_for_bot():\n keyboard = VkKeyboard(one_time=False) # создаем клавиатуру для бота\n keyboard.add_button('К следующему', color=VkKeyboardColor.PRIMARY)\n keyboard.add_line()\n keyboard.add_button('В избранное', color=VkKeyboardColor.POSITIVE)\n keyboard.add_button('В черный список', color=VkKeyboardColor.NEGATIVE)\n keyboard.add_line()\n keyboard.add_button('Список избранных', color=VkKeyboardColor.SECONDARY)\n keyboard_for_bot = keyboard.get_keyboard()\n return keyboard_for_bot\n\n def write_message(self, sender, message, attachment=None):\n self.authorize.method('messages.send',\n {'user_id': sender, 'message': message, 'random_id': get_random_id(),\n 'attachment': attachment,\n 'keyboard': self.__get_keyboard_for_bot()})\n\n def upload_photo(self, url):\n img = requests.get(url).content\n f = BytesIO(img)\n\n response = self.upload.photo_messages(f)[0]\n\n owner_id = response['owner_id']\n photo_id = response['id']\n access_key = response['access_key']\n attachment = f'photo{owner_id}_{photo_id}' # _{access_key}\n return attachment\n\n def get_attachment(self, photo_link_list: list):\n attachment_list = []\n for link in photo_link_list:\n uploaded_photo = self.upload_photo(link)\n attachment_list.append(uploaded_photo)\n attchment = ','.join(attachment_list)\n return attchment\n\n def get_user_info(self, user_id):\n user_info = self.authorize.method('users.get', {\"user_ids\": user_id, 'fields': 'city, bdate, sex'})[0]\n return user_info['first_name'], user_info['last_name'], user_info['city']['title'], user_info['bdate'], user_info['sex']\n\n def send_candidate(self, sender):\n # вызываем функцию подборакандидата, получаем данные и ссылки\n photo_list = ['https://vdp.mycdn.me/getImage?id=411588037337&idx=0&thumbType=32',\n 'https://www.mam4.ru/media/upload/user/5422/19/6170.jpg',\n 'https://avatanplus.com/files/resources/mid/5ab5736f0579416254cae9ae.png',\n ]\n candidate_id = 'candidate_id'\n fio = \"fio\"\n link = \"link\"\n attachment_photos = self.get_attachment(photo_list)\n self.write_message(sender, f'Вот отличный кандидат:\\n{fio}\\n{link}', attachment=attachment_photos)\n return candidate_id, fio, link, photo_list\n","repo_name":"YuriyGl86/bot","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19690771003","text":"def solution(cards1, cards2, goal):\n answer=\"Yes\"\n idx_1,idx_2=0,0\n for word in goal:\n if word 
in cards1 and cards1[idx_1]==word:\n idx_1+=1\n elif word in cards2 and cards2[idx_2]==word:\n idx_2+=1\n else:\n answer=\"No\"\n return answer\n return answer\n\nprint(solution([\"a\",\"b\",\"c\"],[\"d\", \"e\"],[\"b\", \"c\", \"d\", \"e\"]))","repo_name":"SarahParkSehyun/programmers","sub_path":"Lv.1/카드 뭉치.py","file_name":"카드 뭉치.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25479392772","text":"# def function(a):\n# i=0\n# c=[]\n# while i')\n exit() \n\nwith open(sys.argv[1]) as fp:\n untl_bs = json.load(fp)\n\nrequest_url = [\n 'https://digital.library.unt.edu/solrparse/raw/?q=*:*',\n 'fq=(aubrey_system:PTH+OR+untl_institution:UNTA)+AND+dc_rights_access:public',\n 'facet=true',\n 'facet.field=dc_subject.UNTL-BS_facet',\n 'facet.limit=-1',\n 'facet.mincount=1',\n 'wt=json',\n 'rows=0'\n ]\n\n# print('&'.join(request_url))\n\n# Build and make the request to the Solr server.\nresponse = urllib.request.urlopen('&'.join(request_url)).read()\n\n# Convert the response (which is json) into a python object.\ndata = json.loads(response.decode('utf-8'))\n\n# Convert the Solr facet format into a dict with counts.\nx = (data['facet_counts']['facet_fields']['dc_subject.UNTL-BS_facet'])\nvalues = dict(zip([d for d in x[::2]], [int(d) for d in x[1::2]]))\n\n# Create a blank dict to store our final UNTL-BS values\nuntl_with_counts = {}\n\n# Iterate through the valid UNTL-BS values. \nfor i in untl_bs:\n # Default assign each a count of zero. \n untl_with_counts[i] = 0\n # If they occur in the values dict (from Solr) then use the Solr values\n if i in values:\n untl_with_counts[i] = values[i]\n\n# Pretty print the json structure\nprint(json.dumps(untl_with_counts, indent=2, sort_keys=True))\n\n","repo_name":"unt-libraries/portal-leading","sub_path":"untl-bs/code/merge-untlbs-json-with-counts.py","file_name":"merge-untlbs-json-with-counts.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43039965717","text":"from rich import print\n\ncal = open('input.txt', 'r', encoding='utf-8').read().strip().split('\\n')\n\nvS = []\nvM = []\nvO = []\nvT = []\nvTrue = []\nvFalse = []\n\nfor txt in cal:\n txt = txt.strip()\n if txt.split(' ')[0] == 'Monkey':\n pos = int(txt.split(' ')[1][:-1])\n elif txt.split(':')[0] == 'Starting items':\n vM.append(pos)\n vM[pos] = txt.split(':')[1].split(',')\n for x in range(len(vM[pos])):\n vM[pos][x] = int(vM[pos][x])\n elif txt.split(':')[0] == 'Operation':\n vO.append(pos)\n _, _, _, _, o, v = txt.split(' ')\n vO[pos] = [o, v]\n elif txt.split(':')[0] == 'Test':\n vT.append(pos)\n vT[pos] = int(txt.split('by')[1].strip())\n elif txt.split(':')[0] == 'If true':\n vTrue.append(pos)\n vTrue[pos] = int(txt.split('monkey')[1].strip())\n elif txt.split(':')[0] == 'If false':\n vFalse.append(pos)\n vFalse[pos] = int(txt.split('monkey')[1].strip())\n\nvS = [0] * len(vM)\nvMCopy = []\nfor y in vM:\n vMCopy.append(y[:])\n\nfor x in range(20):\n for y in range(len(vM)):\n for z in range(len(vM[y])):\n vS[y] += 1\n if vO[y][0] == '+':\n t = vM[y][z] + int(vO[y][1])\n else:\n if vO[y][1] == 'old':\n t = vM[y][z] * vM[y][z]\n else:\n t = vM[y][z] * int(vO[y][1])\n t = t // 3\n if t % vT[y] == 0:\n xTo = vTrue[y]\n else:\n xTo = vFalse[y]\n vM[xTo].append(t)\n vM[y] = []\n\nvS.sort()\n\nprint(vS[-1] * vS[-2])\n\nvM = vMCopy.copy()\nvS = []\nvS = [0] * len(vM)\n\ndv = 1\nfor x in 
vT:\n dv *= x\n\nfor x in range(10000):\n for y in range(len(vM)):\n for z in range(len(vM[y])):\n vS[y] += 1\n if vO[y][0] == '+':\n t = vM[y][z] + int(vO[y][1])\n else:\n if vO[y][1] == 'old':\n t = vM[y][z] * vM[y][z]\n else:\n t = vM[y][z] * int(vO[y][1])\n\n t = t % dv\n\n if t % vT[y] == 0:\n xTo = vTrue[y]\n else:\n xTo = vFalse[y]\n vM[xTo].append(t)\n vM[y] = []\n\nvS.sort()\n\nprint(vS[-1] * vS[-2])\n","repo_name":"robertohbr1/adventofcode","sub_path":"2022/11/t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74921285852","text":"from configparser import ConfigParser\nfrom loguru import logger\nimport os\n\nDEFAULT_MESSAGE_FORMAT = \"**{author}**(_{server}_) - {time}{newline}{message.content}\"\n\n\nclass Configuration:\n def __init__(self):\n self.parser = ConfigParser()\n self.parser.read(\"./config.ini\")\n\n def get_discord_token(self) -> str:\n token = str(self.parser[\"info\"][\"discord_account_token\"])\n if \"your_account_token\" in token:\n token = os.environ.get(\"discord_account_token\")\n\n return str(token)\n\n def get_log_channel(self) -> int:\n channel_id = int(self.parser[\"info\"][\"logs_channel_id\"])\n message_format = str(self.parser[\"info\"][\"message_format\"])\n\n return (\n (channel_id, message_format if message_format else DEFAULT_MESSAGE_FORMAT)\n if not channel_id == 0\n else None\n )\n\n def get_bookmarks(self) -> dict:\n \"\"\"\n {'emoji': {'channel_id': 123, 'message_format': 'abc'}}\n \"\"\"\n\n bookmark_blocks = [\n x\n for x in self.parser\n if not x.startswith(\"DEFAULT\") and not x.startswith(\"info\")\n ]\n\n bookmarks = {}\n for block in bookmark_blocks:\n logger.info(\n f\"Obtained emoji {block} with channel id -> {self.parser[block]['channel_id']}\"\n )\n message_format = str(self.parser[block][\"message_format\"])\n\n bookmarks[block] = {\n \"channel_id\": int(self.parser[block][\"channel_id\"]),\n \"message_format\": message_format\n if message_format\n else DEFAULT_MESSAGE_FORMAT,\n }\n\n return bookmarks\n","repo_name":"yeti2006/discord-react-to-bookmark","sub_path":"src/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3666212537","text":"from abc import (\n abstractmethod,\n ABC,\n)\nfrom typing import (\n Type,\n List,\n)\n\nfrom pydantic import (\n BaseModel as PydanticModel,\n)\n\nfrom pddd.entities import (\n Entity,\n)\nfrom pddd.repositories import (\n Repository,\n CreateRepository,\n ReadRepository,\n UpdateRepository,\n DeleteRepository,\n CrudRepository,\n)\nfrom pddd.services import (\n Service,\n CreateService,\n ReadService,\n UpdateService,\n DeleteService,\n)\n\n\nclass PydanticService(Service, ABC):\n @property\n @abstractmethod\n def repository(self) -> Repository:\n raise NotImplementedError()\n\n\nclass PydanticCreateMixin(CreateService, ABC):\n @property\n @abstractmethod\n def repository(self) -> CreateRepository:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def create_model(self) -> Type[PydanticModel]:\n raise NotImplementedError()\n\n async def create(self, inputs: dict) -> dict:\n model: PydanticModel = self.create_model(**inputs)\n\n kwargs: dict = model.dict()\n entity: Entity = self.repository.entity(\n **kwargs, # noqa\n )\n\n new_entity = await self.repository.create(entity)\n return new_entity.__dict__\n\n\nclass 
PydanticReadMixin(ReadService, ABC):\n @property\n @abstractmethod\n def repository(self) -> ReadRepository:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def read_model(self) -> Type[PydanticModel]:\n raise NotImplementedError()\n\n async def read(self, filters: dict) -> List[dict]:\n model: PydanticModel = self.read_model(**filters)\n\n entities: list = await self.repository.read(filters=model.dict())\n\n return [\n entity.__dict__\n for entity in entities\n ]\n\n\nclass PydanticUpdateMixin(UpdateService, ABC):\n @property\n @abstractmethod\n def repository(self) -> UpdateRepository:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def update_model(self) -> Type[PydanticModel]:\n raise NotImplementedError()\n\n async def update(self, id_: str, inputs: dict) -> dict:\n model: PydanticModel = self.update_model(id=id_, **inputs)\n\n kwargs: dict = model.dict()\n entity: Entity = self.repository.entity(\n **kwargs, # noqa\n )\n\n new_entity = await self.repository.update(entity)\n return new_entity.__dict__\n\n\nclass PydanticDeleteMixin(DeleteService, ABC):\n @property\n @abstractmethod\n def repository(self) -> DeleteRepository:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def delete_model(self) -> Type[PydanticModel]:\n raise NotImplementedError()\n\n async def delete(self, id_: str) -> None:\n model: PydanticModel = self.delete_model(id=id_)\n\n kwargs: dict = model.dict()\n entity: Entity = self.repository.entity(\n **kwargs, # noqa\n )\n\n await self.repository.delete(entity)\n\n\nclass PydanticCrudService(\n PydanticService,\n PydanticCreateMixin,\n PydanticReadMixin,\n PydanticUpdateMixin,\n PydanticDeleteMixin,\n ABC,\n):\n @property\n @abstractmethod\n def repository(self) -> CrudRepository:\n raise NotImplementedError()\n","repo_name":"xurvan/pddd","sub_path":"pddd/services/pydantic.py","file_name":"pydantic.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2184270020","text":"from lxml import html\nimport requests\nimport util\n\ndef get_ladder(season=None, search=None):\n ladder = dict()\n ladder[\"eternal\"] = False\n rank_elo = \"ak-rank\"\n url = _('https://www.krosmaga.com/en/community/leaderboard')\n join_char = '?'\n if season == '0':\n url = '/'.join([url, _(\"eternal\")])\n ladder[\"eternal\"] = True\n rank_elo = \"ak-elo\"\n else:\n url = '/'.join([url, _(\"seasonal\")])\n if season is not None:\n url = join_char.join([url, \"season={0}\".format(season)])\n join_char = '&'\n if search is not None:\n url = join_char.join([url, \"search={0}\".format(search)])\n\n page = requests.get(url)\n tree = html.fromstring(page.content)\n\n ladder[\"places\"] = tree.xpath('//td[@class=\"ak-position\"]/span/text()')\n ladder[\"nicknames\"] = tree.xpath('//td[@class=\"ak-nickname\"]/text()')\n ladder[\"rank_elo\"] = tree.xpath('//td[contains(@class, $rank_elo)]/text()', rank_elo=rank_elo)\n ladder[\"victories\"] = tree.xpath('//span[@class=\"ak-win\"]/text()')\n ladder[\"defeats\"] = tree.xpath('//span[@class=\"ak-lose\"]/text()')\n\n return ladder\n\ndef create_message(ladder, first=1, last=1):\n if ladder[\"eternal\"]:\n rank_elo = _(\" Elo\")\n else:\n rank_elo = _(\" Rank\")\n try:\n msg = \"```{place}{nickname}{rank_elo}{win_lose}\".format(place=\" #\", nickname=util.align_right(_(\"Nickname\"), 30), rank_elo=rank_elo, win_lose=_(\" Victories/Defeats\"))\n for i in range(first-1, last):\n place = 
util.align_right(ladder[\"places\"][i], 5)\n nickname = util.align_right(ladder[\"nicknames\"][i], 30)\n rank_elo = util.align_right(ladder[\"rank_elo\"][i], len(rank_elo))\n win_lose = util.align_right(ladder[\"victories\"][i] + \"/\" + ladder[\"defeats\"][i], len(_(\" Victories/Defeats\")))\n msg = '\\n'.join([msg, \"{place}{nickname}{rank_elo}{win_lose}\".format(place=place, nickname=nickname, rank_elo=rank_elo, win_lose=win_lose)])\n msg = '\\n'.join([msg, \"```\"])\n except IndexError:\n msg = _(\"Sorry, I did not find anyone.\")\n return msg\n\ndef create_messages(ladder, first, last):\n msgs = list()\n while last - first >= 20:\n msg = create_message(ladder, first, first + 19)\n first = first + 20\n msgs.append(msg)\n msg = create_message(ladder, first, last)\n msgs.append(msg)\n\n return msgs\n","repo_name":"glouis/Laima-Discord-Bot","sub_path":"laima/ladder.py","file_name":"ladder.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"44500555109","text":"from flask import request\nfrom flask_restful import reqparse, Resource\n# dummy data\nfrom flask_restful_swagger import swagger\n\nfrom models import PlaceRecordModel\n\nplaceRecords = [\n {\n \"name\": \"JADS\",\n \"rating\": 4.5,\n \"address\": {\n \"postcode\": \"5211 DA\",\n \"street\": \"Sint Janssingel\",\n \"houseNo\": 92,\n \"city\": \"Den Bosch\"\n }\n }\n]\n\n\n# resource place record\nclass PlaceRecord(Resource):\n @swagger.operation(\n notes='Get a Place Record',\n responseClass=PlaceRecordModel.__name__,\n nickname='getRecord',\n parameters=[\n {\n \"name\": \"name\",\n \"description\": \"Place Record identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"dataType\": 'string',\n \"paramType\": \"path\"\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"Place Record\"\n },\n {\n \"code\": 404,\n \"message\": \"Place record not found\"\n }\n ])\n def get(self, name):\n for record in placeRecords:\n if name == record[\"name\"]:\n return record, 200 # return 200 HTTP status code to indicate success\n return {\"message\": \"Place record not found\"}, 404 # return 404 HTTP status code to indicate resource not found\n\n @swagger.operation(\n notes='Update a Place Record',\n nickname='updateRecord',\n responseClass=PlaceRecordModel.__name__,\n parameters=[\n {\n \"name\": \"name\",\n \"description\": \"Place Record identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"dataType\": 'string',\n \"paramType\": \"path\"\n },\n {\n \"name\": \"rating\",\n \"description\": \"Place rating\",\n \"required\": True,\n \"allowMultiple\": False,\n \"dataType\": 'integer',\n \"paramType\": \"query\"\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"Place Record was updated\"\n },\n {\n \"code\": 404,\n \"message\": \"Place record not found\"\n }\n ])\n def put(self, name):\n parser = reqparse.RequestParser()\n parser.add_argument('rating', type=int, help='Rate to charge for this resource')\n args = parser.parse_args(strict=True)\n\n for record in placeRecords:\n if name == record[\"name\"]:\n record[\"rating\"] = args[\"rating\"]\n return record, 200\n\n return {\"message\": \"Place record not found\"}, 404\n\n @swagger.operation(\n notes='Delete a Place Record',\n nickname='deleteRecord',\n parameters=[\n {\n \"name\": \"name\",\n \"description\": \"Place Record identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"dataType\": 'string',\n \"paramType\": 
\"path\"\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"Place Record was deleted\"\n },\n {\n \"code\": 404,\n \"message\": \"Place record not found\"\n }\n ])\n def delete(self, name):\n to_be_deleted = None\n for record in placeRecords:\n if name == record[\"name\"]:\n to_be_deleted = record\n break\n\n if to_be_deleted:\n placeRecords.remove(to_be_deleted)\n return \"{} was deleted.\".format(name), 200\n return {\"message\": \"Place record not found\"}, 404\n\n\n# resource collection place records\nclass PlaceRecords(Resource):\n @swagger.operation(\n notes='Create a Place Record',\n responseClass=PlaceRecordModel,\n nickname='createRecord',\n parameters=[\n {\n \"name\": \"body\",\n \"description\": \"Place Record identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"dataType\": PlaceRecordModel.__name__,\n \"paramType\": \"body\"\n }\n ],\n responseMessages=[\n {\n \"code\": 201,\n \"message\": \"Created Place Record\"\n },\n {\n \"code\": 500,\n \"message\": \"Record with the given name already exists\"\n }\n ])\n def post(self):\n record_to_be_created = request.get_json(force=True)\n name = record_to_be_created['name']\n for record in placeRecords:\n if name == record[\"name\"]:\n return {\"message\": \"Record with name {} already exists\".format(\n name)}, 500 # 500 Internal Server Error HTTP status code\n\n placeRecords.append(record_to_be_created)\n return record_to_be_created, 201 # 201 Created HTTP status code\n","repo_name":"IndikaKuma/IS","sub_path":"lab13/openapiplacerecord/resources/place_record.py","file_name":"place_record.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19159775769","text":"from __future__ import absolute_import, print_function\n\nimport time\nimport numpy as np\n\nimport cv2\n\nfrom sea_ice_drift.lib import (get_speed_ms,\n interpolation_poly,\n get_displacement_km)\n\ndef find_key_points(image,\n edgeThreshold=34,\n nFeatures=100000,\n nLevels=7,\n patchSize=34,\n verbose=False,\n **kwargs):\n ''' Initiate detector and find key points on an image\n Parameters\n ----------\n image : 2D UInt8 Numpy array - image\n edgeThreshold : int - parameter for OpenCV detector\n nFeatures : int - parameter for OpenCV detector\n nLevels : int - parameter for OpenCV detector\n patchSize : int - parameter for OpenCV detector\n Returns\n -------\n keyPoints : list - coordinates of keypoint on image\n descriptors : list - binary descriptos of kepoints\n '''\n if cv2.__version__.startswith('3.') or cv2.__version__.startswith('4.'):\n detector = cv2.ORB_create()\n detector.setEdgeThreshold(edgeThreshold)\n detector.setMaxFeatures(nFeatures)\n detector.setNLevels(nLevels)\n detector.setPatchSize(patchSize)\n else:\n detector = cv2.ORB()\n detector.setInt('edgeThreshold', edgeThreshold)\n detector.setInt('nFeatures', nFeatures)\n detector.setInt('nLevels', nLevels)\n detector.setInt('patchSize', patchSize)\n keyPoints, descriptors = detector.detectAndCompute(image, None)\n if verbose:\n print('Key points found: %d' % len(keyPoints))\n return keyPoints, descriptors\n\n\ndef get_match_coords(keyPoints1, descriptors1,\n keyPoints2, descriptors2,\n matcher=cv2.BFMatcher,\n norm=cv2.NORM_HAMMING,\n ratio_test=0.7,\n verbose=False,\n **kwargs):\n ''' Filter matching keypoints and convert to X,Y coordinates\n Parameters\n ----------\n keyPoints1 : list - keypoints on img1 from find_key_points()\n descriptors1 : list - descriptors on img1 from 
find_key_points()\n keyPoints2 : list - keypoints on img2 from find_key_points()\n descriptors2 : list - descriptors on img2 from find_key_points()\n matcher : matcher from CV2\n norm : int - type of distance\n ratio_test : float - Lowe ratio\n verbose : bool - print some output ?\n Returns\n -------\n x1, y1, x2, y2 : coordinates of start and end of displacement [pixels]\n '''\n matches = _get_matches(descriptors1,\n descriptors2, matcher, norm, verbose)\n x1, y1, x2, y2 = _filter_matches(matches, ratio_test,\n keyPoints1, keyPoints2, verbose)\n return x1, y1, x2, y2\n\ndef _get_matches(descriptors1, descriptors2, matcher, norm, verbose=False):\n ''' Match keypoints using BFMatcher with cv2.NORM_HAMMING '''\n t0 = time.time()\n bf = matcher(norm)\n matches = bf.knnMatch(descriptors1, descriptors2, k=2)\n t1 = time.time()\n if verbose:\n print('Keypoints matched', t1 - t0)\n return matches\n\ndef _filter_matches(matches, ratio_test, keyPoints1, keyPoints2, verbose=False):\n ''' Apply ratio test from Lowe '''\n good = []\n for m,n in matches:\n if m.distance < ratio_test*n.distance:\n good.append(m)\n if verbose:\n print('Ratio test %f found %d keypoints' % (ratio_test, len(good)))\n\n # Coordinates for start, end point of vectors\n x1 = np.array([keyPoints1[m.queryIdx].pt[0] for m in good])\n y1 = np.array([keyPoints1[m.queryIdx].pt[1] for m in good])\n x2 = np.array([keyPoints2[m.trainIdx].pt[0] for m in good])\n y2 = np.array([keyPoints2[m.trainIdx].pt[1] for m in good])\n return x1, y1, x2, y2\n\ndef domain_filter(n, keyPoints, descr, domain, domainMargin=0, verbose=False, **kwargs):\n ''' Finds from Nansat objects which are within \n Parameters\n ----------\n n : source Nansat object\n keyPoints : list - keypoints on image from \n descr : list - descriptors of \n domain : destination Domain\n domainMargin : int - margin to crop points\n Returns\n -------\n keyPointsFilt : list of filtered keypoints\n descrFilt : list - descriptors of \n '''\n cols = [kp.pt[0] for kp in keyPoints]\n rows = [kp.pt[1] for kp in keyPoints]\n lon, lat = n.transform_points(cols, rows, 0)\n colsD, rowsD = domain.transform_points(lon, lat, 1)\n gpi = ((colsD >= 0 + domainMargin) *\n (rowsD >= 0 + domainMargin) *\n (colsD <= domain.shape()[1] - domainMargin) *\n (rowsD <= domain.shape()[0] - domainMargin))\n if verbose:\n print('Domain filter: %d -> %d' % (len(keyPoints), len(gpi[gpi])))\n return list(np.array(keyPoints)[gpi]), descr[gpi]\n\ndef max_drift_filter(n1, x1, y1, n2, x2, y2, max_speed=0.5, max_drift=None, verbose=False, **kwargs):\n ''' Filter out too high drift (m/s)\n Parameters\n ----------\n n1 : First Nansat object\n x1 : 1D vector - X coordinates of keypoints on image 1\n y1 : 1D vector - Y coordinates of keypoints on image 1\n n2 : Second Nansat object\n x2 : 1D vector - X coordinates of keypoints on image 2\n y2 : 1D vector - Y coordinates of keypoints on image 2\n max_speed : float - maximum allowed ice drift speed, m/s\n max_drift : float - maximum allowed drift distance, meters\n\n Returns\n -------\n x1 : 1D vector - filtered source X coordinates on img1, pix\n y1 : 1D vector - filtered source Y coordinates on img1, pix\n x2 : 1D vector - filtered destination X coordinates on img2, pix\n y2 : 1D vector - filtered destination Y coordinates on img2, pix\n\n Note\n ----\n If time_coverage_start is not avaialabe from input data then the threshold\n is not used and the user should set value for .\n\n '''\n # chack if input datasets have time stamp\n try:\n n1_time_coverage_start = 
n1.time_coverage_start\n n2_time_coverage_start = n2.time_coverage_start\n except ValueError:\n data_has_timestamp = False\n else:\n data_has_timestamp = True\n\n if data_has_timestamp:\n # if datasets have timestamp compare with speed\n gpi = get_speed_ms(n1, x1, y1, n2, x2, y2) <= max_speed\n elif max_drift is not None:\n # if datasets don't have timestamp compare with displacement\n gpi = 1000.*get_displacement_km(n1, x1, y1, n2, x2, y2) <= max_drift\n else:\n # if max displacement is not given - raise error\n raise ValueError('''\n\n Error while filtering matching vectors!\n Input data does not have time stamp, and is not set.\n Either use data supported by Nansat, or\n provide a value for - maximum allowed ice displacement between images (meters).\n Examples:\n uft, vft, lon1ft, lat1ft, lon2ft, lat2ft = sid.get_drift_FT(max_drift=10000)\n x1, y1, x2, y2 = feature_tracking(n1, n2, max_drift=10000)\n Vectors with displacement higher than will be removed.\n\n ''')\n if verbose:\n print('MaxDrift filter: %d -> %d' % (len(x1), len(gpi[gpi])))\n return x1[gpi], y1[gpi], x2[gpi], y2[gpi]\n\ndef lstsq_filter(x1, y1, x2, y2, psi=200, order=2, verbose=False, **kwargs):\n ''' Remove vectors that don't fit the model x1 = f(x2, y2)^n\n\n Fit the model x1 = f(x2, y2)^n using least squares method\n Simulate x1 using the model\n Compare actual and simulated x1 and remove points where error is too high\n Parameters\n ----------\n x1, y1, x2, y2 : coordinates of start and end of displacement [pixels]\n psi : threshold error between actual and simulated x1 [pixels]\n Returns\n -------\n x1 : 1D vector - filtered source X coordinates on img1, pix\n y1 : 1D vector - filtered source Y coordinates on img1, pix\n x2 : 1D vector - filtered destination X coordinates on img2, pix\n y2 : 1D vector - filtered destination Y coordinates on img2, pix\n '''\n if len(x1) == 0:\n return map(np.array, [[],[],[],[]])\n # interpolate using N-order polynomial\n x2sim, y2sim = interpolation_poly(x1, y1, x2, y2, x1, y1, order=order)\n\n # find error between actual and simulated x1\n err = np.hypot(x2 - x2sim, y2 - y2sim)\n\n # find pixels with error below psi\n gpi = err < psi\n\n if verbose:\n print('LSTSQ filter: %d -> %d' % (len(x1), len(gpi[gpi])))\n return x1[gpi], y1[gpi], x2[gpi], y2[gpi]\n\n\ndef feature_tracking(n1, n2, **kwargs):\n ''' Run Feature Tracking Algrotihm on two images\n Parameters\n ----------\n n1 : First Nansat object with 2D UInt8 matrix\n n2 : Second Nansat object with 2D UInt8 matrix\n domainMargin : int - how much to crop from size of domain\n max_speed : float - maximum allow ice drift speed, m/s\n max_drift : float - maximum allow ice drift displacement, m\n **kwargs : parameters for functions:\n find_key_points\n domain_filter\n get_match_coords\n max_drift_filter\n lstsq_filter\n Returns\n -------\n x1 : 1D vector - source X coordinates on img1, pix\n y1 : 1D vector - source Y coordinates on img1, pix\n x2 : 1D vector - destination X coordinates on img2, pix\n y2 : 1D vector - destination Y coordinates on img2, pix\n '''\n # find many keypoints\n kp1, descr1 = find_key_points(n1[1], **kwargs)\n kp2, descr2 = find_key_points(n2[1], **kwargs)\n if len(kp1) < 2 or len(kp2) < 2:\n return (np.array([]),)*4\n\n # filter keypoints by Domain\n kp1, descr1 = domain_filter(n1, kp1, descr1, n2, **kwargs)\n if len(kp1) < 2:\n return (np.array([]),)*4\n kp2, descr2 = domain_filter(n2, kp2, descr2, n1, **kwargs)\n if len(kp2) < 2:\n return (np.array([]),)*4\n\n # find coordinates of matching key points\n 
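# (get_match_coords also applies the Lowe ratio test before returning coordinates)\n    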
x1, y1, x2, y2 = get_match_coords(kp1, descr1, kp2, descr2, **kwargs)\n\n # filter out pair with too high drift\n x1, y1, x2, y2 = max_drift_filter(n1, x1, y1, n2, x2, y2, **kwargs)\n\n # filter out inconsistent pairs\n x1, y1, x2, y2 = lstsq_filter(x1, y1, x2, y2, **kwargs)\n\n return x1, y1, x2, y2\n","repo_name":"nansencenter/sea_ice_drift","sub_path":"sea_ice_drift/ftlib.py","file_name":"ftlib.py","file_ext":"py","file_size_in_byte":10540,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"} +{"seq_id":"34647873195","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0003_auto_20170705_1052'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Receiver',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.CharField(verbose_name='收货人名字', null=True, max_length=20)),\n ('city', models.CharField(verbose_name='城市', null=True, max_length=30, blank=True)),\n ('address', models.CharField(verbose_name='详细地址', null=True, max_length=70)),\n ('telephone', models.IntegerField(verbose_name='联系电话')),\n ],\n options={\n 'verbose_name': '收货人',\n 'verbose_name_plural': '收货人',\n 'ordering': ('id',),\n },\n ),\n migrations.AlterModelOptions(\n name='emailverifyrecord',\n options={'verbose_name': '邮箱验证', 'verbose_name_plural': '邮箱验证', 'ordering': ('send_date',)},\n ),\n migrations.AddField(\n model_name='userprofile',\n name='city',\n field=models.CharField(verbose_name='城市', null=True, max_length=30, blank=True),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='address',\n field=models.CharField(verbose_name='详细地址', null=True, max_length=70, blank=True),\n ),\n migrations.AddField(\n model_name='receiver',\n name='user',\n field=models.ForeignKey(verbose_name='账户拥有着', to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"derekCRO/djangoforeshop","sub_path":"eshop/users/migrations/0004_auto_20170706_1038.py","file_name":"0004_auto_20170706_1038.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29379045515","text":"import gzip\nimport numpy as np\n\n\ndef parse_data(data_paths, share_context=False, is_shared_context_first=True):\n for data_path in data_paths:\n with gzip.open(data_path, 'rt') as file:\n for line in file:\n chunks = line.split('|')\n rewards = {}\n features = {}\n for idx, chunk in enumerate(chunks):\n values = chunk.rstrip().split(' ')\n if idx == 0:\n timestamp = str(values[0])\n rewards[int(values[1])] = float(values[2])\n elif len(values) == 7:\n key = -1 if values[0] == 'user' else int(values[0])\n features[key] = np.array(list(map(lambda x: float(x.split(':')[1]), sorted(values[1:]))))\n action_ids = list(set(features.keys()).difference({-1}))\n if not share_context:\n context = {action_id: features[-1] for action_id in action_ids}\n elif is_shared_context_first:\n context = {action_id: np.append(np.outer(features[-1], features[action_id]).flatten()[1:], \n features[-1]) for action_id in action_ids}\n else:\n context = {action_id: np.append(features[-1], np.outer(\n features[-1], features[action_id]).flatten()[1:]) for action_id in action_ids}\n yield timestamp, context, 
rewards\n","repo_name":"aldente0630/multi_armed_bandit","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"8859857225","text":"import json\nfrom bs4 import BeautifulSoup\n\nf = open('newsfeed.json', 'r')\narticles = json.load(f)\nf.close()\nfor article in articles:\n html = article['html']\n soup = BeautifulSoup(html, 'lxml')\n author = soup.body.h1.next.next\n while len(author.get_text()) < 3:\n author = author.next\n author = author.get_text()\n article['authors'].append(author)\n\nf = open('articles_informador.json', 'w')\njson.dump(articles, f)\n","repo_name":"threestone-analytics/jamxio","sub_path":"Crawler/get_author.py","file_name":"get_author.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33620376585","text":"'''\nCreated on Jan 3, 2016\n\n@author: igzo\n'''\nfrom src.model.configuration.SiteConfiguration import SiteConfiguration\nfrom src.model.configuration.UrlBasicConfiguration import UrlBasicConfiguration\nfrom src.model.configuration.UrlComplexConfiguration import UrlComplexConfiguration\n\nsites = {}\n\n\nsite_ti = \"theindependentsf.com\"\nstart_ti = 'http://www.theindependentsf.com/'\nbase_ti = 'http://www.theindependentsf.com/event/'\nkey_ti = '/event/'\nbasic_conf_ti = UrlBasicConfiguration(start=start_ti, base=base_ti, key=key_ti)\nsite_conf_ti = SiteConfiguration(site=site_ti, url_config=basic_conf_ti)\n\n\nsite_f = \"thefillmore.com\"\nstart_f = 'http://thefillmore.com/calendar/'\nbase_f = 'http://thefillmore.com/event/'\nkey_f = 'thefillmore.com/event/'\nbasic_conf_f = UrlBasicConfiguration(start=start_f, base=base_f, key=key_f)\nsite_conf_f = SiteConfiguration(site=site_f, url_config=basic_conf_f)\n\n\nsite_cftm = \"centerfornewmusic.com\"\nstart_cftm = 'http://centerfornewmusic.com/calendar/'\nbase_cftm = 'http://centerfornewmusic.com/calendar/'\nkey_cftm = 'centerfornewmusic.com/calendar/'\nbasic_conf_cftm = UrlBasicConfiguration(start=start_cftm, base= base_cftm, key=key_cftm)\nsite_conf_cftm = SiteConfiguration(site=site_cftm, url_config=basic_conf_cftm)\n\nsite_dg = 'http://dancersgroup.org/'\nstart_dg = 'http://dancersgroup.org/events/'\nbase_dg = 'http://dancersgroup.org/event/'\nkey_dg = '/event/'\nnext_url_dg = 'http://dancersgroup.org/events/page/'\ncomplex_conf_dg = UrlComplexConfiguration(start=start_dg, base=base_dg, key=key_dg, following=next_url_dg)\nsite_conf_dg = SiteConfiguration(site=site_dg, url_config=complex_conf_dg)\n\nsite_sffc = 'http://sf.funcheap.com/'\nstart_sffc = 'http://sf.funcheap.com/events/'\nbase_sffc = 'http://sf.funcheap.com/'\nkey_sffc = 'http://sf.funcheap.com/'\nnext_url_sffc = 'http://sf.funcheap.com/events/page/'\ncomplex_conf_sffc = UrlComplexConfiguration(start=start_sffc, base=base_sffc, key=key_sffc, following=next_url_sffc)\nsite_conf_sffc = SiteConfiguration(site=site_sffc, url_config=complex_conf_sffc)\n\nsite_bs = 'http://www.booksinc.net/'\nstart_bs = 'http://www.booksinc.net/event'\nbase_bs = 'http://www.booksinc.net/event/'\nkey_bs = '/event/'\nnext_url_bs = 'http://www.booksinc.net/event/2016'\ncomplex_conf_bs = UrlComplexConfiguration(start=start_bs, base=base_bs,key=key_bs,following=next_url_bs)\nsite_conf_bs = SiteConfiguration(site=site_bs, url_config=complex_conf_bs)\n\nsite_dnal = 'https://www.dnalounge.com/'\nstart_dnal = 
'https://www.dnalounge.com/'\nbase_dnal = 'https://www.dnalounge.com/calendar/'\nkey_dnal = 'https://www.dnalounge.com/calendar/'\nbasic_conf_dnal = UrlBasicConfiguration(start=start_dnal, base=base_dnal, key=key_dnal)\nb_site_conf_dnal = SiteConfiguration(site=site_dnal, url_config=basic_conf_dnal)\n\nnext_url_dnal = 'https://www.dnalounge.com/calendar/2016/0'\ncomplex_conf_dnal = UrlComplexConfiguration(start=start_dnal, base=base_dnal, key=key_dnal, following=next_url_dnal)\nc_site_conf_dnal = SiteConfiguration(site=site_dnal, url_config=complex_conf_dnal)\n\nsite_cb = 'cityboxoffice.com'\nstart_cb = 'https://www.cityboxoffice.com/'\nbase_cb = 'https://www.cityboxoffice.com/eventperformances'\nkey_cb = 'eventperformances'\nbasic_conf_cb = UrlBasicConfiguration(start=start_cb, base=base_cb, key=key_cb)\nsite_conf_cb = SiteConfiguration(site=site_cb, url_config=basic_conf_cb)\n\nsite_js = 'engageinteractive.co.uk'\nstart_js = 'http://engageinteractive.co.uk/'\nbase_js = 'http://engageinteractive.co.uk/work/'\nkey_js = 'work'\nbasic_conf_js = UrlBasicConfiguration(start=start_js,base=base_js,key=key_js)\nsite_conf_js = SiteConfiguration(site=site_js,url_config=basic_conf_js)\n\nsites[site_ti] = site_conf_ti\nsites[site_bs] = site_conf_bs\nsites[site_cftm] = site_conf_cftm\nsites[site_dg] = site_conf_dg\nsites[site_dnal] = b_site_conf_dnal\nsites[site_sffc] = site_conf_sffc\nsites[site_f] = site_conf_f\nsites[site_cb] = site_conf_cb\nsites[site_js] = site_conf_js","repo_name":"bright-pan/ungoliant","sub_path":"ungoliant/src/main/Sites.py","file_name":"Sites.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33448014831","text":"import os\nimport re\nimport argparse\n\nfileTypes = re.compile(r\"\\.(accdb|accde)$\", re.IGNORECASE)\n# Fill this out with network locations you want to search\n\n\nclass programArgs:\n updateFileLocationList = False\n overwriteFileLocationList = False\n searchString = \"\"\n fileListFileName = r\"c:\\scripts\\filelist.txt\"\n\n\nclass screen:\n def __init__(self):\n self.curSearch = \"\"\n self.count = 0\n self.filesFound = []\n\n def incrementCount(self):\n self.count += 1\n self._refreshScreen()\n\n def appendFoundFile(self, file):\n self.filesFound.append(file)\n self._refreshScreen()\n\n def updateCurSearch(self, path):\n self.curSearch = path\n self._refreshScreen()\n\n def __str__(self):\n strret = f\"searching {self.curSearch}, found:\\n\"\n strret += '\\n'.join(self.filesFound)\n return strret\n\n def _refreshScreen(self):\n os.system('cls')\n scrString = f\"searching {self.curSearch}, found:\\n\"\n scrString += '\\n'.join(self.filesFound)\n scrString += '\\n'.join(self.filesFound)\n print(scrString)\n\n\ndef walker(path, searchstring, scr):\n for root, dirs, files in os.walk(path):\n scr.updateCurSearch(root)\n for file_ in files:\n if fileTypes.search(str(file_)):\n try:\n openfile = open(os.path.join(root, file_))\n if searchstring.lower() in openfile.read().lower():\n scr.appendFoundFile(os.path.join(root, file_))\n except:\n scr.appendFoundFile(f'file {file_} unreadable, skipping')\n finally:\n openfile.close()\n\n\ndef findFilePaths(path, args):\n outlist = []\n for root, dirs, files in os.walk(path):\n for file_ in files:\n if fileTypes.search(str(file_)):\n if root not in outlist:\n outlist.append(root)\n\n if os.path.exists(r'c:\\scripts\\filelist.txt'):\n append_write = 'w+'\n else:\n append_write = 'w+'\n\n with 
open(r\"c:\\scripts\\fileList.txt\", append_write) as myFile:\n myFile.write(','+','.join(outlist))\n\n\ndef processArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"search_string\")\n parser.add_argument(\"-file_location_list\",\n default=r\"c:\\scripts\\filelist.txt\")\n UpdateHelp = \"set if you want to update a file\\\n location list instead of search for a string\"\n parser.add_argument(\"-uf\", \"-update-files\", action=\"store_true\",\n help=UpdateHelp)\n parser.add_argument(\"-nf\", \"-newfile\", action=\"store_true\")\n\n args = parser.parse_args()\n pargs = programArgs()\n\n pargs.searchString = args.search_string\n pargs.fileListFileName = args.file_location_list\n pargs.updateFileLocationList = args.uf\n pargs.overwriteFileLocationList = args.nf\n\n return pargs\n\n\ndef mainApp():\n scr = screen()\n PathsList = []\n\n args = processArguments()\n\n if(not args.updateFileLocationList):\n with open(args.fileListFileName) as fileslist:\n for line in fileslist:\n currentLine = line.split(\",\")\n for cl in currentLine:\n PathsList.append(cl)\n\n for location in PathsList:\n if(args.updateFileLocationList):\n findFilePaths(location, args)\n else:\n walker(location, args.searchString, scr)\n\n\nmainApp()\n","repo_name":"smcnabb83/MyPythonUtilities","sub_path":"searchforfile.py","file_name":"searchforfile.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43569621249","text":"# manager/management/commands/import_client_from_xls\n\nimport xlrd\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.utils.encoding import smart_str\nfrom manager.models import Client\n\nSILENT, NORMAL, VERBOSE, VERY_VERBOSE = 0, 1, 2, 3\n\n\nclass Command(BaseCommand):\n help = \"Imports clients from a local XLS file. 
\" \\\n \"Expects name, email.\"\n\n def add_arguments(self, parser):\n parser.add_argument('file_path')\n\n def handle(self, *args, **options):\n print(\"Reading xls...\")\n file_path = options['file_path']\n wb = xlrd.open_workbook(file_path)\n print(\"The number of worksheets is {0}\".format(wb.nsheets))\n print(\"Worksheet name(s): {0}\".format(wb.sheet_names()))\n\n verbosity = int(options.get(\"verbosity\", NORMAL))\n if verbosity >= NORMAL:\n print(\"=== XLS file imported ===\")\n\n first_sheet = wb.sheet_by_index(0) # get a single sheet\n # print(\"{0} {1} {2}\".format(first_sheet.name, first_sheet.nrows, first_sheet.ncols))\n\n for sh in wb.sheets():\n # let's skip the first sheet\n if sh != first_sheet:\n print(\"Sheet name: {0}; number of rows: {1}; numbers of columns: {2};\".format(sh.name, sh.nrows,\n sh.ncols))\n for rownum in range(sh.nrows):\n # let's skip the column captions and blank rows\n if rownum > 2:\n (name, email, telephone, address) = sh.row_values(rownum)\n client, created = Client.objects.get_or_create(\n name=name,\n email=email,\n )\n if verbosity >= NORMAL:\n print(\" - %s\" % smart_str(client.name))\n\n\n# pip install xlrd\n\n# command to run from terminal: python manage.py import_client_from_xls /Users//Documents//clients.xls\n\n","repo_name":"nmacambira/DjangoStudies","sub_path":"EmployeeManager/managerproject/manager/management/commands/import_client_from_xls.py","file_name":"import_client_from_xls.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70825479130","text":"from app import db\nfrom models import Restaurant, User, Order, Meal\nfrom datetime import datetime\nfrom collections import Counter\nimport collections\n\n\ndef query_meals(restaurant_id):\n meals_result = Meal.query.join(Meal.restaurant).filter(Restaurant.restaurant_id == int(restaurant_id)).all()\n meals = [meal.to_dict() for meal in meals_result]\n return meals\n\n\ndef query_meal_types(restaurant_id):\n types_result = Meal.query.with_entities(Meal.meal_type).filter(Meal.restaurant_id == restaurant_id).distinct()\n types = []\n for i in types_result:\n types.append({'meal_type': i.meal_type})\n return types\n\n\ndef add_new_meal(newMeal):\n # restaurant = Restaurant.query.filter(newMeal['restaurant_id'] == Restaurant.restaurant_id).first()\n new_meal = Meal(meal_name=newMeal['meal'][\"meal_name\"],\n meal_description=newMeal['meal']['meal_description'],\n meal_price=newMeal['meal']['meal_price'],\n meal_type=newMeal['meal']['meal_type'],\n restaurant_id=newMeal['restaurant_id'])\n db.session.add(new_meal)\n db.session.commit()\n\n\ndef remove_meal(removable_meal):\n Meal.query.filter(Meal.meal_id == removable_meal['meal_id']).delete()\n db.session.commit()\n\n\ndef query_meal_data_by_id(meal_id):\n meal = Meal.query.filter(Meal.meal_id == meal_id).first()\n response_meal = {'meal_id': meal.meal_id,\n 'meal_name': meal.meal_name,\n 'meal_description': meal.meal_description,\n 'meal_type': meal.meal_type,\n 'meal_price': meal.meal_price,\n 'restaurant_id': meal.restaurant_id}\n return response_meal\n\n\ndef edit_meal(params):\n meal = Meal.query.filter(Meal.meal_id == params['meal_id']).first()\n restaurant = Restaurant.query.filter(Restaurant.restaurant_id == params['restaurant_id']).first()\n user = User.query.filter(User.user_id == restaurant.user_id).first()\n modified_meal = params['modified_meal']\n if user.password == modified_meal['current_password']:\n meal.meal_name = 
modified_meal['meal_name']\n meal.meal_description = modified_meal['meal_description']\n meal.meal_type = modified_meal['meal_type']\n meal.meal_price = modified_meal['meal_price']\n meal.restaurant_id = params['restaurant_id']\n meal.image_source = meal.image_source\n db.session.commit()\n status = 200\n return status\n else:\n status = 401\n return status\n\n\ndef query_meal_type_stat(username, restaurant_id):\n user = User.query.filter(User.user_name == username).first()\n meal_types = Meal.query.filter(Meal.restaurant_id == restaurant_id).group_by(Meal.meal_type).all()\n orders_result = Order.query.filter(Order.restaurant_id == restaurant_id).all()\n meals = []\n for i in meal_types:\n tmp = 0\n for j in orders_result:\n for k in j.order_meals:\n if k.order_id == j.order_id:\n for l in k.meals:\n if i.meal_type == l.meal_type:\n tmp += 1\n meals.append({\"y\": tmp, \"label\": i.meal_type})\n return meals\n\n\ndef query_order_datetime_stat(restaurant_id):\n orders_result = Order.query.filter(Order.restaurant_id == restaurant_id).all()\n dates = []\n for i in orders_result:\n dates.append(datetime.strptime(i.order_date, \"%Y-%m-%d %H:%M:%S\").hour)\n\n a = dict(Counter(dates))\n ordered_a = collections.OrderedDict(sorted(a.items()))\n values = []\n keys = []\n dates_result = []\n for i in ordered_a.values():\n values.append(i)\n for i in ordered_a:\n keys.append(i)\n for i in range(0, len(values)):\n dates_result.append({\"y\": values[i], \"label\": keys[i]})\n\n return dates_result\n","repo_name":"gabori/DissertationApp","sub_path":"nyilvantarto/meal.py","file_name":"meal.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11936184360","text":"import torch\nfrom torchvision import transforms\nfrom PIL import Image\nfrom torchmetrics import PeakSignalNoiseRatio\nfrom torchmetrics import StructuralSimilarityIndexMeasure\nfrom PIL import Image\nfrom SINet.MyTesting import create_mask\nfrom DMFN.pipe import inpaint_face\nimport imageio\nimport cv2\nimport numpy as np\nfrom flask import Flask\nfrom flask import request\nimport os\nimport fnmatch\nfrom scipy import ndimage\nimport skimage \nimport shutil\n\ndef repaint(img_path):\n # read in masked image\n image = cv2.imread(img_path)\n #create image for inpainting model\n dest = shutil.copyfile(img_path, \"out/image.jpg\")\n # resize the image to be 256 x 256\n image = cv2.resize(image, (256,256), interpolation = cv2.INTER_AREA)\n # create the mask from the image segmentation network\n res = create_mask(image)\n # convert the image back to uint8, with regular pixel values\n res=cv2.normalize(res,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F).astype(np.uint8)\n # convert the mask to binary image\n res[res<10] =0\n res[res>=10] =255\n # post processing mask to fill potential holes\n kernel = np.ones((7,7), np.uint8)\n res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)\n res = cv2.dilate(res, kernel, iterations=2)\n res = ndimage.binary_fill_holes(res,structure =kernel)\n res = ndimage.binary_fill_holes(res,structure =kernel)\n res = skimage.img_as_uint(res)\n imageio.imwrite('./out/mask.jpg', res)\n # use inpaint model to get the predicted image\n output = inpaint_face()\n # output the predicted image\n if not os.path.exists(\"./out\"):\n os.makedirs(\"./out\")\n count = len(fnmatch.filter(os.listdir(\"./out\"), '*.*'))\n filepath=\"./out/output_\"+\"test\"+\".jpg\"\n imageio.imwrite(filepath, output)\n # Send filepath 
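The evaluation loop above scores each inpainted image against its ground truth with torchmetrics. The two metric calls in isolation, on fake batches (the `(1, 3, 256, 256)` shape is an assumption matching the 256x256 resize in `repaint`; both metrics expect `(N, C, H, W)` tensors):

```python
import torch
from torchmetrics import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure

psnr = PeakSignalNoiseRatio()
ssim = StructuralSimilarityIndexMeasure()

target = torch.rand(1, 3, 256, 256)               # stands in for Original
preds = target + 0.05 * torch.randn_like(target)  # stands in for predicted

print(psnr(preds, target))  # higher is better
print(ssim(preds, target))  # 1.0 means identical
```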
to JS\n return filepath\n\n\n\n# Initialise metrics\npsnr = PeakSignalNoiseRatio()\nssim = StructuralSimilarityIndexMeasure()\n# Paths\nmasked_path = \"Datasets/FFHQ/images256x256_masked\"\nGT_path = \"Datasets/FFHQ/images256x256\"\n\n# Misc vars\nmax_ssim=-2\nmax_ssim_name=\"\"\nmin_ssim=2\nmin_ssim_name=\"\"\n\nmin_psnr=100\nmin_psnr_name=\"\"\nmax_psnr=-2\nmax_psnr_name=\"\"\n\navg_psnr=0\navg_ssim=0\n\nc=0\n\n# Transform to tensor\ntrans= transforms.Compose([transforms.ToTensor()])\n\nfor file in os.listdir(masked_path):\n print(c)\n # get filepaths\n filepath = os.path.join(masked_path, file)\n GT_filepath = os.path.join(GT_path, file)\n # Retrieve images\n Original = Image.open(GT_filepath)\n im_path = repaint(filepath)\n predicted = Image.open(im_path) \n # Add batch channel and apply transform\n Original = trans(Original).unsqueeze(0)\n predicted = trans(predicted).unsqueeze(0)\n\n pval = psnr(predicted, Original)\n sval = ssim(predicted, Original)\n # Update avgs\n avg_psnr+=pval\n avg_ssim+=sval\n # Update min and maxs\n if pval > max_psnr:\n max_psnr = pval\n max_psnr_name = filepath\n if pval < min_psnr:\n min_psnr = pval\n min_psnr_name = filepath\n\n if sval > max_ssim:\n max_ssim = sval\n max_ssim_name = filepath\n if sval < min_ssim:\n min_ssim = sval\n min_ssim_name = filepath\n c+=1\n if c>1000:\n break\n\n\navg_psnr=avg_psnr/c\navg_ssim=avg_ssim/c\n# Print results\nprint(\"Avg psnr:\",avg_psnr)\nprint(\"Avg ssim:\",avg_ssim)\nprint(\"\\n\")\nprint(\"max psnr:\",max_psnr)\nprint(\"max psnr path:\",max_psnr_name)\nprint(\"min psnr:\",min_psnr)\nprint(\"min psnr path:\",min_psnr_name)\nprint(\"\\n\")\nprint(\"max ssim:\",max_ssim)\nprint(\"max ssim path:\",max_ssim_name)\nprint(\"min ssim:\",min_ssim)\nprint(\"min ssim path:\",min_ssim_name)\nprint(\"\\n\")\n","repo_name":"JMithoo/Dissertation","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13521893715","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 1 16:31:37 2021\n\n@author: axvargas\n\"\"\"\n\n#Clustering jerarquico\n\n#libraries import\nimport pandas as pd \nimport matplotlib.pyplot as plt\n\n#data import\ndataset = pd.read_csv('Mall_Customers.csv')\n\n#Matrix de caracteristicas\nX = dataset.iloc[:,[3,4]].values \n\n#Utilizar el dendrograma para encontrar el numero optimo de clusters\nimport scipy.cluster.hierarchy as sch\ndendrograma = sch.dendrogram(sch.linkage(X, method='ward'))\n\nplt.title(\"Dendrograma\")\nplt.xlabel(\"Clientes\")\nplt.ylabel(\"Distancia Euclidea\")\nplt.show()\n#Despues de analizar el grafico se define que el numero de clusters optimo es 5\n\n#Ajustar el clustering jerarquico\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')\ny_hc = hc.fit_predict(X)\n\n#Visualizacion de los clusters\nplt.scatter(X[y_hc == 0,0], X[y_hc == 0, 1], s=100, c=\"red\", label=\"Cautos\")\nplt.scatter(X[y_hc == 1,0], X[y_hc == 1, 1], s=100, c=\"blue\", label=\"Estandar\")\nplt.scatter(X[y_hc == 2,0], X[y_hc == 2, 1], s=100, c=\"green\", label=\"Objetivo\")\nplt.scatter(X[y_hc == 3,0], X[y_hc == 3, 1], s=100, c=\"cyan\", label=\"Descuidados\")\nplt.scatter(X[y_hc == 4,0], X[y_hc == 4, 1], s=100, c=\"magenta\", label=\"Conservadores\")\nplt.title(\"Clustering de clientes\")\nplt.xlabel(\"Ingresos anuales (en miles de $)\")\nplt.ylabel(\"Puntuacion de gastos 
(1-100)\")\nplt.legend()\nplt.show()","repo_name":"axvargas/machine-learning-course","sub_path":"15-Clustering jerarquico(Clustering)/hc.py","file_name":"hc.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1788140436","text":"#\n# @lc app=leetcode id=264 lang=python3\n#\n# [264] Ugly Number II\n#\n# https://leetcode.com/problems/ugly-number-ii/description/\n#\n# algorithms\n# Medium (35.18%)\n# Total Accepted: 93.8K\n# Total Submissions: 266.6K\n# Testcase Example: '10'\n#\n# Write a program to find the n-th ugly number.\n# \n# Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. \n# \n# Example:\n# \n# \n# Input: n = 10\n# Output: 12\n# Explanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10\n# ugly numbers.\n# \n# Note:  \n# \n# \n# 1 is typically treated as an ugly number.\n# n does not exceed 1690.\n# \n#\nimport heapq\nclass Solution:\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n\n if n == 1:\n return 1\n \n factors = [2,3,5]\n\n r = 1\n heapq.heapify(factors)\n for _ in range(2, n+1):\n\n ele = heapq.heappop(factors)\n if ele*2 not in factors:\n heapq.heappush(factors, ele*2)\n\n if ele*3 not in factors:\n heapq.heappush(factors, ele*3)\n\n if ele*5 not in factors:\n heapq.heappush(factors, ele*5)\n\n return ele\n\n\n# n = 11\n# s = Solution()\n# print(s.nthUglyNumber(n))\n'''\n时间复杂度为 O(n**2)\n\n ✔ Accepted\n ✔ 596/596 cases passed (1072 ms)\n ✔ Your runtime beats 12.19 % of python3 submissions\n\n'''","repo_name":"Provinm/leetcode","sub_path":"array/264.ugly-number-ii.py","file_name":"264.ugly-number-ii.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5929269187","text":"import requests\nimport os\n\nimport wind.queries\nfrom wind.queries import make_query\n\ncwd = os.getcwd()\nconfig = None\n\n\nclass InfraredUser:\n \"\"\"Class to handle Infrared communication for the InfraredUser\"\"\"\n\n def __init__(self, reset_user_at_endpoint=False, uuid=None, token=None):\n self.uuid = uuid\n self.token = token\n\n if not self.uuid:\n self.infrared_user_login()\n\n if reset_user_at_endpoint:\n self.delete_all_projects()\n\n # logs in infrared user\n def infrared_user_login(self):\n user_creds = {\"username\": os.getenv(\"INFRARED_USERNAME\"), \"password\": os.getenv(\"INFRARED_PASSWORD\")}\n request = requests.post(os.getenv(\"INFRARED_URL\"), json=user_creds)\n\n if request.status_code == 200:\n # get the auth token from the returned cookie\n print(request.cookies)\n self.uuid = request.cookies.get(\"InFraReDClientUuid\")\n self.token = \"InFraReD=\" + request.cookies.get(\"InFraReD\")\n else:\n raise Exception(\"Failed to login to infrared by returning code of {}\".format(request.status_code))\n \n def export_to_json(self):\n return {\n \"uuid\": self.uuid,\n \"token\": self.token,\n }\n # deletes all projects for the infrared user\n def delete_all_projects(self):\n for project_uuid in self.get_projects_uuids():\n print(project_uuid, \"deleted\")\n make_query(wind.queries.delete_project_query(self.uuid, project_uuid), self)\n\n # deletes all projects belonging to a city_pyo_user\n def delete_all_projects_for_city_pyo_user(self, city_pyo_user):\n all_projects = self.get_all_projects() \n for project_uuid, project in all_projects.items():\n if city_pyo_user in project[\"projectName\"]:\n 
print(project_uuid, \"deleted\")\n make_query(wind.queries.delete_project_query(self.uuid, project_uuid), self)\n\n\n # gets all the user's projects\n def get_all_projects(self):\n all_projects = make_query(wind.queries.get_projects_query(self.uuid), self)\n\n try:\n projects = get_value(\n all_projects,\n [\"data\", \"getProjectsByUserUuid\", \"infraredSchema\", \"clients\", self.uuid, \"projects\"]\n )\n except KeyError:\n print(\"no projects for user\")\n return {}\n\n return projects\n\n # gets all the user's projects\n def get_projects_uuids(self):\n all_projects = self.get_all_projects()\n\n return all_projects.keys()\n\n # the root snapshot of the infrared project will be used to create buildings and perform analysis\n def get_root_snapshot_id_for_project_uuid(self, project_uuid):\n graph_snapshots_path = [\"data\", \"getSnapshotsByProjectUuid\", \"infraredSchema\", \"clients\", self.uuid,\n \"projects\", project_uuid, \"snapshots\"]\n snapshot = make_query(wind.queries.get_snapshot_query(project_uuid), self)\n\n snapshot_uuid = list(get_value(snapshot, graph_snapshots_path).keys())[0]\n\n if not snapshot_uuid:\n print(\"could not get snapshot uuid\")\n exit()\n\n return snapshot_uuid\n\n # infrared needs any request to be performed by user at least 1 per hour to keep account alive\n def keep_alive_ping(self):\n self.get_projects_uuids()\n\n\"\"\"\n# TODO move to 1 single file\n# make query to infrared api\ndef make_query(query, token_cookie):\n \"\"\n Make query response\n auth token needs to be send as cookie\n \"\"\n # print(query)\n\n # AIT requested a sleep between the requests. To let their servers breath a bit.\n # time.sleep(0.5)\n\n request = requests.post(os.getenv(\"INFRARED_URL\") + '/api', json={'query': query}, headers={'Cookie': token_cookie, 'origin': os.getenv('INFRARED_URL')})\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception(\"Query failed to run by returning code of {}. {}\".format(request.status_code, query))\n\"\"\"\n\n# gets a values from a nested object\ndef get_value(data, path):\n for prop in path:\n if len(prop) == 0:\n continue\n if prop.isdigit():\n prop = int(prop)\n data = data[prop]\n return data\n\n","repo_name":"digitalcityscience/COUP-wind","sub_path":"wind/infrared_user.py","file_name":"infrared_user.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38948212102","text":"def function(normal, *items, **kwargs):\n print(normal)\n for item in items:\n print(item)\n\n print(\"\\nNow i would like to introduce our some spacial candidates: \\n\")\n for key, value in kwargs.items():\n print(f\"{key} is the {value}\")\n\nItems = [\"Chair\", \"Table\", \"Podium\", \"Stage\", \"Mic\", \"Speakers\", \"Child water\"]\nNormal = \"These are the items which we will have to collect eairy\"\ncandidate = {\"Hariom\":\"Moniter\", \"Abhishek\":\"PT Teacher\", \"Sonu\":\"Cordinator\", \"Pankaj\":\"Directer\", \"Sohan\":\"Manager\"}\nfunction(Normal, *Items, **candidate)\n","repo_name":"HARIOM317/Programing-languages-and-source-code","sub_path":"Programming Languages/Python/11. 
args and kwargs/3_Kwargs.py","file_name":"3_Kwargs.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"32934590760","text":"import sqlite3\r\n\r\n\r\nclass Bdd:\r\n def __init__(self, chemin_bdd):\r\n self.chemin_bdd = chemin_bdd\r\n\r\n def recuperer_personnes(self):\r\n connexion = sqlite3.connect(self.chemin_bdd)\r\n curseur = connexion.cursor()\r\n requete_sql = \"\"\"SELECT * FROM projet;\"\"\"\r\n resultat = curseur.execute(requete_sql)\r\n personnes = resultat.fetchall()\r\n connexion.close()\r\n return personnes\r\n\r\n def rechercher_eleves(self, matricule):\r\n connexion = sqlite3.connect(self.chemin_bdd)\r\n curseur = connexion.cursor()\r\n\r\n requete_sql = f\"\"\"SELECT * FROM projet p WHERE p.Matricule= '{matricule}';\"\"\"\r\n resultat = curseur.execute(requete_sql)\r\n eleves = resultat.fetchall()\r\n connexion.close()\r\n return eleves\r\n\r\n def ajouter_eleve(self, eleve):\r\n (nom, prenom, naissance, academie, etablissement) = eleve\r\n connexion = sqlite3.connect(self.chemin_bdd)\r\n curseur = connexion.cursor()\r\n\r\n requete_sql = f\"\"\"INSERT INTO projet (Nom, Prenom, DateDeNaissance, Etablissement, Academie) VALUES ('{nom}', '{prenom}', '{naissance}', '{etablissement}', '{academie}');\"\"\"\r\n print(requete_sql)\r\n resultat = curseur.execute(requete_sql)\r\n eleves = resultat.fetchall()\r\n connexion.commit()\r\n connexion.close()\r\n return None\r\n \r\n def supprimer_eleve(self, matricule):\r\n connexion = sqlite3.connect(self.chemin_bdd)\r\n curseur = connexion.cursor()\r\n requete_sql = f\"\"\"DELETE FROM projet WHERE Matricule = '{matricule}'\"\"\"\r\n resultat = curseur.execute(requete_sql)\r\n connexion.commit()\r\n connexion.close()\r\n \r\n \r\n def modifier_eleves(self, eleve):\r\n (matricule, nom, prenom, naissance, academie, etablissement) = eleve\r\n connexion = sqlite3.connect(self.chemin_bdd)\r\n curseur = connexion.cursor()\r\n requete_sql = f\"\"\"UPDATE projet SET Nom = '{nom}', Prenom='{prenom}', DateDeNaissance = '{naissance}', Etablissement = '{etablissement}', Academie ='{academie}' WHERE matricule = '{matricule}'\"\"\"\r\n resultat = curseur.execute(requete_sql)\r\n eleves = resultat.fetchall()\r\n connexion.commit()\r\n connexion.close()\r\n return None\r\n \r\n","repo_name":"shakky33/nsi","sub_path":"bdd_py.py","file_name":"bdd_py.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26423367565","text":"\nfrom database import get_past_five_min_movements, initialize_db\nfrom flask import Flask, request, jsonify\nfrom process import handle_request\nfrom flask_apscheduler import APScheduler\n\n# initialize flask app\napp = Flask(__name__)\nPORT = 5001\n# initialize scheduler\nscheduler = APScheduler()\nscheduler.init_app(app)\nscheduler.start()\n\n\n@app.route(\"/health\", methods=[\"GET\"])\ndef health():\n return jsonify({\"status\": \"ok\"}), 200\n\n\n@app.route(\"/api/v1/update\", methods=[\"POST\"])\ndef update():\n data = request.get_json()\n if not validate_update_json(data):\n return jsonify({\"status\": \"error\", \"error\": \"missing client_id\"}), 400\n handle_request(request.remote_addr, data)\n\n return jsonify({\"status\": \"ok\"}), 200\n\n\ndef validate_update_json(data):\n if \"client_id\" not in data:\n return False\n else:\n return True\n\n\n@scheduler.task(\"cron\", id=\"u\", minute=\"*/5\")\ndef update_temp():\n 
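The `Bdd` class above builds its SQL by interpolating user input into f-strings. A hedged sketch of the same pattern with sqlite3's `?` placeholders, which handle quoting automatically and close the injection hole (the in-memory database and reduced column set are illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE projet (Matricule TEXT, Nom TEXT)")
cur.execute("INSERT INTO projet VALUES (?, ?)", ("m1", "Durand"))
cur.execute("SELECT * FROM projet WHERE Matricule = ?", ("m1",))
print(cur.fetchall())  # [('m1', 'Durand')]
conn.close()
```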
print(\"Checking if user is starting to sleep...\")\n data = get_past_five_min_movements()\n status_sleep = [item for item in data if item['overall_status'] == 1]\n status_awake = [item for item in data if item['overall_status'] == 0]\n\n if len(status_sleep) != 0:\n sleep_awake_ratio = 0.0 + len(status_awake) / len(status_sleep)\n\n # if the user is \"asleep\" for more than 50% of the time in the past 5 minutes, set temp to 23 C\n if sleep_awake_ratio < 0.5:\n print(\"User is starting to sleep...\")\n print(\"Action: set temp to 23 C\")\n else:\n print(\"Waiting: User is awake\")\n\n\nif __name__ == \"__main__\":\n initialize_db()\n\n app.run(host=\"0.0.0.0\", port=PORT, debug=True)\n print(\"Server started on port {}\".format(PORT))\n","repo_name":"BrianCKChiu/Sleep-Temperature-Monitor","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30029570807","text":"\ndef will_fit(holds, cargo):\n s1 = 0\n s2 = 0\n for i in range(len(holds)):\n if holds[i] == 'S':\n s1+= 50\n elif holds[i] == 'M':\n s1+=100\n elif holds[i] == 'L':\n s1+= 200\n for j in range(len(cargo)):\n s2+= cargo[j]\n if s1 >=s2:\n return True\n else:\n return False\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"zzibM5MaxDNvQCrEk_5.py","file_name":"zzibM5MaxDNvQCrEk_5.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29469445737","text":"\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\nimport os\nfrom django.utils.translation import gettext_lazy as _\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nLANGUAGES = (\n ('tr', _('Turkish')),\n ('en', _('English')),\n)\n\nLANGUAGES_OBJ = (\n {\n \"code\": 'tr',\n \"value\": _('Turkish'),\n },\n {\n \"code\": 'en',\n \"value\": _('English'),\n },\n)\n\nLANGUAGE_CODE = 'tr'\n\nUSE_TZ = True\n\n# TIME_ZONE = 'Europe/Istanbul'\nTIME_ZONE = 'Europe/Istanbul'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\n\n\n","repo_name":"mgunesdev/exhange-api","sub_path":"core/settings/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70699833051","text":"from fastapi import APIRouter, Request\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.responses import RedirectResponse\nfrom .utils.weather import get_weather_data\nfrom .forms.city_form import CityForm\nfrom .forms.settings_form import SettingsForm\nfrom core.config import Config as cfg\n\n\nrouter = APIRouter()\ntemplates = Jinja2Templates(directory='templates')\n\n\n@router.get('/')\nasync def main_get(request: Request):\n if request.session.get('city'):\n if not request.session.get('mes_sys'):\n request.session['mes_sys'] = cfg.mes_sys['metric']\n data = get_weather_data(request.session.get('city'), request.session.get('mes_sys'))\n if not data:\n request.session.pop('city')\n return templates.TemplateResponse('weather.html',\n {'request': request, 'msg': 'Город не найден :('})\n return templates.TemplateResponse('weather.html', {'request': request,\n 'data': data[0],\n 'mes_sys': data[1],\n 'time': data[2],\n 'usd': cfg.usd,\n 'eur': cfg.eur,\n 'news': cfg.news})\n return templates.TemplateResponse('weather.html', 
{'request': request,\n 'usd': cfg.usd,\n 'eur': cfg.eur,\n 'news': cfg.news})\n\n\n@router.post('/')\nasync def main_post(request: Request):\n form = CityForm(request)\n await form.load_data()\n request.session['city'] = form.city\n return RedirectResponse(request.url_for('main_get'), status_code=303)\n\n\n@router.get('/options')\nasync def options_get(request: Request):\n return templates.TemplateResponse('options.html', {'request': request})\n\n\n@router.post('/options')\nasync def options_post(request: Request):\n form = SettingsForm(request)\n await form.load_data()\n request.session['mes_sys'] = cfg.mes_sys.get(form.mes_sys)\n return RedirectResponse(request.url_for('main_get'), status_code=303)\n","repo_name":"kirill221100/fast-weather","sub_path":"routes/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13745884555","text":"\"\"\"\nDefines a GeoGrid object. Grid objects store certain properties of a gridded dataset (lat/lon grid\ncorners, resolution, etc.), and can simplify defining a grid when calling utilities such as\ninterpolation routines, plotting, etc.\n\"\"\"\n\n\n# Built-ins\nimport reprlib\n\n# Third-party\nimport numpy as np\n\n# This package\nfrom cpc.geogrids.exceptions import GeogridError\n\n\n# Create reprlib\nr = reprlib.Repr()\nr.maxlist = 4 # max elements displayed for lists\nr.maxstring = 50 # max characters displayed for strings\n\n# Create dict of all built-in GeoGrids\nbuiltin_geogrids = {\n '1deg-global': {\n 'll_corner': (-90, 0),\n 'ur_corner': (90, 359),\n 'res': 1,\n 'type': 'latlon'\n },\n '2deg-global': {\n 'll_corner': (-90, 0),\n 'ur_corner': (90, 358),\n 'res': 2,\n 'type': 'latlon'\n },\n '2.5deg-global': {\n 'll_corner': (-90, 0),\n 'ur_corner': (90, 357.5),\n 'res': 2.5,\n 'type': 'latlon'\n },\n '2deg-conus': {\n 'll_corner': (20, 230),\n 'ur_corner': (56, 300),\n 'res': 2,\n 'type': 'latlon'\n },\n '1/6th-deg-global': {\n 'll_corner': (-89.9167, 0.0833),\n 'ur_corner': (89.9167, 359.9167),\n 'res': 1/6,\n 'type': 'latlon'\n },\n '0.5deg-global-edge-aligned': {\n 'll_corner': (-89.75, 0.25),\n 'ur_corner': (89.75, 359.75),\n 'res': 0.5,\n 'type': 'latlon'\n },\n '0.5deg-global-center-aligned': {\n 'll_corner': (-90, 0),\n 'ur_corner': (90, 359.5),\n 'res': 0.5,\n 'type': 'latlon'\n },\n}\n\n\ndef list_builtin_geogrids():\n return list(builtin_geogrids.keys())\n\n\nclass Geogrid:\n \"\"\"\n Geogrid object storing attributes of a geo grid.\n\n A Geogrid object can either be created by providing the name of the grid, or by providing the\n other attributes listed below\n\n #### Attributes\n\n - name - *str* - name of the grid\n - ll_corner - *tuple of floats* - lower-left corner of the grid, formatted as (lat, lon)\n - ur_corner - *tuple of floats* - upper-right corner of the grid, formatted as (lat, lon)\n - res = *float* - resolution of the grid (in km if `type=\"even\"`, in degrees if `type=\"latlon\"`)\n - type - *str* - grid type. 
Possible values are 'latlon' (Latlon grid), 'equal' (Equally-spaced square grid)\n \"\"\"\n\n def __init__(self, name=None, ll_corner=None, ur_corner=None, res=None, type='latlon'):\n\n # ------------------------------------------------------------------------------------------\n # Document attributes\n #\n self.name = None\n '''Grid name'''\n self.ll_corner = ll_corner\n '''Lower-left corner of grid (lon, lat)'''\n self.ur_corner = ur_corner\n '''Upper-right corner of grid (lon, lat)'''\n self.res = res\n '''Grid resolution'''\n self.type = type\n '''Grid type (currently only latlon is supported)'''\n\n # ------------------------------------------------------------------------------------------\n # Create the Geogrid\n #\n # Built-in\n if name in builtin_geogrids:\n self.name = name\n self.ll_corner = builtin_geogrids[name]['ll_corner']\n self.ur_corner = builtin_geogrids[name]['ur_corner']\n self.res = builtin_geogrids[name]['res']\n self.type = builtin_geogrids[name]['type']\n # Custom\n else:\n # User didn't provide everything necessary to create a custom Geogrid\n if not all([self.ll_corner, self.ur_corner, self.res]):\n raise GeogridError('You must either supply the name of a built-in Grid, or an '\n 'll_corner, ur_corner, and res to create a custom Grid')\n # Create a custom Geogrid\n else:\n self.name = 'custom'\n self.ll_corner = ll_corner\n self.ur_corner = ur_corner\n self.res = res\n self.type = type\n\n # ------------------------------------------------------------------------------------------\n # Calculate additional attributes\n #\n self.num_y = int(((self.ur_corner[0] - self.ll_corner[0]) / self.res) + 1)\n '''Number of points in the y-direction'''\n self.num_x = int(((self.ur_corner[1] - self.ll_corner[1]) / self.res) + 1)\n '''Number of points in the x-direction'''\n self.lats = np.arange(self.ll_corner[0], self.ur_corner[0] + 0.00000001, self.res).tolist()\n '''List of latitude values at which grid points are found'''\n self.lons = np.arange(self.ll_corner[1], self.ur_corner[1] + 0.00000001, self.res).tolist()\n '''List of longitude values at which grid points are found'''\n\n def __repr__(self):\n details = ''\n for key, val in sorted(vars(self).items()):\n details += eval(r.repr('- {}: {}\\n'.format(key, val)))\n return 'Geogrid:\\n{}'.format(details)\n\n def data_fits(self, data):\n \"\"\"\n Determines if the specified data fits this Geogrid\n\n #### Parameters\n\n - data - *array_like* - data to verify\n\n #### Returns\n\n - *boolean* - whether the data fits this Geogrid\n\n #### Exceptions\n\n - *GeogridError* - raised if data is not a valid NumPy array\n\n #### Examples\n\n >>> import numpy as np\n >>> from cpc.geogrids import Geogrid\n >>> grid = Geogrid('1deg-global')\n >>> data = np.random.random((grid.num_y, grid.num_x))\n >>> data.shape\n (181, 360)\n >>> grid.data_fits(data)\n True\n >>> data = np.random.random((grid.num_y + 1, grid.num_x + 1))\n >>> data.shape\n (182, 361)\n >>> grid.data_fits(data)\n False\n \"\"\"\n # Make sure there are num_y x num_x points\n try:\n if self.num_y * self.num_x != data.size:\n return False\n else:\n return True\n except AttributeError:\n raise GeogridError('Data not a valid NumPy array')\n\n def latlon_to_1d_index(self, latlons):\n \"\"\"\n Returns the 1-dimensional index of the grid point, from this Geogrid, that is located at\n the specified lat/lon position\n\n For example, you may have a 1-dimensional data array on a `1deg-global` Geogrid, and you\n want to know the index corresponding to 50 deg lat, -80 deg lon.\n\n 
#### Parameters\n\n - latlons - *tuple of floats* or *list of tuples of floats* - lat/lon of grid point(s)\n\n #### Returns\n\n - *int* or *None* - array index containing the given gridpoint(s) index(es), or -1 if no gridpoint matches the\n given lat/lon value\n\n #### Examples\n\n Get the index of a 1deg-global grid at 50 deg lat, -80 deg lon\n\n >>> from cpc.geogrids import Geogrid\n >>> grid = Geogrid('1deg-global')\n >>> grid.latlon_to_1d_index((50, -80))\n [50820]\n\n Get the index of 1deg-global grid at several lat/lon points\n\n >>> from cpc.geogrids import Geogrid\n >>> grid = Geogrid('1deg-global')\n >>> grid.latlon_to_1d_index([(0, 0), (20, 40), (50, -80)])\n [90, 7350, 50820]\n \"\"\"\n if type(latlons) is not list:\n latlons = [latlons]\n matches = []\n for latlon in latlons:\n lat, lon = latlon\n lon = 360 + lon if lon < 0 else lon\n lats, lons = np.meshgrid(self.lats, self.lons)\n lats = lats.reshape((self.num_y * self.num_x))\n lons = lons.reshape((self.num_y * self.num_x))\n try:\n matches.append(np.argwhere((lats == lat) & (lons == lon))[0][0])\n except IndexError:\n matches.append(-1)\n return matches\n\n# Support applications referring to the legacy name for GeoGrids (Grids)\nGrid = Geogrid\nGridError = GeogridError\n\nif __name__ == '__main__':\n from cpc.geogrids import Geogrid\n print(Geogrid('0.5deg-global-center-aligned'))\n","repo_name":"noaa-nws-cpc/cpc.geogrids","sub_path":"cpc/geogrids/definition.py","file_name":"definition.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72332936091","text":"from flask import current_app\nimport redis\nimport progproxy as pp\nfrom lightserv import cel\nimport os\nimport logging\nfrom datetime import datetime, timedelta\nimport json, requests\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.propagate=False\n\nformatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')\n\n''' Make the file handler to deal with logging to file '''\nfile_handler = logging.FileHandler('logs/neuroglancer_tasks.log')\nfile_handler.setFormatter(formatter)\n\nstream_handler = logging.StreamHandler() # level already set at debug from logger.setLevel() above\n\nstream_handler.setFormatter(formatter)\n\nlogger.addHandler(stream_handler)\nlogger.addHandler(file_handler)\n\n@cel.task() \ndef ng_viewer_checker():\n \"\"\" A celery task to check the activity timestamp\n for all open viewers.\n If it has been more than 30 seconds since last activity,\n then shuts down the viewer and its cloudvolumes and removes them from\n the proxy table. 
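The derived attributes in `Geogrid.__init__` above are plain corner/resolution arithmetic. For the built-in `1deg-global` definition they work out as follows (no import needed to verify the counts the doctests rely on):

```python
ll, ur, res = (-90, 0), (90, 359), 1  # 1deg-global corners and resolution
num_y = int((ur[0] - ll[0]) / res + 1)
num_x = int((ur[1] - ll[1]) / res + 1)
print(num_y, num_x)  # 181 360, matching the (181, 360) data shape above
```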
\"\"\"\n \n\n proxy_h = pp.progproxy(target_hname='confproxy')\n \"\"\" Make the timestamp against which we will compare the viewer timestamps \"\"\"\n response_all = proxy_h.getroutes()\n proxy_dict_all = json.loads(response_all.text)\n logger.debug(\"PROXY DICT (ALL ROUTES):\")\n logger.debug(proxy_dict_all)\n expire_seconds = current_app.config['NG_VIEWER_EXPIRE_SECONDS']\n timeout_timestamp_iso = (datetime.utcnow() - timedelta(seconds=expire_seconds)).isoformat()\n response = proxy_h.getroutes(inactive_since=timeout_timestamp_iso)\n proxy_dict = json.loads(response.text)\n # proxy_viewer_dict = {key:proxy_dict[key] for key in proxy_dict.keys() if 'viewer' in key}\n expired_session_names = [key.split('/')[-1] for key in proxy_dict.keys() if 'viewer' in key]\n \"\"\" Now figure out the session names of each of the expired viewers \"\"\" \n logger.debug(f\"Expired session names: {expired_session_names}\")\n\n \"\"\" Now delete the proxy routes for the viewers and cloudvolumes\n associated with each of these expired sessions\"\"\"\n for expired_session_name in expired_session_names:\n # first ng viewers\n ng_proxypath = f'/viewers/{expired_session_name}'\n proxy_h.deleteroute(ng_proxypath)\n # now find any cloudvolumes with this session name\n cv_proxypaths = [key for key in proxy_dict.keys() if f'cloudvols/{expired_session_name}' in key]\n logger.debug(\"expired cloudvolume proxypaths:\")\n logger.debug(cv_proxypaths)\n for cv_proxypath in cv_proxypaths:\n proxy_h.deleteroute(cv_proxypath)\n\n \"\"\" Now use the session names to take down the actual \n docker containers for both neuroglancer viewer and cloudvolume\n that were launched for each session \"\"\"\n logger.debug(\"removing cloudvolume/neuroglancer containers linked to expired viewers\")\n kv = redis.Redis(host=\"redis\", decode_responses=True)\n container_names_to_kill = []\n for session_name in expired_session_names:\n logger.debug(f\"in loop for {session_name}\")\n session_dict = kv.hgetall(session_name)\n # Cloudvolume containers\n try: # there might not always be cloudvolumes\n cv_count = int(session_dict['cv_count'])\n except:\n cv_count = 0\n for i in range(cv_count):\n # logger.debug(f\"in loop over cloudvolume counts\")\n cv_container_name = session_dict['cv%i_container_name' % (i+1)]\n container_names_to_kill.append(cv_container_name)\n\n # Neuroglancer container - there will just be one per session\n ng_container_name = session_dict['ng_container_name']\n container_names_to_kill.append(ng_container_name)\n \n # Have to send the containers to viewer-launcher to kill since we are not root \n # in this flask container\n if len(container_names_to_kill) > 0:\n containers_to_kill_dict = {'list_of_container_names':container_names_to_kill}\n requests.post('http://viewer-launcher:5005/container_killer',json=containers_to_kill_dict)\n \n return \"checked ng viewer health\"","repo_name":"BrainCOGS/lightserv","sub_path":"lightserv/neuroglancer/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16100336643","text":"import pandas as pd\nfrom app.archive_constants import (LABEL)\nimport warnings\nfrom pandas.core.common import SettingWithCopyWarning\n\npd.set_option('mode.chained_assignment', None)\n\nwarnings.simplefilter(action=\"ignore\", category=SettingWithCopyWarning)\n\n\ndef extract_cell_metdata(df_c_md):\n \"\"\" Build cell metadata \"\"\"\n df_cell_md = pd.DataFrame()\n 
df_cell_md[LABEL.CELL_ID.value] = [df_c_md[LABEL.CELL_ID.value]]\n df_cell_md[LABEL.ANODE.value] = [df_c_md[LABEL.ANODE.value]]\n df_cell_md[LABEL.CATHODE.value] = [df_c_md[LABEL.CATHODE.value]]\n df_cell_md[LABEL.SOURCE.value] = [df_c_md[LABEL.SOURCE.value]]\n df_cell_md[LABEL.AH.value] = [df_c_md[LABEL.AH.value]]\n df_cell_md[LABEL.FORM_FACTOR.value] = [df_c_md[LABEL.FORM_FACTOR.value]]\n df_cell_md[LABEL.TEST.value] = [df_c_md[LABEL.TEST.value]]\n # df_cell_md[LABEL.MAPPING.value] = [df_c_md[LABEL.MAPPING.value]]\n df_cell_md[LABEL.TESTER.value] = [df_c_md[LABEL.TESTER.value]]\n\n return df_cell_md\n\n\ndef split_cycle_metadata(df_c_md):\n\n df_cell_md = extract_cell_metdata(df_c_md)\n\n # Build test metadata\n df_test_md = pd.DataFrame()\n df_test_md[LABEL.CELL_ID.value] = [df_c_md[LABEL.CELL_ID.value]]\n df_test_md[LABEL.CRATE_C.value] = [df_c_md[LABEL.CRATE_C.value]]\n df_test_md[LABEL.CRATE_D.value] = [df_c_md[LABEL.CRATE_D.value]]\n df_test_md[LABEL.SOC_MAX.value] = [df_c_md[LABEL.SOC_MAX.value]]\n df_test_md[LABEL.SOC_MIN.value] = [df_c_md[LABEL.SOC_MIN.value]]\n df_test_md[LABEL.TEMP.value] = [df_c_md[LABEL.TEMP.value]]\n\n return df_cell_md, df_test_md\n\n\ndef split_abuse_metadata(df_c_md):\n\n df_cell_md = extract_cell_metdata(df_c_md)\n\n # Build test metadata\n df_test_md = pd.DataFrame()\n df_test_md[LABEL.CELL_ID.value] = [df_c_md[LABEL.CELL_ID.value]]\n df_test_md[LABEL.THICKNESS.value] = [df_c_md[LABEL.THICKNESS.value]]\n df_test_md[LABEL.V_INIT.value] = [df_c_md[LABEL.V_INIT.value]]\n df_test_md[LABEL.INDENTOR.value] = [df_c_md[LABEL.INDENTOR.value]]\n df_test_md[LABEL.NAIL_SPEED.value] = [df_c_md[LABEL.NAIL_SPEED.value]]\n df_test_md[LABEL.TEMP.value] = [df_c_md[LABEL.TEMP.value]]\n\n return df_cell_md, df_test_md\n\n\n# sort data imported to insure cycle index and test times are correctly calculated\ndef sort_timeseries(df_tmerge):\n # Arrange the data by date time first, then by test time\n # Rebuild Cycle Index and test time to increment from file to file\n # This method does not depend on data from a specific testers\n\n if not df_tmerge.empty:\n\n df_t = df_tmerge.sort_values(\n by=[LABEL.DATE_TIME.value, LABEL.TEST_TIME.value])\n df_t = df_t.reset_index(drop=True)\n\n cycles = df_t[[\n LABEL.CYCLE_INDEX_FILE.value, LABEL.CYCLE_INDEX.value,\n LABEL.FILENAME.value, LABEL.TEST_TIME.value\n ]].to_numpy()\n\n max_cycle = 1\n past_index = 1\n max_time = 0\n last_file = \"\"\n delta_t = 0\n start = 0\n\n for x in cycles:\n\n if start == 0:\n last_file = x[2]\n start += 1\n\n if x[2] != last_file:\n delta_t = max_time\n x[3] += delta_t\n last_file = x[2]\n else:\n x[3] += delta_t\n max_time = x[3]\n last_file = x[2]\n\n if x[0] < max_cycle:\n\n if past_index == x[0]:\n past_index = x[0]\n x[1] = max_cycle\n else:\n past_index = x[0]\n x[1] = max_cycle + 1\n max_cycle = x[1]\n\n else:\n past_index = x[0]\n max_cycle = x[0]\n x[1] = x[0]\n\n df_tmp = pd.DataFrame(data=cycles[:, [1]],\n columns=[LABEL.CYCLE_INDEX.value])\n df_t[LABEL.CYCLE_INDEX.value] = df_tmp[LABEL.CYCLE_INDEX.value]\n\n df_tmp = pd.DataFrame(data=cycles[:, [3]],\n columns=[LABEL.TEST_TIME.value])\n df_t[LABEL.TEST_TIME.value] = pd.to_numeric(\n df_tmp[LABEL.TEST_TIME.value])\n\n df_ts = df_t.sort_values(by=[LABEL.TEST_TIME.value])\n\n # Remove quantities only needed to tag files\n df_ts.drop(LABEL.FILENAME.value, axis=1, inplace=True)\n df_ts.drop(LABEL.CYCLE_INDEX_FILE.value, axis=1, inplace=True)\n\n return df_ts\n\n\n# calculate statistics for abuse test\ndef calc_abuse_stats(df_t, df_test_md):\n\n 
for _ in df_t.index:\n df_t[LABEL.NORM_D.value] = df_t.iloc[\n 0:, df_t.columns.get_loc(LABEL.AXIAL_D.value)] - df_t[\n LABEL.AXIAL_D.value][0]\n df_t[LABEL.STRAIN.value] = df_t.iloc[\n 0:, df_t.columns.get_loc(LABEL.NORM_D.value)] / df_test_md[\n LABEL.THICKNESS.value]\n\n return df_t\n\n\ndef calc_cycle_stats(df_t):\n\n df_t[LABEL.CYCLE_TIME.value] = 0\n\n no_cycles = int(df_t[LABEL.CYCLE_INDEX.value].max())\n\n # Initialize the cycle_data time frame\n a = [0 for _ in range(no_cycles)] # using loops\n\n df_c = pd.DataFrame(data=a, columns=[LABEL.CYCLE_INDEX.value])\n\n df_c[LABEL.CELL_ID.value] = df_t[LABEL.CELL_ID.value]\n df_c[LABEL.CYCLE_INDEX.value] = 0\n df_c[LABEL.V_MAX.value] = 0\n df_c[LABEL.I_MAX.value] = 0\n df_c[LABEL.V_MIN.value] = 0\n df_c[LABEL.I_MIN.value] = 0\n df_c[LABEL.AH_C.value] = 0\n df_c[LABEL.AH_D.value] = 0\n df_c[LABEL.E_C.value] = 0\n df_c[LABEL.E_D.value] = 0\n df_c[LABEL.V_C_MEAN.value] = 0\n df_c[LABEL.V_D_MEAN.value] = 0\n df_c[LABEL.TEST_TIME.value] = 0\n df_c[LABEL.AH_EFF.value] = 0\n df_c[LABEL.E_EFF.value] = 0\n\n for c_ind in df_c.index:\n x = c_ind + 1\n\n df_f = df_t[df_t[LABEL.CYCLE_INDEX.value] == x]\n\n df_f[LABEL.AH_C.value] = 0\n df_f[LABEL.E_C.value] = 0\n df_f[LABEL.AH_D.value] = 0\n df_f[LABEL.E_D.value] = 0\n\n if not df_f.empty:\n\n try:\n\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.CYCLE_INDEX.value)] = x\n\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.V_MAX.value)] = df_f.loc[\n df_f[LABEL.V.value].idxmax()].v\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.V_MIN.value)] = df_f.loc[\n df_f[LABEL.V.value].idxmin()].v\n\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.I_MAX.value)] = df_f.loc[\n df_f[LABEL.I.value].idxmax()].i\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.I_MIN.value)] = df_f.loc[\n df_f[LABEL.I.value].idxmin()].i\n\n df_c.iloc[\n c_ind,\n df_c.columns.get_loc(LABEL.TEST_TIME.value)] = df_f.loc[\n df_f[LABEL.TEST_TIME.value].idxmax()].test_time\n\n df_f[LABEL.DT.value] = df_f[LABEL.TEST_TIME.value].diff() / 3600.0\n df_f_c = df_f[df_f[LABEL.I.value] > 0]\n df_f_d = df_f[df_f[LABEL.I.value] < 0]\n\n df_f = calc_cycle_quantities(df_f)\n\n df_t.loc[df_t.cycle_index == x,\n LABEL.CYCLE_TIME.value] = df_f[LABEL.CYCLE_TIME.value]\n df_t.loc[df_t.cycle_index == x,\n LABEL.AH_C.value] = df_f[LABEL.AH_C.value]\n df_t.loc[df_t.cycle_index == x,\n LABEL.E_C.value] = df_f[LABEL.E_C.value]\n df_t.loc[df_t.cycle_index == x,\n LABEL.AH_D.value] = df_f[LABEL.AH_D.value]\n df_t.loc[df_t.cycle_index == x,\n LABEL.E_D.value] = df_f[LABEL.E_D.value]\n\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.AH_C.value)] = df_f[\n LABEL.AH_C.value].max()\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.AH_D.value)] = df_f[\n LABEL.AH_D.value].max()\n df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_C.value)] = df_f[\n LABEL.E_C.value].max()\n df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_D.value)] = df_f[\n LABEL.E_D.value].max()\n\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.V_C_MEAN.value)] = df_f_c[\n LABEL.V.value].mean()\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.V_D_MEAN.value)] = df_f_d[\n LABEL.V.value].mean()\n\n if df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.AH_C.value)] == 0:\n df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.AH_EFF.value)] = 0\n else:\n df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.AH_EFF.value)] = df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.AH_D.value)] / \\\n df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.AH_C.value)]\n\n if df_c.iloc[c_ind,\n df_c.columns.get_loc(LABEL.E_C.value)] == 0:\n df_c.iloc[c_ind,\n 
df_c.columns.get_loc(LABEL.E_EFF.value)] = 0\n else:\n df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_EFF.value)] = df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_D.value)] / \\\n df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_C.value)]\n\n except Exception as e:\n pass\n\n df_cc = df_c[df_c[LABEL.CYCLE_INDEX.value] > 0]\n df_tt = df_t[df_t[LABEL.CYCLE_INDEX.value] > 0]\n return df_cc, df_tt\n\n\n# unpack the dataframe and calculate quantities used in statistics\ndef calc_cycle_quantities(df):\n\n tmp_arr = df[[\n LABEL.TEST_TIME.value, LABEL.I.value, LABEL.V.value, LABEL.AH_C.value,\n LABEL.E_C.value, LABEL.AH_D.value, LABEL.E_D.value,\n LABEL.CYCLE_TIME.value\n ]].to_numpy()\n\n start = 0\n last_time = 0\n last_i_c = 0\n last_v_c = 0\n last_i_d = 0\n last_v_d = 0\n last_ah_c = 0\n last_e_c = 0\n last_ah_d = 0\n last_e_d = 0\n initial_time = 0\n\n for x in tmp_arr:\n\n if start == 0:\n start += 1\n initial_time = x[0]\n else:\n if x[1] >= 0:\n x[3] = (x[0] - last_time) * (x[1] + last_i_c) * 0.5 + last_ah_c\n x[4] = (x[0] - last_time) * (x[1] + last_i_c) * 0.5 * (\n x[2] + last_v_c) * 0.5 + last_e_c\n last_i_c = x[1]\n last_v_c = x[2]\n last_ah_c = x[3]\n last_e_c = x[4]\n\n if x[1] <= 0:\n x[5] = (x[0] - last_time) * (x[1] + last_i_d) * 0.5 + last_ah_d\n # if x[5] == 0:\n # print(\"x5=0:\" + str(x[5]) + \" last_ah_d: \" +\n # str(last_ah_d))\n # if last_ah_d == 0:\n # print(\"x5:\" + str(x[5]) + \" last_ah_d=0: \" +\n # str(last_ah_d))\n x[6] = (x[0] - last_time) * (x[1] + last_i_d) * 0.5 * (\n x[2] + last_v_d) * 0.5 + last_e_d\n last_i_d = x[1]\n last_v_d = x[2]\n last_ah_d = x[5]\n last_e_d = x[6]\n\n x[7] = x[0] - initial_time\n last_time = x[0]\n\n df_tmp = pd.DataFrame(data=tmp_arr[:, [3]], columns=[LABEL.AH_C.value])\n df_tmp.index += df.index[0]\n df[LABEL.AH_C.value] = df_tmp[LABEL.AH_C.value] / 3600.0\n\n df_tmp = pd.DataFrame(data=tmp_arr[:, [4]], columns=[LABEL.E_C.value])\n df_tmp.index += df.index[0]\n df[LABEL.E_C.value] = df_tmp[LABEL.E_C.value] / 3600.0\n\n df_tmp = pd.DataFrame(data=tmp_arr[:, [5]], columns=[LABEL.AH_D.value])\n df_tmp.index += df.index[0]\n df[LABEL.AH_D.value] = -df_tmp[LABEL.AH_D.value] / 3600.0\n\n df_tmp = pd.DataFrame(data=tmp_arr[:, [6]], columns=[LABEL.E_D.value])\n df_tmp.index += df.index[0]\n df[LABEL.E_D.value] = -df_tmp[LABEL.E_D.value] / 3600.0\n\n df_tmp = pd.DataFrame(data=tmp_arr[:, [7]],\n columns=[LABEL.CYCLE_TIME.value])\n df_tmp.index += df.index[0]\n df[LABEL.CYCLE_TIME.value] = df_tmp[LABEL.CYCLE_TIME.value]\n\n return df\n","repo_name":"battery-lcf/batteryarchive-service","sub_path":"app/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":12477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"6565096333","text":"import os\n\nfrom file_hash import *\nfrom makeCSV import *\nfrom file_info import *\nimport Item\nfrom dbManager import *\nfrom get_file_path import *\nfrom explorer import *\nitemList = [] #Item.py랑 같이쓰던건데 사용목적희미해짐\ndefault_path = \"\" # explorer에서 최상위폴더 경로\ndebug_mode = False #디버그\ndef printDebugMessage(message) : #디버그메세지\n if debug_mode == True :\n print(message)\n\ndef printItemList() : #Item.py랑 같이쓰던건데 사용목적희미해짐\n for i in itemList :\n print(i.showInfo())\n\ndef makeCSV(itemList) : #CSV떨구기\n f = open(\"list.csv\", 'w')\n f.write(\"\\\"이름\\\" \\\"경로\\\" \\\"크기\\\" \\\"MD5\\\" \\\"SHA-1\\\" \\\"마지막 수정시간\\\" \\\"마지막 접근시간\\\" \\\"생성시간\\\" \\\"색인\\\"\\n\")\n for i in itemList :\n f.write(\"\\\"\"+i.name + \"\\\" \\\"\" + i.path + \"\\\" 
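`calc_cycle_quantities` above accumulates charge and energy with a hand-rolled trapezoid rule over `(test_time, i, v)` rows, dividing by 3600 to convert ampere-seconds to Ah. The same running integral expressed with numpy, on a made-up constant-current segment:

```python
import numpy as np

t = np.array([0.0, 10.0, 20.0, 30.0])  # test_time, seconds
i = np.array([2.0, 2.0, 2.0, 2.0])     # charge current, amps

dt = np.diff(t)
ah = np.concatenate([[0.0], np.cumsum(0.5 * (i[1:] + i[:-1]) * dt)]) / 3600.0
print(ah[-1])  # 60 A*s / 3600 ~= 0.0167 Ah
```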
\\\"\" + str(i.size)\n + \"\\\" \\\"\" + str(i.md5) + \"\\\" \\\"\" + str(i.sha1) + \"\\\" \\\"\" + str(i.modify_time)\n + \"\\\" \\\"\" + str(i.access_time) + \"\\\" \\\"\" + str(i.create_time)\n + \"\\\" \\\"\" + str(i.index) + \"\\\"\\n\")\n print(\"make CSV\")\n f.close()\n f = open(\"list_csv_hash.txt\", 'w') # CSV에 대한 hash값을 담은 list_csv_hast.txt파일을 생성\n f.write(\"md5 : \" + str(md5_for_largefile(\"list.csv\")) + \"\\nsha-1 : \"\n + str(sha1_for_largefile(\"list.csv\")))\n f.close()\n\ndef makeDD(path) : #DD떨구기 1도모름\n with open(\"E:\\\\test.txt\", \"rb\") as f:\n i = open(\"test.dd\", \"wb\")\n while True:\n if i.write(f.read(512)) == 0:\n break\n\ndef readDisk():\n print(\"Please input path > \") # 최상위폴더 입력받기\n default_path = input() #입력받음\n\n item_dict = {}\n item_dict['name'] = \"DEFAULT_PATH\"\n item_dict['contents'] = default_path\n\n db1.updateDB(\"SETUP\", item_dict) # 최상위폴더경로 (default_path) db에 저장\n\n #directory = os.listdir(default_path)\n get_file_path(default_path, db1) # 최상위폴더부터 파일폴더 쭉긁어옴\n \"\"\"for items in directory:\n one_item = os.path.join(default_path, items) # 디렉토리에있는 파일 및 폴더 읽어온다.\n\n if os.path.isfile(one_item): # 파일이면, itemList에 집어넣을 것\n file_inform = file_info(one_item)\n item = Item.Item(file_inform['name'], file_inform['path']\n , file_inform['size'], file_inform['md5']\n , file_inform['sha1'], file_inform['modify_time']\n , file_inform['access_time'], file_inform['create_time']\n , file_inform['index']) # 새로운 Item객체 만듬, 정보대입\n itemList.append(item) # itemList에 item객체를 넣는다\n #db1.updateDB(\"INSERT\", file_inform)\"\"\"\ndb1 = db(); #db 객체생성\ndb1.createTableIfNotExist() #db 테이블 없으면 만들기\nprint(\"DB 연결완료\")\n#readDisk()\nbeginExplorer() #explorer 실행\n#printItemList()\n#makeCSV(itemList)\n#db1.printDB()\ndb1.conn.close() #db연결종료\n\n\n","repo_name":"Nabureang/Matzip","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4421978716","text":"\"\"\"Helper functions to conduct a rollout with policies or agents.\"\"\"\n\nimport torch\nfrom gym.wrappers.monitoring.video_recorder import VideoRecorder\nfrom tqdm import tqdm\n\nfrom rllib.dataset.datatypes import Observation\nfrom rllib.util.neural_networks.utilities import broadcast_to_tensor, to_torch\nfrom rllib.util.training.utilities import Evaluate\nfrom rllib.util.utilities import (\n get_entropy_and_log_p,\n sample_model,\n tensor_to_distribution,\n)\n\n\ndef step_env(environment, state, action, action_scale, pi=None, render=False):\n \"\"\"Perform a single step in an environment.\"\"\"\n try:\n next_state, reward, done, info = environment.step(action)\n except TypeError:\n next_state, reward, done, info = environment.step(action.item())\n\n action = to_torch(action)\n\n if pi is not None:\n try:\n with torch.no_grad():\n entropy, log_prob_action = get_entropy_and_log_p(\n pi, action, action_scale\n )\n except RuntimeError:\n entropy, log_prob_action = 0.0, 1.0\n else:\n entropy, log_prob_action = 0.0, 1.0\n\n observation = Observation(\n state=state,\n action=action,\n reward=reward,\n next_state=next_state,\n done=done,\n entropy=entropy,\n log_prob_action=log_prob_action,\n ).to_torch()\n state = next_state\n if render:\n environment.render()\n return observation, state, done, info\n\n\ndef step_model(\n dynamical_model,\n reward_model,\n termination_model,\n state,\n action,\n done=None,\n action_scale=1.0,\n pi=None,\n):\n \"\"\"Perform a single step in an dynamical 
model.\"\"\"\n # Sample a next state\n next_state = sample_model(dynamical_model, state, action)\n\n # Sample a reward\n reward = sample_model(reward_model, state, action, next_state)\n\n if done is None:\n done = torch.zeros_like(reward).bool()\n broadcast_done = broadcast_to_tensor(done, target_tensor=reward)\n reward *= (~broadcast_done).float()\n\n # Check for termination.\n if termination_model is not None:\n done_ = sample_model(termination_model, state, action, next_state).bool()\n done = done + done_ # \"+\" is a boolean \"or\".\n\n if pi is not None:\n try:\n entropy, log_prob_action = get_entropy_and_log_p(pi, action, action_scale)\n except RuntimeError:\n entropy, log_prob_action = 0.0, 1.0\n else:\n entropy, log_prob_action = 0.0, 1.0\n\n observation = Observation(\n state=state,\n action=action,\n reward=reward,\n next_state=next_state,\n done=done.float(),\n entropy=entropy,\n log_prob_action=log_prob_action,\n ).to_torch()\n\n return observation, next_state, done\n\n\ndef record(environment, agent, path, num_episodes=1, max_steps=1000):\n \"\"\"Record an episode.\"\"\"\n recorder = VideoRecorder(environment, path=path)\n for _ in range(num_episodes):\n state = environment.reset()\n agent.set_goal(environment.goal)\n\n done = False\n time_step = 0\n while not done:\n action = agent.act(state)\n observation, state, done, info = step_env(\n environment, state, action, agent.policy.action_scale\n )\n recorder.capture_frame()\n\n time_step += 1\n if max_steps <= time_step:\n break\n\n recorder.close()\n\n\ndef rollout_episode(\n environment, agent, max_steps, render, callback_frequency, callbacks\n):\n \"\"\"Rollout a full episode.\"\"\"\n state = environment.reset()\n agent.set_goal(environment.goal)\n agent.start_episode()\n done = False\n time_step = 0\n while not done:\n action = agent.act(state)\n obs, state, done, info = step_env(\n environment=environment,\n state=state,\n action=action,\n action_scale=agent.policy.action_scale,\n pi=agent.pi,\n render=render,\n )\n agent.observe(obs)\n # Log info.\n agent.logger.update(**info)\n\n time_step += 1\n if max_steps <= time_step:\n break\n\n if callback_frequency and agent.total_episodes % callback_frequency == 0:\n for callback in callbacks:\n callback(agent, environment, agent.total_episodes)\n agent.end_episode()\n\n\ndef rollout_agent(\n environment,\n agent,\n num_episodes=1,\n max_steps=1000,\n render=False,\n print_frequency=0,\n callback_frequency=0,\n eval_frequency=0,\n save_milestones=None,\n callbacks=None,\n):\n \"\"\"Conduct a rollout of an agent in an environment.\n\n Parameters\n ----------\n environment: AbstractEnvironment\n Environment with which the abstract interacts.\n agent: AbstractAgent\n Agent that interacts with the environment.\n num_episodes: int, optional (default=1)\n Number of episodes.\n max_steps: int.\n Maximum number of steps per episode.\n render: bool.\n Flag that indicates whether to render the environment or not.\n print_frequency: int, optional.\n Print agent stats every `print_frequency' episodes if > 0.\n callback_frequency: int, optional.\n Plot agent callbacks every `plot_frequency' episodes if > 0.\n eval_frequency: int, optional.\n Evaluate agent every 'eval_frequency' episodes if > 0.\n save_milestones: List[int], optional.\n List with episodes in which to save the agent.\n callbacks: List[Callable[[AbstractAgent, AbstractEnvironment,int], None]], optional.\n List of functions for evaluating/plotting the agent.\n \"\"\"\n save_milestones = list() if save_milestones is None else 
save_milestones\n callbacks = list() if callbacks is None else callbacks\n for episode in tqdm(range(num_episodes)):\n rollout_episode(\n environment=environment,\n agent=agent,\n max_steps=max_steps,\n render=render,\n callback_frequency=callback_frequency,\n callbacks=callbacks,\n )\n\n if print_frequency and episode % print_frequency == 0:\n print(agent)\n\n if episode in save_milestones:\n agent.save(f\"{agent.name}_{episode}.pkl\")\n\n if eval_frequency and episode % eval_frequency == 0:\n with Evaluate(agent):\n rollout_episode(\n environment=environment,\n agent=agent,\n max_steps=max_steps,\n render=render,\n callback_frequency=callback_frequency,\n callbacks=callbacks,\n )\n agent.end_interaction()\n\n\ndef rollout_policy(\n environment, policy, num_episodes=1, max_steps=1000, render=False, memory=None\n):\n \"\"\"Conduct a rollout of a policy in an environment.\n\n Parameters\n ----------\n environment: AbstractEnvironment\n Environment with which the policy interacts.\n policy: AbstractPolicy\n Policy that interacts with the environment.\n num_episodes: int, optional (default=1)\n Number of episodes.\n max_steps: int.\n Maximum number of steps per episode.\n render: bool.\n Flag that indicates whether to render the environment or not.\n memory: ExperienceReplay, optional.\n Memory where to store the simulated transitions.\n\n Returns\n -------\n trajectories: List[Trajectory]=List[List[Observation]]\n A list of trajectories.\n\n \"\"\"\n trajectories = []\n for _ in tqdm(range(num_episodes)):\n state = environment.reset()\n done = False\n trajectory = []\n time_step = 0\n while not done:\n pi = tensor_to_distribution(policy(to_torch(state)), **policy.dist_params)\n action = pi.sample()\n if not policy.discrete_action:\n action = policy.action_scale * action.clamp(-1.0, 1.0)\n obs, state, done, info = step_env(\n environment=environment,\n state=state,\n action=action.detach().numpy(),\n action_scale=policy.action_scale,\n pi=pi,\n render=render,\n )\n trajectory.append(obs)\n if memory is not None:\n memory.append(obs)\n\n time_step += 1\n if max_steps <= time_step:\n break\n\n trajectories.append(trajectory)\n return trajectories\n\n\ndef rollout_model(\n dynamical_model,\n reward_model,\n policy,\n initial_state,\n initial_action=None,\n termination_model=None,\n max_steps=1000,\n memory=None,\n):\n \"\"\"Conduct a rollout of a policy interacting with a model.\n\n Parameters\n ----------\n dynamical_model: AbstractModel\n Dynamical Model with which the policy interacts.\n reward_model: AbstractModel.\n Reward Model with which the policy interacts.\n policy: AbstractPolicy\n Policy that interacts with the environment.\n initial_state: State\n Starting states for the interaction.\n initial_action: Action.\n Starting action for the interaction.\n termination_model: AbstractModel.\n Termination condition to finish the rollout.\n max_steps: int.\n Maximum number of steps per episode.\n memory: ExperienceReplay, optional.\n Memory where to store the simulated transitions.\n\n Returns\n -------\n trajectory: Trajectory=List[Observation]\n A list of observations.\n\n Notes\n -----\n It will try to do the re-parametrization trick with the policy and models.\n\n TODO: Parallelize it!.\n \"\"\"\n trajectory = list()\n state = initial_state\n done = torch.full(state.shape[:-1], False, dtype=torch.bool)\n\n assert max_steps > 0\n for i in range(max_steps):\n if policy is not None:\n pi = tensor_to_distribution(policy(state), **policy.dist_params)\n action_scale = policy.action_scale\n 
else:\n assert max_steps == 1\n pi, action_scale = None, 1.0\n\n if i == 0 and initial_action is not None:\n action = initial_action\n else:\n # Sample an action\n if pi.has_rsample:\n action = pi.rsample()\n else:\n action = pi.sample()\n if not policy.discrete_action:\n action = policy.action_scale * action.clamp_(-1.0, 1.0)\n\n observation, next_state, done = step_model(\n dynamical_model=dynamical_model,\n reward_model=reward_model,\n termination_model=termination_model,\n state=state,\n action=action,\n action_scale=action_scale,\n done=done,\n pi=pi,\n )\n trajectory.append(observation)\n if memory is not None:\n memory.append(observation)\n\n state = next_state\n if torch.all(done):\n break\n\n return trajectory\n\n\ndef rollout_actions(\n dynamical_model,\n reward_model,\n action_sequence,\n initial_state,\n termination_model=None,\n memory=None,\n):\n \"\"\"Conduct a rollout of an action sequence interacting with a model.\n\n Parameters\n ----------\n dynamical_model: AbstractModel\n Dynamical Model with which the policy interacts.\n reward_model: AbstractReward, optional.\n Reward Model with which the policy interacts.\n action_sequence: Action\n Action Sequence that interacts with the environment.\n The dimensions are [horizon x num samples x dim action].\n initial_state: State\n Starting states for the interaction.\n The dimensions are [1 x num samples x dim state].\n termination_model: Callable.\n Termination condition to finish the rollout.\n memory: ExperienceReplay, optional.\n Memory where to store the simulated transitions.\n\n Returns\n -------\n trajectory: Trajectory=List[Observation]\n A list of observations.\n\n Notes\n -----\n It will try to do the re-parametrization trick with the policy and models.\n \"\"\"\n trajectory = list()\n state = initial_state\n done = torch.full(state.shape[:-1], False, dtype=torch.bool)\n\n for action in action_sequence: # Normalized actions\n\n observation, next_state, done = step_model(\n dynamical_model=dynamical_model,\n reward_model=reward_model,\n termination_model=termination_model,\n state=state,\n action=action,\n action_scale=1.0,\n done=done,\n )\n trajectory.append(observation)\n if memory is not None:\n memory.append(observation)\n\n state = next_state\n if torch.all(done):\n break\n\n return trajectory\n","repo_name":"sebascuri/rllib","sub_path":"rllib/util/rollout.py","file_name":"rollout.py","file_ext":"py","file_size_in_byte":12727,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"10893532194","text":"from startwork.main import main\nfrom startwork.constants.__version__ import __version__\n\nclass TestEndToEnd:\n expected_help_output = f'avaliable options:\\n default: run\\n\\n create: create a new project\\n\\n delete: delete a project\\n\\n'\n\n def test_unkown_option(self, capsys):\n main([\"\", \"--randon_stuff\"])\n out, err = capsys.readouterr()\n expeted_help_message = 'Unknown option: --randon_stuff\\nTry one of the following:\\n'\n assert out == expeted_help_message + self.expected_help_output\n assert err == ''\n\n def test_help(self, capsys):\n main([\"\", \"--help\"])\n out, err = capsys.readouterr()\n assert out == self.expected_help_output\n assert err == ''\n\n def test_version(self, capsys):\n main([\"\", \"--version\"])\n out, err = capsys.readouterr()\n assert out == f'version: {__version__}\\n\\n'\n assert err == 
''\n","repo_name":"JorbFreire/startwork","sub_path":"tests/test_end_to_end.py","file_name":"test_end_to_end.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5154021860","text":"\"\"\"Training data\"\"\"\n# -*- coding:utf-8 -*-\nimport sys\nimport os\nimport time\nimport gzip\nimport json\nimport pickle\nimport random\nfrom collections import defaultdict\n\nclass AveragedPerceptron(object): # averaged perceptron\n    def __init__(self):\n        self.weights = {} # ends up as a dict of dicts: { {}, {}, ... }\n        self.classes = set()\n        self._totals = defaultdict(int)\n        self._tstamps = defaultdict(int)\n        self.i = 0 # counts how many characters of the training set have been consumed\n\n    def predict(self, features):\n        '''Dot-product the features and current weights and return the best label.'''\n        scores = defaultdict(float)\n        for feat, value in features.items():\n            if feat not in self.weights or value == 0:\n                continue\n            weights = self.weights[feat]\n            for label, weight in weights.items():\n                scores[label] += value * weight\n        return max(self.classes, key=lambda label: (scores[label], label)) # a feature absent from weights effectively yields 'O'\n        # otherwise return the label with the highest score among the matching context features\n\n    def update(self, truth, guess, features):\n        '''Update the feature weights.'''\n        def upd_feat(c, f, w, v):\n            param = (f, c)\n            self._totals[param] += (self.i - self._tstamps[param]) * w # self.i - self._tstamps[param] acts like a distance: how far back this context feature last appeared\n            # print(w,self.i , self._tstamps[param],\" \",param) # this gap is the divisor in the final averaging, so a rarely seen feature has its weight damped\n            self._tstamps[param] = self.i\n            self.weights[f][c] = w + v # f => feature, c => tag; add this context relation if missing, otherwise adjust it\n\n        self.i += 1\n        if truth == guess: # nothing to change when the guess is correct\n            return None\n        for f in features:\n            weights = self.weights.setdefault(f, {}) # on a wrong guess, move the correct label up and the guessed label down in this feature's dict\n            upd_feat(truth, f, weights.get(truth, 0.0), 1.2) # two update cases: a. add the label  b. adjust the existing label's weight\n            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)\n        return None\n\n    def average_weights(self): # average the weights\n        '''Average weights from all iterations.'''\n        for feat, weights in self.weights.items():\n            new_feat_weights = {}\n            for clas, weight in weights.items():\n                param = (feat, clas)\n                total = self._totals[param]\n                total += (self.i - self._tstamps[param]) * weight # add the part accumulated since the last update\n                averaged = round(total / float(self.i), 3)\n                if averaged:\n                    new_feat_weights[clas] = averaged\n            self.weights[feat] = new_feat_weights\n        return None\n
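\n# Minimal usage sketch (assumed shapes, not code from this repository):\n#\n#     ap = AveragedPerceptron()\n#     ap.classes.update(['B-LOC', 'O'])\n#     feats = {'bias': 1, 'i word X': 1}\n#     guess = ap.predict(feats)          # best label under the current weights\n#     ap.update('B-LOC', guess, feats)   # +1.2 to the truth, -1.0 to a wrong guess\n#     ap.average_weights()               # lazy averaged-perceptron finalization\n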
\nclass Perceptron:\n\tdef __init__(self, loc=None):\n\t\tself.START = ['-START-', '-START2-']\n\t\tself.END = ['-END-', '-END2-']\n\t\tself.model = AveragedPerceptron()\n\t\tif loc != None:\n\t\t\tself.load(loc)\n\n\tdef predict(self, words):\n\t\tprev, prev2 = self.START\n\t\tlabels = []\n\t\tcontext = self.START + words + self.END\n\t\tfor i, word in enumerate(words):\n\t\t\tfeatures = self._get_features(i, word, context, prev, prev2)\n\t\t\ttag = self.model.predict(features)\n\t\t\tlabels.append(tag)\n\t\t\tprev2 = prev\n\t\t\tprev = tag\n\t\treturn labels\n\t\t\n\tdef train(self, sentences, save_loc=None, nr_iter=5, shuf=False):\n\t\tself._make_tagdict(sentences)\n\t\t# print(len(sentences))\n\t\tif sentences == []:\n\t\t\treturn\n\t\tfor iter_ in range(nr_iter):\n\t\t\tc = 0\n\t\t\tn = 0\n\t\t\tfor words, tags in sentences: # sentences => [ [[words][tags]] ]\n\t\t\t\tprev, prev2 = self.START # prev => tag of the previous char, prev2 => tag of the char before that\n\t\t\t\tcontext = self.START + words + self.END\n\t\t\t\tfor i, word in enumerate(words):\n\t\t\t\t\tfeats = self._get_features(i, word, context, prev, prev2) # collect this char's feature info\n\t\t\t\t\tguess = self.model.predict(feats) # predict this word's tag from its features\n\t\t\t\t\tself.model.update(tags[i], guess, feats) # update the stored feature weights with the outcome\n\t\t\t\t\tprev2 = prev\n\t\t\t\t\tprev = guess\n\t\t\t\t\tc += guess == tags[i] # c counts correct guesses\n\t\t\t\t\tn += 1 # n counts all guesses\n\t\t\tif shuf == True: # shuffle the training set\n\t\t\t\trandom.shuffle(sentences)\n\t\t\tself.save(save_loc)\n\t\tself.model.average_weights()\n\t\tself.save(save_loc)\n\t\t\n\tdef save(self, loc=os.path.dirname(os.path.realpath(sys.argv[0]))+r'\\ap.model', zip=True):\n\t\tif zip == False:\n\t\t\tpickle.dump((self.model.weights, self.model.classes), open(loc, 'wb'))\n\t\telse:\n\t\t\tpickle.dump((self.model.weights, self.model.classes), gzip.open(loc, 'wb'))\n\t\t\t\n\tdef load(self, loc=os.path.dirname(os.path.realpath(sys.argv[0]))+r'\\ap.model', zip=True):\n\t\tif zip == False:\n\t\t\tself.model.weights, self.model.classes = pickle.load(open(loc, 'rb'))\n\t\telse:\n\t\t\tself.model.weights, self.model.classes = pickle.load(gzip.open(loc,'rb'))\n\t\t\t\n\tdef _get_features(self, i, word, context, prev, prev2): # feature extraction\n\t\t'''Map tokens into a feature representation, implemented as a\n\t\t{hashable: float} dict. If the features change, a new model must be\n\t\ttrained.\n\t\t'''\n\t\tdef add(name, *args):\n\t\t\tfeatures[' '.join((name,) + tuple(args))] += 1\n\t\ti += len(self.START)\n\t\tfeatures = defaultdict(int) # avoids the KeyErrors a plain dict would raise for new keys\n\t\t# It's useful to have a constant feature, which acts sort of like a prior\n\t\tadd('bias')\n\t\tadd('i suffix', word[-3:])\n\t\tadd('i pref1', word[0])\n\t\tadd('i-1 tag', prev)\n\t\tadd('i-2 tag', prev2)\n\t\tadd('i tag+i-2 tag', prev, prev2)\n\t\tadd('i word', context[i])\n\t\tadd('i-1 tag+i word', prev, context[i])\n\t\tadd('i-1 word', context[i - 1])\n\t\tadd('i-1 suffix', context[i - 1][-3:])\n\t\tadd('i-2 word', context[i - 2])\n\t\tadd('i+1 word', context[i + 1])\n\t\tadd('i+1 suffix', context[i + 1][-3:])\n\t\tadd('i+2 word', context[i + 2])\n\t\treturn features\n\n\tdef _make_tagdict(self, sentences):\n\t\t'''Make a tag dictionary for single-tag words.'''\n\t\tfor words, tags in sentences:\n\t\t\tfor word, tag in zip(words, tags):\n\t\t\t\tself.model.classes.add(tag) # collect every tag type\n\t\t\t\t # ap.model is the NER model; cws.model is the word-segmentation model\n
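\n# Illustration (hypothetical character, not from the training data): in the class\n# above, for the char at position i with prev='B-PER', _get_features emits\n# count-valued keys such as\n#   'bias', 'i word <char>', 'i-1 tag B-PER', 'i-1 word <prev char>',\n#   'i suffix <last 3 chars>', ...\n# and predict() dot-products exactly these keys against the stored weights.\n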
\ndef get_json_data(filepath): # convert a json-format training set into the sentence structure used here\n\ttraining_data = []\n\tsentence = ([], [])\n\tdict=json.load(open(filepath,\"r\",encoding=\"utf-8\"))\n\tfor dict_data in dict:\n\t\tnew_sentence=dict_data[\"text\"]\n\t\tfor i in new_sentence:\n\t\t\tsentence[0].append(i)\n\t\t\tsentence[1].append(\"O\")\n\t\tif dict_data[\"labels\"] == []:\n\t\t\tcontinue\n\t\tfor elem in dict_data[\"labels\"]:\n\t\t\tlabel=elem[\"label\"]\n\t\t\tsentence[1][elem[\"start\"]]=\"B-\"+label\n\t\t\tfor j in range(elem[\"start\"]+1,elem[\"end\"]):\n\t\t\t\tsentence[1][j]=\"I-\"+label\n\t\ttraining_data.append(sentence)\n\t\tsentence = ([], [])\n\treturn training_data\n\ndef get_txt_data(filepath): # load a txt-format training set\n\ttraining_data=[]\n\tsentence = ([], [])\n\tfin = open(filepath, 'r', encoding='utf8')\n\tfor index, line in enumerate(fin): # data prep: sentence[0] holds the words, sentence[1] the matching tags\n\t\tline = line.strip()\n\t\tif line == '': # sentences must be separated by blank lines, and the file needs two trailing newlines or the last sentence is not read\n\t\t\ttraining_data.append(sentence)\n\t\t\tsentence = ([], []) \n\t\telse:\n\t\t\tparams = line.split()\n\t\t\tif len(params) != 2: continue\n\t\t\tsentence[0].append(params[0])\n\t\t\tsentence[1].append(params[1])\n\tfin.close()\n\treturn training_data\n\ndef train(filepath=r'./new_train_data.txt', model=os.path.dirname(os.path.realpath(sys.argv[0]))+r'\\ap.model', nr_iter=1): # note: nr_iter defaults to 1\n\t\"\"\"\n\t\tCurrently supports json-format and txt-format training sets\n\t\"\"\"\n\ttagger = Perceptron()\n\tkey=filepath.split(\".\")[-1]\n\tif key=='json':\n\t\ttraining_data = get_json_data(filepath)\n\telif key=='txt':\n\t\ttraining_data = get_txt_data(filepath)\n\t# print(f'training corpus size : {len(training_data)}' ) # prints the number of sentences read in; training_data format => [sentence1,sentence2... ]\n\ttagger.train(training_data, save_loc=model, nr_iter=nr_iter)\n\ndef json_txt_prd(filepath,savePath,onuse_str,tagger): # reads a json-format test set\n\tdef ceshi_trans(filepath): # called by json_prd; converts a json test set into the sentence format needed here\n\t\tsentence = []\n\t\tlist=json.load(open(filepath,\"r\",encoding=\"utf-8\"))\n\t\tfor obj in list:\n\t\t\tsentence.append(obj[\"text\"])\n\t\treturn sentence\n\tff2=ceshi_trans(filepath)\n\ttxtWrite = open(savePath,\"w\",encoding=\"utf-8\")\n\tfor line in ff2:\n\t\tfor elem in onuse_str:\n\t\t\tline=line.replace(elem,'')\n\t\twords = list(line)\n\t\tlables=tagger.predict(words)\n\t\tfor word, label in zip(words, lables):\n\t\t\ttxtWrite.write(word+\"\\t\"+label+\"\\n\")\n\t\t\t# print(word, label)\n\t\ttxtWrite.write(\"\\n\")\n\ttxtWrite.close()\n\ndef json_json_prd(filepath,savePath,onuse_str,tagger):\n\t\"\"\"\n\t\tTakes a json-format input set\n\t\tand produces a json-format prediction set that keeps the labels already applied\n\t\"\"\"\n\tjs = open(filepath,\"r\",encoding=\"utf-8\")\n\ttrain_list =json.load(js)\n\tdef ceshi_trans(train_list): # called by json_prd; converts a json test set into the sentence format needed here\n\t\tsentence = []\n\t\tfor obj in train_list:\n\t\t\tsentence.append(obj[\"text\"])\n\t\treturn sentence\n\tff2=ceshi_trans(train_list)\n\ttxtWrite = open(savePath,\"w\",encoding=\"utf-8\")\n\tarr = []\n\tfor line in ff2:\n\t\ttemp_dict = {\"text\":\"\",'labels':[]}\n\t\tlb = {'start':0 , 'end':0 ,'label': ''}\n\t\tpre_label = \"O\"\n\t\tfor elem in onuse_str:\n\t\t\tline=line.replace(elem,'')\n\t\twords = list(line)\n\t\ttemp_dict['text'] = line\n\t\tlables=tagger.predict(words)\n\t\tfor index,[word, label] in enumerate(zip(words, lables)):\n\t\t\tif \"-\" in label:\n\t\t\t\tlabel = label.split(\"-\")[1]\n\t\t\tif pre_label != label:\n\t\t\t\tif pre_label == \"O\":\n\t\t\t\t\tpre_label = label\n\t\t\t\t\tlb['start'] = index\n\t\t\t\t\tlb['label'] = label\n\t\t\t\telse:\n\t\t\t\t\tlb['end'] = index\n\t\t\t\t\ttemp_dict['labels'].append(lb)\n\t\t\t\t\tlb = {'start':0 , 'end':0 ,'label': ''}\n\t\t\t\t\tpre_label = label\n\t\t\t\t\tif label != \"O\":\n\t\t\t\t\t\tlb['start'] = index\n\t\t\t\t\t\tlb['label'] = label\n\t\tarr.append(temp_dict)\n\n\t\t\n\t# new_arr= [] \n\t# for train,predict in zip(train_list,arr): # keep labels already set in the input and emit a json-format prediction set\n\t# \ttemp_dict = {\"text\":\"\",'labels':[]}\n\t# \ttemp_dict[\"text\"] = train[\"text\"]\n\t# \ttemp_dict[\"labels\"] = train[\"labels\"]\n\t# \tpredict_labels = predict[\"labels\"]\n\t# \tif train[\"labels\"] == []:\n\t# \t\ttemp_dict[\"labels\"] = predict[\"labels\"]\n\t# \t\tnew_arr.append(temp_dict)\n\t# \t\tcontinue\n\t# \tif predict_labels != []:\n\t# \t\tfor elem in predict_labels:\n\t# \t\t\tflag = 0\n\t# \t\t\tfor i in train[\"labels\"]:\n\t# \t\t\t\tif (elem['start'] <= i['start'] and elem['end'] >= i['end']) or (elem['start'] =i['end']):\n\t# \t\t\t\t\tflag = 1\n\t# \t\t\t\t\tbreak\n\t# \t\t\tif flag == 0:\n\t# \t\t\t\ttemp_dict[\"labels\"].append(elem)\n\t# \tnew_arr.append(temp_dict)\n\ttxtWrite.write(json.dumps(arr,indent=4,ensure_ascii=False))\n\ttxtWrite.close()\n\tjs.close()\n\ndef txt_txt_prd(filepath,savePath,onuse_str,tagger): # reads a txt-format test set and writes a txt-format prediction set\n\twith open (filepath,\"r\",encoding=\"utf-8\") as f:\n\t\tww = open(savePath,\"w\",encoding=\"utf-8\")\n\t\tarr=list(f)\n\t\tfor line in arr:\n\t\t\tline=line.strip()\n\t\t\tfor elem in onuse_str:\n\t\t\t\tline=line.replace(elem,\"\")\n\t\t\twords=list(line)\n\t\t\tlabels=tagger.predict(words)\n\t\t\tfor word,label in zip(words, labels):\n\t\t\t\tww.write(word+\"\\t\"+label+\"\\n\")\n\t\t\tww.write(\"\\n\")\n\t\tww.close()\n\ndef txt_json_prd(filepath,savePath,onuse_str,tagger):\n\t\"\"\"\n\t\tTakes a txt-format input set and produces a json-format prediction set\n\t\"\"\"\n\twith open (filepath,\"r\",encoding=\"utf-8\") as f:\n\t\tww = open(savePath,\"w\",encoding=\"utf-8\")\n\t\tff2 = list(f)\n\t\tarr = []\n\t\tfor line in ff2:\n\t\t\tline=line.strip()\n\t\t\ttemp_dict = {\"text\":\"\",'labels':[]}\n\t\t\tlb = {'start':0 , 'end':0 ,'label': ''}\n\t\t\tpre_label = \"O\"\n\t\t\tfor elem in onuse_str:\n\t\t\t\tline=line.replace(elem,'')\n\t\t\twords = list(line)\n\t\t\ttemp_dict['text'] = line\n\t\t\tlables=tagger.predict(words)\n\t\t\tfor index,[word, label] in enumerate(zip(words, lables)):\n\t\t\t\tif \"-\" in label:\n\t\t\t\t\tlabel = label.split(\"-\")[1]\n\t\t\t\tif pre_label != label:\n\t\t\t\t\tif pre_label == \"O\":\n\t\t\t\t\t\tpre_label = label\n\t\t\t\t\t\tlb['start'] = index\n\t\t\t\t\t\tlb['label'] = label\n\t\t\t\t\telse:\n\t\t\t\t\t\tlb['end'] = index\n\t\t\t\t\t\ttemp_dict['labels'].append(lb)\n\t\t\t\t\t\tlb = {'start':0 , 'end':0 ,'label': ''}\n\t\t\t\t\t\tpre_label = label\n\t\t\t\t\t\tif label != \"O\":\n\t\t\t\t\t\t\tlb['start'] = index\n\t\t\t\t\t\t\tlb['label'] = label\n\t\t\tarr.append(temp_dict)\n\t\tww.write(json.dumps(arr,indent=4,ensure_ascii=False))\n\t\tww.close()\n\ndef predict(filepath,savePath,model = os.path.dirname(os.path.realpath(sys.argv[0])) + r'\\ap.model'): # entity-recognition prediction entry point\n\t\"\"\"\n\t\tSupported test-set formats: json and txt\n\t\"\"\"\n\tonuse_str=\"\\n\"\n\ttagger = Perceptron(model)\n\tkey=filepath.split(\".\")[-1]\n\tkey2 = savePath.split(\".\")[-1]\n\t# txt_json_prd(filepath,savePath,onuse_str,tagger)\n\tif key==\"json\" and key2==\"txt\":\n\t\tjson_txt_prd(filepath,savePath,onuse_str,tagger)\n\telif key==\"txt\" and key2==\"txt\":\n\t\ttxt_txt_prd(filepath,savePath,onuse_str,tagger)\n\telif key==\"txt\" and key2==\"json\":\n\t\ttxt_json_prd(filepath, savePath, onuse_str, tagger)\n\telif key==\"json\" and key2==\"json\":\n\t\tjson_json_prd(filepath,savePath,onuse_str,tagger)\n\ndef write_predict(predict_path,save_predictions_path):\n\tpredict(predict_path,save_predictions_path) \n\ndef get_arg():\n\targs = sys.argv\n\tif len(args) == 2: # one argument: 1. json training set; only train the model\n\t\ttrain_path = args[1]\n\t\ttrain(filepath=train_path,nr_iter=3) \n\t\twrite_predict(train_path,os.path.dirname(os.path.realpath(sys.argv[0])) + r'.\\result.json')\n\t\tprint(\"success\")\n\telif len(args) == 3: # two arguments: 1. json training set path  2. json prediction set path\n\t\ttrain_path = args[1]\n\t\ttrain(filepath=train_path,nr_iter=3) \n\t\tcurrent_dir = os.path.dirname(os.path.realpath(sys.argv[0])) + r'\\result.json'\n\t\tpredict_path = args[2]\n\t\twrite_predict(predict_path, r'.\\result.json')\n\t\tprint(\"success\")\n\telif len(args) == 4: # three arguments: 1. json training set path  2. json prediction set path  3. save path for the json predictions\n\t\ttrain_path = args[1]\n\t\ttrain(filepath=train_path,nr_iter=3) \n\t\tpredict_path = args[2]\n\t\tsave_predictions_path = args[3]\n\t\twrite_predict(predict_path, save_predictions_path)\n\t\tprint(save_predictions_path)\n\telse :\n\t\tprint(args)\n\t\tprint('false')\n\nif __name__ == 
'__main__':\n\tget_arg()\n\ttime.sleep(1)","repo_name":"p1967914901/label-tool","sub_path":"main/tools/jiagu_train_model.py","file_name":"jiagu_train_model.py","file_ext":"py","file_size_in_byte":14929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37801661556","text":"#!/usr/bin/env python\n\n\"\"\"Solution for https://adventofcode.com/2022/day/3.\n\nP1:\nin the given input (elves distinguished by empty newlines)\n\nP2:\n\"\"\"\n\nfrom collections.abc import Iterable\nfrom aoc.common import load_input, show_current_day\n\nEXAMPLE_INPUT = \"\"\"vJrwpWtwJgWrhcsFMMfFFhFp\njqHRNqRjqzjGDLGLrsFMfFZSrLrFZsSL\nPmmdzqPrVvPwwTWBwg\nwMqvLMZHhHMvwLHjbvcjnnSBnvTQFn\nttgJtRGJQctTZtZT\nCrZsJsPPZsGzwwsLwLmpwMDw\"\"\"\n\nEXAMPLE_SOLUTION = 157\nEXAMPLE_SOLUTION_P2 = 70\n\n\ndef get_common_item_priority_score(c: str) -> int:\n \"\"\"Find the priority score of a given rucksack item.\n\n a-z are worth 1-26, while A-Z are worth 27-52.\n\n Args:\n c (str): The rucksack item to find the value of.\n\n Returns:\n int: The value of the rucksack item, as defined by the challenge.\n \"\"\"\n if c.islower():\n return ord(c) - 96\n\n # Account for the difference between a->Z (-32), as well as the fact\n # that we're starting at 27 and not 1 (+26)\n return (ord(c) - (96 - 32)) + 26\n\n\ndef line_to_rucksack(inv_line: str) -> str:\n \"\"\"Find the common inventory item contained within a rucksack's two inventory compartments.\n\n The rucksack is guranteed to be evenly divisible in two equal halves,\n and there will always be at least and at most 1 common item between\n the two.\n\n Args:\n inv_line (str): The rucksack to find the matching\n\n Returns:\n str: The common item between the rucksack's two compartments.\n \"\"\"\n midway = len(inv_line) // 2\n return set(inv_line[0:midway]).intersection(set(inv_line[midway:])).pop()\n\n\ndef find_priority_sum_each_rucksack(input: list[str]) -> int:\n \"\"\"Calculate the solution for part one.\n\n Args:\n input (list[str]): The given input, each line representing a rucksack.\n\n Returns:\n int: The sum of all shared item's priority value for every rucksack.\n \"\"\"\n return sum([get_common_item_priority_score(line_to_rucksack(line)) for line in input])\n\n\ndef take_chunks(l: list[str], size: int) -> Iterable[list[str]]:\n \"\"\"Split a given list into N many chunks of size _size_.\n\n Args:\n l (list[str]): The list to split.\n size (int): The size of sublists to chunk into.\n\n Returns:\n Iterable[list[str]]: A chunk of the original list of size _size.\n \"\"\"\n for i in range(0, len(l), size):\n yield l[i : i + size]\n\n\ndef get_common_from_three_rucksacks(rucksacks: list[str]) -> str:\n \"\"\"Find the common item between three rucksacks.\n\n It's guranteed there will always be at least and at most one common\n item across all three.\n\n Args:\n rucksacks (list[str]): The list of rucksacks to find the common\n item from.\n\n Returns:\n str: The shared inventory item of all three rucksacks.\n \"\"\"\n return set.intersection(*list(map(set, rucksacks))).pop() # type: ignore\n\n\ndef find_priority_of_3sum(input: list[str]) -> int:\n \"\"\"Calculate the solution for part two.\n\n Args:\n input (list[str]): The given input, each line representing a rucksack.\n\n Returns:\n int: The sum of all 3-rucksack pairings' shared item's priority value.\n \"\"\"\n return sum(\n [\n get_common_item_priority_score(get_common_from_three_rucksacks(elf_3some))\n for elf_3some in take_chunks(input, 3)\n ]\n )\n\n\ndef main(): # 
pragma: no cover\n print(show_current_day(__file__))\n inp = load_input(day=3).strip().split()\n\n p1 = find_priority_sum_each_rucksack(inp)\n p2 = find_priority_of_3sum(inp)\n print(f\"Part 1: Sum of priority of common item in each rucksack: {p1}\")\n print(f\"Part 2: Sum of threesome rucksack pairings: {p2}\")\n\n\nif __name__ == \"__main__\":\n main() # pragma: no cover\n","repo_name":"bigpick/code-practice","sub_path":"2022/advent-of-code/python/src/aoc/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23009623063","text":"import os\nimport pandas as pd\nfrom typing import Dict\nfrom typing import List\nfrom typing import Tuple\n\nimport streamlit.components.v1 as components\n\n_RELEASE = True # on packaging, pass this to True\n\nif not _RELEASE:\n _component_func = components.declare_component(\"time_chart\", url=\"http://localhost:3001/\",)\nelse:\n parent_dir = os.path.dirname(os.path.abspath(__file__))\n build_dir = os.path.join(parent_dir, \"frontend/build\")\n _component_func = components.declare_component(\"time_chart\", path=build_dir)\n\n\ndef time_chart(\n data: List[Tuple[str, float, int]],\n width: int = None,\n key=None,\n):\n \"\"\"Display a line chart with overlapping circles on a list of (x, y) points, using the D3 library.\n\n :param data: A list of (x, y) points\n :param width: Width of canvas, in pixels\n :param key: An optional string to use as the unique key for the widget.\n \"\"\" \n component_value = _component_func(\n data=data,\n svgWidth=width,\n key=key,\n default=None,\n )\n return component_value\n","repo_name":"migroch/inspire_data","sub_path":"components/time_chart/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39902266331","text":"#!/usr/bin/env python3\nfrom collections import namedtuple\nfrom typing import Optional, Union\n\n\ninput_file = open(\"Day_24/input\", \"r\")\nlines = [x.strip() for x in input_file.readlines()]\n\n\nclass Instruction(namedtuple(\"Instruction\", \"operator variable value\")):\n def __repr__(self) -> str:\n return f\"{self.operator} {self.variable} {self.value}\"\n\n\n# Grab input blocks\ninstructions = []\nfor line in lines:\n tokens = line.split(' ')\n value = (tokens[2:3] or (None,))[0]\n if value not in [None, \"w\", \"x\", \"y\", \"z\"]:\n value = int(value)\n instructions.append(Instruction(tokens[0],tokens[1],value))\n\ninst_blocks = []\nwhile instructions:\n instruction = instructions.pop(0)\n if instruction.operator == \"inp\":\n inst_blocks.append([])\n else:\n inst_blocks[-1].append(instruction)\n\nfor block in inst_blocks:\n print(f\"{block}\")\n\n\n# Define the APU we'll use to process each block of instructions\nclass ALU:\n def __init__(self, program : list[Instruction]) -> None:\n self.program = program\n self.mem = { 'w' : 0, 'x' : 0, 'y' : 0, 'z' : 0 }\n \n def build_program_expression(self, w: int, x: int = 0, y: int = 0, z: int = 0) -> dict[str, int]:\n self.mem = { 'w' : w, 'x' : x, 'y' : y, 'z' : z }\n for ins in self.program:\n if ins.operator == \"add\":\n self.mem[ins.variable] = self._add(ins)\n elif ins.operator == \"mul\":\n self.mem[ins.variable] = self._mul(ins)\n elif ins.operator == \"div\":\n self.mem[ins.variable] = self._div(ins)\n elif ins.operator == \"mod\":\n self.mem[ins.variable] = self._mod(ins)\n elif 
ins.operator == \"eql\":\n                self.mem[ins.variable] = self._eql(ins)\n        return self.mem[\"w\"], self.mem[\"x\"], self.mem[\"y\"], self.mem[\"z\"]\n\n    def _add(self, ins: Instruction) -> Union[Instruction, int]:\n        a = self._get_a(ins)\n        b = self._get_b(ins)\n        if isinstance(a, int) and isinstance(b, int):\n            return a + b\n        elif b == 0:\n            return a\n        elif a == 0:\n            return b\n        return ins\n\n    def _mul(self, ins: Instruction) -> Union[Instruction, int]:\n        a = self._get_a(ins)\n        b = self._get_b(ins)\n        if isinstance(a, int) and isinstance(b, int):\n            return a * b\n        elif a == 0 or b == 0:\n            return 0\n        elif a == 1:\n            return b\n        elif b == 1:\n            return a\n        return ins\n\n    def _div(self, ins: Instruction) -> Union[Instruction, int]:\n        a = self._get_a(ins)\n        b = self._get_b(ins)\n        if isinstance(a, int) and isinstance(b, int):\n            return a // b\n        elif a == 0:\n            return 0\n        elif b == 1:\n            return a\n        return ins\n\n    def _mod(self, ins: Instruction) -> Union[Instruction, int]:\n        a = self._get_a(ins)\n        b = self._get_b(ins)\n        if isinstance(a, int) and isinstance(b, int):\n            return a % b\n        elif a == 0:\n            return 0\n        elif b == 1:\n            return 0\n        return ins\n\n    def _eql(self, ins: Instruction) -> Union[Instruction, int]:\n        a = self._get_a(ins)\n        b = self._get_b(ins)\n        if isinstance(a, int) and isinstance(b, int):\n            return 1 if a == b else 0\n        elif b == 0 and isinstance(a, Instruction) and a.operator == \"eql\":\n            return Instruction(\"neq\", a.variable, a.value)\n        elif b == 1 and isinstance(a, Instruction) and a.operator == \"eql\":\n            return a\n        return ins\n\n    def _get_a(self, ins : Instruction) -> Union[Instruction, int]:\n        return self.mem[ins.variable]\n    \n    def _get_b(self, ins : Instruction) -> Union[Instruction, int]:\n        return ins.value if not ins.value in self.mem else self.mem[ins.value]\n\n\ndef find_negative_add_to_x_instruction(block : list[Instruction]) -> Optional[int]:\n    # This finds the values to make the inner condition false\n    # This is used to prevent the z expression from growing\n    # Cheers Reddit!\n    for ins in block:\n        if ins.operator == \"add\" and ins.variable == \"x\" and isinstance(ins.value, int):\n            return ins.value if ins.value < 0 else None\n    return None\n\n\n# Part 1\nprint(\"Part 1:\")\n\n# Setup starting inputs - Input is only ever put into the w space\n# We've dropped the inp instructions so we'll just load the input straight into w\nresults = {}\nalu = ALU(inst_blocks[0])\nfor w in range(9, 0 ,-1):\n    _, x, y, z = alu.build_program_expression(w, 0, 0, 0)\n    results[z] = [w] # Z should be 9 to 17\nprint(f\"Step 0 w={w} z={min(results)}..{max(results)} {len(results)}\")\n\n\nfor i, block in enumerate(inst_blocks[1:], start=1):\n    prev_results = results\n    results = {}\n    neg_to_x = find_negative_add_to_x_instruction(block)\n    alu = ALU(block)\n    for w in range(9, 0 ,-1):\n        for z in prev_results:\n            if neg_to_x and ((z % 26) + neg_to_x != w):\n                # Optimisation touted on reddit - can't take credit for this one!\n                continue\n            _, x, y, new_z = alu.build_program_expression(w, 0, 0, z)\n            if new_z not in results:\n                results[new_z] = prev_results[z] + [w]\n    print(f\"Step 0 w={w} z={min(results)}..{max(results)} {len(results)}\")\n\n# The results should have a 0 for z\npart_01 = \"\".join(str(x) for x in results[0])\nprint(f\"Result: {part_01}\")\n\n\n# Part 2\nprint(\"Part 2:\")\n\nresults = {}  # start part 2 from a clean dict so part 1 digit chains cannot leak in\nalu = ALU(inst_blocks[0])\nfor w in range(1, 10):\n    _, x, y, z = alu.build_program_expression(w, 0, 0, 0)\n    results[z] = [w] # Z should be 9 to 17\nprint(f\"Step 0 w={w} z={min(results)}..{max(results)} {len(results)}\")\n\n\nfor i, block in enumerate(inst_blocks[1:], start=1):\n    prev_results = results\n    results = {}\n    neg_to_x = find_negative_add_to_x_instruction(block)\n    alu = ALU(block)\n    for w in range(1, 10):\n        for z in prev_results:\n            if neg_to_x and ((z % 26) + neg_to_x != w):\n                # Optimisation touted on reddit - can't take credit for this one!\n                continue\n            _, x, y, new_z = alu.build_program_expression(w, 0, 0, z)\n            if new_z not in results:\n                results[new_z] = prev_results[z] + [w]\n    print(f\"Step 0 w={w} z={min(results)}..{max(results)} {len(results)}\")\n\npart_02 = \"\".join(str(x) for x in results[0])\nprint(f\"Result: {part_02}\")","repo_name":"MichaelReel/adventofcode-2021","sub_path":"Day_24/day_24.py","file_name":"day_24.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"954076528","text":"import argparse\n\n\ndef str2bool(v):\n    \"\"\"\n    Converts string to bool type; enables command line \n    arguments in the format of '--arg1 true --arg2 false'\n    \"\"\"\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n
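\n\n# Usage sketch (hypothetical flag, not one of the options defined below):\n#     parser.add_argument('--use_gpu', type=str2bool, nargs='?', const=True, default=False)\n# lets callers write '--use_gpu true' or '--use_gpu false' on the command line.\n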
\n\ndef get_args_parser():\n    parser = argparse.ArgumentParser('Result evaluation report generation for machine learning', add_help=False)\n    # ------------------------------------------ Evaluation settings------------------------------------------------\n    parser.add_argument('--cut_off_method', default='youden', type=str, help='the optimal cut-off value method.')\n\n    # ------------------------------------------ information report_saved ------------------------------------------------\n    parser.add_argument('--report_title', default='', type=str, help='The title of report_saved')\n    parser.add_argument('--report_describe',\n                        default='Evaluation according to the probability, prediction and true label',\n                        type=str, help='Description about the report')\n    parser.add_argument('--result_path', type=str, help='The path of image')\n    parser.add_argument('--report_saved_dir', default='./output/report_saved', type=str,\n                        help='The directory where report_saved is saved')\n    parser.add_argument('--report_file_name', default='report.pdf', type=str, help='The name of saved pdf file')\n    parser.add_argument('--report_keep_tmp', action='store_true', default=False)\n    # Whether to generate each figure; default = True\n    parser.add_argument('--report_confusion_matrix', action='store_true', default=True)\n    parser.add_argument('--report_roc_auc', action='store_true', default=True)\n    parser.add_argument('--report_dca', action='store_true', default=True)\n\n\n    parser.add_argument('--file_names', nargs='+', type=str, default=['1', '2', '3'])\n\n    return parser\n","repo_name":"wyd1216/DLVisual","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13172048279","text":"\"\"\"\n-------------------------------------------------\nFile Name: UnderstandingObject\nAuthor : wellqin\ndate: 2020/4/10\n-------------------------------------------------\n\nFunctions and classes are objects too; they are first-class citizens in Python\n\"\"\"\n\n\n\"\"\" 1. Functions and classes can be assigned to a variable \"\"\"\n\n\ndef ask(name=\"bobby\"): # take a function as the first example\n    print(name)\n\n\n# my_func = ask() TypeError: 'NoneType' object is not callable; do not add the parentheses\nmy_func = ask\nmy_func(\"bobby\") # bobby\nmy_func(\"alan\") # alan\n\n\nclass Person: # now take a class as the example\n    def __init__(self):\n        print(\"bobby\")\n\n\nmy_class = Person\nmy_class() # bobby\n\n\"\"\" 2. They can be added to container objects \"\"\"\nobj_list = [ask, Person]\nfor obj in obj_list:\n    print(obj())\n\n# bobby # ask()\n# None # ask() returns None; it was not visible before because of how print was used\n# bobby # Person()\n# <__main__.Person object at 0x00000260A1DD8780> # the instance object returned by Person()\n\n\n\"\"\" 3. They can be passed to functions as arguments \"\"\"\n\n\ndef print_type(item):\n    print(type(item)) # <class 'function'>\n    print(item) # <function ask at 0x...>\n\n\nprint(print_type(ask)) # None\n\n\"\"\" 4. They can be used as a function's return value [the principle behind decorators] \"\"\"\n\n\ndef decorator():\n    print(\"dec start\")\n    return ask\n\n\nmy_ask = decorator()\nmy_ask(\"tom\") # dec start tom\n","repo_name":"wellqin/USTC","sub_path":"PythonCore/Ch01@allIsObject/1.UnderstandingObject.py","file_name":"1.UnderstandingObject.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39025802367","text":"import os\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom telegraph import upload_file\nfrom Config import Config\npsycho = Client(\n    \"Telegraph Uploader Bot\",\n    bot_token = Config.TOKEN,\n    api_id = Config.API_ID,\n    api_hash = Config.API_HASH\n)\nDOWNLOAD_LOCATION = os.environ.get(\"DOWNLOAD_LOCATION\", \"./DOWNLOADS/\")\n\nSTART_TEXT = \"\"\"\nH![✨](https://telegra.ph/file/1434d9d0eb6a8bf00456a.jpg)\nI am Telegraph Media Converter🧳 Created by Telegraph v3\nI can create Pictures under 5MB\n\n~ @Master_X_Updates ~\n\"\"\"\nHELP_TEXT = \"\"\"\n- Just give me a media under 5MB\n- Then I will download it\n- I will then upload it to the telegra.ph link\n\nSupport ~ @Master_X_Updates ~\n\"\"\"\nABOUT_TEXT = \"\"\"\n- **Bot :** `Telegraph Uploader v3`\n- **Python3 :** `3.9.6`\n- **Updates Channel: **[Master X Bot's Updates](t.me/Master_X_Updates)\n- **Support :** [Best Friends](t.me/Best_Friends15)\n\n\"\"\"\nSTART_BUTTONS = InlineKeyboardMarkup(\n    [[\n        InlineKeyboardButton('Help', callback_data='help'),\n        InlineKeyboardButton('About', callback_data='about'),\n        InlineKeyboardButton('Close', callback_data='close')\n    ]]\n    )\nHELP_BUTTONS = InlineKeyboardMarkup(\n    [[\n        InlineKeyboardButton('Home', callback_data='home'),\n        InlineKeyboardButton('About', callback_data='about'),\n        InlineKeyboardButton('Close', callback_data='close')\n    ]]\n    )\nABOUT_BUTTONS = InlineKeyboardMarkup(\n    [[\n        InlineKeyboardButton('Support', url=\"t.me/Best_Friends15\"),\n        InlineKeyboardButton('Updates', url='https://t.me/Master_X_Updates')\n    ],\n    [\n        InlineKeyboardButton('Home', callback_data='home'),\n        InlineKeyboardButton('Help', callback_data='help'),\n        InlineKeyboardButton('Close', callback_data='close')\n    ]]\n    )\n\n@psycho.on_callback_query()\nasync def cb_data(bot, update):\n    if update.data == \"home\":\n        await update.message.edit_text(\n            text=START_TEXT.format(update.from_user.mention),\n            disable_web_page_preview=False,\n            reply_markup=START_BUTTONS\n        )\n    elif update.data == \"help\":\n        await update.message.edit_text(\n            text=HELP_TEXT,\n            disable_web_page_preview=True,\n            reply_markup=HELP_BUTTONS\n        )\n    elif update.data == \"about\":\n        await update.message.edit_text(\n            text=ABOUT_TEXT,\n            disable_web_page_preview=True,\n            reply_markup=ABOUT_BUTTONS\n        )\n    else:\n        await update.message.delete()\n    
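\n# A note on the dispatch above (sketch): every inline button routes back through\n# cb_data via its callback_data string, so a new screen is just another button,\n# e.g. InlineKeyboardButton('Stats', callback_data='stats'), plus a matching\n# elif update.data == \"stats\": branch in cb_data.\n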
\n\n@psycho.on_message(filters.private & filters.command([\"start\"]))\nasync def start(bot, update):\n text = START_TEXT.format(update.from_user.mention)\n reply_markup = START_BUTTONS\n await update.reply_text(\n text=text,\n disable_web_page_preview=False,\n quote=True,\n reply_markup=reply_markup\n )\n\n@psycho.on_message(filters.private & filters.media)\nasync def getmedia(bot, update):\n medianame = DOWNLOAD_LOCATION + str(update.from_user.id)\n try:\n message = await update.reply(\n text=\"`Processing...`\",\n quote=True,\n disable_web_page_preview=True\n )\n await bot.download_media(\n message=update,\n file_name=medianame\n )\n response = upload_file(medianame)\n try:\n os.remove(medianame)\n except:\n pass\n except Exception as error:\n print(error)\n text=f\"Error :- {error}\"\n reply_markup=InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('More Help', callback_data='help')\n ]]\n )\n await message.edit_text(\n text=text,\n disable_web_page_preview=True,\n reply_markup=reply_markup\n )\n return\n text=f\"**Link :-** `https://telegra.ph{response[0]}`\\n\\n**Join :-** @Master_X_Updates\"\n reply_markup=InlineKeyboardMarkup(\n [[\n InlineKeyboardButton(text=\"Open Link\", url=f\"https://telegra.ph{response[0]}\"),\n InlineKeyboardButton(text=\"Share Link\", url=f\"https://telegram.me/share/url?url=https://telegra.ph{response[0]}\")\n ],[\n InlineKeyboardButton(text=\"Join Updates Channel\", url=\"https://telegram.me/Master_X_Updates\")\n ]]\n )\n await message.edit_text(\n text=text,\n disable_web_page_preview=True,\n reply_markup=reply_markup\n )\n\npsycho.run()\n\n# © @Master_X_Updates #\n\n","repo_name":"Team-MasterXBots/Telegraph-Bot-V3","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"33637724473","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/movement/apertureClient.py\nimport service\nimport util\nimport uthread\n\nclass ApertureClient(service.Service):\n __guid__ = 'svc.apertureClient'\n __notifyevents__ = ['ProcessEntityVisibility']\n __dependencies__ = []\n __componentTypes__ = ['aperture', 'apertureSubject']\n\n def Run(self, *etc):\n service.Service.Run(self)\n\n def CreateComponent(self, name, state):\n return None\n\n def _CreateEntity(self, sceneID, entityID, initialState):\n self.LogInfo('ApertureClient Creating entity', entityID, 'in scene', sceneID)\n self.entityService.CreateEntityFromServer(entityID, sceneID, initialState)\n\n def _RemoveEntity(self, entityID):\n self.LogInfo('ApertureClient Deleting entity', entityID)\n self.entityService.UnregisterAndDestroyEntityByID(entityID)\n\n def ProcessEntityVisibility(self, eventList):\n charid = session.charid\n playerEntityCreateIdx = None\n charactersCreated = []\n for i, event in enumerate(eventList):\n if event[0] == 'OnEntityCreate' and event[2] == charid:\n eventList[i] = eventList[0]\n eventList[0] = event\n break\n\n callsToMake = []\n for t in eventList:\n if t[0] == 'OnEntityCreate':\n eventName, sceneID, entityID, initialState = t\n callsToMake.append((self._CreateEntity, (sceneID, entityID, initialState)))\n if util.IsCharacter(entityID):\n charactersCreated.append(entityID)\n elif t[0] == 'OnEntityDestroy':\n eventName, entityID = t\n callsToMake.append((self._RemoveEntity, (entityID,)))\n else:\n self.LogError('Aperture Client received a unknown event type %s', str(t[0]))\n\n if charactersCreated:\n 
uthread.new(cfg.eveowners.Prime, charactersCreated)\n uthread.parallel(callsToMake)","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/eve/client/script/movement/apertureClient.py","file_name":"apertureClient.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"3086390930","text":"from subprocess import Popen, PIPE\nimport inspect\nimport os.path\nimport platform\nimport random\nimport threading\nimport time\nimport shutil\nimport os\nimport atexit\nimport signal\n\nfrom . import port\n#from . import http_api\n\n_BINARY_FORMAT = \"\"\nif platform.system() == \"windows\":\n _BINARY_FORMAT = \".exe\"\n\n_IPFS_CLI_PATH = os.path.dirname(os.path.abspath(inspect.getframeinfo(inspect.currentframe()).filename)) + os.path.sep + \"resources\" + os.path.sep\n_PORT_SWARM = None\n_PORT_HTTP = None\n_PORT_GATEWAY = None\n\ndef _daemon_init():\n global _PORT_SWARM,_PORT_HTTP,_PORT_GATEWAY,_BINARY_FORMAT,_IPFS_CLI_PATH\n \n if os.path.isdir(\".ipfs\"):\n shutil.rmtree(\".ipfs\")\n\n _PORT_SWARM = _random_port()\n _PORT_HTTP = _random_port()\n _PORT_GATEWAY = _random_port()\n print ([_IPFS_CLI_PATH + \"ipfs-cli\" + _BINARY_FORMAT,_IPFS_CLI_PATH+\"ipfs\"+_BINARY_FORMAT,str(_PORT_SWARM),str(_PORT_HTTP),str(_PORT_GATEWAY)])\n p = Popen([_IPFS_CLI_PATH + \"ipfs-cli\" + _BINARY_FORMAT,_IPFS_CLI_PATH+\"ipfs\"+_BINARY_FORMAT,str(_PORT_SWARM),str(_PORT_HTTP),str(_PORT_GATEWAY)], stdout=PIPE, bufsize=1, universal_newlines=True)\n atexit.register(lambda: os.killpg(os.getpgid(p.pid), signal.SIGTERM))\n p.communicate()\n\n\n _PORT_SWARM, _PORT_HTTP, _PORT_GATEWAY = None,None,None\n print (\"[IPFS-CLI] ERROR: DAEMON FAILURE\")\n\ndef swarm_port():\n global _PORT_SWARM\n return _PORT_SWARM\n\ndef http_port():\n global _PORT_HTTP\n return _PORT_HTTP\n\ndef gateway_port():\n global _PORT_GATEWAY\n return _PORT_GATEWAY\n\ndef _random_port():\n c_port = random.randint(4001,65535)\n while True:\n if not port.is_open(c_port):\n return c_port\n\ndef _th_start():\n th = threading.Thread(target=_daemon_init,args=())\n th.daemon = True\n th.start()\n\n\ndef await_init():\n while True:\n try:\n print (\"DHT:\",http_api.dht.provide())\n return True\n except:\n #import traceback\n #traceback.print_exc()\n time.sleep(0.2)\n continue\n return True\n\n_th_start()\n\nfrom . 
import http_api\n","repo_name":"ZachisGit/SubPubServe","sub_path":"python/ipfsclipy/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"97599180","text":"import time\r\nfrom urllib import request\r\nfrom bs4 import BeautifulSoup\r\nimport chardet\r\n\r\n    # the proxy IP\r\nproxy = {'http':'125.123.142.56:9999'}\r\n    # create the ProxyHandler\r\nproxy_support = request.ProxyHandler(proxy)\r\n    # create the opener\r\nopener = request.build_opener(proxy_support)\r\n    # add a User-Agent header\r\nopener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')]\r\n    # install the opener\r\nrequest.install_opener(opener)\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport gzip\r\nimport io\r\nimport random\r\n\r\nT=0\r\nT_list=[]\r\nF=0\r\nF_list=[]\r\nTta = True_to_all = []\r\n\r\ncount = 0\r\nfor i in range(10000):\r\n    avid = random.randint(200000,49110000)\r\n    print(avid)\r\n    pre_url = 'https://www.bilibili.com/video/av'+str(avid)\r\n    #url = 'https://www.bilibili.com/video/av212109/'\r\n\r\n    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n               'Accept-Encoding': 'gzip, deflate',\r\n               'Accept-Language': 'en-US,en;q=0.5',\r\n               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0'}\r\n\r\n\r\n    req = urllib.request.Request(pre_url, headers=headers)\r\n    while True:\r\n        try:\r\n            response = urllib.request.urlopen(req)\r\n            break\r\n        except Exception:\r\n            time.sleep(0.05)\r\n\r\n    if response.info().get('Content-Encoding') == 'gzip':\r\n        pagedata = gzip.decompress(response.read())\r\n    elif response.info().get('Content-Encoding') == 'deflate':\r\n        pagedata = response.read()\r\n    elif response.info().get('Content-Encoding'):\r\n        print('Encoding type unknown')\r\n    else:\r\n        pagedata = response.read()\r\n\r\n    soup = BeautifulSoup(pagedata,'lxml')\r\n\r\n    title=soup.title\r\n\r\n    title = soup.find(\"meta\", property=\"og:title\")\r\n    #print(title[\"content\"] if title else \"No meta title given\")\r\n    #print(soup.prettify())\r\n    e = title[\"content\"] if title else \"No meta title given\"\r\n\r\n    if e =='视频去哪了呢?_哔哩哔哩 (゜-゜)つロ 干杯~-bilibili':\r\n        print('F')\r\n        F_list.append(avid)\r\n        F+=1\r\n        Tta.append(0)\r\n    else:\r\n        print('T')\r\n        T_list.append(avid)\r\n        T+=1\r\n        Tta.append(1)\r\n    count+=1\r\n    print(str(count/100)+'%')\r\n    print(str(T/(T+F)*100)+'%')\r\n    #Tta.append(T/(T+F)*100)\r\nprint(T)\r\nprint(F)\r\n#T_list.append(43872244)\r\n\r\n","repo_name":"haoyuF996/BiliBili-spider","sub_path":"spider_study_1.py","file_name":"spider_study_1.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6489830215","text":"from guizero import App, PushButton, Text\n\nnum = 0  # click counter shown in the message below (assumed intent; the name was otherwise undefined)\n\ndef do_nothing():\n    global num\n    num += 1\n    print(\"Button was pressed\")\n    app.bg = \"hot pink\"\n    app.text_size = 12\n    message.value = \"You clicked \"\n    message.value += str(num)  # Text values are strings, so convert the counter\napp = App()\nbutton = PushButton(app, command=do_nothing)\nmessage = Text(app, text=\"^ press it ^\")\napp.display()\n\n","repo_name":"fiemc/researchProjectFinal","sub_path":"Button prototype.py","file_name":"Button prototype.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73716588891","text":"import pandas as pd\nimport 
matplotlib.pyplot as plt\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom libs.figure.figure_QDialog import fig_Dialog\n\nclass figure_DwellTime(QThread):\n\n    def __init__(self, over_tracked, Method, T, parent=None):\n        super(figure_DwellTime, self).__init__()\n        self.overtracked = over_tracked  # all tracked info; entries start at index 1, index 0 is the header\n        self.Method = Method\n        self.SubImg_T = T\n\n    def process_(self):\n        all = self.overtracked\n        dwell_time = []\n        for i in range(len(all)):\n            start_frame = all[i][1][0]\n            over_frame = all[i][-1][0]\n            if all[i][-1][2] == \"debinding\":\n                over_index = self.search_debinding(all[i])\n                over_frame = all[i][over_index][0]\n            # if len(all[i][0]) == 4:\n            #     for j in range(1, len(all[i])):\n            #         if len(all[i][j]) == 3:\n            #             over_frame = all[i][j][0]\n            #             break\n            if self.Method == 1 and len(all[i]) == 2:\n                continue  # frame-by-frame subtraction with only a start point: treat the particle as always present\n            elif self.Method == 0 and all[i][-1][2] == \"binding\" and all[i][-1][0] % self.SubImg_T == 0:\n                continue  # subtracting the first frame: if the track's last frame is a multiple of T, assume the particle still exists\n\n            dwell_time.append(over_frame - start_frame)\n\n        result = pd.value_counts(dwell_time)\n        x = list(result.index)\n        x = sorted(x)\n        y = [result[i] for i in x]\n        plt.bar(range(len(y)), y, width=0.8, color='c', tick_label=x)\n        plt.xticks(fontsize=5, rotation=30)\n        plt.title(\"Histogram of binding dwell time\", fontsize=10)\n        plt.xlabel('Dwell Time, Frame')\n        plt.savefig(\"./temp/temp_DwellTime.tif\")\n        plt.close()\n        fig = fig_Dialog(\"./temp/temp_DwellTime.tif\", \"Histogram of binding-event\")\n        fig.start()\n\n    def generate_img_Menu(self):\n        self.menu = QMenu()\n        self.change_index_Action = QAction(QIcon('resources/icons/open.png'), '&Change Index', self)\n        self.change_index_Action.triggered.connect(self.change_index_)\n        self.menu.addAction(self.change_index_Action)\n        self.menu.popup(QCursor.pos())\n        self.menu.show()\n\n    def change_index_(self):\n        dialog = QDialog()\n        self.dialog = QDialog()\n        self.dialog.setWindowTitle(\"设置曝光时间\")\n        self.dialog.resize(250, 100)\n        layout = QVBoxLayout()\n\n        layout1 = QHBoxLayout()\n        lable1 = QLabel(\"处理格式:\")\n        lineedit1 = QLineEdit()\n        layout1.addWidget(lable1)\n        layout1.addWidget(lineedit1)\n        Wid1 = QWidget()\n        Wid1.setLayout(layout1)\n\n        layout2 = QHBoxLayout()\n        ok_btn = QPushButton(\"确定\")\n        cancle_btn = QPushButton(\"取消\")\n        layout2.addWidget(ok_btn)\n        layout2.addWidget(cancle_btn)\n        Wid2 = QWidget()\n        Wid2.setLayout(layout2)\n\n        layout.addWidget(Wid1)\n        layout.addWidget(Wid2)\n        dialog.setLayout(layout)\n        dialog.show()\n        ok_btn.clicked.connect(self.dialog.close)\n\n        def _cancle():\n            lineedit1.clear()\n            dialog.close()\n\n        cancle_btn.clicked.connect(_cancle)\n        ret = dialog.exec_()\n\n        if ret == 0:\n            if len(lineedit1.text()) > 0:\n                print(lineedit1.text())\n\n    def search_debinding(self, data):\n        '''Scan backward; the first \"debinding\" found after a \"binding\" marks the end of the track, and its position is returned'''\n        index = -1\n        if data[1][2] == \"debinding\":\n            return 1\n        for i in range(2, len(data)):\n            index = -1 * i\n            if data[index][2] == \"binding\" and data[index + 1][2] == \"debinding\":\n                return index + 1\n            if abs(index) >= len(data):\n                return -1\n        return -1\n
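\n    # Worked example (made-up track, not measured data): for\n    #   [hdr, (10, p, 'binding'), (12, p, 'binding'), (15, p, 'debinding'), (16, p, 'debinding')]\n    # the backward scan stops at the first 'debinding' that follows a 'binding'\n    # and returns its position (the frame-15 entry), so process_() ends the\n    # track at frame 15 rather than 16.\n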
\n\n\n    # def wheel_resize(self, ratio):\n    #\n    #     '''resize and re-apply'''\n    #     img = self.img\n    #\n    #     # the newest img in the list needs adjusting\n    #     img = cv.resize(img, (int(img.shape[1] * ratio), int(img.shape[0] * ratio)), interpolation=cv.INTER_CUBIC)\n    #\n    #\n    #     nowheight=img.shape[0]\n    #     nowwidth=img.shape[1]\n    #     #print('img height:', self.nowheight, 'img width:', self.nowwidth)\n    #     global T1, T2, T3, T4\n    #     if nowwidth <= self.wid:\n    #         T1 = (self.wid - nowwidth) / 2\n    #         T2 = (self.wid - nowwidth) / 2 + nowwidth\n    #     else:\n    #         T1 = 0\n    #         T2 = self.wid\n    #     if nowheight <= self.hei:\n    #         T3 = (self.hei - nowheight) / 2\n    #         T4 = (self.hei - nowheight) / 2 + nowheight\n    #     else:\n    #         T3 = 0\n    #         T4 = self.hei\n    #\n    #     self.dialog.setPixmap(self.cv2pixmap(img))\n","repo_name":"bevarb/AutoDetect","sub_path":"libs/figure/figure_DwellTime.py","file_name":"figure_DwellTime.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32799689949","text":"import datetime, time, multiprocessing, os\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.common.by import By\n\n\"\"\"\nThis version of the script excludes the class_name function in favor of getting all course names one time.\n\"\"\"\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--window-size=1920x1080\")\ndriver = webdriver.Chrome(r\"/Users/nadiabey/PycharmProjects/classRegistration/chromedriver 2\", options=chrome_options)\ndriver.implicitly_wait(10)\n\n\ndef init():\n    \"\"\"\n    open dukehub landing page and navigate to public class search\n    \"\"\"\n    driver.get(\n        \"https://dukehub.duke.edu/psc/CSPRD01/EMPLOYEE/SA/s/WEBLIB_HCX_GN.H_SPRINGBOARD.FieldFormula.IScript_Main\"\n        \"?&institution=DUKEU\")\n    catalog = driver.find_element_by_xpath(\"/html/body/div/main/div/div/div[4]/div/button\")\n    catalog.click()\n    iframe = driver.find_element_by_xpath(\"/html/body/div[1]/iframe\")\n    driver.switch_to.frame(iframe)\n\n\ndef get_term(x):\n    \"\"\"\n    input given term (Fall 2021, etc)\n    \"\"\"\n    term = driver.find_element_by_xpath(\"/html/body/div[1]/main/div/form/div/div[2]/div/div/div/input\")\n    term.clear()\n    term.send_keys(x)\n    term.send_keys(Keys.ARROW_DOWN)\n    term.send_keys(Keys.RETURN)\n    time.sleep(4)\n\n\ndef get_career(x):\n    \"\"\"\n    input given career x (undergraduate, graduate, etc)\n    \"\"\"\n    career = driver.find_element_by_xpath(\"/html/body/div[1]/main/div/form/div/div[3]/div/div/div/input\")\n    career.clear()\n    career.send_keys(x)\n    career.send_keys(Keys.ARROW_DOWN)\n    career.send_keys(Keys.RETURN)\n\n\ndef show_all():\n    \"\"\"\n    uncheck 'open classes only'\n    \"\"\"\n    closed = driver.find_element_by_xpath(\"/html/body/div[1]/main/div/form/div/div[22]/label/span[1]/span[1]/input\")\n    closed.click()\n\n\ndef search_button():\n    \"\"\"\n    click search\n    \"\"\"\n    search = driver.find_element_by_xpath(\"/html/body/div[1]/main/div/form/div/div[20]/button\")\n    search.click()\n\n\ndef page_end():\n    \"\"\"\n    scroll to bottom of page to load all classes\n    \"\"\"\n    try:\n        driver.find_element_by_xpath(\"/html/body/div[1]/main/div/p/div/div[2]/span\")\n    except NoSuchElementException:\n        ActionChains(driver).send_keys(Keys.END).perform()\n        time.sleep(4)\n        page_end()\n    finally:\n        time.sleep(3)\n\n\ndef num_search(listy):\n    \"\"\"\n    get info for class number\n    \"\"\"\n    temp = []\n    for item in listy:\n        if \"Class Number\" in item:\n            index = item.rfind('r')\n            num = item[index + 1:]\n            section = item[0:item.find(',')]\n            temp.append(section)  # index 0\n            temp.append(num)  # index 1\n\n        if \"seats\" in item:\n            # reserved and waitlist use rfind because 'seats' is present multiple times\n            if \"reserved\" in item:\n                if \"Closed\" not in item:\n                    avail = int(item[item.find('d,') + 3:item.find('of') - 1])\n                    total = int(item[item.find('of') + 3:item.rfind('seats') - 1])\n                    temp.append(avail)  # index 2\n                    temp.append(total)  # index 3\n                    temp.append(\"Yes\")  # index 4\n                else:\n                    avail = int(item[item.rfind('d,') + 3:item.find('of') - 1])\n                    total = int(item[item.find('of') + 3:item.rfind('seats') - 1])\n                    temp.append(avail)  # index 2\n                    temp.append(total)  # index 3\n                    temp.append(\"Yes\")  # index 4\n            elif \"waitlist\" in item:\n                avail = int(item[item.find('e.') + 3:item.rfind('of') - 1])\n                total = int(item[item.rfind('of') + 3:item.rfind('seats') - 1])\n                temp.append(avail)\n                temp.append(total)\n                temp.append(\"No\")\n            else:\n                avail = int(item[item.find(',') + 2:item.find('of') - 1])\n                total = int(item[item.find('of') + 3:item.find('seats') - 1])\n                temp.append(avail)\n                temp.append(total)\n                temp.append(\"No\")\n            try:\n                temp.append((total - avail) / total * 100)  # index 5\n                temp.append(str(datetime.datetime.now()))  # index 6\n            except ZeroDivisionError:\n                temp.append(\"check\")\n                temp.append(str(datetime.datetime.now()))\n\n    return temp\n
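\n\n# Illustration (made-up screen-reader strings, not captured from DukeHub): for an\n# item like 'LIT 101-01, Class Number 12345' followed by 'Open, 5 of 30 seats',\n# num_search emits seven fields per class, consumed as info[k::7] below:\n# [section, class number, open seats, total seats, reserved flag, percent filled, timestamp].\n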
because 'seats' is present multiple times\n if \"reserved\" in item:\n if \"Closed\" not in item:\n avail = int(item[item.find('d,') + 3:item.find('of') - 1])\n total = int(item[item.find('of') + 3:item.rfind('seats') - 1])\n temp.append(avail) # index 2\n temp.append(total) # index 3\n temp.append(\"Yes\") # index 4\n else:\n avail = int(item[item.rfind('d,') + 3:item.find('of') - 1])\n total = int(item[item.find('of') + 3:item.rfind('seats') - 1])\n temp.append(avail) # index 2\n temp.append(total) # index 3\n temp.append(\"Yes\") # index 4\n elif \"waitlist\" in item:\n avail = int(item[item.find('e.') + 3:item.rfind('of') - 1])\n total = int(item[item.rfind('of') + 3:item.rfind('seats') - 1])\n temp.append(avail)\n temp.append(total)\n temp.append(\"No\")\n else:\n avail = int(item[item.find(',') + 2:item.find('of') - 1])\n total = int(item[item.find('of') + 3:item.find('seats') - 1])\n temp.append(avail)\n temp.append(total)\n temp.append(\"No\")\n try:\n temp.append((total - avail) / total * 100) # index 5\n temp.append(str(datetime.datetime.now())) # index 6\n except ZeroDivisionError:\n temp.append(\"check\")\n temp.append(str(datetime.datetime.now()))\n\n return temp\n\n\ndef input_subject(x):\n \"\"\"\n x - department\n returns dictionary of lists\n \"\"\"\n dept = driver.find_element_by_xpath(\"/html/body/div[1]/main/div/form/div/div[4]/div/div/div/input\")\n dept.clear()\n dept.send_keys(x)\n dept.send_keys(Keys.ARROW_DOWN)\n dept.send_keys(Keys.RETURN)\n search_button()\n time.sleep(3)\n\n try:\n exist = driver.find_element_by_xpath(\"//span[contains(@role, 'alert')]\")\n if \"We're sorry\" in exist.text:\n print(x, \"has no results\")\n o = open('dept overflow.txt', 'a')\n o.write(x + \"\\n\")\n o.close()\n return None\n except NoSuchElementException:\n page_end()\n time.sleep(3)\n secNum = driver.find_elements_by_xpath(\"//span[contains(@class, 'sr-only')]\")\n y = [z.text for z in secNum]\n info = num_search(y)\n sections = info[0::7]\n numbers = info[1::7]\n availability = info[2::7]\n totals = info[3::7]\n reserves = info[4::7]\n percent = info[5::7]\n times = info[6::7]\n\n rows = {'Section': sections, 'Class Number': numbers, 'Open Seats': availability,\n 'Total Seats': totals, 'Reserved Seats': reserves, 'Percent': percent, 'Timestamp': times}\n return rows\n\n\ndef to_file(f, data):\n try:\n file = open(f, 'a+')\n last = file.readlines()[-1]\n if last[-1] != \"\\n\":\n file.write(\"\\n\")\n file.close()\n except IOError:\n # if file does not exist create it\n data.to_csv(f, index=False)\n else:\n # add to preexisting file\n data.to_csv(f, mode='a', index=False, header=False)\n\n\ndef run(x, y, z, f):\n \"\"\"\n run all functions\n x - term\n y - career\n z - subject\n f - filename\n \"\"\"\n init()\n get_term(x)\n get_career(y)\n show_all()\n start = time.time()\n result = input_subject(z)\n end = time.time()\n print(z, end - start)\n if type(result) is None:\n pass\n else:\n df = pd.DataFrame(result)\n to_file(f, df)\n\n\ndef repeat(l):\n term = '2021 Fall Term'\n career = 'Undergraduate'\n print(\"start time: \", str(datetime.datetime.now()))\n first = time.time()\n pool = multiprocessing.Pool()\n pool.starmap(run, [(term, career, x,\n os.path.join(r'/Users/nadiabey/PycharmProjects/classRegistration/data/',\n x + ' fall 2021.csv')) for x in l])\n pool.close()\n driver.quit()\n last = time.time()\n print(\"end time: \", str(datetime.datetime.now()))\n print(\"time elapsed:\", last - first, \"seconds\")\n\n\nif __name__ == '__main__':\n fall = [x[:-1] for x in 
open('fall21dept.txt', 'r').readlines()]\n day = datetime.datetime.today().day\n while datetime.datetime.now() < datetime.datetime(2021, 7, day, 12, 0):\n repeat(fall)\n if datetime.datetime.now() > datetime.datetime(2021, 7, day, 12, 0):\n break\n","repo_name":"nadiabey/classRegistration","sub_path":"registration2.py","file_name":"registration2.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70927134810","text":"import cv2\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n# cap.set is for setting the window properties\r\n# cv2.CAP_PROP_FRAME_WIDTH , cv2.CAP_PROP_FRAME_HEIGHT can be changed using set property\r\n# frame_width and height flags can be replaced with integer values corresponding to the flags.\r\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 1208)\r\n# cap.set(3, 1208)\r\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\r\n# cap.set(4,720)\r\n\r\n'''\r\nNOTE: even though we can set the 2nd parameter to any random value, the frame dimensions will only take up the \r\nnearest available dimensions.\r\n'''\r\nwhile cap.isOpened():\r\n ret, frame = cap.read()\r\n\r\n if ret:\r\n cv2.imshow('readFrame', frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n# ----------------------2----------------------- GET PROPERTIES OF CAPTURE CLASS\r\n\r\n# The width and height can be fetched using:\r\n# cap.get(cv2.CAP_PROP_FRAME_WIDTH)\r\n# cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\r\n\r\n# OTHER PROPERTIES THAT CAN BE FOUND ARE ON:\r\n# https://docs.opencv.org/4.0.0/d4/d15/group__videoio__flags__base.html#gaeb8dd9c89c10a5c63c139bf7c4f5704d\r\n\r\n","repo_name":"delzadbamji/Computer-vision-101","sub_path":"seeAndSaveVideos/GetAndSetWindowParameters.py","file_name":"GetAndSetWindowParameters.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1093864983","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n#____________signal generation function, which is the sum of a random number of harmonics with random weights________\ndef gen():\n rand_freq=random.randint(1,20) \n max_freq=rand_freq*100 # max frequency in the signal spectrum\n delta=1/(2*max_freq) # \n Num=1000 # number of points\n T=Num*delta # impulse duration\n x=np.linspace(0,T,Num)\n n=random.randint(1,20)\n freq=np.random.randint(1,max_freq/10,n)\n freq=10*freq # these 2 lines are needed to generate frequencies that are at least 10 Hz apart\n weight_coeff=np.random.sample(n)\n print(\"Number of harmonics:\",n,freq) #for check myself\n sum_arr=np.zeros(Num)\n noise=np.random.sample(1)\n for i in range(n):\n arr=weight_coeff[i]*np.cos(freq[i]*2*np.pi*x)+0.01*noise*np.ones(Num)\n sum_arr+=arr\n return T,sum_arr\n\n\n#_____________function that writes the resulting sequence of points to a file__________\ndef write_to_file(T,sum_arr):\n my_file = open(\"input.txt\", \"w\")\n my_file.write(str(T))\n for i in range(len(sum_arr)):\n my_file.write(\"\\t\"+str(sum_arr[i]))\n my_file.close()\n \n \n#___________function that draws a graph of the resulting signal______________\ndef graph(T,sum_arr):\n x=np.linspace(0,T,len(sum_arr))\n plt.plot(x,sum_arr)\n plt.grid()\n plt.show()\n 
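#___________editor's note: illustrative sketch only — a reader for the file
#___________produced by write_to_file above, so the saved signal can be checked;
#___________the name 'read_from_file' is an assumption, not part of the original
#___________module (the "input.txt" path matches write_to_file)_____________
def read_from_file(path="input.txt"):
    # inverse of write_to_file: the first tab-separated field is the impulse
    # duration T, the remaining fields are the sampled signal values
    with open(path) as f:
        parts = f.read().split("\t")
    T = float(parts[0])
    samples = np.array([float(v) for v in parts[1:]])
    return T, samples
# usage (sketch): T, samples = read_from_file(); graph(T, samples)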
\n#_______main_______\nT,sum_arr=gen()\ngraph(T,sum_arr)\nwrite_to_file(T,sum_arr)","repo_name":"1numco/DFT","sub_path":"generator_of_harmonic.py","file_name":"generator_of_harmonic.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1103516789","text":"import random\nfrom itertools import cycle\n\nclass RandomizedSet:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.size = 1024\n self.no_elems = 0\n self.elems = [None] * self.size\n\n def insert(self, val: int) -> bool:\n \"\"\"\n Inserts a value to the set. Returns true if the set did not already contain the specified element.\n \"\"\"\n idx = hash(val) & (self.size-1)\n if self.elems[idx] is None:\n self.elems[idx] = val\n self.no_elems += 1\n return True\n else:\n return False\n\n def remove(self, val: int) -> bool:\n \"\"\"\n Removes a value from the set. Returns true if the set contained the specified element.\n \"\"\"\n idx = hash(val) & (self.size-1)\n if self.elems[idx] is None:\n return False\n else:\n self.no_elems -= 1\n self.elems[idx] = None\n\n def getRandom(self) -> int:\n \"\"\"\n Get a random element from the set.\n \"\"\"\n choice = random.choice(range(self.size))\n while True:\n for i in cycle(range(self.size)):\n idx = choice + i\n if idx >= self.size:\n idx = idx - self.size\n if self.elems[idx] is not None:\n return self.elems[idx]\n\n# Your RandomizedSet object will be instantiated and called as such:\n# obj = RandomizedSet()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = obj.getRandom()\n","repo_name":"infinite-Joy/programming-languages","sub_path":"python-projects/algo_and_ds/set_implementation.py","file_name":"set_implementation.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"12494978066","text":"from flask import Blueprint, g, jsonify, request\nfrom flask_expects_json import expects_json\nfrom payments.ext.api import credit_card\n\nbp = Blueprint(\"api\", __name__)\n\n@bp.route(\"/v1/credit-card\", methods=[\"GET\"])\ndef list_credit_card():\n response = credit_card.get_all()\n return response\n\n\n@bp.route(\"/v1/credit-card/\", methods=[\"GET\"])\ndef detail_credit_card(id):\n response = credit_card.get_by_id(id)\n return response\n\nvalidation = {\n \"type\": \"object\",\n \"properties\": {\n \"cc_number\": { \"type\": \"string\", \"minLength\": 16, \"maxLength\": 16, \"error_msg\": \"Please provide a valid Credit Card Number\" }, \n \"exp_date\": {\"type\": \"string\", \"minLength\": 7, \"error_msg\": \"Please provide a valid Expiration Date YYYY/MM\"},\n \"holder\": { \"type\": \"string\", \"minLength\": 2 , \"error_msg\": \"Please provide a Holder name\"}, \n \"cvv\": {\"type\":\"string\",\"minLength\": 3, \"maxLength\": 4, \"error_msg\": \"Please provide a valid CVV\" } \n },\n \"required\": [\"exp_date\",\"holder\"]\n}\n\n@bp.route(\"/v1/credit-card\", methods=['POST'])\n@expects_json(validation)\ndef add_credit_card(): \n data = g.data\n response = credit_card.add_credit_card(data)\n return response\n\ncc_validation = {\n \"type\": \"object\",\n \"properties\": { \n \"cc_number\": { \"type\": \"string\",\"error_msg\": \"Please provide a valid Credit Card Number\" }, \n },\n \"required\": [\"cc_number\"]\n}\n\n@bp.route(\"/v1/credit-card-validation\", methods=['POST'])\n@expects_json(cc_validation)\ndef validate_credit_card(): \n data 
= g.data\n cc_number = data[\"cc_number\"]\n response = credit_card.validate_credit_card(cc_number)\n return response \n\n","repo_name":"amanda-fernandes/payments","sub_path":"payments/ext/api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73738984731","text":"import pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\ndf_tracks = pd.read_csv(\n \"/run/media/high/Edu/Music Genre Classification Project/fma_metadata/tracks.csv\",\n low_memory=False,\n)\n\ndf = df_tracks[[\"track_id\", \"subset\", \"bit_rate\", \"duration\", \"genre_top\", \"title\"]]\n\ndf_small = df[df[\"subset\"] == \"small\"].copy()\n\ndf_small[\"track_id\"] = df_small[\"track_id\"].apply(str)\ndf_small[\"track_id\"] = list(map(lambda x: x.zfill(6), df_small[\"track_id\"]))\ndf_small[\"file_name\"] = df_small[\"track_id\"] + \".mp3\"\n\ngenre_list = df_small[\"genre_top\"].unique().tolist()\n\nle = preprocessing.LabelEncoder()\nle.fit(genre_list)\ndf_small[\"genre_class\"] = df_small[\"genre_top\"].apply(lambda x: le.transform([x])[0])\n\ndf_train, df_test = train_test_split(df_small, test_size=0.2)\ndf_train[\"train-test\"] = \"Train\"\ndf_test[\"train-test\"] = \"Test\"\nresult = pd.concat([df_train, df_test])\nresult.sort_index()\n\n\nresult = result[\n [\n \"file_name\",\n \"track_id\",\n \"subset\",\n \"bit_rate\",\n \"duration\",\n \"genre_top\",\n \"genre_class\",\n \"train-test\",\n \"title\",\n ]\n]\nresult.sort_values(by=\"track_id\", ignore_index=True, inplace=True)\n\nresult.to_csv(\n \"/run/media/high/Edu/Music Genre Classification Project/Music-Genre-Recognition/track-genre.csv\",\n index=False,\n)\n\nprint(result.info())\nprint(result[\"train-test\"].value_counts())\nprint(sorted(genre_list))\nprint(le.transform(sorted(genre_list)))\n","repo_name":"ChitranshuV/Music-Genre-Recognition","sub_path":"data_creation.py","file_name":"data_creation.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24494403720","text":"from mock import patch\nfrom cap.modules.mail.users import get_all_users, get_users_by_record, \\\n get_users_by_experiment\nfrom cap.modules.mail.custom.recipients import get_owner, get_submitter\n\n\ndef test_get_all_user_mails(users):\n users = get_all_users()\n\n assert len(users) == 12\n\n\ndef test_get_users_by_record(app, db, users, create_deposit):\n user1 = users['alice_user']\n user2 = users['alice_user2']\n\n deposit = create_deposit(user1, 'alice-analysis-v0.0.1')\n deposit._add_user_permissions(\n user2,\n ['deposit-read'],\n db.session\n )\n depid = deposit['_deposit']['id']\n\n users_all = get_users_by_record(depid)\n assert len(users_all) == 2\n\n users_admin = get_users_by_record(depid, role='admin')\n assert len(users_admin) == 1\n\n users_read = get_users_by_record(depid, role='read')\n assert len(users_read) == 2\n\n\ndef test_get_user_by_experiment(remote_accounts):\n cms_users = get_users_by_experiment('cms')\n assert len(cms_users) == 2\n\n lhcb_users = get_users_by_experiment('lhcb')\n assert len(lhcb_users) == 1\n\n atlas_users = get_users_by_experiment('atlas')\n assert len(atlas_users) == 1\n\n alice_users = get_users_by_experiment('alice')\n assert len(alice_users) == 1\n\n\ndef test_get_current_user(app, db, users):\n user1 = users['alice_user']\n assert get_submitter(None,\n 
default_ctx={'submitter_id': user1.id}) == ['alice_user@cern.ch']\n\n\ndef test_get_record_owner(users, location, create_schema, create_deposit):\n user = users['cms_user']\n create_schema('test', experiment='CMS')\n deposit = create_deposit(\n user, 'test',\n experiment='CMS',\n )\n assert get_owner(deposit) == ['cms_user@cern.ch']\n","repo_name":"cernanalysispreservation/analysispreservation.cern.ch","sub_path":"tests/unit/mail/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"32"} +{"seq_id":"11414501810","text":"import requests\nimport time\nfrom telegram.ext import Updater\n\n# CoinGecko API URL\nAPI_URL = \"https://api.coingecko.com/api/v3/coins/markets\"\nTELEGRAM_TOKEN = \"5698712224:AAGnNrK0THRu5L8vBTWcZQZU1nBBFLWIuvU\";\nTELEGRAM_CHAT_ID = \"5591584952\";\n\n\n# function to get latest price data for all tokens\ndef get_all_token_prices(page=1):\n # make API call to CoinGecko\n response = requests.get(API_URL, params={\"vs_currency\": \"usd\", \"per_page\": 250, \"page\": page})\n\n # parse response as JSON\n data = response.json()\n # print(data)\n\n # create a map to store token prices\n token_prices = {}\n # store the prices in the map\n for token in data:\n token_prices[token[\"id\"]] = token[\"current_price\"]\n\n # log token price\n print(token_prices)\n\n # log token_prices length\n print(len(token_prices))\n\n # check if there are more pages of data\n if len(data) == 250:\n # get prices for next page\n token_prices.update(get_all_token_prices(page=page + 1))\n\n # #log token price\n # print(token_prices)\n\n # return the map\n return token_prices\n\n # return a map of token IDs to current prices\n return {token[\"id\"]: token[\"current_price\"] for token in data}\n\n\n# function to scan for sudden changes in token prices\ndef scan_for_sudden_changes():\n # create a new Telegram bot\n updater = Updater(TELEGRAM_TOKEN, use_context=True)\n bot = updater.bot\n\n # get the initial prices for all tokens\n previous_prices = get_all_token_prices()\n\n # scan for sudden changes in token prices every 5 minutes\n while True:\n # sleep for 5 minutes\n time.sleep(5 * 60)\n\n # get the current prices for all tokens\n current_prices = get_all_token_prices()\n\n # check for sudden changes in token prices\n for token_id, current_price in current_prices.items():\n # compare current price to previous price\n previous_price = previous_prices[token_id]\n change = (current_price - previous_price) / previous_price\n if abs(change) > 0.05: # 5% change\n # send notification to user\n bot.send_message(TELEGRAM_CHAT_ID,\n f\"{token_id} has experienced a sudden {'increase' if change > 0 else 'decrease'} in price\")\n\n # update previous price for token\n previous_prices[token_id] = current_price\n\n\n# start scanning for sudden changes in token prices\nscan_for_sudden_changes()","repo_name":"Zeek-eth/Basic-Python-Exercise","sub_path":"Exercise File/exercise8_5.py","file_name":"exercise8_5.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5511461078","text":"class Solution:\n def smallestTrimmedNumbers(self, nums: List[str], queries: List[List[int]]) -> List[int]:\n \n l = []\n for m in range(len(nums[0])):\n d=[]\n c=len(nums[0])-1-m\n for i in range(len(nums)):\n d.append([int(nums[i][c:]),i])\n \n d.sort()\n l.append(d)\n \n ans = []\n for a,b in queries:\n 
ans.append(l[b-1][a-1][1])\n return ans","repo_name":"RohitWaghole/Leetcode","sub_path":"2343-query-kth-smallest-trimmed-number/2343-query-kth-smallest-trimmed-number.py","file_name":"2343-query-kth-smallest-trimmed-number.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"13170084672","text":"import json\n\n\nclass labelme_json:\n def __init__(self, shapes, imagePath, imageHeight, imageWidth, filename):\n self.imagePath = imagePath\n self.imageHeight = imageHeight\n self.imageWidth = imageWidth\n self.filename = filename\n self.shapes = []\n\n self.get_shapes(shapes)\n\n def write_to_file(self, path):\n label_json = {\n \"version\": \"4.5.7\",\n \"flags\": {},\n \"shapes\": self.shapes,\n \"imagePath\": self.imagePath,\n \"imageData\": None,\n \"imageHeight\": self.imageHeight,\n \"imageWidth\": self.imageWidth\n }\n # print(label_json)\n with open(path, 'w') as f:\n json.dump(label_json, f, indent=4, ensure_ascii=False)\n\n def get_shapes(self, shapes):\n for shape in shapes:\n d = {\n \"label\": shape.label,\n \"points\": shape.points,\n \"group_id\": None,\n \"shape_type\": shape.shape_type,\n \"flags\": {}\n }\n self.shapes.append(d)\n\n\nclass shape:\n def __init__(self, label, points, shape_type):\n self.label = label\n self.points = points\n self.shape_type = \"polygon\"\n","repo_name":"HuangRuiquan/learngit","sub_path":"labelme_json.py","file_name":"labelme_json.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34500931850","text":"import torch\nfrom torch import nn, optim\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.autograd.variable import Variable\nimport os\nfrom tqdm import tqdm\nimport numpy as np\nimport argparse\nimport calendar, time\n\nfrom logger import Logger\nfrom model import Generator, Discriminator\nimport utils\nfrom utils import TrainDatasetFromFolder as TDF\nfrom loss import GeneratorLoss\n\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--G_path\", help=\"Generator mdoel path\")\n\tparser.add_argument(\"--D_path\", help=\"Discriminator mdoel path\")\n\tparser.add_argument(\"--vis_path\", help=\"Store SR images path\")\n\tparser.add_argument(\"--batch_size\", type=int, help=\"Batch size\")\n\tparser.add_argument(\"--res_blocks\", type=int, help=\"No. of resnet blocks\")\n\tparser.add_argument(\"--in_channels\", type=int, help=\"No. 
of input channels\")\n\tparser.add_argument(\"--dropout\", type=float, help=\"Dropout\")\n\tparser.add_argument(\"--train\", type=int, help=\"Train - 1 or Test - 0\")\n\tparser.add_argument(\"--downsample\", nargs='?', const=True, default=False, help=\"Downsampling GAN\")\n\tparser.add_argument(\"--loss\", help=\"Loss function - mse/bce/comb\")\n\tparser.add_argument(\"--norm\", help=\"BN or IN for Generator\")\n\n\targs = parser.parse_args()\n\tprint(args.batch_size)\n\tprint(args.res_blocks)\n\tprint(args.in_channels)\n\n\n\tpathG=args.G_path\n\tpathD=args.D_path\n\tsrDir=args.vis_path\n\n\ttest_batch_size = args.batch_size\n\tshuffle_dataset = True\n\trandom_seed= 42\n\n\t# Creating data indices for training and validation splits:\n\t# dataset_size = len(data_loader)*batch_size\n\t# root='/net/kihara/scratch/smaddhur/SuperReso/dataset_clean'\n\troot='/net/kihara-fast-scratch/smaddhur/SuperReso/EM_data/dataset_new_2_6'\n\tdataset = TDF(root,25,2,args.downsample,args.train) \n\tdataset_size = len(dataset)\n\tprint(dataset_size)\n\tindices = list(range(dataset_size))\n\tif shuffle_dataset :\n\t np.random.seed(random_seed)\n\t np.random.shuffle(indices)\n\n\t# indices_test=indices[900000:900160]\n\tindices_test=indices\n\n\tprint(len(indices_test))\n\t# Creating PT data samplers and loaders:\n\ttest_sampler = SubsetRandomSampler(indices_test)\n\ttest_loader = torch.utils.data.DataLoader(dataset, batch_size=test_batch_size, \n\t sampler=test_sampler)\n\n\n\t# Num batches\n\tnum_batches=len(test_loader)\n\tprint(num_batches)\n\n\tG = Generator(args.in_channels,2,args.res_blocks, args.downsample, args.dropout, args.norm)\n\tG.load_state_dict(torch.load(pathG))\n\tG.eval()\n\tD = Discriminator(args.in_channels, args.dropout)\n\tD.load_state_dict(torch.load(pathD))\n\tD.eval()\n\n\tG.cuda()\n\tD.cuda()\n\n\tG_criterionLoss = GeneratorLoss(args.loss).cuda()\n\n\tcoords=[\"0\",\"0\",\"0\"]\n\twith torch.no_grad():\n\t\tfor index,(lr, hr, filName) in enumerate(test_loader):\n\n\t\t\tlr=lr.float()\n\t\t\tval_z=Variable(lr)\n\t\t\tval_z=val_z.cuda()\n\t\t\tsr_test = G(val_z)\n\t\t\tval_target=Variable(hr)\n\t\t\tval_target=val_target.cuda()\n\t\t\thr_test=D(val_target).mean()\n\t\t\thr_fake=D(sr_test).mean()\n\n\t\t\tG_loss_test=G_criterionLoss(hr_fake, sr_test, val_target)\n\t\t\tD_loss_test=1 - hr_test + hr_fake\n\t\t\tutils.write_voxels(args.batch_size, srDir, sr_test, index, args.downsample, \"test\",coords,filName)\n\t\t\tif (index) % 50 == 0: \n\t\t\t\tprint(index)\n\n\t\tprint(torch.cuda.memory_allocated())\n\t\tprint('TEST - ')\n\t\tprint(\"D_loss:%d,G_loss:%\",(D_loss_test, G_loss_test))\n\n\t\tdel G_loss_test\n\t\tdel D_loss_test\n\n\n\nif __name__==\"__main__\":\n\tmain()\n\n","repo_name":"raptor419/SuperEM-Train","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20791395358","text":"import random\nimport json\nimport torch\nfrom training_model import NeuralNetwork\nfrom nltk_utils import bag_of_words, tokenize\nimport speech_recognition as sr\nimport pyttsx3\n\n\ndef main():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n with open(\"intents.json\", \"r\") as f:\n intents = json.load(f)\n \n FILE = \"data.pth\"\n data = torch.load(FILE)\n\n input_size = data[\"input_size\"]\n hidden_size = data[\"hidden_size\"]\n output_size = data[\"output_size\"]\n word_collection = data[\"word_collection\"]\n tags = 
data[\"tags\"]\n model_state = data[\"model_state\"]\n \n model = NeuralNetwork(input_size, hidden_size, output_size).to(device)\n model.load_state_dict(model_state)\n model.eval()\n \n bot_name = \"ChatBot\"\n print(\"Let's chat! Say 'quit' or 'end' to exit...\")\n engine = pyttsx3.init()\n voices = engine.getProperty(\"voices\")\n engine.setProperty(\"voice\", voices[1].id)\n \n chatting = True\n recognizer = sr.Recognizer()\n while chatting:\n value_error_message = \"Unable to recognize speech. Please try again.\"\n \n try:\n with sr.Microphone() as source:\n print(\"Speak something...\")\n recognizer.adjust_for_ambient_noise(source, duration = 1)\n audio = recognizer.listen(source)\n \n input_sentence = recognizer.recognize_google(audio)\n print(f\"You: {input_sentence}\")\n input_sentence = input_sentence.lower()\n \n except sr.UnknownValueError:\n print(value_error_message)\n engine.say(value_error_message)\n engine.runAndWait()\n continue\n \n \n if input_sentence == \"quit\" or input_sentence == \"end\":\n chatting = False\n break\n \n input_sentence = tokenize(input_sentence)\n x = bag_of_words(input_sentence, word_collection)\n x = x.reshape(1, x.shape[0])\n x = torch.from_numpy(x).to(device)\n \n output = model(x)\n _, predicted = torch.max(output, dim = 1)\n tag = tags[predicted.item()]\n \n probability = torch.softmax(output, dim = 1)\n probability = probability[0][predicted.item()]\n \n unrecognized_input = \"I do not understand. Please make it clear.\"\n \n if probability.item() > 0.75:\n for intent in intents[\"intents\"]:\n if tag == intent[\"tag\"]:\n random_response = random.choice(intent['responses'])\n print(f\"{bot_name}: {random_response}\")\n engine.say(random_response)\n engine.runAndWait()\n else:\n print(unrecognized_input)\n engine.say(unrecognized_input)\n engine.runAndWait()\n \n \n \n \n\nif __name__ == \"__main__\":\n main()","repo_name":"elib00/python_stuff","sub_path":"ChatBot/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"34240851888","text":"from flask import current_app\nimport os\n\n\ndef get_last_number() -> int:\n \"\"\"Return last number of file in directory with saved files.\"\"\"\n\n path = current_app.config[\"TEXT_PATH\"]\n _, _, list_names = next(os.walk(path))\n list_files_text = []\n\n for file_name in list_names:\n if \".txt\" in file_name:\n list_files_text.append(file_name)\n\n if len(list_files_text) == 0:\n return -1\n\n last_str = list_files_text[-1]\n str_number = last_str.split(\".\")[0]\n last_number = int(str_number)\n\n return last_number\n\n\ndef get_full_file_name(name: str) -> str:\n \"\"\"Return full path of file with text.\"\"\"\n\n return os.path.join(current_app.config[\"TEXT_PATH\"], name + \".txt\")\n\n\ndef get_full_keywords_name(name: str) -> str:\n \"\"\"Return full path of file with keywords.\"\"\"\n\n return os.path.join(current_app.config[\"KEY_PATH\"], name + \".json\")\n\n\ndef save_new(text: str, keywords_raw: str):\n \"\"\"Save text and keywords (must be as str format, for example as json).\"\"\"\n name = str(get_last_number() + 1)\n\n with open(get_full_file_name(name), \"w\", encoding=\"utf-8\") as text_file:\n text_file.write(text)\n\n with open(get_full_keywords_name(name), \"w\", encoding=\"utf-8\") as key_file:\n 
key_file.write(keywords_raw)\n","repo_name":"Semerak/Keyphrase_extractor","sub_path":"lib/saving.py","file_name":"saving.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"31653809128","text":"#!/usr/bin/env python\n\nimport copy\nfrom hexlet.fs import mkdir, mkfile, get_children, get_meta, is_file, get_name, is_directory, flatten\n\n\ndef find_empty_dir_paths(tree):\n def walk(node, depth):\n name = get_name(node)\n children = get_children(node)\n if len(children) == 0:\n return name\n if depth == 2:\n return []\n dir_paths = filter(is_directory, children)\n output = map(\n lambda child: walk(child, depth + 1), dir_paths,\n )\n return flatten(output)\n return walk(tree, 0)\n\n\ndef main():\n tree = mkdir('/', [\n mkdir('etc', [\n mkdir('apache'),\n mkdir('nginx', [\n mkfile('.nginx.conf'),\n ]), \n mkdir('.consul', [\n mkfile('.config.json'),\n mkfile('file.tmp'),\n mkdir('data'),\n ]),\n ]),\n mkdir('logs'),\n mkfile('hosts'),\n ])\n\n print((tree))\n print('------------------')\n print(find_empty_dir_paths(tree))\n\nif __name__ == '__main__':\n main()\n","repo_name":"akishev-m/exercises","sub_path":"12_woods/08_teory.py","file_name":"08_teory.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71842966171","text":"# Variáveis\nnumberOne = input('Primeiro número: ')\nnumberTwo = input('Segundo número: ')\n# Somar e Converter em Number\nnumberAll = int(numberOne) + int(numberTwo)\n# Cor\ncor = {'amarelo': '\\033[33m',\n 'fim': '\\033[m'}\n# Texto\nprint('A soma desses valores é: {}{}{}'.format(cor['amarelo'], numberAll, cor['fim']))\n","repo_name":"catabimbas/Curso-Python","sub_path":"Curso Python/Mundo 1/Modulo1/Desafios/des002 - Cor.py","file_name":"des002 - Cor.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36557146329","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic import RedirectView\n\nurlpatterns = [\n path(\"\", RedirectView.as_view(url=\"/wf/list/\"), name=\"home\"),\n path(\"admin/\", admin.site.urls),\n path(\"wf/\", include(\"lbworkflow.urls\")),\n path(\"attachment/\", include(\"lbattachment.urls\")),\n path(\"select2/\", include(\"django_select2.urls\")),\n]\n","repo_name":"vicalloy/django-lb-workflow","sub_path":"lbworkflow/tests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"32"} +{"seq_id":"17789331060","text":"import sys\nimport os\n\nsys.path.append(os.getcwd())\n\nfrom gudhi import RipsComplex\nimport numpy as np\nfrom phimaker import compute_ensemble\nimport math\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport random\n\nN = 10\naN_nice = 20\nbN_nice = 20\nmax_diagram_dim = 2\njitter_strength = 0.05\na = 0.3\nb = 0.8\ntruncation = 1.1\n\n\ndef get_jitter_torus_point(phase1, phase2, jit):\n random_phase1 = random.random() * jit\n random_phase2 = random.random() * jit\n phase1 = phase1 + random_phase1\n phase2 = phase2 + random_phase2\n angle1 = phase1 * 2 * math.pi\n angle2 = phase2 * 2 * math.pi\n return [\n (a * math.cos(angle1) + b) * math.cos(angle2),\n (a * math.cos(angle1) + b) * math.sin(angle2),\n a * math.sin(angle1),\n ]\n\n\ndef 
get_jitter_circle_point(phase, jit):\n random_phase = random.random() * jit\n return [\n b * math.cos(2 * math.pi * (phase + random_phase)),\n b * math.sin(2 * math.pi * (phase + random_phase)),\n 0,\n ]\n\n\nnice_points = np.array(\n [\n get_jitter_torus_point(i / aN_nice, j / bN_nice, jitter_strength)\n for i in range(aN_nice)\n for j in range(bN_nice)\n ]\n)\n\nfilling_points = np.array(\n [get_jitter_circle_point(i / bN_nice, jitter_strength) for i in range(bN_nice)]\n)\n\n\npts = np.vstack((nice_points, filling_points))\n\n# Plot point\nfig = plt.figure()\nax = fig.add_subplot(projection=\"3d\")\nax.scatter(filling_points[:, 0], filling_points[:, 1], filling_points[:, 2])\nax.scatter(nice_points[:, 0], nice_points[:, 1], nice_points[:, 2])\nax.set_aspect(\"equal\", adjustable=\"box\")\nplt.legend([\"Filling points\", \"Torus points\"], loc=\"lower right\")\nplt.savefig(\"fig/pointcloud_torus.png\")\nplt.show()\n\n\ndef is_nice_point(idx):\n return idx < len(nice_points)\n\n\ndef is_nice_smplx(smplx):\n return all([is_nice_point(pt) for pt in smplx])\n\n\nrcomp = RipsComplex(points=pts, max_edge_length=truncation)\n\n\n# Build simplex tree\nsimplex_tree = rcomp.create_simplex_tree(max_dimension=max_diagram_dim + 1)\n# Build second simplex tree with index as filtration value\ns_tree2 = simplex_tree.copy()\nentrance_times = []\ndimensions = []\nfor idx, f_val in enumerate(simplex_tree.get_filtration()):\n s_tree2.assign_filtration(f_val[0], idx)\n entrance_times.append(f_val[1])\n dimensions.append(len(f_val[0]) - 1)\n# Build up matrix to pass to phimaker\nmatrix = []\nfor idx, f_val in enumerate(s_tree2.get_filtration()):\n smplx = f_val[0]\n sparse_bdry = [int(face_idx) for _, face_idx in s_tree2.get_boundaries(smplx)]\n annotated_col = (is_nice_smplx(smplx), sorted(sparse_bdry))\n matrix.append(annotated_col)\n# Report\nprint(\"Got matrix\")\nprint(len(matrix))\n# Compute diagrams\ndgms = compute_ensemble(matrix)\nprint(\"Done\")\n\n\n# Plot diagrams\ndef plot_diagram(\n diagram,\n entrance_times,\n dimensions,\n truncation,\n max_dim=2,\n ax=None,\n title=None,\n legend=True,\n dim_shift=0,\n):\n all_pts = np.array(\n [\n [\n entrance_times[birth_idx],\n entrance_times[death_idx],\n dimensions[birth_idx] - dim_shift,\n ]\n for birth_idx, death_idx in diagram.paired\n if entrance_times[death_idx] != entrance_times[birth_idx]\n and dimensions[birth_idx] - dim_shift <= max_dim\n ]\n + [\n [\n entrance_times[birth_idx],\n truncation * 1.05,\n dimensions[birth_idx] - dim_shift,\n ]\n for birth_idx in diagram.unpaired\n if dimensions[birth_idx] - dim_shift <= max_dim\n ]\n )\n df = pd.DataFrame(data=all_pts, columns=[\"Birth\", \"Death\", \"Dimension\"])\n ret_ax = sns.scatterplot(\n data=df, x=\"Birth\", y=\"Death\", hue=\"Dimension\", ax=ax, legend=legend\n )\n ret_ax.set(xlabel=None)\n ret_ax.set(ylabel=None)\n ret_ax.set(title=title)\n sns.move_legend(ret_ax, \"lower right\")\n handle = ax if ax is not None else plt\n handle.plot([0, truncation * 1.05], [0, truncation * 1.05], \"m\", alpha=0.4)\n handle.plot(\n [0, 0, truncation * 1.05],\n [0, truncation * 1.05, truncation * 1.05],\n \"m--\",\n alpha=0.4,\n )\n\n\nfig, axes = plt.subplots(nrows=2, ncols=3, figsize=[3.5 * 3, 3.5 * 2])\nplot_diagram(\n dgms.ker,\n entrance_times,\n dimensions,\n truncation,\n ax=axes[0][0],\n title=\"Kernel\",\n dim_shift=1,\n max_dim=max_diagram_dim,\n)\nplot_diagram(\n dgms.rel,\n entrance_times,\n dimensions,\n truncation,\n ax=axes[0][1],\n title=\"Relative\",\n max_dim=max_diagram_dim + 
1,\n)\nplot_diagram(\n dgms.cok,\n entrance_times,\n dimensions,\n truncation,\n ax=axes[0][2],\n title=\"Cokernel\",\n max_dim=max_diagram_dim,\n)\nplot_diagram(\n dgms.g,\n entrance_times,\n dimensions,\n truncation,\n ax=axes[1][0],\n title=\"Domain\",\n max_dim=max_diagram_dim,\n)\nplot_diagram(\n dgms.im,\n entrance_times,\n dimensions,\n truncation,\n ax=axes[1][1],\n title=\"Image\",\n max_dim=max_diagram_dim,\n)\nplot_diagram(\n dgms.f,\n entrance_times,\n dimensions,\n truncation,\n ax=axes[1][2],\n title=\"Codomain\",\n legend=True,\n max_dim=max_diagram_dim,\n)\nplt.savefig(\"fig/5pack_torus.png\")\n","repo_name":"tomchaplin/phimaker","sub_path":"examples/torus_test.py","file_name":"torus_test.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"36431754846","text":"from rest_framework import serializers\nfrom watchlist_app.models import WatchList, StreamingPlatform, Reviews\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n review_user = serializers.StringRelatedField(read_only=True)\n\n class Meta:\n model = Reviews\n fields = '__all__'\n\n\nclass WatchListSerializer(serializers.ModelSerializer):\n reviews = ReviewSerializer(many=True, read_only=True)\n platform = serializers.CharField(source='platform.name')\n len_title = serializers.SerializerMethodField()\n\n class Meta:\n model = WatchList\n fields = '__all__'\n\n def get_len_title(self, object):\n length = len(object.title)\n return length\n\n\nclass StreamingPlatformSerializer(serializers.ModelSerializer):\n watchlist = WatchListSerializer(many=True, read_only=True)\n\n class Meta:\n model = StreamingPlatform\n fields = '__all__'\n\n #\n # def validate(self, data):\n # if data['name'] == data['description']:\n # raise serializers.ValidationError('name and description should be different!')\n # else:\n # return data\n #\n # def validate_name(self, value):\n # if len(value) < 3:\n # raise serializers.ValidationError('Name is too short. 
Must have at least 3 characters')\n # else:\n # return value\n\n # def create(self, validated_data):\n # return Movie.objects.create(**validated_data)\n\n # def update(self, instance, validated_data):\n # instance.name = validated_data.get('name', instance.name)\n # instance.description = validated_data.get(\n # 'description', instance.description)\n # instance.active = validated_data.get('active', instance.active)\n # instance.save()\n # return instance\n\n","repo_name":"ivanlegranbizarro/movieList","sub_path":"watchlist_app/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13979134358","text":"import pandas as pd\n\ndef load_gene(path):\n filename = path + 'epsilon2_GIgenes6_DTU190308_LB_genome_dip_normed3_clust1000_geneinfo_cp.xlsx'\n sheet_name = 'epsilon2_GIgenes6_DTU190308_LB_'\n\n book = pd.read_excel(filename, sheet_name=sheet_name)\n\n # [[no_cluster, b_no, gene_name], [], ..., []]\n gene_list = book.iloc[:,1:4].values.tolist()\n return gene_list\n\n\ndef main():\n path = '/cl/work/shusuke-t/mori_lab_cluster/data/'\n write_file = path + 'extracted_data_2019_05_13/gene/gene.tsv'\n gene_list = load_gene(path)\n\n '''\n name_list = [name[2] for name in gene_list if not name[2] == 'Noinfo_in_table']\n print(len(name_list))\n print(len(set(name_list))) \n '''\n\n with open(write_file, 'w') as w:\n for gene in gene_list:\n cluster = str(gene[0])\n name = str(gene[2])\n if not name == 'Noinfo_in_table':\n w.write(cluster + '\\t' + name + '\\n')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shuu-tatsu/gene_tfidf","sub_path":"read_gene_excel.py","file_name":"read_gene_excel.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11132000700","text":"import numpy as np\r\n\r\nfrom predict.model.interfaces import IModel\r\n\r\n\r\nclass PolyFit(IModel):\r\n\r\n def __init__(self, name, inputlen=7, outputlen=1):\r\n IModel.__init__(self, 'poly', name)\r\n self.__InputLength = inputlen\r\n self.__OutputLength = outputlen\r\n\r\n\r\n def fit(self, x, y):\r\n self.updatecheckpoint()\r\n pass\r\n\r\n\r\n def inputrequire(self):\r\n return self.__InputLength\r\n\r\n\r\n def maxpredict(self):\r\n return self.__OutputLength\r\n\r\n\r\n def predict(self, x):\r\n \"\"\"\r\n Predict each dimension separately\r\n \"\"\"\r\n x = np.array(x)\r\n t = np.arange(0, self.__InputLength, 1)\r\n t_pred = np.arange(self.__InputLength, self.__InputLength + self.__OutputLength, 1)\r\n\r\n pred = []\r\n\r\n for dim in range(x.shape[1]):\r\n f = np.polyfit(t, x[-self.__InputLength:,dim], 3)\r\n poly3 = np.poly1d(f)\r\n\r\n pred.append(poly3(t_pred))\r\n\r\n pred = np.asarray(pred).transpose()\r\n\r\n return pred\r\n","repo_name":"EngineerDDP/PredictingModels","sub_path":"model/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14056377890","text":"import argparse\n\nfrom prodvana.client import Client, make_channel\nfrom prodvana.proto.prodvana.common_config.parameters_pb2 import ParameterValue\nfrom prodvana.proto.prodvana.desired_state.manager_pb2 import (\n GetDesiredStateConvergenceReq,\n)\nfrom prodvana.proto.prodvana.service.service_manager_pb2 import GetMaterializedConfigReq\n\n\ndef print_param_value(param: ParameterValue) 
-> None:\n print(f\"parameter {param.name}: \", end=\"\")\n if param.string:\n print(param.string)\n elif param.int:\n print(f\"{param.int}\")\n elif param.docker_image_tag:\n print(param.docker_image_tag)\n else:\n raise Exception(f\"unrecognized parameter: {param}\")\n\n\ndef main() -> None:\n ap = argparse.ArgumentParser()\n ap.add_argument(\n \"--api-token\", help=\"Prodvana API token. if not passed, defaults to PVN_TOKEN.\"\n )\n ap.add_argument(\n \"--org\",\n required=True,\n help=\"Organization slug for your instance of Prodvana. This is the part of your URL before .prodvana.io.\",\n )\n ap.add_argument(\"ds_id\", help=\"Desired state id to approve for\")\n args = ap.parse_args()\n ds_id = args.ds_id\n with make_channel(org=args.org, api_token=args.api_token) as channel:\n client = Client(channel=channel)\n\n resp = client.desired_state_manager.GetDesiredStateConvergenceSummary(\n GetDesiredStateConvergenceReq(\n desired_state_id=ds_id,\n )\n )\n\n svc_entity = [\n entity\n for entity in resp.summary.entity_graph.entities\n if entity.desired_state_id == ds_id\n ][0]\n starting_state = svc_entity.starting_state.service\n starting_state_release_channels = {\n rc.release_channel: rc for rc in starting_state.release_channels\n }\n desired_state = svc_entity.desired_state.service\n for desired_release_channel_state in desired_state.release_channels:\n release_channel = desired_release_channel_state.release_channel\n starting_release_channel_state = starting_state_release_channels.get(\n release_channel\n )\n print(f\"release channel: {release_channel}\")\n if starting_release_channel_state: # can be None on first deployment\n print(\"starting state:\")\n for version in starting_release_channel_state.versions:\n config_resp = client.service_manager.GetMaterializedConfig(\n GetMaterializedConfigReq(\n application=desired_state.application,\n service=desired_state.service,\n version=version.version,\n )\n )\n svc_instance_config = [\n cfg\n for cfg in config_resp.compiled_service_instance_configs\n if cfg.release_channel == release_channel\n ][0]\n for param in svc_instance_config.parameter_values:\n print_param_value(param)\n print(\"desired state:\")\n assert (\n len(desired_release_channel_state.versions) == 1\n ), \"can only have one desired version\"\n desired_version = desired_release_channel_state.versions[0]\n config_resp = client.service_manager.GetMaterializedConfig(\n GetMaterializedConfigReq(\n application=desired_state.application,\n service=desired_state.service,\n version=desired_version.version,\n )\n )\n svc_instance_config = [\n cfg\n for cfg in config_resp.compiled_service_instance_configs\n if cfg.release_channel == release_channel\n ][0]\n for param in svc_instance_config.parameter_values:\n print_param_value(param)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"prodvana/prodvana-public","sub_path":"python/prodvana/examples/get_before_and_after_parameters.py","file_name":"get_before_and_after_parameters.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16389584359","text":"import asyncio\nimport json\nimport os.path\nimport sys\nimport time\nfrom datetime import datetime, timedelta\nfrom unittest.mock import patch\n\nimport aiohttp_xmlrpc.exceptions\nimport aiounittest\nimport psutil\n\nfrom utils.bundle.interface import check_or_start_bundle_server, run_bundle\n\n\nclass TestBundleInterface(aiounittest.AsyncTestCase):\n def setUp(self):\n self.cwd = 
os.path.dirname(os.path.abspath(__file__))\n\n # Set up the venv and python symlinks\n bin_path = os.path.join(self.cwd, 'test_bundle', 'venv', 'bin')\n\n activate_path = os.path.join(bin_path, 'activate')\n if os.path.exists(activate_path):\n os.remove(activate_path)\n\n self.python_path = os.path.join(bin_path, 'python')\n if os.path.exists(self.python_path):\n os.remove(self.python_path)\n\n os.makedirs(bin_path, exist_ok=True)\n os.symlink(os.path.join(os.path.dirname(sys.executable), 'activate'), activate_path)\n os.symlink(os.path.join(os.path.dirname(sys.executable), 'python'), self.python_path)\n\n self.cleanup()\n\n def tearDown(self):\n # Kill any spawned python server processes\n for proc in psutil.process_iter():\n try:\n # Fetch process details as dict\n info = proc.as_dict(attrs=['pid', 'cmdline'])\n\n args = info['cmdline']\n has_python = any(filter(lambda x: 'python' in x, args))\n has_server = any(filter(lambda x: 'utils/bundle/server.py' in x, args))\n if has_python and has_server:\n # Found a spawned server, terminate it\n psutil.Process(info['pid']).terminate()\n except Exception:\n pass\n\n # Small delay to let the OS clean up\n time.sleep(0.5)\n\n self.cleanup()\n\n def cleanup(self):\n if os.path.exists(os.path.join(self.cwd, 'test_bundle', 'test_output')):\n os.remove(os.path.join(self.cwd, 'test_bundle', 'test_output'))\n\n if os.path.exists(os.path.join(self.cwd, 'test_bundle', 'test_extra.py')):\n os.remove(os.path.join(self.cwd, 'test_bundle', 'test_extra.py'))\n\n @patch('asyncio.sleep')\n async def test_check_or_start_bundle_server_no_start(self, sleep_mock):\n # Check that a bundle that fails to start is reported correctly\n # Remove the symlink to the python interpreter so that the server never starts\n os.remove(self.python_path)\n\n with self.assertRaises(Exception) as e:\n await check_or_start_bundle_server(self.cwd, 'test_bundle')\n\n self.assertEqual(str(e.exception), \"Unable to start the RPC server for bundle with hash test_bundle\")\n\n async def call_bundle(self, output, bundle_function, bundle_path, bundle_hash, details, job_data):\n with open(os.path.join(self.cwd, 'test_bundle', 'test_output'), 'w') as f:\n json.dump(output, f)\n\n result = await run_bundle(bundle_function, bundle_path, bundle_hash, details, job_data)\n self.assertEqual(json.dumps(result['expected']), json.dumps(output))\n\n self.assertEqual(result['what'], bundle_function)\n self.assertDictEqual(result['details'], details)\n self.assertDictEqual(result['job_data'], job_data)\n\n async def test_bundle_functions(self):\n for fn in ['working_directory', 'submit', 'status', 'cancel', 'delete']:\n await self.call_bundle(1234, fn, self.cwd, 'test_bundle', {}, {})\n await self.call_bundle('1234', fn, self.cwd, 'test_bundle', {'test': 'hello'}, {'hello': 'test'})\n await self.call_bundle({'test': 'dict'}, fn, self.cwd, 'test_bundle', {'test': {'sub': 'dict'}}, {})\n\n async def test_bundle_reload(self):\n await self.call_bundle(1234, 'working_directory', self.cwd, 'test_bundle', {}, {})\n\n with open(os.path.join(self.cwd, 'test_bundle', 'test_extra.py'), 'w') as f:\n f.write(\"\"\"\ndef working_directory(details, job_data):\n return \"module_reloading_works\"\n \"\"\")\n\n result = await run_bundle('working_directory', self.cwd, 'test_bundle', {}, {})\n self.assertEqual(result, \"module_reloading_works\")\n\n with open(os.path.join(self.cwd, 'test_bundle', 'test_extra.py'), 'w') as f:\n f.write(\"\"\"\ndef working_directory(details, job_data):\n return \"module_reloading_works2\"\n 
\"\"\")\n\n result = await run_bundle('working_directory', self.cwd, 'test_bundle', {}, {})\n self.assertEqual(result, \"module_reloading_works2\")\n\n async def test_bundle_exception(self):\n with self.assertRaises(aiohttp_xmlrpc.exceptions.ApplicationError):\n await run_bundle('not_a_real_function', self.cwd, 'test_bundle', {}, {})\n\n async def test_bundle_non_blocking(self):\n # Here we're going to create an updated working directory function that has a 1 second delay in it. We'll\n # then call the bundle function 100 times and check that the time to complete is less than 10 seconds,\n # indicating that the calls ran in parallel\n\n with open(os.path.join(self.cwd, 'test_bundle', 'test_extra.py'), 'w') as f:\n f.write(\"\"\"\ndef working_directory(details, job_data):\n import time\n time.sleep(1)\n return job_data['index']\n \"\"\")\n\n tasks = [run_bundle('working_directory', self.cwd, 'test_bundle', {}, {'index': index}) for index in range(100)]\n\n now = datetime.now()\n\n results = await asyncio.gather(*tasks)\n\n self.assertEqual(len(results), len(tasks))\n\n for index, task in enumerate(results):\n self.assertEqual(task, index)\n\n duration = datetime.now() - now\n self.assertTrue(duration < timedelta(seconds=10))\n","repo_name":"gravitationalwavedc/gwcloud_job_client","sub_path":"utils/bundle/tests/test_bundle_interface.py","file_name":"test_bundle_interface.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42121543227","text":"\"\"\"TO-DO: Write a description of what this XBlock is.\"\"\"\n\nimport pkg_resources\nimport logging\n\nfrom xblock.core import XBlock\nfrom xblock.fields import Scope, Integer, String, Boolean, List, Dict, Float\nfrom xblock.fragment import Fragment\n\nfrom utils import load_resource, render_template\nfrom xblockutils.resources import ResourceLoader\nfrom django.template import Context, Template\n\n# Please start and end the path with a trailing slash\nlog = logging.getLogger(__name__)\nloader = ResourceLoader(__name__)\n\n\nclass AttrDict(dict):\n\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\nclass VoiceRecognizerXBlock(XBlock):\n\n \"\"\"\n TO-DO: document what your XBlock does.\n \"\"\"\n\n # Fields are defined on the class. 
You can access them in your code as\n # self..\n\n display_name = String(display_name=\"Display Name\",\n default=\"Voice Recognizer XBlock\",\n scope=Scope.settings\n )\n\n lang = String(default=\"en-US\",\n scope=Scope.settings\n )\n\n mode = String(default=\"standerd\",\n scope=Scope.settings\n )\n\n score = Float(\n default=0,\n scope=Scope.user_state\n )\n\n data = String(default=\"Hello\",\n scope=Scope.settings\n )\n\n answer = String(default=\"\",\n scope=Scope.user_state\n )\n\n weight = Float(\n default=1,\n scope=Scope.settings\n )\n\n max_attempts = Integer(\n default=1,\n scope=Scope.settings\n )\n\n attempts = Integer(\n default=0,\n scope=Scope.user_state\n )\n\n has_score = True\n\n def resource_string(self, path):\n \"\"\"\n Handy helper for getting resources from our kit.\n \"\"\"\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")\n\n # TO-DO: change this view to display your data your own way.\n def student_view(self, context=None):\n \"\"\"\n The primary view of the RadPatternsXBlock, shown to students\n when viewing courses.\n \"\"\"\n frag = Fragment()\n frag.add_content(render_template(\"static/html/voicerecognizer.html\",\n {'self': self, 'context': context}\n ))\n frag.add_css(self.resource_string(\"static/css/voicerecognizer.css\"))\n frag.add_javascript(\n self.resource_string(\"static/js/voicerecognizer.js\"))\n frag.initialize_js('VoiceRecognzerXBlock')\n return frag\n\n # Edit view\n def studio_view(self, context=None):\n\n frag = Fragment()\n frag.add_content(render_template(\"static/html/voicerecognizer_edit.html\",\n {'self': self, 'context': context}\n ))\n\n frag.add_javascript(\n self.resource_string('static/js/voicerecognizer_edit.js'))\n\n frag.initialize_js('VoiceRecognzerEditXBlock')\n return frag\n\n # TO-DO: change this handler to perform your own actions. 
You may need more\n # than one handler, or you may not need any handlers at all.\n @XBlock.json_handler\n def studio_submit(self, data, suffix=''):\n \"\"\"\n This function is used to add stuff to the xblock student view\n after user adds a xblock\n \"\"\"\n try:\n self.lang = data.get(\"lang\")\n self.display_name = data.get(\"display_name\")\n self.mode = data.get(\"mode\")\n self.data = data.get(\"data\").strip(' \\t\\n\\r')\n self.waight = data.get(\"waight\")\n self.max_attempts = data.get(\"max_attempts\")\n return {'status': 'success'}\n except:\n return {'status': 'error'}\n\n def getRemainingAttempts(self):\n return self.max_attempts - self.attempts\n\n @XBlock.json_handler\n def check_voice(self, data, suffix=''):\n \"\"\"\n This function is used to add stuff to the xblock student view\n after user adds a xblock\n \"\"\"\n try:\n voice_data = data.get(\"data\")\n self.answer = voice_data\n if self.attempts < self.max_attempts:\n if voice_data.lower().strip(' \\t\\n\\r') == self.data.lower():\n if self.score == 0:\n self.score = 1\n self.runtime.publish(\n self, \"grade\", {'value': self.score, 'max_value': 1})\n self.attempts += 1\n return {'status': 'success', 'submit': True}\n else:\n self.attempts += 1\n return {'status': 'success', 'submit': False, 'remaining_attempts': self.getRemainingAttempts()}\n else:\n return {'status': 'success', 'submit': False, 'remaining_attempts': self.getRemainingAttempts()}\n except:\n return {'status': 'error'}\n","repo_name":"perpetualny/voicerecxblock","sub_path":"voicerecognizer/voicerecognizer.py","file_name":"voicerecognizer.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"20985763907","text":"import subprocess as sp\r\na=sp.check_output(['netsh','wlan','show','profiles']).decode('utf-8').split(\"\\n\")\r\na=[i.split(\":\")[1][1:-1] for i in a if \"All User Profile\" in i]\r\nfor i in a:\r\n re=sp.check_output(['netsh','wlan','show','profile',i,'key=clear']).decode('utf-8').split(\"\\n\")\r\n re=[b.split(\":\")[1][1:-1] for b in re if \"Key Content\" in b]\r\n try:\r\n print(\"{:<30}|{:<}\".format(i,re[0]))\r\n except IndexError:\r\n print(\"{:<30}|{:<}\".format(i,\" \"))\r\n\r\n\r\n'''\r\n\r\nre=sbp.check_output([\"netsh\",\"wlan\",\"show\",\"network\"])\r\nre=re.decode(\"ascii\")\r\nre=re.replace(\"\\r\",\"\")\r\nls=re.split(\"\\n\")\r\nls=ls[4:]\r\nsid=[]\r\nfor x in range(len(ls)):\r\n if x%5==0:\r\n sid.append(ls[x])\r\nprint(sid)\r\n\r\n\r\n'''\r\n","repo_name":"karthiaravinth/abc","sub_path":"wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36986440829","text":"#!/usr/bin/env python3\n\n# from setup_logger import logger\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass hockeyface(object):\n def get_events(self, teams, leagues):\n import time\n\n now = int(time.time())\n if (self.last_updated + 3600) <= now:\n logger.debug(f\"Invalidating cache (last_updated: {self.last_updated})\")\n self.__get_events()\n\n if not teams:\n return self.__filter_events([], leagues)\n else:\n return self.__filter_events(teams, leagues)\n\n def __get_events(self):\n import time\n\n import requests\n\n logger.info(\"Fetching events from upstream\")\n events = []\n\n for league in [\"SHL\", \"HA\", \"SDHL\"]:\n logger.debug(f\"Processing {league}\")\n\n for season in 
self.league_information[league][\"seasonUuids\"]:\n for gametype in self.league_information[league][\"gameTypeUuids\"]:\n # Example URL\n # https://www.hockeyallsvenskan.se/api/sports/game-info?seasonUuid=qcz-3NvSZ2Cmh&seriesUuid=qQ9-594cW8OWD&gameTypeUuid=qQ9-af37Ti40B&gamePlace=all&played=all\n\n r = requests.get(\n f\"{self.league_information[league]['baseurl']}seasonUuid={season}&seriesUuid={self.league_information[league]['seriesUuid']}&gameTypeUuid={gametype}&gamePlace={self.league_information[league]['gamePlace']}&played={self.league_information[league]['played']}\"\n )\n\n if r.headers[\"content-type\"] == \"application/json; charset=utf-8\":\n returned_json = r.json()\n for event in returned_json[\"gameInfo\"]:\n startDateTime = event[\"startDateTime\"]\n if \"names\" in event[\"homeTeamInfo\"]:\n home = event[\"homeTeamInfo\"][\"names\"][\"code\"]\n else:\n home = \"TBD\"\n if \"names\" in event[\"awayTeamInfo\"]:\n away = event[\"awayTeamInfo\"][\"names\"][\"code\"]\n else:\n away = \"TBD\"\n if event[\"venueInfo\"]:\n venue = event[\"venueInfo\"][\"name\"]\n else:\n venue = \"\"\n events.append(\n {\n \"startDateTime\": startDateTime,\n \"home\": home,\n \"away\": away,\n \"venue\": venue,\n \"league\": league,\n }\n )\n else:\n logger.debug(f\"{league} {season} {gametype} did not (yet?) respond with a json\")\n\n for league in [\"CHL\"]:\n logger.debug(f\"Processing {league}\")\n\n for season in self.league_information[league][\"seasonUuids\"]:\n # Example URL\n # https://www.championshockeyleague.com/api/s3?q=schedule-21ec9dad81abe2e0240460d0-384dfd08cf1b5e6e93cd19ba.json\n\n r = requests.get(f\"{self.league_information[league]['baseurl']}schedule-{season}.json\")\n returned_json = r.json()\n for event in returned_json[\"data\"]:\n startDateTime = event[\"startDate\"]\n home = event[\"teams\"][\"home\"][\"name\"]\n away = event[\"teams\"][\"away\"][\"name\"]\n venue = event[\"venue\"][\"name\"]\n events.append(\n {\n \"startDateTime\": startDateTime,\n \"home\": home,\n \"away\": away,\n \"venue\": venue,\n \"league\": league,\n }\n )\n\n self.last_updated = int(time.time())\n return events\n\n def __filter_events(self, teams, leagues):\n events_to_return = []\n for event in self.events:\n if teams:\n if event[\"league\"] not in leagues:\n continue\n\n if event[\"home\"] in teams:\n events_to_return.append(event)\n elif event[\"away\"] in teams:\n events_to_return.append(event)\n else:\n match = False\n for team in self.teamdata:\n if team[\"key\"] not in teams:\n continue\n name = team[\"name\"]\n if name == event[\"home\"] or name == event[\"away\"]:\n logger.warning(f\"Match in name for {name}: {event}\")\n events_to_return.append(event)\n match = True\n break\n\n for name in team[\"alternateNames\"]:\n if name == event[\"home\"] or name == event[\"away\"]:\n if team[\"key\"] in teams:\n logger.warning(f\"Match in alternativeNames for {name}: {event}\")\n events_to_return.append(event)\n match = True\n break\n if match == True:\n break\n\n # if match == False:\n # logger.debug(event)\n else:\n if event[\"league\"] in leagues:\n events_to_return.append(event)\n\n return events_to_return\n\n def __build_teamdata(self):\n import json\n\n team_data_file = \"static/teamdata.json\"\n with open(team_data_file) as json_file:\n file_contents = json_file.read()\n team_data = json.loads(file_contents)\n return team_data\n\n def __pp_team_name(self, short):\n for team in self.teamdata:\n if team[\"key\"] == short:\n return team[\"name\"]\n if team[\"name\"] == short:\n return 
team[\"name\"]\n            for name in team[\"alternateNames\"]:\n                if name == short:\n                    return team[\"name\"]\n\n        logger.warning(f\"No match for '{short}'\")\n        return short\n\n    def __pp_cal_name(self, teams):\n        return_dict = {\n            \"CALNAME\": \"Svensk hockey\",\n            \"CALDESC\": \"Spelschema för svensk hockey\",\n        }\n        if len(teams) == 1:\n            for team in self.teamdata:\n                if team[\"key\"] == teams[0]:\n                    return_dict = {\n                        \"CALNAME\": f\"{team['name']}\",\n                        \"CALDESC\": f\"Spelschema för {team['name']}\",\n                    }\n\n        return return_dict\n\n    def build_ical(self, events, teams, leagues):\n        import uuid\n        from datetime import datetime, timedelta\n\n        import pytz\n        from icalendar import Calendar, Event\n\n        cal = Calendar()\n        cal.add(\"prodid\", \"-//Hockey McHF//Hockey McHockeyFace//EN\")\n        cal.add(\"version\", \"2.0\")\n        cal.add(\"CALSCALE\", \"GREGORIAN\")\n        dstamp = datetime.now()\n\n        desc = self.__pp_cal_name(teams)\n        cal.add(\"X-WR-CALNAME\", desc[\"CALNAME\"])\n        cal.add(\"X-WR-CALDESC\", desc[\"CALDESC\"])\n\n        for event in events:\n            home = self.__pp_team_name(event[\"home\"])\n            away = self.__pp_team_name(event[\"away\"])\n            event_start = datetime.fromisoformat(event[\"startDateTime\"]).astimezone(pytz.utc)\n            event_end = event_start + timedelta(minutes=150)\n\n            ical_event = Event()\n            prefix_string = \"\"\n            if (\"SHL\" in leagues and \"SDHL\" in leagues) or event[\"league\"] == \"CHL\":\n                prefix_string = f\"{event['league']}: \"\n\n            ical_event.add(\"summary\", f\"{prefix_string}{home} - {away}\")\n            ical_event.add(\"uid\", uuid.uuid4())\n            ical_event.add(\"dtstamp\", dstamp)\n            ical_event.add(\"dtstart\", event_start)\n            ical_event.add(\"dtend\", event_end)\n\n            ical_event.add(\"location\", event[\"venue\"])\n            cal.add_component(ical_event)\n\n        return cal.to_ical().decode(\"utf-8\")\n\n    def __init__(self) -> None:\n        logger.debug(\"Hockey McHockeyFace initiated\")\n        self.last_updated = 0\n        self.league_information = {\n            \"SHL\": {\n                \"baseurl\": \"https://www.shl.se/api/sports/game-info?\",\n                \"seriesUuid\": \"qQ9-bb0bzEWUk\",\n                \"gameTypeUuids\": [\n                    \"qQ9-af37Ti40B\", # Seriematch\n                    \"qRf-347BaDIOc\", # Kvalmatch nedflyttning\n                    \"qQ9-7debq38kX\", # Slutspelsmatch\n                    \"qQ9-46aa140wUl\", # Play in-match\n                ],\n                \"gamePlace\": \"all\",\n                \"played\": \"all\",\n                \"seasonUuids\": [\n                    \"qcz-3NvSZ2Cmh\", # 2023/2024\n                    \"qbN-XMFfjGVt\", # 2022/2023\n                    \"qZl-8qa6OaFXf\", # 2021/2022\n                ],\n            },\n            \"HA\": {\n                \"baseurl\": \"https://www.hockeyallsvenskan.se/api/sports/game-info?\",\n                \"seriesUuid\": \"qQ9-594cW8OWD\",\n                \"gameTypeUuids\": [\n                    \"qQ9-af37Ti40B\", # Seriematch\n                    \"qRe-AJnJ12qqEc\", # Seriefinalmatch\n                    \"qRe-AJkH2owyv\", # Slutspelsseriematch\n                    \"qQ9-7debq38kX\", # Slutspelsmatch\n                    \"qRe-AJog2gISz\", # Kvalmatch uppflyttning\n                    \"qRf-347BaDIOc\", # Kvalmatch nedflyttning\n                    \"qQ9-be68b0QHe\", # Vänskapsmatch\n                ],\n                \"gamePlace\": \"all\",\n                \"played\": \"all\",\n                \"seasonUuids\": [\n                    \"qcz-3NvSZ2Cmh\", # 2023/2024\n                    \"qbN-XMFfjGVt\", # 2022/2023\n                    \"qZl-8qa6OaFXf\", # 2021/2022\n                ],\n            },\n            \"CHL\": {\n                \"baseurl\": \"https://www.championshockeyleague.com/api/s3?q=\",\n                \"seasonUuids\": [\n                    \"21ec9dad81abe2e0240460d0-384dfd08cf1b5e6e93cd19ba\", # 2023/2024\n                    \"21ec9dad81abe2e0240460d0-42d2f45345814558d4daff38\", # 2022/2023\n                    \"21ec9dad81abe2e0240460d0-f73bbb143cc88c3ebe188d77\", # 2021/2022\n                ],\n            },\n            \"SDHL\": {\n                \"baseurl\": \"https://www.sdhl.se/api/sports/game-info?\",\n                \"seriesUuid\": \"qQ9-f438G8BXP\",\n                \"gameTypeUuids\": [\n                    \"qQ9-af37Ti40B\", # Seriematch\n                    \"qRe-AJog2gISz\", # Kvalmatch uppflyttning\n                    
\"qRf-347BaDIOc\", # Kvalmatch nedflyttning\n \"qQ9-7debq38kX\", # Slutspelsmatch\n ],\n \"gamePlace\": \"all\",\n \"played\": \"all\",\n \"seasonUuids\": [\n \"qcz-3NvSZ2Cmh\", # 2023/2024\n \"qbN-XMFfjGVt\", # 2022/2023\n \"qZl-8qa6OaFXf\", # 2021/2022\n ],\n },\n }\n self.events = self.__get_events()\n self.teamdata = self.__build_teamdata()\n","repo_name":"theseal/hockey-mchockeyface","sub_path":"hockeyface.py","file_name":"hockeyface.py","file_ext":"py","file_size_in_byte":11388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"8555946586","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nDataset = \"\"\n\n\n# Read data from dataset and store it in a list\ndef readData():\n global Dataset\n Dataset = list(csv.reader(open(\"data/cpyincomesg_dataset.csv\")))\n\n\n# Menu 1 Code\ndef option1():\n global Dataset\n print(\"Display all information for the year 2011 : \")\n print(\"-----------------------------------------\")\n for i in enumerate(Dataset):\n if i[0] != 5:\n continue\n\n print(\"Year = \", i[1][0])\n print(\"No of Companies = \", i[1][1])\n print(\"Total Income = \", i[1][2])\n print(\"Donations = \", i[1][3])\n print(\"Chargeable Income = \", i[1][4])\n print(\"-----------------------------------------\")\n\n\n# Menu 2 Code\ndef option2(category):\n t = False\n if category == \"Year\":\n t = True\n print(\"Year can't be used as a category!\")\n\n global Dataset\n cat_list = []\n for i in enumerate(Dataset):\n if i[0] == 0:\n if category in i[1][1:]:\n temp_list = i[1][1:]\n for j in Dataset:\n cat_list.append(j[temp_list.index(category) + 1])\n if len(cat_list) > 0:\n cat_list = cat_list[7:]\n for i in range(len(cat_list)):\n cat_list[i] = int(cat_list[i])\n\n year_list = []\n for i in enumerate(Dataset):\n if i[0] < 7:\n continue\n year_list.append(i[1][0])\n\n print(\"-------------------------------\")\n print(\"Average = \", np.mean(cat_list))\n print(\"Minimum = \", np.amin(cat_list), \" in \", year_list[cat_list.index(np.amin(cat_list))])\n print(\"-------------------------------\")\n else:\n if not t:\n print(\"Given Category Not Found !!\")\n else:\n pass\n\n\n# Menu 3 Code\ndef option3(category):\n t = False\n if category == \"Year\":\n t = True\n print(\"Year can't be used as a category!\")\n\n global Dataset\n cat_list = []\n for i in enumerate(Dataset):\n if i[0] == 0:\n if category in i[1][1:]:\n temp_list = i[1][1:]\n for j in Dataset:\n cat_list.append(j[temp_list.index(category) + 1])\n if len(cat_list) > 0:\n cat_list = cat_list[1:]\n for i in range(len(cat_list)):\n cat_list[i] = int(cat_list[i])\n\n year_list = []\n for i in enumerate(Dataset):\n if i[0] == 0:\n continue\n year_list.append(i[1][0])\n\n ind_sort = np.argsort(cat_list)\n ind_sort = ind_sort[::-1]\n\n for i in ind_sort[:3]:\n print(cat_list[i], \" in \", year_list[i])\n\n else:\n if not t:\n print(\"Given Category Not Found !!\")\n else:\n pass\n\n\n# Return data for a given dataset pattern\ndef retDatafromds(position):\n global Dataset\n temp_list = []\n for i in enumerate(Dataset):\n if i[0] == 0:\n continue\n temp_list.append(int(i[1][position]))\n\n return temp_list\n\n\n# Display Line Chart\ndef disp_linechart(year, CI, TI):\n # Title and the x, y label\n plt.title(\"Chargeable Income, Total Income vs Year\")\n plt.xlabel(\"Year\")\n plt.ylabel(\"Income\")\n\n # Plot the line chart\n plt.plot(year, CI, label=\"Chargeable Income\")\n plt.plot(year, TI, label=\"Total Income\")\n\n # Display the year as x 
axis label\n    plt.xticks(year)\n\n    plt.legend(loc=\"upper left\")\n    plt.show()\n\n\n# Display Bar Chart\ndef disp_barchart(year, NC, DO):\n    bar_width = 0.3\n\n    # Title and the x, y label\n    plt.title(\"Number of Companies, Donations vs Year\")\n    plt.xlabel(\"Year\")\n    plt.ylabel(\"Value\")\n\n    # plot the bar\n    plt.bar(np.arange(len(NC)), NC, width=bar_width, label=\"No of Companies\")\n    plt.bar(np.arange(len(DO)) + bar_width, DO, width=bar_width,\n            label=\"Donations\")\n\n    # Display the year as x axis label\n    plt.xticks(np.arange(len(DO)) + (bar_width / 2), year)\n\n    plt.legend(loc=\"upper left\")\n    plt.show()\n\n\n\n# Menu 4 Code\ndef option4():\n    global Dataset\n    # Get Chart Data\n\n    # get all years\n    year = retDatafromds(0)\n    # get Chargeable Income\n    CI = retDatafromds(4)\n    # get Total Income\n    TI = retDatafromds(2)\n    # get Number of Companies\n    NC = retDatafromds(1)\n    # get Donations\n    DO = retDatafromds(3)\n\n    # Line Chart\n    disp_linechart(year, CI, TI)\n    # Bar Chart\n    disp_barchart(year, NC, DO)\n\n","repo_name":"Abhijith14/PythonProjects","sub_path":"Project 3 - Income Singapore Companies 2/data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"70932155931","text":"from urllib.parse import urljoin\nfrom datetime import datetime\n\nimport os\nimport requests\n\ndef datetime_json(dt):\n    # serialize a datetime for the JSON payload; NOTE: this helper was\n    # referenced but never defined, so ISO 8601 output is assumed here\n    return dt.isoformat()\n\ndef schedule_command(cmd, data=None,\n                     endpoint=os.getenv('INTRUSTD_ADMIN_ENDPOINT', 'http://admin.intrustd.com.app.local'),\n                     run_after=None, retain_until=None, alias=None):\n    d = { 'command': cmd }\n    if run_after is not None:\n        if not isinstance(run_after, datetime):\n            raise TypeError('run_after should be of type datetime')\n        d['run_after'] = datetime_json(run_after)\n\n    if retain_until is not None:\n        if not isinstance(retain_until, datetime):\n            raise TypeError('retain_until should be of type datetime')\n        d['retain_until'] = datetime_json(retain_until)\n\n    if alias is not None:\n        d['alias'] = alias\n\n    r = requests.post(urljoin(endpoint, '/schedule'), json=d)\n    if r.status_code == 201:\n        rsp = r.json()\n        return rsp\n    elif r.status_code == 409:\n        raise KeyError(alias)\n    else:\n        raise RuntimeError('Unknown status code while adding command: {} {}'.format(r.status_code, r.text))\n\ndef get_scheduled_command_status(task_id,\n                                 endpoint=os.getenv('INTRUSTD_ADMIN_ENDPOINT', 'http://admin.intrustd.com.app.local')):\n    r = requests.get(urljoin(endpoint, '/schedule/{}'.format(task_id)))\n    if r.status_code == 200:\n        return r.json()\n    elif r.status_code == 404:\n        raise KeyError(task_id)\n    else:\n        raise RuntimeError('Unknown status while checking command status: {}'.format(r.status_code))\n","repo_name":"intrustd/py-intrustd","sub_path":"intrustd/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34096401668","text":"import logging\nfrom django.core.exceptions import ValidationError\n\nfrom cms.models import CMSPlugin\nfrom cms.models.placeholdermodel import Placeholder\nfrom cms.plugin_rendering import PluginContext, render_plugin as _render_plugin\n\nfrom ..utils import get_timetravel_date\nfrom ..models import Schedulable\n\n\ndef render_plugin(self, context=None, placeholder=None, admin=False, processors=None):\n    instance, plugin = self.get_plugin_instance()\n    if instance and not (admin and not plugin.admin_preview):\n        if not self.published():\n            return \"\"\n\n        if not 
isinstance(placeholder, Placeholder):\n placeholder = instance.placeholder\n placeholder_slot = placeholder.slot\n context = PluginContext(context, instance, placeholder)\n context = plugin.render(context, instance, placeholder_slot)\n if plugin.render_plugin:\n template = hasattr(instance, 'render_template') and instance.render_template or plugin.render_template\n if not template:\n raise ValidationError(\"plugin has no render_template: %s\" % plugin.__class__)\n else:\n template = None\n return _render_plugin(context, instance, placeholder, template, processors)\n return \"\"\n\n\ndef published(self):\n \"\"\"\n Checks if the plugin should be published, depending on the publication\n start and/or end date (if available).\n \"\"\"\n ref_date = get_timetravel_date()\n instance, plugin = self.get_plugin_instance()\n if isinstance(instance, Schedulable):\n logging.debug(\"The current plugin instance is Schedulable.\")\n return (instance.publication_date is None or instance.publication_date < ref_date) and \\\n (instance.publication_end_date is None or instance.publication_end_date >= ref_date)\n else:\n return True\n\nCMSPlugin.published = published\nCMSPlugin.render_plugin = render_plugin\n","repo_name":"jjanssen/django-cms-timetravel","sub_path":"cms_timetravel/managers/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5511937117","text":"import os\nimport shutil\n\nsource_folder = '/home/ubuntu/Downloads/BratsEdited'\ndestination_folder = '/home/ubuntu/Downloads/BratsScripted'\nmatching_folder = '/home/ubuntu/Licenta/Semi Supervised Medical Segmentation/data/BraTS2019/data'\n\n\n# Get a list of files in the matching folder\nmatching_files = [filename.split('.')[0] for filename in os.listdir(matching_folder) if\n os.path.isfile(os.path.join(matching_folder, filename))]\nprint(matching_files)\n# Iterate over the folders in the source folder\nfor folder_name in os.listdir(source_folder):\n folder_path = os.path.join(source_folder, folder_name)\n\n # Check if the folder name is in the list of matching files\n if folder_name in matching_files:\n destination_path = os.path.join(destination_folder, folder_name)\n\n # Move the folder to the destination folder\n shutil.move(folder_path, destination_path)\n print(f\"Moved folder '{folder_name}' to '{destination_folder}'.\")\n","repo_name":"sebastianiscruleasa/Semi-Supervised-Medical-Segmentation","sub_path":"code/dataloaders/move_matching_files.py","file_name":"move_matching_files.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8886612145","text":"'''\n프로그래머스 위클리 챌린지\nhttps://programmers.co.kr/learn/courses/30/lessons/86048\n7주차\n\n2021.09.14\n'''\n\n'''\n문제 이해하기\n입실 명부 [1, 3, 2]\n퇴실 명부 [1, 2, 3]\n\n1은 2번 / 3번 만났을 수도 안만났을 수도 있음\n2는 3은 무조건 만남 => 1\n3은 2를 무조건 만남 => 1\n==> [0, 1, 1]\n\n접근 방법\n총 4개의 경우의 수가 존재함\n1. 먼저 들어오고 나중에 나간 사람\n2. 나중에 들어오고 먼저 나간 사람 > 1 찾을 때 찾아짐 \n3. 먼저 들어오고 먼저 나갔지만, 그 사이에 나중에 들어오고 먼저 나간 사람이 있어서 만난게 보장되는 사람\n\n3번을 위해서 set으로 해당 인덱스에 만난 사람을 추가해주는 방식을 해보도록 하자! -> dict key는 사람 번호 value는 set으로 만난 사람들\n=> 3번 어케 구하지..ㅠㅠㅠㅠ -> 일단 pass....\n\n\n접근 방법 2 -> 순서대로\n1. 입실시키고 퇴실할 수 있는지 체크\n2. 퇴실 할 수 없으면 다음 사람 입실 \n3. 퇴실 할 수 있으면 퇴실하고 다음 사람 입실\n4. 
퇴실하기 전에 해당 배열?에 있는 사람들은 추가해줌 \n'''\n\n# enter : 입실 순서\n# leave : 퇴실 순서\n### 접근 방법 2번 적용 ###\ndef solution(enter, leave):\n\n # 만난 사람 저장하는 dict\n meet_people = {idx:set() for idx in enter}\n\n # room : 회의실에 지금 있는 사람\n room = [enter.pop(0)]\n\n # 모두가 회의실 다녀갈 때 까지\n while leave:\n # 퇴실해야 되는 사람이 지금 회의실에 있는지 확인\n if leave[0] in room:\n leave_people = leave.pop(0)\n room.remove(leave_people) # 퇴실시키기\n\n # 퇴실해야 하는 사람이 없으면, 입실 시키기\n else:\n enter_people = enter.pop(0)\n room.append(enter_people) # 입실시키기\n\n # 새로 들어오면 그 앞에까지 meet_people에 넣어줘야지 -> 같이 회의실에 있다는 것은 만났다는 것이니까\n for i in room[:-1]:\n meet_people[i].add(enter_people)\n meet_people[enter_people].add(i)\n\n # 배열로 전환\n answer = [0 for _ in range(len(meet_people))]\n for i in meet_people.keys():\n answer[i-1] = len(meet_people[i])\n\n return answer\n\n\n### 다른분 풀이 ###\n### 참고 : https://blog.naver.com/jjys9047/222504044646 ###\n# max_time = (85.38ms, 10.5MB)\n### 접근 방법 1과 비슷하게 접근!\ndef solution2(enter, leave):\n\n # 이렇게 하면 나중에 배열 따로 안만들어도 되서 효율적일듯\n ans = {idx: 0 for idx in range(1, len(enter) + 1)}\n pre_max = 0 # 일찍 퇴실한 사람들의 입실 기간 중 가장 마지막 시간 확인용\n # => 특정인보다 빨리 퇴실한 사람은 해당 시간보다 빨리 입실할 것\n # => 이를 기준으로 입실 순서를 확인하면 특정인 이후 입실한 사람들 중 pre_max 이전에 입실한 사람은 모두 우리가 원하는 조건을 충족함\n # => 퇴실 시간이 특정 시간 이후지만, 반드시 만나는 것이 확실한 사람이 포함되게 됨\n for i, l in enumerate(leave): # i : 특정인이 퇴실한 시간, l : 특정인\n now = enter.index(l) # 특정인이 입실한 시간\n if i:\n for e in enter[now + 1:pre_max + 1]: # 특정인이 입실한 시간+1 ~ 일찍 퇴실한 사람들의 가장 마지막 시간 => 나중에 들어온 사람 중에 먼저 퇴실한 사람 있는지\n ans[l] += 1\n ans[e] += 1\n pre_max = max(pre_max, now)\n\n return [v for i, v in ans.items()]\n\n\n### TEST 1\nenter = [1,3,2]\nleave = [1,2,3]\nprint(solution2(enter, leave)) # [0,1,1]\n\n### TEST 2\nenter = [1,4,2,3]\nleave = [2,1,3,4]\nprint(solution2(enter, leave)) # [2,2,1,3]\n\n### TEST 3\nenter = [3,2,1]\nleave = [2,1,3]\nprint(solution(enter, leave)) # [1,1,2]\n\n### TEST 4\nenter = [3,2,1]\nleave = [1,3,2]\nprint(solution(enter, leave)) # [2,2,2]\n\n### TEST 5\nenter = [1,4,2,3]\nleave = [2,1,4,3]\nprint(solution(enter, leave)) # [2,2,0,2]\n\n","repo_name":"kokoritaaa7/Algo","sub_path":"WeeklyChallenge/week7.py","file_name":"week7.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28789612614","text":"#!/usr/bin/env python3\n\nimport os\nimport json\nimport random\nimport tarfile\nimport http.client\nimport urllib.parse\n# import traceback\nimport logging\n\nimport pygit2\nimport github\nimport requests\n# import rapidjson as json\n\nlogger = logging.getLogger()\n\nGITHUB_API_TOKEN = 'SET YOUR OWN'\n\nCACHE_JSON = 'cache.json'\n\nEXTS = ['.java']\n\n\ndef issrc(name):\n b = False\n if name:\n for ext in EXTS:\n if name.endswith(ext):\n b = True\n break\n else:\n cap = ext.upper()\n if name.endswith(cap):\n b = True\n break\n return b\n\n\ndef get_modified_files(_commit, commit):\n delta = _commit.tree.diff_to_tree(commit.tree)\n modified = []\n for p in delta:\n old_file = p.delta.old_file\n new_file = p.delta.new_file\n if issrc(old_file.path) or issrc(new_file.path):\n modified.append((old_file, new_file))\n return modified\n\n\ndef save_file(repo, fobj, dpath):\n blob = repo.get(fobj.id, None)\n if blob:\n fpath = os.path.join(dpath, fobj.path)\n d = os.path.dirname(fpath)\n if not os.path.exists(d):\n os.makedirs(d)\n with open(fpath, 'wb') as f:\n f.write(blob.data)\n logger.debug(f'file saved at \"{fpath}\"')\n\n\ndef check_url(url):\n o = urllib.parse.urlparse(url)\n conn = 
http.client.HTTPSConnection(o.netloc)\n conn.request('GET', o.path)\n res = conn.getresponse()\n b = res.status != 404\n return b\n\n\ndef sampling(json_file, nsamples, refactoring=None, out='a.json'):\n res = []\n ref_list = []\n failure_count = 0\n\n if os.path.exists(CACHE_JSON):\n with open(CACHE_JSON, 'r') as f:\n ref_list = json.load(f)\n else:\n with open(json_file) as f:\n data = json.load(f)\n for commit in data:\n # oid = commit['id']\n repo = commit['repository']\n sha1 = commit['sha1']\n url = commit['url']\n if check_url(url):\n for ref in commit['refactorings']:\n valid = ref['validation'] == 'TP'\n if valid:\n refty = ref['type']\n if refactoring is None or refactoring == refty:\n desc = ref['description']\n d = {'repo': repo, 'sha1': sha1,\n 'type': refty, 'desc': desc}\n ref_list.append(d)\n else:\n failure_count += 1\n logger.warning(f'{url}: failed to access')\n\n if failure_count:\n logger.warning(f'failed to get {failure_count} commits')\n\n if ref_list:\n with open(CACHE_JSON, 'w') as f:\n json.dump(ref_list, f)\n\n if nsamples > 0 and len(ref_list) > nsamples:\n res = random.sample(ref_list, k=nsamples)\n else:\n res = ref_list\n\n if res:\n with open(out, 'w') as f:\n json.dump(res, f)\n\n return res\n\n\ndef gh_dl(dl_link, out_path):\n logger.info(f'downloading {dl_link}...')\n resp = requests.get(dl_link)\n dir_path = os.path.dirname(out_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n with open(out_path, 'wb') as f:\n f.write(resp.content)\n logger.info(f'downloaded to {out_path}')\n\n\ndef clone_repos(repo_tbl, repo_dir, sample_dir, modified_only=False):\n\n for repo_url, cl in repo_tbl.items():\n logger.info(f'repo_url={repo_url}')\n\n user_name, repo_name = repo_url.split('/')[-2:]\n\n if repo_name.endswith('.git'):\n repo_name = repo_name[:-4]\n\n repo_path = os.path.join(repo_dir, user_name, repo_name)\n\n if not os.path.exists(repo_path):\n logger.info(f'cloning {repo_url} into {repo_path}...')\n pygit2.clone_repository(repo_url, repo_path, bare=True)\n\n sample_path = os.path.join(sample_dir, user_name, repo_name)\n\n repo = pygit2.Repository(repo_path)\n\n for c in cl:\n logger.info(f'commit={c}')\n\n short_id = c[:7]\n\n before_path = os.path.join(sample_path, f'{short_id}-before')\n after_path = os.path.join(sample_path, f'{short_id}-after')\n\n if os.path.exists(before_path) and os.path.exists(after_path):\n continue\n\n before_tar_path = before_path + '.tar'\n after_tar_path = after_path + '.tar'\n\n before_tgz_path = before_tar_path + '.gz'\n after_tgz_path = after_tar_path + '.gz'\n\n pdir = os.path.dirname(after_path)\n if not os.path.exists(pdir):\n os.makedirs(pdir)\n\n commit_id = pygit2.Oid(hex=c)\n commit = None\n _commit = None\n\n gh_flag = False\n\n try:\n commit = repo[commit_id]\n _commit = commit.parents[0]\n logger.info('tree={}'.format(str(commit.tree.id)))\n logger.info('tree_={}'.format(str(_commit.tree.id)))\n except Exception:\n logger.warning(f'\"{c}\": not found')\n try:\n gh = github.MainClass.Github(GITHUB_API_TOKEN)\n logger.warning('finding via GitHub API...')\n gh_repo = gh.get_repo(f'{user_name}/{repo_name}')\n dl_link_after = gh_repo.get_archive_link('tarball', c)\n logger.info(f'dl_link_after={dl_link_after}')\n dl_link_before = gh_repo.get_archive_link('tarball', c+'^')\n logger.info(f'dl_link_before={dl_link_before}')\n gh_flag = True\n except Exception as e:\n logger.warning(f'failed to get download link: {e}')\n continue\n\n if modified_only:\n if gh_flag:\n try:\n commit = gh_repo.get_commit(c)\n 
_commit = commit.parents[0]\n logger.info('{} modified files found'\n .format(len(commit.files)))\n for f in commit.files:\n fn = f.filename\n logger.info(f'fn={fn}')\n if fn.endswith('.java') and f.status == 'modified':\n fp = os.path.join(after_path, fn)\n fc = gh_repo.get_contents(fn, commit.sha)\n gh_dl(fc.download_url, fp)\n _fn = fn\n if f.previous_filename:\n _fn = f.previous_filename\n _fc = gh_repo.get_contents(_fn, _commit.sha)\n _fp = os.path.join(before_path, _fn)\n gh_dl(_fc.download_url, _fp)\n except Exception as e:\n logger.warning(f'failed to handle {c}: {e}')\n continue\n\n else:\n modified = get_modified_files(_commit, commit)\n logger.info('{} modified source files found'\n .format(len(modified)))\n for fobj0, fobj1 in modified:\n save_file(repo, fobj0, before_path)\n save_file(repo, fobj1, after_path)\n\n elif gh_flag:\n try:\n gh_dl(dl_link_before, before_tgz_path)\n except Exception:\n logger.warning(f'failed to download {dl_link_before}')\n continue\n try:\n gh_dl(dl_link_after, after_tgz_path)\n except Exception:\n logger.warning(f'failed to download {dl_link_after}')\n continue\n\n with tarfile.open(after_tgz_path, 'r') as a:\n a.extractall(after_path)\n os.remove(after_tgz_path)\n\n with tarfile.open(before_tgz_path, 'r') as a:\n a.extractall(before_path)\n os.remove(before_tgz_path)\n\n else:\n with tarfile.open(after_tar_path, 'w') as a:\n logger.info(f' {c} --> {after_path}')\n try:\n repo.write_archive(commit, a)\n except Exception:\n logger.warning(f'failed to checkout {c}')\n try:\n gh = github.MainClass.Github(GITHUB_API_TOKEN)\n gh_repo = gh.get_repo(f'{user_name}/{repo_name}')\n dl_link_after = gh_repo.get_archive_link('tarball', c)\n logger.info(f'dl_link_after={dl_link_after}')\n gh_dl(dl_link_after, after_tgz_path)\n after_tar_path = after_tgz_path\n except Exception:\n continue\n\n with tarfile.open(after_tar_path, 'r') as a:\n a.extractall(after_path)\n os.remove(after_tar_path)\n\n with tarfile.open(before_tar_path, 'w') as a:\n _c = str(_commit.id)\n logger.info(f' {_c} --> {before_path}')\n try:\n repo.write_archive(_commit, a)\n except Exception:\n logger.warning(f'failed to checkout {_c}')\n try:\n gh = github.MainClass.Github(GITHUB_API_TOKEN)\n gh_repo = gh.get_repo(f'{user_name}/{repo_name}')\n dl_link_before = gh_repo.get_archive_link('tarball', c+'^')\n logger.info(f'dl_link_before={dl_link_before}')\n gh_dl(dl_link_before, before_tgz_path)\n before_tar_path = before_tgz_path\n except Exception:\n continue\n\n with tarfile.open(before_tar_path, 'r') as a:\n a.extractall(before_path)\n os.remove(before_tar_path)\n\n\ndef clone_repos_from_data(data, repo_dir, sample_dir, modified_only=False):\n repo_tbl = {}\n for d in data:\n repo_url = d['repo']\n sha1 = d['sha1']\n try:\n cl = repo_tbl[repo_url]\n except KeyError:\n cl = []\n repo_tbl[repo_url] = cl\n\n if sha1 not in cl:\n cl.append(sha1)\n\n clone_repos(repo_tbl, repo_dir, sample_dir, modified_only=modified_only)\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\n parser = ArgumentParser(description='sampling from Refactoring Oracle',\n formatter_class=ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--oracle', type=str, default='data.json',\n help='specify Oracle')\n\n parser.add_argument('-n', '--nsamples', type=int, default=10,\n help='specify number of samples')\n\n parser.add_argument('-d', '--debug', dest='debug', action='store_true',\n help='enable debug printing')\n\n parser.add_argument('-r', '--refactoring', 
dest='ref', default=None,\n help='specify refactoring type')\n\n parser.add_argument('-o', '--out-json', dest='out_json', default='a.json',\n help='specify output JSON file')\n\n parser.add_argument('-s', '--sampled-json', dest='in_json', default=None,\n help='specify sampled refactorings')\n\n parser.add_argument('-m', '--modified-only', dest='modified_only',\n action='store_true',\n help='checkout modified source files only')\n\n args = parser.parse_args()\n\n log_level = logging.INFO\n if args.debug:\n log_level = logging.DEBUG\n fh = logging.FileHandler('sampling.log', mode='w', encoding='utf-8')\n fh.setLevel(log_level)\n fmt = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\n fh.setFormatter(fmt)\n logging.basicConfig(level=log_level, handlers=[fh])\n logger.addHandler(fh)\n\n data = None\n\n if args.in_json is None:\n\n if os.path.exists(args.out_json):\n print('\"{}\" exists!'.format(args.out_json))\n while True:\n a = input('Do you want to overwrite? [y/n] ')\n if a == 'y':\n break\n elif a == 'n':\n exit(0)\n\n if args.nsamples <= 0:\n logger.info('extracting {} samples from \"{}\"...'\n .format(args.ref if args.ref else 'arbitrary',\n args.oracle))\n else:\n logger.info('sampling {} {} samples from \"{}\"...'\n .format(args.nsamples,\n args.ref if args.ref else 'arbitrary',\n args.oracle))\n\n data = sampling(args.oracle, args.nsamples, refactoring=args.ref,\n out=args.out_json)\n\n logger.info('result dumped into \"{}\"'.format(args.out_json))\n\n else:\n with open(args.in_json) as f:\n data = json.load(f)\n\n clone_repos_from_data(data, 'repositories', 'samples',\n modified_only=args.modified_only)\n","repo_name":"mstmhsmt/diffast-experiments","sub_path":"refactoring/scripts/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":13620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27150114109","text":"import re\nfrom itertools import chain\n\nimport numpy as np\n\n\ndef get_arcs_and_head(ddp_result, tokens):\n \"\"\"\n 将ddparser输出的依存弧映射到用户自定义切词结果上,返回映射后的依存弧和核心词索引。\n\n Arags:\n ddp_result: ddparser结果\n tokens: 用户自定义切词\n Returns:\n arc_tails: arc_tails表示映射后所有弧尾的索引\n arc_heads: arc_heads表示映射后所有弧头的索引\n head_id: 表示映射后核心词的所有\n \"\"\"\n words = ddp_result['word']\n heads = ddp_result['head']\n # 为了运算方便,父节点索引减一\n heads = [i - 1 for i in heads]\n mapping = _get_mapping(words, tokens)\n arc_tails, arc_heads = _get_arcs(mapping, heads)\n head_id = _get_head_id(ddp_result, mapping)\n return (arc_tails, arc_heads), head_id\n\n\ndef get_arcs_and_head_in_wordpiece(ddp_result, tokens):\n \"\"\"\n 当用户的切词使用的是wordpiece时,将ddparser输出的依存弧映射到用户自定义切词结果上,返回映射后的依存弧和核心词索引。\n\n Arags:\n ddp_result: ddparser结果\n tokens: 用户自定义切词\n Returns:\n arc_tails: arc_tails表示映射后所有弧尾的索引\n arc_heads: arc_heads表示映射后所有弧头的索引\n head_id: 表示映射后核心词的所有\n \"\"\"\n words = [s.lower() for s in ddp_result['word']]\n heads = ddp_result['head']\n # 为了运算方便,父节点索引减一\n heads = [i - 1 for i in heads]\n merge_idxs, merge_tokens = _merge_wordpiece_tokens(tokens)\n # replace [UNK]\n words, merge_tokens = _replace_unk(words, merge_tokens)\n assert \"\".join(words) == \"\".join(merge_tokens)\n mapping = _get_mapping(words, merge_tokens)\n re_index = list(range(len(tokens)))\n for n, i in enumerate(merge_idxs):\n re_index[n] = mapping[i]\n arc_tails, arc_heads = _get_arcs(re_index, heads)\n head_id = _get_head_id(ddp_result, re_index)\n return (arc_tails, arc_heads), head_id\n\n\ndef get_adj_of_one_sent(arcs, length, max_len=None):\n \"\"\"\n 
将弧转化为邻接矩阵\n\n Arags:\n arcs: 弧\n length: token数量\n max_len: 最大token的数量\n Returns:\n graph: 转化后的邻接矩阵\n \"\"\"\n if max_len is None:\n max_len = length\n arc_tails, arc_heads = [], []\n for arc_tail, arc_head in zip(*arcs):\n if arc_tail < max_len and arc_head < max_len:\n arc_tails.append(arc_tail)\n arc_heads.append(arc_head)\n if not arc_tails:\n arc_tails.append(0)\n arc_heads.append(0)\n\n graph = np.zeros((max_len, max_len), dtype=\"int32\")\n for arc_tail, arc_head in zip(arc_tails, arc_heads):\n graph[arc_tail, arc_head] = 1\n for i in range(max_len):\n graph[i, i] = 1\n return graph\n\n\ndef get_adj_of_one_sent_in_ernie(arcs, length, max_len=None):\n \"\"\"\n 当用户模型是ernie时,将弧转化为邻接矩阵(自动补齐[CLS],[SEP])\n\n Arags:\n arcs: 弧\n length: token数量\n max_len: 最大token的数量\n Returns:\n graph: 转化后的邻接矩阵\n \"\"\"\n if max_len is None:\n max_len = length\n arc_tails, arc_heads = [], []\n for arc_tail, arc_head in zip(*arcs):\n if arc_tail < max_len and arc_head < max_len:\n arc_tails.append(arc_tail + 1)\n arc_heads.append(arc_head + 1)\n if not arc_tails:\n arc_tails.append(0)\n arc_heads.append(0)\n\n graph = np.zeros((max_len + 2, max_len + 2), dtype=\"int32\")\n for arc_tail, arc_head in zip(arc_tails, arc_heads):\n graph[arc_tail, arc_head] = 1\n for i in range(max_len + 2):\n if i not in [0, max_len + 1]:\n graph[i, i] = 1\n return graph\n\n\ndef get_adj_of_two_sent_in_ernie(arcs_a, length_a, arcs_b, length_b, max_len=None):\n \"\"\"\n 当用户模型是ernie且输入为两条句子拼接在一起时,将弧转化为邻接矩阵(自动补齐[CLS],[SEP])\n\n Arags:\n arcs_a: 句子a的弧\n length_a: 句子a的token数量\n arcs_b: 句子b的弧\n length_b: 句子b的token数量\n max_len: 最大token的数量\n Returns:\n graph: 转化后的邻接矩阵\n \"\"\"\n if max_len is None:\n max_len_a = length_a\n else:\n max_len_a = max_len\n arc_tails, arc_heads = [], []\n for arc_tail, arc_head in zip(*arcs_a):\n if arc_tail < max_len_a and arc_head < max_len_a:\n arc_tails.append(arc_tail + 1)\n arc_heads.append(arc_head + 1)\n\n if max_len is None:\n max_len_b = length_b\n else:\n max_len_b = max_len\n for arc_tail, arc_head in zip(*arcs_b):\n if arc_tail < max_len_b and arc_head < max_len_b:\n arc_tails.append(arc_tail + max_len_a + 2)\n arc_heads.append(arc_head + max_len_a + 2)\n if not arc_tails:\n arc_tails.append(0)\n arc_heads.append(0)\n\n graph = np.zeros((max_len_a + max_len_b + 3, max_len_a + max_len_b + 3), dtype=\"int32\")\n for arc_tail, arc_head in zip(arc_tails, arc_heads):\n graph[arc_tail, arc_head] = 1\n for i in range(max_len_a + max_len_b + 3):\n if i not in [0, max_len_a + 1, max_len_a + max_len_b + 2]:\n graph[i, i] = 1\n return graph\n\n\ndef pad_batch_graphs(graphs, max_len=None):\n \"\"\"\n padding batch graphs\n\n Arags:\n graphs: 未填充的邻接矩阵\n max_len: 最大长度\n Returns:\n out_tensor: 填充后的邻接矩阵\n \"\"\"\n if max_len is None:\n max_len = max([s.shape[0] for s in graphs])\n out_dims = (len(graphs), max_len, max_len)\n out_tensor = np.full(out_dims, 0, dtype=np.int64)\n for i, tensor in enumerate(graphs):\n length = min(tensor.shape[0], max_len)\n out_tensor[i, :length, :length] = tensor\n return out_tensor\n\n\ndef _get_arcs(mapping, heads):\n \"\"\"\n 映射函数,获取映射后的弧\n \"\"\"\n arc_tails, arc_heads = [], []\n for n, i in enumerate(mapping):\n if i == -1 or heads[i] == -1:\n continue\n for m, j in enumerate(mapping):\n if j != -1 and j == heads[i]:\n arc_tails.append(n)\n arc_heads.append(m)\n arc_tails.append(m)\n arc_heads.append(n)\n return arc_tails, arc_heads\n\n\ndef _get_mapping(words, tokens):\n \"\"\"\n 获取映射数组\n \"\"\"\n index = list(range(len(tokens)))\n tmp_ddp = words[0]\n tmp_tok = tokens[0]\n 
ddp_idx = 0\n    tok_idx = 0\n    while ddp_idx < len(words) - 1 or tok_idx < len(tokens) - 1:\n        if tmp_ddp == tmp_tok:\n            index[tok_idx] = ddp_idx\n            tok_idx += 1\n            ddp_idx += 1\n            tmp_ddp += words[ddp_idx]\n            tmp_tok += tokens[tok_idx]\n        elif tmp_ddp > tmp_tok:\n            # index[tok_idx] = ddp_idx\n            index[tok_idx] = -1\n            tok_idx += 1\n            tmp_tok += tokens[tok_idx]\n        elif tmp_ddp < tmp_tok:\n            # index[tok_idx] = ddp_idx\n            index[tok_idx] = -1\n            ddp_idx += 1\n            tmp_ddp += words[ddp_idx]\n        else:\n            index[tok_idx] = ddp_idx\n    return index\n\n\ndef _merge_wordpiece_tokens(tokens):\n    \"\"\"合并被wordpiece切散的token\"\"\"\n    assert len(tokens) >= 1\n    idxs = []\n    m_tokens = []\n    cur_token = \"\"\n    for token in tokens:\n        if cur_token == \"\":\n            cur_token += token\n            idxs.append(0)\n            continue\n        if token.startswith(\"##\"):\n            cur_token += token[2:]\n            idxs.append(idxs[-1])\n        else:\n            m_tokens.append(cur_token)\n            cur_token = token\n            idxs.append(idxs[-1] + 1)\n    else:\n        m_tokens.append(cur_token)\n    return idxs, m_tokens\n\n\ndef _get_head_id(ddp_result, mapping):\n    \"\"\"获取映射后核心词索引\"\"\"\n    heads = ddp_result['head']\n    try:\n        head_id = mapping.index(heads.index(0))\n    except ValueError:\n        head_id = len(mapping) - 1\n    return head_id\n\n\ndef _replace_unk(words, tokens):\n    \"\"\"将[UNK]符号替换为原始符号\"\"\"\n    if '[UNK]' not in tokens:\n        return words, tokens\n    words = [_replace_escape(word) for word in words]\n    query = \"\".join(words)\n    new_tokens = []\n    for token in tokens:\n        if token != '[UNK]':\n            new_tokens.append(_replace_escape(token))\n        else:\n            new_tokens.append('(.+?)')\n    matchs = re.match(\"\".join(new_tokens) + \"$\", query)\n    if matchs is None:\n        raise ValueError(\"unknown error\")\n\n    for match in matchs.groups():\n        new_tokens[new_tokens.index('(.+?)')] = match\n    return words, new_tokens\n\n\nESCAPE_DICT = {\n    '(': '(',\n    ')': ')',\n    '[': '【',\n    ']': '】',\n    '+': '+',\n    '?': '?',\n    '*': '×',\n    '{': '{',\n    '}': '}',\n    '.': '.',\n}\n\n\ndef _replace_escape(string):\n    \"\"\"将正则中的转义字符替换为全角字符\"\"\"\n    for k, v in ESCAPE_DICT.items():\n        string = string.replace(k, v)\n    return string\n\n\ndef transfor_head_id_for_ernie(head_id_a, length_a, head_id_b=None, length_b=None):\n    \"\"\"\n    当用户模型为ernie时, 获取新的核心词位置(由于拼接[CLS], [SEP])\n    \"\"\"\n    if head_id_b is None or length_b is None:\n        return min(head_id_a + 1, length_a)\n    else:\n        return (min(head_id_a + 1, length_a), min(length_a + head_id_b + 2, length_a + length_b + 1))\n\n\nif __name__ == \"__main__\":\n    d = {'word': ['10086', '话费', '清单', '查询'], 'head': [2, 3, 4, 0], 'deprel': ['ATT', 'ATT', 'VOB', 'HED']}\n\n    t = ['1008', '##6', '话', '费', '清', '单', '查', '询']\n    print(_merge_wordpiece_tokens(t))","repo_name":"baidu/DDParser","sub_path":"tools/representation/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","stars":929,"dataset":"github-code","pt":"32"} +{"seq_id":"33156012641","text":"# pip install PyMuPDF pillow\n\nimport fitz\nimport PIL.Image\nimport io\n\npdf = fitz.open(\"sample.pdf\")\ncounter = 1\nfor i in range(len(pdf)):\n    page = pdf[i]\n    images = page.get_images()\n    for image in images:\n        base_img = pdf.extract_image(image[0])\n        print(base_img)\n        image_data = base_img[\"image\"]\n        img = PIL.Image.open(io.BytesIO(image_data))\n        extension = base_img[\"ext\"]\n        img.save(open(f\"image{counter}.{extension}\", \"wb\"))\n        counter += 1\n\n","repo_name":"FDlucifer/python-climb-learning-tutorial","sub_path":"advanced-python-tutorial/Extract PDF 
Content/main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"28871975213","text":"from dataset import XORDataset\nfrom lstm_xor import LSTMXOR\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nTRAINING_TAG = \"[Training]\"\nVALIDATION_TAG = \"[Validation]\"\nBATCH_SIZE = 64\n\ndef run_model(model, optimizer, loss_function, dataset, epoch, tag, is_evaluation_mode=False, shuffle=True, batch_size=BATCH_SIZE):\n if is_evaluation_mode:\n model.eval()\n else:\n model.train()\n\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)\n\n with torch.set_grad_enabled(not is_evaluation_mode):\n current_correct_predictions = 0.0\n current_loss = []\n\n print(\"{} - [Epoch {}] - START\".format(tag, epoch))\n\n for inputs, labels in tqdm(dataloader, desc=tag):\n inputs = inputs.to(DEVICE).float()\n labels = labels.to(DEVICE).float()\n\n if not is_evaluation_mode:\n optimizer.zero_grad()\n\n predictions = model(inputs)\n\n loss = loss_function(predictions, labels)\n current_loss.append(loss.cpu().data.item())\n current_correct_predictions += calculate_correct_predictions(predictions.cpu().data, labels.cpu().data)\n\n if not is_evaluation_mode:\n loss.backward()\n optimizer.step()\n \n print(\"{} - [Epoch {}] Accuracy is {:.5f}\\n\".format(tag, epoch, current_correct_predictions / len(dataset)))\n print(\"{} - [Epoch {}] Loss is {:.5f}\\n\".format(tag, epoch, sum(current_loss) / len(current_loss)))\n print(\"{} - [Epoch {}] - END\\n\\n\".format(tag, epoch))\n\ndef calculate_correct_predictions(predictions, labels):\n predictions = torch.round(torch.sigmoid(predictions))\n correct_predictions = torch.sum(predictions == labels).data.item()\n\n return correct_predictions\n\nif __name__ == \"__main__\":\n # Open dataset from CSV file\n training_df = pd.read_csv(\"dataset/training.csv\")\n validation_df = pd.read_csv(\"dataset/validation.csv\")\n X_train, y_train = training_df[\"inputs\"].tolist(), training_df[\"labels\"].tolist()\n X_val, y_val = validation_df[\"inputs\"].tolist(), validation_df[\"labels\"].tolist()\n\n # Start creating the datasets\n training_ds = XORDataset(X_train, y_train)\n validation_ds = XORDataset(X_val, y_val)\n\n # Create model, optimizer, and loss function\n lstm_xor_model = LSTMXOR(1, 512).to(DEVICE)\n optimizer = optim.Adam(lstm_xor_model.parameters(), lr=1e-2)\n loss_function = nn.BCEWithLogitsLoss()\n\n for epoch in range(3):\n # Train the model\n run_model(lstm_xor_model, optimizer, loss_function, training_ds, epoch + 1, TRAINING_TAG)\n\n # Validate the model\n run_model(lstm_xor_model, optimizer, loss_function, validation_ds, epoch + 1, VALIDATION_TAG, is_evaluation_mode=True, shuffle=False)\n","repo_name":"francislata/LSTM-XOR","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16200407238","text":"import numpy as np\nfrom numpy.testing import *\n\nimport supreme.api as sr\nimport scipy.ndimage as ndi\n\nclass TestLogpolar:\n def test_basic(self):\n theta = 25 / 180. 
* np.pi\n M = sr.register.affine_tm(theta=theta, scale=1.3)\n shape = np.array([301, 301])\n M_shift = np.array([[1, 0, -shape[1]/2.],\n [0, 1, -shape[0]/2.],\n [0, 0, 1]])\n M = np.dot(np.linalg.inv(M_shift), np.dot(M, M_shift))\n x = (np.random.random(shape)*255).astype(np.uint8)\n y = sr.transform.matrix(x, M)\n\n peak, angle, est_scale = sr.register.lp_patch_match(x, y, angles=360)\n\n assert_almost_equal(angle, theta, decimal=1)\n assert_almost_equal(est_scale, 1.3, decimal=1)\n\nif __name__ == \"__main__\":\n run_module_suite()\n","repo_name":"stefanv/supreme","sub_path":"supreme/register/tests/test_logpolar.py","file_name":"test_logpolar.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"32"} +{"seq_id":"3470652228","text":"import tkinter as tk\n\nroot = tk.Tk()\nroot.geometry(\"500x500\") # Size of the window \n\ndef selected_element():\n print(my_listbox.get(tk.ACTIVE)) # The selected element \n \nmy_listbox = tk.Listbox(root, height=4)\nmy_listbox.grid(row=1,column=1) \nmy_list=['PHP','Python','MySQL']\nfor element in my_list:\n my_listbox.insert(tk.END, element)\n\n \nbutton1 = tk.Button(root, text='Show', width=10,bg='yellow',command=lambda: selected_element())\nbutton1.grid(row=1,column=2) \n\nroot.mainloop() # Keep the window open\n","repo_name":"H0r4c3/Python_00_ALL","sub_path":"GUI/Listbox.py","file_name":"Listbox.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15916918900","text":"import logging\nimport warnings\n\nfrom viewflow_rest import flows, nodes, rest_extensions, this\nfrom viewflow_rest.signals import task_finished, task_started\n\nfrom . 
import models, serializers\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DeprecatedFlow(flows.Flow):\n\n process_class = models.DeprecatedProcess\n task_class = models.DeprecatedTask\n\n start = nodes.Start(\n viewclass=rest_extensions.AutoCreateAPIView,\n serializer_class=serializers.BaseSerializer,\n ).Next(\n this.warning\n )\n warning = nodes.View(\n viewclass=rest_extensions.AutoCreateAPIView,\n serializer_class=serializers.BaseSerializer,\n ).Next(\n this.error\n )\n error = nodes.View(\n viewclass=rest_extensions.AutoCreateAPIView,\n serializer_class=serializers.BaseSerializer,\n ).Next(\n this.end\n )\n\n end = nodes.End()\n\n\ndeprecated_flow = DeprecatedFlow()\n\n\ndef only_warning(**kwargs):\n warnings.warn(\"warning: do not continue anymore!!!!!\")\n logger.warning(kwargs)\n\ndef always_raise(**kwargs):\n logger.error(\"error!!!\")\n raise DeprecationWarning()\n\n\ntask_finished.connect(always_raise,\n sender=DeprecatedFlow.error)\n\ntask_finished.connect(only_warning,\n sender=DeprecatedFlow.warning)\n\ntask_started.connect(only_warning,\n sender=DeprecatedFlow.warning)\n \ntask_started.connect(only_warning,\n sender=DeprecatedFlow.error)","repo_name":"ramwin/viewflow-rest","sub_path":"example_project/app_signals/flows.py","file_name":"flows.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"32"} +{"seq_id":"4125301389","text":"from Ghosty_API.models import Work, Deceased, Task\nfrom django.contrib.auth.models import User\nfrom rest_framework import permissions\nfrom rest_framework import viewsets\nfrom rest_framework.mixins import CreateModelMixin, UpdateModelMixin, ListModelMixin, RetrieveModelMixin\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\n\nfrom .serializers import UserSerializer, WorkSerializer, DeceasedSerializer, TaskSerializer\n\n\nclass UserViewSet(CreateModelMixin, UpdateModelMixin, ListModelMixin, RetrieveModelMixin,\n viewsets.GenericViewSet):\n permission_classes_by_action = {'default': [permissions.IsAuthenticated()],\n 'create': [permissions.AllowAny()]}\n\n def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return (permission for permission in self.permission_classes_by_action[self.action])\n except KeyError:\n # action is not set return default permission_classes\n return (permission for permission in self.permission_classes_by_action['default'])\n\n def get_queryset(self):\n if self.request.user.is_superuser:\n return User.objects.all()\n else:\n return User.objects.filter(id=self.request.user.id)\n\n queryset = User.objects.all()\n serializer_class = UserSerializer\n renderer_classes = (JSONRenderer,)\n\n\nclass WorkViewSet(viewsets.ModelViewSet):\n \"\"\"\n List all works, or create a new work.\n \"\"\"\n\n queryset = Work.objects.all()\n serializer_class = WorkSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (JSONWebTokenAuthentication,)\n\n\nclass DeceasedViewSet(viewsets.ModelViewSet):\n \"\"\"\n List all customers, or create a new one.\n \"\"\"\n\n queryset = Deceased.objects.all()\n serializer_class = DeceasedSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (JSONWebTokenAuthentication,)\n\n\nclass TaskViewSet(viewsets.ModelViewSet):\n \"\"\"\n List all tasks, or create a new task.\n \"\"\"\n\n queryset = Task.objects.all()\n serializer_class = 
TaskSerializer\n    permission_classes = (permissions.IsAuthenticated,)\n    authentication_classes = (JSONWebTokenAuthentication,)\n","repo_name":"headsrooms/ghosty-backend","sub_path":"src/Ghosty_API/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25283259584","text":"# Program for detecting yellow ball on a green mat and displaying its XY coordinates on the video\nimport cv2 as cv\nfrom helper.ball_detector import ball_detect\nfrom helper.utils import DLT, get_projection_matrix, read_camera_parameters\nimport numpy as np\n\n# Done on 80 cm ball video shared\ncap = cv.VideoCapture(\n    'stereo_vision/new_camera_data/stereo_camera_test_230208/80cm/stereo_80cm_ball_230208_0218pm.avi')\n\n# if result video need to save to disk change write_video=1 else write_video=0\nwrite_video = 1\n\n# if Z value is refence from camera frame camera_to_world=0 else camera_to_world=1\ncamera_to_world = 0\n\n# initializing video writer\nwidth = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))//2\nheight = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))\nfps = int(cap.get(cv.CAP_PROP_FPS))\nleft_v = cv.VideoWriter('output_xyz.avi', cv.VideoWriter_fourcc(\n    *'XVID'), fps, (width, height))\n\n# Function for getting XYZ coords from detection points\n\n\ndef get_xyz(r_pt, l_pt):\n\n    P0 = get_projection_matrix(0)\n    P1 = get_projection_matrix(1)\n\n    # RT matrix for C1 is identity.\n    # cmtx, dist = read_camera_parameters(0)\n    # RT1 = np.concatenate([np.eye(3), [[0],[0],[0]]], axis = -1)\n    # P0 = cmtx @ RT1 #projection matrix for C1\n\n    if l_pt[0] == 0 or r_pt[0] == 0:\n        point_3d = [0, 0, 0]\n    else:\n        point_3d = DLT(P0, P1, r_pt, l_pt) # calculate 3d position of keypoint\n\n    return point_3d\n\n# function for mapping values\n\n\ndef _map(x, in_min, in_max, out_min, out_max):\n    a = int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)\n    return a\n\n\ntry:\n    while True:\n        # reading video source\n        ret, frame = cap.read()\n        if not ret:\n            print(\"no video\")\n            break\n\n        # slicing stereo video to left and right frames\n        left_frame = frame[0:frame.shape[0], 0:(frame.shape[1]//2)]\n        right_frame = frame[0:frame.shape[0],\n                            (frame.shape[1]//2):frame.shape[1]]\n\n        img_width = left_frame.shape[1]\n        img_height = left_frame.shape[0]\n\n        # send left and right frames to ball detection function\n        left_detection, right_detection = ball_detect(left_frame, right_frame)\n\n        # using detected rectangles\n        (lx, ly, lw, lh) = left_detection\n        (x, y, w, h) = right_detection\n\n        # getting rectangle center\n        lcenter = (lx+lw//2, ly+lh//2)\n        rcenter = (x+w//2, y+h//2)\n\n        # Preset origin point\n        r_org = (725, 67) # origin point in right frame\n        l_org = (707, 102) # origin point in left frame\n\n        # drawing reference axis on the image\n        cv.line(left_frame, l_org, (l_org[0]+50, l_org[1]), (0, 0, 255), 2)\n        cv.line(left_frame, l_org, (l_org[0], l_org[1]+50), (255, 0, 0), 2)\n\n        # getting origin's XYZ\n        org_xyz = get_xyz(r_org, l_org)\n        [oX, oY, oZ] = org_xyz\n\n        # getting ball's XYZ\n        ball_xyz = get_xyz(lcenter, rcenter)\n        [bX, bY, bZ] = ball_xyz\n\n        Lx, Ly, Lz = 0, 0, 0\n\n        # if ball detected\n        if any(ball_xyz):\n\n            # mapping origin point Y to the image's height to 0 to 100cm\n            Ly = _map(lcenter[1], l_org[1], img_height, 0, 100)\n\n            # mapping origin point X to the image's width to 0 to 50cm\n            Lx = _map(lcenter[0], l_org[0], img_width, 0, 50)\n\n            # ball reference from origin\n            if camera_to_world == 0:\n                ball_left_xyz = 
get_xyz(\n (lcenter[0]-lw, lcenter[1]), (rcenter[0]-w, rcenter[1]))\n ball_right_xyz = get_xyz(\n (lcenter[0]+lw, lcenter[1]), (rcenter[0]+w, rcenter[1]))\n Lz = abs(ball_xyz[2]-((ball_left_xyz[2]+ball_right_xyz[2])//2))\n # Lz=bZ-oZ\n\n # ball reference from camera\n else:\n Lz = bZ\n\n # display bounding box over detected ball\n cv.rectangle(left_frame, (lx, ly), (lx+lw, ly+lh), (255, 0, 0), 2)\n\n # Display XYZ in frame\n cv.putText(left_frame, \"X: {} cm\".format(str(round(Lx, 1))), (lx+w+15, lcenter[1]-30), cv.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=[0, 0, 255], thickness=1, lineType=cv.LINE_AA)\n cv.putText(left_frame, \"Y: {} cm\".format(str(round(Ly, 1))), (lx+w+15, lcenter[1]), cv.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=[225, 0, 0], thickness=1, lineType=cv.LINE_AA)\n cv.putText(left_frame, \"Z: {} cm\".format(str(round(Lz, 1))), (lx+w+15, lcenter[1]+30), cv.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=[255, 255, 0], thickness=1, lineType=cv.LINE_AA)\n\n print(\"X,Y,Z: \", Lx, Ly, Lz)\n cv.imshow(\"L\", left_frame)\n # cv.imshow(\"R\",right_frame)\n\n # result video write to disk\n if write_video:\n left_v.write(left_frame)\n\n k = cv.waitKey(1)\n if k == ord('q'):\n break\n\n\nfinally:\n if write_video:\n left_v.release()\n cap.release()\n cv.destroyAllWindows()\n","repo_name":"vickyr95/Stereo_Vision_XYZ","sub_path":"Ball_detection_XYZ.py","file_name":"Ball_detection_XYZ.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27132723440","text":"import datetime\nimport time\n\n# 雪球相关配置 (这段本应该放在 config.py,token 是周期性更新,且自用,没必要给自己增加麻烦所以提出来放在这里做全局参数用)\nXQ_A_TOKEN = '030889433953bf4bf15985d7a13b442abad75180'\nTOKEN = 'xq_a_token={};'.format(XQ_A_TOKEN)\n\n\n# 当前时间\ndef nowTime():\n current = datetime.datetime.now()\n un_time = time.mktime(current.timetuple())\n current = datetime.datetime.now()\n # 打印当前时间\n print(\"当前时间 :\", current)\n return round(un_time)\n\n\n# (雪球网专用)\ndef currentTime():\n current = datetime.datetime.now()\n # 打印当前时间\n print(\"当前时间 :\", current)\n today = datetime.date.today()\n # yesterday = today - datetime.timedelta(days=1)\n tomorrow = today + datetime.timedelta(days=1)\n # print(\"当前时间 :\", today)\n # print(\"昨天 :\", yesterday)\n # print(\"明天 :\", tomorrow)\n # year = datetime.datetime.now().year\n # month = datetime.datetime.now().month\n # day = datetime.datetime.now().day\n # 雪球网请求是需要把日往后延一天\n dt = str(tomorrow) + ' 17:00:00'\n timeArray = time.strptime(dt, \"%Y-%m-%d %H:%M:%S\")\n timestamp = time.mktime(timeArray)\n # print(round(timestamp*1000))\n return round(timestamp * 1000)\n\n\ndef zeroTime():\n current = datetime.datetime.now()\n # 打印当前时间\n print(\"当前时间 :\", current)\n today = datetime.date.today()\n dt = str(today) + ' 00:00:00'\n timeArray = time.strptime(dt, \"%Y-%m-%d %H:%M:%S\")\n timestamp = time.mktime(timeArray)\n print(round(timestamp * 1000))\n return round(timestamp * 1000)\n\n\ndef customizeTime(offset=9):\n today = datetime.date.today()\n day = today + datetime.timedelta(days=offset)\n dt = str(day) + ' 00:00:00'\n timeArray = time.strptime(dt, \"%Y-%m-%d %H:%M:%S\")\n timestamp = time.mktime(timeArray)\n # print(round(timestamp*1000))\n return round(timestamp * 1000)\n\n\ndef printOptimizedForm(lists, limit=0):\n lists_len = len(lists)\n single_template = \"\\r{0:>4}\"\n print(single_template.format(\"股票代码\", chr(12288)))\n for i in range(lists_len):\n u = lists[i]\n if limit == 0:\n print(single_template.format(\"\\'\" + u[1] + 
\"\\', # \" + u[0], chr(12288)))\n else:\n if u[2] >= limit:\n print(single_template.format(\"\\'\" + u[1] + \"\\', # \" + u[0], chr(12288)))\n","repo_name":"qijingyu2013/StockPicker","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"25350038828","text":"from cmath import e\nfrom datetime import date\nfrom models import *\nfrom crud import *\n\n# Variável para checar se existem alterações que precisam ser salvas.\ninformacao = {\n \"alteradas\": False\n}\n\n# ATENÇÃO\n# Implementar os comandos usando as funções já criadas. Testar tudo a fim de encontrar possíveis bugs.\n# Se encontrarem algum bug, tentem resolver e alertem sobre ele no grupo.\n\nmes_com30 = [\"04\",\"06\",\"09\",\"11\"]\nmes_com31 = [\"01\",\"03\",\"05\",\"07\",\"08\",\"10\",\"12\"]\n\n# Cadastrar peça\ndef comando1():\n lista_cores = [COR_AMARELO,COR_AZUL,COR_BRANCO,COR_CINZA,COR_LARANJA,COR_PRETO,COR_ROSA,COR_ROXO,COR_VERDE,COR_VERMELHO,COR_VIOLETA]\n lista_tipo = [TIPO_CALCADO,TIPO_INFERIOR,TIPO_SUPERIOR]\n lista_padrao = [PADRAO_UNISSEX,PADRAO_MASCULINO,PADRAO_FEMININO]\n lista_tamanho = [TAMANHO_G,TAMANHO_M,TAMANHO_P]\n lista_situacao = [SITUACAO_DOACAO,SITUACAO_FICAR,SITUACAO_VENDA] \n\n # Cadastro do tipo da peça\n print(\"\\nPonha o tipo da peça: 'calçado', 'inferior' ou 'superior'\")\n while True:\n try:\n tipo = input()\n tipo_tratado = tipo.lower()\n if tipo_tratado in lista_tipo:\n break\n else:\n print(\"Ponha o tipo 'calçado', 'inferior' ou 'superior'\") \n except ValueError: \n print(\"Ponha apenas letras\")\n\n # Cadastro do tamanho da peça\n print(\"\\nPonha o tamanho da peça: 'p', 'm' ou 'g'\")\n while True:\n try:\n tamanho = input()\n tamanho_tratado = tamanho.lower()\n if tamanho_tratado in lista_tamanho: \n break\n else:\n print(\"Ponha o tamanho: 'p', 'm' ou 'g'\") \n except ValueError: \n print(\"Ponha apenas letras\")\n\n # Cadastro do padrão da peça\n print(\"\\nPonha o padrão da peça: 'feminino', 'masculino' ou 'unissex'\") \n while True:\n try:\n padrao = input()\n padrao_tratado = padrao.lower()\n if padrao_tratado in lista_padrao:\n break\n else:\n print(\"Ponha o padrão: 'feminino', 'masculino' ou 'unissex'\") \n except ValueError: \n print(\"Ponha apenas letras\")\n \n # Cadastro da cor da peça\n print(\"\\nPonha a cor da peça: 'vermelho', 'azul', 'amarelo', 'rosa', 'branco', 'cinza', 'verde', 'preto', 'roxo', 'violeta' ou 'laranja'\")\n while True:\n try:\n cor = input()\n cor_tratado = cor.lower()\n if cor_tratado in lista_cores:\n break\n else:\n print(\"Ponha a cor: 'vermelho', 'azul', 'amarelo', 'rosa', 'branco', 'cinza', 'verde', 'preto', 'roxo', 'violeta' ou 'laranja' \") \n except ValueError: \n print(\"Ponha apenas letras\")\n\n # Cadastro da data da peça \n print(\"Insira o ano da peça:\")\n # Cadastro do ano\n while True:\n try: \n ano = input() \n if int(ano) >=0:\n if len(ano) == 4: \n break \n else:\n print(\"Ponha um número maior que zero de 4 digitos\")\n except ValueError:\n print(\"Ponha um número inteiro\") \n print(\"\\nInsira o mês da peça:\")\n # Cadastro do mês\n while True:\n try:\n mes = input()\n if int(mes) <= 12 and int(mes) > 0:\n if len(mes) == 1:\n mes_aux = \"0\"\n for i in mes:\n mes_aux += i \n mes = mes_aux \n if len(mes) == 2:\n break\n else:\n print(\"ponha um número entre 0 e 12\")\n except ValueError:\n print(\"Ponha um número inteiro\") \n print(\"\\nInsira o dia da peça:\")\n # Cadastro do dia considerando 
se o mês é de 30 dias, 31 dias ou se o ano é bissexto\n while True:\n dia = input()\n try:\n if int(dia) >= 1:\n dia_aux = \"0\"\n if len(dia) == 1:\n for i in dia:\n dia_aux += i\n dia = dia_aux\n if len(dia) == 2:\n if mes in mes_com30: # Se o mês tem 30 dias\n if int(dia) <= 30:\n break\n else:\n print(f\"\\nEm {mes} só vai até 30\")\n if mes in mes_com31: # Se o mês tem 31 dias\n if int(dia) <=31:\n break\n else:\n print(f\"\\nEm {mes} só vai até 31\")\n if mes == \"02\": \n if int(ano)%4 == 0:\n if int(dia) <= 29:\n break \n else:\n print(f\"\\nEm {ano}, fevereiro só vai até 29\") \n else:\n if int(dia) <= 28:\n break \n else:\n print(f\"\\nEm {ano}, fevereiro só vai até 28\")\n else:\n print(\"\\nPonha um número maior que zero\") \n except ValueError:\n print(\"\\nPonha um número inteiro\") \n data = ano + \"-\" + mes + \"-\" + dia\n data_nova = datetime.fromisoformat(data).date() \n\n #Cadastro da situação da peça\n print(\"\\nPonha a situação da peça: 'venda', 'doação' ou 'ficar'\") \n while True:\n try:\n situacao = input()\n situacao_tratado = situacao.lower()\n if situacao_tratado in lista_situacao:\n break\n else:\n print(\"\\nPonha a situação: 'venda', 'doação' ou 'ficar'\") \n except ValueError: \n print(\"\\nPonha apenas letras\")\n\n # Se a situação da peça for para venda, perguntar o preço da peça\n if situacao_tratado == SITUACAO_VENDA:\n print(\"\\nPonha o preço da peça:\")\n while True:\n try:\n preco_novo = float(input()) \n if preco_novo >= 0:\n break\n else:\n print(\"\\nPonha um número maior que ou igual a zero:\")\n except ValueError:\n print(\"\\nPonha um número:\") \n else:\n preco_novo = 0.0\n\n # Chamando a função inserir_peca\n inserir_peca(tipo_tratado, tamanho_tratado, padrao_tratado, cor_tratado, data_nova, situacao_tratado, preco_novo)\n\n\n# Alterar peça\ndef comando2():\n lista_cores = [\"\",COR_AMARELO,COR_AZUL,COR_BRANCO,COR_CINZA,COR_LARANJA,COR_PRETO,COR_ROSA,COR_ROXO,COR_VERDE,COR_VERMELHO,COR_VIOLETA]\n lista_tipo = [\"\",TIPO_CALCADO,TIPO_INFERIOR,TIPO_SUPERIOR]\n lista_padrao = [\"\",PADRAO_UNISSEX,PADRAO_MASCULINO,PADRAO_FEMININO]\n lista_tamanho = [\"\",TAMANHO_G,TAMANHO_M,TAMANHO_P]\n lista_situacao = [\"\",SITUACAO_DOACAO,SITUACAO_FICAR,SITUACAO_VENDA] \n\n if len(pecas) == 0:\n print(\"\\nNão existem peças para serem alteradas\")\n return\n\n print(\"\\nPonha o id da peça que deseja alterar:\")\n try:\n id_peca = int(input())\n except ValueError:\n print(\"\\nId inválido!\")\n return \n\n peca_para_alteracao = \"\" # Inicializa para comparar depois\n #Identificando e armazenando em uma variável a peça\n for cada_peca in pecas: \n if cada_peca[\"id\"] == id_peca:\n peca_para_alteracao = cada_peca \n\n # Verifica se alguma peça foi encontrada, caso não, retorna\n if peca_para_alteracao == \"\":\n print(f\"\\nPeça de id {id_peca} não encontrado!\")\n return\n\n # Cada parte da data da peça sem alteração\n data_sem_alterar = str(peca_para_alteracao[\"data\"])\n ano_sem_alterar = data_sem_alterar[:4] \n mes_sem_alterar = data_sem_alterar[5:7] \n dia_sem_alterar = data_sem_alterar[8:]\n\n # Pôr a alteração no tipo da peça\n print(\"\\nAltere o tipo da peça para: 'calçado', 'inferior' ou 'superior'\")\n print(\"Caso não deseje alterar o tipo aperte enter.\")\n while True:\n try:\n tipo_peca = input()\n tipo_peca_tratado = tipo_peca.lower()\n if tipo_peca_tratado in lista_tipo:\n break\n else:\n print(\"\\nPonha o tipo 'calçado', 'inferior' ou 'superior'\") \n except ValueError: \n print(\"\\nPonha apenas letras\") \n \n # Alteração do tamanho da 
peça\n print(\"\\nAltere o tamanho da peça para: 'p', 'm' ou 'g'\")\n print(\"Caso não deseje alterar o tamanho aperte enter.\")\n while True:\n try:\n tamanho_peca = input()\n tamanho_peca_tratado = tamanho_peca.lower()\n if tamanho_peca_tratado in lista_tamanho: \n break\n else:\n print(\"\\nPonha o tamanho: 'p', 'm' ou 'g'\") \n except ValueError: \n print(\"\\nPonha apenas letras\") \n\n # Alteração do padrão da peça \n print(\"\\nAltere o padrão da peça para: 'feminino', 'masculino' ou 'unissex'\") \n print(\"Caso não deseje alterar o padrão aperte enter.\") \n while True:\n try:\n padrao_peca = input()\n padrao_peca_tratado = padrao_peca.lower()\n if padrao_peca_tratado in lista_padrao:\n break\n else:\n print(\"\\nPonha o padrão: 'feminino', 'masculino' ou 'unissex'\") \n except ValueError: \n print(\"\\nPonha apenas letras\")\n\n # Alteração da cor da peça\n print(\"\\nAltere a cor da peça para: 'vermelho', 'azul', 'amarelo', 'rosa', 'branco', 'cinza', 'verde', 'preto', 'roxo', 'violeta' ou 'laranja'\")\n print(\"Caso não deseje alterar a cor aperte enter.\")\n while True:\n try:\n cor_peca = input()\n cor_peca_tratado = cor_peca.lower()\n if cor_peca_tratado in lista_cores:\n break\n else:\n print(\"Ponha a cor: 'vermelho', 'azul', 'amarelo', 'rosa', 'branco', 'cinza', 'verde', 'preto', 'roxo', 'violeta' ou 'laranja' \") \n except ValueError: \n print(\"Ponha apenas letras\")\n \n # Altera a data da peça\n print(\"\\nAlterar a data da peça:\")\n print(\"Caso não deseje alterar a ano da peça aperte enter.\")\n print(\"Para que ano deve ser alterada?\")\n # Alteração do ano\n while True:\n try: \n ano = input() \n if len(ano) == 4 or ano ==\"\": \n break \n else:\n print(\"\\nPonha um número de 4 dígitos\")\n except ValueError:\n print(\"\\nPonha um número\") \n \n print(\"Caso não deseje alterar o mês da peça aperte enter\")\n print(\"\\nPara que mês deve ser alterada?\")\n\n # alteração do mês (o teste de enter vazio precisa vir antes de int(), que lançaria ValueError com \"\")\n while True:\n try:\n mes = input()\n if mes == \"\": # enter vazio: mantém o mês original\n break\n if int(mes) > 0 and int(mes) <= 12:\n if len(mes) == 1:\n mes_aux = \"0\"\n for i in mes:\n mes_aux += i \n mes = mes_aux \n if len(mes) == 2:\n break\n else:\n print(\"\\nPonha um número entre 0 e 12\")\n except ValueError:\n print(\"\\nPonha um número\") \n\n print(\"Caso não deseje alterar o dia da peça aperte enter\")\n print(\"\\nPara que dia deve ser alterada?\")\n # alteração do dia considerando se o mês é de 30 dias, 31 dias ou se o ano é bissexto\n while True:\n try:\n dia = input()\n if dia == \"\": # enter vazio: mantém o dia original\n break\n if int(dia) >= 1:\n if len(dia) == 1:\n dia_aux = \"0\" # era \"\", o que deixava dias de um dígito presos no laço (len nunca chegava a 2)\n for i in dia:\n dia_aux += i\n dia = dia_aux\n if len(dia) == 2:\n if mes != \"\": # Se o mês foi alterado\n if mes in mes_com30: \n if int(dia) <= 30:\n break\n else:\n print(f\"\\nEm {mes} só vai até 30\")\n if mes in mes_com31: \n if int(dia) <=31:\n break\n else:\n print(f\"\\nEm {mes} só vai até 31\")\n if mes == \"02\": \n if ano != \"\": # Se o ano foi alterado\n if int(ano)%4 == 0:\n if int(dia) <= 29:\n break \n else:\n print(f\"\\nNo ano {ano}, o mês de fevereiro só vai até 29\") \n else:\n if int(dia) <= 28:\n break \n else:\n print(f\"\\nNo ano {ano}, o mês de fevereiro só vai até 28\")\n else: # Se o ano não foi alterado\n if int(ano_sem_alterar)%4 == 0:\n if int(dia) <= 29:\n break \n else:\n print(f\"\\nNo ano {ano_sem_alterar}, o mês de fevereiro só vai até 29\") \n else:\n if int(dia) <= 28:\n break \n else:\n print(f\"\\nNo ano {ano_sem_alterar}, o mês de fevereiro só vai até 28\")\n else: # Se o mês não foi alterado \n if mes_sem_alterar in 
mes_com30: \n if int(dia) <= 30:\n break\n else:\n print(\"\\nO mês selecionado só vai até 30\")\n if mes_sem_alterar in mes_com31: \n if int(dia) <=31:\n break\n else:\n print(\"\\nO mês selecionado só vai até 31\")\n if mes_sem_alterar == \"02\": \n if ano != \"\": # Se o ano foi alterado \n if int(ano)%4 == 0:\n if int(dia) <= 29:\n break \n else:\n print(f\"\\nNo ano {ano}, o mês de fevereiro só vai até 29\") \n else:\n if int(dia) <= 28:\n break \n else:\n print(f\"\\nNo ano {ano}, o mês de fevereiro só vai até 28\")\n else: # Se o ano não foi alterado\n if int(ano_sem_alterar)%4 == 0:\n if int(dia) <= 29:\n break \n else:\n print(f\"\\nNo ano {ano_sem_alterar}, o mês de fevereiro só vai até 29\") \n else:\n if int(dia) <= 28:\n break \n else:\n print(f\"\\nNo ano {ano_sem_alterar}, o mês de fevereiro só vai até 28\")\n\n elif dia == \"\": # Se o dia não foi alterado\n break\n\n else:\n print(\"\\nPonha um número maior que zero\") \n except ValueError:\n print(\"\\nPonha um número\") \n\n if dia == \"\" and mes == \"\" and ano == \"\": # Se nenhuma data foi alterada\n data_nova = \"\"\n elif dia != \"\" and mes == \"\" and ano == \"\": # Se apenas o dia foi alterado\n data = ano_sem_alterar + \"-\" + mes_sem_alterar + \"-\" + dia\n data_nova = datetime.fromisoformat(data).date() \n elif dia == \"\" and mes != \"\" and ano == \"\": # Se apenas mês o foi alterado\n data = ano_sem_alterar + \"-\" + mes + \"-\" + dia_sem_alterar\n data_nova = datetime.fromisoformat(data).date()\n elif dia == \"\" and mes == \"\" and ano != \"\": # Se apenas o ano for alterado\n data = ano + \"-\" + mes_sem_alterar + \"-\" + dia_sem_alterar\n data_nova = datetime.fromisoformat(data).date()\n elif dia != \"\" and mes != \"\" and ano == \"\": # Se o dia for alterado e o mês também\n data = ano_sem_alterar + \"-\" + mes + \"-\" + dia\n data_nova = datetime.fromisoformat(data).date()\n elif dia != \"\" and mes == \"\" and ano != \"\": # Se o dia for alterado e o ano for alterado\n data = ano + \"-\" + mes_sem_alterar + \"-\" + dia\n data_nova = datetime.fromisoformat(data).date()\n elif dia == \"\" and mes != \"\" and ano != \"\": # Se o mês for alterado e o ano for alterado\n data = ano + \"-\" + mes + \"-\" + dia_sem_alterar\n data_nova = datetime.fromisoformat(data).date()\n else: # Se o dia, o mês e o ano forem alterados.\n data = ano + \"-\" + mes + \"-\" + dia\n data_nova = datetime.fromisoformat(data).date()\n\n # Alteração da situação da peça\n print(\"\\nAltere a situação da peça para: 'venda', 'doação' ou 'ficar'\")\n print(\"Caso não deseje alterar a situação aperte enter.\") \n while True:\n try:\n situacao_peca = input()\n situacao_peca_tratado = situacao_peca.lower()\n if situacao_peca_tratado in lista_situacao:\n break\n else:\n print(\"\\nPonha a situação: 'venda', 'doação' ou 'ficar'\") \n except ValueError: \n print(\"\\nPonha apenas letras\") \n\n preco_peca_novo = 0.0 #Se a peça não for para venda o preço da peça será 0 \n # Se a situação da peça alterada for para venda, perguntar o preço da peça\n if situacao_peca_tratado == SITUACAO_VENDA or (peca_para_alteracao[\"situação\"] == SITUACAO_VENDA and situacao_peca_tratado==\"\"):\n print(\"\\nAltere o preço da peça para:\")\n print(\"Caso não deseje alterar o preço digite -1.\")\n while True:\n try:\n preco_peca_novo = float(input()) \n if preco_peca_novo >= 0:\n break\n elif preco_peca_novo == -1:\n break\n else:\n print(\"\\nPonha um valor maior que ou igual a zero:\")\n except ValueError:\n print(\"\\nPonha um número real para representar 
o preço:\")\n # Se o usuário não desejar mudar o preço, envia para a função de modo que não altere o preço\n if preco_peca_novo == -1:\n preco_peca_novo = \"\"\n\n alterar_peca(id_peca,tipo= tipo_peca_tratado,tamanho= tamanho_peca_tratado, padrao= padrao_peca_tratado,cor=cor_peca_tratado,data=data_nova, preco=preco_peca_novo, situacao=situacao_peca_tratado)\n return\n\n# Remover peça\ndef comando3():\n # Pede ao usuario o id da peça que deseja remover e chama a função remover_peca\n print(\"Digite o id da peça que deseja remover:\")\n while True:\n try:\n id_para_remover = int(input())\n if id_para_remover > 0:\n break\n else:\n print(\"Ponha um id válido\")\n except ValueError:\n print(\"Ponha um número\") \n remover_peca(id_para_remover) \n return \n\n\n# Cadastrar estilo\ndef comando4():\n # Cria um estilo novo com o nome dado pelo usuário e informa possíveis erros no processo.\n try:\n nome_estilo = input(\"\\nDigite o nome do estilo: \")\n criar_estilo(nome_estilo)\n except Exception as e:\n print(f\"\\nAviso: {e}\")\n return\n\n # Checa se o usuário deseja inserir peças ao estilo recém criado.\n inserir = False\n while True:\n resposta = input(\"\\nDeseja inserir peças ao estilo? [s/n] \")\n resposta = resposta.lower()\n\n if resposta == \"s\" or resposta == \"sim\":\n inserir = True\n break\n elif resposta == \"n\" or resposta == \"nao\" or resposta == \"não\":\n inserir = False\n break\n else:\n print('\\nResposta inválida! Digite \"s\" para sim ou \"n\" para não.')\n\n while inserir:\n # Mostra ao usuário as peças existentes no guarda-roupa\n listar_pecas()\n # Insere uma peça escolhida pelo usuário no estilo e trata possíveis entradas inválidas.\n try:\n id_peca = int(input(\"\\nDigite o id da peça que deseja inserir no estilo: \"))\n adicionar_peca_a_estilo(id_peca, nome_estilo)\n print(\"\\nPeça adicionada com sucesso!\")\n # Informa o caso do ID passado não ser inteiro.\n except ValueError:\n print(\"\\nValor de ID inválido. Tente novamente!\")\n continue\n # Informa os casos de erro gerais, como o ID inexistente.\n except Exception as e:\n print(\"\\n%s\" %e)\n\n # Checa se o usuário deseja inserir mais uma peça ao estilo.\n while True:\n resposta = input(\"\\nDeseja inserir outra peça ao estilo? [s/n] \")\n resposta = resposta.lower()\n\n if resposta == \"s\" or resposta == \"sim\":\n break\n elif resposta == \"n\" or resposta == \"nao\" or resposta == \"não\":\n inserir = False\n break\n else:\n print('\\nResposta inválida! Digite \"s\" para sim ou \"n\" para não.')\n\n print(f\"\\nEstilo {nome_estilo} criado com sucesso!\")\n\n\n# Alterar estilo\ndef comando5():\n alterar_estilo()\n\n\n# Remover estilo\ndef comando6():\n lista_estilos = list(estilos.keys())\n nome_estilo = \"\"\n\n # Enumera os estilos cadastrados e recebe a escolha do usuário\n for i in range(len(lista_estilos)):\n print(\"%d - %s\" %((i+1), lista_estilos[i]))\n\n selecao = input(\"\\nSelecione um estilo: \")\n\n # Recebe e trata o input do usuário aceitando o nome do estilo ou o número equivalente.\n try:\n selecao = int(selecao)\n selecao -= 1\n\n if selecao >= 0 and selecao < len(lista_estilos):\n nome_estilo = lista_estilos[selecao]\n else:\n print(\"\\nEstilo inválido!\")\n return\n\n # Se o input for uma string, verificar se está em nomes (se é uma chave do dict estilos)\n except ValueError as e:\n if selecao in lista_estilos:\n nome_estilo = selecao\n else:\n print('\\nEstilo \"%s\" não está cadastrado!' 
%selecao)\n return\n\n # Se algo der errado, informar problema\n except Exception as e:\n print(\"\\nErro ao remover estilo:\",e)\n return\n\n remover_estilo(nome_estilo)\n print('\\nEstilo \"%s\" foi removido do sistema com sucesso!' %nome_estilo)\n\n\n# Listar todas as peças\ndef comando7():\n listar_pecas()\n\n\n# Listar peças por tamanho e padrão\ndef comando8():\n\n # Ordenando os comandos para chamá-los com o input do usuário\n padrao = [\"\", PADRAO_FEMININO, PADRAO_MASCULINO, PADRAO_UNISSEX]\n tamanho = [\"\", TAMANHO_P, TAMANHO_M, TAMANHO_G]\n\n while True:\n print(\"Para filtrar por tamanho digite: \")\n print(\"1 -> Tamanho P\")\n print(\"2 -> Tamanho M\")\n print(\"3 -> Tamanho G\")\n print(\"0 -> Não filtrar por tamanho\")\n\n try:\n resposta_tamanho = int(input(\"\\nSeu comando: \"))\n except ValueError:\n # Usuário digitou algo que não pode ser convertido pra int\n print(\"\\nAtenção: Insira apenas 0,1,2 ou 3.\")\n continue\n \n # Usuário digitou um número válido\n if resposta_tamanho in [0,1,2,3]:\n break\n\n # Usuário digitou um número fora do intervalo\n print(\"\\nAtenção: Insira apenas 0,1,2 ou 3.\")\n \n while True:\n print(\"Para filtrar por padrão digite: \")\n print(\"1 -> Padrão Feminino\")\n print(\"2 -> Padrão Masculino\")\n print(\"3 -> Padrão Unissex\")\n print(\"0 -> Não filtrar por padrão\")\n\n try:\n resposta_padrao = int(input(\"\\nSeu comando: \"))\n except ValueError:\n # Usuário digitou algo que não pode ser convertido pra int\n print(\"\\nAtenção: Insira apenas 0,1,2 ou 3.\")\n continue\n \n # Usuário digitou um número válido\n if resposta_padrao in [0,1,2,3]:\n break\n\n # Usuário digitou um número fora do intervalo\n print(\"\\nAtenção: Insira apenas 0,1,2 ou 3.\")\n \n # Com as respostas, chama listar_pecas_tamanho_padrao para expor o resultado ao usuário\n try:\n listar_pecas_tamanho_padrao(tamanho[resposta_tamanho], padrao[resposta_padrao])\n\n # Se algo der errado, printa a mensagem de erro\n except Exception as e:\n print(f\"\\n{e}\")\n\n\n# Listar estilos\ndef comando9():\n listar_estilos()\n return\n\n\n# Pesquisar estilo por nome\ndef comando10():\n selecionar_estilo()\n return\n\n\n# Listar peças para venda\ndef comando11():\n listar_pecas_para_venda()\n return\n\n\n# Listar peças para doação\ndef comando12():\n listar_pecas_para_doacao()\n return\n\n\n# Vender peça\ndef comando13():\n # Vende a peça passada pelo usuário e trata possíveis erros no processo.\n try:\n id_peca = int(input(\"\\nDigite o id da peça: \"))\n vender_para = input(\"Digite o nome do comprador da peça: \")\n vender_peca(id_peca, vender_para)\n print(\"\\nPeça vendida com sucesso!\")\n\n # Informações alteradas apenas se a venda deu certo\n informacao[\"alteradas\"] = True\n # Informa o caso do ID passado não ser inteiro.\n except ValueError:\n print(\"\\nValor de ID inválido. Tente novamente!\")\n # Informa os casos de erro gerais, como o ID inexistente.\n except Exception as e:\n print(\"\\n%s\" %e)\n\n\n# Doar peça\ndef comando14():\n # Doa a peça passada pelo usuário e trata possíveis erros no processo.\n try:\n id_peca = int(input(\"\\nDigite o id da peça: \"))\n doar_para = input(\"Digite o nome do destinatário da peça: \")\n doar_peca(id_peca, doar_para)\n print(\"\\nPeça doada com sucesso!\")\n\n # Informações alteradas apenas se a doação deu certo\n informacao[\"alteradas\"] = True\n # Informa o caso do ID passado não ser inteiro.\n except ValueError:\n print(\"\\nValor de ID inválido. 
Tente novamente!\")\n # Informa os casos de erro gerais, como o ID inexistente.\n except Exception as e:\n print(\"\\n%s\" %e)\n\n\n# Listar peças vendidas\ndef comando15():\n listar_pecas_vendidas()\n\n\n# Listar peças doadas\ndef comando16():\n listar_pecas_doadas()\n return\n\n\n# Salvar alterações\ndef comando17():\n try:\n salvar_alteracoes()\n print(\"Alterações salvas com sucesso!\")\n except Exception as e:\n print(\"\\nErro ao salvar alterações:\", e)\n\n\n# Finalizar programa\ndef comando0():\n if informacao[\"alteradas\"]:\n print(\"\\nALERTA: Você não salvou suas alterações\")\n\n while True:\n resposta = input(\"Deseja salvar suas alterações ao sair? [S/N] \")\n\n if resposta.lower() == \"s\" or resposta.lower() == \"sim\":\n comando17() # Salvar alterações\n print(\"\\nInformações salvas\")\n return\n elif resposta.lower() == \"n\" or resposta.lower() == \"não\" or resposta.lower() == \"nao\":\n print(\"\\nSaindo sem salvar...\")\n return\n else:\n print(\"\\nDigite apenas sim ou não\")\n\n \n \n\n\ndef menu_comandos():\n # Guarda comandos em string para particioná-la e iterar sobre os comandos\n comandos = \"Cadastrar uma peça|Alterar uma peça|Remover uma peça|Cadastrar um estilo|Alterar um estilo|Remover um estilo|Listar todas as peças|Listar peças por tamanho e padrão|Listar estilos|Pesquisar estilo por nome|Listar peças para venda|Listar peças para doação|Vender uma peça|Doar uma peça|Listar peças vendidas|Listar peças doadas|Salvar alterações|Finalizar programa\"\n comandos = comandos.split('|') # Transforma a string em uma lista de strings\n\n # Número de linhas de comandos printadas\n linhas_comandos = len(comandos)//3\n\n print(\"\\nDigite: \")\n for i in range(linhas_comandos):\n\n # Se última linha, printar último comando como 0\n if i == linhas_comandos - 1:\n print(f\"{i+1:2d} --> {comandos[i]:19s} | {linhas_comandos+i+1:2d} --> {comandos[linhas_comandos+i]:33s} | {0:2d} --> {comandos[(linhas_comandos*2)+i]:24s} |\")\n continue\n\n # Printar 3 colunas de comandos\n print(f\"{i+1:2d} --> {comandos[i]:19s} | {linhas_comandos+i+1:2d} --> {comandos[linhas_comandos+i]:33s} | {(linhas_comandos*2)+i+1:2d} --> {comandos[(linhas_comandos*2)+i]:24s} |\")\n \n\ndef interface_usuario():\n comando = -1\n\n print(\"\\n\\n---------- Bem vindo ao Guarda-Roupa Virtual ----------\")\n while comando != 0:\n menu_comandos()\n\n try:\n comando = int(input(\"\\nO que deseja fazer? 
\"))\n except:\n print(\"\\n\\nATENÇÃO: Digite apenas um número do menu\")\n continue\n\n # Pega o comando do usuário e o executa\n if comando == 1:\n comando1()\n informacao[\"alteradas\"] = True\n elif comando == 2:\n comando2()\n informacao[\"alteradas\"] = True\n elif comando == 3:\n comando3()\n informacao[\"alteradas\"] = True\n elif comando == 4:\n comando4()\n informacao[\"alteradas\"] = True\n elif comando == 5:\n comando5()\n informacao[\"alteradas\"] = True\n elif comando == 6:\n comando6()\n informacao[\"alteradas\"] = True\n elif comando == 7:\n comando7()\n elif comando == 8:\n comando8()\n elif comando == 9:\n comando9()\n elif comando == 10:\n comando10()\n elif comando == 11:\n comando11()\n elif comando == 12:\n comando12()\n elif comando == 13:\n comando13()\n elif comando == 14:\n comando14()\n elif comando == 15:\n comando15()\n elif comando == 16:\n comando16()\n elif comando == 17:\n comando17()\n informacao[\"alteradas\"] = False # False porque não existe mais alterações não salvas\n elif comando == 0:\n comando0()\n print(\"\\nGuarda-Roupa Virtual encerrado\")\n return\n else:\n print(\"ATENÇÃO: Digite apenas um número do menu\")\n \n input(\"\\nDigite qualquer coisa para continuar: \")\n\n \ndef main():\n carregar_arquivos() # Carrega arquivos ao executar o programa\n interface_usuario()\n\nif __name__ == \"__main__\":\n main()","repo_name":"brunosaunders/Trabalho-Final-FuP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":31045,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5084482754","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 19/07/2021 3:22 PM\n# @Author : Scott\n# @Main : Zheng@utk.edu\n# @File : SYBModel_V13SYBModel_V13.py\n# @Software: PyCharm\n# @Notes : GUROBI code for the SYBModel V13\nimport numpy as np\nfrom gurobipy import *\nimport pandas as pd\n\ndef Get_GuRoBi(Model_Name, Cost_Country_Stream, Cost_Country_Rail, Cost_Stream_Export, Cost_Rail_Export,\n Cost_Export_Import, Cost_Country_Facility, Alpha, Unit_Holding_Cost, Demand_China,\n Supply_Country, Inventory_Country_LastYear, Inventory_Stream_LastYear, Inventory_Rail_LastYear):\n # Parameters\n # Dom_P = 200 # Domestic Soybean price\n Beta1 = 315.948\n Beta2 = 0\n Beta3 = -4.43476 # Regression coefficients\n\n # Glo_P = 400 # Global Soybean price\n Gamma1 = 117.09\n Gamma2 = 0\n Gamma3 = 5.6e-6 # Regression coefficients\n\n Num_Country_Elevators = Cost_Country_Stream.shape[0]\n Num_Stream_Elevators = Cost_Stream_Export.shape[0]\n Num_Rail_Elevators = Cost_Rail_Export.shape[0]\n Num_Export_Terminals = Cost_Export_Import.shape[0]\n Num_Import_Terminals = Cost_Export_Import.shape[1]\n\n # Model\n model = Model(Model_Name)\n\n # Var\n X_Country_Stream = model.addVars(Num_Country_Elevators, Num_Stream_Elevators, lb=0, name='X_Country_Stream')\n X_Country_Rail = model.addVars(Num_Country_Elevators, Num_Rail_Elevators, lb=0, name='X_Country_Rail')\n X_Facility = model.addVars(Num_Country_Elevators, lb=0, name='X_Facility')\n I_Country = model.addVars(Num_Country_Elevators, lb=0, name='I_Country')\n I_Stream = model.addVars(Num_Stream_Elevators, lb=0, name='I_Stream')\n I_Rail = model.addVars(Num_Rail_Elevators, lb=0, name='I_Rail')\n Y_Stream_Export = model.addVars(Num_Stream_Elevators, Num_Export_Terminals, lb=0, name='Y_Stream_Export')\n Y_Rail_Export = model.addVars(Num_Rail_Elevators, Num_Export_Terminals, lb=0, name='Y_Rail_Export')\n Z_Export_Import = 
model.addVars(Num_Export_Terminals, Num_Import_Terminals, lb=0, name='Z_Export_Import')\n Domestic_Price = model.addVar(vtype=GRB.CONTINUOUS, name='Domestic_Price')\n Global_Price = model.addVar(vtype=GRB.CONTINUOUS, name='Global_Price')\n Slack_Climate = model.addVar(vtype=GRB.BINARY, name='Slack_Climate') # Slack Vars (a positional 1 here would be read as lb=1 and fix the binary at 1)\n Slack_Tariff = model.addVar(vtype=GRB.BINARY, name='Slack_Tariff')\n\n # Constraints\n # 2\n for c in range(Num_Country_Elevators):\n model.addConstr(Alpha * Inventory_Country_LastYear[c] + Supply_Country[c] - X_Facility[c] - I_Country[c]\n - quicksum(X_Country_Stream[c, s] for s in range(Num_Stream_Elevators))\n - quicksum(X_Country_Rail[c, r] for r in range(Num_Rail_Elevators)) == 0)\n\n # 3\n for s in range(Num_Stream_Elevators):\n model.addConstr(Alpha*Inventory_Stream_LastYear[s] + quicksum(X_Country_Stream[c,s] for c in range(Num_Country_Elevators))\n - quicksum(Y_Stream_Export[s,e] for e in range(Num_Export_Terminals)) - I_Stream[s] ==0)\n # 4\n for r in range(Num_Rail_Elevators):\n model.addConstr(Alpha*Inventory_Rail_LastYear[r] + quicksum(X_Country_Rail[c,r] for c in range(Num_Country_Elevators))\n - quicksum(Y_Rail_Export[r,e] for e in range(Num_Export_Terminals)) - I_Rail[r] == 0)\n\n # 5\n for e in range(Num_Export_Terminals):\n model.addConstr(quicksum(Y_Stream_Export[s, e] for s in range(Num_Stream_Elevators))\n + quicksum(Y_Rail_Export[r, e] for r in range(Num_Rail_Elevators))\n - quicksum(Z_Export_Import[e, i] for i in range(Num_Import_Terminals)) == 0)\n\n # 6\n model.addConstr(quicksum(Z_Export_Import[e,i] for e in range(Num_Export_Terminals)\n for i in range(Num_Import_Terminals)) <= Demand_China)\n\n # 7 (.values(): quicksum over a bare tupledict iterates its keys, not the variables)\n model.addConstr(Domestic_Price - Beta2 * Slack_Climate - Beta3 * quicksum(I_Country.values()) == Beta1)\n\n # 8\n model.addConstr(Global_Price - Gamma2 * Slack_Tariff - Gamma3 * quicksum(Z_Export_Import.values()) == Gamma1)\n\n # 9\n model.addConstr(Global_Price - Domestic_Price >= 0)\n model.addConstrs(Alpha * Inventory_Country_LastYear[c] + Supply_Country[c] - 10 * X_Facility[c] >= 0 for c in range(Num_Country_Elevators))\n\n # Objective\n obj = LinExpr()\n obj += quicksum((Global_Price-Cost_Export_Import[e, i]) * Z_Export_Import[e, i] for e in range(Num_Export_Terminals)\n for i in range(Num_Import_Terminals))\n obj += quicksum((Domestic_Price-Cost_Country_Facility[c]) * X_Facility[c] for c in range(Num_Country_Elevators))\n obj -= quicksum(Cost_Country_Stream[c,s] * X_Country_Stream[c,s] for c in range(Num_Country_Elevators)\n for s in range(Num_Stream_Elevators))\n obj -= quicksum(Cost_Country_Rail[c, r] * X_Country_Rail[c, r] for c in range(Num_Country_Elevators)\n for r in range(Num_Rail_Elevators))\n obj -= quicksum(Cost_Stream_Export[s, e] * Y_Stream_Export[s, e] for s in range(Num_Stream_Elevators)\n for e in range(Num_Export_Terminals))\n obj -= quicksum(Cost_Rail_Export[r, e] * Y_Rail_Export[r, e] for r in range(Num_Rail_Elevators)\n for e in range(Num_Export_Terminals))\n obj -= Unit_Holding_Cost*(quicksum(I_Country.values())+quicksum(I_Rail.values())+quicksum(I_Stream.values()))\n\n model.setObjective(\n obj,\n GRB.MAXIMIZE\n )\n\n # Compile\n model.update()\n model.params.NonConvex = 2\n model.optimize()\n #model.write(Model_Name + '.lp')\n\n # Print the objective value of the single-objective model\n print(\"Optimal Objective Value\", model.objVal)\n # For a multi-objective model, each objective value could be read like this:\n # for i in range(model.NumObj):\n # model.setParam(gurobipy.GRB.Param.ObjNumber, i)\n # print(f\"Obj {i + 1} = {model.ObjNVal}\")\n # Print nonzero variable values (rarely needed; see the Part 4 example)\n for var in model.getVars():\n if var.X != 0:\n print(f\"{var.varName}: 
{round(var.X, 3)}\")\n\n # Create DataFrame of all results\n Matrix_X_Country_Stream = [[X_Country_Stream[a, b].X for a in range(Num_Country_Elevators)] for b in range(Num_Stream_Elevators)]\n Matrix_X_Country_Stream = pd.DataFrame(Matrix_X_Country_Stream).T\n Matrix_X_Country_Stream = Matrix_X_Country_Stream.add_prefix('ToRiver_')\n Matrix_X_Country_Rail = [[X_Country_Rail[a, b].X for a in range(Num_Country_Elevators)] for b in range(Num_Rail_Elevators)]\n Matrix_X_Country_Rail= pd.DataFrame(Matrix_X_Country_Rail).T\n Matrix_X_Country_Rail = Matrix_X_Country_Rail.add_prefix('ToRail_')\n Matrix_X_Facility = [X_Facility[a].X for a in range(Num_Country_Elevators)]\n Matrix_X_Facility= pd.Series(Matrix_X_Facility, name='X_Facility')\n\n Matrix_I_Country = [I_Country[a].X for a in range(Num_Country_Elevators)]\n Matrix_I_Country= pd.Series(Matrix_I_Country, name='I_Country')\n Matrix_I_Stream = [I_Stream[a].X for a in range(Num_Stream_Elevators)]\n Matrix_I_Stream= pd.Series(Matrix_I_Stream, name='I_Stream')\n Matrix_I_Rail = [I_Rail[a].X for a in range(Num_Rail_Elevators)]\n Matrix_I_Rail= pd.Series(Matrix_I_Rail, name='I_Rail')\n #Matrix_I = pd.DataFrame(list(zip(Matrix_X_Facility, Matrix_I_Country, Matrix_I_Stream, Matrix_I_Rail)), columns=['X_Facility','I_Country', 'I_Stream', 'I_Rail'])\n\n Matrix_Y_Stream_Export = [[Y_Stream_Export[a, b].X for a in range(Num_Stream_Elevators)] for b in range(Num_Export_Terminals)]\n Matrix_Y_Stream_Export= pd.DataFrame(Matrix_Y_Stream_Export).T\n Matrix_Y_Rail_Export = [[Y_Rail_Export[a, b].X for a in range(Num_Rail_Elevators)] for b in range(Num_Export_Terminals)]\n Matrix_Y_Rail_Export= pd.DataFrame(Matrix_Y_Rail_Export).T\n\n Matrix_Z_Export_Import = [[Z_Export_Import[a, b].X for a in range(Num_Export_Terminals)] for b in range(Num_Import_Terminals)]\n Matrix_Z_Export_Import= pd.DataFrame(Matrix_Z_Export_Import).T.add_prefix('Import_')\n\n # write to file (forward slashes keep the paths portable and avoid invalid-escape warnings)\n Results_Country = pd.concat([Matrix_X_Country_Stream, Matrix_X_Country_Rail, Matrix_X_Facility, Matrix_I_Country], axis=1)\n Results_River = pd.concat([Matrix_Y_Stream_Export.add_prefix('RiverToExport_'), Matrix_I_Stream], axis=1)\n Results_Rail = pd.concat([Matrix_Y_Rail_Export.add_prefix('RailToExport_'), Matrix_I_Rail], axis=1)\n\n Results_Country.to_csv('./Outputs/ResultsOfCountryElevators.csv')\n Results_River.to_csv('./Outputs/ResultsOfRiverElevators.csv')\n Results_Rail.to_csv('./Outputs/ResultsOfRailElevators.csv')\n Matrix_Z_Export_Import.to_csv('./Outputs/ResultsOfExports.csv')\n\n return Matrix_X_Country_Stream, Matrix_X_Country_Rail, Matrix_X_Facility, Matrix_I_Country, Matrix_I_Stream, Matrix_I_Rail, \\\n Matrix_Y_Stream_Export, Matrix_Y_Rail_Export, Matrix_Z_Export_Import, Domestic_Price.X, Global_Price.X, Matrix_Z_Export_Import.sum().sum()\n\nif __name__ == '__main__':\n import numpy as np\n\n # A Small Demo\n # function parameters\n Model_Name = \"Soybean_V13_test\"\n Year = 'Demo'\n Alpha = 0.99\n\n # @Datasets\n # Country_Elevator to Stream_Elevator by Trucks @(c, s)\n Cost_Country_Stream = np.array([[20, 50], [55, 56], [33, 22]])\n\n # Country_Elevator to Rail_Elevator by Trucks @(c, r)\n Cost_Country_Rail = np.array([[20, 45, 60], [55, 50, 30], [44, 33, 22]])\n\n # Stream_Elevator to Export_Terminals by Barges @(s, e)\n Cost_Stream_Export = np.array([[60, 80, 79, 82], [67, 70, 68, 69]])\n\n # Rail_Elevator to Export_Terminals by Rail @(r, e)\n Cost_Rail_Export = np.array([[60, 80, 79, 82], [67, 70, 68, 69], [66, 80, 76, 77]])\n\n # Export_Terminals to Import_China by 
Ocean shipment from barge @(e,i)\n Cost_Export_Import = np.array([[100, 110], [109, 119], [125, 136], [133, 145]])\n\n # Country_Elevator to Domestic Processing Facility @(P^D)\n Cost_Country_Facility = np.array([15, 16, 15])\n\n # elevators unit holding cost @h\n Unit_Holding_Cost = 50\n\n # Supply of each Country elevator\n Supply_Country = np.array([65650, 90900, 80800])\n\n # China demand at year 2019\n Demand_China = 205500\n\n # last year inventory for each elevator @2019\n Inventory_Country_LastYear = np.array([6565, 9090, 8080])\n Inventory_Stream_LastYear = np.array([0, 0])\n Inventory_Rail_LastYear = np.array([0, 0, 0])\n\n # model input summary\n print(f'Model Name: {Model_Name}')\n NumOfCountry = Cost_Country_Stream.shape[0] if Cost_Country_Stream.shape[0] == Cost_Country_Rail.shape[0] == \\\n Cost_Country_Facility.shape[0] == Supply_Country.shape[0] == \\\n Inventory_Country_LastYear.shape[0] else 0\n print(f'Country Elevators: {NumOfCountry}')\n NumOfStream = Cost_Country_Stream.shape[1] if Cost_Country_Stream.shape[1] == Cost_Stream_Export.shape[0] \\\n == Inventory_Stream_LastYear.shape[0] else 0\n print(f'Stream Elevators: {NumOfStream}')\n NumOfRail = Cost_Country_Rail.shape[1] if Cost_Country_Rail.shape[1] == Cost_Rail_Export.shape[0] \\\n == Inventory_Rail_LastYear.shape[0] else 0\n print(f'Rail Elevators: {NumOfRail}')\n NumOfExport = Cost_Stream_Export.shape[1] if Cost_Stream_Export.shape[1] == Cost_Rail_Export.shape[1] \\\n == Cost_Export_Import.shape[0] else 0\n print(f'Exports: {NumOfExport}')\n print(f'Imports: {Cost_Export_Import.shape[1]}')\n\n Get_GuRoBi(Model_Name, Cost_Country_Stream, Cost_Country_Rail, Cost_Stream_Export, Cost_Rail_Export,\n Cost_Export_Import, Cost_Country_Facility, Alpha,\n Unit_Holding_Cost, Demand_China, Supply_Country, Inventory_Country_LastYear,\n Inventory_Stream_LastYear, Inventory_Rail_LastYear)","repo_name":"heishanmao/SYBModel","sub_path":"SYBModel_V13.py","file_name":"SYBModel_V13.py","file_ext":"py","file_size_in_byte":12145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1507933990","text":"import pickle\nimport numpy as np\nfrom skimage import io\nfrom intern.remote.boss import BossRemote\nfrom intern.resource.boss.resource import ChannelResource\nimport cmd\nimport sys\n\nclass NeuroDataResource:\n def __init__(self, host, token, collection, experiment, chanList):\n self._collection = collection\n self._experiment = experiment\n self._bossRemote = BossRemote({'protocol':'https',\n 'host':host,\n 'token':token})\n self._chanList = {}\n for chanDict in chanList:\n try:\n self._chanList[chanDict['name']] = ChannelResource(chanDict['name'],\n collection,\n experiment,\n 'image',\n datatype=chanDict['dtype'])\n except:\n #TODO error handle here\n raise Exception(\"Failed to load\")\n sys.exit(1)\n\n def assert_channel_exists(self, channel):\n return channel in self._chanList.keys()\n\n\n def get_cutout(self, chan, zRange=None, yRange=None, xRange=None):\n if not chan in self._chanList.keys():\n print('Error: Channel Not Found in this Resource')\n sys.exit(1)\n return\n if zRange is None or yRange is None or xRange is None:\n print('Error: You must supply zRange, yRange, xRange kwargs in list format')\n sys.exit(1)\n data = self._bossRemote.get_cutout(self._chanList[chan],\n 0,\n xRange,\n yRange,\n zRange)\n return data\n\ndef save_image(datadir, filename, data):\n try:\n filename = datadir + filename\n io.imsave(filename, data)\n except:\n raise Exception(\"Data could 
not be saved\")\n\n\ndef get_host_token(filename = \"neurodata.cfg\"): #expects neurodata.cfg file format\n print(\"\\n Loading neurodata.cfg \\n\")\n host = None\n token = None\n try:\n with open(filename, \"r\") as f:\n for line in f:\n if line.startswith(\"host\"):\n host = line.split(\" \")[-1]\n if line.startswith(\"token\"):\n token = line.split(\" \")[-1]\n except FileNotFoundError:\n raise Exception(\"neurodata.cfg file not found.\\n\")\n if host is None:\n raise Exception(\"Host not found\\n\")\n if token is None:\n raise Exception(\"Token not found\\n\")\n print(\"Loaded host: \" + host)\n print(\"Loaded token: \" + token)\n return host, token\n\n\ndef get_validated_user_input(prompt, type_):\n # NB: input() always returns str and every caller passes the literal \"str\", so this check never rejects; kept for interface compatibility.\n while True:\n ui = input(prompt)\n if (type(ui) == type(type_)):\n break\n else:\n print(\"Invalid input, please try again\\n\")\n return ui\n\ndef user_get_neurodata_resource(host, token):\n print(\"\\n Specify Boss Resource, User input REQUIRED \\n\")\n\n col = get_validated_user_input(\"Collection: \", \"str\")\n exp = get_validated_user_input(\"Experiment: \", \"str\")\n channel = get_validated_user_input(\"Channel: \", \"str\")\n dtype = get_validated_user_input(\"Datatype: \", \"str\")\n\n print(\"\\n Loading Boss Resource... \\n\")\n\n myResource = NeuroDataResource(host,\n token,\n col,\n exp,\n [{'name': channel, 'dtype': dtype}])\n print(\"Successfully Loaded Boss Resource!\\n\")\n filename = col+'_'+exp+' '+channel+'.tiff'\n print(filename)\n\n return myResource, channel, dtype, filename\n\ndef user_get_cutout(resource, channel, dtype):\n print(\"\\n Specify cutout, User input REQUIRED \\n\")\n\n z_str = get_validated_user_input(\"Z Range, Format: : \", \"str\")\n z_range = [int(z) for z in z_str.split(\" \")]\n\n y_str = get_validated_user_input(\"Y Range, Format: : \", \"str\")\n y_range = [int(y) for y in y_str.split(\" \")]\n\n x_str = get_validated_user_input(\"X Range, Format: : \", \"str\")\n x_range = [int(x) for x in x_str.split(\" \")]\n\n xyz = ' '+x_str.replace(' ','-')+' '+y_str.replace(' ','-')+' '+z_str.replace(' ','-')+' '\n xyz = xyz.replace(\" \", \"_\")\n\n print(\"\\n Getting Cutout... 
\\n\")\n data = resource.get_cutout(channel,\n z_range,\n y_range,\n x_range)\n\n return data, dtype, xyz\n\ndef user_save_data(data, filename, xyz):\n print(\"\\n Save Data \\n\")\n\n data_path = get_validated_user_input(\"Data Dir, Format: path/to/data/: \", \"str\")\n #filename = get_validated_user_input(\"Filename (.tif recommended): \", \"str\")\n filename = filename.replace(' ',xyz)\n save_image(data_path, filename, data)\n\ndef cast_uint8(data, dtype):\n print('Initial Type: ' + str(data.dtype))\n data = data.astype(dtype)\n print('Fixed Type: ' + str(data.dtype))\n return data\n\n\nif __name__ == '__main__':\n host, token = get_host_token()\n myResource, channel, dtype, filename = user_get_neurodata_resource(host, token) ## TODO: Make this less jank, figure out channel resource\n data, dtype, xyz = user_get_cutout(myResource, channel, dtype) ##TODO: Make this less jank\n data = cast_uint8(data, dtype) #TODO\n user_save_data(data, filename, xyz)\n","repo_name":"NeuroDataDesign/avatr","sub_path":"source/group/base_annotator/NeuroDataResource.py","file_name":"NeuroDataResource.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39921225453","text":"from django import forms\nfrom .models import *\nfrom django.contrib.auth.models import User\nfrom .validators import valid_url_extension\nfrom .validators import valid_url_mimetype\nfrom django.utils.translation import ugettext as _\nfrom markdownx.fields import MarkdownxFormField\nimport mimetypes\nimport pytz,datetime\n\n\nclass Eventsform(forms.ModelForm):\n start_time = forms.DateTimeField(\n input_formats = ['%Y-%m-%dT%H:%M'],\n widget = forms.DateTimeInput(\n attrs={\n 'type': 'datetime-local',\n 'class': 'form-control'},\n format='%Y-%m-%dT%H:%M')\n )\n end_time = forms.DateTimeField(\n input_formats = ['%Y-%m-%dT%H:%M'],\n widget = forms.DateTimeInput(\n attrs={\n 'type': 'datetime-local',\n 'class': 'form-control'},\n format='%Y-%m-%dT%H:%M')\n )\n\n def clean_image_url(self):\n url = self.cleaned_data['image_url'].lower()\n if not valid_url_extension(url) or not valid_url_mimetype(url):\n raise forms.ValidationError(_(\"Not a valid Image. 
The URL must have an image extensions (.jpg/.jpeg/.png)\"))\n return url\n\n def clean_start_time(self):\n wrong_aware = self.cleaned_data['start_time']\n tz = pytz.timezone('Asia/Kolkata')\n dt = datetime.datetime.fromtimestamp(wrong_aware.timestamp())\n final_dt = tz.localize(dt)\n return final_dt\n\n def clean_end_time(self):\n wrong_aware=self.cleaned_data['end_time']\n tz=pytz.timezone('Asia/Kolkata')\n dt=datetime.datetime.fromtimestamp(wrong_aware.timestamp())\n final_dt=tz.localize(dt)\n return final_dt\n\n class Meta:\n model=Events\n fields=('title','description','image_url','start_time','end_time')","repo_name":"RECursion-NITD/RECursionNITD-website","sub_path":"website/events/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"33354235957","text":"import sys\nimport argparse\nimport logging\n\nfrom .inference import KoGPTInference\n\n\ndef cli(flags: argparse.Namespace):\n model = KoGPTInference(flags.model, flags.revision, device=flags.device,\n model_parallel=flags.model_parallel)\n\n while True:\n prompt = input('prompt> ')\n if not prompt:\n continue\n temperature = float(input('temperature(0.8)> ') or '0.8')\n if temperature <= 0.0:\n print('temperature has to be positive')\n continue\n max_length = int(input('max_length(128)> ') or '128')\n generated = model.generate(prompt, temperature, max_length)\n print(f'{generated}')\n print('')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n prog='KoGPT inference',\n description='KakaoBrain Korean(hangul) Generative Pre-Training Model'\n )\n parser.add_argument('--model', type=str, default='kakaobrain/kogpt', help='huggingface repo (default:kakaobrain/kogpt)')\n parser.add_argument('--revision', type=str, default='KoGPT6B-ryan1.5b-float16', choices=['KoGPT6B-ryan1.5b', 'KoGPT6B-ryan1.5b-float16'], help='(default:KoGPT6B-ryan1.5b-float16)')\n parser.add_argument('--device', type=str, default='cuda', choices=['cpu', 'cuda'], help='(default:cuda)')\n parser.add_argument('--model_parallel', action='store_true', help='distribute the model across multiple GPUs')\n\n parser.add_argument('-d', '--debug', action='store_true')\n args = parser.parse_args()\n\n log_format = '[%(asctime)s] [%(levelname)s] %(message)s'\n level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(level=level, format=log_format, stream=sys.stderr)\n\n try:\n cli(args)\n except KeyboardInterrupt:\n print('terminate KakaoBrain Korean(hangul) Generative Pre-Training Model')\n","repo_name":"kakaobrain/kogpt","sub_path":"kogpt/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":973,"dataset":"github-code","pt":"32"} +{"seq_id":"24030426953","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Xiaobo Yang\n@contact: hal_42@zju.edu.cn\n@software: PyCharm\n@file: logger.py\n@time: 2020/3/23 21:49\n@desc:\n\"\"\"\nimport sys\nimport os\nimport os.path as osp\n\nclass Logger(object):\n \"\"\"After call Logger(outfile), the stdout will print to both stdout and out_file\"\"\"\n\n def __init__(self, out_file, real_time: bool=False, silence: bool=False):\n \"\"\"After call Logger(outfile), the stdout will print to both stdout and out_file\n\n Args:\n out_file: File where record the stdout\n real_time: If True, log file will flush after every write() call. 
(Default: False)\n silence: If True, output to terminal will be suppressed. (Default: False)\n \"\"\"\n self.terminal = sys.stdout\n sys.stdout = self\n\n os.makedirs(osp.dirname(out_file), exist_ok=True)\n self.log = open(out_file, \"w\", encoding=\"utf-8\")\n\n self.real_time = real_time\n self.silence = silence\n\n def write(self, message):\n if not self.silence:\n self.terminal.write(message)\n\n self.log.write(message)\n\n if self.real_time:\n self.log.flush()\n\n def flush(self):\n self.terminal.flush()\n self.log.flush()\n\n def __del__(self):\n self.flush()\n self.log.close()\n","repo_name":"TxT1212/AlchemyCat","sub_path":"py_tools/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"2979253604","text":"import re\nimport sys,os\nsys.path.append(os.path.realpath('..'))\n\n# Defining the energy reference for easier changing among different files\nE_ref = 47.63700381\n\n# Open input and output files\nwith open('input_file.txt', 'r') as infile, open('data/DMF_extracted.txt', 'w') as outfile1, open('scripts/radius_post_extraction.txt', 'w') as outfile2, open('data/angle_extracted.txt', 'w') as outfile3:\n # Loop through each line in the input file\n for line in infile:\n # Use regular expression to match lines starting with \"!Density functional\" and capture the number after the spaces\n match1 = re.match(r'^\\s*!Density functional\\s+([\\d\\.\\-]+)', line)\n if match1:\n # Write the captured radius to the output file\n outfile1.write(str(abs(abs(float(match1.group(1))) - E_ref)) + '\\n')\n match2 = re.match(r'^\\s*DO\\s+R\\s+=\\s+(.+)', line)\n if match2:\n # Write the captured angle to the output file\n outfile2.write(match2.group(1) + '\\n')\n match3 = re.match(r'^\\s*DO\\s+PHI\\s+=\\s+(.+)', line)\n if match3:\n # Write the captured number to the output file\n outfile3.write(match3.group(1) + '\\n')\n","repo_name":"3ab3at/Data-Extraction-and-3D-Plotting","sub_path":"scripts/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6085738332","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport hashlib\r\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\r\nfrom scrapy.contrib.linkextractors import LinkExtractor\r\nfrom scrapy.selector import Selector\r\nfrom forum.items import PostItemsList\r\nimport re\r\nimport logging\r\nfrom bs4 import BeautifulSoup\r\nimport string\r\nimport dateparser\r\nimport time\r\n# import lxml.html\r\n# from lxml.etree import ParserError\r\n# from lxml.cssselect import CSSSelector\r\n\r\n## LOGGING to file\r\n#import logging\r\n#from scrapy.log import ScrapyFileLogObserver\r\n\r\n#logfile = open('testlog.log', 'w')\r\n#log_observer = ScrapyFileLogObserver(logfile, level=logging.DEBUG)\r\n#log_observer.start()\r\n\r\n# Spider for crawling Adidas website for shoes\r\nclass ForumsSpider(CrawlSpider):\r\n name = \"renalcellcarcinoma_cancerorg_spider\"\r\n allowed_domains = [\"cancer.org\"]\r\n# start_urls = [\r\n# \"http://www.healingwell.com/community/default.aspx?f=23&m=1001057\",\r\n# ]\r\n start_urls = [\r\n \"http://csn.cancer.org/forum/142\",\r\n ]\r\n\r\n rules = (\r\n # Rule to go to the single product pages and run the parsing function\r\n # Excludes links that end in _W.html or _M.html, because they point to \r\n # configuration pages that aren't scrapeable (and are mostly redundant 
anyway)\r\n Rule(LinkExtractor(\r\n restrict_xpaths='//tr[contains(@class, \"odd\")]//td[contains(@class, \"title\")]',\r\n canonicalize=True,\r\n ), callback='parsePost', follow=True),\r\n\r\n Rule(LinkExtractor(\r\n restrict_xpaths='//tr[contains(@class, \"even\")]//td[contains(@class, \"title\")]',\r\n canonicalize=True,\r\n ), callback='parsePost', follow=True),\r\n\r\n # Rule to follow arrow to next product grid\r\n Rule(LinkExtractor(\r\n restrict_xpaths='//li[contains(@class, \"pager-item\")]',\r\n canonicalize=True,\r\n deny='http://csn.cancer.org/node/',\r\n #allow='http://www.cancerforums.net/threads/',\r\n ), follow=True),\r\n\r\n Rule(LinkExtractor(\r\n restrict_xpaths='//li[contains(@class, \"pager-item\")]',\r\n canonicalize=True,\r\n deny='http://csn.cancer.org/forum/'\r\n #allow='http://www.cancerforums.net/threads/',\r\n ), callback='parsePost', follow=True),\r\n )\r\n\r\n def getDate(self,date_str):\r\n # date_str=\"Fri Feb 12, 2010 1:54 pm\"\r\n try:\r\n date = dateparser.parse(date_str)\r\n epoch = int(date.strftime('%s'))\r\n create_date = time.strftime(\"%Y-%m-%d'T'%H:%M%S%z\", time.gmtime(epoch))\r\n return create_date\r\n except Exception:\r\n #logging.error(\">>>>>\"+date_str)\r\n return date_str\r\n \r\n # https://github.com/scrapy/dirbot/blob/master/dirbot/spiders/dmoz.py\r\n # https://github.com/scrapy/dirbot/blob/master/dirbot/pipelines.py\r\n def parsePost(self,response):\r\n logging.info(response)\r\n sel = Selector(response)\r\n posts = sel.xpath('//*[@id=\"comments\"]').css('.comment-forum')\r\n condition=\"renal cell carcinoma\"\r\n items = []\r\n topic = self.cleanText(\" \".join(sel.xpath('//*[@id=\"squeeze\"]/div/div/h2/text()').extract()))\r\n url = response.url\r\n\r\n for post in posts:\r\n if len(post.css('.author'))==0:\r\n continue\r\n item = PostItemsList()\r\n item['author'] = self.cleanText(\" \".join(post.css('.author').extract()))\r\n item['author_link']=''\r\n item['condition']=condition\r\n create_date = self.cleanText( \" \".join(post.css('.date').xpath('./span/text()').extract()))\r\n item['create_date'] = self.getDate(create_date)\r\n item['domain'] = \"\".join(self.allowed_domains)\r\n post_msg=self.cleanText(\" \".join(post.css('.content').extract()))\r\n item['post']=post_msg\r\n # item['tag']=''\r\n item['topic'] = topic\r\n item['url']=url\r\n logging.info(post_msg)\r\n items.append(item)\r\n return items\r\n\r\n def cleanText(self,text,printableOnly=True):\r\n soup = BeautifulSoup(text,'html.parser')\r\n text = soup.get_text();\r\n text = re.sub(\"(-+| +|\\n|\\r|\\t|\\0|\\x0b|\\xa0|\\xbb|\\xab)+\",' ',text).strip()\r\n if(printableOnly):\r\n return filter(lambda x: x in string.printable, text)\r\n return text \r\n\r\n","repo_name":"florencefantine/ehealth_scraper","sub_path":"forum/spiders/renalcellcarcinoma_cancerorg_spider.py","file_name":"renalcellcarcinoma_cancerorg_spider.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30185722615","text":"import logging\nfrom Acquisition import aq_inner, aq_base, aq_parent\nfrom zope.interface import implements, Interface\nfrom zope.component import adapts, getMultiAdapter, queryUtility, getUtility\nfrom zope import schema\nfrom zope.formlib import form\nfrom zope.publisher.browser import BrowserPage\n\nfrom plone.i18n.normalizer.interfaces import IIDNormalizer\nfrom Products.CMFPlone.utils import getFSVersionTuple\n\nfrom types import StringType\n\nfrom plone.memoize import 
instance\nfrom plone.memoize import view\nfrom plone.memoize.compress import xhtml_compress\nfrom plone.memoize.instance import memoize\nfrom plone.app.uuid.utils import uuidToObject\n\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import utils\nfrom Products.ATContentTypes.interface import IATFolder\n\nfrom Products.CMFPlone.interfaces import INonStructuralFolder\ntry:\n from Products.CMFDynamicViewFTI.interfaces import IBrowserDefault\nexcept:\n from Products.CMFPlone.interfaces import IBrowserDefault\nfrom Products.CMFPlone import PloneMessageFactory as _pmf\nfrom Products.CMFPlone.browser.interfaces import INavigationTree\nfrom Products.CMFPlone.browser.navtree import SitemapNavtreeStrategy\n\nfrom plone.portlets.interfaces import IPortletManager, IPortletRenderer, IPortletDataProvider\nfrom plone.portlets.utils import unhashPortletInfo\n\nfrom plone.app.portlets.portlets.navigation import INavigationPortlet\nfrom plone.app.portlets.utils import assignment_from_key\nfrom plone.app.portlets.portlets import base\nfrom plone.app.portlets.portlets import navigation\n\nfrom plone.app.layout.navigation.root import getNavigationRoot\nfrom plone.app.layout.navigation.navtree import buildFolderTree, NavtreeStrategyBase\nfrom plone.app.layout.navigation.defaultpage import isDefaultPage\nfrom plone.app.layout.navigation.interfaces import INavigationQueryBuilder, INavtreeStrategy\n\nfrom plone.app.vocabularies.catalog import SearchableTextSourceBinder\nfrom plone.app.form.widgets.uberselectionwidget import UberSelectionWidget\n\nfrom .interfaces import *\nfrom Solgema.NavigationPortlet.config import _\n_logger = logging.getLogger(__name__)\n\ndef buildFolderTreeCustom(context, request, obj=None, query={}, strategy=NavtreeStrategyBase()):\n\n portal_state = getMultiAdapter((context, request), name=u'plone_portal_state')\n portal_catalog = getToolByName(context, 'portal_catalog')\n\n showAllParents = strategy.showAllParents\n rootPath = strategy.rootPath\n\n request = getattr(context, 'REQUEST', {})\n\n objPath = None\n objPhysicalPath = None\n if obj is not None:\n container = aq_parent(obj)\n objPhysicalPath = obj.getPhysicalPath()\n if isDefaultPage(container, obj):\n objPhysicalPath = objPhysicalPath[:-1]\n objPath = '/'.join(objPhysicalPath)\n\n portalObject = portal_state.portal()\n portalPath = '/'.join( portalObject.getPhysicalPath() )\n\n if 'path' not in query:\n if rootPath is None:\n rootPath = portalPath\n query['path'] = rootPath\n elif rootPath is None:\n pathQuery = query['path']\n if type(pathQuery) == StringType:\n rootPath = pathQuery\n else:\n # Adjust for the fact that in a 'navtree' query, the actual path\n # is the path of the current context\n if pathQuery.get('navtree', False):\n navtreeLevel = pathQuery.get('navtree_start', 1)\n if navtreeLevel > 1:\n navtreeContextPath = pathQuery['query']\n navtreeContextPathElements = navtreeContextPath[len(portalPath)+1:].split('/')\n # Short-circuit if we won't be able to find this path\n if len(navtreeContextPathElements) < (navtreeLevel - 1):\n return {'children': []}\n rootPath = portalPath + '/' + '/'.join(navtreeContextPathElements[:navtreeLevel-1])\n else:\n rootPath = portalPath\n else:\n rootPath = pathQuery['query']\n\n rootDepth = len(rootPath.split('/'))\n\n pruneRoot = False\n if strategy is not None:\n rootObject = portalObject.unrestrictedTraverse(rootPath, None)\n if rootObject is not None:\n pruneRoot = not 
strategy.showChildrenOf(rootObject)\n\n if 'sort_on' not in query:\n query['sort_on'] = 'getObjPositionInParent'\n\n if 'is_default_page' not in query:\n query['is_default_page'] = False\n elif isinstance(query.get('is_default_page', None), (list, tuple)) and True in query.get('is_default_page') and False in query.get('is_default_page'):\n del query['is_default_page']\n\n results = portal_catalog.searchResults(query)\n\n itemPaths = {}\n\n itemPaths[rootPath] = {'children': []}\n\n if pruneRoot:\n itemPaths[rootPath]['_pruneSubtree'] = True\n\n def insertElement(itemPaths, item, forceInsert=False):\n \n itemPath = item.getPath()\n itemInserted = (itemPaths.get(itemPath, {}).get('item', None) is not None)\n if not forceInsert and itemInserted:\n return\n\n itemPhysicalPath = itemPath.split('/')\n parentPath = '/'.join(itemPhysicalPath[:-1])\n parentPruned = (itemPaths.get(parentPath, {}).get('_pruneSubtree', False))\n\n if not forceInsert and parentPruned:\n return\n\n isCurrent = isCurrentParent = False\n if objPath is not None:\n if objPath == itemPath:\n isCurrent = True\n elif objPath.startswith(itemPath + '/') and len(objPhysicalPath) > len(itemPhysicalPath):\n isCurrentParent = True\n\n relativeDepth = len(itemPhysicalPath) - rootDepth\n\n newNode = {'item': item,\n 'depth': relativeDepth,\n 'currentItem': isCurrent,\n 'currentParent': isCurrentParent, }\n\n insert = True\n if not forceInsert and strategy is not None:\n insert = strategy.nodeFilter(newNode)\n if insert:\n\n if strategy is not None:\n newNode = strategy.decoratorFactory(newNode)\n \n if parentPath in itemPaths:\n itemParent = itemPaths[parentPath]\n if forceInsert:\n nodeAlreadyInserted = False\n for i in itemParent['children']:\n if i['item'].getPath() == itemPath:\n nodeAlreadyInserted = True\n break\n if not nodeAlreadyInserted:\n itemParent['children'].append(newNode)\n elif not itemParent.get('_pruneSubtree', False):\n itemParent['children'].append(newNode)\n else:\n itemPaths[parentPath] = {'children': [newNode]}\n\n if strategy.showAllParents and isCurrentParent:\n expand = True\n else:\n expand = getattr(item, 'is_folderish', True)\n if expand and (not forceInsert and strategy is not None):\n expand = strategy.subtreeFilter(newNode)\n\n children = newNode.setdefault('children', [])\n if expand:\n if itemPath in itemPaths:\n children.extend(itemPaths[itemPath]['children'])\n else:\n newNode['_pruneSubtree'] = True\n\n itemPaths[itemPath] = newNode\n\n for r in results:\n insertElement(itemPaths, r)\n\n if strategy.showAllParents and objPath is not None:\n objSubPathElements = objPath[len(rootPath)+1:].split('/')\n parentPaths = []\n\n haveNode = (itemPaths.get(rootPath, {}).get('item', None) is None)\n if not haveNode:\n parentPaths.append(rootPath)\n\n parentPath = rootPath\n for i in range(len(objSubPathElements)):\n nodePath = rootPath + '/' + '/'.join(objSubPathElements[:i+1])\n node = itemPaths.get(nodePath, None)\n\n if node is None or 'item' not in node:\n parentPaths.append(nodePath)\n else:\n nodeParent = itemPaths.get(parentPath, None)\n if nodeParent is not None:\n nodeAlreadyInserted = False\n for i in nodeParent['children']:\n if i['item'].getPath() == nodePath:\n nodeAlreadyInserted = True\n break\n if not nodeAlreadyInserted:\n nodeParent['children'].append(node)\n\n parentPath = nodePath\n\n if len(parentPaths) > 0:\n query = {'path': {'query': parentPaths, 'depth': 0}}\n results = portal_catalog.unrestrictedSearchResults(query)\n\n for r in results:\n insertElement(itemPaths, r, 
forceInsert=True)\n\n return itemPaths[rootPath]\n\nclass navTreeItem( BrowserPage ):\n _contenttemplate = ViewPageTemplateFile('contentnavigation.pt')\n _recurse_old = ViewPageTemplateFile('navigation_recurse_old.pt')\n _recurse_p5 = ViewPageTemplateFile('navigation_recurse.pt')\n\n implements(InavTreeItem)\n\n def __init__(self, context, request):\n super(navTreeItem, self).__init__(context, request)\n self.urltool = getToolByName(self.context, 'portal_url')\n self.portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n self.portal = self.portal_state.portal()\n self.data = {}\n self.root = context\n self.showAllParents = False\n self.hasContent = False\n self.portletRenderer = self.getPortletRenderer()\n self.data = self.portletRenderer.data\n\n def getPortletRenderer(self):\n portlethash = self.request.get('portlethash', '')\n info = unhashPortletInfo(portlethash) \n manager = getUtility(IPortletManager, info['manager'])\n \n assignment = assignment_from_key(context = self.context, \n manager_name = info['manager'], \n category = info['category'],\n key = info['key'],\n name = info['name'])\n renderer = getMultiAdapter(\n (self.context, self.request, self, manager, assignment.data),\n IPortletRenderer\n )\n return renderer.__of__(self.context)\n\n def canUseContextualMenu(self):\n self.portletRenderer.canUseContextualMenu()\n\n def getContext(self):\n if hasattr(self.request, 'get') and self.request.get('navtreepath'):\n context = self.portal.restrictedTraverse(self.request.get('navtreepath'))\n if context:\n return context\n return self.context\n\n def navigationTreeRootPath(self):\n return '/'.join(self.portal_state.navigation_root().getPhysicalPath())\n\n @property\n def rootPath(self):\n return self.navigationTreeRootPath()\n\n def canManage(self):\n user = self.portal_state.member()\n return user and user.has_permission('List folder contents', self.context)\n\n def isMember(self):\n return not self.portal_state.anonymous()\n\n def getQuery(self, context):\n user = self.portal_state.member()\n\n portal_properties = getToolByName(context, 'portal_properties')\n navtree_properties = getattr(portal_properties, 'navtree_properties')\n\n # Acquire a custom nav query if available\n customQuery = getattr(context, 'getCustomNavQuery', None)\n if customQuery is not None and utils.safe_callable(customQuery):\n query = customQuery()\n else:\n query = {}\n\n # Construct the path query\n\n rootPath = self.navigationTreeRootPath()\n currentPath = '/'.join(context.getPhysicalPath())\n query['is_default_page'] = False\n # If we are above the navigation root, a navtree query would return\n # nothing (since we explicitly start from the root always). 
Hence,\n # use a regular depth-1 query in this case.\n\n if currentPath!=rootPath and not currentPath.startswith(rootPath+'/'):\n query['path'] = {'query' : rootPath, 'depth' : 1}\n else:\n query['path'] = {'query' : currentPath, 'navtree' : 1}\n\n # XXX: It'd make sense to use 'depth' for bottomLevel, but it doesn't\n # seem to work with EPI.\n\n # Only list the applicable types\n ploneUtils = getToolByName(self.context, 'plone_utils')\n friendlyTypes = ploneUtils.getUserFriendlyTypes()\n if 'MemberDataContainer' in friendlyTypes:\n friendlyTypes.remove('MemberDataContainer')\n query['portal_type'] = friendlyTypes\n\n # Apply the desired sort\n sortAttribute = navtree_properties.getProperty('sortAttribute', None)\n if sortAttribute is not None:\n query['sort_on'] = sortAttribute\n sortOrder = navtree_properties.getProperty('sortOrder', None)\n if sortOrder is not None:\n query['sort_order'] = sortOrder\n\n # Filter on workflow states, if enabled\n if not user or not user.has_permission('List folder contents', self.context):\n if navtree_properties.getProperty('enable_wf_state_filtering', False):\n query['review_state'] = navtree_properties.getProperty('wf_states_to_show', ())\n return query\n\n @instance.memoize\n def getNavTree(self, context=None):\n if not context:\n context = self.getContext()\n if self.request.get('navtreepath'):\n canUseScrollPane = self.portletRenderer.canUseScrollPane()\n if not canUseScrollPane or ( canUseScrollPane and not self.portletRenderer.canManage()):\n queryBuilder = getMultiAdapter((context, self.data), INavigationQueryBuilder)\n strategy = getMultiAdapter((context, self.data), INavtreeItemStrategy)\n return buildFolderTree(context, obj=context, query=queryBuilder(), strategy=strategy)\n strategy = getMultiAdapter((context, self), ISManagerContentNavtreeStrategy)\n return buildFolderTreeCustom(context, self.request, obj=context, query=self.getQuery(context), strategy=strategy)\n return None\n\n @instance.memoize\n def createNavTree(self, navtreepath=None, level=0):\n context = self.portal.restrictedTraverse(navtreepath)\n datas = self.getNavTree(context)\n base_childs = datas.copy()\n childs = base_childs.get('children', [])\n firstItem = ''\n lastItem = ''\n i = 0\n hasChild = False\n for child in childs:\n if child.get('children', None):\n hasChild = True\n\n if childs and hasChild:\n while i < len(childs):\n while childs[i].get('children', []):\n newchilds = childs[i].copy()\n subchilds = newchilds.get('children')\n newchilds['children'] = []\n childs = [childs[a] for a in range(len(childs)) if a != i]\n childs.insert(i, newchilds)\n for j in range(len(subchilds)):\n childs.insert(i+j+1, subchilds[0])\n i = 0\n else:\n i += 1\n baseChildren = self.getNavTree().get('children', [])\n return self.recurse(children=self.getNavTree().get('children', []), level=level+1, bottomLevel=0, firstItem=firstItem, lastItem=lastItem, childs=str(childs))\n\n def recurse(self, children=[], level=None, bottomLevel=0, firstItem='', lastItem='', childs=''):\n if getFSVersionTuple()[0] == 4:\n return xhtml_compress(self._recurse_old(children=children, level=level, bottomLevel=bottomLevel, firstItem=firstItem, lastItem=lastItem, childs=childs))\n return xhtml_compress(self._recurse_p5(children=children, level=level, bottomLevel=bottomLevel, firstItem=firstItem, lastItem=lastItem, childs=childs))\n\n def __call__(self):\n if self.request.get('navtreepath', None):\n level = int(self.request.get('navtreelevel', 0)) + 1\n navtreepath = self.request.get('navtreepath', None)\n 
return xhtml_compress(self._contenttemplate(navtreepath=navtreepath, level=level))\n return None\n\nclass Assignment(navigation.Assignment):\n implements(ISolgemaNavigationPortlet)\n\n useScrollPane = True\n allowedRolesToUseScrollPane = ['Manager', 'Site Administrator']\n useContextualMenu = True\n allowedRolesToUseContextualMenu = ['Manager', 'Site Administrator']\n \n def __init__(self, name=u\"\", root=None, currentFolderOnly=False, includeTop=False, topLevel=1, bottomLevel=0, useScrollPane=True, allowedRolesToUseScrollPane=['Manager', 'Site Administrator'], useContextualMenu=True, allowedRolesToUseContextualMenu=['Manager', 'Site Administrator']):\n super(Assignment, self).__init__(name, root, currentFolderOnly, includeTop, topLevel, bottomLevel)\n self.useScrollPane = useScrollPane\n self.allowedRolesToUseScrollPane = allowedRolesToUseScrollPane\n self.useContextualMenu = useContextualMenu\n self.allowedRolesToUseContextualMenu = allowedRolesToUseContextualMenu\n\nclass Renderer(navigation.Renderer):\n\n _template_old = ViewPageTemplateFile('navigation_old.pt')\n _template_p5 = ViewPageTemplateFile('navigation.pt')\n _recurse_old = ViewPageTemplateFile('navigation_recurse_old.pt')\n _recurse_p5 = ViewPageTemplateFile('navigation_recurse.pt')\n\n def __init__(self, context, request, view, manager, data):\n super(Renderer, self).__init__(context, request, view, manager, data)\n self.portal_state = getMultiAdapter((context, request), name=u'plone_portal_state')\n portal_properties = getToolByName(context, 'portal_properties')\n self.properties = getattr(portal_properties, 'navtree_properties', None)\n \n @property\n def available(self):\n return True\n rootpath = self.getNavRootPath()\n if rootpath is None and not self.canManage():\n return False\n\n tree = self.getNavTree()\n root = self.getNavRoot()\n return (root is not None and len(tree['children']) > 0)\n \n def include_top(self):\n if self.canUseScrollPane() and self.canManage():\n return True\n return getattr(self.data, 'includeTop', self.properties and self.properties.getProperty('includeTop', None) or None)\n\n @memoize\n def canUseContextualMenu(self):\n if getattr(self.data, 'useContextualMenu', False):\n return self.allowedToUseContextualMenu()\n\n def allowedToUseContextualMenu(self):\n user = self.portal_state.member()\n for role in getattr(self.data, 'allowedRolesToUseContextualMenu', []):\n if user.has_role(role):\n return True\n return False\n\n @memoize\n def canUseScrollPane(self):\n if getattr(self.data, 'useScrollPane', False):\n return self.allowedToUseScrollPane()\n return False\n\n def allowedToUseScrollPane(self):\n roles = getattr(self.data, 'allowedRolesToUseScrollPane', [])\n user = self.portal_state.member()\n if not user and 'Anonymous' in roles:\n return True\n elif user and 'Authenticated' in roles:\n return True\n for role in roles:\n if user.has_role(role):\n return True\n return False\n\n def navigation_root(self):\n return self.getNavRoot()\n\n def root_type_name(self):\n root = self.getNavRoot()\n return queryUtility(IIDNormalizer).normalize(root.portal_type)\n\n def root_item_class(self):\n context = aq_inner(self.context)\n root = self.getNavRoot()\n isDefaultPage = utils.isDefaultPage(context, self.request) \n if (aq_base(root) is aq_base(context) or\n (aq_base(root) is aq_base(aq_parent(aq_inner(context))) and isDefaultPage)):\n return 'navTreeCurrentItem'\n else:\n return ''\n \n def root_icon(self):\n ploneview = getMultiAdapter((self.context, self.request), name=u'plone')\n icon = 
ploneview.getIcon(self.getNavRoot())\n return icon.url\n \n def root_is_portal(self):\n root = self.getNavRoot()\n return aq_base(root) is aq_base(self.urltool.getPortalObject())\n\n @memoize\n def getNavRoot(self, _marker=[]):\n portal = self.portal_state.portal()\n currentFolderOnly = self.data.currentFolderOnly or self.properties and self.properties.getProperty('currentFolderOnlyInNavtree', False) or False\n topLevel = self.data.topLevel or self.properties and self.properties.getProperty('topLevel', 0) or 0\n if self.canUseScrollPane() and self.canManage():\n topLevel = 0\n rootPath = getRootPath(self.context, currentFolderOnly, topLevel, self.data.root)\n \n if rootPath == self.urltool.getPortalPath():\n return portal\n else:\n try:\n return portal.unrestrictedTraverse(rootPath)\n except (AttributeError, KeyError,):\n return portal\n\n def createNavTree(self, context=None):\n datas = self.getNavTree()\n base_childs = datas.copy()\n childs = base_childs.get('children', [])\n\n firstItem = ''\n lastItem = ''\n i = 0\n hasChild = False\n for child in childs:\n if child.get('children', None):\n hasChild = True\n\n if childs:\n baseChildren = self.getNavTree().get('children', [])\n firstItem = baseChildren and baseChildren[0]['getURL'] or None\n lastChild = childs[-1]\n if not lastChild.get('children'):\n lastItem = lastChild['getURL']\n else:\n while lastChild.get('children'):\n lastChild = lastChild.get('children')[-1]\n lastItem = lastChild['getURL']\n\n bottomLevel = self.data.bottomLevel or self.properties and self.properties.getProperty('bottomLevel', 0) or 0\n return self.recurse(children=self.getNavTree().get('children', []), level=1, bottomLevel=bottomLevel, firstItem=firstItem, lastItem=lastItem, childs=str(childs))\n\n def canManage(self):\n user = self.portal_state.member()\n return user and user.has_permission('List folder contents', self.context)\n\n# @memoize\n def getNavTree(self, _marker=[]):\n context = aq_inner(self.context)\n canUseScrollPane = self.canUseScrollPane()\n if not canUseScrollPane or ( canUseScrollPane and not self.canManage()):\n queryBuilder = getMultiAdapter((context, self.data), INavigationQueryBuilder)\n strategy = getMultiAdapter((context, self.data), INavtreeStrategy)\n return buildFolderTree(context, obj=context, query=queryBuilder(), strategy=strategy)\n\n parent = aq_parent(context)\n if parent:\n meta_type = getattr(aq_base(parent), 'meta_type', '')\n if meta_type == 'TempFolder':\n context = aq_parent(aq_parent(parent))\n\n queryBuilder = getMultiAdapter((context, self.data), ISManagerNavigationQueryBuilder)\n strategy = getMultiAdapter((context, self.data), ISManagerNavtreeStrategy)\n\n return buildFolderTreeCustom(context, self.request, obj=context, query=queryBuilder(), strategy=strategy)\n\n def isMember(self):\n return not self.portal_state.anonymous()\n\n def update(self):\n pass\n\n def render(self):\n if getFSVersionTuple()[0] == 4:\n return xhtml_compress(self._template_old())\n return xhtml_compress(self._template_p5())\n\n def recurse(self, children=[], level=None, bottomLevel=0, firstItem='', lastItem='', childs=''):\n if getFSVersionTuple()[0] == 4:\n return xhtml_compress(self._recurse_old(children=children, level=level, bottomLevel=bottomLevel, firstItem=firstItem, lastItem=lastItem, childs=childs, include_top=self.include_top()))\n return xhtml_compress(self._recurse_p5(children=children, level=level, bottomLevel=bottomLevel, firstItem=firstItem, lastItem=lastItem, childs=childs, include_top=self.include_top()))\n\nclass 
AddForm(base.AddForm):\n schema = ISolgemaNavigationPortlet\n form_fields = form.Fields(ISolgemaNavigationPortlet)\n if form_fields.get('root'):\n form_fields['root'].custom_widget = UberSelectionWidget\n label = _pmf(u\"Add Navigation Portlet\")\n description = _pmf(u\"This portlet display a navigation tree.\")\n\n def create(self, data):\n if data.get('root'):\n return Assignment(name=data.get('name', u\"\"),\n root=data.get('root', u\"\"),\n currentFolderOnly=data.get('currentFolderOnly', False),\n includeTop=data.get('includeTop', False),\n topLevel=data.get('topLevel', 0),\n bottomLevel=data.get('bottomLevel', 0),\n useScrollPane=data.get('useScrollPane', True),\n allowedRolesToUseScrollPane=data.get('allowedRolesToUseScrollPane', True))\n else:\n return Assignment(name=data.get('name', u\"\"),\n root=data.get('root_uid', u\"\"),\n currentFolderOnly=data.get('currentFolderOnly', False),\n includeTop=data.get('includeTop', False),\n topLevel=data.get('topLevel', 0),\n bottomLevel=data.get('bottomLevel', 0),\n useScrollPane=data.get('useScrollPane', True),\n allowedRolesToUseScrollPane=data.get('allowedRolesToUseScrollPane', True))\n\nclass EditForm(base.EditForm):\n schema = ISolgemaNavigationPortlet\n \n form_fields = form.Fields(ISolgemaNavigationPortlet)\n if form_fields.get('root'):\n form_fields['root'].custom_widget = UberSelectionWidget\n label = _pmf(u\"Edit Navigation Portlet\")\n description = _pmf(u\"This portlet display a navigation tree.\")\n\nclass QueryBuilder(object):\n \"\"\"Build a navtree query based on the settings in navtree_properties\n and those set on the portlet.\n \"\"\"\n implements(ISNavigationQueryBuilder)\n adapts(Interface, ISolgemaNavigationPortlet)\n\n def __init__(self, context, portlet):\n self.context = context\n self.portlet = portlet\n\n portal_url = getToolByName(context, 'portal_url')\n portal = portal_url.getPortalObject()\n portal_properties = getToolByName(context, 'portal_properties')\n navtree_properties = getattr(portal_properties, 'navtree_properties')\n pm = getToolByName(portal,'portal_membership')\n user = pm.getAuthenticatedMember()\n \n # Acquire a custom nav query if available\n customQuery = getattr(context, 'getCustomNavQuery', None)\n if customQuery is not None and utils.safe_callable(customQuery):\n query = customQuery()\n else:\n query = {}\n\n # Construct the path query\n if hasattr(portlet, 'root'):\n root = portlet.root\n else:\n root = uuidToObject(portlet.root_uid)\n rootPath = getNavigationRoot(context, relativeRoot=root)\n currentPath = '/'.join(context.getPhysicalPath())\n\n # If we are above the navigation root, a navtree query would return\n # nothing (since we explicitly start from the root always). 
Hence,\n # use a regular depth-1 query in this case.\n\n query['path'] = {'query' : rootPath, 'depth' : 2}\n\n topLevel = portlet.topLevel or navtree_properties.getProperty('topLevel', 0)\n if topLevel and topLevel > 0:\n query['path']['navtree_start'] = topLevel + 1\n\n # XXX: It'd make sense to use 'depth' for bottomLevel, but it doesn't\n # seem to work with EPI.\n\n # Only list the applicable types\n query['portal_type'] = utils.typesToList(context)\n\n # Apply the desired sort\n sortAttribute = navtree_properties.getProperty('sortAttribute', None)\n if sortAttribute is not None:\n query['sort_on'] = sortAttribute\n sortOrder = navtree_properties.getProperty('sortOrder', None)\n if sortOrder is not None:\n query['sort_order'] = sortOrder\n\n # Filter on workflow states, if enabled\n if not user or not user.has_permission('List folder contents', self.context):\n if navtree_properties.getProperty('enable_wf_state_filtering', False):\n query['review_state'] = navtree_properties.getProperty('wf_states_to_show', ())\n\n self.query = query\n\n def __call__(self):\n return self.query\n\nclass ManagerQueryBuilder(object):\n \"\"\"Build a navtree query based on the settings in navtree_properties\n and those set on the portlet.\n \"\"\"\n implements(ISManagerNavigationQueryBuilder)\n adapts(Interface, ISolgemaNavigationPortlet)\n\n def __init__(self, context, portlet):\n self.context = context\n self.portlet = portlet\n\n portal_properties = getToolByName(context, 'portal_properties')\n navtree_properties = getattr(portal_properties, 'navtree_properties')\n\n portal_url = getToolByName(context, 'portal_url')\n pm = getToolByName(context,'portal_membership')\n user = pm.getAuthenticatedMember()\n\n customQuery = getattr(context, 'getCustomNavQuery', None)\n if customQuery is not None and utils.safe_callable(customQuery):\n query = customQuery()\n else:\n query = {}\n if hasattr(portlet, 'root'):\n root = portlet.root\n else:\n root = uuidToObject(portlet.root_uid)\n rootPath = getNavigationRoot(context, relativeRoot=root)\n currentPath = '/'.join(context.getPhysicalPath())\n\n query['path'] = {'query' : currentPath, 'navtree' : 1, 'navtree_start':0}\n\n sortAttribute = navtree_properties.getProperty('sortAttribute', None)\n if sortAttribute is not None:\n query['sort_on'] = sortAttribute\n sortOrder = navtree_properties.getProperty('sortOrder', None)\n if sortOrder is not None:\n query['sort_order'] = sortOrder\n\n # Filter on workflow states, if enabled\n if not user or not user.has_permission('List folder contents', self.context):\n if navtree_properties.getProperty('enable_wf_state_filtering', False):\n query['review_state'] = navtree_properties.getProperty('wf_states_to_show', ())\n\n ploneUtils = getToolByName(self.context, 'plone_utils')\n friendlyTypes = ploneUtils.getUserFriendlyTypes()\n if 'MemberDataContainer' in friendlyTypes:\n friendlyTypes.remove('MemberDataContainer')\n query['portal_type'] = friendlyTypes\n query['is_default_page'] = [False, True]\n self.query = query\n\n def __call__(self):\n return self.query\n \nclass NavtreeStrategy(SitemapNavtreeStrategy):\n \"\"\"The navtree strategy used for the default navigation portlet\n \"\"\"\n implements(INavtreeStrategy)\n adapts(Interface, ISolgemaNavigationPortlet)\n\n def __init__(self, context, portlet):\n SitemapNavtreeStrategy.__init__(self, context, portlet)\n portal_properties = getToolByName(context, 'portal_properties')\n navtree_properties = getattr(portal_properties, 'navtree_properties')\n \n # XXX: We can't do this 
with a 'depth' query to EPI...\n self.bottomLevel = portlet.bottomLevel or navtree_properties.getProperty('bottomLevel', 0)\n\n currentFolderOnly = portlet.currentFolderOnly or navtree_properties.getProperty('currentFolderOnlyInNavtree', False)\n topLevel = portlet.topLevel or navtree_properties.getProperty('topLevel', 0)\n if hasattr(portlet, 'root'):\n root = portlet.root\n else:\n root = uuidToObject(portlet.root_uid)\n self.rootPath = getRootPath(context, currentFolderOnly, topLevel, root)\n\n def subtreeFilter(self, node):\n sitemapDecision = SitemapNavtreeStrategy.subtreeFilter(self, node)\n if sitemapDecision == False:\n return False\n depth = node.get('depth', 0)\n if depth > 0 and self.bottomLevel > 0 and depth >= self.bottomLevel:\n return False\n else:\n return True\n\nclass NavtreeItemStrategy(SitemapNavtreeStrategy):\n \"\"\"The navtree strategy used for the default navigation portlet\n \"\"\"\n implements(INavtreeItemStrategy)\n adapts(Interface, ISolgemaNavigationPortlet)\n\n def __init__(self, context, portlet):\n SitemapNavtreeStrategy.__init__(self, context, portlet)\n self.bottomLevel = 0\n currentFolderOnly = True\n topLevel = 0\n root = getattr(portlet, 'root', uuidToObject(portlet.root_uid))\n self.rootPath = getRootPath(context, currentFolderOnly, topLevel, root)\n\nclass ManagerNavtreeStrategy(SitemapNavtreeStrategy):\n \"\"\"The navtree strategy used for the default navigation portlet\n \"\"\"\n implements(ISManagerNavtreeStrategy)\n adapts(Interface, ISolgemaNavigationPortlet)\n\n def __init__(self, context, portlet):\n SitemapNavtreeStrategy.__init__(self, context, portlet)\n self.bottomLevel = 0\n currentFolderOnly = False\n topLevel = 0\n if hasattr(portlet, 'root'):\n root = portlet.root\n else:\n root = uuidToObject(portlet.root_uid)\n self.rootPath = getRootPath(context, currentFolderOnly, topLevel, root)\n\n def subtreeFilter(self, node):\n return True\n\n def decoratorFactory(self, node):\n context = aq_inner(self.context)\n portal_url = getToolByName(context, 'portal_url')\n portal = portal_url.getPortalObject()\n request = context.REQUEST\n \n newNode = node.copy()\n item = node['item']\n\n portalType = getattr(item, 'portal_type', None)\n itemUrl = item.getURL()\n if portalType is not None and portalType in self.viewActionTypes:\n itemUrl += '/view'\n\n useRemoteUrl = False\n getRemoteUrl = getattr(item, 'getRemoteUrl', None)\n isCreator = self.memberId == getattr(item, 'Creator', None)\n if getRemoteUrl and not isCreator:\n useRemoteUrl = True\n\n isFolderish = getattr(item, 'is_folderish', None)\n showChildren = False\n if isFolderish and (portalType is None or portalType not in self.parentTypesNQ):\n showChildren = True\n\n ploneview = getMultiAdapter((portal, request), name=u'plone')\n newNode['Title'] = utils.pretty_title_or_id(context, item)\n newNode['id'] = item.getId\n newNode['UID'] = item.UID\n newNode['absolute_url'] = itemUrl\n newNode['getURL'] = itemUrl\n newNode['path'] = item.getPath()\n newNode['item_icon'] = ploneview.getIcon(item)\n newNode['Creator'] = getattr(item, 'Creator', None)\n newNode['creation_date'] = getattr(item, 'CreationDate', None)\n newNode['portal_type'] = portalType\n newNode['review_state'] = getattr(item, 'review_state', None)\n newNode['Description'] = getattr(item, 'Description', None)\n newNode['show_children'] = showChildren\n newNode['no_display'] = False # We sort this out with the nodeFilter\n # BBB getRemoteUrl and link_remote are deprecated, remove in Plone 4\n newNode['getRemoteUrl'] = getattr(item, 
'getRemoteUrl', None)\n newNode['useRemoteUrl'] = useRemoteUrl\n newNode['link_remote'] = newNode['getRemoteUrl'] and newNode['Creator'] != self.memberId\n\n idnormalizer = queryUtility(IIDNormalizer)\n newNode['normalized_portal_type'] = idnormalizer.normalize(portalType)\n newNode['normalized_review_state'] = idnormalizer.normalize(newNode['review_state'])\n newNode['normalized_id'] = idnormalizer.normalize(newNode['id'])\n newNode['is_default_page'] = getattr(item, 'is_default_page', None)\n newNode['exclude_from_nav'] = getattr(item, 'exclude_from_nav', None)\n return newNode\n\nclass ManagerContentNavtreeStrategy(ManagerNavtreeStrategy):\n \"\"\"The navtree strategy used for the default navigation portlet\n \"\"\"\n implements(ISManagerContentNavtreeStrategy)\n adapts(Interface, ISolgemaNavigationPortlet)\n\n def __init__(self, context, portlet):\n SitemapNavtreeStrategy.__init__(self, context, portlet)\n currentFolderOnly = True\n topLevel = 0\n if hasattr(portlet, 'root'):\n root = portlet.root\n else:\n root = uuidToObject(portlet.root_uid)\n self.rootPath = getRootPath(context, currentFolderOnly, topLevel, root)\n \ndef getRootPath(context, currentFolderOnly, topLevel, root):\n \"\"\"Helper function to calculate the real root path\n \"\"\"\n context = aq_inner(context)\n if currentFolderOnly:\n folderish = getattr(aq_base(context), 'isPrincipiaFolderish', False) and not INonStructuralFolder.providedBy(context)\n parent = aq_parent(context)\n \n is_default_page = False\n browser_default = IBrowserDefault(parent, None)\n if browser_default is not None:\n is_default_page = (browser_default.getDefaultPage() == context.getId())\n \n if not folderish:\n return '/'.join(parent.getPhysicalPath())\n else:\n return '/'.join(context.getPhysicalPath())\n\n rootPath = getNavigationRoot(context, relativeRoot=root)\n\n # Adjust for topLevel\n if topLevel > 0:\n contextPath = '/'.join(context.getPhysicalPath())\n if not contextPath.startswith(rootPath):\n return None\n contextSubPathElements = contextPath[len(rootPath)+1:]\n if contextSubPathElements:\n contextSubPathElements = contextSubPathElements.split('/')\n if len(contextSubPathElements) < topLevel:\n return None\n rootPath = rootPath + '/' + '/'.join(contextSubPathElements[:topLevel])\n else:\n return None\n \n return rootPath\n \n","repo_name":"collective/Solgema.NavigationPortlet","sub_path":"Solgema/NavigationPortlet/portlets/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":38346,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"45816583722","text":"import numpy as np \nimport cv2\nimport pickle\nimport time\nimport os\n\ncapture = cv2.VideoCapture(0)\ncurr_dir = os.path.dirname(os.path.abspath(__file__))\nface_cascade = cv2.CascadeClassifier(str(curr_dir+'/cascades/data/haarcascade_frontalface_default.xml'))\npsc = cv2.CascadeClassifier(str(curr_dir+'/cascades/data/haarcascade_profileface.xml'))\nface_recognizer = cv2.face.LBPHFaceRecognizer_create()\nface_recognizer.read(str(curr_dir+\"/trained_faces.yml\"))\n\nPERSON_CONF = {}\ntot_faces=0\n\nfaceid_to_name = {}\ndef setup():\n global faceid_to_name\n with open(str(curr_dir+\"/face_labels.pickle\"),\"rb\") as fr:\n name_to_faceid = pickle.load(fr)\n faceid_to_name = {v:u for u,v in name_to_faceid.items()}\ndef detect():\n global faceid_to_name\n global tot_faces\n global PERSON_CONF\n start_time = time.time()\n while True:\n if(time.time()-start_time>=5):\n capture.release()\n break\n ret, 
frame = capture.read()\n frame=cv2.flip(frame,1)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces_found = face_cascade.detectMultiScale(gray, scaleFactor=1.4, minNeighbors=5)\n pface = psc.detectMultiScale(gray,scaleFactor=1.4,minNeighbors=5)\n\n for (x,y,w,h) in pface:\n print(x,y,w,h)\n #roi=region of interest\n roi_gray = gray[y:y+h, x:x+w]\n # img_detected = \"my-img.png\"\n # cv2.imwrite(img_detected, roi_gray)\n\n face_id,confidence = face_recognizer.predict(roi_gray)\n print(faceid_to_name[face_id])\n\n color=(0,255,255)\n stroke=1\n cv2.rectangle(frame,(x,y),(x+w,y+h), color,stroke)\n if confidence>50:\n tot_faces+=1\n if face_id not in PERSON_CONF:\n PERSON_CONF[face_id]=confidence\n else:\n PERSON_CONF[face_id]+=confidence\n cv2.putText(frame,faceid_to_name[face_id],(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(125,125,0),2,cv2.LINE_AA)\n cv2.putText(frame,str(round(confidence,2)),(x,y+h),cv2.FONT_HERSHEY_SIMPLEX,1,(125,125,0),2,cv2.LINE_AA)\n\n\n for (x,y,w,h) in faces_found:\n print(x,y,w,h)\n #roi=region of interest\n roi_gray = gray[y:y+h, x:x+w]\n # img_detected = \"my-img.png\"\n # cv2.imwrite(img_detected, roi_gray)\n\n face_id,confidence = face_recognizer.predict(roi_gray)\n print(faceid_to_name[face_id])\n\n color=(0,0,255)\n stroke=2\n cv2.rectangle(frame,(x,y),(x+w,y+h), color,stroke)\n if confidence>50:\n tot_faces+=1\n if face_id not in PERSON_CONF:\n PERSON_CONF[face_id]=confidence\n else:\n PERSON_CONF[face_id]+=confidence\n cv2.putText(frame,faceid_to_name[face_id],(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(125,125,0),2,cv2.LINE_AA)\n cv2.putText(frame,str(round(confidence,2)),(x,y+h),cv2.FONT_HERSHEY_SIMPLEX,1,(125,125,0),2,cv2.LINE_AA)\n cv2.imshow('Frame',frame)\n if cv2.waitKey(20) & 0xFF == ord('q'):\n capture.release()\n break\n\n capture.release()\n cv2.destroyAllWindows()\ndef authenticate(person):\n setup()\n detect()\n high_conf=-1\n high_id=-1\n if tot_faces==0:\n return False\n for x in PERSON_CONF:\n if PERSON_CONF[x]>high_conf:\n high_conf=PERSON_CONF[x]\n high_id=x\n high_conf/=tot_faces\n if high_conf>50 and person.lower()==faceid_to_name[high_id].lower():\n return True\n else:\n return False\n\n \n# setup()\n# detect()","repo_name":"yash-brahmkshatriya/Login-Automation","sub_path":"Image_Recog/detect_faces.py","file_name":"detect_faces.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30012944095","text":"import pandas as pd\nimport json\nfrom pandas.io.json import json_normalize\nimport sys\nfrom matplotlib.ticker import FormatStrFormatter\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nlinestyles = ['D-', 's-', 'v-', 'o-', '*-', 'x-']\n\nplot_kwargs = {\n 'style':linestyles,\n 'markevery':1,\n 'markersize':4\n}\n\ndef plot_1(frame):\n pl1 = frame.pivot_table(index=\"PercentDFTasks\", values=\"TimeToFailure\", columns=\"NZones\").plot(**plot_kwargs)\n pl1.set(xlabel=\"% Multizone Tasks\", ylabel=\"Time to Violation (ns)\")\n pl1.legend(title='# Available Zones')\n\ndef bar_plot(frame):\n pl2 = frame.pivot_table(index=\"PercentDFTasks\", values=\"RunsBeforeFailure\", columns=\"NZones\").plot(kind='bar')\n pl2.legend(loc=1, ncol=4, title=\"# Available Zones\")\n pl2.set_xticklabels([\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\", \"0.6\", \"0.7\", \"0.8\", \"0.9\", \"1.0\"])\n pl2.set(xlabel=\"% Multizone Tasks\", ylabel=\"# Invocations before violation\")\n\n\ndef main():\n frame = 
pd.read_csv(sys.argv[1])\n if len(sys.argv) > 2 and sys.argv[2] == \"pdf\":\n with PdfPages('ttf.pdf') as pdf:\n plot_1(frame)\n pdf.savefig(bbox_inches='tight')\n plt.close()\n bar_plot(frame)\n pdf.savefig(bbox_inches='tight')\n plt.close()\n else:\n plot_1(frame)\n bar_plot(frame)\n plt.show()\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Gradecak/benchmarking","sub_path":"crunchers/ttf_graphs.py","file_name":"ttf_graphs.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43329030515","text":"from L5v3.controller.CustomerController import search_customer\nfrom L5v3.models.Customer import Customer\nfrom L5v3.models.Dish import Dish\nfrom L5v3.models.Drink import Drink\nfrom L5v3.models.Order import Order\n\n\ndef add_order(orders: list[Order], dishes: list[Dish], drinks: list[Drink], customers: list[Customer]):\n \"\"\"\n This function manages everything about orders\n :param orders: A list with all the orders\n :param dishes: A list with all the dishes\n :param drinks: A list with all the drinks\n :param customers: A list with all the customers\n \"\"\"\n\n customer = search_customer(customers)\n\n if customer is None:\n print(\"Add your customer to the list, first\")\n return\n\n dish_IDs = []\n drink_IDs = []\n\n while True:\n print(\"Press 1 to add a dish\")\n print(\"Press 2 to add a drink\")\n print(\"Press 3 to finish the order\")\n print(\"Press 4 to see the current items in the order\")\n print(\"Press 5 to exit\")\n\n option = int(input(\"Your option: \"))\n\n if option == 1:\n for i in range(len(dishes)):\n print(f\"Index: {i}, Dish: {str(dishes[i])}\")\n\n try:\n\n\n option = int(input(\"Choose the index of your desired drink that you want to update \"))\n\n if option not in range(len(dishes)):\n continue\n\n dish_IDs.append(dishes[option].id)\n except:\n continue\n elif option == 2:\n for i in range(len(drinks)):\n print(f\"Index: {i}, Drink: {str(drinks[i])}\")\n\n try:\n\n\n option = int(input(\"Choose the index of your desired drink that you want to update \"))\n\n if option not in range(len(drinks)):\n continue\n\n drink_IDs.append(drinks[option].id)\n except:\n continue\n\n elif option == 3:\n break\n elif option == 4:\n print(dish_IDs)\n print(drink_IDs)\n elif option == 5:\n return\n else:\n pass\n\n order = Order(customer_id=customer.id, drinks_ids=drink_IDs, dish_ids=dish_IDs)\n orders.append(order)\n print(order)\n\n order.show_bill(dishes, drinks)\n print(f\"The estimated time for your order is: {order.generate_estimated_wait_time(dishes)}\")\n","repo_name":"LucaTheSorcerer/PythonUniProjects","sub_path":"L5v3/controller/OrderController.py","file_name":"OrderController.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9088606852","text":"def min_heapify(arr,n,i):\n smallest = i\n l = 2*i +1\n r = 2*i +2\n\n if l None:\n self._sim_config = sim_config\n self._cfg = sim_config.config\n self._task_cfg = sim_config.task_config\n\n self._num_envs = self._task_cfg[\"env\"][\"numEnvs\"]\n self._env_spacing = self._task_cfg[\"env\"][\"envSpacing\"]\n self._max_episode_length = self._task_cfg[\"env\"][\"maxEpisodeLength\"]\n\n self.dt = self._task_cfg[\"sim\"][\"dt\"]\n\n self._num_observations = 21\n self._num_actions = 12\n\n self._copter_position = torch.tensor([0, 0, 1.0])\n\n RLTask.__init__(self, name=name, env=env)\n\n max_thrust = 2.0\n 
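Editor's note: the min_heapify record a few entries above is cut off mid-comparison in this dump and is left as-is. For reference, a standard complete sift-down plus bottom-up heap construction — this is the textbook algorithm, not necessarily the original author's exact code:

def min_heapify(arr, n, i):
    # Sift arr[i] down within the first n elements so the subtree
    # rooted at i satisfies the min-heap property.
    smallest = i
    l, r = 2 * i + 1, 2 * i + 2
    if l < n and arr[l] < arr[smallest]:
        smallest = l
    if r < n and arr[r] < arr[smallest]:
        smallest = r
    if smallest != i:
        arr[i], arr[smallest] = arr[smallest], arr[i]
        min_heapify(arr, n, smallest)

def build_min_heap(arr):
    # Heapify every internal node, bottom-up: O(n) overall.
    for i in range(len(arr) // 2 - 1, -1, -1):
        min_heapify(arr, len(arr), i)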
self.thrust_lower_limits = -max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)\n self.thrust_upper_limits = max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)\n\n self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)\n\n return\n\n def set_up_scene(self, scene) -> None:\n self.get_copter()\n self.get_target()\n RLTask.set_up_scene(self, scene)\n self._copters = QuadcopterView(prim_paths_expr=\"/World/envs/.*/Quadcopter\", name=\"quadcopter_view\")\n self._balls = RigidPrimView(prim_paths_expr=\"/World/envs/.*/ball\", name=\"targets_view\", reset_xform_properties=False)\n scene.add(self._copters)\n scene.add(self._copters.rotors)\n scene.add(self._balls)\n return\n\n def get_copter(self):\n copter = Quadcopter(prim_path=self.default_zero_env_path + \"/Quadcopter\", name=\"quadcopter\", translation=self._copter_position)\n self._sim_config.apply_articulation_settings(\"copter\", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config(\"copter\"))\n \n def get_target(self):\n radius = 0.05\n color = torch.tensor([1, 0, 0])\n ball = DynamicSphere(\n prim_path=self.default_zero_env_path + \"/ball\", \n name=\"target_0\",\n radius=radius,\n color=color,\n )\n self._sim_config.apply_articulation_settings(\"ball\", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config(\"ball\"))\n ball.set_collision_enabled(False)\n\n def get_observations(self) -> dict:\n self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)\n self.root_velocities = self._copters.get_velocities(clone=False)\n self.dof_pos = self._copters.get_joint_positions(clone=False)\n\n root_positions = self.root_pos - self._env_pos\n root_quats = self.root_rot\n root_linvels = self.root_velocities[:, :3]\n root_angvels = self.root_velocities[:, 3:]\n\n self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3\n self.obs_buf[..., 3:7] = root_quats\n self.obs_buf[..., 7:10] = root_linvels / 2\n self.obs_buf[..., 10:13] = root_angvels / math.pi\n self.obs_buf[..., 13:21] = self.dof_pos\n\n observations = {\n self._copters.name: {\n \"obs_buf\": self.obs_buf\n }\n }\n return observations\n\n def pre_physics_step(self, actions) -> None:\n reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n if len(reset_env_ids) > 0:\n self.reset_idx(reset_env_ids)\n\n actions = actions.clone().to(self._device)\n\n dof_action_speed_scale = 8 * math.pi\n self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8]\n self.dof_position_targets[:] = tensor_clamp(self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits)\n\n thrust_action_speed_scale = 100\n self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12]\n self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits)\n\n self.forces[:, 0, 2] = self.thrusts[:, 0]\n self.forces[:, 1, 2] = self.thrusts[:, 1]\n self.forces[:, 2, 2] = self.thrusts[:, 2]\n self.forces[:, 3, 2] = self.thrusts[:, 3]\n\n # clear actions for reset envs\n self.thrusts[reset_env_ids] = 0.0\n self.forces[reset_env_ids] = 0.0\n self.dof_position_targets[reset_env_ids] = self.dof_pos[reset_env_ids]\n\n _, rotors_quat = self._copters.rotors.get_world_poses(clone=False)\n rotors_quat = rotors_quat.reshape(self._num_envs, 4, 4)\n\n for i in range(4):\n self.forces_world_frame[:, i, :] = quat_apply(rotors_quat[:, i, :], self.forces[:, i, :])\n\n # apply actions\n 
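Editor's note: pre_physics_step in the quadcopter record treats the policy's actions as rates — it integrates them over dt into position/thrust targets and then clamps to hard limits via tensor_clamp. The same pattern in a dependency-free sketch (NumPy stands in for the torch/tensor_clamp calls; the scales and limits below are illustrative, echoing the max_thrust of 2.0 above):

import numpy as np

def integrate_and_clamp(targets, actions, dt, speed_scale, lower, upper):
    # Interpret actions as rates: step the targets, then saturate.
    targets = targets + dt * speed_scale * actions
    return np.clip(targets, lower, upper)

# Example: four thrusts limited to [-2, 2].
thrusts = np.zeros(4)
thrusts = integrate_and_clamp(thrusts, np.array([1.0, -0.5, 0.2, 0.0]),
                              dt=0.01, speed_scale=100, lower=-2.0, upper=2.0)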
self._copters.set_joint_position_targets(self.dof_position_targets) \n self._copters.rotors.apply_forces(self.forces_world_frame)\n\n def post_reset(self):\n # control tensors\n self.dof_position_targets = torch.zeros((self._num_envs, self._copters.num_dof), dtype=torch.float32, device=self._device, requires_grad=False)\n self.thrusts = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device, requires_grad=False)\n self.forces = torch.zeros((self._num_envs, self._copters.rotors.count // self._num_envs, 3), dtype=torch.float32, device=self._device, requires_grad=False)\n self.forces_world_frame = torch.zeros((self._num_envs, self._copters.rotors.count // self._num_envs, 3), dtype=torch.float32, device=self._device, requires_grad=False)\n\n self.target_positions = torch.zeros((self._num_envs, 3), device=self._device)\n self.target_positions[:, 2] = 1.0\n\n self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)\n self.root_velocities = self._copters.get_velocities(clone=False)\n self.dof_pos = self._copters.get_joint_positions(clone=False)\n self.dof_vel = self._copters.get_joint_velocities(clone=False)\n self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()\n\n dof_limits = self._copters.get_dof_limits()\n self.dof_lower_limits = torch.tensor(dof_limits[0][:, 0], device=self._device)\n self.dof_upper_limits = torch.tensor(dof_limits[0][:, 1], device=self._device)\n\n def reset_idx(self, env_ids):\n num_resets = len(env_ids)\n\n self.dof_pos[env_ids, :] = torch_rand_float(-0.2, 0.2, (num_resets, self._copters.num_dof), device=self._device)\n self.dof_vel[env_ids, :] = 0\n\n root_pos = self.initial_root_pos.clone()\n root_pos[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)\n root_pos[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)\n root_pos[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), device=self._device).view(-1)\n root_velocities = self.root_velocities.clone()\n root_velocities[env_ids] = 0\n\n # apply resets\n self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)\n self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)\n\n self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)\n self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)\n\n self._balls.set_world_poses(positions=self.target_positions[:, 0:3] + self._env_pos)\n\n # bookkeeping\n self.reset_buf[env_ids] = 0\n self.progress_buf[env_ids] = 0\n\n def calculate_metrics(self) -> None:\n root_positions = self.root_pos - self._env_pos\n root_quats = self.root_rot\n root_angvels = self.root_velocities[:, 3:]\n\n # distance to target\n target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))\n pos_reward = 1.0 / (1.0 + 3*target_dist * target_dist) # 2\n self.target_dist = target_dist\n self.root_positions = root_positions\n\n # uprightness\n ups = quat_axis(root_quats, 2)\n tiltage = torch.abs(1 - ups[..., 2])\n up_reward = 1.0 / (1.0 + 10 * tiltage * tiltage)\n\n # spinning\n spinnage = torch.abs(root_angvels[..., 2])\n spinnage_reward = 1.0 / (1.0 + 0.001 * spinnage * spinnage)\n\n rew = pos_reward + pos_reward * (up_reward + spinnage_reward + spinnage * spinnage * (-1/400)) \n rew = torch.clip(rew, 0.0, None)\n self.rew_buf[:] = rew\n\n def is_done(self) -> None:\n # resets due to misbehavior\n ones = 
torch.ones_like(self.reset_buf)\n die = torch.zeros_like(self.reset_buf)\n die = torch.where(self.target_dist > 3.0, ones, die)\n die = torch.where(self.root_positions[..., 2] < 0.3, ones, die)\n\n # resets due to episode length\n self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)\n ","repo_name":"s82035/AI_s82035_s80423","sub_path":"tasks/quadcopter.py","file_name":"quadcopter.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72113886490","text":"import cv2\nimport numpy as np \n# from PIL import Image\nimport pytesseract\n\ndef process_img(img):\n\tkernel = np.ones((7,7), np.uint8)\n\timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # \n\timg = cv2.GaussianBlur(img, (3,3), 0)\n\tretval, img = cv2.threshold(img, 185, 255, cv2.THRESH_BINARY_INV)\n\timg = cv2.dilate(img, kernel, iterations=1)\n\timg = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\n\t# retval, img = cv2.threshold(img, 80, 255, cv2.THRESH_BINARY)\n\t# cv2.medianBlur(img, 5)\n\t# img = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)\n\t# img = cv2.dilate(img, kernel, iterations=1)\n\t# img = cv2.erode(img, kernel, iterations=2)\n\n\t# kernel = 1/16 * np.array([[1,2,1],[2,4,2],[1,2,1]])\n\t# img = cv2.filter2D(img, -1, kernel)\n\t# retval, img = cv2.threshold(img, 165, 255, cv2.THRESH_BINARY)\n\treturn img\n\ndef adjust_luminance(img, s, b):\n\tw = img.shape[1]\n\th = img.shape[0]\n\ttype(img)\n\tfor xi in range(0, w):\n\t\tfor xj in range(0, h):\n\t\t\tif int(img[xj, xi, 0] * s + b) > 255:\n\t\t\t\timg[xj, xi, 0] = 255 \n\t\t\telif int(img[xj, xi, 0] * s + b) < 0:\n\t\t\t\timg[xj, xi, 0] = 0 \n\t\t\telse:\n\t\t\t\timg[xj, xi, 0] = int(img[xj, xi, 0] * s + b)\n\n\t\t\tif int(img[xj, xi, 1] * s + b) > 255:\n\t\t\t\timg[xj, xi, 1] = 255 \n\t\t\telif int(img[xj, xi, 1] * s + b) < 0:\n\t\t\t\timg[xj, xi, 1] = 0 \n\t\t\telse:\n\t\t\t\timg[xj, xi, 1] = int(img[xj, xi, 1] * s + b)\n\n\t\t\tif int(img[xj, xi, 2] * s + b) > 255:\n\t\t\t\timg[xj, xi, 2] = 255 \n\t\t\telif int(img[xj, xi, 2] * s + b) < 0:\n\t\t\t\timg[xj, xi, 2] = 0 \n\t\t\telse:\n\t\t\t\timg[xj, xi, 2] = int(img[xj, xi, 2] * s + b)\n\treturn img\n\ndef get_rect_box(contours):\n\tws = []\n\tvalid_contours = []\n\tfor contour in contours:\n\t\tx,y,w,h = cv2.boundingRect(contour)\n\t\tif w < 7:\n\t\t\tcontinue\n\t\tvalid_contours.append(contour)\n\n\tresult = []\n\tfor contour in valid_contours:\n\t\tx,y,w,h = cv2.boundingRect(contour)\n\t\tbox = np.int0([[x,y],[x+w,y],[x+w,y+h],[x,y+h]])\n\t\tresult.append(box)\n\n\tresult = sorted(result, key=lambda x: x[0][0])\n\treturn result\n\n\n''' suofang img '''\ndef cropxy_img(img, new_x, new_y):\n\tres = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)\n\treturn res\n\ndef cropwh_img(img, new_w, new_h):\n\tres = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)\n\treturn res\n\n''' xuanzhuan img '''\ndef rotate_img(img, rotate_angle):\n\trows, cols = img.shape[0], img.shape[1]\n\tM = cv2.getRotationMatrix2D((cols/2, rows/2), rotate_angle, 1)\n\tdst = cv2.warpAffine(img, M, (rows, cols))\n\treturn dst\n\ndef get_box_xy_and_wh(box):\n\tx, y = box[0][0], box[0][1]\n\tw = box[2][0] - box[0][0]\n\th = box[2][1] - box[0][1]\n\treturn x, y, w, h\n\ndef get_rectimg_from_img(img, box):\n\tx, y, w, h = get_box_xy_and_wh(box)\n\treturn img[y: y+h, x:x+w]\n\n#----------------------------------------\ndef get_IDnum(filename):\n\timg = cv2.imread(filename)\n\timg1 = 
process_img(img)\n\timg2, contours, hierarchy = cv2.findContours(img1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\tboxes = get_rect_box(contours)\n\tfor box in boxes:\n\t\tcv2.drawContours(img, [box], 0, (0,0,255), 2)\n\t\tx, y, w, h = get_box_xy_and_wh(box)\n\t\tif w > h*6:\n\t\t\tresultimg = get_rectimg_from_img(img, box)\n\t\t\ttext = pytesseract.image_to_string(resultimg, lang='eng')\n\t\t\tif text[-1] in \"0123456789\":\n\t\t\t\treturn text\n\t\t\telse:\n\t\t\t\treturn text[0:-1]+'X'\n\n#----------------------------------------\nif __name__ == '__main__':\n\t\t\n\t# img = cv2.imread(\"./image/reverseid.jpg\")\n\t# img = cv2.imread(\"./image/dd.jpg\")\n\t# img = cv2.imread(\"./image/ret.jpg\")\n\timg = cv2.imread(\"./image/positiveid.jpg\")\n\t# img = cv2.imread(\"./image/mez1.jpg\")\n\t# img = rotate_img(img, 90)\n\t# img = cropwh_img(img, 475, 297)\n\t# img = cv2.resize(img, (475, 297), interpolation=cv2.INTER_CUBIC)\n\t# # img = cv2.resize(img, None, fx=0.3, fy=0.3, interpolation=cv2.INTER_CUBIC)\n\tprint(img.shape)\n\t# img = adjust_luminance(img, 1.2, 10)\n\t# img = adjust_luminance(img, 1.2, 0)\n\t# img = adjust_luminance(img, 0.8, 0)\n\timg1 = process_img(img)\n\tcv2.imshow(\"img1\", img1)\n\timg2, contours, hierarchy = cv2.findContours(img1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n\tboxes = get_rect_box(contours)\n\t# \n\tfor box in boxes:\n\t\tcv2.drawContours(img, [box], 0, (0,255,0), 1)\n\t\tx, y, w, h = get_box_xy_and_wh(box)\n\t\tif w > h*6:\n\t\t\tresultimg = get_rectimg_from_img(img, box)\n\t\t\tcv2.imwrite(\"./image/num.jpg\", resultimg)\n\t\t\ttext = pytesseract.image_to_string(resultimg, lang='eng')\n\t\t\t# text = pytesseract.image_to_string(Image.open(\"./image/num.jpg\"), lang='eng')\n\t\t\tprint(text)\n\t\t\tcontinue\n\n\t\tif w > 150 and h > 150:\n\t\t\tcv2.imwrite(\"./image/tou.jpg\", get_rectimg_from_img(img, box))\n\t\t\tcontinue\n\n\t\tresultimg = get_rectimg_from_img(img, box)\n\t\ttext = pytesseract.image_to_string(resultimg, lang='chi_sim')\n\t\tprint(text)\n\t\tif not text:\n\t\t\t# print(box)\n\t\t\tnames = \"./image/%d_%d.jpg\" % (box[0][0], box[0][1])\n\t\t\t# cv2.imwrite(names, resultimg)\n\t\t# print(box[1])\n\n\tcv2.imshow(\"img\", img)\n\tcv2.waitKey(0)\n\t# cv2.destoryAllWindows()\n","repo_name":"EricLmy/grpcdemo","sub_path":"server/opencvfun.py","file_name":"opencvfun.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24872547694","text":"import cv2 as cv\nimport numpy as np\nimport helper_functions as hf\nimport os\n\nimage = cv.imread('/home/david/Pictures/llama.jpg')\npadded_image = hf.blurred_padding(image,3)\ncv.imshow(\"padded_image\", padded_image) \ncv.waitKey(0)\n\ngray_image = cv.imread('/home/david/Pictures/llama.jpg', cv.IMREAD_GRAYSCALE)\ngray_padded_image = hf.blurred_padding(gray_image,3)\nkernel = np.array([[-1,0,1],[-2,0,2],[-1,0,1]])\nx_gradient_image = hf.convolution_2D(gray_image,kernel)\ncv.imshow(\"x_gradient_image\", x_gradient_image)\n# cv.imshow(\"gray_padded_image\", gray_padded_image)\ncv.waitKey(0)\n\ncwd = os.getcwd()\nimage_path = cwd + '/blocks.png'\nprint(\"image path\" , image_path)\nblocks_image = cv.imread(image_path)\nprint(type(blocks_image))\ncv.imshow(\"blocks_image\" , blocks_image)\ncv.waitKey(0) 
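Editor's note: the 3x3 kernel in the record above, [[-1,0,1],[-2,0,2],[-1,0,1]], is the standard Sobel operator for horizontal gradients, so the hand-rolled hf.convolution_2D call can be cross-checked against OpenCV's built-in (the image path here is illustrative):

import cv2 as cv

gray = cv.imread('llama.jpg', cv.IMREAD_GRAYSCALE)  # illustrative path
# dx=1, dy=0 with ksize=3 applies the same [[-1,0,1],[-2,0,2],[-1,0,1]] kernel.
grad_x = cv.Sobel(gray, cv.CV_64F, 1, 0, ksize=3)
grad_x = cv.convertScaleAbs(grad_x)  # back to uint8 for display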
","repo_name":"davidcGIThub/computer_vision_assignments","sub_path":"proj1-edges-hough/testing_functions.py","file_name":"testing_functions.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10342238216","text":"#!/usr/bin/env python3\nimport sys\n\nfrom glfw.GLFW import *\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nimport random\nimport time\n\nrandom.seed()\n\n\ndef startup():\n update_viewport(None, 400, 400)\n glClearColor(0.5, 0.5, 0.5, 1.0)\n\ndef shutdown():\n pass\n\ndef render():\n c1 = random.randint(0, 255)\n c2 = random.randint(0, 255)\n c3 = random.randint(0, 255)\n \n d_a = random.randint(-50, 50)\n d_b = random.randint(-50, 50)\n\n\n glClear(GL_COLOR_BUFFER_BIT)\n\n print_rectangle(-50.0, -50.0, 100.0, 100.0, c1, c2, c3, d_a, d_b)\n\n glFlush()\n time.sleep(0.25)\n\n\ndef print_rectangle(x, y, a, b, c1, c2, c3, d_a = 0, d_b = 0):\n glColor3b(c1, c2, c3)\n glBegin(GL_TRIANGLES)\n glVertex2f(x + a + d_a, y + b + d_b)\n glVertex2f(x, y)\n glVertex2f(x + a + d_a, y)\n glEnd()\n\n glColor3b(c1, c2, c3)\n glBegin(GL_TRIANGLES)\n glVertex2f(x + a + d_a, y + b + d_b)\n glVertex2f(x, y + b + d_b)\n glVertex2f(x, y)\n glEnd()\n\n\ndef update_viewport(window, width, height):\n if width == 0:\n width = 1\n if height == 0:\n height = 1\n aspect_ratio = width / height\n\n glMatrixMode(GL_PROJECTION)\n glViewport(0, 0, width, height)\n glLoadIdentity()\n\n if width <= height:\n glOrtho(-100.0, 100.0, -100.0 / aspect_ratio, 100.0 / aspect_ratio,\n 1.0, -1.0)\n else:\n glOrtho(-100.0 * aspect_ratio, 100.0 * aspect_ratio, -100.0, 100.0,\n 1.0, -1.0)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n\ndef main():\n if not glfwInit():\n sys.exit(-1)\n\n window = glfwCreateWindow(400, 400, __file__, None, None)\n if not window:\n glfwTerminate()\n sys.exit(-1)\n\n glfwMakeContextCurrent(window)\n glfwSetFramebufferSizeCallback(window, update_viewport)\n glfwSwapInterval(1)\n\n startup()\n while not glfwWindowShouldClose(window):\n render()\n glfwSwapBuffers(window)\n glfwPollEvents()\n shutdown()\n\n glfwTerminate()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"recelos/GK-lab","sub_path":"lab2/4_0.py","file_name":"4_0.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33889844551","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\n\n# Question 1: Python Program for Topological Sorting\n\nfrom collections import defaultdict\n\nclass Graph:\n def __init__(self, vertices):\n self.graph = defaultdict(list)\n self.V = vertices\n\n def add_edge(self, u, v):\n self.graph[u].append(v)\n\n def topological_sort_util(self, v, visited, stack):\n visited[v] = True\n\n for neighbor in self.graph[v]:\n if visited[neighbor] == False:\n self.topological_sort_util(neighbor, visited, stack)\n\n stack.insert(0, v)\n\n def topological_sort(self):\n visited = [False] * self.V\n stack = []\n\n for i in range(self.V):\n if visited[i] == False:\n self.topological_sort_util(i, visited, stack)\n\n print(\"Topological Sorting:\")\n for vertex in stack:\n print(vertex, end=\" \")\n\n# Example usage\ng = Graph(6)\ng.add_edge(5, 2)\ng.add_edge(5, 0)\ng.add_edge(4, 0)\ng.add_edge(4, 1)\ng.add_edge(2, 3)\ng.add_edge(3, 1)\n\ng.topological_sort()\n\n\n# In[3]:\n\n\n# Question 2: Python Program for Radix Sort\n\ndef 
counting_sort(arr, exp):\n n = len(arr)\n count = [0] * 10\n output = [0] * n\n\n for i in range(n):\n index = arr[i] // exp\n count[index % 10] += 1\n\n for i in range(1, 10):\n count[i] += count[i - 1]\n\n i = n - 1\n while i >= 0:\n index = arr[i] // exp\n output[count[index % 10] - 1] = arr[i]\n count[index % 10] -= 1\n i -= 1\n\n for i in range(n):\n arr[i] = output[i]\n\ndef radix_sort(arr):\n max_value = max(arr)\n exp = 1\n\n while max_value // exp > 0:\n counting_sort(arr, exp)\n exp *= 10\n\n# Example usage\narr = [170, 45, 75, 90, 802, 24, 2, 66]\nradix_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[4]:\n\n\n# Question 3: Python Program for Binary Insertion Sort\n\ndef binary_insertion_sort(arr):\n for i in range(1, len(arr)):\n key = arr[i]\n low = 0\n high = i - 1\n\n while low <= high:\n mid = (low + high) // 2\n if arr[mid] < key:\n low = mid + 1\n else:\n high = mid - 1\n\n for j in range(i, low, -1):\n arr[j] = arr[j - 1]\n\n arr[low] = key\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\nbinary_insertion_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[5]:\n\n\n# Question 4: Python Program for Bitonic Sort\n\ndef bitonic_sort(arr, up=True):\n if len(arr) <= 1:\n return arr\n else:\n mid = len(arr) // 2\n first_half = bitonic_sort(arr[:mid], True)\n second_half = bitonic_sort(arr[mid:], False)\n return bitonic_merge(first_half + second_half, up)\n\ndef bitonic_merge(arr, up=True):\n if len(arr) <= 1:\n return arr\n else:\n bitonic_compare(arr, up)\n mid = len(arr) // 2\n first_half = bitonic_merge(arr[:mid], up)\n second_half = bitonic_merge(arr[mid:], up)\n return first_half + second_half\n\ndef bitonic_compare(arr, up):\n distance = len(arr) // 2\n for i in range(distance):\n if (arr[i] > arr[i + distance]) == up:\n arr[i], arr[i + distance] = arr[i + distance], arr[i]\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\nsorted_arr = bitonic_sort(arr)\nprint(\"Sorted array:\", sorted_arr)\n\n\n# In[6]:\n\n\n# Question 5: Python Program for Comb Sort\n\ndef comb_sort(arr):\n n = len(arr)\n gap = n\n shrink_factor = 1.3\n sorted = False\n\n while not sorted:\n gap = int(gap / shrink_factor)\n if gap <= 1:\n gap = 1\n sorted = True\n\n i = 0\n while i + gap < n:\n if arr[i] > arr[i + gap]:\n arr[i], arr[i + gap] = arr[i + gap], arr[i]\n sorted = False\n i += 1\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\ncomb_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[7]:\n\n\n# Question 6: Python Program for Pigeonhole Sort\n\ndef pigeonhole_sort(arr):\n min_val = min(arr)\n max_val = max(arr)\n size = max_val - min_val + 1\n holes = [0] * size\n\n for x in arr:\n holes[x - min_val] += 1\n\n i = 0\n for count in range(size):\n while holes[count] > 0:\n holes[count] -= 1\n arr[i] = count + min_val\n i += 1\n\n# Example usage\narr = [8, 3, 2, 7, 4, 6, 8]\npigeonhole_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[8]:\n\n\n# Question 7: Python Program for Cocktail Sort\n\ndef cocktail_sort(arr):\n n = len(arr)\n swapped = True\n start = 0\n end = n - 1\n\n while swapped:\n swapped = False\n\n for i in range(start, end):\n if arr[i] > arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n swapped = True\n\n if not swapped:\n break\n\n swapped = False\n end -= 1\n\n for i in range(end - 1, start - 1, -1):\n if arr[i] > arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n swapped = True\n\n start += 1\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\ncocktail_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[9]:\n\n\n# Question 8: Python Program 
for Gnome Sort\n\ndef gnome_sort(arr):\n n = len(arr)\n i = 0\n\n while i < n:\n if i == 0 or arr[i - 1] <= arr[i]:\n i += 1\n else:\n arr[i], arr[i - 1] = arr[i - 1], arr[i]\n i -= 1\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\ngnome_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[10]:\n\n\n# Question 9: Python Program for Odd-Even Sort / Brick Sort\n\ndef odd_even_sort(arr):\n n = len(arr)\n sorted = False\n\n while not sorted:\n sorted = True\n\n for i in range(1, n - 1, 2):\n if arr[i] > arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n sorted = False\n\n for i in range(0, n - 1, 2):\n if arr[i] > arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n sorted = False\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\nodd_even_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[11]:\n\n\n# Question 10: Python Program for BogoSort or Permutation Sort\n\nimport random\n\ndef is_sorted(arr):\n n = len(arr)\n for i in range(1, n):\n if arr[i] < arr[i - 1]:\n return False\n return True\n\ndef bogo_sort(arr):\n while not is_sorted(arr):\n random.shuffle(arr)\n\n# Example usage\narr = [64, 34, 25, 12, 22, 11, 90]\nbogo_sort(arr)\nprint(\"Sorted array:\", arr)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kumar2253/asssignment","sub_path":"assignment15.py","file_name":"assignment15.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42212150407","text":"class Scene(object):\r\n def __init__(self, title, urlname, description):\r\n self.title = title\r\n self.urlname = urlname\r\n self.description = description\r\n self.paths = {}\r\n self.event = \"\"\r\n\r\n def do(self, input1):\r\n #self.event = self.paths.get(input1)[1]\r\n\r\n default_direction = None\r\n if '*' in self.paths.keys():\r\n default_direction = self.paths.get('*')[1]\r\n return self.paths.get(input1, default_direction)\r\n\r\n def add_paths(self, paths):\r\n self.paths.update(paths)\r\n\r\n\r\n\r\nstarting_point = Scene(\"Start Room\", \"starting_point\",\r\n\"\"\"This is the first room. One door to your left and one to your right. (Try the left one first, just to see what happens)\r\n\r\nYou can do:\r\ngo right\r\ngo left\r\n\r\nTo check your inventory you can always type: inventory\r\n\r\n\"\"\")\r\n\r\nleft_room = Scene(\"Left Room\", \"left_room\",\r\n\"\"\"This is the left room. In the center lies some Gold! On the other side is another door.\r\n\r\nYou can do:\r\ngo back\r\npick up gold\r\ngo ahead\r\n\r\n\"\"\")\r\n\r\nright_room = Scene(\"Right Room\", \"right_room\",\r\n\"\"\"This is the right room. On the floor lies a big key.\r\n\r\nYou can do:\r\ngo back\r\npick up key\r\n\r\n\"\"\")\r\n\r\noutside = Scene(\"Outside\", \"win\",\r\n\"\"\"You find yourself outside. Happy to see the sun and be some gold richer! :)\r\n\r\n\"\"\")\r\n\r\n\r\n\r\n#here are som important things now:\r\n\r\n#the arrays are build like that:\r\n#\r\n#1 place: what to do! 0 = do something 1 = go somewhere\r\n#if 0(do something):\r\n#what happens, what is put into your inventory, what gets removed from your inventory\r\n\r\n#if 1(go somewhere):\r\n#the next scene, What I need somthing to get there(like a key), what is displayed when I don't have that thing\r\n#\r\n\r\n#\r\nstarting_point.add_paths({\r\n 'go left': [1, left_room, \"key1\", \"The door is locked! 
You need a key to enter!\"],\r\n 'go right': [1, right_room, None],\r\n '*': [starting_point, None]\r\n})\r\n\r\nright_room.add_paths({\r\n 'go back': [1, starting_point, None],\r\n\t'pick up key': [0, \"You pick up an old big key and it is added to your inventory\", \"key1\", None] ,\r\n '*': [1, right_room, None]\r\n})\r\n\r\nleft_room.add_paths({\r\n 'go back': [1, starting_point, None],\r\n\t'pick up gold': [0, \"You pick up some Gold\", \"gold\", None, None] ,\r\n\t'go ahead': [1, outside, None],\r\n '*': [1, left_room, None]\r\n})\r\n\r\n\r\nSCENES = {\r\n starting_point.urlname: starting_point,\r\n left_room.urlname: left_room,\r\n right_room.urlname: right_room,\r\n\toutside.urlname: outside\r\n}\r\nSTART = starting_point\r\n","repo_name":"sophiamahnke/LPTHW","sub_path":"EX52/gothonweb_alltries/gothonweb4/map2.py","file_name":"map2.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10251232919","text":"class TreeNode(object):\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution(object):\n def insertIntoBST(self, root, val):\n node = TreeNode(val)\n if not root: return node\n if not root.right and root.val < val:\n root.right = node\n if not root.left and root.val > val:\n root.left = node\n # 前序遍历\n if val > root.val:\n self.insertIntoBST(root.right, val)\n if val < root.val:\n self.insertIntoBST(root.left, val)\n\n return root\n\n\n# 迭代法\nclass Solution:\n def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:\n if not root:\n return TreeNode(val)\n parent = None # 此步可以省略\n cur = root\n\n # 用while循环不断地找新节点的parent\n while cur:\n parent = cur # 首先保存当前非空节点作为下一次迭代的父节点\n if cur.val < val:\n cur = cur.right\n elif cur.val > val:\n cur = cur.left\n\n # 运行到这意味着已经跳出上面的while循环,\n # 同时意味着新节点的parent已经被找到.\n # parent已被找到, 新节点已经ready. 把两个节点黏在一起就好了.\n if parent.val > val:\n parent.left = TreeNode(val)\n else:\n parent.right = TreeNode(val)\n\n return root\n\n\n#\nclass Solution(object):\n def insertIntoBST(self, root, val):\n # 返回更新后的以当前root为根节点的新树,方便用于更新上一层的父子节点关系链\n\n # Base Case\n if not root: return TreeNode(val)\n\n # 单层递归逻辑:\n if val < root.val:\n # 将val插入至当前root的左子树中合适的位置\n # 并更新当前root的左子树为包含目标val的新左子树\n root.left = self.insertIntoBST(root.left, val)\n\n if root.val < val:\n # 将val插入至当前root的右子树中合适的位置\n # 并更新当前root的右子树为包含目标val的新右子树\n root.right = self.insertIntoBST(root.right, val)\n\n # 返回更新后的以当前root为根节点的新树\n return root","repo_name":"Fyw1988/Leetcode","sub_path":"二叉树/701.py","file_name":"701.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36277391886","text":"#All code was tested in Microsoft Visual Studio\r\n\r\n#Use a while loop to find the first 20 numbers that are divisible by \r\n#5, 7, and 11, and print them:\r\nprint(\"\\nExercise 5:\")\r\ndef exe5():\r\n nofound = 0\r\n x = 11\r\n ans = list()\r\n while nofound < 20:\r\n if (x % 5 == 0 and x % 7 == 0 and x % 11 == 0):\r\n ans.append(x)\r\n nofound += 1 \r\n x += 1\r\n return(ans)\r\nprint(exe5())","repo_name":"077x/CS3612017","sub_path":"Python/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28802965874","text":"# 52. 
Write a Python program to add 'ing' at the end of a given string (length should be at least 3). If the given\n# string already ends with 'ing' then add 'ly' instead. If the string length of the given string is less than 3,\n# leave it unchanged. Go to the editor\n# a. Sample String : 'abc'\n# b. Expected Result : 'abcing'\n# c. Sample String : 'string'\n# d. Expected Result : 'stringly'\n\nname=input(\"enter the name\")\na=\"ly\"\nb=\"ing\"\nif name!=name+b:\n print(name+a)\nelif name!=name+b:\n print(name+b)\nelse:\n print(name+b+a)","repo_name":"Kavithabathula/If-Else","sub_path":"add ly,ing to given.py","file_name":"add ly,ing to given.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3631974131","text":"from client import create_clients, get_cookies\n\n\ndef bot(url):\n client, user_agent = create_clients()\n\n # Without cookies\n productPage = client.get(url)\n print(\"Without cookies, status code: \", end=\"\")\n print(productPage.status_code)\n # 503, Service Unavailable\n\n\n # Call cookies generator\n cookies = get_cookies(url, user_agent, max_retry=10) # product url you wanna acces, user_agent generated calling the client generator, max_retry in seconds\n if not cookies:\n return\n \n # For each cookie in all loaded cookies -> set the cookie into the client\n for cookie in cookies:\n client.cookies.set(name=cookie.get('name'), value=cookie.get('value'), expires=cookie.get('expires'), domain=cookie.get('domain'))\n\n # Retry getting product page using cookies we passed in the client\n productPage = client.get(url)\n print(\"With generated cookies, status code: \", end=\"\")\n print(productPage.status_code)\n # 200, OK\n\nbot(\"https://www.titoloshop.com/eu_en/air-jordan-13-retro-red-flint-dj5982-600.html\")\n","repo_name":"IHateTomLrge/basic-loader","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"8626693410","text":"\"\"\"\nGiven an array of n positive integers, your task is to count the number of subarrays having sum x.\n\nInput\n\nThe first input line has two integers n and x: the size of the array and the target sum x.\n\nThe next line has n integers a1,a2,…,an: the contents of the array.\n\nOutput\n\nPrint one integer: the required number of subarrays.\n\nConstraints\n1≤n≤2⋅105\n1≤x,ai≤109\nExample\n\nInput:\n5 7\n2 4 1 2 7\n\nOutput:\n3\n\"\"\"\n\nif __name__ == '__main__':\n n, target = list(map(int, input().split()))\n A = list(map(int, input().split()))\n\n mapping = {0: 1}\n res = prefix = 0\n\n for x in A:\n prefix += x\n\n if prefix - target in mapping:\n res += mapping[prefix - target]\n\n mapping[prefix] = mapping.get(prefix, 0) + 1\n\n print(res)\n","repo_name":"denisschmidt/cses","sub_path":"Subarray Sums I/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28120033958","text":"# -*- coding: utf-8 -*-\n'''\n@File : jisuanDate.py\n@Time : 2020/03/02 11:10:23\n'''\n\nimport datetime as dt\n\ntsr = \"2020-03-02 11:03:04\"\ndate, time = tsr.split(' ')\ndate_list = map(lambda x: int(x), date.split('-'))\ntime_list = map(lambda x:int(x),time.split(':'))\ntemobj = dt.datetime(*date_list, *time_list)\nprint(temobj.date())\n\nfor _ in range(6):\n delatt = 90\n delattrObj = 
dt.timedelta(delatt)\n newdt = temobj-delattrObj\n print(newdt.date())\n temobj = newdt\n","repo_name":"wsyshengyun/mypython","sub_path":"jisuanDate.py","file_name":"jisuanDate.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39662382972","text":"# Given a binary tree of integers, find the maximum path sum between two nodes. \r\n# The path must go through at least one node, and does not need to go through \r\n# the root.\r\n\r\nclass Node:\r\n def __init__(self, value, left=None, right=None):\r\n self.value = value\r\n self.left = left\r\n self.right = right\r\n\r\n\r\ndef max_path_sum(node, is_root=False):\r\n if node is None:\r\n return 0\r\n\r\n paths = [\r\n node.value,\r\n node.value + max_path_sum(node.left),\r\n node.value + max_path_sum(node.right),\r\n ]\r\n\r\n if is_root:\r\n paths.append(node.value + max_path_sum(node.left) + max_path_sum(node.right))\r\n return max(paths)\r\n\r\n\r\nif __name__ == '__main__':\r\n for tree in [\r\n Node(1, Node(2), Node(3)),\r\n Node(10, Node(2, Node(20), Node(1)), Node(10, right=Node(-5, Node(3), Node(4)))),\r\n Node(2, Node(-5, Node(3), Node(3)), Node(6, Node(-2), Node(3))),\r\n Node(2, Node(-5, Node(8), Node(3)), Node(6, Node(-2), Node(3))),\r\n ]:\r\n print(max_path_sum(tree, True))","repo_name":"kemingy/daily-coding-problem","sub_path":"src/max_path_sum.py","file_name":"max_path_sum.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"24501672013","text":"# coding: utf-8\nimport operator\nimport threading\nfrom functools import reduce\nfrom inspect import isclass\nfrom random import choice, randrange\nfrom time import sleep\n\nfrom django.db import models, transaction\nfrom django.db.transaction import atomic\n\n\ndef patch_methods(cls, *bases):\n \"\"\"\n Monkey-patcher une classe avec les méthodes d'autres classes\n\n Le contenu de bases peut être des classes et des fonctions,\n dans n'importe quel ordre.\n\n :param cls: Classe à patcher\n :param bases: Classes depuis lesquelles copier les fonctions + Méthodes\n :type bases: list | lisr\n \"\"\"\n for base in bases:\n if isclass(base):\n for name in base.__dict__:\n if not name.startswith('__'):\n attribute = getattr(base, name)\n if callable(attribute):\n setattr(cls, name, attribute)\n elif callable(base):\n setattr(cls, base.__name__, base)\n\n\nclass DictUpdateModel:\n \"\"\"\n Mixin pour Model ajoutant une fonction update\n\n Monkey-patching done in scoop.core.__init__\n \"\"\"\n\n # Setter\n def update(self, save=False, **kwargs):\n \"\"\"\n Mettre à jour les champs du modèle\n\n :param save: Enregistrer l'instance après avoir modifié les champs\n :param kwargs: Attributs de l'instance à modifier\n :type self: django.db.models.Model\n :type save: bool\n :type kwargs: **dict\n \"\"\"\n for (key, value) in kwargs.items():\n setattr(self, key, value)\n if save is True:\n try:\n self.save(update_fields=kwargs.keys())\n except ValueError: # généralement si un generic field name est employé\n self.save()\n\n\n@atomic\ndef resave_queryset(queryset, fields=None, count=None):\n \"\"\" Réenregistrer chaque instance du queryset\"\"\"\n\n def _resave_queryset_progress(progress, total):\n \"\"\" Afficher la progression pendant la mise à jour du queryset\"\"\"\n percent = progress * 100 / total\n print(\"{percent:>5.1f}% ({progress})\".format(progress=progress, percent=percent))\n 
sleep(1.0)\n\n t = None\n total = count or queryset.count()\n criteria = {} if fields is None else {'update_fields': fields}\n for i, instance in enumerate(queryset):\n if t is None or not t.isAlive():\n t = threading.Thread(target=_resave_queryset_progress, args=(i, total))\n t.start()\n instance.save(**criteria)\n\n\ndef get_all_related_objects(instance, limit=2048):\n \"\"\"\n Renvoyer tous les objets liés à une instance\n\n Monkey-patching done in scoop.core.__init__\n \"\"\"\n # Liste à renvoyer\n result = set()\n # Récupérer les accesseurs qui seront appelés pour récupérer les objets\n links = [rel.get_accessor_name() for rel in instance._meta.related_objects]\n # Récupérer les accesseurs, tous les objets des accesseurs, et les ajouter\n for link in links:\n if len(result) > limit:\n break\n try:\n items = getattr(instance, link).all()\n for item in items:\n result.add(item)\n except AttributeError:\n try:\n result.add(getattr(instance, link))\n except AttributeError:\n pass\n return result\n\n\ndef search_query(expression, fields, queryset=None):\n \"\"\"\n Fabriquer une requête de recherche de mots dans plusieurs champs\n\n On passe une expression avec un ou plusieurs mots\n et une liste de champs texte dans lesquels rechercher.\n La fonction renvoie les paramètres de QuerySet utiles à la recherche.\n Chaque mot de l'expression peut être agrémenté de ^ et = en préfixe, afin de\n modifier la requête. Le caractère $ peut aussi se placer en suffixe.\n \"\"\"\n tokens = expression.split()\n query_groups = []\n for token in tokens:\n query_list = []\n # Variantes de recherche\n if token.startswith('^'):\n matching = 'istartswith'\n token = token[1:]\n elif token.endswith('$'):\n matching = 'iendswith'\n token = token[:-1]\n elif token.startswith('='):\n matching = 'iexact'\n token = token[1:]\n else:\n matching = 'icontains'\n for field in fields:\n # Ajouter une condition de recherche\n query_list.append(models.Q(**{\"%s__%s\" % (field, matching): token}))\n query_group = reduce(operator.or_, query_list)\n query_groups.append(query_group)\n if queryset is None:\n final_query = reduce(operator.and_, query_groups)\n return final_query\n else:\n for query_group in query_groups:\n queryset = queryset.filter(query_group)\n return queryset\n\n\ndef shuffle_model(self, fields=None, m2m_max=4):\n \"\"\"\n Randomizer les données du modèle, notamment les clés étrangères et clés M2M\n \n :param self:\n :param fields: noms des champs FK/M2M à randomizer\n :type fields: list\n :param m2m_max: nombre maximum de liens M2M à créer sur un champ M2M\n :type m2m_max: int\n \"\"\"\n fields = fields or [self._meta.get_field_by_name(field) for field in set(self._meta.get_all_field_names())]\n for field in fields:\n field = field[0]\n if isinstance(field, models.ForeignKey) and not isinstance(field, models.OneToOneField):\n queryset = field.related.parent_model._default_manager.all()\n queryset = queryset.filter(**field.rel.limit_choices_to).order_by('?')\n if queryset.exists():\n setattr(self, field.name, queryset[0])\n elif isinstance(field, models.ManyToManyField):\n queryset = field.related.parent_model.objects.all().order_by('?')\n queryset = queryset.filter(**field.rel.limit_choices_to)\n if queryset.exists():\n attribute = getattr(self, field.name)\n attribute.clear()\n for i in range(min(randrange(1, m2m_max + 1), queryset.count())):\n attribute.add(queryset[i])\n elif getattr(field, 'choices', None) is not None:\n choices = list(field.choices)\n if len(choices) > 0:\n setattr(self, field.name, 
choice(choices)[0])\n\n\ndef limit_to_model_names(*names):\n \"\"\" Limit_choices_to via des noms de modèles type app_label.model \"\"\"\n return reduce(operator.or_, [models.Q(**{'app_label': app, 'model': model}) for app, model in [name.split('.') for name in names]])\n\n\ndef make_lazy_picklable(*args):\n \"\"\"\n request.user peut être un objet SimpleLazyObject qui ne peut pas être picklé par\n\n Celery lorsqu'il est dans une autre structure, visiblement.\n Remplacer user par user._wrapped dans ce cas\n :param symbols: normalement, passer locals()\n :param names: normalement, passer les paramètres de la fonction qui sont de type user\n :type names: list[str]\n :type symbols: dict\n \"\"\"\n return [getattr(arg, '_wrapped', arg) for arg in args]\n\n\nclass SingleDeleteQuerySetMixin(object):\n \"\"\" Mixin de queryset implémentant la suppression individuelle des instances du queryset \"\"\"\n\n @transaction.atomic()\n def delete(self):\n \"\"\"\n Supprimer indépendamment chaque objet du queryset\n\n :type self: django.db.models.Queryset\n \"\"\"\n for item in self:\n item.delete()\n\n\nclass SingleDeleteQuerySet(models.QuerySet, SingleDeleteQuerySetMixin):\n \"\"\" Queryset implémentant la suppression individuelle des instances du queryset \"\"\"\n pass\n\n\nclass SingleDeleteManager(models.Manager, object):\n \"\"\" Manager implémentant la suppression individuelle des instances du queryset \"\"\"\n\n def get_queryset(self):\n \"\"\" Renvoyer le queryset par défaut \"\"\"\n return SingleDeleteQuerySet(getattr(self, 'model'), using=getattr(self, '_db'))\n\n\nclass DisableMigrations(object):\n \"\"\" Faux dictionnaire désactivant les migrations pour toutes les applications \"\"\"\n\n def __contains__(self, item):\n return True\n\n def __getitem__(self, item):\n return \"notmigrations\"\n","repo_name":"artscoop/scoop","sub_path":"scoop/core/util/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14788006775","text":"T = int(input())\n\nfor test_case in range(1, T + 1):\n people = int(input())\n record = list(map(int, input().split()))\n \n record_abs = [abs(x) for x in record]\n \n min_ = record_abs[0]\n \n for i in record_abs:\n if i <= min_:\n min_ = i\n \n count_ = record_abs.count(min_) \n \n print('#{} {} {}'.format(test_case, min_, count_))","repo_name":"kleenex1/TIL","sub_path":"문제풀기/SWEA/1285.py","file_name":"1285.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17594088171","text":"# -*- coding:utf-8 -*-\n# print(\"\"\"\n#\n# *************************[ Rule ]****************************\n# 1. DB에 있는 단어만 사용 가능\n# 2. DB에 없는 단어를 입력 할 경우,\n# DB에 저장되어 다음 게임에서 사용 가능\n# 3. 
직접 DB를 수정하고 싶을 경우,\n# word_dictionary_kor.txt파일 수정\n# *************************************************************\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n# \"\"\", end=\"\")\nfrom game_word_chain import korean_dictionary_api\nfrom game_word_chain import engine\nimport time, sys, os\n\n\ndef countdown(n):\n if n == 0:\n print(\"Please enter a word\")\n else:\n print(n)\n time.sleep(1)\n countdown(n-1)\n\ndef start(lastChar):\n dicData = [] # 사전목록\n useData = [] # 이미사용된 단어 저장\n\n try:\n f = open(\"game_word_chain/db/word_dictionary_kor.txt\", \"r\")\n while True:\n data = f.readline().rstrip('\\r\\n')\n dicData.append(data)\n if data == \"\":\n break\n except:\n print(\"[Error] DB file not found\")\n print(\"[Fail] Loading Word Chain Game ...\")\n\n hmWord = engine.humanEg.humanInput(lastChar) # 사람이 단어를 입력함\n # hmWord = word_chain.engine.humanEg.humanConnectChar(hmWord,lastChar) # 사람이 입력 한 단어를 가공함\n hmCanUse = engine.humanEg.humanWordDefine(hmWord,dicData) # 사람이 입력한 단어가 있는지 확인\n if hmCanUse:\n ### word_dictionary_kor DB에 구성되지 않은 단어 일 경우, word_dictionary_kor DB 추가 ###\n korean = hmWord\n check = korean_dictionary_api.posCheck(korean)\n if check == \"true\":\n print(\"[word-chain] '{}'은(는) DB에 구성되지 않은 단어입니다.\".format(hmWord))\n print(\"[word-chain] '{}'이(가) DB에 추가되었습니다. 다음 게임에서 적용됩니다.\".format(hmWord))\n f = open(\"game_word_chain/db/word_dictionary_kor.txt\", 'a')\n f.write(hmWord + '\\n')\n f.close()\n else:\n print(\"[word-chain] '{}'은(는) 명사가 아닙니다.\".format(hmWord))\n return \"not a noun\"\n\n used_word = engine.humanEg.humanUseWord(hmWord,useData)\n if used_word:\n # return \"이미 사용한 단어에요, 모피에게 졌어요.\"\n return \"already used words\"\n else:\n useData.append(hmWord)\n ### 사람이 입력할것이 완료됨 ###\n\n ### 컴퓨터의 시작 ###\n lastChar = engine.defaultEg.getLastChar(hmWord)\n comWord = engine.computerEg.useWord(lastChar,dicData)\n if comWord == []:\n print(\"[word-chain] Moppy : \", lastChar[-1])\n print(\"[word-chain] DB에 {}(으)로 시작하는 단어가 없을 경우 종료됩니다.\".format(lastChar[-1]))\n return \"do not think\"\n\n comWord = engine.computerEg.useAgain(comWord,useData)\n if comWord == []:\n print(\"Word Chain Game Ending...\")\n\n ### comWord 변수에 총 사용가능한 단어들이 모여있습니다. ###\n computerUse = engine.computerEg.selectWord(comWord)\n print(\"Moppy>\", computerUse)\n useData.append(computerUse)\n lastChar = engine.defaultEg.getLastChar(computerUse)\n answer = computerUse\n\n return answer\n","repo_name":"Soosang-9/AI-Bot","sub_path":"Server/game_word_chain/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1264639628","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n__date__ = '2018/9/12'\r\n__author__ = 'sunchengquan'\r\n__mail__ = 'sunchq14@lzu.edu.cn'\r\n\r\n\r\n\"\"\"\r\nPOST请求\r\n\"\"\"\r\n\r\nimport requests\r\n\r\ndata = {'name': 'germey', 'age': '22'}\r\nr = requests.post(\"http://httpbin.org/post\", data=data)\r\nprint(r.text)\r\n","repo_name":"sunchengquan/python_spider","sub_path":"python_requests/request_03.py","file_name":"request_03.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7101947704","text":"from ..models import Emergency,User,Subscribers\nfrom ..models import Emergency,User,Conversation,Reply,Solution\nfrom .forms import ConvoForm,UpdateProfile,chatboxForm,SolutionsForm,Update_emergency,SubscriberForm,EmergencyForm\nfrom .. 
import db,photos\nfrom . import main\nfrom flask import render_template,redirect,url_for,abort,request,flash\nfrom flask_login import login_required,current_user\nfrom ..email import mail_message\nfrom ..request import article_source,location\nimport urllib.request,json\n\n@main.route('/', methods = ['GET','POST'])\ndef index():\n title=\"Home\"\n Form=SubscriberForm()\n formE=EmergencyForm()\n\n locations=location()\n for x in locations:\n lat=x.latitude\n lon=x.longitude\n latitude=lat\n longitude=lon\n\n if formE.validate_on_submit():\n category=formE.category.data\n description=formE.description.data\n\n new_post=Emergency(category=category,description=description,latitude=latitude,longitude=longitude,victim=current_user.username)\n new_post.save_emergency()\n\n subscriber = Subscribers.query.all()\n for my_subscriber in subscriber:\n mail_message(\"New emergecy posted\",\"email/new_emergency\",my_subscriber.email,emergency = emergency)\n \n return redirect(url_for('main.chatbox',category=new_post.category))\n\n return render_template('index.html',title=title,subscriber_form=Form,formE=formE,locations=locations)\n\n@main.route('/emergency/')\ndef emergency(category):\n '''\n view function that renders emergency template with the specific category displaying emergencies by category\n '''\n title=category\n Form=SubscriberForm()\n emergencies=Emergency.get_emergencies(category)\n \n \n return render_template('emergency.html',subscriber_form=Form,title=title,emergencies=emergencies)\n\n\n# Updating profile\n@main.route('/user/')\n@login_required\ndef profile(yusername):\n user=User.query.filter_by(username = yusername).first()\n emergencies = Emergency.get_emergency_by_user(current_user.username)\n Form=SubscriberForm() \n if user is None:\n abort(404)\n return render_template('profile/profile.html',subscriber_form=Form,user = user,emergencies = emergencies)\n\n\n@main.route('/emergency/conversation/', methods=['GET','POST'])\n@login_required\ndef convo(id):\n '''\n view function that renders the conversation page for people to talk\n ''' \n emergency=Emergency.query.filter_by(id=id).first()\n form = ConvoForm()\n Form=SubscriberForm()\n title='Conversations'\n convos=Conversation.get_convos(id) \n\n if form.validate_on_submit():\n new_convo=Conversation(emergency_id=id,convo=form.convo.data,posted_by=current_user.username)\n\n new_convo.save_convo()\n\n return redirect(url_for('main.convo',id=id))\n\n return render_template('conversation.html',subscriber_form=Form,ConvoForm=form,title=title,convos=convos,emergency=emergency) \n\n@main.route('/emergency/conversation/reply/', methods=['GET','POST'])\n@login_required\ndef reply(id):\n '''\n view function that renders the reply page for people to reply a conversation\n ''' \n\n form = ConvoForm()\n title='Reply'\n replies=Reply.get_replies(id)\n Form=SubscriberForm()\n convo=Conversation.query.filter_by(id=id).first()\n\n if form.validate_on_submit():\n new_reply=Reply(convo_id=id,reply=form.convo.data,posted_by=current_user.username)\n\n new_reply.save_reply()\n\n return redirect(url_for('main.reply',id=id))\n\n return render_template('reply.html',subscriber_form=Form,ConvoForm=form,title=title,replies=replies,convo=convo) \n\n\n@main.route('/emergency/new/solution', methods = ['GET','POST'])\n@login_required\ndef new_solution():\n '''\n views function that renders the solution form template in the solution.html\n '''\n\n form = SolutionsForm()\n title ='new solution'\n Form=SubscriberForm()\n\n if form.validate_on_submit():\n new_solution 
= Solution(body =form.solution.data ,title =form.title.data ,posted_by =current_user.username,category = form.category.data )\n\n new_solution.save_solution()\n\n return redirect(url_for('main.solution'))\n\n return render_template('solution_form.html',subscriber_form=Form,form = form,title=title)\n\n@main.route('/emergency/solution', methods = ['GET','POST'])\ndef solution():\n '''\n this view function is responsible for displaying our the solution on solution.html\n '''\n Form=SubscriberForm()\n accidentSol = Solution.get_solution_by_category('Accidents')\n floodSol = Solution.get_solution_by_category('Floods')\n earthquakeSol = Solution.get_solution_by_category('Earthquakes')\n fluSol = Solution.get_solution_by_category('Flu')\n landslideSol = Solution.get_solution_by_category('Landslide')\n fireSol = Solution.get_solution_by_category('Fire')\n powerSol = Solution.get_solution_by_category('PowerOutage')\n terrorismSol = Solution.get_solution_by_category('Terrorism')\n wildfireSol = Solution.get_solution_by_category('Wildfire')\n\n return render_template('solution.html',subscriber_form=Form,accidents = accidentSol, floods = floodSol,earthquakes = earthquakeSol,flus = fluSol, landslides = landslideSol,fire = fireSol,power = powerSol,terrorism = terrorismSol,wildfire = wildfireSol)\n\n@main.route(\"/map\")\ndef map():\n Form=SubscriberForm()\n\n return render_template('map.html',subscriber_form=Form)\n \n\n\n@main.route('/chatbox/', methods=['GET','POST'])\n@login_required\ndef chatbox(category):\n '''\n view function that renders chatbox html for chatting\n '''\n\n if category=='Accidents':\n flash('Thank for Posting the emergency. Please call our \\'First Aid & Rescue Team\\': \\' 0722233333 \\' to assist you immediately') \n\n elif category=='Floods':\n flash('Thank for Posting the emergency. Please call our \\'First Aid & Rescue Team\\' : \\' 0733344444 \\' to assist you immediately') \n\n elif category=='Earthquakes':\n flash('Thank for Posting the emergency. Please call our \\'Disaster Team\\' : \\' 0744455555 \\' to assist you immediately') \n\n elif category=='Flu':\n flash('Thank for Posting the emergency. Please call our \\'Health Team\\' : \\' 0755566666 \\' to assist you immediately') \n\n elif category=='Fire':\n flash('Thank for Posting the emergency. Please call our \\'Fire Extinguisher Team\\' : \\' 0766677777 \\' to assist you immediately') \n\n elif category=='Landslide':\n flash('Thank for Posting the emergency. Please call our \\'First Aid & Rescue Team\\' : \\' 0777788888 \\' to assist you immediately') \n \n elif category=='PowerOutage':\n flash('Thank for Posting the emergency. Please call the \\'KPLC Team\\' : \\' 0788899999 \\' to assist you immediately') \n\n elif category=='Terrorism':\n flash('Thank for Posting the emergency. Please call the \\'Police \\' : \\' 999 \\' to assist you immediately') \n\n elif category=='Wildfire':\n flash('Thank for Posting the emergency. 
Please call our \\'Fire Extinguisher Team & Animal rescue Team\\' : \\' 0700011111 \\' to assist you immediately') \n else:\n flash(\"Welcome to Help centre......\") \n \n form=chatboxForm() \n Form=SubscriberForm()\n if form.validate_on_submit():\n \n if form.chatbox.data=='Accidents':\n emergencies=Emergency.get_emergencies('Accidents')\n \n return render_template('emergency.html',subscriber_form=Form,emergencies=emergencies)\n\n elif form.chatbox.data=='Floods':\n emergencies=Emergency.get_emergencies('Floods')\n\n return render_template('emergency.html',subscriber_form=Form,emergencies=emergencies) \n\n elif form.chatbox.data=='Help':\n accidentSol = Solution.get_solution_by_category('Accidents')\n floodSol = Solution.get_solution_by_category('Floods')\n earthquakeSol = Solution.get_solution_by_category('Earthquakes')\n fluSol = Solution.get_solution_by_category('Flu')\n landslideSol = Solution.get_solution_by_category('Landslide')\n fireSol = Solution.get_solution_by_category('Fire')\n powerSol = Solution.get_solution_by_category('PowerOutage')\n terrorismSol = Solution.get_solution_by_category('Terrorism')\n wildfireSol = Solution.get_solution_by_category('Wildfire')\n\n return render_template('solution.html',subscriber_form=Form,accidents = accidentSol, floods = floodSol,earthquakes = earthquakeSol,flus = fluSol, landslides = landslideSol,fire = fireSol,power = powerSol,terrorism = terrorismSol,wildfire = wildfireSol)\n\n elif form.chatbox.data=='Home':\n return redirect(url_for('main.index'))\n\n elif form.chatbox.data=='News': \n return redirect(url_for('main.article'))\n else:\n flash('Did Not get that Message')\n\n \n return render_template('chatbox.html', form=form,subscriber_form=Form)\n\n@main.route('/user//update',methods = ['GET','POST'])\n@login_required\ndef update_profile(yusername):\n '''\n View function for rendering te update profile page\n \n Args:\n yusername:The current user's username\n '''\n Form=SubscriberForm()\n user = User.query.filter_by(username = yusername).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n if form .validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('.profile',subscriber_form=Form,yusername = user.username))\n\n return render_template('profile/update.html',subscriber_form=Form,form = form)\n\n@main.route('/user//update/pic',methods = ['POST'])\n@login_required\ndef update_pic(yusername):\n '''\n View function that will help a user upload a photo\n '''\n Form=SubscriberForm()\n user = User.query.filter_by(username = yusername).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.profile_pic_path = path\n db.session.commit()\n \n return redirect(url_for('main.profile',subscriber_form=Form,yusername = yusername))\n \n\n@main.route('/article')\ndef article():\n '''\n view article page function that returns article details page and its data\n '''\n articles = article_source()\n Form=SubscriberForm()\n return render_template('news.html',subscriber_form=Form,articles=articles)\n\n@main.route('/emergency/update_emergency/' , methods=['GET','POST'])\ndef update_emergency(id):\n '''\n view function that renders update emergency form\n '''\n Form=SubscriberForm()\n u_emergency=Emergency.query.filter_by(id=id).first()\n title='Update Emergency'\n if emergency is None:\n abort(404)\n\n form=Update_emergency()\n if form.validate_on_submit():\n u_emergency.category=form.category.data\n 
u_emergency.description=form.description.data \n\n db.session.add(u_emergency)\n db.session.commit()\n\n return redirect(url_for('main.emergency',subscriber_form=Form,category=form.category.data))\n\n return render_template('updateemergency.html',subscriber_form=Form,title=title,form=form) \n\n@main.route('/delEmergency/')\n@login_required\ndef delEmergency(id):\n '''\n view function that deletes an emergency if only the emergency belongs to the current user\n '''\n Form=SubscriberForm()\n \n emergency_del=Emergency.query.filter_by(id=id).first()\n\n emergency_del.delete_emergency()\n\n return redirect(url_for('main.emergency',subscriber_form=Form,category=emergency_del.category))\n\n\n\n \n\n\n","repo_name":"petermirithu/Emergency_app","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"32598259969","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n#***************************************************************************\n# MetEvolSim (Metabolome Evolution Simulator)\n# -------------------------------------------\n# MetEvolSim is a numerical framework dedicated to the study of metabolic\n# abundances evolution.\n#\n# © 2018-2023 Charles Rocabert, Gábor Boross, Orsolya Liska and Balázs Papp\n# Web: https://github.com/charlesrocabert/MetEvolSim\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#***************************************************************************\n\nimport os\nimport sys\nimport metevolsim\n\n\n##################\n# MAIN #\n##################\n\n################################################################################\n# This Python script simply computes the steady-state of the erythrocyte\n# metabolism model from Holzhutter et al. 
(2004).\n#\n# Prior to using this script, the user must install the MetEvolSim Python\n# package using the following command line:\n# > pip install MetEvolSim\n# and also download CopasiSE and define the path to CopasiSE below.\n################################################################################\n\nsbml = \"./model/erythrocyte_metabolism.xml\"\ncopasi = \"Path to CopasiSE\" # Define here the path to CopasiSE\n\n# Load the SBML model\nmodel = metevolsim.Model(sbml, [], copasi)\n\n# Show some information about the model\nprint(\"> Number of species: \"+str(model.get_number_of_species()))\nprint(\"> Number of variable species: \"+str(model.get_number_of_variable_species()))\nprint(\"> Number of reactions: \"+str(model.get_number_of_reactions()))\nprint(\"> Number of kinetic parameters: \"+str(model.get_number_of_parameters()))\n\n# Compute the steady-state of the original (wild-type) model\nmodel.compute_wild_type_steady_state()\n","repo_name":"charlesrocabert/MetEvolSim","sub_path":"example/steady_state.py","file_name":"steady_state.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"37476453827","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom psychopy import visual, core, event, gui\r\nimport tkinter\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random\r\n\r\n# p and value levels from Wu 1999\r\np = np.array([0.01, 0.05, 0.1, 0.25, 0.4, 0.5, 0.6, 0.75, 0.9, 0.95, 0.99])\r\nvalue = np.array([(25, 0), (50, 0), (75, 0), (100, 0), (150, 0), (200, 0), (400, 0), (800, 0),\r\n                  (50, 25), (75, 50), (100, 50), (150, 50), (150, 100), (200, 100), (200, 150)])\r\n\r\n# Stimulus preprocessing: N = 11(p)*15(value)*2 = 330 = 2*5(block)*33(trial)\r\n# Read the initial data\r\ndata = pd.read_csv('data.csv')\r\ndata_np = data.values\r\nitem = [0] * len(p)\r\nfor i in range(len(p)):\r\n    item[i] = data.loc[data['p'] == p[i]].values\r\n    np.random.shuffle(item[i]) # shuffle the items under each p\r\n# shuffle the whole set\r\nnp.random.shuffle(item)\r\nblock = [[0] * 33 for _ in range(5)]\r\n# each block has 33 trials: 11(p) * 3(x1 x2)\r\nfor i in range(5):\r\n    for j in range(11):\r\n        block[i][3 * j] = item[j][i * 3]\r\n        block[i][3 * j + 1] = item[j][i * 3 + 1]\r\n        block[i][3 * j + 2] = item[j][i * 3 + 2]\r\n    np.random.shuffle(block[i])\r\n# assign the 8 sure_reward values (alternating assignment)\r\nsure_assignment = np.random.randint(0, 2, size=(5, 33))\r\npart2 = [[0] * 33 for _ in range(5)]\r\npart1 = [[0] * 33 for _ in range(5)]\r\nsur = [0, 1]\r\nfor i in range(5):\r\n    for j in range(33):\r\n        p_value = block[i][j][0]\r\n        v_x1 = block[i][j][1]\r\n        v_x2 = block[i][j][2]\r\n        sur[0] = [block[i][j][x] for x in [3, 5, 7, 9]]\r\n        sur[1] = [block[i][j][x] for x in [4, 6, 8, 10]]\r\n        if sure_assignment[i][j] == 0:\r\n            part1[i][j] = {'p': p_value, 'v': (v_x1, v_x2), 'sure_reward': sur[0]}\r\n            part2[i][j] = {'p': p_value, 'v': (v_x1, v_x2), 'sure_reward': sur[1]}\r\n        else:\r\n            part1[i][j] = {'p': p_value, 'v': (v_x1, v_x2), 'sure_reward': sur[1]}\r\n            part2[i][j] = {'p': p_value, 'v': (v_x1, v_x2), 'sure_reward': sur[0]}\r\n# concatenate part1 with part2 reversed\r\ntrial_set = part1 + part2[::-1]\r\n# final data format: 2*5(block)*33(trial)\r\n# for each trial: dict{'p', 'v' = (x1, x2), 'sure_reward':[r1,r2,r3,r4]}\r\n\r\n\r\n(w, h) = (1280, 720)\r\nwin = visual.Window(size=(w, h), fullscr=True, units='pix', color=[0, 0, 0])\r\nmyMouse = event.Mouse()\r\nmyMouse.setVisible(0)\r\nclk = core.Clock()\r\n# time intervals\r\nt_trial = {'t_fix': 0.5, 't_gamble': 1.5, 't_response': 1.5, 't_int': [2, 3, 4]}\r\n# text stimuli\r\ntext_gamble_1 = 
visual.TextStim(win, height=64*h/720, pos=(-150, 0))\r\ntext_gamble_2 = visual.TextStim(win, height=64*h/720, pos=(150, 0))\r\ntext_gamble2 = visual.TextStim(win, height=64*h/720, pos=(-240,0))\r\ntext_reward = visual.TextStim(win, height=64*h/720, pos=(240,0))\r\ntext_p = visual.TextStim(win, height=64*h/720)\r\ntxt = visual.TextStim(win, height=64*h/720)\r\n# 注视点\r\nfix = visual.ImageStim(win, image=\"dot.png\", size=64)\r\n\r\nfor i in range(len(trial_set)):\r\n for j in range(33):\r\n # 数据\r\n p_v = trial_set[i][j]['p']\r\n x1, x2 = trial_set[i][j]['v']\r\n sure_reward = trial_set[i][j]['sure_reward']\r\n np.random.shuffle(sure_reward)\r\n # 注视点\r\n fix.draw()\r\n win.flip()\r\n core.wait(t_trial['t_fix'])\r\n # Gamble\r\n text_p.text = \"%s%%\" % int(100*p_v)\r\n text_p.draw()\r\n win.flip()\r\n core.wait(3)\r\n win.flip()\r\n core.wait(0.1)\r\n #\r\n text_gamble_1.text = \"¥%s\" % int(x1)\r\n text_gamble_2.text = \"¥%s\" % int(x2)\r\n text_gamble_1.draw()\r\n fix.draw()\r\n text_gamble_2.draw()\r\n win.flip()\r\n core.wait(3)\r\n win.flip()\r\n core.wait(1)\r\n # sure_reward & response\r\n for each in sure_reward:\r\n # text_gamble2.text = \"%s%%,¥%s %s%%,¥%s\" % (int(100 * p_v), int(x1), int(100*(1 - p_v)), int(x2))\r\n text_gamble2.text = '奖券'\r\n text_reward.text = \"¥%s\" % int(each)\r\n text_gamble2.draw()\r\n text_reward.draw()\r\n win.flip()\r\n key = event.waitKeys(keyList=['space', 'f', 'j', 'escape'], maxWait=3)\r\n if not key:\r\n pass\r\n elif 'escape' in key:\r\n win.close()\r\n core.quit()\r\n elif 'f' in key:\r\n text_gamble2.color = [0, 1, 0]\r\n elif 'j' in key:\r\n text_reward.color = [0, 1, 0]\r\n text_gamble2.draw()\r\n text_reward.draw()\r\n win.flip()\r\n text_gamble2.color = [1, 1, 1]\r\n text_reward.color = [1, 1, 1]\r\n core.wait(0.1)\r\n event.clearEvents()\r\n win.flip()\r\n core.wait(0.1)\r\n # 空屏\r\n win.flip()\r\n core.wait(random.randrange(20, 41, step=1)/10.)\r\n if i != 4:\r\n # 休息 30s强制+30\r\n txt.text = \"请休息,按【空格键】继续\"\r\n txt.draw()\r\n win.flip()\r\n core.wait(30)\r\n key = event.waitKeys(keyList=['space', 'escape'])\r\n if 'escape' in key:\r\n break\r\n# 实验结束\r\ntxt.text = \"实验结束!\"\r\nwin.flip()\r\ncore.wait(1)\r\nwin.close()\r\ncore.quit()","repo_name":"zhebushipinyin/cdlab_project1","sub_path":"exp_demo_nTracker.py","file_name":"exp_demo_nTracker.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37208623980","text":"import pickle\nimport socket\nfrom datetime import datetime\n\nimport logger as logger\nfrom models2 import CodeEnum, CollectionDescription, HistoricalCollection, WorkerProperty, Description, DataSet\nfrom database import readAll, insertData, readByDateAndCode, readLastValueByCode, ModelDB\n\nglobal server_socket\n\nlocalHost = \"127.0.0.1\"\nport = 10254\n\n\ndef DataSetForCodeEnum(code_: CodeEnum):\n if code_ == CodeEnum.CODE_ANALOG or code_ == CodeEnum.CODE_DIGITAL:\n return 1\n elif code_ == CodeEnum.CODE_CUSTOM or code_ == CodeEnum.CODE_LIMITSET:\n return 2\n elif code_ == CodeEnum.CODE_SINGLENODE or code_ == CodeEnum.CODE_MULTIPLENODE:\n return 3\n elif code_ == CodeEnum.CODE_CONSUMER or code_ == CodeEnum.CODE_SOURCE:\n return 4\n\n\ndef CodesForDataSet(dataSet: int):\n if dataSet == 1:\n return CodeEnum.CODE_ANALOG, CodeEnum.CODE_DIGITAL\n elif dataSet == 2:\n return CodeEnum.CODE_CUSTOM, CodeEnum.CODE_LIMITSET\n elif dataSet == 3:\n return CodeEnum.CODE_SINGLENODE, CodeEnum.CODE_MULTIPLENODE\n elif dataSet == 4:\n 
return CodeEnum.CODE_CONSUMER, CodeEnum.CODE_SOURCE\n\n\ndef ConvertWorkerData(desc: Description) -> CollectionDescription:\n if desc is None:\n raise TypeError\n else:\n return CollectionDescription(\n desc.Id,\n HistoricalCollection(\n [WorkerProperty(it.Code, it.Value) for it in desc.Items]\n ),\n desc.DataSet\n )\n\n\ndef Deadband(fromDB: int, newOne: int) -> bool:\n if fromDB is None or newOne is None:\n raise TypeError\n\n difference = fromDB - newOne\n if difference < 0:\n difference *= -1\n twopercent = 0.02 * fromDB\n return difference > twopercent\n\n\ndef Connect(): # pragma: no cover\n global server_socket\n server_socket = socket.socket()\n print('\\nWaiting for connection')\n while True:\n try:\n server_socket.connect((localHost, port))\n break\n except:\n pass\n\n\ndef send_message(message):\n package = pickle.dumps(message)\n try:\n server_socket.send(package)\n except:\n pass\n\n\nclass Worker:\n def __init__(self, id):\n self.id = id\n\n self.CDS: dict[int, CollectionDescription] = {\n 1: CollectionDescription(\n 0, HistoricalCollection([]), DataSet(CodeEnum.CODE_ANALOG, CodeEnum.CODE_DIGITAL)\n ),\n 2: CollectionDescription(\n 0, HistoricalCollection([]), DataSet(CodeEnum.CODE_CUSTOM, CodeEnum.CODE_LIMITSET)\n ),\n 3: CollectionDescription(\n 0, HistoricalCollection([]), DataSet(CodeEnum.CODE_SINGLENODE, CodeEnum.CODE_MULTIPLENODE)\n ),\n 4: CollectionDescription(\n 0, HistoricalCollection([]), DataSet(CodeEnum.CODE_CONSUMER, CodeEnum.CODE_SOURCE)\n )\n }\n Connect()\n send_message(self)\n\n def readByDateTimeAndCode(self, dfrom: datetime, dto: datetime, code_: int):\n if (\n type(dfrom) is not datetime or\n type(dto) is not datetime or\n type(code_) is not int\n ):\n raise TypeError\n\n return readByDateAndCode(dfrom, dto, code_)\n\n @staticmethod\n def GetLastForCodes():\n return readAll()\n\n # TODO: Test\n def GetNewValues(self, dataset: int) -> dict[CodeEnum, int]:\n ret: dict[CodeEnum, int] = {}\n for p in self.CDS[dataset].HistoricalCollection.Workers:\n ret[CodeEnum(p.Code)] = p.WorkerValue\n return ret\n\n def GetCodesCount(self, dataset: int) -> dict[CodeEnum, int]:\n codeCount: dict[CodeEnum, int] = {\n c: 0 for c in CodesForDataSet(dataset)\n }\n for p in self.CDS[dataset].HistoricalCollection.Workers:\n codeCount[CodeEnum(p.Code)] += 1\n\n return codeCount\n\n def ReceiveDescriptions(self, desc: Description): # pragma: no cover\n if type(desc) is not Description:\n raise TypeError\n\n logger.logData('Worker {id} received data from LoadBalancer'.format(id=self.id))\n currentColDes: CollectionDescription = ConvertWorkerData(desc)\n\n for p in currentColDes.HistoricalCollection.Workers:\n self.CDS[DataSetForCodeEnum(currentColDes.DataSet[0])].HistoricalCollection.Workers.append(p)\n\n currentValues: dict[CodeEnum, int] = {\n c: readLastValueByCode(c.value)[1] for c in CodeEnum\n }\n newValues: dict[CodeEnum, int] = self.GetNewValues(DataSetForCodeEnum(currentColDes.DataSet[0]))\n codesCount: dict[CodeEnum, int] = self.GetCodesCount(DataSetForCodeEnum(currentColDes.DataSet[0]))\n\n datasetCodes = (currentColDes.DataSet[0], currentColDes.DataSet[1])\n dataSetInt = DataSetForCodeEnum(currentColDes.DataSet[0])\n\n if codesCount[datasetCodes[0]] > 0 and codesCount[datasetCodes[1]] > 0:\n if datasetCodes[1] == CodeEnum.CODE_DIGITAL:\n\n insertData(ModelDB(CodeEnum.CODE_DIGITAL.value, newValues[CodeEnum.CODE_DIGITAL], datetime.now(),\n dataSetInt))\n logger.logData('Worker {id} wrote DIGITAL code value to Database'.format(id=self.id))\n\n if 
Deadband(currentValues[CodeEnum.CODE_ANALOG], newValues[CodeEnum.CODE_ANALOG]):\n insertData(ModelDB(1,\n newValues[CodeEnum.CODE_ANALOG],\n datetime.now(),\n dataSetInt))\n logger.logData('Worker {id} wrote ANALOG code value to Database'.format(id=self.id))\n else:\n for c in datasetCodes:\n if Deadband(currentValues[c], newValues[c]):\n insertData(ModelDB(c.value, newValues[c], datetime.now(), dataSetInt))\n logger.logData(\n 'Worker {id} wrote {code} code value to Database'.format(id=self.id, code=c.value))\n\n self.CDS[DataSetForCodeEnum(currentColDes.DataSet[0])].HistoricalCollection.Workers.clear()\n logger.logData('Worker {id} cleared internal data for dataset after insert to database'.format(id=self.id))\n\n\nif __name__ == '__main__':\n w = Worker(1)\n input()\n","repo_name":"Saska146/RES-Tim8","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19262660294","text":"from functools import wraps\r\nfrom flask import request\r\n\r\nfrom app.main.service.auth_helper import Auth\r\n\r\n\r\ndef access_control(authorized_role):\r\n def decorated(f):\r\n @wraps(f)\r\n def wrapper(*args, **kwargs):\r\n data, status = Auth.get_logged_in_user(request)\r\n token = data.get('data')\r\n print(data, status)\r\n if not token:\r\n print(data,status)\r\n return data, status\r\n elif authorized_role in token['roles']:\r\n return f(*args, **kwargs)\r\n else:\r\n response_object = {\r\n 'status': 'fail',\r\n 'message': 'Unauthorized access to endpoint.'\r\n }\r\n return response_object, 401\r\n\r\n return wrapper\r\n return decorated","repo_name":"TomasRacil/VSIS-2021","sub_path":"backend/app/main/util/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73834049692","text":"import sys\nimport os\nimport ConfigParser\nfrom shutil import copytree\n\nfrom PyQt4 import QtCore, QtGui\nimport CloneProjectUI\nfrom FALLOW.gui.uis.prj import prj_open\nfrom FALLOW import projectManager\n\ninput_para_extension = \"Excel Files (*.xls)\"\nmodel_file_extension = \"Python Files (*.py)\"\n\n\nclass CloneProject(QtGui.QDialog, CloneProjectUI.Ui_diagcloneprj):\n def __init__(self, parent=None, project_path=\"C:/\"):\n super(CloneProject, self).__init__(parent)\n self.setupUi(self)\n self.project_path = project_path\n self.connect(self.btnoriginalprj,\n QtCore.SIGNAL(\"clicked()\"),\n self.browse_original_project)\n self.connect(self.btndestinationprj,\n QtCore.SIGNAL(\"clicked()\"),\n self.browse_destination_project)\n self.ready = True\n self.value = None\n\n def _raise_message(self, header, message):\n msg = QtGui.QMessageBox()\n msg.setIcon(QtGui.QMessageBox.Information)\n msg.setText(header)\n msg.setInformativeText(message)\n msg.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)\n msg.show()\n msg.exec_()\n\n def _check_blank(self, line_edit, text):\n if not line_edit.text():\n header = \"%s information should be filled\" % text\n message = \"Please fill the information to \" \\\n \"%s\" % text\n self.ready = False\n self._raise_message(header, message)\n\n def browse_original_project(self):\n open_prj = prj_open.OpenProject(self)\n open_prj.show()\n if open_prj.exec_():\n value = open_prj.return_value()\n if value:\n self.lineoriginalprj.setText(value)\n\n def browse_destination_project(self):\n 
self._browse_project(self.linedestinationprj)\n\n def _browse_project(self, line_edit):\n project = QtGui.QFileDialog.getExistingDirectory(\n self, 'Select a folder:', self.project_path,\n QtGui.QFileDialog.ShowDirsOnly)\n if project:\n line_edit.setText(project)\n projects = projectManager.get_projects()\n project = str(project)\n projects[os.path.basename(project)] = os.path.abspath(project)\n projectManager.put_projects(projects)\n\n def _prepare_frame(self):\n if not os.path.isdir(str(self.linedestinationprj.text())):\n os.mkdir(str(self.linedestinationprj.text()))\n\n def accept(self):\n self._check_blank(self.lineoriginalprj, 'original project')\n self._check_blank(self.linedestinationprj, 'destination project')\n self._check_blank(self.linemodelername, 'modeler name')\n self._check_blank(self.linemodeleremail, 'modeler email')\n\n if self.ready:\n self._prepare_frame()\n self.value = str(self.linedestinationprj.text())\n super(CloneProject, self).accept()\n else:\n self.ready = True\n\n def return_value(self):\n return self.value\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n form = CloneProject()\n form.show()\n app.exec_()\n del form\n\n\nif __name__ == '__main__':\n main()","repo_name":"uttu90/NewFALLOW","sub_path":"FALLOW/gui/uis/prj/prj_clone/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15143412178","text":"from qgis.core import *\nfrom qgis.gui import *\n\n'''\nWritten by @klaskarlsson\nLicence LGPL\n\nParameters:\n-g : Grid North Reference, default 0\n-m : Magnetic Declination relative True North, default 0\n-t : True North Reference relative Grid North, default 0\n-s : SVG size centimeters, default 2.5\n\n'''\nimport math\n\ndef generatesvg(g, m, t, s):\n \n t = round(t, 1)\n\n svg1=\"\\n\"\n svg1+=\"\\n\"\n svg1+=\"\\n\"\n\n svgm=\" \\n\"\n svgm+=\" \\n\"\n svgm+=\" Mag Decl. 
= %s°\\n\"\n svgm+=\" \\n\"\n \n svgt=\" \\n\"\n svgt+=\" \\n\"\n svgt+=\" True North = %s°\\n\"\n svgt+=\" \\n\"\n\n svgg=\" \\n\"\n svgg+=\" GN\\n\"\n svgg+=\" \\n\"\n svgg+=\" Grid North = %s°\\n\"\n svgg+=\" \\n\"\n\n svgr=\" \\n\"\n svgr+=\" \\n\"\n svgr+=\"\\n\"\n \n vg = math.radians(g)\n vt = math.radians(t)\n vm = math.radians(t+m)\n\n xm = 200 + 300 * math.sin(vm)\n ym = 380 - 300 * math.cos(vm)\n axm = 200 + 305 * math.sin(vm)\n aym = 380 - 305 * math.cos(vm)\n bxm = 200 + 335 * math.sin(vm)\n bym = 380 - 335 * math.cos(vm)\n cxm = 200 + 305 * math.sin(vm + math.radians(2))\n cym = 380 - 305 * math.cos(vm + math.radians(2))\n\n generera = svg1\n generera += svgm % ( xm, ym, axm, aym, bxm, bym, cxm, cym, m)\n\n xt = 200 + 330 * math.sin(vt)\n yt = 380 - 330 * math.cos(vt)\n axt = 200 + 330 * math.sin(vt - math.radians(1.8))\n ayt = 380 - 330 * math.cos(vt - math.radians(1.8))\n bxt = 200 + 360 * math.sin(vt)\n byt = 380 - 360 * math.cos(vt)\n cxt = 200 + 330 * math.sin(vt + math.radians(1.8))\n cyt = 380 - 330 * math.cos(vt + math.radians(1.8))\n dxt = 200 + 349 * math.sin(vt - math.radians(2.6))\n dyt = 380 - 349 * math.cos(vt - math.radians(2.6))\n ext = 200 + 349 * math.sin(vt + math.radians(2.6))\n eyt = 380 - 349 * math.cos(vt + math.radians(2.6))\n\n generera += svgt % ( xt, yt, axt, ayt, bxt, byt, cxt, cyt, dxt, dyt, ext, eyt, t)\n\n xg = 200 + 360 * math.sin(vg)\n yg = 380 - 360 * math.cos(vg)\n axg = 200 + 363 * math.sin(vg)\n ayg = 380 - 363 * math.cos(vg)\n\n generera += svgg % (xg, yg, axg, ayg, g, g)\n\n xrt1 = 200 + 300 * math.sin(vg)\n yrt1 = 380 - 300 * math.cos(vg)\n xrt2 = 200 + 300 * math.sin(vt)\n yrt2 = 380 - 300 * math.cos(vt)\n\n xrm1 = 200 + 280 * math.sin(vt)\n yrm1 = 380 - 280 * math.cos(vt)\n xrm2 = 200 + 280 * math.sin(vm)\n yrm2 = 380 - 280 * math.cos(vm)\n \n if (vm <= vt):\n mp = 0\n else:\n mp = 1\n if (vt <= vg):\n tp = 0\n else:\n tp = 1\n\n generera += svgr % (xrt1, yrt1, tp, xrt2, yrt2, xrm1, yrm1, mp, xrm2, yrm2)\n\n return generera\n\n@qgsfunction(args='auto', group='Military')\ndef north_ref_svg(grid_north, true_north, magnetic_north, size_svg, feature, parent):\n \"\"\"\n
Generate North Reference SVG.\r\n\r\n    Place inside HTML frame\r\n\r\n    - arg 1: Grid North (normally 0)\r\n    - arg 2: True North (calc)\r\n    - arg 3: Magnetic Declination (look up)\r\n    - arg 4: SVG size in centimeters\r\n\r\n    Example usage:
\n northref(0, 3.4, -7, 2.5)\n \n \"\"\"\n return generatesvg(grid_north, magnetic_north, true_north, size_svg)\n","repo_name":"klakar/LayoutLoader","sub_path":"profile/python/expressions/generatesvg.py","file_name":"generatesvg.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"13452233393","text":"from collections import namedtuple\n\nfrom pyedj.compute.fields.field_factory import FieldFactory\n\n\nclass EventTypeFactory(type):\n\n def __new__(self, name, schema, checked=True):\n name = name + 'Event'\n fields = [k for k, v in schema['fields'].items()]\n\n if checked:\n slot_vars = ['_' + k for k, v in schema['fields'].items()]\n slots = {'__slots__': slot_vars}\n inst_vars = {k: FieldFactory(k, v) for k, v in schema['fields'].items()}\n all_attrs = {**slots, **inst_vars}\n X = type(name, (object,), all_attrs)\n\n def init(self, *args, **kwargs):\n x = {k: v for k, v in self.__class__.__dict__.items() if not k.startswith('__') and k not in self.__slots__}\n\n for k, v in x.items():\n setattr(self, k, v.default)\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n X.__init__ = init\n\n else:\n X = namedtuple(name, fields)\n\n return X\n\n def __init__(self, name, schema, checked=True):\n pass","repo_name":"fstakem/pyedj","sub_path":"pyedj/compute/event_type_factory.py","file_name":"event_type_factory.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"36322938885","text":"from typing import List, Tuple, Dict\nimport re\n\n\nclass WireGrid():\n def __init__(self):\n self.grid = dict()\n self.wirecount = 0\n\n def addwireinstructions(self, instructions: List[str]) -> None:\n x = 0\n y = 0\n length = 0\n self.addwirenode(x, y, length)\n for i in instructions:\n m = re.match(r\"(\\w)(\\d+)\", i)\n if m.group(1) == 'U':\n for _ in range(int(m.group(2))):\n y += 1\n length += 1\n self.addwirenode(x, y, length)\n elif m.group(1) == 'D':\n for _ in range(int(m.group(2))):\n y -= 1\n length += 1\n self.addwirenode(x, y, length)\n elif m.group(1) == 'R':\n for _ in range(int(m.group(2))):\n x += 1\n length += 1\n self.addwirenode(x, y, length)\n elif m.group(1) == 'L':\n for _ in range(int(m.group(2))):\n x -= 1\n length += 1\n self.addwirenode(x, y, length)\n self.wirecount += 1\n\n def addwirenode(self, x: int, y: int, length: int):\n self.grid.setdefault((x, y), dict())\n self.grid[(x, y)].setdefault(self.wirecount, length)\n\n def getintersections(self) -> List[Tuple[int, int]]:\n return [(k, v) for k, v in self.grid.items() if (len(v.keys()) > 1 and k != (0, 0))]\n\n\ndef getminimummanhatten(intersections: List[Tuple[Tuple[int, int], Dict[int, int]]]) -> int:\n return min([abs(x[0][0]) + abs(x[0][1]) for x in intersections])\n\n\ndef getminimumsteps(intersections: List[Tuple[Tuple[int, int], Dict[int, int]]]):\n return min([sum(x[1].values()) for x in intersections])\n","repo_name":"General-Chaos/AdventofCode2019","sub_path":"3/mod3.py","file_name":"mod3.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40840444185","text":"# Codreanu Radu Stefan\r\n# Grupa 461\r\n\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Exercitiul 2\r\n\r\nt = np.linspace(0, 1, 2000)\r\nt_es = np.linspace(0, 1, 9)\r\nfig, axs = plt.subplots(4)\r\naxs[0].plot(t, np.sin(2 * 
np.pi * 18 * t), c=\"magenta\")\r\n\r\naxs[1].plot(t, np.sin(2 * np.pi * 18 * t), c=\"magenta\")\r\naxs[1].scatter(t_es, np.sin(2 * np.pi * 18 * t_es), c=\"yellow\")\r\n\r\naxs[2].plot(t, np.sin(2 * np.pi * 10 * t), c=\"purple\")\r\naxs[2].scatter(t_es, np.sin(2 * np.pi * 10 * t_es), c=\"yellow\")\r\n\r\naxs[3].plot(t, np.sin(2 * np.pi * 6 * t), c=\"green\")\r\naxs[3].scatter(t_es, np.sin(2 * np.pi * 6 * t_es), c=\"yellow\")\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n#Not working properly\r\n","repo_name":"radustefan2311/461-TemeProcesareaSemnalelorCTI","sub_path":"Laborator-4-final/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15929707254","text":"import os\n\nfrom arg import init_config, compilation_args\n\nfrom compile.mass_compile import compiler_run\nfrom compile.make_runner import make_runner\n\nargs = init_config(compilation_args)\n\nmutant_models_dir = os.path.join(args.mutants_dir, args.model_name, str(args.seed_number),\n args.mutation_method, \"models\")\n\nresult_dir = os.path.join(args.compile_record_dir, args.compiler_name,\n args.model_name, str(args.seed_number),\n args.mutation_method)\nprint(\"Result saving directory is\", result_dir)\n\nrunner = make_runner(args.compiler_name, args.compiler_path, \"default\", True)\ncompiler_run(runner, mutant_models_dir, args.input_data_path, result_dir,\n args.retain_build, args.mode, args.compile_list)\n","repo_name":"Wilbur-Django/Testing-DNN-Compilers","sub_path":"compile_run.py","file_name":"compile_run.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"9256072886","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 15 11:41:48 2020\r\n\r\n@author: straw\r\n\"\"\"\r\nimport re\r\n\r\nfile = open(\"data.txt\", \"r\")\r\ncontent = file.read()\r\nfile.close()\r\nres = re.findall(r\"\\w+\", content)\r\nprint('Nombre de mots présents dans le fichier : {}.'.format(len(res)))\r\n\r\n","repo_name":"kawthar-eltarr/AI-Runtrack-1","sub_path":"jour03/word_finder.py","file_name":"word_finder.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7281066727","text":"__author__ = \"geffner@google.com (Jason Geffner)\"\n__version__ = \"2.0\"\n\nimport json\nimport re\nimport subprocess\nimport threading\nimport traceback\n\n\"\"\"\n# r0capture\n\nID: r0ysue \n\n安卓应用层抓包通杀脚本\n\nhttps://github.com/r0ysue/r0capture\n\n## 简介\n\n- 仅限安卓平台,测试安卓7、8、9、10 可用 ;\n- 无视所有证书校验或绑定,无视任何证书;\n- 通杀TCP/IP四层模型中的应用层中的全部协议;\n- 通杀协议包括:Http,WebSocket,Ftp,Xmpp,Imap,Smtp,Protobuf等等、以及它们的SSL版本;\n- 通杀所有应用层框架,包括HttpUrlConnection、Okhttp1/3/4、Retrofit/Volley等等;\n\"\"\"\n\n# Windows版本需要安装库:\n# pip install 'win_inet_pton'\n# pip install hexdump\nimport argparse\nimport os\nimport pprint\nimport random\nimport signal\nimport socket\nimport struct\nimport sys\nimport time\nfrom pathlib import Path\n\nimport frida\nfrom loguru import logger\n\ntry:\n if os.name == 'nt':\n import win_inet_pton\nexcept ImportError:\n # win_inet_pton import error\n pass\n\ntry:\n import myhexdump as hexdump # pylint: disable=g-import-not-at-top\nexcept ImportError:\n pass\ntry:\n from shutil import get_terminal_size as get_terminal_size\nexcept:\n try:\n from backports.shutil_get_terminal_size import get_terminal_size as get_terminal_size\n except:\n 
pass\n\ntry:\n import click\nexcept:\n class click:\n @staticmethod\n def secho(message=None, **kwargs):\n print(message)\n\n @staticmethod\n def style(**kwargs):\n raise Exception(\"unsupported style\")\nbanner = \"\"\"\n--------------------------------------------------------------------------------------------\n .oooo. . \n d8P'`Y8b .o8 \noooo d8b 888 888 .ooooo. .oooo. oo.ooooo. .o888oo oooo oooo oooo d8b .ooooo. \n`888\"\"8P 888 888 d88' `\"Y8 `P )88b 888' `88b 888 `888 `888 `888\"\"8P d88' `88b \n 888 888 888 888 .oP\"888 888 888 888 888 888 888 888ooo888 \n 888 `88b d88' 888 .o8 d8( 888 888 888 888 . 888 888 888 888 .o \nd888b `Y8bd8P' `Y8bod8P' `Y888\"\"8o 888bod8P' \"888\" `V88V\"V8P' d888b `Y8bod8P' \n 888 \n o888o \n https://github.com/r0ysue/r0capture\n--------------------------------------------------------------------------------------------\\n\n\"\"\"\n\n\ndef show_banner():\n colors = ['bright_red', 'bright_green', 'bright_blue', 'cyan', 'magenta']\n try:\n click.style('color test', fg='bright_red')\n except:\n colors = ['red', 'green', 'blue', 'cyan', 'magenta']\n try:\n columns = get_terminal_size().columns\n if columns >= len(banner.splitlines()[1]):\n for line in banner.splitlines():\n click.secho(line, fg=random.choice(colors))\n except:\n pass\n\n\n# ssl_session[] = (,\n# )\nssl_sessions = {}\ndef append_to_json_file(file_path, data, file_lock):\n with file_lock:\n if os.path.exists(file_path) and os.path.getsize(file_path) > 0:\n # 如果文件存在且非空,则读取原有数据,并将新数据追加到数组中\n with open(file_path, 'r', encoding='utf-8') as json_file:\n json_data = json.load(json_file)\n json_data.append(data)\n else:\n # 如果文件不存在或为空,则创建一个包含新数据的数组\n json_data = [data]\n\n # 将更新后的数据重新写入JSON文件\n with open(file_path, 'w', encoding='utf-8') as json_file:\n json.dump(json_data, json_file, ensure_ascii=False, indent=4)\n\ndef process_function(file_path, data, file_lock):\n append_to_json_file(file_path, data, file_lock)\n\ndef resource_monitor(emulator_address, appInfo, interval, duration, filelock):\n package_name = appInfo['package_name']\n gfxinfo_cmd = f\"adb -s {emulator_address} shell dumpsys gfxinfo {package_name}\"\n meminfo_cmd = f\"adb -s {emulator_address} shell dumpsys meminfo {package_name}\"\n\n end_time = time.time() + duration\n Janky_frames_List = []\n Caches_frames_List = []\n Total_Memory_List = []\n while time.time() < end_time:\n try:\n gpu_usage = subprocess.check_output(gfxinfo_cmd, shell=True).decode()\n Janky_frames = re.findall(r'Janky frames:\\s+(\\d+)\\s+\\(([\\d.]+)%\\)', gpu_usage)\n Caches = re.findall(r'Total memory usage:\\s+(\\d+) bytes', gpu_usage)\n if Janky_frames and Caches:\n Janky_frames_List.append(Janky_frames[0])\n Caches_frames_List.append(Caches[0])\n\n # 获取内存使用情况\n memory_usage = subprocess.check_output(meminfo_cmd, shell=True).decode()\n total_memory = re.findall(r\"TOTAL:\\s+(\\d+)\", memory_usage)\n if total_memory:\n Total_Memory_List.append(total_memory[0]+'KB')\n time.sleep(interval)\n except Exception as e:\n print(\"出现异常:\",traceback.print_exc(),e)\n appInfo['Janky_frames_List'] = Janky_frames_List\n appInfo['Caches_frames_List'] = Caches_frames_List\n appInfo['Total_Memory_List'] = Total_Memory_List\n print(appInfo)\n process_function(file_path='RunTimeAppStatus_normal_new.json', data=appInfo, file_lock=filelock)\ndef ssl_log(process, pcap=None, host=False, verbose=False, isUsb=False, ssllib=\"\", isSpawn=True, wait=0, worktime=0,emulator_address=None, file_lock=None,apk_file=None):\n def log_pcap(pcap_file, ssl_session_id, function, src_addr, 
src_port,\n dst_addr, dst_port, data):\n t = time.time()\n\n if ssl_session_id not in ssl_sessions:\n ssl_sessions[ssl_session_id] = (random.randint(0, 0xFFFFFFFF),\n random.randint(0, 0xFFFFFFFF))\n client_sent, server_sent = ssl_sessions[ssl_session_id]\n\n if function == \"SSL_read\":\n seq, ack = (server_sent, client_sent)\n else:\n seq, ack = (client_sent, server_sent)\n\n for writes in (\n # PCAP record (packet) header\n (\"=I\", int(t)), # Timestamp seconds\n (\"=I\", int((t * 1000000) % 1000000)), # Timestamp microseconds\n (\"=I\", 40 + len(data)), # Number of octets saved\n (\"=i\", 40 + len(data)), # Actual length of packet\n # IPv4 header\n (\">B\", 0x45), # Version and Header Length\n (\">B\", 0), # Type of Service\n (\">H\", 40 + len(data)), # Total Length\n (\">H\", 0), # Identification\n (\">H\", 0x4000), # Flags and Fragment Offset\n (\">B\", 0xFF), # Time to Live\n (\">B\", 6), # Protocol\n (\">H\", 0), # Header Checksum\n (\">I\", src_addr), # Source Address\n (\">I\", dst_addr), # Destination Address\n # TCP header\n (\">H\", src_port), # Source Port\n (\">H\", dst_port), # Destination Port\n (\">I\", seq), # Sequence Number\n (\">I\", ack), # Acknowledgment Number\n (\">H\", 0x5018), # Header Length and Flags\n (\">H\", 0xFFFF), # Window Size\n (\">H\", 0), # Checksum\n (\">H\", 0)): # Urgent Pointer\n pcap_file.write(struct.pack(writes[0], writes[1]))\n pcap_file.write(data)\n\n if function == \"SSL_read\":\n server_sent += len(data)\n else:\n client_sent += len(data)\n ssl_sessions[ssl_session_id] = (client_sent, server_sent)\n\n def on_message(message, data):\n \"\"\"Callback for errors and messages sent from Frida-injected JavaScript.\n Logs captured packet data received from JavaScript to the console and/or a\n pcap file. See https://www.frida.re/docs/messages/ for more detail on\n Frida's messages.\n Args:\n message: A dictionary containing the message \"type\" and other fields\n dependent on message type.\n data: The string of captured decrypted data.\n \"\"\"\n if message[\"type\"] == \"error\":\n logger.info(f\"{message}\")\n os.kill(os.getpid(), signal.SIGTERM)\n return\n if len(data) == 1:\n logger.info(f'{message[\"payload\"][\"function\"]}')\n logger.info(f'{message[\"payload\"][\"stack\"]}')\n return\n p = message[\"payload\"]\n if verbose:\n src_addr = socket.inet_ntop(socket.AF_INET,\n struct.pack(\">I\", p[\"src_addr\"]))\n dst_addr = socket.inet_ntop(socket.AF_INET,\n struct.pack(\">I\", p[\"dst_addr\"]))\n session_id = p['ssl_session_id']\n logger.info(f\"SSL Session: {session_id}\")\n logger.info(\"[%s] %s:%d --> %s:%d\" % (\n p[\"function\"],\n src_addr,\n p[\"src_port\"],\n dst_addr,\n p[\"dst_port\"]))\n gen = hexdump.hexdump(data, result=\"generator\",only_str=True)\n str_gen = ''.join(gen)\n logger.info(f\"{str_gen}\")\n logger.info(f\"{p['stack']}\")\n if pcap:\n log_pcap(pcap_file, p[\"ssl_session_id\"], p[\"function\"], p[\"src_addr\"],\n p[\"src_port\"], p[\"dst_addr\"], p[\"dst_port\"], data)\n\n if isUsb:\n try:\n device = frida.get_usb_device()\n except:\n device = frida.get_remote_device()\n else:\n if host:\n manager = frida.get_device_manager()\n device = manager.add_remote_device(host)\n else:\n device = frida.get_local_device()\n\n if isSpawn:\n pid = device.spawn([process])\n start_time = time.time()\n time.sleep(1)\n session = device.attach(pid)\n time.sleep(1)\n device.resume(pid)\n\n else:\n print(\"attach\")\n session = device.attach(process)\n if wait > 0:\n print(f\"wait for {wait} seconds\")\n time.sleep(wait)\n if pcap:\n 
pcap_file = open(pcap, \"wb\", 0)\n for writes in (\n (\"=I\", 0xa1b2c3d4), # Magic number\n (\"=H\", 2), # Major version number\n (\"=H\", 4), # Minor version number\n (\"=i\", time.timezone), # GMT to local correction\n (\"=I\", 0), # Accuracy of timestamps\n (\"=I\", 65535), # Max length of captured packets\n (\"=I\", 228)): # Data link type (LINKTYPE_IPV4)\n pcap_file.write(struct.pack(writes[0], writes[1]))\n\n with open(Path(__file__).resolve().parent.joinpath(\"./script.js\"), encoding=\"utf-8\") as f:\n _FRIDA_SCRIPT = f.read()\n script = session.create_script(_FRIDA_SCRIPT)\n script.on(\"message\", on_message)\n script.load()\n\n if ssllib != \"\":\n script.exports.setssllib(ssllib)\n def stoplog(signum, frame):\n session.detach()\n if pcap:\n pcap_file.flush()\n pcap_file.close()\n exit()\n signal.signal(signal.SIGINT, stoplog)\n signal.signal(signal.SIGTERM, stoplog)\n\n app_info = {}\n package_name = process\n app_info['package_name'] = package_name\n\n\n try:\n top_cmd = f\"adb -s {emulator_address} shell top -n 1 -p $(pidof {package_name})\"\n top_output = subprocess.check_output(top_cmd, shell=True).decode()\n top_info = top_output.split('\\n')[-1].split(' ')\n top_info = [x for x in top_info if x]\n app_info['cpu_usage'] = top_info[-4]\n except Exception as e:\n app_info['cpu_usage'] = ' '\n print(\"出现异常:\",traceback.print_exc(),e)\n return\n\n # 判断应用是否启动成功\n time.sleep(5)\n windows = subprocess.run(['adb', '-s', emulator_address, 'shell', 'dumpsys', 'window', 'windows'],\n capture_output=True, text=True, check=True).stdout\n\n if f'Application Error: {package_name}' in windows:\n print(f'应用启动失败,应用包名:{package_name}')\n with open('checked_apps_normal.txt', 'a') as f:\n f.write(f'{apk_file},{package_name},False\\n')\n else:\n # 开始资源监控线程\n monitor_thread = threading.Thread(target=resource_monitor, args=(emulator_address, app_info, 5, worktime, file_lock))\n monitor_thread.start()\n\n time.sleep(worktime)\n # 等待资源监控线程完成\n monitor_thread.join()\n # 停止应用\n subprocess.run(['adb', '-s', emulator_address, 'shell', 'am', 'force-stop', package_name], check=True)\n\n session.detach()\n if pcap:\n pcap_file.flush()\n pcap_file.close()\n exit()\n\n # sys.stdin.read()\n\n#\n# if __name__ == \"__main__\":\n# # show_banner()\n#\n#\n# class ArgParser(argparse.ArgumentParser):\n#\n# def error(self, message):\n# print(\"ssl_logger v\" + __version__)\n# print(\"by \" + __author__)\n# print(\"Modified by BigFaceCat\")\n# print(\"Error: \" + message)\n# print()\n# print(self.format_help().replace(\"usage:\", \"Usage:\"))\n# self.exit(0)\n#\n#\n# parser = ArgParser(\n# add_help=False,\n# description=\"Decrypts and logs a process's SSL traffic.\",\n# formatter_class=argparse.RawDescriptionHelpFormatter,\n# epilog=r\"\"\"\n# Examples:\n# %(prog)s -pcap ssl.pcap openssl\n# %(prog)s -verbose 31337\n# %(prog)s -pcap log.pcap -verbose wget\n# %(prog)s -pcap log.pcap -ssl \"*libssl.so*\" com.bigfacecat.testdemo\n# \"\"\")\n# # 本机 python r0capture.py -U -f io.faceapp -t 10 -p D:\\secComm\\r0capture\\pcap\\test.pcap\n# # 远控 python r0capture.py -f io.faceapp -t 10 -p D:\\secComm\\r0capture\\pcap\\test.pcap -H 127.0.0.1:1234\n# args = parser.add_argument_group(\"Arguments\")\n# args.add_argument(\"-pcap\", '-p', metavar=\"\", required=False,\n# help=\"Name of PCAP file to write\")\n# args.add_argument(\"-host\", '-H', metavar=\"<192.168.1.1:27042>\", required=False,\n# help=\"connect to remote frida-server on HOST\")\n# args.add_argument(\"-verbose\", \"-v\", required=False, action=\"store_const\", 
default=True,\n# const=True, help=\"Show verbose output\")\n# args.add_argument(\"process\", metavar=\"\",\n# help=\"Process whose SSL calls to log\")\n# args.add_argument(\"-ssl\", default=\"\", metavar=\"\",\n# help=\"SSL library to hook\")\n# args.add_argument(\"--isUsb\", \"-U\", default=False, action=\"store_true\",\n# help=\"connect to USB device\")\n# args.add_argument(\"--isSpawn\", \"-f\", default=False, action=\"store_true\",\n# help=\"if spawned app\")\n# args.add_argument(\"-wait\", \"-w\", type=int, metavar=\"\", default=0,\n# help=\"Time to wait for the process\")\n# args.add_argument(\"-time\", '-t', metavar=\"work time\", required=False,\n# help=\"Setting the work time\",type=int)\n# parsed = parser.parse_args()\n# # logger.add(f\"{parsed.process.replace('.','_')}-{int(time.time())}.log\", rotation=\"500MB\", encoding=\"utf-8\", enqueue=True, retention=\"10 days\")\n# print(parsed)\n# ssl_log(\n# int(parsed.process) if parsed.process.isdigit() else parsed.process,\n# parsed.pcap,\n# parsed.host,\n# parsed.verbose,\n# isUsb=parsed.isUsb,\n# isSpawn=parsed.isSpawn,\n# ssllib=parsed.ssl,\n# wait=parsed.wait,\n# worktime=parsed.time\n# )\n","repo_name":"ayjin-dev/DLAppInspector","sub_path":"r0capture/r0capture.py","file_name":"r0capture.py","file_ext":"py","file_size_in_byte":16056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1507118917","text":"import idaapi\n\nfrom . import actions\nimport HexRaysPyTools.core.helper as helper\nimport HexRaysPyTools.core.struct_xrefs as struct_xrefs\nimport HexRaysPyTools.forms as forms\n\n\nclass FindFieldXrefs(actions.HexRaysPopupAction):\n description = \"Field Xrefs\"\n hotkey = \"Ctrl+X\"\n\n def __init__(self):\n super(FindFieldXrefs, self).__init__()\n\n def check(self, hx_view):\n return hx_view.item.citype == idaapi.VDI_EXPR and \\\n hx_view.item.it.to_specific_type.op in (idaapi.cot_memptr, idaapi.cot_memref)\n\n def activate(self, ctx):\n hx_view = idaapi.get_widget_vdui(ctx.widget)\n if not self.check(hx_view):\n return\n\n data = []\n offset = hx_view.item.e.m\n struct_type = idaapi.remove_pointer(hx_view.item.e.x.type)\n ordinal = helper.get_ordinal(struct_type)\n result = struct_xrefs.XrefStorage().get_structure_info(ordinal, offset)\n for xref_info in result:\n data.append([\n idaapi.get_short_name(xref_info.func_ea) + \"+\" + hex(int(xref_info.offset)),\n xref_info.type,\n xref_info.line\n ])\n\n field_name = helper.get_member_name(struct_type, offset)\n chooser = forms.MyChoose(\n data,\n \"Cross-references to {0}::{1}\".format(struct_type.dstr(), field_name),\n [[\"Function\", 20 | idaapi.Choose.CHCOL_PLAIN],\n [\"Type\", 2 | idaapi.Choose.CHCOL_PLAIN],\n [\"Line\", 40 | idaapi.Choose.CHCOL_PLAIN]]\n )\n idx = chooser.Show(True)\n if idx == -1:\n return\n\n xref = result[idx]\n idaapi.open_pseudocode(xref.func_ea + xref.offset, False)\n\nactions.action_manager.register(FindFieldXrefs())\n","repo_name":"igogo-x86/HexRaysPyTools","sub_path":"HexRaysPyTools/callbacks/struct_xref_representation.py","file_name":"struct_xref_representation.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":1288,"dataset":"github-code","pt":"32"} +{"seq_id":"26963237121","text":"# -*- coding+: utf-8 -*-\n\"\"\"\n@author:Kirito\n@file:Email.py\n@time:2021/04/19\n@describe:发送邮件\n\"\"\"\nimport smtplib\nimport os\nimport logging\nfrom Comm.log import log_init\nfrom Conf.config import smtp_cfg, email_cfg\nfrom email.header import 
Header\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n\nlog_init()\nlogger = logging.getLogger('Mario.email')\n\n# 文件大小限制20M\nfile_size = 20\n# 文件大小限制10个\nfile_count = 10\n\n\nclass Email:\n\n def __init__(self, subject, context=None, attachment=None):\n '''\n 构造函数\n :param subject:邮件标题\n :param context:邮件正文\n :param attachment:邮件附件\n '''\n self.subject = subject\n self.context = context\n self.attachment = attachment\n # 发送带附件的邮件,首先要创建MIMEMultipart()实例,然后构造附件,如果有多个附件,可依次构造,最后利用smtplib.smtp发送。\n self.message = MIMEMultipart()\n self.message_init()\n\n def message_init(self):\n '''\n 邮件内容处理\n :return:\n '''\n # 邮件标题\n if self.subject:\n self.message['Subject'] = Header(self.subject, 'utf-8')\n else:\n raise ValueError(\"无效的标题:{},请输入正确的标题!\".format(self.subject))\n logger.error(\"无效的标题:{},请输入正确的标题!\".format(self.subject))\n # 邮件发件人\n self.message['Form'] = email_cfg['sender']\n # 邮件收件人\n self.message['To'] = email_cfg['receivers']\n # 邮件正文内容\n if self.context:\n self.message.attach(MIMEText(self.context, 'html', 'utf-8'))\n # 邮箱附件\n if self.attachment:\n # isinstance() 函数来判断一个对象是否是一个已知的类型;判断是否为单个文件\n if isinstance(self.attachment, str):\n self.attach_handle(self.attachment)\n # 判断是否为多个文件\n if isinstance(self.attachment, list):\n count = 0\n # 循环多个文件\n for each in self.attachment:\n # 判断文件数量是否等于小于预设值\n if count <= file_count:\n self.attach_handle(each)\n count += 1\n else:\n logger.warning(\"附件数量超过预设值:{}个\".format(file_count))\n break\n\n def attach_handle(self, file):\n '''\n 附件处理\n :param file:附件\n :return:\n '''\n # 判断是否为文件并且大小是否符合预设值\n if os.path.isfile(file) and os.path.getsize(file) <= file_size * 1024 * 1024:\n attach = MIMEApplication(open(file, 'rb').read())\n attach.add_header('Content-Disposition', 'attachment', filename=os.path.basename(file))\n attach[\"Content-Type\"] = 'application/octet-stream'\n self.message.attach(attach)\n else:\n logger.error('附件超过{0}M,或者{1}不存在'.format(file_size, file))\n\n def send_mail(self):\n '''\n 发送邮件\n :return:发送结果\n '''\n # 创建邮件发送连接(smtp有两个端口号:465.587)\n conn = smtplib.SMTP_SSL(smtp_cfg['host'], int(smtp_cfg['port']))\n logger.info(\"连接邮箱成功~host:{0},port:{1}\".format(smtp_cfg['host'], smtp_cfg['port']))\n # 邮件发送结果变量\n result = True\n try:\n # 登陆邮件\n conn.login(smtp_cfg['user'], smtp_cfg['password'])\n logger.info('登陆邮箱成功~ 登陆用户名:{}'.format(smtp_cfg['user']))\n conn.sendmail(email_cfg['sender'], email_cfg['receivers'], self.message.as_string())\n logger.info(\"获取发件人信息成功:{0},获取收件人成功:{1}\".format(email_cfg['sender'], email_cfg['receivers']))\n except smtplib.SMTPAuthenticationError:\n result = False\n logger.error(\"登陆邮箱失败~请检查账号密码是否正确!\", exc_info=True)\n except smtplib.SMTPException:\n result = False\n logger.error(\"发送邮件失败!\", exc_info=True)\n finally:\n conn.close()\n logger.info('关闭邮箱连接~')\n return result\n\n# https://www.baidu.com/s?ie=UTF-8&wd=BeautifulReport\n# 自动化测试报告\n# BeautifulReport\n\n# mail = Email('测试组第三周周报','第一次发送')\n# send = mail.send_mail()\n# print(send)\n","repo_name":"Mario-1107/GtmshApiTestFrame","sub_path":"Comm/Email.py","file_name":"Email.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39617946686","text":"\"\"\"Binary/String Conversion\r\nfor final project\"\"\"\r\n\r\n\r\n#create binary class\r\nclass Binary(object):\r\n def __init__(self):\r\n object.__init__(self)\r\n\r\n\r\n ###---encrypt method---###\r\n def 
encrypt(self):\r\n binText = \"\"\r\n\r\n encode = input(\"\"\"\r\nPlease enter the message you would like to convert to binary:\r\n\r\n\"\"\")\r\n\r\n #for each char in encode, convert char to binary, seperate by -\r\n #use ord(), to find ASCII code and display in binary\r\n binText = \"-\".join(format(ord(char), \"b\") for char in encode)\r\n\r\n #print converted message\r\n print (\"\"\"\r\nYour converted message is:\r\n\r\n{}\"\"\".format(binText))\r\n\r\n\r\n ###---decrpyt method---###\r\n def decrypt(self):\r\n plainText = \"\"\r\n\r\n decode = input(\"\"\"\r\nPlease enter the binary message you would like to convert.\r\nCharacters are seperated by a '-':\r\n\r\n\"\"\")\r\n\r\n #for each binary item in decode...(use split(\"-\") to seperate binary\r\n #characters) convert binary number to string\r\n #use chr() to find ASCII character for number\r\n #binary string must be converted to decimal int\r\n for item in decode.split(\"-\"):\r\n i = chr(int(item, base=2))\r\n\r\n plainText += i\r\n\r\n print(\"\"\"\r\nYour converted message is:\r\n\r\n{}\"\"\".format(plainText))\r\n\r\n\r\n###---testing main---### \r\ndef main():\r\n c = Binary()\r\n c.encrypt()\r\n c.decrypt()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n \r\n","repo_name":"jdhaines3/Cipher-Project-Computing","sub_path":"Final Project computing/Binary.py","file_name":"Binary.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36618502631","text":"import os\nimport time\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nos.environ['WANDB'] = '0'\n\nfrom train import train\nfrom config import Config\n\nconfig = Config()\n\nconfig.seed = 2021\nconfig.root_dir = \"croppedData\"\nconfig.train_batch_size = 10000\nconfig.test_batch_size = 10000\nconfig.lr = 1e-3\nconfig.test_ratio = 0.2\nconfig.num_epoch = 200\nconfig.grad_accum_step = 1\nconfig.data_parallel = False\nconfig.optim_type = \"Adam\"\nconfig.loss_type = \"CE\"\nconfig.model_type = \"resnet18\"\nconfig.gray_scale = False\nconfig.pretrain = True\nconfig_cfg_str = \"\"\nconfig_cfg_str += \"gray\" if config.gray_scale else \"\"\nconfig_cfg_str += \"_pretrain\" if config.pretrain else \"\"\n\ncurrent_time = time.strftime(\"%m%d_%H%M\") #%Y%S\ncurrent_time = \"0\"\nconfig.dest_path = os.path.join(\"ckpt\", current_time)\n\n\nf = open(\"performance.csv\", \"a\")\n\nperformance = [] \nfor model_type in [\"resnet18\"]: \n for lr in [1e-3]:\n\n config.model_type = model_type\n config.lr = lr\n config.dest_path = os.path.join(\"ckpt\", f\"{model_type}_lr{lr}_{config.loss_type}_{config_cfg_str}\")\n\n min_loss, best_acc = train(config)\n\n config.dump(os.path.join(config.dest_path, \"config.json\"))\n print(\"{}, lr={}, loss_type={}, min_loss={:6f}, best_acc={:6f}%\\n\".format(\n model_type, lr, config.loss_type, min_loss, best_acc*100))\n\nf.close()\n\n \n","repo_name":"pavlion/Hand-written-character-recognition","sub_path":"src/run_null.py","file_name":"run_null.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4396821737","text":"\"\"\"Callback for dealing with interaction events in BARD\"\"\"\n\nfrom platform import system\nfrom subprocess import run, CalledProcessError\nfrom collections import deque\nfrom time import time\nimport numpy as np\n\nimport sksurgerycore.transforms.matrix as sksmat\n\n\nclass BardKBEvent:\n \"\"\"\n Handles keyboard events for BARD.\n 
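Key bindings: 'd' records the pointer tip, 'b' cycles anatomy\n    visibility, 'n' selects the next target, 'm' turns on all targets,\n    '5t6y7u' translate and '8i9o0p' rotate the model, and Up/Down\n    stop/start tracking.\n    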
\"\"\"\n def __init__(self, pointer_writer, visualisation_control,\n bard_widget):\n self._pointer_writer = pointer_writer\n self._visualisation_control = visualisation_control\n self._bard_widget = bard_widget\n\n def __call__(self, event, _event_type_not_used):\n key = event.GetKeySym()\n if key == 'd':\n self._pointer_writer.write_pointer_tip()\n if key == 'b':\n self._visualisation_control.cycle_visible_anatomy_vis()\n if key == 'n':\n self._visualisation_control.next_target()\n if key == 'm':\n self._visualisation_control.turn_on_all_targets()\n if key in '5t6y7u':\n self._translate_model(key)\n if key in '8i9o0p':\n self._rotate_model(key)\n if key == 'Up':\n self._bard_widget.tracker.stop_tracking()\n if key == 'Down':\n self._bard_widget.tracker.start_tracking()\n\n\n def _translate_model(self, key):\n \"\"\"\n Handles model tranlations.\n\n :param key: key code defining direction of translation\n :raises: Value error is key not in valid range\n \"\"\"\n if key not in ('5t6y7u'):\n raise ValueError(\"Invalid key value\")\n\n distance = 1.0\n direction = 1.0\n if key in 'tyu':\n direction = -1.0\n\n translation = np.array([0.0, 0.0, 0.0])\n rotation = np.eye(3)\n if key in '5t':\n translation = np.array([distance * direction, 0.0, 0.0])\n if key in '6y':\n translation = np.array([0.0, distance * direction, 0.0])\n if key in '7u':\n translation = np.array([0.0, 0.0, distance * direction])\n\n increment = sksmat.construct_rigid_transformation(rotation, translation)\n self._bard_widget.position_model_actors(increment)\n\n def _rotate_model(self, key):\n \"\"\"\n Handles model tranlations.\n\n :param key: key code defining direction of rotation\n :raises: Value error is key not in valid range\n \"\"\"\n if key not in ('8i9o0p'):\n raise ValueError(\"Invalid key value\")\n\n distance = 1.0\n is_in_radians = False\n direction = 1.0\n\n if key in 'iop':\n direction = -1.0\n\n translation = np.array([0.0, 0.0, 0.0])\n rotation = np.eye(3)\n if key in '8i':\n rotation = sksmat.construct_rx_matrix(distance * direction,\n is_in_radians)\n if key in '9o':\n rotation = sksmat.construct_ry_matrix(distance * direction,\n is_in_radians)\n if key in '0p':\n rotation = sksmat.construct_rz_matrix(distance * direction,\n is_in_radians)\n\n increment = sksmat.construct_rigid_transformation(rotation, translation)\n self._bard_widget.position_model_actors(increment)\n\n\nclass BardFootSwitchEvent:\n \"\"\"\n Handles footswitch events for BARD.\n This is for the footswitch in our lab,\n which plugs into USB and has three buttons, that\n return ctrl-alt[5,6,7]\n \"\"\"\n def __init__(self, maximum_delay, visualisation_control):\n \"\"\"\n param: maximum delay (s) between first key in sequence and last\n \"\"\"\n #disable ctrl-alt-f[] events on linux systems\n if system() == 'Linux':\n try:\n _ = run(['setxkbmap', '-option', 'srvrkeys:none'], check=True)\n except CalledProcessError:\n print(\"Failed to disable ctrl-alt-f[]\",\n \"using the footpedal may have unpredictable results\")\n\n self._time_tol = maximum_delay\n self._key_buff = deque(maxlen=3)\n self._time_stamps = deque(maxlen=3)\n for _ in range(3):\n self._key_buff.append('null')\n self._time_stamps.append(0)\n\n self._visualisation_control = visualisation_control\n\n def __call__(self, event, _event_type_not_used):\n self._key_buff.append(event.GetKeySym())\n self._time_stamps.append(time())\n\n if self._key_buff[0] == 'Control_L' and self._key_buff[1] == 'Alt_L':\n if (self._time_stamps[2] - self._time_stamps[0]) < self._time_tol:\n if 
self._key_buff[2] == 'F5':\n self._visualisation_control.cycle_visible_anatomy_vis()\n if self._key_buff[2] == 'F6':\n self._visualisation_control.next_target()\n if self._key_buff[2] == 'F7':\n self._visualisation_control.turn_on_all_targets()\n\n def __del__(self):\n #reenable ctrl-alt-f[] events on linux systems\n print(\"killing footswitch\")\n if system() == 'Linux':\n try:\n print(\"resetting keyboard\")\n _ = run(['setxkbmap'], check=True)\n _ = run(['setxkbmap', '-option'], check=True)\n except CalledProcessError:\n print(\"Failed to reset xkbmap srvrkeys, sorry.\")\n\n\nclass BardMouseEvent:\n \"\"\"\n Handles mouse events for BARD.\n \"\"\"\n def __init__(self, visualisation_control):\n self.screen_interaction_layout = {\n 'x_right_edge' : 0.80,\n 'x_left_edge' : 0.20\n }\n\n self._visualisation_control = visualisation_control\n\n def __call__(self, event, _event_type_not_used):\n mouse_x, mouse_y = event.GetEventPosition()\n window_x, window_y = event.GetSize()\n\n mouse_x /= window_x\n mouse_y /= window_y\n\n if mouse_x > self.screen_interaction_layout.get('x_right_edge'):\n self._visualisation_control.visibility_toggle(mouse_y)\n\n if mouse_x < self.screen_interaction_layout.get('x_left_edge'):\n self._visualisation_control.change_opacity(mouse_y)\n","repo_name":"SciKit-Surgery/scikit-surgerybard","sub_path":"sksurgerybard/interaction/interaction.py","file_name":"interaction.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"39763793285","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isValidBST(self, root: Optional[TreeNode]) -> bool:\n li=[]\n def DFS(node):\n if node:\n DFS(node.left)\n li.append(node.val)\n DFS(node.right)\n DFS(root)\n return li==list(sorted(set(li)))\n ","repo_name":"narendrasinghdangi/leetcode-problems","sub_path":"0098-validate-binary-search-tree/0098-validate-binary-search-tree.py","file_name":"0098-validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28865277118","text":"import os\nimport logging\nfrom sample import Sample\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom constant import CLUSTER_COLORS\nfrom constant import COLUMNS\nimport dash_core_components as dcc\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nlog.addHandler(logging.StreamHandler())\n\nTRAIN_SAMPLES_NUM = 5\nCSV_PATH = (os.path.dirname(os.path.realpath(__file__))\n + '/../network/train_data_activations_layer4.csv')\n\n\nclass Clusters:\n \"\"\"\n Class for cluster visualization of all train data together with currently\n analyzed sample and possibly 5 more highlighted train samples.\n\n Attributes\n ----------\n train_samples : list of str\n list of max 5 JSON samples, e.g. 
'30log/AST1.json'\n train_data : pd.dataFrame\n train data predictions (last layer activations + label) loaded from\n network/train_data_activations_layer4.csv\n sample_data : pd.dataFrame\n activations from the last layer and prediction for currently analyzed\n sample\n tsne_traces : list of dict\n list of dict for every possible label (result of prediction), each\n dict contains t-SNE coordinates of train data samples which were\n labeled with the corresponding label\n tsne_sample_trace : dict\n x and y coordinates of currently analyzed sample in diagram using t-SNE\n algorithm for reduction of dimensionality\n pca_traces : list of dict\n list of dict for every possible label (result of prediction), each\n dict contains PCA coordinates of train data samples which were\n labeled with the corresponding label\n pca_sample_trace : dict\n x and y coordinates of currently analyzed sample in diagram using PCA\n for reduction of dimensionality\n\n Methods\n -------\n add_sample(sample)\n If a sample wasn't provided when the Clusters instance was created,\n the sample can be added by this method. The activations from the last\n layer and the prediction (label) is read from the sample and\n the coordinates are calculated for training data and currently\n analyzed sample using both t-SNE and PCA for dimensionality reduction.\n get_figure(algorithm, height=None)\n Returns go.Figure instance of cluster diagram with coordinates\n calculated by given algorithm.\n view(dash_id, columns, algorithm, height=None)\n Returns dcc.Graph object which contains cluster diagram with\n coordinates calculated by given algorithm.\n \"\"\"\n\n def __init__(self, sample=None):\n \"\"\"\n Reads train data activations and predictions from\n network/train_data_activations_layer4.csv. If the sample is provided,\n prediction data and activations are assigned to sample_data as well as\n coordinates are calculated for training data and currently analyzed\n sample using both t-SNE and PCA for dimensionality reduction.\n\n Parameters\n ----------\n sample : Sample or None, optional\n Sample instance representing currently analysed sample contained\n in JSON file (default is None)\n \"\"\"\n\n self.train_samples = [None for _ in range(TRAIN_SAMPLES_NUM)]\n self.train_data = self.__load_train_data()\n\n if sample:\n self.sample_data = self.__load_sample_data(sample)\n log.debug('Performing fit_transform for T-SNE...')\n self.tsne_traces, self.tsne_sample_trace = (\n self.__prepare_tsne_traces()\n )\n log.debug('Performing fit_transform for PCA...')\n self.pca_traces, self.pca_sample_trace = (\n self.__prepare_pca_traces()\n )\n log.debug('Successfully finished fit_transform...')\n\n else:\n self.sample_data = None\n self.tsne_traces = None\n self.tsne_sample_trace = None\n self.pca_traces = None\n self.pca_sample_trace = None\n\n @staticmethod\n def __load_sample_data(sample: Sample) -> pd.DataFrame:\n \"\"\"\n Returns data frame containing activations from the last layer and\n the prediction (label). 
The data from provided sample is used to get\n all the information necessary.\n\n Parameters\n ----------\n sample : Sample\n Sample instance representing currently analysed sample contained\n in JSON file\n\n Returns\n -------\n pd.dataFrame\n data frame containing activations from the last layer and\n the prediction (label) for the provided sample\n \"\"\"\n\n df = pd.DataFrame()\n df['label'] = sample.label\n last_layer = list(sample.activations.keys())[-1]\n activations = sample.activations[last_layer][0].tolist()\n for i, activation in enumerate(activations):\n df['d{}'.format(i)] = [activation]\n\n return df\n\n @staticmethod\n def __load_train_data() -> pd.DataFrame:\n \"\"\"\n Reads and pre-processes train data activations and predictions from\n network/train_data_activations_layer4.csv.\n\n Returns\n -------\n pd.dataFrame\n data frame containing activations from the last layer and\n the prediction (label) for the train data\n \"\"\"\n\n df = pd.read_csv(CSV_PATH)\n df = df.dropna()\n layer = [l for l in df.columns if 'layer' in l][0]\n dimensions = str(df[layer][0]).split(' ')\n dimensions = ['d{}'.format(d) for d in range(len(dimensions))]\n df[dimensions] = df[layer].str.split(expand=True)\n df = df.drop(columns=[layer])\n\n return df\n\n def __prepare_pca_traces(self):\n \"\"\"\n Performs dimensionality reduction of train data + analyzed sample using\n PCA (Principal Component Analysis).\n\n Returns\n -------\n list of dict\n list of dict for every possible label (result of prediction), each\n dict contains PCA coordinates of train data samples which were\n labeled with the corresponding label\n dict\n x and y coordinates of currently analyzed sample in diagram using\n PCA for reduction of dimensionality\n \"\"\"\n\n labels = self.train_data['label'].tolist()\n data_files = self.train_data['data path'].tolist()\n data = self.train_data.drop(\n columns=['label', 'module path', 'data path'])\n\n # append sample module and get values\n sample_data = self.sample_data.drop(columns=['label'])\n data = data.append(sample_data, ignore_index=True)\n X = data.values\n\n X_std = StandardScaler().fit_transform(X)\n pca = PCA()\n pca.fit(X_std)\n pca_results = pca.transform(X_std)\n\n x = pca_results[:, 0]\n y = pca_results[:, 1]\n\n dimensions = len(data.columns)\n traces = [\n dict(x=list(), y=list(), text=list()) for _ in range(dimensions)\n ]\n\n # select sample point from results\n sample_trace = dict(x=x[len(x) - 1], y=y[len(y) - 1])\n\n # labels list doesn't contain sample data label, so it's OK to do this\n for i, label in enumerate(labels):\n traces[label]['x'].append(x[i])\n traces[label]['y'].append(y[i])\n traces[label]['text'].append(data_files[i])\n\n return traces, sample_trace\n\n def __prepare_tsne_traces(self):\n \"\"\"\n Performs dimensionality reduction of train data + analyzed sample using\n t-SNE (t-distributed stochastic neighbor embedding).\n\n Returns\n -------\n list of dict\n list of dict for every possible label (result of prediction), each\n dict contains t-SNE coordinates of train data samples which were\n labeled with the corresponding label\n dict\n x and y coordinates of currently analyzed sample in diagram using\n t-SNE algorithm for reduction of dimensionality\n \"\"\"\n\n labels = self.train_data['label'].tolist()\n data_files = self.train_data['data path'].tolist()\n data = self.train_data.drop(\n columns=['label', 'module path', 'data path'])\n\n # append sample module and get values\n sample_data = self.sample_data.drop(columns=['label'])\n data = 
data.append(sample_data, ignore_index=True)\n X = data.values\n\n X_std = StandardScaler().fit_transform(X)\n tsne = TSNE(n_components=2, perplexity=40)\n tsne_results = tsne.fit_transform(X_std)\n\n x = tsne_results[:, 0]\n y = tsne_results[:, 1]\n\n dimensions = len(data.columns)\n traces = [\n dict(x=list(), y=list(), text=list()) for _ in range(dimensions)\n ]\n\n # select sample point from results\n sample_trace = dict(x=x[len(x) - 1], y=y[len(y) - 1])\n\n # labels list doesn't contain sample data label, so it's OK to do this\n for i, label in enumerate(labels):\n traces[label]['x'].append(x[i])\n traces[label]['y'].append(y[i])\n traces[label]['text'].append(data_files[i])\n\n return traces, sample_trace\n\n def add_sample(self, sample: Sample):\n \"\"\"\n If a sample wasn't provided when the Clusters instance was created,\n the sample can be added by this method. Data from provided sample are\n used to get activations from last layer and the prediction (label).\n Moreover, coordinates are determined for training data and currently\n analyzed sample using both t-SNE and PCA for dimensionality reduction.\n\n Parameters\n ----------\n sample : Sample\n Sample instance representing currently analysed sample contained\n in JSON file\n \"\"\"\n\n self.sample_data = self.__load_sample_data(sample)\n log.debug('Performing fit_transform for T-SNE...')\n self.tsne_traces, self.tsne_sample_trace = self.__prepare_tsne_traces()\n log.debug('Performing fit_transform for PCA...')\n self.pca_traces, self.pca_sample_trace = self.__prepare_pca_traces()\n log.debug('Successfully finished fit_transform...')\n\n def get_figure(self, algorithm: str, height=None) -> go.Figure:\n \"\"\"\n Returns cluster diagram with coordinates calculated by given algorithm.\n It's optional to set the height of diagram in pixels.\n\n Parameters\n ----------\n algorithm : str\n 'pca' or 'tsne', this parameter determines which coordinates\n should be used for dimensionality reduction\n height : int or None, optional\n height of diagram in pixels (default is None)\n\n Returns\n --------\n go.Figure\n go.Figure instance of cluster diagram\n \"\"\"\n\n if algorithm == 'pca':\n traces = self.pca_traces\n sample_trace = self.pca_sample_trace\n else:\n traces = self.tsne_traces\n sample_trace = self.tsne_sample_trace\n\n fig = go.Figure()\n # add all cluster traces\n for i, trace in enumerate(traces):\n fig.add_trace(\n go.Scatter(\n x=trace['x'],\n y=trace['y'],\n name='Label {}'.format(i),\n text=trace['text'],\n hoverinfo='x+y+text',\n mode='markers',\n marker=dict(\n size=8,\n color=CLUSTER_COLORS[i],\n opacity=0.6,\n )\n )\n )\n\n # add sample point\n fig.add_trace(\n go.Scatter(\n x=[sample_trace['x']],\n y=[sample_trace['y']],\n name='Analyzed sample',\n mode='markers',\n hoverinfo='x+y',\n marker=dict(size=10, color='black')\n )\n )\n\n # add possible train samples for comparison\n for train_sample in self.train_samples:\n if train_sample:\n for i, trace in enumerate(traces):\n if train_sample in trace['text']:\n index = trace['text'].index(train_sample)\n fig.add_trace(\n go.Scatter(\n x=[trace['x'][index]],\n y=[trace['y'][index]],\n hovertext=[trace['text'][index]],\n mode='markers',\n hoverinfo='x+y+text',\n marker=dict(size=8, color=CLUSTER_COLORS[i],\n line=dict(width=2,\n color='DarkSlateGrey')),\n showlegend=False\n )\n )\n break\n\n fig.update_layout(\n height=height or 500,\n template='plotly_white',\n showlegend=True,\n hovermode='closest',\n margin={'l': 10, 'b': 10, 't': 20}\n )\n\n return fig\n\n def 
view(self, dash_id: str, columns: str, algorithm: str, height=None):\n \"\"\"\n Returns dcc.Graph object which contains cluster diagram with\n coordinates calculated by given algorithm. It's optional to set\n the height of diagram in pixels.\n\n Parameters\n ----------\n dash_id : str\n id of the dcc.Graph component\n columns : str\n relative width of diagram, e.g. '6'\n algorithm : str\n 'pca' or 'tsne', this parameter determines which coordinates\n should be used for dimensionality reduction\n height : int or None, optional\n height of diagram in pixels (default is None)\n\n Returns\n --------\n dcc.Graph\n dcc.Graph instance of cluster diagram\n \"\"\"\n\n return dcc.Graph(\n id=dash_id,\n figure=self.get_figure(algorithm, height),\n style={\n 'height': height or '40vh'\n },\n className=COLUMNS[columns]\n )\n","repo_name":"katka-juhasova/CodeNNVis","sub_path":"components/clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":14376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2812637877","text":"import html\nfrom typing import List\n\nfrom pygments import highlight\nfrom pygments.formatters.html import HtmlFormatter\nfrom pygments.lexers.c_cpp import CLexer, CppLexer\nfrom pygments.lexers.markup import TexLexer\nfrom pygments.lexers.python import PythonLexer\nfrom pygments.lexers.templates import HtmlDjangoLexer\n\nfrom strictdoc import __version__\nfrom strictdoc.backend.sdoc_source_code.models.source_file_info import (\n SourceFileTraceabilityInfo,\n)\nfrom strictdoc.core.finders.source_files_finder import SourceFile\nfrom strictdoc.core.project_config import ProjectConfig\nfrom strictdoc.core.traceability_index import TraceabilityIndex\nfrom strictdoc.export.html.document_type import DocumentType\nfrom strictdoc.export.html.html_templates import HTMLTemplates\nfrom strictdoc.export.html.renderers.link_renderer import LinkRenderer\nfrom strictdoc.export.html.renderers.markup_renderer import MarkupRenderer\n\n\nclass SourceFileViewHTMLGenerator:\n @staticmethod\n def export(\n *,\n project_config: ProjectConfig,\n source_file: SourceFile,\n traceability_index: TraceabilityIndex,\n html_templates: HTMLTemplates,\n ):\n output = \"\"\n\n document_type = DocumentType.document()\n template = html_templates.jinja_environment().get_template(\n \"screens/source_file_view/index.jinja\"\n )\n\n with open(source_file.full_path, encoding=\"utf-8\") as opened_file:\n source_file_lines = opened_file.readlines()\n\n pygmented_source_file_lines: List[str] = []\n pygments_styles: str = \"\"\n\n if len(source_file_lines) > 0:\n coverage_info: SourceFileTraceabilityInfo = (\n traceability_index.get_coverage_info( # noqa: E501\n source_file.in_doctree_source_file_rel_path_posix\n )\n )\n (\n pygmented_source_file_lines,\n pygments_styles,\n ) = SourceFileViewHTMLGenerator.get_pygmented_source_lines(\n source_file, source_file_lines, coverage_info\n )\n link_renderer = LinkRenderer(\n root_path=source_file.path_depth_prefix,\n static_path=project_config.dir_for_sdoc_assets,\n )\n markup_renderer = MarkupRenderer.create(\n \"RST\",\n traceability_index,\n link_renderer,\n html_templates,\n project_config,\n None,\n )\n output += template.render(\n project_config=project_config,\n source_file=source_file,\n source_file_lines=source_file_lines,\n pygments_styles=pygments_styles,\n pygmented_source_file_lines=pygmented_source_file_lines,\n traceability_index=traceability_index,\n link_renderer=link_renderer,\n 
renderer=markup_renderer,\n document_type=document_type,\n strictdoc_version=__version__,\n standalone=False,\n )\n return output\n\n @staticmethod\n def get_pygmented_source_lines(\n source_file: SourceFile,\n source_file_lines: List[str],\n coverage_info: SourceFileTraceabilityInfo,\n ):\n assert isinstance(source_file, SourceFile)\n assert isinstance(source_file_lines, list)\n assert isinstance(coverage_info, SourceFileTraceabilityInfo)\n\n if source_file.is_python_file():\n lexer = PythonLexer()\n elif source_file.is_c_file():\n lexer = CLexer()\n elif source_file.is_cpp_file():\n lexer = CppLexer()\n elif source_file.is_tex_file():\n lexer = TexLexer()\n elif source_file.is_jinja_file():\n lexer = HtmlDjangoLexer()\n else:\n raise NotImplementedError(source_file)\n\n # HACK:\n # Otherwise, Pygments will skip the first line as if it does not exist.\n # This behavior surprisingly has an effect on the first line if its empty.\n hack_first_line: bool = False\n if source_file_lines[0] == \"\\n\":\n source_file_lines[0] = \" \\n\"\n hack_first_line = True\n\n # HACK:\n # Pygments does not process lines if they are empty and are at the end\n # of a file. Adding a marker to the end so that Pygments do not cut the\n # corners.\n source_file_content = \"\".join(source_file_lines)\n source_file_content_with_marker = source_file_content + \"\\n###\"\n\n html_formatter = HtmlFormatter()\n pygmented_source_file_content = highlight(\n source_file_content_with_marker, lexer, html_formatter\n )\n\n # HACK: split content into lines by cutting off the header and footer\n # parts generated by Pygments:\n #
<div class=\"highlight\"><pre> and </pre></div>\n        # TODO: Implement proper splitting.\n        start_pattern = '<div class=\"highlight\"><pre>'\n        end_pattern = \"</pre></div>
\\n\"\n assert pygmented_source_file_content.startswith(start_pattern)\n assert pygmented_source_file_content.endswith(\n end_pattern\n ), f\"{pygmented_source_file_content}\"\n\n slice_start = len(start_pattern)\n slice_end = len(pygmented_source_file_content) - len(end_pattern)\n pygmented_source_file_content = pygmented_source_file_content[\n slice_start:slice_end\n ]\n pygmented_source_file_lines = pygmented_source_file_content.split(\"\\n\")\n if hack_first_line:\n pygmented_source_file_lines[0] = \"\"\n\n if pygmented_source_file_lines[-1] == \"\":\n pygmented_source_file_lines.pop()\n assert (\n \"###\" in pygmented_source_file_lines[-1]\n ), \"Expected marker to be in place.\"\n # Pop ###, pop \"\\n\"\n pygmented_source_file_lines.pop()\n if pygmented_source_file_lines[-1] == \"\":\n pygmented_source_file_lines.pop()\n\n assert len(pygmented_source_file_lines) == len(source_file_lines), (\n f\"Something went wrong when running Pygments against \"\n f\"the source file: \"\n f\"{len(pygmented_source_file_lines)} == {len(source_file_lines)}, \"\n f\"{pygmented_source_file_lines} == {source_file_lines}.\"\n )\n\n for pragma in coverage_info.pragmas:\n pragma_line = pragma.ng_source_line_begin\n source_line = source_file_lines[pragma_line - 1]\n assert len(pragma.reqs_objs) > 0\n before_line = source_line[\n : pragma.reqs_objs[0].ng_source_column - 1\n ].rstrip(\"/\")\n closing_bracket_index = source_line.index(\"]\")\n after_line = source_line[closing_bracket_index:].rstrip()\n\n before_line = html.escape(before_line)\n after_line = html.escape(after_line)\n\n pygmented_source_file_lines[pragma_line - 1] = (\n before_line,\n after_line,\n pragma,\n )\n pygments_styles = html_formatter.get_style_defs(\".highlight\")\n return pygmented_source_file_lines, pygments_styles\n","repo_name":"strictdoc-project/strictdoc","sub_path":"strictdoc/export/html/generators/source_file_view_generator.py","file_name":"source_file_view_generator.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"32"} +{"seq_id":"39617451726","text":"from pyitab.analysis.decoding.roi_decoding import RoiDecoding\nfrom scipy.stats.stats import f_oneway\nfrom sklearn.model_selection import *\n\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_selection import f_oneway, SelectKBest\n\nimport numpy as np\nfrom pyitab.io.loader import DataLoader\nimport os\n\n\nfrom pyitab.analysis.iterator import AnalysisIterator\nfrom pyitab.analysis.configurator import AnalysisConfigurator\nfrom pyitab.analysis.pipeline import AnalysisPipeline\nfrom pyitab.analysis.decoding.roi_decoding import RoiDecoding\nfrom pyitab.preprocessing.pipelines import PreprocessingPipeline\nfrom pyitab.preprocessing.functions import Detrender, SampleSlicer, \\\n TargetTransformer, Transformer\nfrom pyitab.preprocessing.normalizers import SampleZNormalizer\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nconf_file = \"/media/robbis/DATA/meg/reftep/bids.conf\"\nloader = DataLoader(configuration_file=conf_file, \n task='reftep', \n load_fx='reftep-conn',\n loader='bids-meg',\n bids_pipeline='connectivity+lv'\n )\n\nds = loader.fetch(n_subjects=9)\n\n\n_default_options = {\n\n 'prepro': [\n ['sample_slicer', 'target_transformer'],\n ['sample_slicer', 'feature_znormalizer', 'target_transformer'],\n ['sample_slicer', 'sample_znormalizer', 'target_transformer'],\n \n ],\n \n 'sample_slicer__subject': [[s] for s in 
np.unique(ds.sa.subject)],\n\n 'estimator__fsel__k': [50, 100, 150],\n 'estimator__clf': [\n LogisticRegression(penalty='l1', solver='liblinear'),\n SVC(C=1, kernel='linear'),\n ],\n}\n\n\n_default_config = {\n\n 'prepro': ['sample_slicer', 'target_transformer'],\n 'target_transformer__fx':\n ('binarize', lambda x: np.int_(x > np.median(x))),\n\n 'sample_slicer__subject': [[s] for s in ds.sa.subject],\n\n 'estimator': [\n ('fsel', SelectKBest(k=50, score_func=f_oneway)),\n ('clf', SVC(C=1, kernel='linear'))\n ],\n\n 'cv': StratifiedShuffleSplit,\n 'cv__n_splits': 25,\n 'cv__test_size': 0.25,\n\n 'analysis__scoring' : ['accuracy'],\n\n 'analysis': RoiDecoding,\n 'analysis__n_jobs': -1,\n 'analysis__permutation': 0,\n 'analysis__verbose': 0,\n 'kwargs__roi': ['matrix_values'],\n #'kwargs__cv_attr': 'mep-right',\n\n}\n\n\nerrors = []\niterator = AnalysisIterator(_default_options, AnalysisConfigurator, config_kwargs=_default_config)\nfor conf in iterator:\n kwargs = conf._get_kwargs()\n #try:\n a = AnalysisPipeline(conf, name=\"reftep+connectivity+lv\").fit(ds, **kwargs)\n a.save()\n #except Exception as err:\n # capture_exception(err)\n # errors.append([conf, err])\n\n\n\n####################\nfrom pyitab.analysis.results import get_results_bids, filter_dataframe, apply_function\nimport seaborn as sns\nimport h5py\n\npipeline = 'reftep+connectivity+lv'\n\npath = \"/media/robbis/DATA/meg/reftep/derivatives/pipeline-\"+pipeline\ndataframe = get_results_bids(path, \n field_list=['sample_slicer', \n 'estimator__clf', \n 'estimator__fsel'], \n pipeline=[pipeline], scores=['accuracy'])\n\n\n\n#####################\nimport mne\nimport h5py\nfrom scipy.io import loadmat\n\nfname = '/home/robbis/mount/meg_analysis/roberto/sub-002_source-spaceiPLV_window-1000_band-mu_vpphaseandtau_seed_ICA_10368_indiv_muscle_1.mat'\nmat = h5py.File(fname, \"r\")\ndata = mat['oPLV'][()]\n\nmodel_fname = \"/home/robbis/git/fieldtrip/template/sourcemodel/standard_sourcemodel3d7point5mm.mat\"\nmat = loadmat(model_fname, squeeze_me=True)\n\npos = mat['sourcemodel']['pos'][()]\ninside = pos[mat['sourcemodel']['inside'] == 1]\ninside /= 100 # meters\n\nt1spm = '/home/robbis/mne_data/MNE-spm-face/subjects/spm/mri/T1.mgz'\nbem = '/home/robbis/mne_data/MNE-spm-face/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'\nsrc = mne.setup_volume_source_space(pos=7.5, \n subject='spm', \n bem=bem,\n subjects_dir='/home/robbis/mne_data/MNE-spm-face/subjects/')\n\nvoldata = np.zeros((src[0]['nuse'], data.shape[0]))\n\nvertno = src[0]['vertno']\nvertpos = src[0]['rr'][vertno]\n\ndist = cdist(vertpos, inside)\nminvert = np.argmin(dist, axis=0)\nvoldata[minvert] = data.T\n\nstc = mne.VolSourceEstimate(voldata, [src[0]['vertno']], 0, 1, subject='spm')\nbrain = stc.plot(src, \n colormap='gnuplot',\n subjects_dir='/home/robbis/mne_data/MNE-spm-face/subjects/', \n mode='glass_brain',\n clim=dict(kind='percent', lims=[75, 85, 95]), \n initial_pos=np.array([-36, -25, 60])/1000.,\n )\n\nmorph = mne.compute_source_morph(src, subject_from=None, subject_to='fsaverage', subjects_dir=None, zooms='auto', niter_affine=(100, 100, 10), niter_sdr=(5, 5, 3), spacing=5, smooth=None, warn=True, xhemi=False, sparse=False, src_to=None, precompute=False, verbose=False)\nmorph = mne.compute_source_morph(src, subject_from='spm', 
subjects_dir='fsaverage')","repo_name":"robbisg/mvpa_itab_wu","sub_path":"scripts/mambo/reftep/reftep_prediction_connectivity.py","file_name":"reftep_prediction_connectivity.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"7275458077","text":"# Problem Description:\n# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.\n# Example:\n# Input: nums1 = [1,2,3,0,0,0], m = 3\n# nums2 = [2,5,6], n = 3\n# Output: [1,2,2,3,5,6]\n# Note:\n# The number of elements initialized in nums1 and nums2 are m and n respectively.\n# You may assume that nums1 has enough space (size that is greater or equal to m + n)\n# to hold additional elements from nums2.\n#\n# Solution:\n# Time Complexity: O(n)\n# Space Complexity: O(1)\n\nfrom typing import List\n\n\nclass Solution:\n\n # Solution 1: Merge and Sort\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n for j in range(n):\n nums1[m + j] = nums2[j]\n nums1.sort()\n\n # Solution 2: Two Pointers / Start from the end\n def merge2(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n i = m - 1\n j = n - 1\n k = m + n - 1\n\n while j >= 0:\n if i >= 0 and nums1[i] > nums2[j]:\n nums1[k] = nums1[i]\n i -= 1\n else:\n nums1[k] = nums2[j]\n j -= 1\n k -= 1\n\n\n# Test Cases\nif __name__ == \"__main__\":\n solution = Solution()\n nums1 = [1, 2, 3, 0, 0, 0]\n solution.merge(nums1, 3, [2, 5, 6], 3)\n print(nums1)\n nums1 = [1, 2, 3, 0, 0, 0]\n solution.merge2(nums1, 3, [2, 5, 6], 3)\n print(nums1)\n","repo_name":"Angeldahal/leetcode-30","sub_path":"Day 4/mergeSortedArray.py","file_name":"mergeSortedArray.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35301223081","text":"import os\n\nfrom project.movie_specification.movie import Movie\nfrom project.user import User\n\n\nclass MovieApp:\n def __init__(self):\n self.movies_collection = []\n self.users_collection = []\n\n def __str__(self):\n result = ''\n if len(self.users_collection) > 0:\n result += f'All users: {\", \".join([user.username for user in self.users_collection])}' + os.linesep\n else:\n result += \"All users: No users.\" + os.linesep\n\n if len(self.movies_collection) > 0:\n result += f'All movies: {\", \".join([current_movie.title for current_movie in self.movies_collection])}'\n else:\n result += \"All movies: No movies.\"\n\n return result\n\n def register_user(self, username: str, age: int):\n if username in [user.username for user in self.users_collection]:\n raise Exception('User already exists!')\n\n self.users_collection.append(User(username, age))\n return f'{username} registered successfully.'\n\n def upload_movie(self, username: str, movie: Movie):\n if username not in [user.username for user in self.users_collection]:\n raise Exception('This user does not exist!')\n elif username in [user.username for user in self.users_collection] and movie.owner.username != username:\n raise Exception(f'{username} is not the owner of the movie {movie.title}!')\n elif movie in self.movies_collection:\n raise Exception('Movie already added to the collection!')\n else:\n self.movies_collection.append(movie)\n user = [x for x in self.users_collection if x.username == username][0]\n user.movies_owned.append(movie)\n return f'{username} successfully 
added {movie.title} movie.'\n\n def edit_movie(self, username: str, movie: Movie, **kwargs):\n if movie.owner.username != username:\n raise Exception(f'{username} is not the owner of the movie {movie.title}!')\n if movie not in [current_movie for current_movie in self.movies_collection]:\n raise Exception(f'The movie {movie.title} is not uploaded!')\n\n edit_movie_title = movie.title\n\n if edit_movie_title:\n edit_movie = [x for x in self.movies_collection if x.title == edit_movie_title][0]\n if edit_movie:\n edit_movie.title = kwargs['title']\n for key, value in kwargs.items():\n edit_movie.key = value\n\n return f'{username} successfully edited {movie.title} movie.'\n\n def delete_movie(self, username: str, movie: Movie):\n if movie.owner.username != username:\n raise Exception(f'{username} is not the owner of the movie {movie.title}!')\n if movie not in [current_movie for current_movie in self.movies_collection]:\n raise Exception(f'The movie {movie.title} is not uploaded!')\n\n user = [x for x in self.users_collection if x.username == username][0]\n\n self.movies_collection.remove(movie)\n user.movies_owned.remove(movie)\n\n return f'{username} successfully deleted {movie.title} movie.'\n\n def like_movie(self, username: str, movie: Movie):\n user = [x for x in self.users_collection if x.username == username][0]\n\n if movie.owner.username == username:\n raise Exception(f'{username} is the owner of the movie {movie.title}!')\n if movie in user.movies_liked:\n raise Exception(f'{username} already liked the movie {movie.title}!')\n\n user.movies_liked.append(movie)\n movie.likes += 1\n\n return f'{username} liked {movie.title} movie.'\n\n def dislike_movie(self, username: str, movie: Movie):\n user = [x for x in self.users_collection if x.username == username][0]\n\n if movie not in user.movies_liked:\n raise Exception(f'{username} has not liked the movie {movie.title}!')\n\n user.movies_liked.remove(movie)\n movie.likes -= 1\n\n return f'{username} disliked {movie.title} movie.'\n\n def display_movies(self):\n if len(self.movies_collection) > 0:\n self.movies_collection.sort(key=lambda x: (x.year, x.title), reverse=False)\n return os.linesep.join([current_movie.details() for current_movie in self.movies_collection])\n\n return \"No movies found.\"\n","repo_name":"VenelinKadankov/SoftUni-All","sub_path":"Python/OOP Practice exams/18.04.2022/project/movie_app.py","file_name":"movie_app.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44426518147","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Dict\n\nimport numpy as np\nfrom google.cloud import storage\nfrom transformers import AutoConfig, AutoTokenizer\n\nimport kserve\nimport tritonclient.http as httpclient\n\nlogging.basicConfig(level=logging.DEBUG)\n\nSTORAGE_URI = os.getenv(\n \"CONFIG_PATH_STORAGE_URI\",\n 
\"gs://kserve-models-saga-sandbox/models/mot-eggs\",\n)\nK8S_NAMESPACE = \"models\"\n\n\ndef softmax(_outputs):\n maxes = np.max(_outputs, axis=-1, keepdims=True)\n shifted_exp = np.exp(_outputs - maxes)\n return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)\n\n\ndef load_configs(model_name: str):\n bucket = STORAGE_URI.split(\"/\")[2]\n object_name = \"/\".join(STORAGE_URI.split(\"/\")[3:])\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket)\n config_blobs = [\n blob\n for blob in bucket.list_blobs(prefix=object_name)\n if \"config.json\" in blob.name\n ]\n\n configs = {}\n for config_blob in config_blobs:\n version_id = config_blob.name.split(\"/\")[-2]\n if version_id == model_name:\n version_id = \"default\"\n\n config = json.loads(config_blob.download_as_bytes())\n configs[version_id] = config\n\n assert configs, \"Can't find the config file for AR model\"\n return configs\n\n\n# this needs to work kind of like textclassificationpipeline from transformers\nclass SequenceTransformer(kserve.Model):\n def __init__(self, predictor_host: str, name: str = \"mot-eggs\"):\n super().__init__(name)\n self.predictor_host = predictor_host\n configs = load_configs(name)\n self.bert_config = configs[\"default\"]\n tokenizer_path = self.bert_config[\"_name_or_path\"]\n self.bert_tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_path,\n )\n self.triton_client = None\n\n # inputs is model_name as the first argument in the string, and then a dict with texts and then a list of texts\n def preprocess(self, inputs: Dict[str, Dict]) -> Dict:\n # inputs = {\"text\": \"please process this po\"}\n max_length: int = 400\n tokenized_inputs = self.bert_tokenizer(\n inputs[\"text\"],\n is_split_into_words=False,\n return_offsets_mapping=False,\n padding=False,\n max_length=max_length,\n truncation=True,\n return_tensors=\"np\",\n )\n return tokenized_inputs\n\n async def predict(self, tokenized_inputs: Dict) -> Dict:\n if not self.triton_client:\n self.triton_client = httpclient.InferenceServerClient(\n url=self.predictor_host, verbose=False\n )\n\n attention_mask = (\n tokenized_inputs[\"attention_mask\"].reshape(\n -1, tokenized_inputs[\"attention_mask\"].shape[-1]\n )\n # .astype(np.int32)\n )\n input_ids = (\n tokenized_inputs[\"input_ids\"].reshape(\n -1, tokenized_inputs[\"input_ids\"].shape[-1]\n )\n # .astype(np.int32)\n )\n\n inputs = [\n httpclient.InferInput(\n \"attention_mask\", list(attention_mask.shape), \"INT64\"\n ),\n httpclient.InferInput(\"input_ids\", list(input_ids.shape), \"INT64\"),\n ]\n inputs[0].set_data_from_numpy(attention_mask)\n inputs[1].set_data_from_numpy(input_ids)\n\n outputs = [httpclient.InferRequestedOutput(\"logits\", binary_data=False)]\n result = self.triton_client.infer(\"mot-eggs\", inputs, outputs=outputs)\n res = result.get_response()\n return res\n\n def postprocess(self, res: Dict) -> Dict:\n top_k = None\n id2label = self.bert_config[\"id2label\"]\n\n logits = res[\"outputs\"][0][\"data\"]\n\n dict_scores = [\n {\"label\": id2label[str(i)], \"score\": score.item()}\n for i, score in enumerate(softmax(logits))\n ]\n dict_scores.sort(key=lambda x: x[\"score\"], reverse=True)\n if top_k is not None:\n dict_scores = dict_scores[:top_k]\n\n return {\"label_scores\": dict_scores}\n\nif __name__ == \"__main__\":\n transformer = SequenceTransformer(\n predictor_host=\"localhost:8000\",\n name=\"mot-eggs\",\n )\n self = transformer\n server = kserve.ModelServer()\n 
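# server.start blocks and serves this transformer's preprocess/predict/\n    # postprocess hooks over HTTP, forwarding inference to the Triton\n    # endpoint at predictor_host.\n    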
server.start(models=[transformer])\n","repo_name":"Hojland/mot-eggs","sub_path":"kserve-transformer/transformer/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13202738583","text":"open = True\n\nimport turtle \nsixsten = turtle.Turtle()\nsixsten.speed(50)\nsixsten.penup()\n\n\nwhile open:\n planX = input(\"Var vill du ha pricken på plan X?\")\n \n planY = input(\"Var vill du ha pricken på plan Y?\")\n \n sixsten.goto(int(planX), int(planY))\n sixsten.pendown()\n sixsten.dot()\n\n sixsten.penup()\n\n\na = input()\n","repo_name":"LukasWarna/programmerings-kurs","sub_path":"Paddan/sixsten.py","file_name":"sixsten.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15411096856","text":"# LWYapi/ccp/views.py\nimport os\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom LWYapi import settings\nfrom . import constant\nfrom .models import User, MemberInfo, TOTAL_SEQ_INFO_M, IFD_POST_DATA_M\nimport json\nfrom datetime import datetime\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# from .forms import UserForm, LoginForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth import get_user_model\n\n#-----------------------------------------------------\n# Main\n#-----------------------------------------------------\n# @csrf_exempt\n# def LoginView(request, u_loginid, u_loginpass):\n# print('LoginView : ulogin[{}], u_loginpass[{}] '.format(u_loginid, u_loginpass))\n# return HttpResponse('LoginView : ulogin[{}], u_loginpass[{}] '.format(u_loginid, u_loginpass))\n#\n\n\n # username = request.POST['username']\n # password = request.POST['password']\n # user = authenticate(request, username=username, password=password)\n # if user is not None:\n # login(request, user)\n # # Redirect to a success page.\n #\n # else:\n # # Return an 'invalid login' error message.\n # pass\n\n@csrf_exempt\ndef MainView(request,\n u_product=\"\",\n u_member=\"\",\n u_date=\"\",\n u_process=\"\",\n u_item=\"\",\n u_seq=\"\" ):\n print('MainView : ulogin [ request: ] ' + str(request))\n # -----------------------------------------------------\n #[공통] Change the input url to lower charector format\n # -----------------------------------------------------\n u_product = u_product.lower()\n u_member = u_member.lower()\n u_date = u_date.lower()\n u_process = u_process.lower()\n u_item = u_item.lower()\n u_seq = u_seq.lower()\n\n\n\n # -----------------------------------------------------\n #[공통] Url Path Check\n # -----------------------------------------------------\n rtUrlCheck = UrlCheckView(u_product,\n u_member,\n u_date,\n u_process,\n u_item,\n u_seq)\n\n if rtUrlCheck == constant.CHECK_PRODUCT:\n return HttpResponse(constant.CHECK_PRODUCT)\n elif rtUrlCheck == constant.CHECK_MEMBER:\n return HttpResponse(constant.CHECK_MEMBER)\n elif rtUrlCheck == constant.CHECK_DATE:\n return HttpResponse(constant.CHECK_DATE)\n elif rtUrlCheck == constant.CHECK_PROCESS:\n return HttpResponse(constant.CHECK_PROCESS)\n elif rtUrlCheck == constant.CHECK_ITEM:\n if u_process not in ['login']:\n return HttpResponse(constant.CHECK_ITEM)\n elif rtUrlCheck == constant.CHECK_SEQ:\n if u_process not in 
['list', 'login']:\n            return HttpResponse(constant.CHECK_SEQ)\n\n\n    '''\n    For POST requests, only TRCODE is allowed\n    '''\n    ''' Needs to be changed later to check against the Meta information values '''\n    if request.method in ['POST' , 'UPDATE' , 'DELETE']:\n        if u_item in ['all', 'clearing', 'settlement', 'risk', 'rds', 'pricing']:\n            return HttpResponse(constant.CHECK_ITEM)\n        else:\n            pass\n    else:\n        pass\n\n    #-----------------------------------------------------\n    #[Common] Login info check\n    #-----------------------------------------------------\n    #username = request.POST.get('username' , 'default') same method\n    username = request.POST['username']\n    password = request.POST['password']\n    # print('username : ' + username)\n    # print('password : ' + password)\n\n    # Usage unclear; need to understand authenticate() and apply it later\n    # user = authenticate(request, username=username, password=password)\n\n    UserModel = get_user_model()\n    user = UserModel.objects.get(username=username)\n    if ( user.username == username ) and \\\n       (user.password == password ):\n        user.login_state = 'yes'\n        user.save()\n        return HttpResponse(constant.CHECK_OK)\n    else:\n        return HttpResponse(constant.CHECK_ACCOUNTS)\n\n\n    # username = request.POST['username']\n    # password = request.POST['password']\n    # user = authenticate(request, username=username, password=password)\n    # if user is not None:\n    #     login(request, user)\n    #     # Redirect to a success page.\n    #\n    # else:\n    #     # Return an 'invalid login' error message.\n    #    pass\n\n\n\n    #\n    #user = authenticate(request, username=username, password=password)\n    # if user is not None:\n    #     login(request , user)\n    #     # return redirect('index')\n    #     return HttpResponse('Login succeeded.')\n    # else:\n    #     return HttpResponse('Login failed. Please try again.')\n\n    #-----------------------------------------------------\n    #[Common] Member info check\n    #-----------------------------------------------------\n    rtMemberCheck = MemberCheckView(u_product, u_member)\n\n    if rtMemberCheck == constant.CHECK_PRODUCT:\n        return HttpResponse(constant.CHECK_PRODUCT + \" [ product input :\" + u_product +\" ]\")\n    elif rtMemberCheck == constant.CHECK_MEMBER:\n        return HttpResponse(constant.CHECK_MEMBER+ \" [ member input :\" + u_member + \" ]\")\n\n\n\n    #-----------------------------------------------------\n    # GET , POST\n    # -----------------------------------------------------\n    # GET request\n    if request.method not in ['POST', 'UPDATE', 'DELETE']:\n        if u_process == \"list\":\n            RtData = ListView(u_product ,\n                              u_member ,\n                              u_date ,\n                              u_process ,\n                              u_item)\n\n        elif u_process == \"data\":\n            RtData = DataView(u_product ,\n                              u_member ,\n                              u_date ,\n                              u_process ,\n                              u_item,\n                              u_seq)\n        else:\n            return HttpResponse(\"Check the u_process(:{}) \".format(u_process))\n\n\n        return JsonResponse(\n            {\n                'Message Type ': \"JsonResponse Query.....\" ,\n                'REQUEST':\n                    ['PRODUCT : ' + u_product ,\n                     'MEMBER  : ' + u_member ,\n                     'DATE    : ' + u_date ,\n                     'PROCESS : ' + u_process ,\n                     'ITEM    : ' + u_item ,\n                     'SEQ     : ' + u_seq] ,\n                'REPLY': [RtData.split(',')] ,\n\n                # jsonString = json.dumps(rows , indent=4)\n            } , json_dumps_params={'ensure_ascii': True}\n        )\n    else: #POST request\n        # Check login info\n        # if u_process == 'login':\n        #     username = request.POST.get('username','default')\n        #     password = request.POST.get('password', 'default')\n        #     # username = 
request.POST['username']\n        #     # password = request.POST['password']\n        #     print('username : '+username)\n        #     qs = User.objects.filter(login_id=username)\n        #     if qs:\n        #         for item in qs:\n        #             print('item.login_id' + item.login_id)\n        #\n        #             if (item.login_id == username) and\\\n        #                (item.password == password):\n        #                 ## login state update\n        #                 print('login_state : ')\n        #                 qs.login_state = 'yes'\n        #                 qs.save()\n        #                 return HttpResponse(constant.CHECK_OK)\n        #             else:\n        #                 return HttpResponse(constant.CHECK_USERNAME)\n        #     else:\n        #         return HttpResponse(constant.CHECK_USERNAME)\n        #\n\n        content = request.POST.get('data', 'default')\n\n        # seq duplicate check\n        qs = TOTAL_SEQ_INFO_M.objects.filter(product=u_product)\n        qs = qs.filter(member=u_member)\n        qs = qs.filter(item=u_item)\n        qs = qs.filter(item_seq=u_seq)\n\n        if qs:\n            for item in qs:\n                if item.item_seq == u_seq:\n                    return HttpResponse(constant.CHECK_ITEM)\n                else:\n                    pass\n        else:\n            pass\n\n        # Do not check the data; only check the received seq and the next seq\n        # st_content = str(content)\n        # print('content : '+ st_content[0:11])\n\n        # Save the receiving data at 'IFD_POST_DATA' Table\n        ifd = IFD_POST_DATA_M(\n            created_at = u_date,\n            updated_at = datetime.now(),\n            market = 'ccp',\n            product = u_product,\n            member = u_member,\n            item = u_item,\n            item_group = 'item_group',\n            item_seq = u_seq ,\n            data = content )\n\n        RtData = ''\n\n        rtn = ifd.save()  # Django's Model.save() returns None, so the failure branch below is never taken\n        if rtn :\n            RtData = 'DB save Fail[rtn:{}]'.format(rtn)\n            ifd.save()\n        else: #Success : return NONE\n\n            try:\n                qs = TOTAL_SEQ_INFO_M.objects.filter(product=u_product)\n                qs = qs.filter(member=u_member)\n                qs = qs.get(item=u_item)\n            except ObjectDoesNotExist as err:\n                print('ERROR : DB object does not exist')\n                RtData = 'ERROR : DB object does not exist '\n            else:\n                # Update Seq to DB Table as 'TOT_SEQ_INFO'\n                if qs:\n                    qs.updated_at = datetime.now()\n                    qs.save()\n                    qs.item_seq = u_seq\n                    qs.save()\n                    qs.fmtoa_seq = u_seq\n                    qs.save()\n\n                    RtData = 'DB Save O.K [rtn:{}]'.format(rtn)\n                else:\n                    RtData = 'Not found the item({}) in TOTAL_SEQ_INFO_M'.format(u_item)\n            finally:\n                print('finally job ')\n\n\n        return JsonResponse(\n            {\n                'message': \"JsonResponse POST.....\" ,\n                'REQUEST ':\n                    ['PRODUCT : ' + u_product ,\n                     'MEMBER  : ' + u_member ,\n                     'DATE    : ' + u_date ,\n                     'PROCESS : ' + u_process ,\n                     'ITEM    : ' + u_item ,\n                     'SEQ     : ' + u_seq ,\n                     'CONTENTS: ' + content ],\n                'REPLY': [RtData] ,\n            } , json_dumps_params={'ensure_ascii': True}\n        )\n\n\n\n\n\n\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] UrlCheckView()\n-----------------------------------------------------\n'''\ndef UrlCheckView(u_product,\n                 u_member,\n                 u_date,\n                 u_process,\n                 u_item,\n                 u_seq):\n\n    if u_product <= \"\" or \\\n       u_product not in [\"irs-won\", \"irs-usd\", \"ndf\", \"pro_fx\"] :\n\n        return constant.CHECK_PRODUCT\n\n\n    if u_member <= \"\" or \\\n       len(u_member) !=5 :\n        return constant.CHECK_MEMBER\n\n\n    if u_date <= \"\" or \\\n       len(u_date) !=8 :\n        return constant.CHECK_DATE\n\n    if u_process <= \"\" or \\\n       u_process not in [\"list\", \"data\", 'create', 'delete', 'update', 'login'] :\n        return constant.CHECK_PROCESS\n\n    if u_item <= \"\" :\n        return constant.CHECK_ITEM\n\n    if u_seq <= \"\" :\n        return constant.CHECK_SEQ\n\n    return constant.CHECK_OK\n\n\n\n'''\n-----------------------------------------------------\n[Function] MemberCheckView()\n-----------------------------------------------------\n'''\ndef MemberCheckView(u_product, u_member):\n\n    qs = MemberInfo.objects.all()\n\n    qs = qs.filter(member=u_member)\n    if qs.exists():\n        pass\n    else:\n        return 
constant.CHECK_MEMBER\n\n\n    for i in qs:\n        if u_product == 'irs-won':\n            if i.irs_won =='y':\n                pass\n            else:\n                return constant.CHECK_PRODUCT\n\n        if u_product == 'irs-usd':\n            if i.irs_usd == 'y':\n                pass\n            else:\n                return constant.CHECK_PRODUCT\n\n        if u_product == 'ndf':\n            if i.ndf == 'y':\n                pass\n            else:\n                return constant.CHECK_PRODUCT\n\n        if u_product == 'pro-fx':\n            if i.pro_fx == 'y':\n                pass\n            else:\n                return constant.CHECK_PRODUCT\n\n    return constant.CHECK_OK\n\n\n\n'''\n-----------------------------------------------------\n[Function] ListView()\n-----------------------------------------------------\n'''\ndef ListView(u_product ,\n             u_member ,\n             u_date ,\n             u_process ,\n             u_item):\n\n    print(\"- Called the ListView() \" + u_product)\n\n\n    if u_product == 'irs-won':\n        rt = IrsWonListView(u_product,\n                            u_member,\n                            u_date,\n                            u_process,\n                            u_item\n                            )\n    elif u_product == 'irs-usd':\n        rt = IrsUsdListView(u_product,\n                            u_member,\n                            u_date,\n                            u_process,\n                            u_item)\n    elif u_product == 'ndf':\n        rt = NdfListView(u_product,\n                         u_member,\n                         u_date,\n                         u_process,\n                         u_item)\n    elif u_product == 'fx':\n        rt = FxListView(u_product,\n                        u_member,\n                        u_date,\n                        u_process,\n                        u_item)\n    else:\n        rt = 'Not Found (list_view(): ' + u_product\n\n    if rt:\n        return rt\n    else:\n        rt = 'Check the IrsWonListView() :rt : '+rt\n        return rt\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] IrsWonListView()\n-----------------------------------------------------\n'''\ndef IrsWonListView(u_product,\n                   u_member,\n                   u_date,\n                   u_process,\n                   u_item):\n    print(\"- Called the IrsWonListView() \")\n\n    # qs = MemberInfo.objects.all().filter(member=u_member)\n\n    rows = ''\n    qs = TOTAL_SEQ_INFO_M.objects.filter(product=u_product)\n    qs = qs.filter(member=u_member)\n\n    if u_item not in [\"all\" , \"clearing\" , \"settlement\" , \"risk\" , \"pricing\"]:\n        qs = qs.filter(item=u_item)\n        if qs:\n            for item in qs:\n                rows = rows + \\\n                       'Date:' + str(item.created_at) + ',' + \\\n                       'Market:' + item.market + ',' + \\\n                       'Product:' + item.product + ',' + \\\n                       'Member:' + item.member + ',' + \\\n                       'Item:' + item.item + ',' + \\\n                       'Item_Group:' + item.item_group + ',' + \\\n                       'Item_Seq:' + item.item_seq + ',' + \\\n                       'End_Bit:' + item.end_bit + ','\n        else:\n            rows = 'Not Found : ' + u_item\n    elif u_item == 'all':\n        #qs = TOTAL_SEQ_INFO_M.objects.filter(member=u_member)\n        if qs:\n            for item in qs:\n                rows = rows + \\\n                       'Date:' + str(item.created_at) + ',' + \\\n                       'Market:' + item.market + ',' + \\\n                       'Product:' + item.product + ',' + \\\n                       'Member:' + item.member + ',' + \\\n                       'Item:' + item.item + ',' + \\\n                       'Item_Group:' + item.item_group + ',' + \\\n                       'Item_Seq:' + item.item_seq + ',' + \\\n                       'End_Bit:' + item.end_bit + ','\n        else:\n            rows = 'Not Found : ' + u_item\n    else: # Get the 'item_group'\n        qs = qs.filter(item_group =u_item)\n        for item in qs:\n            if item.item_group:\n                rows = rows + \\\n                       'Date:' + str(item.created_at) + ',' + \\\n                       'Market:' + item.market + ',' + \\\n                       'Product:' + item.product + ',' + \\\n                       'Member:' + item.member + ',' + \\\n                       'Item:' + item.item + ',' + \\\n                       'Item_Group:' + item.item_group + ',' + \\\n                       'Item_Seq:' + item.item_seq + ',' + \\\n                       'End_Bit:' + item.end_bit + ','\n            else:\n                rows = 'Not Found, Group Item: ' + u_item\n\n\n    # rows = rows + \"BIC_CODE : \" + item.bic_code + \", \"\n    # jsonString = json.dumps(\"BIC_CIDE : \" + item.bic_code)\n    # rows = rows + jsonString +\", \"\n    # rows = rows + item.get_market_display()\n\n\n\n    #  \\\n    # +\\\n    # item.created_at + \"\"\n    #\n\n    # # q = request.GET.get('q' , '')\n    # # q = 
u_item.GET.get()\n    # # qs = qs.filter(bic_code__icontains=u_item)\n\n\n    return rows\n\n\n    # return (\" * Called the ListView() \")\n\n\n\n'''\n-----------------------------------------------------\n[Function] DataView()\n-----------------------------------------------------\n'''\ndef DataView(u_product, u_member, u_date, u_process, u_item, u_seq):\n\n    if u_product == 'irs-won':\n        rt = IrsWonDataView(u_product ,\n                            u_member ,\n                            u_date ,\n                            u_process ,\n                            u_item,\n                            u_seq)\n    elif u_product == 'irs-usd':\n        rt = IrsUsdDataView(u_product ,\n                            u_member ,\n                            u_date ,\n                            u_process ,\n                            u_item,\n                            u_seq)\n    elif u_product == 'ndf':\n        rt = NdfDataView(u_product ,\n                         u_member ,\n                         u_date ,\n                         u_process ,\n                         u_item,\n                         u_seq)\n    elif u_product == 'fx':\n        rt = FxDataView(u_product ,\n                        u_member ,\n                        u_date ,\n                        u_process ,\n                        u_item,\n                        u_seq)\n    else:\n        rt = 'Not Found (DataView(): ' + u_product\n\n    if rt:\n        return rt\n    else:\n        rt = 'Check the DataView() :rt : ' + rt\n        return rt\n\n\n\n'''\n-----------------------------------------------------\n[Function] IrsUsdListView()\n-----------------------------------------------------\n'''\ndef IrsUsdListView(u_product,\n                   u_member,\n                   u_date,\n                   u_process,\n                   
u_item):\n #print(\"- Called the IrsUsdListView() \")\n return (\" * Called the IrsUsdListView() \")\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] NdfListView()\n-----------------------------------------------------\n'''\ndef NdfListView(u_product,\n u_member,\n u_date,\n u_process,\n u_item):\n #print(\"- Called the NdfListView() \")\n return (\" * Called the NdfListView() \")\n\n\n\n'''\n-----------------------------------------------------\n[Function] FxListView()\n-----------------------------------------------------\n'''\ndef FxListView(u_product,\n u_member,\n u_date,\n u_process,\n u_item):\n #print(\"- Called the FxListView() \")\n return (\" * Called the FxListView() \")\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] IrsWonDataView()\n-----------------------------------------------------\n'''\ndef IrsWonDataView( u_product,\n u_member,\n u_date,\n u_process,\n u_item,\n u_seq):\n print(\"- Called the IrsWonDataView() \")\n\n rows = ''\n qs = IFD_POST_DATA_M.objects.filter(product=u_product)\n qs = qs.filter(member=u_member)\n\n if u_item not in [\"all\" , \"clearing\" , \"settlement\" , \"risk\" , \"pricing\"]:\n qs = qs.filter(item=u_item)\n if qs:\n for item in qs:\n rows = rows + \\\n 'Date:' + str(item.created_at) + ',' + \\\n 'Market:' + item.market + ',' + \\\n 'Product:' + item.product + ',' + \\\n 'Member:' + item.member + ',' + \\\n 'Item:' + item.item + ',' + \\\n 'Item_Group:' + item.item_group + ',' + \\\n 'Item_Seq:' + item.item_seq + ',' + \\\n 'Data:' + item.data + ','\n else:\n rows = 'Not Found : ' + u_item\n elif u_item == 'all':\n if qs:\n for item in qs:\n rows = rows + \\\n 'Date:' + str(item.created_at) + ',' + \\\n 'Market:' + item.market + ',' + \\\n 'Product:' + item.product + ',' + \\\n 'Member:' + item.member + ',' + \\\n 'Item:' + item.item + ',' + \\\n 'Item_Group:' + item.item_group + ',' + \\\n 'Item_Seq:' + item.item_seq + ',' + \\\n 'Data:' + item.data + ','\n else:\n rows = 'Not Found : ' + u_item\n else: # Get the 'item_group'\n qs = qs.filter(item_group =u_item)\n for item in qs:\n if item.item_group:\n rows = rows + \\\n 'Date:' + str(item.created_at) + ',' + \\\n 'Market:' + item.market + ',' + \\\n 'Product:' + item.product + ',' + \\\n 'Member:' + item.member + ',' + \\\n 'Item:' + item.item + ',' + \\\n 'Item_Group:' + item.item_group + ',' + \\\n 'Item_Seq:' + item.item_seq + ',' + \\\n 'Data:' + item.data + ','\n else:\n rows = 'Not Found, Group Item: ' + u_item\n\n\n # rows = rows + \"BIC_CODE : \" + item.bic_code + \", \"\n # jsonString = json.dumps(\"BIC_CIDE : \" + item.bic_code)\n # rows = rows + jsonString +\", \"\n # rows = rows + item.get_market_display()\n\n\n\n # \\\n # +\\\n # item.created_at + \"\"\n #\n\n # # q = request.GET.get('q' , '')\n # # q = u_item.GET.get()\n # # qs = qs.filter(bic_code__icontains=u_item)\n\n\n return rows\n\n\n # return (\" * Called the ListView() \")\n\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] IrsUsdDataView()\n-----------------------------------------------------\n'''\ndef IrsUsdDataView( u_product,\n u_member,\n u_date,\n u_process,\n u_item,\n u_seq):\n print(\"- Called the IrsUsdDataView() \")\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] NdfDataView()\n-----------------------------------------------------\n'''\ndef NdfDataView( u_product,\n u_member,\n u_date,\n u_process,\n u_item,\n u_seq):\n print(\"- Called the NdfDataView() 
\")\n\n\n\n\n\n'''\n-----------------------------------------------------\n[Function] FxDataView()\n-----------------------------------------------------\n'''\ndef FxDataView( u_product,\n                u_member,\n                u_date,\n                u_process,\n                u_item,\n                u_seq):\n    print(\"- Called the FxDataView() \")\n","repo_name":"iampeterkr/LWYapi","sub_path":"ccp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":27502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"41732004416","text":"from __future__ import annotations\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport heapq\nimport itertools\n\nimport torch\n\n\nclass BeamHypotheses:\n    def __init__(\n        self,\n        length_penalty: float,\n        group_size: int,\n        stop_early: bool = False,\n    ) -> None:\n        self.length_penalty = length_penalty\n        self.group_size = group_size\n        self.stop_early = stop_early\n        self.sorted_beams = []\n        self.worst_score = 1e9\n\n    def __len__(self):\n        return len(self.sorted_beams)\n\n    def add(\n        self,\n        input_ids: torch.LongTensor,\n        next_token: int,\n        logsoftmax_sum: float,\n        next_beam_index: int,\n        beam_indices: Optional[torch.LongTensor],\n    ):\n        \"\"\"add a completed sentence to the storage\n\n        Args:\n            input_ids (torch.LongTensor): (n_sequence) - Note the eos_token is not included in this input_ids\n            logsoftmax_sum (float)\n            beam_indices (torch.LongTensor): (n_sequence)\n            next_beam_index: int\n        \"\"\"\n        sequence_length = input_ids.shape[0]\n        score = logsoftmax_sum / (sequence_length**self.length_penalty)\n        if len(self) >= self.group_size and score <= self.worst_score:\n            return\n\n        if beam_indices is not None:\n            concatenated_beam_indices = torch.cat(\n                (\n                    beam_indices,\n                    torch.tensor(next_beam_index, dtype=beam_indices.dtype, device=beam_indices.device).unsqueeze(0),\n                )\n            )\n        else:\n            # beam_indices is None here, so take dtype/device from input_ids instead\n            concatenated_beam_indices = torch.tensor(\n                next_beam_index, dtype=torch.long, device=input_ids.device\n            ).unsqueeze(0)\n        heapq.heappush(self.sorted_beams, (score, input_ids, concatenated_beam_indices))\n        if len(self) > self.group_size:\n            heapq.heappop(self.sorted_beams)\n            # heap entries are (score, input_ids, beam_indices), so index 0 is the score\n            self.worst_score = self.sorted_beams[0][0]\n        else:\n            self.worst_score = min(score, self.worst_score)\n\n    def is_done(\n        self,\n        input_ids: torch.LongTensor,\n        next_scores: torch.FloatTensor,\n        max_length: int,\n    ) -> bool:\n        \"\"\"check if this beamHypotheses can stop early\n\n        Args:\n            input_ids (torch.LongTensor): input_ids of shape (group_size, n_sequence). 
Next token is not included\n next_scores (torch.FloatTensor): (group_size)\n max_length (int): the max_length\n\n Returns:\n bool: do we want to stop early\n \"\"\"\n if len(self) < self.group_size:\n return False\n if self.stop_early:\n return True\n\n cur_len = input_ids.shape[1] + 1\n max_score = torch.max(next_scores).item()\n # because logsoftmax are always negative\n if self.length_penalty > 0.0:\n highest_possible_score = max_score / (max_length**self.length_penalty)\n else:\n highest_possible_score = max_score / (cur_len**self.length_penalty)\n return self.worst_score >= highest_possible_score\n\n def get_sorted_inputs(self) -> List[Tuple[float, torch.LongTensor, torch.LongTensor]]:\n return sorted(self.sorted_beams, reverse=True)\n\n\nclass BeamSearchScorer:\n def __init__(\n self,\n batch_size: int,\n n_beams: int,\n n_beam_groups: int,\n device: torch.device,\n eos_token_ids: Union[list[int], int],\n pad_token_id: int,\n length_penalty: Optional[float] = 1.0,\n num_beam_hyps_to_keep: Optional[int] = 1,\n max_length: Optional[int] = None,\n ) -> None:\n self.batch_size = batch_size\n self.n_beams = n_beams\n self.n_beam_groups = n_beam_groups\n self.group_size = self.n_beams // self.n_beam_groups\n self.device = device\n self.length_penalty = length_penalty\n self.num_beams_hypes_to_keep = num_beam_hyps_to_keep\n self.max_length = max_length\n if isinstance(eos_token_ids, int):\n eos_token_ids = [eos_token_ids]\n self.eos_token_ids = eos_token_ids\n self.pad_token_id = pad_token_id\n self.beam_hypothesis_list = [\n BeamHypotheses(\n length_penalty,\n self.group_size,\n stop_early=False,\n )\n for _ in range(self.batch_size * self.n_beam_groups)\n ]\n self._done = torch.tensor(\n [False for _ in range(batch_size * self.n_beam_groups)], dtype=torch.bool, device=self.device\n )\n\n @property\n def is_done(self) -> bool:\n return self._done.all()\n\n def process(\n self,\n input_ids: torch.LongTensor,\n input_beam_scores: torch.FloatTensor,\n input_beam_tokens: torch.LongTensor,\n input_beam_indices: torch.LongTensor,\n beam_group_index: int,\n beam_indices: torch.LongTensor,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"process one beam_group\n\n Args:\n input_ids (torch.LongTensor): (n_batch * group_size, n_sequence)\n input_beam_scores (torch.FloatTensor): beam scores for the sampled data (n_batch, n_sampled)\n input_beam_tokens (torch.LongTensor): beam tokens for the sampled data (n_batch, n_sampled)\n input_beam_indices (torch.LongTensor): which beam does this sample belongs to (n_batch, n_sampled)\n beam_group_index (int): which beam group are we processing\n beam_indices (torch.LongTensor): beam_indices of previous sequence. 
(n_batch * group_size, n_sequence)\n        Returns:\n            Dict[str, torch.Tensor]: {beam_tokens, beam_scores, beam_indices}\n        \"\"\"\n        n_sampled = input_beam_scores.shape[1]\n        output_beam_tokens = torch.zeros(\n            (self.batch_size, self.group_size), dtype=input_beam_tokens.dtype, device=input_beam_tokens.device\n        )\n        output_beam_scores = torch.zeros(\n            (self.batch_size, self.group_size), dtype=input_beam_scores.dtype, device=input_beam_scores.device\n        )\n        output_beam_indices = torch.zeros(\n            (self.batch_size, self.group_size), dtype=input_beam_indices.dtype, device=input_beam_indices.device\n        )\n\n        for i in range(self.batch_size):\n            beam_index = i * self.n_beam_groups + beam_group_index\n            insert_index = 0\n            if self._done[beam_index]:\n                output_beam_tokens[i, :] = self.pad_token_id\n                output_beam_scores[i, :] = 0\n                output_beam_indices[i, :] = 0\n                continue\n            for j in range(n_sampled):\n                score = input_beam_scores[i, j].item()\n                token = input_beam_tokens[i, j].item()\n                index = input_beam_indices[i, j].item()\n                if token in self.eos_token_ids:\n                    # if beam_token does not belong to top num_beams tokens, it should not be added\n                    if j < self.group_size:\n                        # add() expects (input_ids, next_token, logsoftmax_sum, next_beam_index, beam_indices)\n                        self.beam_hypothesis_list[beam_index].add(\n                            input_ids[i * self.group_size + index],\n                            token,\n                            score,\n                            index,\n                            beam_indices,\n                        )\n                else:\n                    output_beam_scores[i, insert_index] = score\n                    output_beam_tokens[i, insert_index] = token\n                    # the index in the input_id\n                    output_beam_indices[i, insert_index] = i * self.group_size + index\n                    insert_index += 1\n                if insert_index >= self.group_size:\n                    break\n\n            # is_done is a read-only property; update the underlying _done tensor\n            self._done[beam_index] = self._done[beam_index] or self.beam_hypothesis_list[beam_index].is_done(\n                input_ids[i * self.group_size : i * self.group_size + self.group_size, :],\n                output_beam_scores[i],\n                self.max_length,\n            )\n\n        return {\n            \"beam_tokens\": output_beam_tokens,\n            \"beam_scores\": output_beam_scores,\n            \"beam_indices\": output_beam_indices,\n        }\n\n    def finalize(\n        self,\n        input_ids: torch.LongTensor,\n    ) -> Dict[str, torch.Tensor]:\n        \"\"\"get the output decoded\n\n        Args:\n            input_ids (torch.LongTensor): (n_batch * n_beam, n_sequence)\n            input_beam_scores (torch.FloatTensor): beam scores for the sampled data (n_batch * n_beam_groups, n_sampled)\n            input_beam_tokens (torch.LongTensor): beam tokens for the sampled data (n_batch * n_beam_groups, n_sampled)\n            input_beam_indices (torch.LongTensor): which beam does this sample belong to (n_batch * n_beam_groups, n_sampled)\n\n        Returns:\n            Dict[str, torch.Tensor]: {sequences, sequence_scores, beam_indices}\n        \"\"\"\n        # 1. first handle the case where some beam hypotheses are still unfinished\n        for i in range(self.batch_size * self.n_beam_groups):\n            if self._done[i]:\n                continue\n            # this means it ends due to StoppingCriteria, gracefully handle\n            self.beam_hypothesis_list[i].add()\n\n        # 2. 
loop over the beam_groups and get the decoded results\n        beam_scores = torch.zeros(\n            (self.batch_size * self.num_beams_hypes_to_keep),\n            dtype=torch.float,\n            device=input_ids.device,\n        )\n        max_result_length = 0\n        best_hypos = []\n        for i in range(self.batch_size):\n            beam_hyp_list_for_this_batch = self.beam_hypothesis_list[\n                i * self.n_beam_groups : i * self.n_beam_groups + self.n_beam_groups\n            ]\n            list_of_ordered_list = [beam_hyp.get_sorted_inputs() for beam_hyp in beam_hyp_list_for_this_batch]\n            merged_list = list(itertools.chain.from_iterable(list_of_ordered_list))\n            sorted_list = sorted(merged_list, key=lambda x: x[0], reverse=True)\n            sorted_list = sorted_list[: self.num_beams_hypes_to_keep]\n            # list has no add(); extend() keeps best_hypos a flat list of (score, input_ids, beam_indices) tuples\n            best_hypos.extend(sorted_list)\n\n            for j, (score, input_ids, _) in enumerate(sorted_list):\n                beam_scores[i * self.num_beams_hypes_to_keep + j] = score\n                # leave space for the eos_token_id\n                max_result_length = max(max_result_length, input_ids.shape[0] + 1)\n\n        max_result_length = max(max_result_length, self.max_length)\n        result_sequences = torch.full(\n            (self.batch_size * self.num_beams_hypes_to_keep, max_result_length),\n            self.pad_token_id,\n            dtype=input_ids.dtype,\n            device=input_ids.device,\n        )\n        result_beam_indices = torch.full(\n            (self.batch_size * self.num_beams_hypes_to_keep, max_result_length),\n            -1,\n            dtype=input_ids.dtype,\n            device=input_ids.device,\n        )\n        for i, (score, input_ids, beam_indices) in enumerate(best_hypos):\n            # we have max_length stopping criteria, so we can make sure input_ids size is smaller or equal to max_length\n            result_sequences[i, : input_ids.shape[0]] = input_ids\n            if input_ids.shape[0] < max_result_length:\n                result_sequences[i, input_ids.shape[0]] = self.eos_token_ids[0]\n            result_beam_indices[i, : beam_indices.shape[0]] = beam_indices\n\n        return {\n            \"sequences\": result_sequences,\n            \"sequence_scores\": beam_scores,\n            \"beam_indices\": result_beam_indices,\n        }\n","repo_name":"xyg-coder/simple_learning","sub_path":"dpo_finetune/beam_search.py","file_name":"beam_search.py","file_ext":"py","file_size_in_byte":11481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"5143202682","text":"#####################################\n## Team ID #2457\n## Author List Shreyas Shubhankar, Rudra Narayan Pandey\n## Filename: Image Processing\n## Theme Harvester Bot\n## Functions: fruit_estimate(int,int,int),fruit_size_estimate(),main() \n## Global Variables : \n#####################################\n\nimport numpy as np\nimport cv2\nimport serial\nser=serial.Serial('/dev/ttyUSB0')\n#####################################\n## Global Variables\n#####################################\n\ncurrent_fruit=0 #Current side of the tree being faced by bot in clockwise manner\nFire_output1=1\nfirst_deposition=0 #First Deposition Zone\napple=[0,0,0] #[small,large,max]\norange=[0,0,0]\nblueberry=[0,0,0]\n######################################\n##Fruit Estimating Function\n######################################\ndef fruit_estimate(hsv,low_threshold,high_threshold):\n    #Recognising the fruit in image\n    #Fruit is recognised based on which hsv range has a contour of maximum area in it\n    mask=cv2.inRange(hsv,low_threshold,high_threshold) #The image is being masked in the hue of interest\n    kernel = np.ones((5,5),np.uint8)\n    mask=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernel) #Removing noise and filling gaps in the thresholded binary image\n    mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)\n    image, contours, hierarchy = 
cv2.findContours(mask,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE) #Finding Contours in the binary image\n contour_array_size=len(contours) #Size of Contour array length\n area_max=0 #Max Area of contour found in the image\n current_contour=0\n for i in contours[0:contour_array_size]:\n v=contours[current_contour]\n contour_area=int(cv2.contourArea(v)) #Finding area of contour\n if contour_area>area_max:\n area_max=contour_area #Finding a contour of max area in the image\n image=cv2.drawContours(img,contours,current_contour,(100,100,0),3)\n #print(p)\n cv2.imshow('image',image)\n current_contour=current_contour+1\n return area_max #Returning the max area\n######################################\n## Fruit Size Estimating Function\n######################################\ndef fruit_size_estimate(fruit_area,fruit_number): #Finding the size of fruit\n if fruit_number == 0: ##Red\n if fruit_area in range(0,10000):\n fruit_size=2 #0-Large fruit, 1-Max Fruit, 2-Small Fruit\n apple[2]=apple[2]+1 #Keeping count of the fruits recognised\n req_fruits_table[0,2]=req_fruits_table[0,2]-1 #Subtracting the found fruit from required fruits table\n return fruit_size\n elif fruit_area in range(10000,20000):\n fruit_size=0\n apple[0]=apple[0]+1\n req_fruits_table[0,0]=req_fruits_table[0,0]-1\n return fruit_size\n elif fruit_area in range(20000,70000):\n fruit_size=1\n apple[1]=apple[1]+1\n req_fruits_table[0,1]=req_fruits_table[0,1]-1\n return fruit_size\n elif fruit_number == 2: #Orange\n if fruit_area in range(0,10000):\n fruit_size=2\n orange[2]=orange[2]+1\n req_fruits_table[2,2]=req_fruits_table[2,2]-1\n return fruit_size\n elif fruit_area in range(10000,60000):\n fruit_size=0\n orange[0]=orange[0]+1\n req_fruits_table[2,0]=req_fruits_table[2,0]-1\n return fruit_size\n elif fruit_area in range(60000,100000):\n fruit_size=1\n orange[1]=orange[1]+1\n req_fruits_table[2,1]=req_fruits_table[2,1]-1\n return fruit_size\n elif fruit_number == 1: #Blueberry\n if fruit_area in range(0,10000):\n fruit_size=2\n blueberry[2]=blueberry[2]+1\n req_fruits_table[1,2]=req_fruits_table[1,2]-1\n return fruit_size\n elif fruit_area in range(10000,20000):\n fruit_size=0\n blueberry[0]=blueberry[0]+1\n req_fruits_table[1,0]=req_fruits_table[1,0]-1\n return fruit_size\n elif fruit_area in range(20000,70000):\n fruit_size=1\n blueberry[1]=blueberry[1]+1\n req_fruits_table[1,1]=req_fruits_table[1,1]-1\n return fruit_size\n\ncapture_frame = cv2.VideoCapture(0) #Starting video input from the camera\n\n#######################\n## Get Input from User\n#######################\n#tree_config=input('Enter the Arena Configuration')\ntree_config=[9,18,29]\ntree_config=np.array(tree_config)\nprint(tree_config)\n\n##fruit_table=input('Enter the Fruit Table')\nfruit_table=[3,6,1,4],[5,2,3,3],[8,3,0,4]\nfruit_table=np.array(fruit_table)\nprint(fruit_table)\n\n##depo_zone=input('Enter the Deposition Zone')\ndepo_zone=[37,38,44,45],[35,36,42,43],[40,41,47,48]\ndepo_zone=np.array(depo_zone)\nprint(depo_zone)\n##\n##req_fruits_table=input('Enter the Required Fruits Table')\nreq_fruits_table=[1,1,1],[1,1,1],[0,2,0]\nreq_fruits_table=np.array(req_fruits_table)\nprint(req_fruits_table)\n\n \n######################################\n## Main Program\n######################################\napple_deposit=0 #After recognising the fruits the required deposition zone will be found\norange_deposit=0\nblueberry_deposit=0\nt1=str(tree_config[0])\nser.write(t1) #Writing Tree Table to Firebird V\nser.write('n') #This is used to communicate that one element has been 
sent\nser.write(str(tree_config[1]))\nser.write('n')\nser.write(str(tree_config[2]))\nser.write('n')\n\nFire_output1=ser.read(); #Bot reaches in front of tree\nprint(Fire_output1)\nwhile(1):\n print('inside while')\n if(Fire_output1=='1'):\n print('inside if')\n while (current_fruit<4): #Till bot has faced all the sides of the tree\n \n\n ret, img = capture_frame.read() #Image is captured\n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV) #Conversion to hsv\n max_area_red=0\n max_area_orange=0\n max_area_blue=0\n\n low_threshold_red=np.array([155,100,100]) #Red Threshold in HSV range\n high_threshold_red=np.array([180,255,255])\n max_area_red=fruit_estimate(hsv,low_threshold_red,high_threshold_red)\n print(max_area_red)\n\n low_threshold_orange=np.array([0,100,100]) #Orange Threshold in HSV range\n high_threshold_orange=np.array([40,255,255])\n max_area_orange=fruit_estimate(hsv,low_threshold_orange,high_threshold_orange)\n print(max_area_orange)\n\n low_threshold_blue=np.array([75,100,100]) #Blue Threshold in HSV range\n high_threshold_blue=np.array([130,255,255])\n max_area_blue=fruit_estimate(hsv,low_threshold_blue,high_threshold_blue)\n print(max_area_blue)\n\n if max_area_red>max_area_orange and max_area_red>max_area_blue:\n fruit_size=fruit_size_estimate(max_area_red,0)\n print (fruit_size)\n print('apple')\n if (req_fruits_table[0,fruit_size]>=0):\n ser.write(fruit_table[0,current_fruit]) ##Send the position of fruit block\n ser.write('n')\n else:\n ser.write('0')\n ser.write('n')\n current_fruit=current_fruit+1\n apple_deposit=1 #Deposit in the apple deposition zone\n orange_deposit=0\n blueberry_deposit=0\n elif max_area_blue>max_area_red and max_area_blue>max_area_orange:\n fruit_size=fruit_size_estimate(max_area_blue,1)\n print (fruit_size)\n print('Blueberry')\n if (req_fruits_table[1,fruit_size]>=0):\n ser.write(fruit_table[1,current_fruit]) ##Send the position of fruit block\n ser.write('n')\n else:\n ser.write('0')\n ser.write('n')\n current_fruit=current_fruit+1\n apple_deposit=0\n orange_deposit=0\n blueberry_deposit=1 #Deposit in the blueberry deposition zone\n elif max_area_orange>max_area_blue and max_area_orange>max_area_red:\n fruit_size=fruit_size_estimate(max_area_orange,2)\n print (fruit_size)\n print('Orange')\n if (req_fruits_table[2,fruit_size]>=0):\n ser.write(fruit_table[2,current_fruit]) ##Send the position of fruit block\n ser.write('n')\n else:\n ser.write('0')\n ser.write('n')\n current_fruit=current_fruit+1\n apple_deposit=0\n orange_deposit=1 #Deposit in the orange deposition zone\n blueberry_deposit=0\n\n \n\n Fire_output1=ser.read() ##Waiting for Firebird to reach the next side of the tree\n current_fruit=current_fruit+1\n\n if (Fire_output1==1):\n if(apple_deposit==1):\n ser.write(depo_zone[0,1]) #Deposition Zone of Apple\n ser.write('n')\n elif(blueberry_deposit==1):\n ser.write(depo_zone[1,1]) #Deposition Zone of Blueberry\n ser.write('n')\n elif(orange_deposit==1):\n ser.write(depo_zone[2,1]) #Deposition Zone of Orange\n ser.write('n')\n apple_deposit=0\n orange_deposit=0\n blueberry_deposit=0\n current_fruit=0\n Fire_output1=ser.read(); #Till bot reaches the side of tree again and the loop is repeated\n\n else:\n break #Task 
Completed\n\n\n#######################################\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n","repo_name":"Akashleena/Eyantra","sub_path":"Pickandplaceharvester.py","file_name":"Pickandplaceharvester.py","file_ext":"py","file_size_in_byte":9717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"15492662951","text":"def fun():\n    b=\"hello my\",\" name is ujala\"\n    k=\"\".join(b)\n    f=k.split()\n    i=0\n    while i= self.retries:\n            raise\n        LOGGER.warning('Timeout occurred, retrying…')\n        return self.send_message(message, attempt + 1)\n","repo_name":"confirm/Rocket-R60V","sub_path":"rocket_r60v/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
{"seq_id":"71740045851","text":"import os\nimport logging\nimport docker\nimport requests\nfrom kubernetes import client, config\nimport datetime\nfrom requests import HTTPError\n\nlogging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')\nlogger = logging.getLogger(__name__)\n\n\ndef restart_deployments_with_new_images():\n    # Load the Kubernetes configuration from default location\n    config.load_incluster_config()\n\n    # Create an instance of the Kubernetes client\n    v1 = client.AppsV1Api()\n\n    # Create an instance of the Docker client\n    docker_client = docker.from_env()\n    v1Core = client.CoreV1Api()\n    # Retrieve all deployments in the cluster\n    pods = v1Core.list_pod_for_all_namespaces(watch=False).items\n    updated = []\n\n    for pod in pods:\n        pod_name = pod.metadata.name\n        pod_namespace = pod.metadata.namespace\n        deployment = get_deployment_name(v1Core, v1, pod_name, namespace=pod_namespace)\n        deployment_name = deployment[\"deployment_name\"]\n        deployment_type = deployment[\"deployment_type\"]\n\n        # Retrieve the current image and its SHA used by the deployment\n        current_image = pod.spec.containers[0].image\n        current_tag = current_image.split(\":\")[1]\n        current_sha = None\n        if len(pod.status.container_statuses) > 0 and 'sha256' in pod.status.container_statuses[0].image_id:\n            current_sha = pod.status.container_statuses[0].image_id.split('sha256:')[1]\n\n        # Check if a newer version of the image is available\n        if deployment_name is not None and current_tag == \"latest\" and current_sha is not None and is_newer_image_available(docker_client, current_image, current_sha):\n            logger.info(f\"Newer image available for deployment: {deployment_name}\")\n            updated.append(deployment_name)\n\n            # Patch the deployment to trigger a restart\n            patch = {\n                \"spec\": {\n                    \"template\": {\n                        \"metadata\": {\n                            \"annotations\": {\n                                \"kubectl.kubernetes.io/restartedAt\": str(datetime.datetime.now())\n                            }\n                        }\n                    }\n                }\n            }\n            if deployment_type == \"ReplicaSet\":\n                v1.patch_namespaced_deployment(name=deployment_name, namespace=pod_namespace, body=patch)\n            elif deployment_type == \"StatefulSet\":\n                v1Core.delete_namespaced_pod(pod_name, pod_namespace)\n\n            logger.info(f\"Deployment {deployment_name} in namespace {pod.metadata.namespace} restarted.\")\n\n\ndef get_deployment_name(v1Core, v1, pod_name, namespace='default'):\n    pod = v1Core.read_namespaced_pod(pod_name, namespace)\n\n    owner_references = pod.metadata.owner_references\n    deployment_name = None\n    deployment_type = None\n\n    for owner_reference in owner_references:\n        if owner_reference.kind == 'ReplicaSet':\n            replica_set_name = owner_reference.name\n            replica_set = v1.read_namespaced_replica_set(replica_set_name, namespace)\n            deployment_name = 
replica_set.metadata.owner_references[0].name\n deployment_type = owner_reference.kind\n break\n if owner_reference.kind == 'StatefulSet':\n deployment_name = owner_reference.name\n deployment_type = owner_reference.kind\n break\n\n return {\"deployment_name\": deployment_name, \"deployment_type\": deployment_type}\n\n\ndef get_image_sha(docker_client, image):\n # Retrieve the Docker image object\n try:\n docker_client.images.pull(image)\n except HTTPError:\n logger.error(f\"Error fetching {image}\")\n return None\n image_obj = docker_client.images.get(image)\n\n # Retrieve the SHA of the image\n if image_obj and \"RepoDigests\" in image_obj.attrs:\n repo_digests = image_obj.attrs[\"RepoDigests\"]\n if repo_digests:\n return repo_digests[0].split(\"@sha256:\")[1]\n\n # Return None if the image SHA could not be retrieved\n return None\n\n\ndef is_newer_image_available(docker_client, current_image, current_sha):\n # Retrieve the SHA of the latest image\n latest_sha = get_image_sha(docker_client, current_image)\n logger.debug(f\"Latest SHA for image {current_image} is {latest_sha}. Current one is {current_sha}.\")\n if latest_sha == current_sha:\n logger.info(f\"No update available for image {current_image}. Skipping\")\n else:\n logger.info(f\"Update available for image {current_image}\")\n if latest_sha is None or current_sha is None:\n return False\n # Compare the SHA values\n return latest_sha and latest_sha != current_sha\n\n\ndef gotify_notify(updated):\n if 'GOTIFY_URL' not in os.environ:\n logger.info('Gotify URL not set, skipping notifications')\n return\n updated_string = ','.join(updated)\n gotify_url = os.environ.get('GOTIFY_URL')\n gotify_payload = {\n 'title': f'Kube updater',\n 'message': f'Applications {updated_string} were updated',\n 'priority': 5, \n }\n\n if not gotify_url:\n raise RuntimeError('Gotify URL not provided')\n\n try:\n response = requests.post(gotify_url, json=gotify_payload)\n response.raise_for_status()\n print('Message sent to Gotify successfully!')\n except requests.exceptions.RequestException as e:\n print(f'Failed to send message to Gotify: {e}')\n \n\n\nif __name__ == '__main__':\n restart_deployments_with_new_images()\n","repo_name":"IvanVojtko/kube-updater","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40399988871","text":"\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport h5py \nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nplt.rcParams['mathtext.fontset'] = 'stix'\nplt.rcParams['font.family'] = 'STIXGeneral'\nplt.rcParams.update({'font.size': 14})\n\n#inputs\n\nprtl_base = \"../../tristan_acc-mec_Ez/testing_edotv/output/prtl.tot.\"\n#prtl_base = \"../../tristan_acc-mec_Ez/testing_edotv/recon_rgn/output/prtl.tot.\"\n\n#20 to 40 seem messed up, not sure why...\ntlow= 10\ntup = 31\ntstep = 20\nmaxind = 1.2e9 #taken empirically from first timestep, where particles that are initialized in sheet are actually saved in the output, 1 per proc (should be ~340 of them), let's just delete them out here\nminind = 1e5\n\n\nfig, ax1 = plt.subplots(1)\n\nmymax = 0\n\nfor t in range(tlow,tup,tstep):\n tstr = '%03d'%t\n filename = prtl_base + tstr\n print(filename)\n f_prtl = h5py.File(filename,'r')\n gammae = np.array(f_prtl['gammae'])\n edotv = np.array(f_prtl['edotve'])\n inde = np.array(f_prtl['inde'])\n proce = np.array(f_prtl['proce'])\n\n numprtls = 
np.size(gammae)\n\n\n print('maxgam : ', np.max(gammae))\n\n #coldprtls = gammae < 20.\n #gammae*coldprtls #only look at particles accelerated out of thermal pool\n\n\n #goodprtls = inde/', methods=['POST'])\n@api.validate(query=Query, data=Data, resp=Response, x=[e403], tags=['model'])\ndef predict(source, target):\n \"\"\"\n predict demo\n\n demo for `query`, `data`, `resp`, `x`\n \"\"\"\n print(f'=> from {source} to {target}') # path\n print(f'Data: {request.json_data}') # Data\n print(f'Query: {request.query}') # Query\n if random() < 0.5:\n e403.abort('bad luck')\n return Response(label=int(10 * random()), score=random())\n\n\n@app.route('/api/code', methods=['POST'])\n@api.validate(x=[e233], tags=['test'])\ndef withcode():\n \"\"\"\n demo for JSON with status code\n \"\"\"\n return jsonify('code'), 203\n\n\n@app.route('/api/code', methods=['GET'])\n@api.validate()\ndef getcode():\n \"\"\"\n demo for the same route with different methods\n \"\"\"\n return jsonify('code'), 200\n\n\n@app.route('/api/header', methods=['POST'])\n@api.validate(x=[e233], tags=['test', 'demo'])\ndef withheader():\n \"\"\"\n demo for JSON with status code and header\n \"\"\"\n return jsonify('header'), 203, {'X': 233}\n\n\nif __name__ == '__main__':\n api.register(app)\n app.run()\n","repo_name":"kemingy/flaskerk","sub_path":"examples/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"32"} +{"seq_id":"72506205851","text":"\"\"\"\nLabels\n0: background\n1: ground glass opacity\n2: consolidation\n3: lung\n\n\"\"\"\n\nimport SimpleITK as sitk\n\n\ndef intensities_and_textures(img, lesions):\n lesions = sitk.Cast(lesions, sitk.sitkUInt8)\n\n labelstatsFilter = sitk.LabelIntensityStatisticsImageFilter()\n labelstatsFilter.Execute(lesions, img)\n labels = labelstatsFilter.GetLabels()\n if 3 in labels:\n mean_healthy = labelstatsFilter.GetMean(3)\n kurtosis_healthy = labelstatsFilter.GetKurtosis(3)\n skewness_healthy = labelstatsFilter.GetSkewness(3)\n else:\n mean_healthy = -1\n kurtosis_healthy = 6.1 # median value from the training data\n skewness_healthy = 2.3 # median value from the training data\n if 1 in labels:\n mean_ggo = labelstatsFilter.GetMean(1)\n kurtosis_ggo = labelstatsFilter.GetKurtosis(1)\n skewness_ggo = labelstatsFilter.GetSkewness(1)\n else:\n mean_ggo = -1 # healthy lung\n kurtosis_ggo = kurtosis_healthy\n skewness_ggo = skewness_healthy\n if 2 in labels:\n mean_cons = labelstatsFilter.GetMean(2)\n kurtosis_cons = labelstatsFilter.GetKurtosis(2)\n skewness_cons = labelstatsFilter.GetSkewness(2)\n else:\n mean_cons = -1\n kurtosis_cons = kurtosis_healthy\n skewness_cons = skewness_healthy\n\n\n return {\n 'mean_healthy': mean_healthy,\n 'kurtosis_healthy': kurtosis_healthy,\n 'skewness_healthy': skewness_healthy,\n 'mean_ggo': mean_ggo,\n 'kurtosis_ggo': kurtosis_ggo,\n 'skewness_ggo': skewness_ggo,\n 'mean_cons': mean_cons,\n 'kurtosis_cons': kurtosis_cons,\n 'skewness_cons': skewness_cons,\n }\n","repo_name":"IneDirks/Covid_severity_prediction","sub_path":"extract_intensities.py","file_name":"extract_intensities.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1867027376","text":"#Viope-Chap5-Exercise2-4:\n\na = input(\"Give a file name: \")\nb = input(\"Write something: \")\nmyfile = open(a, \"w\")\n\nmyfile.write(b)\nmyfile.close()\n\nprint(f\"Wrote {b} to the file 
{a}\")\n\n","repo_name":"lytrieuminh/Python-Programming","sub_path":"17.Chap5-Exercise2-4.py","file_name":"17.Chap5-Exercise2-4.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29913553797","text":"\ndef true_alphabetic(txt):\n lens = [len(i) for i in txt.split()]\n all_sorted = sorted(j for i in txt.split() for j in i)\n n = 0\n final = []\n for i in range(len(lens)): # 0, 1, 2\n final.append((str(\"\".join(all_sorted[n : n + lens[i]])))) \n n = n + lens[i]\n return \" \".join(final)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"6DppMcokmzJ3TtNNB_23.py","file_name":"6DppMcokmzJ3TtNNB_23.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33321138373","text":"import multiprocessing\n\n\ndef cal_sq(n, q):\n for i in n:\n q.put(i * i)\n\n\nif __name__ == \"__main__\":\n arr = [2, 3, 4]\n q = multiprocessing.Queue()\n p1 = multiprocessing.Process(target=cal_sq, args=(arr, q,))\n\n p1.start()\n p1.join()\n while q.empty() is False:\n print(q.get())\n\n## ### multiprocessing\n# q = multiprocessing.Queue()\n# -->lives in shared memory\n# -->used to share data between process\n\n##### queue module\n# import queue\n# q = queue.Queue()\n# lives in in-process memory\n# used to share data between threads\n","repo_name":"Anilkanta-sudo/Multi_threading_and_processing","sub_path":"multi_q.py","file_name":"multi_q.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1954019469","text":"from flask import current_app, g\nfrom flask_login import current_user, login_user, logout_user\nfrom flask_restplus import Namespace, Resource, abort\nfrom backend.models import db\nfrom backend.models.event import Event\nfrom backend.models.dance import Dance\nfrom backend.models.discipline import Discipline\nfrom backend.models.dancing_class import DancingClass\nfrom backend.models.competition import Competition\nfrom backend.models.user import User\nfrom backend.constants import OK, TOURNAMENT, XTDS, ODK, SOND, AL_TOURNAMENT_OFFICE_MANAGER\nfrom .functions import create_base_dances, create_disciplines, create_dancing_classes, create_second_base_dances, \\\n generate_xtds_competitions, generate_odk_competitions, generate_sond_competitions\nfrom datetime import datetime, date\nfrom backend.models.adjudicator import Adjudicator\nfrom backend.apis.adjudicators import adjudicators\nfrom backend.apis.competition import competitions\nfrom backend.models.user.wrappers import login_required, requires_access_level\nfrom backend.models.event_result import EventResult\n\n\napi = Namespace(\"event\", description=\"Events\")\n\n\n@api.route(\"/\")\nclass EventAPI(Resource):\n\n @api.doc(\"list_events\", security=None)\n def get(self):\n \"\"\"List all events\"\"\"\n return [e.json() for e in Event.query.all()]\n\n @api.doc(\"create_event\")\n @api.param(\"name\", \"Event name\")\n @api.param(\"date\", \"Date the event takes place\")\n @login_required\n @requires_access_level([AL_TOURNAMENT_OFFICE_MANAGER])\n def post(self):\n \"\"\"Create a new event\"\"\"\n e = Event()\n e.name = api.payload[\"name\"]\n e.is_active = True\n e.date = date.fromtimestamp(datetime.strptime(api.payload[\"date\"], \"%Y-%m-%d\").timestamp())\n db.session.add(e)\n db.session.commit()\n return OK\n\n\n@api.route(\"/defaults\")\nclass 
EventAPIDefaults(Resource):\n\n @api.doc(\"create_default_competitions\")\n @api.param(\"competitions\", \"Competitions that will be held during the event\")\n def post(self):\n \"\"\"Create default dances, discipline, classes, and selected competitions\"\"\"\n create_base_dances()\n if current_app.config.get(TOURNAMENT) == XTDS or current_app.config.get(TOURNAMENT) == SOND:\n create_second_base_dances()\n create_dancing_classes()\n create_disciplines()\n g.event = Event.query.filter(Event.is_active.is_(True)).first()\n d = g.event.date\n start_time = datetime.utcfromtimestamp(datetime(d.year, d.month, d.day, 9).timestamp())\n if current_app.config.get(TOURNAMENT) == XTDS:\n generate_xtds_competitions(start_time, api.payload[\"competitions\"])\n elif current_app.config.get(TOURNAMENT) == ODK:\n generate_odk_competitions(start_time, api.payload[\"competitions\"])\n elif current_app.config.get(TOURNAMENT) == SOND:\n generate_sond_competitions(start_time, api.payload[\"competitions\"])\n return {\n \"dances\": [d.json() for d in Dance.query.order_by(Dance.order).all()],\n \"disciplines\": [d.json() for d in Discipline.query.order_by(Discipline.name).all()],\n \"classes\": [d.json() for d in DancingClass.query.order_by(DancingClass.name).all()],\n \"competitions\": [c.json() for c in Competition.query.order_by(Competition.when).all()],\n }\n\n\n@api.route(\"/\")\n@api.param(\"event_id\", \"Event id\")\n@api.response(404, \"Event not found\")\nclass EventAPISpecific(Resource):\n\n @api.doc(\"get_event\")\n @login_required\n @requires_access_level([AL_TOURNAMENT_OFFICE_MANAGER])\n def get(self, event_id):\n \"\"\"Fetch a specific Event\"\"\"\n e = Event.query.get(event_id)\n if e is not None:\n return e.json()\n abort(404, \"Unknown event_id\")\n\n\n@api.route(\"/dashboard\")\nclass EventAPIDashboard(Resource):\n\n @api.doc(\"get_dashboard\")\n @login_required\n @requires_access_level([AL_TOURNAMENT_OFFICE_MANAGER])\n def get(self):\n \"\"\"Get the dashboard data\"\"\"\n users = User.query.filter(User.is_active.is_(True)).order_by(User.username).all()\n users = [u for u in users if u != current_user]\n return {\n \"users\": [u.json() for u in users]\n }\n\n @api.doc(\"switch user\")\n @api.param(\"user_id\", \"User_id\")\n @login_required\n @requires_access_level([AL_TOURNAMENT_OFFICE_MANAGER])\n def post(self):\n \"\"\"Create a new event\"\"\"\n logout_user()\n user = User.query.get(api.payload[\"user_id\"])\n # login_user(user)\n return user.get_auth_token()\n\n\n@api.route(\"/assignments\")\nclass EventAPIAssignments(Resource):\n\n @api.doc(\"switch user\")\n @api.param(\"assignments\", \"List of strings {competition_id-adjudicator_id}\")\n @login_required\n @requires_access_level([AL_TOURNAMENT_OFFICE_MANAGER])\n def patch(self):\n \"\"\"Assign adjudicators to competitions\"\"\"\n all_adjudicators = Adjudicator.query.order_by(Adjudicator.name).all()\n for comp in competitions():\n if comp.is_configurable():\n checks = [a for a in [f\"{comp.competition_id}-{adj.adjudicator_id}\"\n for adj in all_adjudicators] if a in api.payload[\"assignments\"]]\n adj = [int(a) for a in [a.split('-')[1] for a in checks]]\n comp.adjudicators = Adjudicator.query.filter(Adjudicator.adjudicator_id.in_(adj)).all()\n comp.update_adjudicator_assignments()\n db.session.commit()\n return {\n \"adjudicators\": adjudicators(),\n \"competitions\": [c.json() for c in competitions()],\n }\n\n\n@api.route(\"//results\")\n@api.param(\"event_id\", \"Event id\")\n@api.response(404, \"Event not found\")\nclass 
EventAPIResults(Resource):\n\n @api.doc(\"get_event_results_list\")\n def get(self, event_id):\n \"\"\"Fetch a specific Event results\"\"\"\n e = Event.query.get(event_id)\n if e is not None:\n if e.is_active:\n return [c.publish_json() for c in competitions() if c.show_result_list()]\n else:\n return {\n \"event\": e.json(),\n \"results\": [r.json() for r in e.sorted_results()],\n }\n abort(404, \"Unknown event_id\")\n\n\n@api.route(\"/results/\")\n@api.param(\"event_result_id\", \"Event result id\")\n@api.response(404, \"Event result not found\")\nclass EventAPIResults(Resource):\n\n @api.doc(\"get_event_result\")\n def get(self, event_result_id):\n \"\"\"Fetch a specific EventResult\"\"\"\n result = EventResult.query.get(event_result_id)\n if result is not None:\n return {\n \"event\": result.event.json(),\n \"data\": result.json(results=True),\n }\n abort(404, \"Unknown event_result_id\")\n","repo_name":"AlenAlic/DANCE","sub_path":"backend/apis/event/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20136116648","text":"\nimport cv2\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET','POST'])\ndef index():\n thres=128\n if request.method == 'POST':\n thres=int(request.form.get('thres', '128'))\n print('you selected',thres)\n img = cv2.imread('static/sample.png')\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.imwrite('static/gray_img.png', gray_img)\n threshold_val = thres\n max_val = 255\n threshold_type = cv2.THRESH_BINARY\n _, binary_img = cv2.threshold(gray_img, threshold_val, max_val, threshold_type)\n cv2.imwrite('static/binary_img.png',binary_img)\n\n return render_template('index.html',thres=thres)\n\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n\n","repo_name":"tztechno/tz_flask_20230314_bainary3","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23515663355","text":"import django_filters\nfrom django.core.paginator import Paginator\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Count, F, Q, Prefetch, Case, When\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom rest_framework import mixins, viewsets, status, permissions, exceptions\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom django.db.models import Value\nfrom engage.account.models import User\nfrom engage.services import notify_when\nfrom engage.core.constants import NotificationTemplate\nfrom datetime import datetime, timedelta\nfrom engage.account.constants import SubscriptionPackages, SubscriptionPlan\nfrom uuid import uuid4\nfrom engage.settings.base import API_SERVER_URL\nimport requests\n\n\n\nfrom django.http import JsonResponse\nfrom django.utils import timezone\n\nfrom engage.account.exceptions import (\n GameAccountUnavailable,\n MinimumProfileLevelException\n)\nfrom .constants import TournamentState\nfrom .exceptions import ParticipantExists, FreeUserCannotJoinTournament,TournamentCloseException,TournamentFirstException,TournamentStartException,UserInformException,UnbilledUserCannotJoinTournament,TournamentGetPrizeException\nfrom .models import (\n 
Tournament,\n TournamentParticipant,\n TournamentPrize,\n TournamentMatch\n)\nfrom .serializers import (\n TournamentSerializer,\n TournamentParticipantSerializer,\n TournamentPrizeSerializer,\n TournamentWinnerSerializer\n)\nfrom ..tournament.models import get_prize\nfrom ..core.models import Sticker\nfrom ..operator.constants import SubscriptionType\n\nimport logging \n\n\ndef send_sms(user, message, vault=None):\n headers = {'Content-type':'application/json', \n 'accept': 'text/plain'} # post data\n command = '/api/User/SendSms'\n if user.subscription==SubscriptionPlan.FREE:\n subs = SubscriptionPackages.FREE\n elif user.subscription==SubscriptionPlan.PAID1:\n subs = SubscriptionPackages.PAID1\n else:\n subs = SubscriptionPackages.PAID2\n data = {\n 'msisdn': user.mobile,\n 'message': message.replace('', ''),\n 'message_id': str(uuid4()),\n 'service_id': subs\n } \n if vault:\n return vault.send(command=command, headers=headers, data=data)\n url = API_SERVER_URL+command\n \n try:\n api_call = requests.post(url, headers=headers, json=data, timeout=2)\n except requests.exceptions.RequestException as e:\n print(e)\n return 'Server error', 555\n if api_call.status_code==200:\n # print(api_call)\n res = api_call.json()['statusCode']\n return res['message'], res['code']\n else:\n return api_call.content, api_call.status_code\n\nclass TournamentFilter(django_filters.FilterSet):\n state = django_filters.ChoiceFilter(choices=TournamentState.choices,\n method='filter_state')\n\n def filter_state(self, queryset, name, value):\n\n if value == TournamentState.UPCOMING:\n return queryset.upcoming()\n elif value == TournamentState.PAST:\n return queryset.past()\n elif value == TournamentState.ONGOING:\n return queryset.ongoing()\n\n class Meta:\n model = Tournament\n fields = ('state',)\n\n\nclass TournamentViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin,\n viewsets.GenericViewSet):\n queryset = Tournament.objects.select_related('game').prefetch_related(\n 'tournamentparticipant_set',\n Prefetch(\n 'tournamentprize_set',\n queryset=TournamentPrize.objects.order_by('position')\n )\n )\n serializer_class = TournamentSerializer\n permission_classes = (permissions.AllowAny,)\n search_fields = ('name',)\n ordering_fields = ['created', 'start_date']\n lookup_field = 'slug'\n\n\n # def get_queryset(self):\n # user = self.request.user\n # now = timezone.now()\n # queryset = self.queryset.filter(regions__in=[self.request.region])\n\n # if self.action in ['start', 'join']:\n # return queryset.all()\n\n # state = self.request.query_params.get('state', TournamentState.UPCOMING)\n \n # # if user.is_authenticated :\n # # queryset = queryset.filter(Q(open_date__lte=now) |\n # # Q(Q(minimum_profile_level__lte=user.level) | Q(minimum_profile_level__isnull=True))\n \n # # ).order_by('open_date')\n \n # if not user.is_authenticated:\n # queryset = queryset.filter(\n # free_open_date__lte=now,\n # ).order_by('free_open_date')\n \n # # if not user.is_subscriber:\n # else :\n # queryset = queryset.annotate(\n # is_min_level=Case(\n # When(Q(minimum_profile_level__isnull=False) &\n # Q(minimum_profile_level__gt=user.level),\n # then=False),\n # default=True\n # )\n # ).filter(\n # Q(free_open_date__lte=now) |\n # (Q(Q(minimum_profile_level__lte=user.level) | Q(minimum_profile_level__isnull=True)) & Q(free_open_date__gt=now))\n # ).order_by('free_open_date')\n\n # if self.action == 'list':\n # if state == TournamentState.UPCOMING:\n # return queryset.filter(end_date__gte=now)\n # else:\n # return queryset.filter(end_date__lt=now)\n # else:\n # return queryset.all()\n\n def get_queryset(self):\n user = self.request.user\n now = timezone.now()\n queryset = self.queryset.filter(regions__in=[self.request.region])\n \n\n if self.action in ['start', 'join', 'close']:\n return queryset.all()\n\n state = self.request.query_params.get('state', TournamentState.UPCOMING)\n game = self.request.query_params.get('game', 0)\n if game != '0' :\n queryset = queryset.filter(game__id=int(game))\n \n # if user.is_authenticated :\n # queryset = queryset.filter(Q(open_date__lte=now) |\n # Q(Q(minimum_profile_level__lte=user.level) | Q(minimum_profile_level__isnull=True))\n \n # ).order_by('open_date')\n \n if not user.is_authenticated:\n queryset = queryset.filter(\n free_open_date__lte=now,\n ).order_by('free_open_date')\n \n else :\n 
queryset = queryset.annotate(\n is_min_level=Case(\n When(Q(minimum_profile_level__isnull=False) &\n Q(minimum_profile_level__gt=user.level),\n then=False),\n default=True\n )\n ).filter(\n Q(free_open_date__lte=now) |\n (Q(Q(minimum_profile_level__lte=user.level) | Q(minimum_profile_level__isnull=True)) & Q(free_open_date__gt=now))\n ).filter(open_date__lte=now).order_by('free_open_date')\n \n\n if self.action == 'list':\n if state == TournamentState.UPCOMING:\n tournaments = queryset.filter(end_date__gte=now,started_on__isnull=True)\n elif state == TournamentState.PAST:\n tournaments = queryset.filter(end_date__lt=now)\n elif state == TournamentState.ONGOING:\n tournaments = queryset.filter(end_date__gt=now,started_on__isnull=False)\n else:\n tournaments = queryset.all() \n else:\n tournaments = queryset.all() \n \n return tournaments \n\n \n\n @action(methods=['GET'], detail=True, permission_classes=(permissions.IsAdminUser,))\n @transaction.atomic()\n def start(self, request, slug):\n tournament = self.get_object()\n \n \n if tournament.started_on:\n raise TournamentStartException() \n\n room_size = tournament.game.room_size\n participants = tournament.tournamentparticipant_set.all()\n count = participants.count()\n\n if not count:\n raise TournamentFirstException()\n \n if not room_size:\n return redirect(request.META[\"HTTP_REFERER\"])\n\n # for k, i in enumerate(range(0, count, room_size), 1):\n # TournamentMatch.objects.create( \n # tournament=tournament,\n # match_name=f'[Round 1] Match {k}',\n # round_number=1,\n # )\n\n tournament.started_on = timezone.now()\n tournament.save()\n\n return redirect(request.META[\"HTTP_REFERER\"])\n \n\n @action(methods=['GET'], detail=True, permission_classes=(permissions.IsAdminUser,))\n @transaction.atomic()\n def close(self, request, slug):\n is_closed = False\n tournament = self.get_object()\n \n\n prizes = tournament.tournamentprize_set.filter(winner__isnull=True)\n count = prizes.count()\n\n if count :\n raise TournamentCloseException()\n\n for tournament_prize in prizes :\n winner = tournament_prize.winner\n prize_type = tournament_prize.prize_type\n if prize_type == 'cash':\n prize = tournament_prize.cash_amount\n else:\n prize = tournament_prize.actual_data_package\n if not get_prize(winner.mobile, prize, prize_type, winner.subscription,tournament.id):\n is_closed = True\n \n \n \n tournament.end_date = timezone.now()\n tournament.closed_on = timezone.now()\n tournament.save()\n tournament.send_notification_close()\n if is_closed == True:\n return Response(\n {\"detail\": \"Tournament closed but error in granting prizes, please check with technical team!\"},\n status=status.HTTP_406_NOT_ACCEPTABLE\n )\n return redirect(request.META[\"HTTP_REFERER\"])\n\n\n @action(methods=['POST'], detail=True, permission_classes=(permissions.IsAuthenticated,))\n def join(self, request, slug):\n now = timezone.now()\n tournament = self.get_object()\n user = request.user\n linked_account = user.usergamelinkedaccount_set.filter(\n game=tournament.game\n ).first()\n if not linked_account:\n raise GameAccountUnavailable()\n\n if not tournament.allow_free_users:\n if user.subscription == SubscriptionType.FREE: \n raise FreeUserCannotJoinTournament()\n elif user.is_billed == False:\n raise UnbilledUserCannotJoinTournament()\n\n if tournament.minimum_profile_level and \\\n tournament.minimum_profile_level > user.level and tournament.free_open_date > now:\n raise MinimumProfileLevelException()\n\n is_waiting_list = False\n if tournament.current_participants() >= 
tournament.max_participants:\n is_waiting_list = True\n \n try:\n participant = TournamentParticipant.objects.get_or_create(\n tournament=tournament,\n participant=user,\n defaults={\n 'is_waiting_list': is_waiting_list\n }\n )\n except IntegrityError:\n raise ParticipantExists()\n\n\n # if tournament.give_sticker:\n # if user.stickers.all() :\n # sticker = Sticker.objects.filter(\n # ~Q(id__in=user.stickers.all())\n # ).order_by('?').first()\n # if sticker :\n # user.stickers.add(sticker)\n # user.save()\n\n if is_waiting_list:\n return Response(\n {\"code\": \"waiting_list\",\n \"message\": \"You have been added to the waiting list.\"},\n status=status.HTTP_200_OK\n )\n\n return Response(status=status.HTTP_200_OK)\n\n # TODO: must be fixed and updated on the frontend\n @action(detail=False, methods=['get'], permission_classes=(permissions.AllowAny,))\n def get_participants(self, request):\n slug = self.request.query_params.get('slug', None)\n\n if not slug:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n tournament = Tournament.objects.get(\n slug=slug,\n regions__in=[request.region]\n )\n\n except Tournament.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n participants = tournament.participants()\n\n\n page_number = self.request.query_params.get('page', 1)\n page_size = self.request.query_params.get('size', 20)\n\n paginator = Paginator(participants, page_size)\n try:\n participants = paginator.page(int(page_number))\n except:\n participants = paginator.page(1)\n\n serializer = TournamentParticipantSerializer(participants, many=True)\n return Response({\n \"data\": serializer.data,\n \"pagination\": {\n \"has_next\": participants.has_next()\n }\n })\n\n \n @action(detail=False, methods=['get'], permission_classes=(permissions.AllowAny,))\n def get_tournaments(self, request):\n page_number = self.request.query_params.get('page', 1)\n page_size = self.request.query_params.get('size', 6)\n state = self.request.query_params.get('state', TournamentState.UPCOMING)\n game = self.request.query_params.get('game', 0)\n user = self.request.user\n now = timezone.now()\n tournament_list = Tournament.objects.select_related('game').prefetch_related(\n 'tournamentparticipant_set',\n Prefetch(\n 'tournamentprize_set',\n queryset=TournamentPrize.objects.order_by('position')\n ))\n tournament_list = gettour(user,tournament_list,self.request.region)\n if game != '0' :\n tournament_list = tournament_list.filter(game__id=int(game))\n \n upcoming = tournament_list.filter(end_date__gte=now,started_on__isnull=True).order_by('start_date')\n ongoing = tournament_list.filter(end_date__gt=now,started_on__isnull=False).order_by('-live_null', 'start_date')\n previous = tournament_list.filter(end_date__lt=now)\n exceptprevioustournaments = list(ongoing) + list(upcoming)\n if state == TournamentState.UPCOMING:\n tournaments = upcoming\n elif state == TournamentState.PAST:\n tournaments = previous\n elif state == TournamentState.ONGOING:\n tournaments = ongoing\n else:\n tournaments = list(exceptprevioustournaments) + list(previous) # tournament_list.all().order_by('id') # added order to remove warning\n \n paginator = Paginator(tournaments, page_size)\n all_paginator = Paginator(exceptprevioustournaments, page_size)\n try:\n tournaments = paginator.page(int(page_number))\n exceptprevioustournaments = all_paginator.page(int(page_number))\n except:\n tournaments = paginator.page(1)\n exceptprevioustournaments = all_paginator.page(1)\n \n serializer = 
TournamentSerializer(paginator.page(int(page_number)), many=True, context={'requesto': request})\n upcomingserializer = TournamentSerializer(upcoming, many=True, context={'requesto': request})\n allserializer = TournamentSerializer(exceptprevioustournaments, many=True, context={'requesto': request})\n return Response({\n \"data\": serializer.data,\n \"tournaments\": upcomingserializer.data,\n \"all_serializer\": allserializer.data,\n \"pagination\": {\n \"pages\":paginator.num_pages,\n \"all_pages\":all_paginator.num_pages\n },\n \n }) \n\n \n @action(detail=False, methods=['get'], permission_classes=(permissions.AllowAny,))\n def get_tournaments2(self, request):\n user = self.request.user\n \n search = self.request.query_params.get('search',None)\n \n queryset = Tournament.objects.all().order_by('name')\n tournament_list = queryset\n tournament_list = gettour(user,tournament_list,self.request.region)\n tournament_list=tournament_list.filter(name__icontains=search)\n \n serializer = TournamentSerializer(tournament_list,many=True, context={'requesto': request})\n return Response({\n \"data\": serializer.data,\n \n \n }) \n \n @action(methods=['POST'], detail=False, permission_classes=(permissions.IsAdminUser,))\n @transaction.atomic()\n def inform_participants(self, request):\n tourid = request.data.get('tourid')\n matchid = int(request.data.get('formid'))\n if not tourid:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n tournament = Tournament.objects.get(\n id=tourid,\n regions__in=[request.region]\n )\n\n except Tournament.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n matches = TournamentMatch.objects.filter(tournament=tournament).order_by('id').all()\n match = matches[matchid]\n participids = request.data.get('participants').split(',')\n print(participids)\n if not participids :\n raise UserInformException()\n # only select uninformed participants\n tourparts = TournamentParticipant.objects.filter(tournament=tournament, participant__in=participids).exclude(matches_informed=match) # is_informed=False\n partic = tourparts.values_list('participant', flat=True)\n participants = User.objects.filter(id__in=partic) # participids\n \n \n count = participants.count()\n print(participants, count)\n if count>0:\n #sched = \"Match Schedule for \"+tournament.name\n \n # for match in matches:\n #sched+=\"
Round \"+str(match.round_number)+\" - Match \"+match.match_name\n #sched+=\"
Start: \"+str(match.start_date)\n\n #sched=str(match.start_date)\n if match.start_date:\n if match.tournament.label_next_time and match.tournament.time_compared_to_gmt:\n date_time = (match.start_date+timedelta(hours=int(match.tournament.time_compared_to_gmt))).strftime(\"%H:%M\") + \" \"+ match.tournament.label_next_time\n else:\n date_time = match.start_date.strftime(\"%H:%M\")\n else:\n date_time = ''\n\n #sched = (match.start_date+timedelta(hours=1)).strftime(\"%Y/%m/%d %H:%M\")+ match.label_next_time\n sched = date_time\n\n stri_repl = {}\n stri_repl['MATCH_SCHEDULE'] = sched\n stri_repl['TOURNAMENT_NAME'] = match.tournament.name\n stri_repl['STARTDATE'] = sched\n \n \n for user in participants:\n @notify_when(events=[NotificationTemplate.MATCH_SCHEDULE], is_route=False,\n is_one_time=False, str_repl=stri_repl)\n def notify(user, user_notifications):\n \"\"\" extra logic if needed \"\"\"\n for notificationi in user_notifications:\n print(\"inside notificationi\")\n notificationi.text=notificationi.notification.text.replace('TOURNAMENT_NAME',match.tournament.name).replace('STARTDATE',sched)\n print(notificationi.text)\n notificationi.save()\n resp, code = send_sms(user, notificationi.text)\n print(resp, code)\n notify(user=user)\n\n #for user in participants:\n # notify(user=user)\n\n print(\"tourparts\", tourparts)\n print(tourparts.values_list('matches_informed', flat=True))\n # tourparts.matches_informed.add(*match) # add match to informed\n StudentClass = TournamentParticipant.matches_informed.through\n items = [\n StudentClass(tournamentmatch_id=match.pk, tournamentparticipant_id=student.pk)\n for student in tourparts\n ]\n\n StudentClass.objects.bulk_create(items)\n if count==1:\n return Response(\"1 participant has been informed\", status=status.HTTP_200_OK)\n else:\n return Response(str(count)+\" participants have been informed\", status=status.HTTP_200_OK)\n else:\n return Response(\"All participants have already been informed\", status=status.HTTP_200_OK)\n \n \n \n\ndef gettour(user,tournament_list,region):\n \n \n now = timezone.now()\n \n \n if not user.is_authenticated:\n tournament_list = tournament_list.filter(\n free_open_date__lte=now,\n ).annotate(live_null=Count('live_link'),started_null=Count('started_on')) \n else :\n tournament_list = tournament_list.annotate(\n is_min_level=Case(\n When(Q(minimum_profile_level__isnull=False) &\n Q(minimum_profile_level__gt=user.level),\n then=False),\n default=True\n )\n ).filter(\n Q(free_open_date__lte=now) |\n (Q(Q(minimum_profile_level__lte=user.level) | Q(minimum_profile_level__isnull=True)) & Q(free_open_date__gt=now))\n ).filter(open_date__lte=now).annotate(live_null=Count('live_link'),started_null=Count('started_on'))\n \n return tournament_list.filter(regions__in=[region])\n\n\n\n \n \n \n\n\n\n\n\nclass TournamentPrizeViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n queryset = TournamentPrize.objects.select_related(\n 'tournament').exclude(image='')\n serializer_class = TournamentPrizeSerializer\n permission_classes = (permissions.AllowAny,)\n filterset_fields = ('prize_type',)\n\n def get_queryset(self):\n now = timezone.now()\n user = self.request.user\n prize_list = self.queryset.filter(\n tournament__regions__in=[self.request.region],\n tournament__end_date__gt=now\n ).exclude(image='')\n if not user.is_authenticated:\n prize_list = prize_list.filter(\n tournament__free_open_date__lte=now,\n )\n else:\n prize_list = prize_list.filter(\n Q(tournament__free_open_date__lte=now) |\n 
(Q(Q(tournament__minimum_profile_level__lte=user.level) | Q(tournament__minimum_profile_level__isnull=True)) & Q(tournament__free_open_date__gt=now))\n ).filter(tournament__open_date__lte=now)\n return prize_list\n\n\nclass TournamentWinnerViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n queryset = TournamentPrize.objects.all()\n serializer_class = TournamentWinnerSerializer\n permission_classes = (permissions.AllowAny,)\n\n # def list(self, request, *args, **kwargs):\n # try:\n # game = request.query_params['game']\n # except KeyError:\n # raise ValidationError('Game parameter is missing')\n\n # queryset = TournamentPrize.objects.filter(\n # winner__isnull=False,\n # tournament__game__slug__iexact=game,\n # tournament__regions__in=[request.region]\n # ).values('winner').annotate(\n # winner_name=F('winner__nickname'),\n # win_count=Count('winner')\n # ).values('winner_name').order_by('-win_count').all()[:10]\n\n # return Response(list(queryset), status=status.HTTP_200_OK)\n\n\n def list(self, request, *args, **kwargs):\n \n game = request.query_params['game']\n tournament = request.query_params.get('tournament', None)\n\n \n if game and game!= '':\n queryset = TournamentPrize.objects.filter(\n winner__isnull=False,\n tournament__id=tournament,\n tournament__game__slug__iexact=game,\n tournament__regions__in=[request.region]\n ).values('winner').annotate(\n winner_name=F('winner__nickname'),\n win_count=Count('winner')\n ).values('winner_name').order_by('position').all()\t\n else :\n queryset = TournamentPrize.objects.filter(\n winner__isnull=False,\n tournament__id=tournament,\n tournament__regions__in=[request.region]\n ).values('winner').annotate(\n winner_name=F('winner__nickname'),\n win_count=Count('winner')\n ).values('winner_name').order_by('position').all()\t\n return Response(list(queryset), status=status.HTTP_200_OK)\n","repo_name":"serena612/engageRobi","sub_path":"backend/engage/tournament/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":25145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12032279135","text":"import numpy as np\r\nimport torch\r\nimport os\r\nimport random\r\nfrom collections import OrderedDict\r\nimport util.util as util\r\nfrom util.image_pool import ImagePool\r\nfrom .base_model import BaseModel\r\nfrom . 
import networks\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom PIL import Image\r\nfrom torchvision import transforms as vtransforms\r\nfrom torchvision import utils as vutils\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nfrom .multiloss import *\r\nfrom util import flow_utils\r\nfrom skimage.transform import resize\r\n\r\nclass ComboGANflowModel(BaseModel):\r\n def name(self):\r\n return 'ComboGANflowModel'\r\n\r\n def __init__(self, opt):\r\n super(ComboGANflowModel, self).__init__(opt)\r\n\r\n self.n_domains = opt.n_domains\r\n self.DA, self.DB = None, None\r\n self.loadSize = opt.loadSize\r\n self.cropSize = opt.cropSize\r\n self.batchSize = opt.batchSize\r\n self.input_nc = opt.input_nc\r\n self.use_grayscale_images = False\r\n\r\n # load/define networks\r\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,\r\n opt.netG_n_blocks, opt.netG_n_shared,\r\n self.n_domains, opt.norm, opt.use_dropout, self.gpu_ids)\r\n self.netFlow = networks.define_FlowNet(opt.use_grayscale_images, self.n_domains, self.gpu_ids)\r\n if self.isTrain:\r\n self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD_n_layers,\r\n self.n_domains, self.Tensor, opt.norm, self.gpu_ids)\r\n\r\n if not self.isTrain or opt.continue_train:\r\n which_epoch = opt.which_epoch\r\n self.load_network(self.netG, 'G', which_epoch)\r\n self.netFlow.load(os.path.join(self.save_dir, '%d_net_%s' % (which_epoch, 'FlowNet0.pth')), \\\r\n os.path.join(self.save_dir, '%d_net_%s' % (which_epoch, 'FlowNet1.pth')),\r\n opt)\r\n if self.isTrain:\r\n self.load_network(self.netD, 'D', which_epoch)\r\n else:\r\n self.netFlow.load(opt.loadmodel_flownet, opt.loadmodel_flownet, opt)\r\n\r\n if self.isTrain:\r\n self.fake_pools = [ImagePool(opt.pool_size) for _ in range(self.n_domains)]\r\n # define loss functions\r\n self.L1 = torch.nn.SmoothL1Loss()\r\n self.downsample = torch.nn.AvgPool2d(3, stride=2)\r\n self.criterionCycle = self.L1\r\n # self.criterionGAN = lambda r,f,v : (networks.GANLoss(r[0], f[0], v) + \\\r\n # networks.GANLoss(r[1], f[1], v) + \\\r\n # networks.GANLoss(r[2], f[2], v) + \\\r\n # networks.GANLoss(r[3], f[3], v) + \\\r\n # networks.GANLoss(r[4], f[4], v) + \\\r\n # networks.GANLoss(r[5], f[5], v)) / 6\r\n self.criterionGAN = lambda r,f,v : (networks.GANLoss(r[0], f[0], v) + \\\r\n networks.GANLoss(r[1], f[1], v)) / 2\r\n\r\n # initialize optimizers\r\n self.netG.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))\r\n self.netFlow.init_optimizers(torch.optim.Adam, opt.lr/10, (opt.beta1, 0.999))\r\n self.netD.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))\r\n # initialize loss storage\r\n self.loss_D, self.loss_G = [0]*self.n_domains, [0]*self.n_domains\r\n self.loss_cycle = [0]*self.n_domains\r\n self.loss_flowrec = [0]*self.n_domains\r\n # initialize loss multipliers\r\n self.lambda_cyc = opt.lambda_cycle\r\n self.lambda_flowrec = opt.lambda_flowrec\r\n\r\n print('---------- Networks initialized ---------------')\r\n print('-----------------------------------------------')\r\n\r\n def set_input(self, input, val_set):\r\n if not val_set:\r\n self.real_A = self.Tensor(self.batchSize, self.input_nc, self.cropSize[0], self.cropSize[1])\r\n self.real_B = self.Tensor(self.batchSize, self.input_nc, self.cropSize[0], self.cropSize[1])\r\n input_A = input['A']\r\n self.real_A_path = input['path']\r\n self.real_A.resize_(input_A.size()).copy_(input_A)\r\n self.DA = input['DA'][0]\r\n if self.isTrain:\r\n input_B = input['B']\r\n 
self.real_B.resize_(input_B.size()).copy_(input_B)\r\n self.DB = input['DB'][0]\r\n else:\r\n raise NotImplementedError\r\n # self.real_A = self.Tensor(self.batchSize, self.input_nc,\r\n # self.loadSize[0], self.loadSize[1])\r\n # self.real_B = self.Tensor(self.batchSize, self.input_nc,\r\n # self.loadSize[0], self.loadSize[1])\r\n # self.real_A_FlowGT = self.Tensor(self.batchSize, self.input_nc,\r\n # self.loadSize[0], self.loadSize[1])\r\n # input_A = input['A']\r\n # input_A_FlowGT = input['A_GT']\r\n # self.real_A.resize_(input_A.size()).copy_(input_A)\r\n # self.real_A_FlowGT.resize_(input_A_FlowGT.size()).copy_(input_A_FlowGT)\r\n # self.DA = input['DA'][0]\r\n # self.DB = input['DB'][0]\r\n self.image_paths = input['path']\r\n\r\n # To modify the test code later (add Flow predictions)- AA, 09/10/18, 11:08am\r\n def test(self):\r\n raise NotImplementedError\r\n\r\n def get_image_paths(self):\r\n return self.image_paths\r\n\r\n def backward_D_basic(self, pred_real, fake, domain):\r\n pred_fake = self.netD.forward(fake.detach(), domain)\r\n loss_D = self.criterionGAN(pred_real, pred_fake, True) * 0.5\r\n loss_D.backward()\r\n return loss_D\r\n\r\n def backward_D(self):\r\n #D_A\r\n fake_B = self.fake_pools[self.DB].query(self.fake_B)\r\n self.loss_D[self.DA] = self.backward_D_basic(self.pred_real_B, fake_B, self.DB)\r\n #D_B\r\n fake_A = self.fake_pools[self.DA].query(self.fake_A)\r\n self.loss_D[self.DB] = self.backward_D_basic(self.pred_real_A, fake_A, self.DA)\r\n\r\n def backward_G(self):\r\n encoded_A_left = self.netG.encode(self.real_A[:, 0:self.input_nc, :, :], self.DA)\r\n encoded_A_right = self.netG.encode(self.real_A[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DA)\r\n encoded_B_left = self.netG.encode(self.real_B[:, 0:self.input_nc, :, :], self.DB)\r\n encoded_B_right = self.netG.encode(self.real_B[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DB)\r\n # GAN loss\r\n # D_A(G_A(A))\r\n self.fake_B = torch.cat((self.netG.decode(encoded_A_left, self.DB), \\\r\n self.netG.decode(encoded_A_right, self.DB)), 1)\r\n pred_fake = self.netD.forward(self.fake_B, self.DB)\r\n self.loss_G[self.DA] = self.criterionGAN(self.pred_real_B, pred_fake, False)\r\n # D_B(G_B(B))\r\n self.fake_A = torch.cat((self.netG.decode(encoded_B_left, self.DA), \\\r\n self.netG.decode(encoded_B_right, self.DA)), 1)\r\n pred_fake = self.netD.forward(self.fake_A, self.DA)\r\n self.loss_G[self.DB] = self.criterionGAN(self.pred_real_A, pred_fake, False)\r\n # Cycle losses\r\n # Forward cycle loss\r\n rec_encoded_A_left = self.netG.encode(self.fake_B[:, 0:self.input_nc, :, :], self.DB)\r\n rec_encoded_A_right = self.netG.encode(self.fake_B[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DB)\r\n self.rec_A = torch.cat((self.netG.decode(rec_encoded_A_left, self.DA), \\\r\n self.netG.decode(rec_encoded_A_right, self.DA)), 1)\r\n self.loss_cycle[self.DA] = self.criterionCycle(self.rec_A, self.real_A)\r\n # Backward cycle loss\r\n rec_encoded_B_left = self.netG.encode(self.fake_A[:, 0:self.input_nc, :, :], self.DA)\r\n rec_encoded_B_right = self.netG.encode(self.fake_A[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DA)\r\n self.rec_B = torch.cat((self.netG.decode(rec_encoded_B_left, self.DB), \\\r\n self.netG.decode(rec_encoded_B_right, self.DB)), 1)\r\n self.loss_cycle[self.DB] = self.criterionCycle(self.rec_B, self.real_B)\r\n\r\n # combined loss\r\n loss_G = self.loss_G[self.DA] + self.loss_G[self.DB] + \\\r\n (self.loss_cycle[self.DA] + self.loss_cycle[self.DB]) * self.lambda_cyc\r\n 
loss_G.backward(retain_graph=True)\r\n\r\n def backward_netFlow(self):\r\n #######################################################################################\r\n # self.netFlow.param_reguires_grad(self.DB, False) # Gradients not needed for FlowNet_B\r\n # self.netFlow.param_reguires_grad(self.DA, True) # Gradients needed for FlowNet_A\r\n # Forward pass along the netFlow_B predicting output for the real daytime images (real_B)\r\n flow1_real_B, flow2_real_B, \\\r\n flow3_real_B, flow4_real_B, \\\r\n flow5_real_B = self.netFlow.forward(self.real_B[:, 0:self.input_nc, :, :], \\\r\n self.real_B[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DB)\r\n # Forward pass along the netFlow_A predicting output for the fake nighttime images (fake_A)\r\n flow1_fake_A, flow2_fake_A, \\\r\n flow3_fake_A , flow4_fake_A, \\\r\n flow5_fake_A = self.netFlow.forward(self.fake_A[:, 0:self.input_nc, :, :],\r\n self.fake_A[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DA)\r\n # print(torch.min(self.fake_A[:, 0:self.input_nc, :, :]),\r\n # torch.max(self.fake_A[:, 0:self.input_nc, :, :]))\r\n flow_fake_A = [flow1_fake_A, flow2_fake_A, flow3_fake_A, flow4_fake_A, flow5_fake_A]\r\n # Compute the EPE loss between the predictions\r\n self.loss_flowrec[self.DA] = multiscaleEPE(flow_fake_A, flow1_real_B.detach()) * \\\r\n self.lambda_flowrec\r\n # Back-propagate the loss\r\n loss_netFlow_1 = self.loss_flowrec[self.DA]\r\n loss_netFlow_1.backward()\r\n ##########################################################################################\r\n # # self.netFlow.param_reguires_grad(self.DB, False) # Gradients not needed for FlowNet_B\r\n # # self.netFlow.param_reguires_grad(self.DA, False) # Gradients not needed for FlowNet_A\r\n # Forward pass along the netFlow_A predicting output for the rec. 
nighttime images (rec_A)\r\n flow1_rec_A, flow2_rec_A, \\\r\n flow3_rec_A, flow4_rec_A, \\\r\n flow5_rec_A = self.netFlow.forward(self.rec_A[:, 0:self.input_nc, :, :], \\\r\n self.rec_A[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DA)\r\n # Forward pass along the netFlow_B)predicting output for the fake daytime images (fake_B)\r\n flow1_fake_B, flow2_fake_B, \\\r\n flow3_fake_B, flow4_fake_B, \\\r\n flow5_fake_B = self.netFlow.forward(self.fake_B[:, 0:self.input_nc, :, :],\\\r\n self.fake_B[:, self.input_nc:2*self.input_nc, :, :],\r\n self.DB)\r\n flow_fake_B = [flow1_fake_B, flow2_fake_B, flow3_fake_B, flow4_fake_B, flow5_fake_B]\r\n # Compute the EPE loss between the predictions\r\n self.loss_flowrec[self.DB] = multiscaleEPE(flow_fake_B, flow1_rec_A.detach()) * \\\r\n self.lambda_flowrec\r\n # self.loss_flowrec[self.DB] = 0.0\r\n # Back-propagate the loss\r\n loss_netFlow_2 = self.loss_flowrec[self.DB]\r\n loss_netFlow_2.backward()\r\n #########################################################################################\r\n\r\n # Store data for visuals (only the first image of the mini-batch is sufficient)\r\n self.flow_rec_A = flow1_rec_A[0, :, :].detach().cpu().numpy().transpose((1, 2, 0))\r\n self.flow_real_B = flow1_real_B[0, :, :].detach().cpu().numpy().transpose((1, 2, 0))\r\n self.flow_fake_A = flow1_fake_A[0, :, :].detach().cpu().numpy().transpose((1, 2, 0))\r\n self.flow_fake_B = flow1_fake_B[0, :, :].detach().cpu().numpy().transpose((1, 2, 0))\r\n\r\n\r\n def optimize_parameters(self, epoch):\r\n # Set the netGs, netDs in train mode\r\n # Set the FlowNets in train (FlowNet_A) and eval FlowNet_B) mode respectively\r\n self.netG.net_in_trainmode(self.DB, True) # Put netG_B in train() mode\r\n self.netG.net_in_trainmode(self.DA, True) # Put netG_A in train() mode\r\n self.netD.net_in_trainmode(self.DB, True) # Put netD_B in train() mode\r\n self.netD.net_in_trainmode(self.DA, True) # Put netD_A in train() mode\r\n self.netFlow.net_in_trainmode(self.DB, False) # Put FlowNet_B in eval() mode\r\n self.netFlow.net_in_trainmode(self.DA, True) # Put FlowNet_A in train() mode\r\n # Forward predictions for the real_A/B images from their corresponding discriminators\r\n self.pred_real_A = self.netD.forward(self.real_A, self.DA)\r\n self.pred_real_B = self.netD.forward(self.real_B, self.DB)\r\n # [G_A and G_B] and FlowNet_A (since FlowNet_B is always frozen)\r\n self.netG.zero_grads(self.DA, self.DB)\r\n self.netFlow.zero_grads(self.DA)\r\n self.backward_G()\r\n self.backward_netFlow()\r\n self.netG.step_grads(self.DA, self.DB)\r\n self.netFlow.step_grads(self.DA)\r\n # D_A and D_B\r\n self.netD.zero_grads(self.DA, self.DB)\r\n self.backward_D()\r\n self.netD.step_grads(self.DA, self.DB)\r\n\r\n def perform_validation(self, epoch, epoch_iter, savedir1):\r\n raise NotImplementedError\r\n\r\n def get_current_errors(self):\r\n extract = lambda l: [(i if type(i) is int or type(i) is float else i.item()) for i in l]\r\n D_losses, G_losses, cyc_losses, flowrec_losses = extract(self.loss_D), \\\r\n extract(self.loss_G), \\\r\n extract(self.loss_cycle),\\\r\n extract(self.loss_flowrec)\r\n # Modifying the code - AA, 02/10/18, 12:20pm\r\n errors_ret = OrderedDict()\r\n for i in range(len(D_losses)):\r\n errors_ret['D_'+str(i)] = D_losses[i]\r\n for i in range(len(G_losses)):\r\n errors_ret['G_'+str(i)] = G_losses[i]\r\n for i in range(len(cyc_losses)):\r\n errors_ret['Cyc_'+str(i)] = cyc_losses[i]\r\n for i in range(len(flowrec_losses)):\r\n errors_ret['FlowNet_'+str(i)] = 
flowrec_losses[i]\r\n return errors_ret\r\n\r\n def save_current_visuals(self, epoch, epoch_iter, savedir1, savedir2):\r\n # Visuals1\r\n if self.use_grayscale_images:\r\n inv_norm_ = util.inv_normalize_grayscale()\r\n else:\r\n inv_norm_ = util.inv_normalize()\r\n totensor_ = vtransforms.ToTensor()\r\n real_A_imgs = inv_norm_(torch.cat((self.real_A[0, self.input_nc:2*self.input_nc, :, :],\r\n self.real_A[0, 0:self.input_nc, :, :]),2).detach().cpu())\r\n fake_B_imgs = inv_norm_(torch.cat((self.fake_B[0, self.input_nc:2*self.input_nc, :, :],\r\n self.fake_B[0, 0:self.input_nc, :, :]),2).detach().cpu())\r\n rec_A_imgs = inv_norm_(torch.cat((self.rec_A[0, self.input_nc:2*self.input_nc, :, :],\r\n self.rec_A[0, 0:self.input_nc, :, :]),2).detach().cpu())\r\n real_B_imgs = inv_norm_(torch.cat((self.real_B[0, self.input_nc:2*self.input_nc, :, :],\r\n self.real_B[0, 0:self.input_nc, :, :]),2).detach().cpu())\r\n fake_A_imgs = inv_norm_(torch.cat((self.fake_A[0, self.input_nc:2*self.input_nc, :, :],\r\n self.fake_A[0, 0:self.input_nc, :, :]),2).detach().cpu())\r\n rec_B_imgs = inv_norm_(torch.cat((self.rec_B[0, self.input_nc:2*self.input_nc, :, :],\r\n self.rec_B[0, 0:self.input_nc, :, :]),2).detach().cpu())\r\n if self.use_grayscale_images:\r\n real_A_imgs = torch.cat((real_A_imgs, real_A_imgs, real_A_imgs), 0)\r\n fake_B_imgs = torch.cat((fake_B_imgs, fake_B_imgs, fake_B_imgs), 0)\r\n rec_A_imgs = torch.cat((rec_A_imgs, rec_A_imgs, rec_A_imgs), 0)\r\n real_B_imgs = torch.cat((real_B_imgs, real_B_imgs, real_B_imgs), 0)\r\n fake_A_imgs = torch.cat((fake_A_imgs, fake_A_imgs, fake_A_imgs), 0)\r\n rec_B_imgs = torch.cat((rec_B_imgs, rec_B_imgs, rec_B_imgs), 0)\r\n _, h, w2 = real_A_imgs.size()\r\n w = w2/2\r\n flow_rec_A = resize(flow_utils.flowToImg(self.flow_rec_A).astype(np.float32)/255.0,\r\n [h, w], mode='reflect').astype(np.float32)\r\n flow_rec_A = totensor_(Image.fromarray(np.uint8(flow_rec_A*255.0)))\r\n flow_fake_B = resize(flow_utils.flowToImg(self.flow_fake_B).astype(np.float32) / 255.0,\r\n [h, w], mode='reflect').astype(np.float32)\r\n flow_fake_B = totensor_(Image.fromarray(np.uint8(flow_fake_B * 255.0)))\r\n flow_real_B = resize(flow_utils.flowToImg(self.flow_real_B).astype(np.float32) / 255.0,\r\n [h, w], mode='reflect').astype(np.float32)\r\n flow_real_B = totensor_(Image.fromarray(np.uint8(flow_real_B * 255.0)))\r\n flow_fake_A = resize(flow_utils.flowToImg(self.flow_fake_A).astype(np.float32) / 255.0,\r\n [h, w], mode='reflect').astype(np.float32)\r\n flow_fake_A = totensor_(Image.fromarray(np.uint8(flow_fake_A * 255.0)))\r\n output_img1 = torch.cat((torch.cat((rec_A_imgs, flow_rec_A,\r\n fake_B_imgs, flow_fake_B, real_A_imgs), 2), \\\r\n torch.cat((real_B_imgs, flow_real_B,\r\n fake_A_imgs, flow_fake_A, rec_B_imgs ), 2)), 1)\r\n savename = savedir1 + '/visual' + '_epoch' + str(epoch) + '_' + str(epoch_iter) + '.jpg'\r\n vutils.save_image(output_img1, savename)\r\n # Visuals2\r\n if savedir2 is not None:\r\n raise NotImplementedError\r\n\r\n\r\n def print_current_errors(self, epoch, i, errors, t):\r\n message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)\r\n for k, v in errors.items():\r\n # print(self.real_A_path)\r\n message += '%s: %.3f ' % (k, v)\r\n print(message)\r\n\r\n def plot_current_errors(self, exp_name, glb_iter, errors, logger):\r\n logger.add_scalars(exp_name + '/G', {'G_0':errors['G_0'], 'G_1':errors['G_1']}, glb_iter)\r\n logger.add_scalars(exp_name + '/D', {'D_0': errors['D_0'], 'D_1': errors['D_1']}, glb_iter)\r\n logger.add_scalars(exp_name + '/Cyc', 
{'Cyc_0': errors['Cyc_0'],\r\n 'Cyc_1': errors['Cyc_1']}, glb_iter)\r\n logger.add_scalars(exp_name + '/FlowNet', {'FlowNet_0': errors['FlowNet_0'],\r\n 'FlowNet_1': errors['FlowNet_1']}, glb_iter)\r\n\r\n\r\n def save(self, label):\r\n self.save_network(self.netG, 'G', label, self.gpu_ids)\r\n self.save_network(self.netFlow, 'FlowNet', label, self.gpu_ids)\r\n self.save_network(self.netD, 'D', label, self.gpu_ids)\r\n\r\n def update_hyperparams(self, curr_iter):\r\n if curr_iter > self.opt.niter:\r\n decay_frac = (curr_iter - self.opt.niter) / self.opt.niter_decay\r\n new_lr = self.opt.lr * (1 - decay_frac)\r\n self.netG.update_lr(new_lr)\r\n self.netFlow.update_lr(new_lr/10)\r\n self.netD.update_lr(new_lr)\r\n print('updated learning rate: %f' % new_lr)\r\n","repo_name":"aasharma90/Night-DepthFlow_PyTorch","sub_path":"CycleFlowGAN_NighttimeFlow/models/combogan_flow_model.py","file_name":"combogan_flow_model.py","file_ext":"py","file_size_in_byte":20551,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"20917957255","text":"from src.bounding_box import BBFormat, BoundingBox\nfrom src.evaluators.coco_evaluator import get_coco_summary\n\nfrom uuid import uuid4\nimport numpy as np\nfrom typing import Any, Dict, Tuple, List\n\n\ndef create_mea_metric(metric_value : float, data_row_id: str) -> Dict[str, Any]:\n \"\"\" Create the upload format for mea metrics \"\"\"\n return {\n \"uuid\": str(uuid4()),\n \"dataRow\": {\n \"id\": data_row_id,\n },\n \"metricValue\": metric_value\n }\n\n\ndef format_bbox(bbox: Dict[str, int]):\n \"\"\"Convert the bounding box from l,t,w,h to l,t,b,r\"\"\"\n return [\n bbox[\"left\"],\n bbox[\"top\"],\n bbox[\"left\"] + bbox[\"width\"],\n bbox[\"top\"] + bbox[\"height\"],\n ]\n\n\ndef swap_dims_and_scale(bbox: Tuple[float, float, float, float], image_h: int,\n image_w: int) -> Tuple[float, float, float, float]:\n \"\"\"\n Convert normalized bbox with y0,x0,y1,x1 coordinates into image coordinates x0,y0,x1,y1\n \"\"\"\n return [\n bbox[1] * image_w, bbox[0] * image_h, bbox[3] * image_w,\n bbox[2] * image_h\n ]\n\n\ndef get_summary(preds: Dict[str, Any], gts: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Generate summary statistics for a data row\n \"\"\"\n result = get_coco_summary(gts, preds)\n result = {\n k: v for k, v in result.items() if\n k in [\"AP\", \"AP50\", \"AP75\", \"AR1\", \"AR10\", \"AR100\"] and not np.isnan(v)\n }\n scores = np.array([pred._confidence for pred in preds])\n for (fn, name) in [(np.mean, 'mean'), (np.max, 'max'), (np.min, 'min')]:\n value = 0. 
# set default so fn() doesn't raise value error if there were no predictions\n if len(scores):\n value = fn(scores)\n result[f'{name}_score'] = value\n result[\"predictions\"] = len(preds)\n result[\"labels\"] = len(gts)\n return result\n\n\ndef construct_boxes(\n inference: Dict[str, Any],\n annotation: Dict[str, Any],\n name=\"exp_name\") -> Tuple[List[BoundingBox], List[BoundingBox]]:\n \"\"\"\n Convert inference and annotation json payloads into a consistent BoundingBox format\n \"\"\"\n annotation_boxes = [format_bbox(box) for box in annotation[\"boxes\"]]\n image_size = (inference[\"image_w\"], inference[\"image_h\"])\n gt = [\n BoundingBox(\n name,\n class_id=class_name,\n coordinates=coords,\n format=BBFormat.XYX2Y2,\n img_size=image_size,\n ) for coords, class_name in zip(annotation_boxes,\n annotation[\"class_names\"])\n ]\n pred = [\n BoundingBox(\n name,\n class_id=class_name,\n coordinates=swap_dims_and_scale(coords, image_size[1],\n image_size[0]),\n img_size=image_size,\n format=BBFormat.XYX2Y2,\n confidence=score,\n ) for coords, score, class_name in zip(\n inference[\"boxes\"], inference[\"scores\"], inference[\"class_names\"])\n ]\n return gt, pred\n\n\ndef format_annotation(json_label: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Converts the json representation of the label into a format that is easier to work with\n \"\"\"\n instances = json_label[\"objects\"]\n boxes = [annot[\"bbox\"] for annot in instances]\n class_names = [annot[\"title\"] for annot in instances]\n return {\"boxes\": boxes, \"class_names\": class_names}\n","repo_name":"Labelbox/model-observability","sub_path":"services/monitor_svc/monitor/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19011010874","text":"from dwave_networkx.algorithms.tsp import traveling_salesperson_qubo\nimport dwave_networkx as dnx\nimport networkx as nx\nimport numpy as np\nimport dimod\nfrom random import SystemRandom\nrandom = SystemRandom()\n\ndef tsp(n):\n G = nx.Graph()\n G = nx.complete_graph(n)\n for (u, v) in G.edges():\n G.edges[u,v]['weight'] = round(random.random()*100,2)\n \n #G.add_edge(node1,node2,weight=round(random.random()*100, 2))\n \n d = traveling_salesperson_qubo(G) \n \n indexes = dict()\n it = 0\n for i in range(n):\n for j in range(n):\n indexes[(i,j)] = it\n it += 1\n\n matrix = np.zeros((n**2,n**2), dtype=np.float_)\n\n for key_1, key_2 in d:\n matrix[indexes[key_1],indexes[key_2]] = d[key_1,key_2]\n\n return G, matrix\n\ndef generate_QUBO_problem(S):\n \"\"\"\n Generate a QUBO problem (The number partitioning problem) from a vector S\n \"\"\"\n n = len(S)\n c = 0\n for i in range(n):\n c += S[i]\n col_max = 0\n col = 0\n QUBO = np.zeros((n,n))\n #QUBO = [[0 for i in range(n)] for j in range(n)] #Old\n for row in range(n):\n col_max += 1\n while col < col_max:\n if row == col:\n QUBO[row][col] = S[row]*(S[row]-c)\n else:\n QUBO[row][col] = S[row] * S[col]\n QUBO[col][row] = QUBO[row][col]\n col += 1\n col = 0\n return QUBO, c\n\ndef read_integers(filename:str):\n with open(filename) as f:\n return [int(elem) for elem in f.read().split()]\n\ndef generate_QAP_problem(file):\n file_it = iter(read_integers(file))\n n = next(file_it)\n P = [[next(file_it) for j in range(n)] for i in range(n)]\n L = [[next(file_it) for j in range(n)] for i in range(n)]\n \n Q = np.kron(P,L)\n \n pen = (Q.max() * 2.25)\n matrix = qubo_qap(P,L,pen)\n y = pen * (len(P) + len(L))\n 
return matrix, pen, len(matrix), y\n \ndef qubo_qap(flow: np.ndarray, distance: np.ndarray, penalty):\n \"\"\"Quadratic Assignment Problem (QAP)\"\"\"\n n = len(flow)\n q = np.einsum(\"ij,kl->ikjl\", flow, distance).astype(np.float)\n\n i = range(len(q))\n\n q[i, :, i, :] += penalty\n q[:, i, :, i] += penalty\n q[i, i, i, i] -= 4 * penalty\n return q.reshape(n ** 2, n ** 2)\n \ndef generate_chimera(n):\n G = dnx.chimera_graph(16)\n tmp = nx.to_dict_of_lists(G)\n rows = []\n cols = []\n for i in range(n):\n rows.append(i)\n cols.append(i)\n for j in tmp[i]:\n if(j < n):\n rows.append(i)\n cols.append(j)\n\n return list(zip(rows, cols))\n\ndef generate_pegasus(n):\n G = dnx.pegasus_graph(16)\n\n tmp = nx.to_numpy_matrix(G)\n \n rows = []\n cols = []\n \n for i in range(n):\n rows.append(i)\n cols.append(i)\n for j in range(n):\n if(tmp.item(i,j)):\n rows.append(i)\n cols.append(j)\n \n return list(zip(rows, cols))\n ","repo_name":"bonom/Quantum-Annealing-for-solving-QUBO-Problems","sub_path":"QA4QUBO/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41153081443","text":"f = open(\"testfile.txt\",\"w\")\r\nf.write('''An Apple day keeps the doctor away.\r\nWe all pray for everyone's safety.\r\nA marked difference will come in our country.''')\r\nf.close()\r\n\r\ndef countlines():\r\n f = open(\"testfile.txt\",\"r\")\r\n data = f.readlines()\r\n c = 0\r\n for line in data:\r\n if line[0].lower() not in 'aeiou':\r\n c += 1\r\n print(line)\r\n print(\"No. of Lines not Starting with a Vowel Are : \",c)\r\n\r\ncountlines()\r\n","repo_name":"prachibarnwal/Basics-in-Python","sub_path":"217.) sqp qno 27.py","file_name":"217.) sqp qno 27.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9922791898","text":"import mimetypes\nimport random\nimport string\n\nfrom io import BytesIO, BufferedWriter\n\n\nclass StringBytesWriter(BufferedWriter):\n def __init__(self, encoding=\"utf-8\", *args, **kwargs):\n super().__init__(BytesIO(), *args, **kwargs)\n self.encoding = encoding\n\n def write(self, data):\n if isinstance(data, str):\n return super().write(data.encode(self.encoding))\n else:\n return super().write(data)\n\n def getvalue(self):\n self.flush()\n return self.raw.getvalue()\n\ndef get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\ndef choose_boundary():\n return ''.join(\n [random.choice(string.ascii_letters + string.digits)\n for x in range(60)])\n\ndef encode_multipart_formdata(fields, boundary=None):\n \"\"\"\n Encode a dictionary of ``fields`` using the multipart/form-data mime format.\n\n :param fields:\n Dictionary of fields. The key is treated as the field name, and the\n value as the body of the form-data. 
If the value is a tuple of two\n elements, then the first element is treated as the filename of the\n form-data section.\n\n :param boundary:\n If not specified, then a random boundary will be generated using\n :func:`mimetools.choose_boundary`.\n \"\"\"\n body = StringBytesWriter()\n if boundary is None:\n boundary = choose_boundary()\n\n for fieldname, value in fields.items():\n body.write('--%s\\r\\n' % (boundary))\n\n if isinstance(value, tuple):\n filename, data = value\n body.write('Content-Disposition: form-data; name=\"%s\"; '\n 'filename=\"%s\"\\r\\n' % (fieldname, filename))\n body.write('Content-Type: %s\\r\\n\\r\\n' %\n (get_content_type(filename)))\n else:\n data = value\n body.write('Content-Disposition: form-data; name=\"%s\"\\r\\n'\n % (fieldname))\n body.write('Content-Type: text/plain\\r\\n\\r\\n')\n\n if isinstance(data, int):\n data = str(data) # Backwards compatibility\n\n body.write(data)\n\n body.write('\\r\\n')\n\n body.write('--%s--\\r\\n' % (boundary))\n\n content_type = 'multipart/form-data; boundary=%s' % boundary\n\n return body.getvalue(), content_type\n\nif __name__ == \"__main__\":\n print(\"Test output:\")\n print(encode_multipart_formdata({\n \"test_field\": \"1\",\n \"file1\": b\"test1\",\n \"file2\": (\"file2.jpg\", b\"test2\"),\n \"file3\": (r\"c:\\file3.jpg\", b'test3'),\n })[0].decode(\"utf-8\"))\n","repo_name":"SAPikachu/simple_httplib2","sub_path":"multipart.py","file_name":"multipart.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"13643634961","text":"# Get the nth number in the Fibonacci sequence given n\r\n\r\n# Get input from the user and set it to an integer\r\nnterms = int(input(\"How many terms? \"))\r\n\r\n# Initalise the first two terms\r\nn1 = 0\r\nn2 = 1\r\n\r\ncount = 0\r\n\r\n# Check if input is a positive number\r\nif nterms <= 0:\r\n print(\"Please enter a positive number\")\r\n# Then generate the Fibonacci sequence\r\nelse:\r\n print(\"Fibonacci sequence up to \", nterms, \":\")\r\n while count < nterms:\r\n print(n1)\r\n nth = n1 + n2\r\n n1 = n2\r\n n2 = nth\r\n count += 1\r\n","repo_name":"Freesselak/Planit-SDET-Assessment","sub_path":"Challenge 1.py","file_name":"Challenge 1.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42447554993","text":"import json\nimport tornado.web\nfrom tools.interesting_fields_builder import InterestingFieldsBuilder\nfrom tools.interesting_fields_loader import InterestingFieldsLoader\nfrom typing import Dict\n\n__author__ = \"Ilia Sagaidak\"\n__copyright__ = \"Copyright 2022, Open Technologies 98\"\n__credits__ = []\n__license__ = \"\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Ilia Sagaidak\"\n__email__ = \"isagaidak@isgneuro.com\"\n__status__ = \"Dev\"\n\n\nclass GetInterestingFields(tornado.web.RequestHandler):\n \"\"\"\n Returns a list of dictionaries where every dictionary represents interesting fields for one column of data\n\n interesting fields consist of:\n :id: serial number of a column\n :text: name of a column\n :totalCount: number of not empty cells in the column (null is considered an empty cell)\n :static: list of dictionaries where every dictionary is an info about every unique value in a column consists of:\n :value: value itself\n :count: how many times the value appears in the column\n :%: percent of count from all rows in the data table\n \"\"\"\n\n def initialize(self, mem_conf: Dict, static_conf: Dict):\n self.builder = InterestingFieldsBuilder()\n self.loader = InterestingFieldsLoader(mem_conf, static_conf)\n\n async def get(self):\n params = self.request.query_arguments\n cid = params.get('cid')[0].decode()\n from_time = params.get('from')\n to_time = params.get('to')\n if from_time:\n from_time = from_time[0].decode()\n if not from_time.isdigit():\n return self.write(json.dumps({'status': 'failed', 'error': f'from: {from_time} not a number'},\n default=str))\n from_time = int(from_time)\n if to_time:\n to_time = to_time[0].decode()\n if not to_time.isdigit():\n return self.write(json.dumps({'status': 'failed', 'error': f'to: {to_time} not a number'},\n default=str))\n to_time = int(to_time)\n try:\n data = self.loader.load_data(cid, from_time, to_time)\n interesting_fields = self.builder.get_interesting_fields(data)\n except tornado.web.HTTPError as e:\n return self.write(json.dumps({'status': 'failed', 'error': e}, default=str))\n except Exception as e:\n return self.write(json.dumps({'status': 'failed', 'error': f'{e} cid {cid}'}, default=str))\n self.write(json.dumps(interesting_fields))\n","repo_name":"ISGNeuroTeam/ot_simple_rest","sub_path":"ot_simple_rest/handlers/eva/interesting_fields.py","file_name":"interesting_fields.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11510585922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 23 00:05:31 2022\n\n@author: HP\n\"\"\"\n\ndef multiple(intervalle, facteur):\n multiples = []\n for i in intervalle:\n if i % facteur == 0:\n multiples.append(i)\n return multiples\n\n\nprint(multiple(range(2, 31), 
2))","repo_name":"StagiairesMIASHS/testing","sub_path":"Fatou/exo3.py","file_name":"exo3.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11254651343","text":"import math\nimport operator\nimport itertools\n\ndef entropy(probability_list):\n sum_result = 0\n for probability_item in probability_list:\n if probability_item == 0:\n sum_result += 0\n else:\n sum_result += (-1)*probability_item*math.log2(probability_item)\n return sum_result\n\n\ndef information_gain(parent_probability, children_properties):\n children_weighted_entopry = 0\n for child in children_properties:\n children_weighted_entopry += child['weight']*entropy(child['probability'])\n return entropy(parent_probability) - children_weighted_entopry\n\n\ndef most_common(L):\n # get an iterable of (item, iterable) pairs\n SL = sorted((x, i) for i, x in enumerate(L))\n # print 'SL:', SL\n groups = itertools.groupby(SL, key=operator.itemgetter(0))\n # auxiliary function to get \"quality\" for an item\n def _auxfun(g):\n item, iterable = g\n count = 0\n min_index = len(L)\n for _, where in iterable:\n count += 1\n min_index = min(min_index, where)\n # print 'item %r, count %r, minind %r' % (item, count, min_index)\n return count, -min_index\n # pick the highest-count/earliest item\n return max(groups, key=_auxfun)[0]","repo_name":"alirezahi/DataMining","sub_path":"Decision Tree/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"26847182544","text":"import timeit\nimport sys\nimport getopt\nfrom encode import encode\nfrom decode import decode\n\n\nif __name__ == \"__main__\":\n start = timeit.default_timer()\n # Handle passed arguments\n # \n arg_dict = {\n # Initiliaze with default values\n \"base_value\": 10,\n \"recursive\": 1,\n }\n argumentList = sys.argv[1:]\n # Options\n options = \"b:r:\"\n # Long options\n long_options = [\"base =\", \"recursive =\"]\n try:\n # Parsing argument\n arguments, values = getopt.getopt(argumentList, options, long_options)\n \n # Checking each argument with options\n for currentArgument, currentValue in arguments:\n \n if currentArgument in (\"-b\", \"--base\"):\n arg_dict[\"base_value\"] = int(currentValue)\n elif currentArgument in (\"-r\", \"--recursive\"):\n arg_dict[\"recursive\"] = int(currentValue)\n\n # Check rest of the arguments\n if len(values) < 4:\n raise getopt.error(\"Required arguments were not given correctly\")\n arg_dict[\"method\"] = values[0]\n arg_dict[\"input_file_name\"] = values[1]\n arg_dict[\"encoding\"] = values[2]\n arg_dict[\"output_file_name\"] = values[3]\n\n # Call method with arguments passed as parameters\n if arg_dict[\"method\"] == \"encode\":\n result = encode(\n file_name=arg_dict[\"input_file_name\"],\n output_name=arg_dict[\"output_file_name\"],\n encoding=arg_dict[\"encoding\"],\n base_value=arg_dict[\"base_value\"]\n )\n for i in range(arg_dict[\"recursive\"] - 1):\n result = encode(\n file_name=arg_dict[\"output_file_name\"],\n output_name=arg_dict[\"output_file_name\"],\n encoding=arg_dict[\"encoding\"],\n base_value=arg_dict[\"base_value\"]\n )\n if result == -1:\n raise Exception()\n \n elif arg_dict[\"method\"] == \"decode\": \n for i in range(arg_dict[\"recursive\"] - 1):\n result = decode(\n file_name=arg_dict[\"input_file_name\"],\n output_file_name=arg_dict[\"input_file_name\"],\n 
encoding=arg_dict[\"encoding\"],\n base_value=arg_dict[\"base_value\"]\n )\n if result == -1:\n raise Exception()\n result = decode(\n file_name=arg_dict[\"input_file_name\"],\n output_file_name=arg_dict[\"output_file_name\"],\n encoding=arg_dict[\"encoding\"],\n base_value=arg_dict[\"base_value\"]\n )\n else:\n # Invalid method was given\n raise getopt.error(\"Invalid method was given, enter 'encode' or 'decode' in the arguments\")\n \n if result == -1:\n # Operation failed\n raise Exception()\n else:\n # Operation successful\n print(\"Operation successful\")\n \n stop = timeit.default_timer()\n\n print('Time: ', stop - start) \n\n # Output error, and return\n except getopt.error as err:\n print(str(err))\n except Exception as err:\n print(str(err))\n print(\"An error has occurred\")\n \n","repo_name":"mertbezirgan/COMP305_PROJECT_SPRING2021_GROUP9","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18628915123","text":"import requests\n\nAPI_KEY = 'your_api_key_here'\nAPI_URL = f'https://api.news.com/top-headlines?country=us&category=business&apiKey={API_KEY}'\n\ndef get_top_headlines():\n \"\"\"\n Fetches top business headlines from a news API.\n \n Returns:\n A list of dictionaries, where each dictionary contains information about a top business headline, including the title\n and a link to the full article.\n \"\"\"\n response = requests.get(API_URL)\n if response.ok:\n data = response.json()\n articles = data['articles']\n headlines = []\n for article in articles:\n headline = {\n 'title': article['title'],\n 'link': article['url']\n }\n headlines.append(headline)\n return headlines\n else:\n raise ValueError(f'Error fetching top headlines: {response.text}')\n\nif __name__ == '__main__':\n top_headlines = get_top_headlines()\n for headline in top_headlines:\n print(f'{headline[\"title\"]}: {headline[\"link\"]}')\n","repo_name":"spacewink9/Spacewink-Terminal-","sub_path":"fundamental_analysis/news_analysis/top_headlines.py","file_name":"top_headlines.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5258022547","text":"from tensorflow import keras\nimport numpy as np\nimport tensorflow as tf\nimport keras.backend as K\n\nfrom losses.mdn_loss import get_mdn_loss\n\n\ndef get_MDN_RWTA_loss(num_comp, eps, output_shape, fixed_variance=False):\n mdn_loss_function = get_mdn_loss(num_comp, eps, output_shape=output_shape, fixed_variance=fixed_variance)\n nll_rwta_loss_function = get_RWTA_loss(num_comp, eps, output_shape=output_shape, fixed_variance=fixed_variance)\n \n def get_combined_loss(y_true, y_pred):\n \n loss_mdn = mdn_loss_function(y_true, y_pred)\n loss_rwta = nll_rwta_loss_function(y_true, y_pred)\n \n loss = loss_mdn + loss_rwta\n return loss\n \n return get_combined_loss\n\n\n\ndef multivariate_NLL(y_true, y_pred):\n \"\"\" negative log likelihood loss function for multivariate gaussian with DIAGONAL covariance\n Parameters: \n y_true [batch_size, output_dim]\n y_pred [batch_size, output_dim*2] i.e. 
means followed by logvariance (both predicted by a NN)\n \"\"\"\n n_dims = int(int(y_pred.shape[1])/2)\n mu = y_pred[:, 0:n_dims]\n sigma = y_pred[:, n_dims:]\n \n mse = -0.5*K.sum(K.square((y_true-mu)/sigma),axis=1)\n sigma_trace = -K.sum(K.log(sigma), axis=1)\n log2pi = -0.5*n_dims*np.log(2*np.pi)\n \n log_likelihood = mse+sigma_trace+log2pi\n\n return -log_likelihood\n\n\ndef get_RWTA_loss(M, eps, output_shape, fixed_variance=False):\n \"\"\"\n M - number of mixture components; \n eps - (epsilon) the relaxation weight - usually something very small i.e. 0.001\n output_dim - how many variables does the gaussian have? usually 1 (for 1D - x coord) or 3 (for 3D - x,y,z)\n \"\"\"\n output_dim = output_shape\n\n \n def RWTA_loss_for_mdn(y_true, y_pred):\n # Reshape inputs in case this is used in a TimeDistribued layer\n y_pred = tf.reshape(y_pred, [-1, (2 * M * output_dim) + M], name='reshape_ypreds')\n y_true = tf.reshape(y_true, [-1, output_dim], name='reshape_ytrue')\n \n # Split the inputs into paramaters\n out_pi, out_mu, out_sigma = tf.split(y_pred, num_or_size_splits=[M,\n M * output_dim,\n M * output_dim],\n axis=-1, name='mdn_coef_split')\n if fixed_variance:\n# batch_size = out_sigma.get_shape()\n# print(batch_size)\n out_sigma = tf.fill(tf.shape(out_sigma), 1.0) #np.ones((batch_size[0], M*output_dim))\n\n # get NLL for each mixture\n mixtures_nll = []\n for m_ind in range(M):\n mixture_y_pred = tf.concat([out_mu[:, m_ind*output_dim:(m_ind+1)*output_dim], out_sigma[:, m_ind*output_dim:(m_ind+1)*output_dim]], -1)\n nll = multivariate_NLL(y_true, mixture_y_pred)\n nll = tf.expand_dims(nll, 1)\n mixtures_nll.append(nll)\n \n mixtures_combined = tf.concat(mixtures_nll, 1)\n # RWTA\n loss = (1-eps) * (M-1/M) * tf.math.reduce_min(mixtures_combined, axis=1) + (eps/M) * tf.math.reduce_sum(mixtures_combined, axis=1)\n return tf.reduce_mean(loss)\n \n return RWTA_loss_for_mdn","repo_name":"janekzimoch/localisation_with_image","sub_path":"PlayGround/Multimodality/2D_set_of_explenations/losses/mdn_rwta.py","file_name":"mdn_rwta.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30064164207","text":"\"\"\"\r\n\n\nWrite a function that takes a number and returns `True` if it's a prime;\n`False` otherwise. 
The number can be `2^64-1` (2 to the power of 64, not XOR).\nWith the standard technique it would be `O(2^64-1)`, which is much too large\nfor the 10 second time limit imposed by Edabit.\n\n![Sieve of\nEratosthenes](https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif)\n\n### Examples\n\n prime(7) ➞ True\n \n prime(56963) ➞ True\n \n prime(5151512515524) ➞ False\n\n### Notes\n\nA \"prime\" number is a number that can only be divided by itself and `1` (upon\ndivision the result is a whole number).\n\n\"\"\"\r\n\nimport math\ndef prime(num):\n if num < 2:\n return False\n elif num == 2 or num == 3:\n return True\n elif num != 2 and num % 2 == 0:\n return False\n else:\n m = int(math.sqrt(num)) + 2\n for i in range(3,m,2):\n if num % i == 0:\n return False\n return True\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"EcBpRwgYsbmEWXKB9_19.py","file_name":"EcBpRwgYsbmEWXKB9_19.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30297847380","text":"from django.urls import path\nfrom .views import (RegisterUserView, \n LoginView, \n ImageUploadViewSet,\n index)\n\nurlpatterns = [\n path('', index, name=\"home-route\"),\n path('auth/register/', RegisterUserView.as_view(), name=\"auth-register\"),\n path('auth/login/', LoginView.as_view(), name=\"auth-login\"),\n path('user/upload/', ImageUploadViewSet.as_view(), name=\"file-upload\"),\n path('user/upload//', ImageUploadViewSet.as_view(), name=\"file-upload-detail\")\n]\n","repo_name":"Celoka/flight-booking-system","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"15216120100","text":"\"\"\"\nThis is based on the \"Transformations, Time-sampled Animation, and Layer Offsets\" USD tutorial:\nhttps://graphics.pixar.com/usd/docs/567231471.html\n\n... 
but with a cube.\n\"\"\"\n\nfrom pxr import Usd, UsdGeom, Sdf\n\nimport os\nimport tempfile\nimport shutil\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self._tempDir = tempfile.mkdtemp()\n return self._tempDir\n\n def __exit__(self, exc_type, exc_value, traceback):\n shutil.rmtree(self._tempDir)\n\n\ndef AuthorStaticModel(modelPath):\n # Create a new stage.\n stage = Usd.Stage.CreateNew(modelPath)\n\n # Define an Xform with a child Cube.\n xform = UsdGeom.Xform.Define(stage, \"/Model\")\n UsdGeom.Cube.Define(stage, \"/Model/Geometry\")\n stage.SetDefaultPrim(xform.GetPrim())\n\n # Save the stage.\n stage.Save()\n\n\ndef AuthorAnimatedReferencedModel(animPath, modelPath):\n # Create a new stage.\n stage = Usd.Stage.CreateNew(animPath)\n stage.SetStartTimeCode(1)\n stage.SetEndTimeCode(10)\n\n # Author an Xform which references the static model.\n xform = UsdGeom.Xform.Define(stage, \"/SpinningModel\")\n stage.SetDefaultPrim(xform.GetPrim())\n xform.GetPrim().GetReferences().AddReference(modelPath)\n\n # Author a Y-axis \"spin\" rotation animation.\n spin = xform.AddRotateYOp(opSuffix='spin')\n spin.Set(time=1, value=0)\n spin.Set(time=10, value=180)\n\n # Save work.\n stage.Save()\n\n # stage.GetRootLayer().Export(\"spinningCube.usda\")\n\n\ndef AuthorMultipleAnimWithOffsets(multiAnimPath, animPath):\n # Create a new stage.\n stage = Usd.Stage.CreateNew(multiAnimPath)\n stage.SetStartTimeCode(1)\n stage.SetEndTimeCode(10)\n\n # Author a Xform which references the animated model as a child.\n left = UsdGeom.Xform.Define(stage, \"/Left\")\n left = stage.DefinePrim(\"/Left/Model\")\n left.GetPrim().GetReferences().AddReference(\n assetPath=animPath,\n )\n\n # Author a translated Xform which references the animated model as a child,\n # with a offset layerOffset.\n middle = UsdGeom.Xform.Define(stage, \"/Middle\")\n translate = middle.AddTranslateOp()\n translate.Set((5, 0, 0))\n middleModel = stage.DefinePrim(\"/Middle/Model\")\n middleModel.GetReferences().AddReference(\n assetPath=animPath,\n layerOffset=Sdf.LayerOffset(offset=5),\n )\n\n # Author a translated Xform which references the animated model as a child,\n # with a scale layerOffset.\n right = UsdGeom.Xform.Define(stage, \"/Right\")\n translate = right.AddTranslateOp()\n translate.Set((10, 0, 0))\n rightModel = stage.DefinePrim(\"/Right/Model\")\n rightModel.GetReferences().AddReference(\n assetPath=animPath,\n layerOffset=Sdf.LayerOffset(scale=0.5),\n )\n\n # Save work.\n stage.Save()\n\n # stage.GetRootLayer().Export(\"spinningCubes.usda\")\n\n\nif __name__ == \"__main__\":\n\n with TemporaryDirectory() as tempDir:\n # Author static model.\n modelPath = os.path.join(tempDir, \"model.usda\")\n AuthorStaticModel(modelPath)\n\n # Author animated model.\n animPath = os.path.join(tempDir, \"anim.usda\")\n AuthorAnimatedReferencedModel(animPath, modelPath)\n\n # Author animated models with offsets.\n multiAnimPath = os.path.join(tempDir, \"multiAnim.usda\")\n AuthorMultipleAnimWithOffsets(multiAnimPath, animPath)\n","repo_name":"moddyz/USDSandbox","sub_path":"src/usdSnippets/authorSpinningCubes.py","file_name":"authorSpinningCubes.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70039842333","text":"from collections import defaultdict\n\n\nclass Solution:\n def find(self, a):\n if self.par[a] != a:\n self.par[a] = self.find(self.par[a])\n return self.par[a]\n return a\n\n def union(self, a, b):\n 
self.par[self.find(b)] = self.find(a)\n\n def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:\n n = len(s)\n self.par = [i for i in range(n)]\n ans = ['.'] * n\n\n for pair in pairs:\n self.union(pair[0], pair[1])\n\n pars = defaultdict(list)\n\n for i in range(n):\n pars[self.find(i)].append(i)\n\n for par in pars:\n l = pars[par]\n chars = [s[i] for i in l]\n chars.sort()\n for i in range(len(l)):\n ans[l[i]] = chars[i]\n\n return \"\".join(ans)\n","repo_name":"forewing/lc","sub_path":"python/p1202.py","file_name":"p1202.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18518442201","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport openpyxl\nwb=openpyxl.load_workbook('SIPRI-Milex-data-1949-2020_0.xlsx')\nsheet=wb['Constant (2019) USD']\nsheet.delete_rows(sheet.min_row,5)\nwb.save('result.xlsx')\nd=pd.read_excel('result.xlsx',engine='openpyxl',sheet_name='Constant (2019) USD')\nusa=d.loc[d.Country=='USA']\nchina=d.loc[d.Country=='China']\nrussia=d.loc[d.Country=='Russia']\nus=[]\nfor i in range(2000,2021):\n us.append(int(usa[i]))\ncn=[]\nfor i in range(2000,2021):\n cn.append(int(china[i]))\nru=[]\nfor i in range(2000,2021):\n ru.append(int(russia[i]))\nx=[]\nfor i in range(2000,2021):\n x.append(i)\nplt.plot(x,us,'k-',label=\"US\")\nplt.plot(x,cn,'k:',label=\"China\")\nplt.plot(x,ru,'k--',label=\"Russia\")\nplt.legend()\nplt.savefig('result.png')\nplt.show()\n","repo_name":"ytakefuji/defense","sub_path":"us_china_russia.py","file_name":"us_china_russia.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43596139445","text":"import os\n\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponse\nfrom django.http import HttpRequest, JsonResponse\nimport json\nimport csv\nimport datetime\n# Create your views here.\n\ndef reader_to_table(csv_reader):\n table = []\n for line in csv_reader:\n table.append(line)\n return table\n\n\ndef qid_to_question(qid):\n for line in question_id:\n if qid == line[2]:\n return line[3]\n return ''\n\n\ndef timestr_to_time(str):\n '''\n 假设时间格式为“年/月/日 时:分”,返回五元组\n '''\n l = 0\n r = 0\n year = 0\n month = 0\n day = 0\n hour = 0\n minute = 0\n print(str)\n while str[r] != '-':\n r += 1\n year = int(str[l:r])\n l = r + 1\n r = r + 1\n\n while str[r] != '-':\n r += 1\n month = int(str[l:r])\n l = r + 1\n r = r + 1\n\n while str[r] != ' ':\n r += 1\n day = int(str[l:r])\n l = r + 1\n r = r + 1\n\n while str[r] != ':':\n r += 1\n hour = int(str[l:r])\n l = r + 1\n r = r + 1\n\n while str[r] != ':':\n r += 1\n minute = int(str[l:r])\n l = r + 1\n r = r + 1\n\n return year, month, day, hour, minute\n\n\ndef too_late(t):\n return t[3] >= 23 or t[3] <= 5\n\n\nprint('okok')\nquestion_detail_table = reader_to_table(csv.reader(open('data/答题情况明细.csv', encoding='utf8')))\nquestion_id = reader_to_table(csv.reader(open('data/题目编号名称关系.csv', encoding='utf-8')))\ntutorials = {}\nfor root, ds, fs in os.walk('data/观看教程统计/'):\n for tutorial_name in fs:\n print(tutorial_name)\n # 有的人名字里有生僻字,得用gb18030编码,utf8 gdb gb2312都不行\n tutorials[tutorial_name[:-4]] = reader_to_table(csv.reader(open('data/观看教程统计/'+tutorial_name, encoding='gb18030')))\n\n# 计算每道题的全部提交时间\nquestion_last_submit_time = {}\nfor l in question_id:\n qid = l[2]\n question_last_submit_time[qid] = 
[]\n for line in question_detail_table:\n if line[3] == qid:\n question_last_submit_time[qid].append(timestr_to_time(line[20]))\n question_last_submit_time[qid].sort()\n\n# for line in table:\n# print(line)\n\n@csrf_exempt\ndef say(request):\n assert isinstance(request, HttpRequest)\n # body = json.loads(request.body)\n sid = request.POST['ID']\n str_ = request.POST['str']\n t = str(datetime.datetime.now())\n t = t.replace(':', '-')\n with open(f'data/感想/{sid} {t}.txt', 'w') as f:\n f.write(str_)\n return JsonResponse({})\n\n@csrf_exempt\ndef all_in_one(request):\n assert isinstance(request, HttpRequest)\n # body = json.loads(request.body)\n sid = request.POST['ID']\n number = int(request.POST['number'])\n print(sid)\n print(number)\n if number == 1:\n return cal1(sid)\n elif number == 2:\n return cal2(sid)\n elif number == 3:\n return cal3(sid)\n elif number == 4:\n return cal4(sid)\n elif number == 5:\n return cal5(sid)\n elif number == 6:\n return cal6(sid)\n elif number == 7:\n return cal7(sid)\n elif number == 8:\n return cal8(sid)\n return JsonResponse({})\n\n\ndef cal1(sid):\n '''\n 在这半学期, 你在计组上\n 进行了xxx次提交\n 通过了xxx道题目\n 观看了xxx分钟教程\n 其中你在xxxx章节的停留时间最长\n 看起来你对这一部分知识更感兴趣?\n '''\n # 提交次数\n submit_count = 0\n for line in question_detail_table:\n if line[1] == sid:\n submit_count += int(line[10])\n # 通过题目\n pass_count = 0\n for line in question_detail_table:\n if line[1] == sid and line[8] != '0':\n pass_count += 1\n # 观看教程总时间、章最大时间\n time_sum = 0\n max_time = 0\n max_name = ''\n for k,v in tutorials.items():\n for line in v:\n if line[1] == sid:\n this_time = time_to_minute(line[-2])\n time_sum += this_time\n if this_time >= max_time:\n max_time = this_time\n max_name = k\n print('cal1 done')\n return JsonResponse([\n submit_count,\n pass_count,\n time_sum,\n max_name,\n ], safe=False)\n\n\ndef cal2(sid):\n '''\n 你提交最多的题目是\n \txxxx\n 共提交了 xx 次\n '''\n max_submit_name = ''\n max_submit_count = 0\n for line in question_detail_table:\n if line[1] == sid and int(line[10]) > max_submit_count:\n max_submit_count = int(line[10])\n max_submit_name = qid_to_question(line[3])\n return JsonResponse([\n max_submit_name,\n max_submit_count\n ], safe=False)\n\n\ndef cal3(sid):\n '''\n 你在xxxxx题目上\n 提交了xxx次完成了首次AC\n (聪明的你很快就完成了它|你百折不挠地解决了它)*\n '''\n # 策略:按以下优先级\n # 最大提交次数超过8次\n # 最小提交次数小于等于2次\n # 最大的提交次数\n submit_counts = []\n for line in question_detail_table:\n if line[1] == sid:\n submit_counts.append((int(line[10]), qid_to_question(line[3])))\n if len(submit_counts) == 0:\n return JsonResponse(['', ''], safe=False)\n submit_counts.sort()\n if submit_counts[-1][0] >= 8:\n return JsonResponse(submit_counts[-1], safe=False)\n if submit_counts[0][0] <= 2:\n return JsonResponse(submit_counts[0], safe=False)\n return JsonResponse(submit_counts[-1], safe=False)\n\n\ndef cal4(sid):\n '''\n 你在本学期第一次实验数制基础中\n 获得了**分\n '''\n return JsonResponse({})\n\n\ndef time_to_response(t):\n return [\n t[1],\n t[2],\n str(t[3]) + ':' + str(t[4])\n ]\n\ndef cal5(sid):\n '''\n 在xx月xx日,深夜 XXXX\n 你依旧在提交xxxx题\n 要好好注意身体(●'◡'●)\n '''\n for line in question_detail_table:\n if line[1] == sid:\n first_submit_time = timestr_to_time(line[19])\n last_submit_time = timestr_to_time(line[20])\n if too_late(first_submit_time):\n result = time_to_response(first_submit_time)\n result.append(qid_to_question(line[3]))\n return JsonResponse(result, safe=False)\n if too_late(last_submit_time):\n result = time_to_response(last_submit_time)\n result.append(qid_to_question(line[3]))\n return JsonResponse(result, safe=False)\n return 
JsonResponse(['','','',''], safe=False)\n\n\ndef cal6(sid):\n '''\n 在MIPS汇编实验中\n 你一共完成了xxx行代码\n (汇编写OS指日可期 | 很精简的作风!)\n '''\n result = 0\n for line in question_detail_table:\n if line[1] == sid and line[6] == 'MIPS 汇编':\n result += int(line[16])\n return JsonResponse([result], safe=False)\n\n\ndef cal7(sid):\n '''\n 你在这半学期中\n 完成了xx%的题目\n 后半学期要加油 |下半学期再接再厉\n '''\n pass_count = 0\n for line in question_detail_table:\n if line[1] == sid and line[8] != '0':\n pass_count += 1\n return JsonResponse([pass_count / 19 * 100], safe=False) # TODO: 以后可能不是16道题\n\n\ndef cal8(sid):\n '''\n 你在 xxxx 题目中,\n 比 xx% 的同学更x(早或晚)完成提交\n 早起的鸟儿有虫吃|你就是ddl战神?\n '''\n # 每个人的每道题完成时间,在所有人当中的比例\n ratio_and_question = []\n for line in question_detail_table:\n if line[1] == sid:\n t = timestr_to_time(line[20])\n qid = line[3]\n i = 0\n for submit_time in question_last_submit_time[qid]:\n if t < submit_time:\n break\n i += 1\n ratio = i / len(question_last_submit_time[qid]) # 比ratio比例的人提交得更晚\n ratio_and_question.append((ratio,qid_to_question(qid)))\n ratio_and_question.sort()\n if ratio_and_question[0][0] < 0.5:\n return JsonResponse([\n ratio_and_question[0][1],\n (1-ratio_and_question[0][0])*100,\n '早'\n ], safe=False)\n return JsonResponse([\n ratio_and_question[-1][1],\n ratio_and_question[-1][0]*100,\n '晚'\n ], safe=False)\n\n\ndef time_to_minute(time_str):\n print(time_str)\n return 60*int(time_str[0]) + int(time_str[2:3])\n\n\n","repo_name":"SuperColand/semester_summary_backend","sub_path":"cal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3282712509","text":"import jsonlines\nimport json\nfrom collections import OrderedDict\n\ntext_form = \"### 질문: {instruction} ### 답변: {output}\"\n\n# dataset = list()\n# with jsonlines.open(\".\\\\KoAlpaca_v1.1.jsonl\", 'r') as data:\n# for line in data.iter():\n# data_dict = OrderedDict()\n# data = text_form.format(instruction=line['instruction'], output=line['output'])\n# data_dict[\"text\"] = data\n# dataset.append(data_dict)\n# print(dataset)\n \n# with open(\".\\\\odego_data.json\", 'w', encoding=\"utf-8\") as file:\n# json.dump(dataset, file, indent='\\t', ensure_ascii=False)\n\n\ndataset = list()\nwith jsonlines.open(\".\\\\KoAlpaca_v1.1.jsonl\", 'r') as data:\n for line in data.iter():\n data_dict = OrderedDict()\n data_dict[\"instruction\"] = line['instruction']\n data_dict[\"input\"] = ''\n data_dict[\"output\"] = line['output']\n dataset.append(data_dict)\n\nwith open(\".\\\\odego_data.json\", 'w', encoding=\"utf-8\") as file:\n json.dump(dataset, file, indent='\\t', ensure_ascii=False)\n\nfrom langchain.chains.hyde.base import HypotheticalDocumentEmbedder","repo_name":"sjinwoo/odego","sub_path":"dataset/data_converter.py","file_name":"data_converter.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"7922736645","text":"import requests\nimport csv\nimport random\nfrom bs4 import BeautifulSoup\nimport socket\nimport csv\n\ndef get_content(url,data=None):\n header={\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'\n }\n\n timeout = random.choice(range(80,180))\n r = 
requests.get(url,headers=header,timeout=timeout)\n r.encoding = \"utf-8\"\n\n return r.text\n\ndef get_data(html_text):\n final = []\n bs =BeautifulSoup(html_text,\"html.parser\")\n body = bs.body\n data = body.find(\"div\",{\"id\":\"7d\"})\n ul = data.find(\"ul\")\n li = ul.find_all(\"li\")\n\n for day in li:\n temp = []\n date = day.find(\"h1\").string\n temp.append(date)\n inf = day.find_all(\"p\")\n temp.append(inf[0].string,)\n if inf[1].find('span') is None:\n temperature_highest = None # 天气预报可能没有当天的最高气温(到了傍晚,就是这样),需要加个判断语句,来输出最低气温\n else:\n temperature_highest = inf[1].find('span').string # 找到最高温\n temperature_highest = temperature_highest.replace('℃', '') # 到了晚上网站会变,最高温度后面也有个℃\n temperature_lowest = inf[1].find('i').string # 找到最低温\n temperature_lowest = temperature_lowest.replace('℃', '') # 最低温度后面有个℃,去掉这个符号\n temp.append(temperature_highest) # 将最高温添加到temp中\n temp.append(temperature_lowest) #将最低温添加到temp中\n final.append(temp) #将temp加到final中\n\n return final\n\ndef write_data(data,name):\n file_name = name\n with open(file_name,\"a\",errors=\"ignore\",newline=\"\") as f:\n f_csv = csv.writer(f)\n f_csv.writerows(data)\n\nif __name__ == \"__main__\":\n url = \"http://www.weather.com.cn/weather/101190401.shtml\"\n \n html = get_content(url)\n #print(html)\n result = get_data(html)\n #print(result)\n write_data(result,\"weather.csv\")\n ","repo_name":"sinvana/easy_spider","sub_path":"weather_spider.py","file_name":"weather_spider.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73797771291","text":"import torch\n# from ..utils.stochastic import GaussianSample\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nif torch.cuda.is_available():\n device = 'cuda'\nelse:\n device = \"cpu\"\nimport torch\n\n\nclass Stochastic(nn.Module):\n \"\"\"\n Base stochastic layer that uses the\n reparametrization trick [Kingma 2013]\n to draw a sample from a distribution\n parametrised by mu and log_var.\n \"\"\"\n\n def reparametrize(self, mu, log_var):\n epsilon = Variable(torch.randn(mu.size()), requires_grad=False)\n\n epsilon = epsilon.to(device)\n\n # log_std = 0.5 * log_var\n # std = exp(log_std)\n std = log_var.mul(0.5).exp_()\n\n # y = x.T * beta + std * epsilon\n # mu is x.T * beta\n # y = mu _ std * epsilon\n y = (mu).addcmul(std, epsilon)\n return y\n\n\nclass GaussianSample(Stochastic):\n \"\"\"\n Layer that represents a sample from a\n Gaussian distribution.\n \"\"\"\n\n def __init__(self, in_features, out_features):\n super(GaussianSample, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.mu = nn.Linear(in_features, out_features).to(device)\n self.log_var = nn.Linear(in_features, out_features).to(device)\n\n def forward(self, x):\n mu = self.mu(x)\n log_var = F.softplus(self.log_var(x))\n\n return self.reparametrize(mu, log_var), mu, log_var\n\n def mle(self, x):\n return self.mu(x)\n\n\nclass Simple3DCNN(torch.nn.Module):\n def __init__(self,\n activation,\n is_bns,\n is_dropouts,\n final_activation=None,\n drop_val=0.5,\n is_bayesian=False,\n random_node=\"output\"\n ):\n super(Simple3DCNN, self).__init__()\n if is_bayesian:\n if random_node == \"output\":\n self.GaussianSample = GaussianSample(1, 1)\n elif (random_node == \"last\"):\n self.GaussianSample = GaussianSample(1233, 1233)\n self.is_bayesian = is_bayesian\n\n self.activation = activation #.to(device)\n self.is_bns = is_bns\n 
self.is_dropouts = is_dropouts\n self.final_activation = final_activation\n self.layers = []\n self.bns = []\n self.lns = []\n self.pooling_layers = []\n in_channels = [1, 64, 128, 256]\n out_channels = [64, 128, 256, 1]\n kernel_sizes = [4, 4, 4, 1]\n strides = [3, 3, 3, 1]\n self.pooling = [0, 0, 0, 0]\n self.relu = torch.nn.ReLU()\n i = 0\n for ins, outs, ksize, stride in zip(in_channels, out_channels, kernel_sizes, strides):\n self.layers += [\n torch.nn.Conv3d(in_channels=ins, out_channels=outs, kernel_size=ksize, stride=stride).to(device)]\n if self.pooling[i] == 1:\n self.pooling_layers += [torch.nn.AdaptiveAvgPool3d(output_size=2).to(device)]\n else:\n self.pooling_layers += [None]\n self.bns += [nn.BatchNorm3d(num_features=outs).to(device)]\n # self.lns += [nn.LayerNorm(normalized_shape=None).to(device)]\n i += 1\n self.dense1 = torch.nn.Linear(in_features=4, out_features=1).to(device)\n self.dropout = nn.Dropout(drop_val)\n self.layers = nn.ModuleList(self.layers)\n\n def random_init(self, init_method):\n for i in range(len(self.layers)):\n init_method(self.layers[i].weight)\n nn.init.constant_(self.layers[i].bias, 0)\n init_method(self.dense1.weight)\n nn.init.constant_(self.dense1.bias, 0)\n\n def forward(self, x, random_node=None):\n for i in range(len(self.layers)):\n # if i == len(self.layers) - 1:\n # x = self.bns[i-1](x)\n x = self.dropout(x)\n x = self.layers[i](x)\n x = self.activation(x)\n if self.pooling[i] == 1:\n x = self.pooling_layers[i](x)\n x = x.squeeze()\n # x = self.dense1(x)\n if self.is_bayesian:\n x, _, _ = self.GaussianSample.float()(x)\n if self.final_activation is not None:\n x = self.final_activation(x)\n\n return x, None, None, None\n\n def get_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data.shape)\n\n def get_total_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data.shape)\n\n\nclass ResBlock3D(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channel, channel, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, in_channel, 1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\n\nclass ResBlockDeconv3D(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(in_channel, channel, 1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose3d(channel, in_channel, 3, padding=1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\n\nclass ConvResnet3D(nn.Module):\n def __init__(self,\n in_channel,\n channel,\n n_res_block,\n n_res_channel,\n stride,\n activation,\n dense_layers_sizes,\n is_bns,\n is_dropouts,\n final_activation=None,\n drop_val=0.5,\n is_bayesian=False,\n random_node=\"output\"\n ):\n super().__init__()\n self.is_bayesian = is_bayesian\n if is_bayesian:\n if random_node == \"output\":\n self.GaussianSample = GaussianSample(1, 1)\n elif (random_node == \"last\"):\n self.GaussianSample = GaussianSample(dense_layers_sizes[-2], dense_layers_sizes[-2])\n if stride == 4:\n blocks = [\n nn.Conv3d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.MaxPool3d(3),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel // 2, channel, 4, stride=2, padding=1),\n nn.MaxPool3d(2),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel, channel, 3, stride=2, padding=1),\n 
nn.ReLU(inplace=True),\n nn.Conv3d(channel, channel, 3, padding=1),\n ]\n\n elif stride == 2:\n blocks = [\n nn.Conv3d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(channel // 2, channel, 3, padding=1),\n ]\n\n for i in range(n_res_block):\n blocks.append(ResBlock3D(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n self.is_dropouts = is_dropouts\n self.dropout = [[] for _ in dense_layers_sizes]\n self.bns = [[] for _ in dense_layers_sizes]\n self.linears = [[] for _ in dense_layers_sizes]\n self.bn0 = torch.nn.BatchNorm1d(dense_layers_sizes[0])\n self.is_bns = is_bns\n self.blocks = nn.Sequential(*blocks)\n for i in range(len(dense_layers_sizes) - 1):\n self.linears[i] = torch.nn.Linear(in_features=dense_layers_sizes[i],\n out_features=dense_layers_sizes[i + 1]).to(device)\n if self.is_bns[i] == 1:\n self.bns[i] = torch.nn.BatchNorm1d(dense_layers_sizes[i]).to(device)\n else:\n self.bns[i] = None\n if self.is_dropouts[i] == 1:\n self.dropout[i] = nn.Dropout(drop_val).to(device)\n else:\n self.dropout[i] = None\n\n self.activation = activation\n self.final_activation = final_activation\n\n def random_init(self, init_method=nn.init.xavier_normal_):\n print(\"Random init\")\n for m in self.modules():\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose3d):\n init_method(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, input, random_node):\n x = self.blocks(input)\n x = x.view(-1, 256)\n if self.is_bns[0]:\n x = self.bns[0](x)\n x = self.activation(x)\n for i, (dense, bn, is_bn, is_drop) in enumerate(zip(self.linears, self.bns, self.is_bns, self.is_dropouts)):\n if is_drop:\n x = self.dropout[i](x)\n # TODO linear layers are not turning to float16\n if random_node == \"last\" and i == len(self.bns) - 2:\n x_mean, mu, log_var = self.GaussianSample.float()(x)\n x = x_mean\n x = dense(x.float())\n if i < len(self.bns) - 2:\n if self.is_bns[i + 1]:\n x = self.bns[i + 1](x)\n x = self.activation(x)\n\n if self.is_bayesian:\n if self.final_activation is not None:\n x = self.final_activation(x)\n else:\n x = x.clone()\n # TODO GaussianSample turning to float16 (half), but x is float32 (float)\n if random_node == \"output\":\n x_mean, mu, log_var = self.GaussianSample.float()(x)\n x = x_mean.clone()\n else:\n mu = None\n log_var = None\n x_mean = None\n if self.final_activation is not None:\n x = self.final_activation(x)\n else:\n x = x.clone()\n return x, mu, log_var, x_mean\n\n def mle_forward(self, input, random_node):\n x = self.blocks(input)\n x = x.view(-1, 256)\n mu = None\n log_var = None\n if self.is_bns[0]:\n x = self.bns[0](x)\n for i, (dense, bn, is_bn, is_drop) in enumerate(zip(self.linears, self.bns, self.is_bns, self.is_dropouts)):\n # linear layers are not turning to float16\n if is_drop:\n x = self.dropout[i](x)\n if random_node == \"last\" and i == len(self.bns) - 2:\n x, mu, log_var = self.GaussianSample.float()(x)\n x = dense(x.float())\n\n if i < len(self.bns) - 2:\n if self.is_bns[i + 1]:\n x = self.bns[i + 1](x)\n x = self.activation(x)\n if random_node == \"last\":\n if self.final_activation is not None:\n x = self.final_activation(x)\n\n assert self.is_bayesian\n # GaussianSample turning to float16 (half), but x is float32 (float)\n # y = self.GaussianSample.float().mle(x)\n if random_node == \"output\":\n if self.final_activation is not None:\n x = self.final_activation(x)\n x, mu, log_var = self.GaussianSample.float()(x)\n return x, mu, 
log_var\n\n def get_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data.shape)\n\n def get_total_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data.shape)\n\n\nclass DeconvResnet3D(nn.Module):\n def __init__(self,\n in_channel,\n channel,\n n_res_block,\n n_res_channel,\n stride,\n activation,\n dense_layers_sizes,\n is_bns,\n is_dropouts,\n final_activation=None,\n drop_val=0.5,\n is_bayesian=False,\n random_node=\"output\"\n ):\n super().__init__()\n self.is_bayesian = is_bayesian\n self.blocks1 = ResBlockDeconv3D(channel, n_res_channel)\n self.blocks2 = ResBlockDeconv3D(channel, n_res_channel)\n self.blocks3 = ResBlockDeconv3D(channel, n_res_channel)\n self.blocks4 = ResBlockDeconv3D(channel, n_res_channel)\n\n self.blocks5 = nn.ConvTranspose3d(channel, channel, 3, padding=1)\n self.blocks6 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks7 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks8 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks9 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks10 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks11 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks12 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks13 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks14 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks15 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks16 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks17 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks18 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks19 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks20 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks21 = nn.ConvTranspose3d(channel, channel, 4, stride=2, padding=1)\n self.blocks22 = nn.ConvTranspose3d(channel, channel // 2, 4, stride=2, padding=1)\n self.blocks23 = nn.ConvTranspose3d(channel // 2, in_channel, 4, stride=2, padding=1)\n self.is_dropouts = is_dropouts\n self.dropout = [[] for _ in dense_layers_sizes]\n self.bns = [[] for _ in dense_layers_sizes]\n self.linears = [[] for _ in dense_layers_sizes]\n self.bn0 = torch.nn.BatchNorm3d(dense_layers_sizes[0])\n self.is_bns = is_bns\n for i in range(len(dense_layers_sizes) - 1):\n self.linears[i] = torch.nn.Linear(in_features=dense_layers_sizes[i],\n out_features=dense_layers_sizes[i + 1]).to(device)\n if self.is_bns[i] == 1:\n self.bns[i] = torch.nn.BatchNorm3d(dense_layers_sizes[i]).to(device)\n else:\n self.bns[i] = None\n if self.is_dropouts[i] == 1:\n self.dropout[i] = nn.Dropout(drop_val).to(device)\n else:\n self.dropout[i] = None\n\n self.activation = activation\n self.final_activation = final_activation\n\n def random_init(self, init_method=nn.init.xavier_normal_):\n print(\"Random init\")\n for m in self.modules():\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose3d):\n init_method(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x):\n # for i, dense in enumerate(self.linears[:-1]):\n x = self.linears[0](x.float())\n x = self.activation(x)\n x = x.unsqueeze(2)\n x = 
self.blocks1(x)\n x = self.activation(x)\n x = self.blocks2(x)\n x = self.activation(x)\n x = self.blocks3(x)\n x = self.activation(x)\n x = self.blocks4(x)\n x = self.activation(x)\n x = self.blocks5(x)\n x = self.activation(x)\n x = self.blocks6(x)\n x = self.activation(x)\n x = self.blocks7(x)\n x = self.activation(x)\n x = self.blocks8(x)\n x = self.activation(x)\n x = self.blocks9(x)\n x = self.activation(x)\n x = self.blocks10(x)\n x = self.activation(x)\n x = self.blocks11(x)\n x = self.activation(x)\n x = self.blocks12(x)\n x = self.activation(x)\n x = self.blocks13(x)\n x = self.activation(x)\n x = self.blocks14(x)\n x = self.activation(x)\n x = self.blocks15(x)\n x = self.activation(x)\n x = self.blocks16(x)\n x = self.activation(x)\n x = self.blocks17(x)\n x = self.activation(x)\n x = self.blocks18(x)\n x = self.activation(x)\n x = self.blocks19(x)\n x = self.activation(x)\n x = self.blocks20(x)\n x = self.activation(x)\n x = self.blocks21(x)\n x = self.activation(x)\n x = self.blocks22(x)\n x = self.activation(x)\n x = self.blocks23(x)\n if self.final_activation is not None:\n x = self.final_activation(x)\n return x\n\n def get_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data.shape)\n\n def get_total_parameters(self):\n for name, param in self.named_parameters():\n if param.requires_grad:\n print(name, param.data.shape)\n","repo_name":"spell00/fmri-autosegmentation","sub_path":"fmri/models/supervised/cnn3d.py","file_name":"cnn3d.py","file_ext":"py","file_size_in_byte":17330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"23645302346","text":"from .signalclidbusmock import SignalCLIDBusMock\nfrom gi.repository import GLib\nfrom pydbus import SessionBus\nfrom threading import Thread\nimport time\n\n\nclass Mocker(object):\n\n def start(self):\n self._bus = SessionBus()\n self._mock = SignalCLIDBusMock()\n self._mockerservice = self._bus.publish(\n \"org.signalbot.signalclidbusmock\",\n self._mock)\n self._loop = GLib.MainLoop()\n self._thread = Thread(target=self._loop.run, daemon=True)\n self._thread.start()\n self.tosignalbot = []\n self._wait_until = 0\n\n def messageSignalbot(self, sender, group_id, text, attachmentfiles):\n self._mock.MessageReceived(int(time.time()),\n sender, group_id, text, attachmentfiles)\n self.tosignalbot.append([int(time.time()),\n sender, group_id, text, attachmentfiles])\n\n def _wait_until_n_messages(self, n=1, timeout=1):\n return self._mock.wait_until_n_messages(n=n, timeout=timeout)\n\n def wait_for_n_messages(self, n=1, timeout=1):\n self._wait_until += n\n self._wait_until_n_messages(n=self._wait_until, timeout=timeout)\n\n @property\n def fromsignalbot(self):\n return self._mock._sentmessages\n\n def stop(self):\n self._loop.quit()\n self._thread.join()\n self._mockerservice.unpublish()\n","repo_name":"signal-bot/signal-bot","sub_path":"signalclidbusmock/mocker.py","file_name":"mocker.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"32"} +{"seq_id":"22714632465","text":"#Import required library.\r\nfrom tkinter import*\r\nimport random\r\n#create object\r\nroot=Tk()\r\n#Geometry\r\nroot.geometry(\"1500x1500\")\r\n# title\r\nroot.title(\"Rock, Paper and Scissors Game\")\r\n#Computer choice\r\ncomputer_choice={\"0\":\"Rock\",\"1\":\"Paper\",\"2\":\"Scissors\"}\r\n#Reset game\r\ndef reset_game():\r\n b1[\"state\"]=\"active\"\r\n 
b2[\"state\"]=\"active\"\r\n b3[\"state\"]=\"active\"\r\n l1.config(text=\"Player\")\r\n l3.config(text=\"Computer\")\r\n l4.config(text=\" \")\r\n#Disable the Buttons\r\ndef button_disable():\r\n b1[\"state\"]=\"disable\"\r\n b2[\"state\"]=\"disable\"\r\n b3[\"state\"]=\"disable\"\r\n \r\n#If the player selected rock\r\ndef rock():\r\n c=computer_choice[str(random.randint(0,2))]\r\n if c==\"Rock\":\r\n result=\"Match Draw,try again!\"\r\n elif c==\"Scissors\":\r\n result=\"You Win,Congratulations!\"\r\n else:\r\n result=\"Computer Wins,better luck next time!\"\r\n l4.config(text=result)\r\n l1.config(text=\"Rock\")\r\n l3.config(text=c)\r\n button_disable()\r\n#If the player selected paper\r\ndef paper():\r\n c=computer_choice[str(random.randint(0,2))]\r\n if c==\"Paper\":\r\n result=\"Match Draw,try again!\"\r\n elif c==\"Scissors\":\r\n result=\"Computer Wins,better luck next time!\"\r\n else:\r\n result=\"You Win,Congratulations!\"\r\n l4.config(text=result)\r\n l1.config(text=\"Paper\")\r\n l3.config(text=c)\r\n button_disable()\r\n#If the player selected scissors\r\ndef scissors():\r\n c=computer_choice[str(random.randint(0,2))]\r\n if c==\"Rock\":\r\n result=\"Computer Wins,better luck next time!\"\r\n elif c==\"Scissors\":\r\n result=\"Match Draw,try again!\"\r\n else:\r\n result=\"You win,Congratulations!\"\r\n l4.config(text=result)\r\n l1.config(text=\"Scissors\")\r\n l3.config(text=c)\r\n button_disable()\r\n\r\n#Add Labels,Frames and Button\r\nLabel(root,\r\n text=\"Rock,Paper and Scissors Game\",\r\n font=\"normal 20 bold\",fg=\"purple\").pack(pady=20)\r\nframe=Frame(root)\r\nframe.pack()\r\nl1=Label(frame,\r\n text=\"Player\",\r\n font=10)\r\n\r\nl2=Label(frame,\r\n text=\"VS\",\r\n font=\"normal 20 bold\")\r\nl3=Label(frame,text=\"Computer\",font=10)\r\nl1.pack(side=LEFT)\r\nl2.pack(side=LEFT)\r\nl3.pack( )\r\nl4=Label(root,\r\n text=\" \",\r\n font=\"normal 20 bold\",\r\n bg=\"yellow\",\r\n width= 30,\r\n borderwidth= 5,\r\n relief=\"solid\")\r\nl4.pack(pady=20)\r\nframe1=Frame(root)\r\nframe1.pack()\r\n\r\nb1=Button(frame1, text=\"Rock\",\r\n font=10,width=7,\r\n command=rock)\r\n\r\nb2=Button(frame1,text=\"Paper\",\r\n font=10,width=7,\r\n command=paper)\r\nb3=Button(frame1,text=\"Scissors\",\r\n font=10,width=7,\r\n command=scissors)\r\n\r\nb1.pack(side=LEFT,padx=10)\r\nb2.pack(side=LEFT,padx=10)\r\nb3.pack(side=LEFT,padx=10)\r\n\r\nb4=Button(root,text=\"Play Again\",\r\n font=10,fg=\"black\",\r\n bg=\"green\",command=reset_game)\r\nb4.pack()\r\n\r\n#Executing the game\r\nroot.mainloop()\r\n \r\n\r\n \r\n\r\n \r\n","repo_name":"AishaniPandey/Rock-paper-scissors-game-using-GUI-in-python","sub_path":"gui rps.py","file_name":"gui rps.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41443396823","text":"def LIS(A, n, i=0, prev=float('-inf')):\n if i == n:\n return 0\n excl = LIS(A, n, i + 1, prev)\n incl = 0\n if A[i] > prev:\n incl = 1 + LIS(A, n, i + 1, A[i])\n return max(incl, excl)\n \nif __name__ == '__main__':\n\n A = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]\n\n print(\"Length of LIS is\", LIS(A, len(A)))","repo_name":"alishbah13/CS-302Algorithms","sub_path":"app/static/algorithms/LIS.py","file_name":"LIS.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28393927246","text":"\"\"\"\nScript to run segmentation pipeline\n\"\"\"\nimport os\nfrom 
MIF_Segmentation import MIF_Segmentation\n\nchannel_dict = {\n \"inter\": 13,\n \"aqp1\": 15,\n \"aqp2\": 16,\n \"ck7\": 3,\n \"cal\": 9,\n \"panck\": 8,\n \"cd31\": 5,\n \"cd34\": 4\n}\n\nthreshold_dict = {\n \"inter\": (95, 99),\n \"aqp1\": (93, 99),\n \"aqp2\": (99,100),\n \"ck7\": (99.9,100),\n \"cal\": (99,100),\n \"panck\": (99,100),\n \"cd31\": (1, 99),\n \"cd34\": (99,100)\n}\n\nmethod_dict = {\n \"inter\": 'adaptive',\n \"aqp1\": 'otsu',\n \"aqp2\": 'otsu',\n \"ck7\": 'otsu',\n \"cal\": 'otsu',\n \"panck\": 'otsu',\n \"cd31\": 'adaptive',\n \"cd34\": 'normal'\n}\n\nseg = MIF_Segmentation(r'data/213_HIVE3_TMA_191_7_10_6_22_Scan2.qptiff',\n 6,\n channel_dict,\n threshold_dict,\n method_dict,\n cache_dir = r'masks',\n save = False)\n\n\n### Nuclear Segmenation Evaluation ###\n\nnuclear_files = ['nuclei_6310.0_8578.0_235.0_215.0_mask.png', \n 'nuclei_9412.0_15956.0_242.0_184.0_mask.png',\n 'nuclei_12138.0_12153.0_322.0_240.0_mask.png']\nnuclear_gt = [os.path.join('gt_masks_final', file) for file in nuclear_files]\n# nuclear_metrics = seg.validate_mask_pixels('cell_segmentation_gauss_blur.npy', nuclear_gt)\n# print('-'*10 + 'Nuclear Metrics Pixel Wise ' + '-'*10)\n# print(nuclear_metrics)\n\n# nuclear_metrics = seg.validate_mask_pixels('cell_segmentation7_0.59_0.5.npy', nuclear_gt)\n\n# print('-'*10 + 'Nuclear Metrics Pixel Wise ' + '-'*10)\n# print(nuclear_metrics)\n# nuclear_metrics = seg.validate_mask_element('cell_segmentation7_0.59_0.5.npy', nuclear_gt, iou_thresh = 0.001)\n\n# print('-'*10 + 'Nuclear Metrics Element Wise ' + '-'*10)\n# print(nuclear_metrics)\n\n### Glomeruli Segmentation Metrics ###\n\nglom_files = ['glom_5568.0_13755.0_1831.0_1842.0_mask.png', 'glom_9038.0_18468.0_1320.0_1582.0_mask.png',\n 'glom_11044.0_8500.0_1209.0_1342.0_mask.png', 'glom_11078.0_21329.0_1395.0_1867.0_mask.png']\nglom_gt = [os.path.join('gt_masks_final', file) for file in glom_files]\n\n\nglom_metrics = seg.validate_mask_pixels('initial_glom.npy', glom_gt)\n\nprint('-'*10 + 'Glomeruli Metrics Pixel' + '-'*10)\nprint(glom_metrics)\n\nglom_metrics = seg.validate_mask_element('initial_glom.npy', glom_gt, iou_thresh = 0.001)\n\nprint('-'*10 + 'Glom Metrics Element Wise ' + '-'*10)\nprint(glom_metrics)\n\n### Interstitism Segmentation Metrics ###\n\ninter_files = ['interstism_16183.0_11285.0_1384.0_692.0_mask.png', 'interstism_6382.0_8869.0_651.0_894.0_mask.png', 'interstism_8952.0_24419.0_1096.0_795.0_mask.png']\ninter_gt = [os.path.join('gt_masks_final', file) for file in inter_files]\n\ninter_metrics = seg.validate_mask_pixels('inter_seg.npy', inter_gt)\n\nprint('-'*10 + 'Interstism Metrics' + '-'*10)\nprint(inter_metrics)\n\n### Tubule Segmentation Metrics ###\n\ntub_files = ['tubule_8707.0_11576.0_1295.0_653.0_mask.png', 'tubule_11281.0_16426.0_1026.0_770.0_mask.png']\ntub_gt = [os.path.join('gt_masks_final', file) for file in tub_files]\n\ntub_metrics = seg.validate_mask_pixels('tub_seg.npy', tub_gt)\nprint('-'*10 + 'Tubule Metrics Pixel'+ '-'*10)\nprint(tub_metrics)\n\ntub_metrics = seg.validate_mask_element('tub_seg.npy', tub_gt, iou_thresh = 0.001)\n\nprint('-'*10 + 'Tub Metrics Element Wise ' + '-'*10)\nprint(tub_metrics)\n\n### Interstisial Capillary Metrics ### \n\ncap_files = ['capillary_9142.0_21794.0_421.0_296.0_mask.png', 'capillary_15212.0_5518.0_905.0_574.0_mask.png']\ncap_gt = [os.path.join('gt_masks_final', file) for file in cap_files]\n\ncap_metrics = seg.validate_mask_pixels('capillary_seg.npy', cap_gt)\nprint('-'*10 + 'Capillary Metrics Pixel'+ 
'-'*10)\nprint(cap_metrics)\n\ncap_metrics = seg.validate_mask_element('capillary_seg.npy', cap_gt, iou_thresh = 0.001)\n\nprint('-'*10 + 'Capillary Metrics Element Wise ' + '-'*10)\nprint(cap_metrics)\n","repo_name":"kaitsmith22/kidney-mif","sub_path":"evaluate_masks.py","file_name":"evaluate_masks.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18133782963","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nfrom hawkmoth.util import compiler\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = 'Hawkmoth Test'\ncopyright = '2018, Jani Nikula'\nauthor = 'Jani Nikula'\nversion = ''\nrelease = ''\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n 'hawkmoth',\n 'hawkmoth.ext.javadoc',\n 'hawkmoth.ext.napoleon',\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for Hawkmoth ----------------------------------------------------\n# https://jnikula.github.io/hawkmoth/dev/extension.html#configuration\n\nhawkmoth_clang = compiler.get_include_args()\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'basic'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'nosidebar': True,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {}\n\nhtml_use_index = False\nhtml_copy_source = False\nhtml_show_sourcelink = False\nhtml_show_copyright = False\nhtml_show_search_summary = False\nhtml_show_sphinx = False\n","repo_name":"jnikula/hawkmoth","sub_path":"test/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"32"} +{"seq_id":"72591015451","text":"from flask import *\nfrom backend.route.__init__ import mypool\nfrom backend.view.member.name_change import MemberID\nfrom backend.view.member.name_change import ResponseMessage\nfrom backend.view.member.name_change import UserInput\n\nclass MemberSystem:\n def name_change():\n member_id = MemberID.get_member_id()\n (new_name, *other_args) = UserInput.input()\n\n try:\n connection = mypool.get_connection()\n cursor = connection.cursor(dictionary = True)\n insert_query = \"UPDATE members SET member_name = %s WHERE member_id = %s;\"\n insert_value = (new_name, member_id)\n cursor.execute(insert_query, insert_value)\n connection.commit()\n return ResponseMessage.name_change_correct()\n\n except Exception as e:\n print(\"Error(14): \", e)\n return ResponseMessage.name_change_error(e)\n\n finally:\n cursor.close()\n connection.close()","repo_name":"skwongman/taipei-day-trip","sub_path":"backend/model/member/name_change.py","file_name":"name_change.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17638497378","text":"import json\r\nimport math\r\nfile = open('as_epsilon_values.txt', 'w')\r\nwith open(\"cg_resilience.json\") as data_file: \r\n\tdata = json.load(data_file)\r\n\tas_remax_dict = {}\r\n\tas_remin_dict = {}\r\n\tepsilon_values = {}\r\n\tas_one_zeros = []\r\n\tas_two_zeros =[]\r\n\tfor values in data.values():\r\n\t\tif isinstance(values, dict):\r\n\t\t\twhile values:\r\n\t\t\t\tkey, value = values.popitem()\r\n\t\t\t\tif key in as_remax_dict:\r\n\t\t\t\t\tif as_remax_dict[key] < value:\r\n\t\t\t\t\t\tas_remax_dict[key] = value \r\n\t\t\t\telse: \r\n\t\t\t\t\tas_remax_dict[key] = value \r\n\t\t\t\tif key in as_remin_dict:\r\n\t\t\t\t\tif as_remin_dict[key] > value:\r\n\t\t\t\t\t\tas_remin_dict[key] = value \r\n\t\t\t\telse: \r\n\t\t\t\t\tas_remin_dict[key] = value \r\n\tfor key in as_remax_dict:\r\n\t\tif as_remax_dict[key] != 0 and as_remin_dict[key] != 0:\r\n\t\t\tepsilon = math.log(float(as_remax_dict[key])/(float(as_remin_dict[key])))\r\n\t\t\tepsilon_values[key] = epsilon\r\n\t\t\tfile.write(key)\r\n\t\t\tfile.write(\": \")\r\n\t\t\tfile.write(str(epsilon))\r\n\t\t\tfile.write(\"\\n\")\r\n\t\telif as_remax_dict[key] == 0 and as_remin_dict[key] == 0:\r\n\t\t\tas_two_zeros.append(key)\r\n\t\telse:\r\n\t\t\tas_one_zeros.append(key)\r\n\r\n\tprint (epsilon_values)\r\n\tprint(len(as_two_zeros))\r\n\tprint(len(as_one_zeros))\r\n\tprint (len(epsilon_values))\r\n","repo_name":"avishek-mondal/Hans_Thesis","sub_path":"calculate_max_prob.py","file_name":"calculate_max_prob.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37547962274","text":"from rest_framework import generics\nfrom django.contrib.auth import authenticate,logout\nfrom .models import *\nfrom .serializers import *\nfrom 
rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.status import (HTTP_200_OK, HTTP_201_CREATED,\n HTTP_400_BAD_REQUEST)\n\nclass Login(generics.CreateAPIView):\n \"\"\"\n CreateAPIView Login.\n \"\"\"\n serializer_class = LoginSerializer\n\n def post(self, request):\n user = authenticate(username=request.data[\"email\"], password=request.data[\"password\"])\n try:\n if user is None:\n raise serializers.ValidationError(\"Invalid email or password\")\n token, _ = Token.objects.get_or_create(user=user)\n return Response({'token': token.key}, status=HTTP_200_OK)\n except Exception as e:\n return Response({\"message\": str(e)}, HTTP_400_BAD_REQUEST)\n\n\nclass Register(generics.CreateAPIView):\n \"\"\"\n CreateAPIView Register.\n \"\"\"\n serializer_class = RegisterSerializer\n\n def post(self, request):\n try:\n email = request.data.get(\"email\", None)\n if not email:\n raise serializers.ValidationError(\"Please specify an email\")\n user_check = User.objects.filter(email=email)\n if user_check:\n raise serializers.ValidationError(\"Email already registered\")\n user = User.objects.create_user(**request.data)\n data = RegisterSerializer(user).data\n return Response(data=data, status=HTTP_201_CREATED)\n except Exception as e:\n return Response({\"message\": str(e)}, HTTP_400_BAD_REQUEST)\n\n\nclass Logout(APIView):\n \"\"\"\n APIView Logout.\n \"\"\"\n def post(self, request):\n try:\n logout(request)\n data = {'success': 'Sucessfully logged out'}\n return Response(data=data, status = HTTP_200_OK)\n except Exception as e:\n return Response({\"message\": str(e)}, HTTP_400_BAD_REQUEST)","repo_name":"jabdalac/music_library","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37774831495","text":"\"\"\"prepdatafolders.py\n\nUse to reorganize your original dataset folder structure and\nspeech annotations into a custom format that our data pipeline can\ningest.\n\nUse `PrepDataFolders.find_files_in_folder_recursive()` to selectively\ngrab files of a certain type as a list and `PrepDataFolders.copyfiles()`\nto copy them into a new folder.\n\nOrganize your folder structure as follows:\ndata/interim///audio\n /rttm\n /textgrid\n /xml\nFor the annotations, provide it in any of the above formats, ensuring\nthat filename matches each audio file.\n\nUse converters below to convert them to RTTM format, which is our\npreferred format for ingest.\n\nFor far field audio which combines speaker segments from multiple\nnear field audio files, use .merge_xml() or .merge_overlap_segments() to\ngenerate combined segments.\n\nOnce dataset is ready, it can be passed through DataValidation() class.\n\n\"\"\"\nimport logging\nimport os\nimport re\nimport shutil\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Union # Optional,\n\nimport numpy as np\nimport soundfile as sf\nfrom tqdm import tqdm\n\nfrom . 
import speech_segments as sseg\n\nlogger = logging.getLogger(__name__)\n\n\nclass PrepDataFolders:\n \"\"\"\n PrepDataFolders contains generic methods such as\n `file_files_in_folder_recursive()` and `copyfiles()`\n to collate certain filetypes and copy over to a destination path,\n various methods for reading and writing to RTTM, XML, TextGrid\n files, etc.\n\n Also see AmiPrepDataFolders or AliPrepDataFolders, which extends\n PrepDataFolders for AMI/Alimeeting dataset-specific functionality.\n\n \"\"\"\n\n def __init__(self):\n pass\n\n # ! TODO - pull out from here\n @staticmethod\n def toggle_audio_rttm_path(file_path: Union[Path, str]) -> Path:\n \"\"\"Given a filepath to an audio file, returns the filepath for\n the corresponding rttm file, or vice versa.\n\n Args:\n file_path (Union[Path, str]): input file path\n\n Raises:\n ValueError: if file path is not to a .wav or .rttm file\n\n Returns:\n Path: path to the counterpart file pair.\n \"\"\"\n if Path(file_path).suffix == \".wav\":\n new_path = file_path.parent.parent.joinpath(\"rttm\").joinpath(\n str(file_path.stem) + \".rttm\"\n )\n elif Path(file_path).suffix == \".rttm\":\n new_path = file_path.parent.parent.joinpath(\"audio\").joinpath(\n str(file_path.stem) + \".wav\"\n )\n else:\n raise ValueError(\n \"File must be either .wav or .rttm file to toggle with the other.\"\n )\n return new_path\n\n @staticmethod\n def read_audio(audio_path: Union[str, Path]) -> Tuple[np.ndarray, int]:\n \"\"\"Read in audio file from given file path.\n\n Args:\n audio_path (str|Path): Path to audio file\n\n Returns:\n Tuple[np.ndarray, int]: tuple of signal and sample rate\n \"\"\"\n try:\n signal, sr = sf.read(str(audio_path))\n return signal, sr\n except Exception as e:\n logger.error(\"Unable to read file %s : %s\", audio_path, e)\n return\n\n @staticmethod\n def save_audio(audio_path, signal, sr, subtype: str = \"PCM_16\") -> Union[None, int]:\n try:\n sf.write(audio_path, signal, sr, subtype=subtype)\n return\n except Exception as e:\n logger.error(\"Unable to write file %s : %s\", audio_path, e)\n return -1\n\n def duplicate_rttm_file(self, rttm_path, rttm_copy_path) -> Union[None, int]:\n try:\n # copies file\n shutil.copy2(rttm_path, rttm_copy_path)\n # edits file and replaces 'fileid's in each segment to\n # reflect new filename\n self.update_rttm_fileid(rttm_copy_path)\n except Exception as e:\n logger.error(\n \"Unable to copy or create new RTTM %s : %s\",\n rttm_copy_path,\n e,\n )\n return -1\n\n @staticmethod\n def find_files_in_folder(\n ext: str, folderpath: str, recursive: bool = True\n ) -> List[Path]:\n \"\"\"Given a folder path, recursively searches that folder and all\n its subfolders for the given file extension. Returns a list of\n file Path objects for all matches.\n\n Args:\n ext (str): File extension of the filetype you wish to match.\n e.g. '.wav' or 'wav' will work.\n folderpath (str): Path to base directory to search for files.\n\n Returns:\n List[Path]: A list of Path objects of matching file paths.\n \"\"\"\n\n # in case user did not provide '.' in `ext`\n if not ext.startswith(\".\"):\n ext = \".\" + ext\n\n # find all files matching given extension\n if recursive:\n filetype = \"**/*\" + ext # e.g. \"**/*.wav\"\n else:\n filetype = \"*\" + ext\n filelist = list(Path(folderpath).glob(filetype))\n\n return filelist\n\n @staticmethod\n def copyfiles(filepath_list: List[Path], dest_folder: str) -> List[Path]:\n \"\"\"Given a list of filepaths, copies all into destination\n folder. 
Returns list of new filepaths at the destination folder.\n\n Args:\n filelist (List[Path]): List of file paths to be copied\n dest_folder (str): Destination folder to copy to\n\n Returns:\n List[Path]: List of file paths at destination path\n \"\"\"\n dest_path = Path(dest_folder)\n dest_path.mkdir(\n parents=True, exist_ok=True\n ) # Create the destination folder if it doesn't exist\n\n newfilelist = []\n for filepath in tqdm(filepath_list):\n source_path = Path(filepath)\n dest_file = dest_path / source_path.name\n\n if not os.path.exists(dest_file):\n shutil.copy2(filepath, dest_file)\n logger.debug(\"copied %s\", dest_file)\n else:\n logger.warning(\"%s exists, halted copy.\", dest_file)\n newfilelist.append(dest_file)\n\n return newfilelist\n\n # ! - TODO - pull out into pathing.py\n @staticmethod\n def replace_path(\n old_path: Union[Path, str],\n new_subfolder: str = None,\n new_filename: str = None,\n new_ext: str = None,\n ) -> Path:\n \"\"\"Given the above arguments, replaces the filename, extension\n and topmost subfolder in the path.\n\n Args:\n old_path (Path | str): Old file path.\n new_subfolder (str, optional): Topmost parent folder to\n rename to. Defaults to None. If None, does not rename.\n new_filename (str, optional): Filename to rename to.\n Defaults to None. If None, does not rename filename.\n new_fileext (str, optional): File extension to rename to.\n Defaults to None. If None, does not change extension.\n\n Returns:\n Path: Modified file path\n\n Example:\n >>> replace_path(\n 'path/to/folder/abc.txt',\n 'newfolder',\n 'def',\n '.xml'\n )\n\n Path('path/to/newfolder/def.xml')\n\n \"\"\"\n old_path = Path(old_path)\n old_filename = old_path.stem\n old_ext = old_path.suffix\n newpath = None\n\n # replace subfolder?\n if new_subfolder is not None:\n basepath = old_path.parent.parent\n newpath = basepath.joinpath(new_subfolder)\n newpath.mkdir(parents=True, exist_ok=True)\n else:\n newpath = old_path.parent\n\n if new_ext is not None and not str(new_ext).startswith(\".\"):\n new_ext = \".\" + new_ext\n\n # replace filename?\n if new_filename is not None:\n # replace file ext?\n if new_ext is not None:\n newpath = newpath.joinpath(new_filename + new_ext)\n else:\n newpath = newpath.joinpath(new_filename + old_ext)\n else:\n if new_ext is not None:\n newpath = newpath.joinpath(old_filename + new_ext)\n else:\n newpath = newpath.joinpath(old_filename + old_ext)\n\n return newpath\n\n @staticmethod\n def delete_file(filepath):\n try:\n os.remove(filepath)\n except Exception as e:\n logger.error(\"Unable to delete %s : %s\", filepath, e)\n\n def update_rttm_fileids(self, ali_far_path: Union[str, Path]):\n \"\"\"By default, Ali Far rttms named as X####_X####.rttm whereas\n wav files named as X####_X####_A####.wav. After renaming using\n match_rename_far_rttms_to_audio(), use this to update RTTM file\n contents to reflect the correct file_id.\n \"\"\"\n for split in [\"train\", \"val\", \"test\"]:\n ali_far_rttm_paths = (\n Path(ali_far_path).joinpath(split).joinpath(\"rttm\").glob(\"*.rttm\")\n )\n for rttm_path in ali_far_rttm_paths:\n self.update_rttm_fileid(rttm_path)\n\n def update_rttm_fileid(self, rttm_path):\n \"\"\"After some renaming of audio or RTTM files, the RTTM contents\n itself still refers to the old file_id and will not match. 
Once\n an RTTM file is named correctly to match its corresponding\n audio, run this to update its contents to correct the file_id's\n in all of its speech segments.\n \"\"\"\n rttm_path = Path(rttm_path)\n speech_segments = sseg.read_rttm(rttm_path)\n sseg.write_rttm(\n speech_segments,\n rttm_path,\n file_id=Path(rttm_path).stem,\n )\n\n\nclass AmiPrepDataFolders(PrepDataFolders):\n \"\"\"\n Extends PrepDataFolders base class for AMI dataset-specific\n functionality.\n\n \"\"\"\n\n def __init__(self, data_splits: Dict):\n super().__init__()\n\n self._TRAIN_SAMPLE_IDS = data_splits[\"train_ids\"]\n self._VALIDATION_SAMPLE_IDS = data_splits[\"val_ids\"]\n self._TEST_SAMPLE_IDS = data_splits[\"test_ids\"]\n\n self._SAMPLE_IDS = {\n \"train\": self._TRAIN_SAMPLE_IDS,\n \"val\": self._VALIDATION_SAMPLE_IDS,\n \"test\": self._TEST_SAMPLE_IDS,\n }\n\n def train_val_test_split(self, filelist: List[Path]) -> List[List[Path]]:\n \"\"\"Given a list of filepaths, refer against predefined lists of\n train/val/test splits and split the list accordingly into\n separate train, val, and test file lists.\n\n Args:\n filelist (List[Path]): a list of file Paths\n\n Returns:\n List[List[Path]]: _description_\n \"\"\"\n train_filepaths = []\n val_filepaths = []\n test_filepaths = []\n\n for filepath in filelist:\n for file_id in self._TRAIN_SAMPLE_IDS:\n match = re.search(file_id, str(filepath))\n if match:\n train_filepaths.append(filepath)\n\n for file_id in self._VALIDATION_SAMPLE_IDS:\n match = re.search(file_id, str(filepath))\n if match:\n val_filepaths.append(filepath)\n\n for file_id in self._TEST_SAMPLE_IDS:\n match = re.search(file_id, str(filepath))\n if match:\n test_filepaths.append(filepath)\n\n return train_filepaths, val_filepaths, test_filepaths\n\n @staticmethod\n def rename_xmlfiles(filelist: List[Path]) -> List[Path]:\n \"\"\"Given a list of AMI xml filepaths, rename the ending suffix\n to match the AMI near field wave file suffixes.\n\n Currently they are:\n EN2001a.A.segments.xml\n EN2001a.Headset-0.wav\n\n We will rename to:\n EN2001a.Headset-0.xml * match the XML to the wave file name\n EN2001a.Headset-0.wav\n\n\n Args:\n filelist (List[Path]): list of XML file paths\n\n Returns:\n List[str]: List of renamed XML file paths.\n \"\"\"\n newfilelist = []\n for filepath in filelist:\n new_filepath = str(filepath)\n if str(filepath).endswith(\".A.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".A.segments.xml\", \".Headset-0.xml\"\n )\n elif str(filepath).endswith(\".B.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".B.segments.xml\", \".Headset-1.xml\"\n )\n elif str(filepath).endswith(\".C.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".C.segments.xml\", \".Headset-2.xml\"\n )\n elif str(filepath).endswith(\".D.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".D.segments.xml\", \".Headset-3.xml\"\n )\n elif str(filepath).endswith(\".E.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".E.segments.xml\", \".Headset-4.xml\"\n )\n elif str(filepath).endswith(\".F.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".F.segments.xml\", \".Headset-5.xml\"\n )\n elif str(filepath).endswith(\".G.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".G.segments.xml\", \".Headset-6.xml\"\n )\n elif str(filepath).endswith(\".H.segments.xml\"):\n new_filepath = str(filepath).replace(\n \".H.segments.xml\", \".Headset-7.xml\"\n )\n elif str(filepath).endswith(\".I.segments.xml\"):\n new_filepath = str(filepath).replace(\n 
\".I.segments.xml\", \".Headset-8.xml\"\n )\n\n newfilelist.append(new_filepath)\n\n shutil.move(str(filepath), new_filepath)\n\n return newfilelist\n\n\nclass AliPrepDataFolders(PrepDataFolders):\n \"\"\"\n Extends PrepDataFolders base class for Ali dataset-specific\n functionality.\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n return\n\n # ! - TODO catch error for name not found in train/val/test.\n # or if any are missing.\n def match_rename_far_textgrid_to_audio(self, ali_far_path: Union[Path, str]):\n \"\"\"By default, Ali Far textgrids named as X####_X####.textgrid\n whereas wav files named as X####_X####_A####.wav. This renames\n the textgrids to match the wave files.\n \"\"\"\n basepath = Path(ali_far_path)\n\n for split in [\"train\", \"val\", \"test\"]:\n print(basepath.joinpath(split))\n\n audio_paths = basepath.joinpath(split).joinpath(\"audio\").glob(\"*.wav\")\n tgrid_paths = (\n basepath.joinpath(split).joinpath(\"textgrid\").glob(\"*.TextGrid\")\n )\n audio_paths = sorted(audio_paths)\n tgrid_paths = sorted(tgrid_paths)\n\n for audio_path in audio_paths:\n for tgrid_path in tgrid_paths:\n result = re.search(\n str(tgrid_path.stem),\n str(audio_path.stem),\n )\n if result:\n os.rename(\n tgrid_path,\n tgrid_path.parent.joinpath(\n audio_path.stem + tgrid_path.suffix\n ),\n )\n\n def match_rename_far_rttms_to_audio(self, ali_far_path: Union[Path, str]):\n \"\"\"By default, Ali Far rttms named as X####_X####.rttm whereas\n wav files named as X####_X####_A####.wav. This renames the rttms\n to match the wave files.\n \"\"\"\n basepath = Path(ali_far_path)\n\n results = {}\n for split in [\"train\", \"val\", \"test\"]:\n # print(basepath.joinpath(split))\n\n audio_paths = basepath.joinpath(split).joinpath(\"audio\").glob(\"*.wav\")\n rttm_paths = basepath.joinpath(split).joinpath(\"rttm\").glob(\"*.rttm\")\n audio_paths = sorted(audio_paths)\n rttm_paths = sorted(rttm_paths)\n\n new_rttm_paths = []\n for audio_path in audio_paths:\n for rttm_path in rttm_paths:\n result = re.search(\n str(rttm_path.stem),\n str(audio_path.stem),\n )\n if result:\n new_rttm_path = rttm_path.parent.joinpath(\n audio_path.stem + rttm_path.suffix\n )\n os.rename(\n rttm_path,\n new_rttm_path,\n )\n new_rttm_paths.append(new_rttm_path)\n\n results[split] = new_rttm_paths\n\n return results\n\n def split_far_multichannel(self, ali_far_path: Union[str, Path]):\n \"\"\"Specifically for Ali Far, all the audio files are actually\n 8-channel wave files. This splits them into 8 mono files named\n 1.wav to 8.wav. 
Also duplicates the RTTM\n files to match.\n \"\"\"\n ali_far_path = Path(ali_far_path)\n\n for split in [\n \"test\",\n \"train\",\n \"val\",\n ]:\n audio_paths = ali_far_path.joinpath(split).joinpath(\"audio\").glob(\"*.wav\")\n\n # for each audio/rttm pair\n for audio_path in audio_paths:\n # check how many channels\n info = sf.info(str(audio_path))\n num_channels = int(info.channels)\n\n if num_channels > 1:\n signal, sr = self.read_audio(audio_path)\n\n rttm_path = self.toggle_audio_rttm_path(audio_path)\n\n # split n ways:\n for channel in range(num_channels):\n # make new audio file\n signal_ch = signal[:, channel]\n audio_ch_path = audio_path.parent.joinpath(\n str(audio_path.stem)\n + \"_\"\n + str(channel + 1)\n + str(audio_path.suffix)\n )\n status = self.save_audio(audio_ch_path, signal_ch, sr)\n # if error saving audio (it should have already thrown error)\n # but just additionally break the loop here.\n if status == -1:\n break\n\n # make new RTTM\n rttm_ch_path = rttm_path.parent.joinpath(\n str(audio_path.stem) + \"_\" + str(channel + 1) + \".rttm\"\n )\n\n status = self.duplicate_rttm_file(rttm_path, rttm_ch_path)\n if status == -1:\n break\n\n # success\n logger.info(\n \"Saved channel %s as %s\", channel + 1, audio_ch_path\n )\n\n del signal, sr\n self.delete_file(rttm_path)\n self.delete_file(audio_path)\n","repo_name":"gammaraysky/fastapi_celery_model_serving","sub_path":"src/klass/utils/data_prep/prepdatafolders.py","file_name":"prepdatafolders.py","file_ext":"py","file_size_in_byte":19409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28632088912","text":"from django.shortcuts import get_list_or_404, get_object_or_404, render\nfrom django.http import Http404\n\nfrom recipes.models import Recipe\nfrom django.db.models import Q\n\nfrom django.core.paginator import Paginator\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import cache_control\n\n\n# from django.contrib import messages\n\n# LIST ALL OF EMPLOYEES\n@login_required(login_url='/login/')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef home(request):\n recipes = Recipe.objects.all().filter(\n is_published=True\n ).order_by('-id')\n\n # Pagination\n pagination = Paginator(recipes, 8)\n current_page = request.GET.get('page', 1)\n recipes_pages = pagination.get_page(current_page)\n\n return render(request, 'recipes/pages/home.html', context={\n 'recipes': recipes_pages\n # 'recipes':[factory.make_recipe() for _ in range(10)]\n })\n\n# LIST ALL OF EMPLOYEES\n\n\n@login_required(login_url='/login/')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef category(request, category_id):\n recipes = get_list_or_404(Recipe.objects.filter(\n category__id=category_id,\n is_published=True\n ).order_by('-id')\n )\n\n # Pagination\n pagination = Paginator(recipes, 8)\n current_page = request.GET.get('page', 1)\n recipes_pages = pagination.get_page(current_page)\n\n return render(request, 'recipes/pages/category.html', context={\n 'recipes': recipes_pages,\n 'title': f'{recipes[0].category.name} - Category'\n })\n\n# LIST ALL OF EMPLOYEES\n\n\n@login_required(login_url='/login/')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef recipe(request, id):\n recipe = get_object_or_404(Recipe, pk=id, is_published=True)\n\n return render(request, 'recipes/pages/recipe-view.html', context={\n 'recipe': recipe,\n 'is_detail_page': True,\n })\n\n# Search 
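The `split_far_multichannel` method above relies on `soundfile` returning a `(frames, channels)` array for multichannel wavs. A minimal standalone sketch of the same channel-split step, assuming a hypothetical multichannel file `meeting.wav` (mono inputs would need a separate branch, since `sf.read` returns a 1-D array for them):

```python
import soundfile as sf
from pathlib import Path

def split_channels(wav_path):
    """Split a multichannel wav into mono files named <stem>_1.wav, <stem>_2.wav, ..."""
    wav_path = Path(wav_path)
    signal, sr = sf.read(str(wav_path))            # shape: (frames, channels) for multichannel input
    n_channels = sf.info(str(wav_path)).channels
    for ch in range(n_channels):
        out = wav_path.parent / f"{wav_path.stem}_{ch + 1}{wav_path.suffix}"
        sf.write(str(out), signal[:, ch], sr, subtype="PCM_16")

split_channels("meeting.wav")  # hypothetical input file
```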
recipes\n\n# LIST ALL OF EMPLOYEES\n\n\n@login_required(login_url='/login/')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef search(request):\n search_term = request.GET.get('q', '').strip() # Get value of input\n\n if not search_term:\n raise Http404() # Return http code 404 if not exist\n\n # Return recipe if contains search term\n recipes = Recipe.objects.filter(\n Q(\n Q(title__icontains=search_term) |\n Q(description__icontains=search_term)\n ),\n is_published=True\n\n ).order_by('-id')\n\n # messages.error(request, 'Error!')\n # messages.success(request, 'All right!')\n # messages.info(request, 'Info')\n\n pagination = Paginator(recipes, 8)\n current_page = request.GET.get('page', 8)\n recipes_pages = pagination.get_page(current_page)\n\n return render(request, 'recipes/pages/search.html', context={\n 'search_term': search_term, 'search_title_term': f'Search for \"{search_term}\"', 'recipes': recipes_pages, 'aditional_query': f'&q={search_term}'\n })\n","repo_name":"Isaquesantos7/recipes","sub_path":"recipes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36151798475","text":"import os\nimport numpy as np\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D, PReLU\nfrom keras.optimizers import SGD, Adam, Adamax\nfrom keras.layers.convolutional import Conv2D\nfrom keras.utils import np_utils, plot_model\nfrom keras import backend as K\nfrom keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint, EarlyStopping\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom skimage import io\nfrom keras.preprocessing import image\nfrom keras import regularizers\n\ndef model_builder(input_shape, num_classes):\n model = Sequential()\n\n model.add(Conv2D(32, (3, 3), input_shape=input_shape))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(Conv2D(32, (3, 3)))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(Conv2D(64, (3, 3)))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(Dense(256))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(Dense(128))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(Dense(64))\n model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)\n\n model.compile(loss='mean_squared_error',\n optimizer=opt,\n metrics=['accuracy'])\n print(model.summary())\n return model\n\ndef model_train(batch_size, img_width, img_height, epochs):\n nb_train_samples = 3699\n nb_validation_samples = 1113\n train_data_dir = '../alt_data_2/data/train'\n validation_data_dir = '../alt_data_2/data/validation'\n # this is the augmentation configuration I will use for training\n train_datagen = ImageDataGenerator(\n rescale=1. 
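The search view above combines `Q` objects for an OR match over title and description; note it passes `8` as the default page number (`request.GET.get('page', 8)`), which looks like a typo for `1` given the other views. A minimal sketch of the same filter-and-paginate pattern, assuming the `Recipe` model from this app:

```python
from django.core.paginator import Paginator
from django.db.models import Q
from recipes.models import Recipe

def paged_search(search_term, page=1, per_page=8):
    # OR-match on title/description, restricted to published recipes
    recipes = Recipe.objects.filter(
        Q(title__icontains=search_term) | Q(description__icontains=search_term),
        is_published=True,
    ).order_by('-id')
    return Paginator(recipes, per_page).get_page(page)
```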
/ 255,\n shear_range=0.0,\n zoom_range=0.2,\n horizontal_flip=False)\n\n # this is the augmentation configuration I will use for testing:\n # only rescaling\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical')\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical')\n\n csvlog = CSVLogger('../log/model_log_5class.csv', separator=',', append=True)\n earlystop = EarlyStopping(monitor='val_acc', min_delta=0.0, patience=130, verbose=1, mode='auto')\n checkpointer = ModelCheckpoint(filepath='../checkpoints/the_weights_5class.hdf5', monitor='val_acc', verbose=1, save_best_only=True)\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size,\n callbacks = [csvlog, checkpointer, earlystop])\n\n mod_plot = plot_model(model, to_file='model_5class.png')\n model.save_weights('mod3_w_good_5class.h5')\n print('Weights Saved!')\n model.save('final_5class.hdf5')\n print('Model Saved!')\npass\n\ndef predict_class():\n model = load_model('final_5class.hdf5')\n imag = input('File to predict on: ')\n img_path = '../predict/unknown/{}.jpg'.format(imag)\n img = image.load_img(img_path, target_size=(224, 224))\n im2 = image.img_to_array(img)\n im2 = np.expand_dims(im2, axis=0)\n predict = model.predict(x, batch_size = 32, verbose = 1)\n return predict\n\nif __name__ == '__main__':\n # number of convolutional filters to use\n nb_filters = 8\n # size of pooling area for max pooling\n pool_size = (2, 2) # decreases image size, and helps to avoid overfitting\n # convolution kernel size\n kernel_size = (3, 3) # slides over image to learn features\n # image dims\n img_width, img_height = 130, 130\n # number of proccess runs\n epochs = 500\n #number of samples per run\n batch_size = 30\n #number of classes for final layer\n num_classes = 5\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 3)\n\n model = model_builder(input_shape, num_classes)\n model_train(batch_size, img_width, img_height, epochs)\n","repo_name":"jldana/judging-a-movie-by-its-cover","sub_path":"src/model_5class.py","file_name":"model_5class.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21614016280","text":"import random\r\nimport click\r\n\r\n\r\nclass Server:\r\n def __init__(self):\r\n self.connections = {}\r\n\r\n def add_connection(self, connection_id):\r\n connection_load = random.random()*10+1\r\n self.connections[connection_id] = connection_load\r\n\r\n def close_connection(self, connection_id):\r\n del self.connections[connection_id]\r\n\r\n def load(self):\r\n total = 0\r\n for connection_load in self.connections.values():\r\n total += connection_load\r\n return total\r\n\r\n def __str__(self):\r\n return '{:.2f}'.format(self.load())\r\n\r\n\r\nclass LoadBalancing:\r\n def __init__(self):\r\n self.connections = {}\r\n self.servers = [Server()]\r\n\r\n def add_connection(self, connection_id):\r\n self.ensure_availability()\r\n server = random.choice(self.servers)\r\n self.connections[connection_id] = server\r\n 
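`predict_class` above references an undefined `x`, never imports `load_model`, and loads images at 224x224 even though the network is trained at 130x130 with a 1/255 rescale. A corrected sketch under those training settings:

```python
import numpy as np
from keras.models import load_model
from keras.preprocessing import image

def predict_class(img_path, model_path='final_5class.hdf5'):
    model = load_model(model_path)
    img = image.load_img(img_path, target_size=(130, 130))  # match the training image size
    arr = image.img_to_array(img) / 255.0                   # match the generator's 1/255 rescale
    arr = np.expand_dims(arr, axis=0)                       # add the batch dimension
    return model.predict(arr, batch_size=1, verbose=1)
```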
server.add_connection(connection_id)\r\n\r\n def close_connection(self, connection_id):\r\n server = self.connections[connection_id]\r\n server.close_connection(connection_id)\r\n del self.connections[connection_id]\r\n\r\n def avg_load(self):\r\n total = 0\r\n n = len(self.servers)\r\n for server in self.servers:\r\n total += server.load()\r\n return total\r\n\r\n def ensure_availability(self):\r\n if self.avg_load() > 20:\r\n self.servers.append(Server())\r\n\r\n def __str__(self):\r\n loads = [str(server.load()) for server in self.servers]\r\n return '[{}]'.format(', '.join(loads))\r\n\r\n\r\n@click.command()\r\ndef main():\r\n '''\r\n Your load balancer initially has 1 server and spins up a new one if avg_load exceeds 20.\r\n '''\r\n l = LoadBalancing()\r\n click.secho('[[Type [DONE] to exit...]]', fg='red', bold=True)\r\n # Interactive Session\r\n while True:\r\n click.secho('[+] Add a new connection(id): ', fg='white', bg='blue')\r\n connection_id = input()\r\n if connection_id == 'DONE':\r\n break\r\n if connection_id.startswith('+'):\r\n temp = int(connection_id[1:])\r\n for connection in range(temp):\r\n l.add_connection(connection)\r\n else:\r\n l.add_connection(connection_id)\r\n click.secho('[*] Average Load currently: {:.4f} and servers: {}'.format(\r\n l.avg_load(), len(l.servers)), fg='white', bg='red')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"greenwayRocks/Workspace","sub_path":".suspend/python/IT Automation/os_linux/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"74895817690","text":"# Definition for singly-linked list.\nfrom typing import Optional, List\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def nextLargerNodes(self, head: Optional[ListNode]) -> List[int]:\n\n cur = head\n stack = [] # monotonic decreasing stack\n\n curIdx = 0\n n = 0\n while cur:\n n += 1\n cur = cur.next\n result = [0] * n\n\n cur = head\n while cur:\n while stack and stack[-1][0] < cur.val:\n val, idx = stack.pop()\n result[idx] = cur.val\n stack.append((cur.val, curIdx))\n cur = cur.next\n curIdx += 1\n return result\n","repo_name":"debbs061/algorithm","sub_path":"src/1019-next-greater-node-in-linked-list.py","file_name":"1019-next-greater-node-in-linked-list.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33354908454","text":"\"\"\"basetimecontentstable.py\nA node component which implements content information in its dataset.\n\"\"\"\n# Package Header #\nfrom ....header import *\n\n# Header #\n__author__ = __author__\n__credits__ = __credits__\n__maintainer__ = __maintainer__\n__email__ = __email__\n\n\n# Imports #\n# Standard Libraries #\nimport pathlib\nfrom typing import Any\n\n# Third-Party Packages #\nfrom dspobjects.time import Timestamp\nfrom sqlalchemy.orm import DeclarativeBase, Session\nfrom sqlalchemy.ext.asyncio import AsyncAttrs, AsyncSession, async_sessionmaker\n\n# Local Packages #\nfrom ..bases import BaseMetaInformationTable, BaseTimeContentsTable\nfrom .contentsfile import ContentsFile\n\n\n# Definitions #\n# Classes #\nclass TimeContentsFileAsyncSchema(AsyncAttrs, DeclarativeBase):\n pass\n\n\nclass TimeMetaInformationTable(BaseMetaInformationTable, TimeContentsFileAsyncSchema):\n pass\n\n\nclass 
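`avg_load` in the load balancer above computes `n = len(self.servers)` but never divides by it, so it actually returns the *total* load and `ensure_availability` spins up a new server once total (not average) load passes 20. A corrected sketch:

```python
def avg_load(self):
    # Average load across servers; guard against division by zero.
    total = sum(server.load() for server in self.servers)
    return total / len(self.servers) if self.servers else 0
```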
TimeContentsTable(BaseTimeContentsTable, TimeContentsFileAsyncSchema):\n pass\n\n\nclass TimeContentsFile(ContentsFile):\n \"\"\"\n\n Class Attributes:\n\n Attributes:\n\n Args:\n\n \"\"\"\n schema: type[DeclarativeBase] = TimeContentsFileAsyncSchema\n meta_information_table: type[BaseMetaInformationTable] = TimeMetaInformationTable\n contents: type[BaseTimeContentsTable] = TimeContentsTable\n\n # Magic Methods #\n # Construction/Destruction\n def get_start_datetime(self, session: Session | None = None) -> Timestamp:\n if session is not None:\n return self.contents.get_start_datetime(session=session)\n elif self.is_open:\n with self.create_session() as session:\n return self.contents.get_start_datetime(session=session)\n else:\n raise IOError(\"File not open\")\n\n async def get_start_datetime_async(\n self,\n session: async_sessionmaker[AsyncSession] | AsyncSession | None = None,\n ) -> Timestamp:\n if session is not None:\n return await self.contents.get_start_datetime_async(session=session)\n elif self.is_open:\n return await self.contents.get_start_datetime_async(session=self.async_session_maker)\n else:\n raise IOError(\"File not open\")\n\n def get_end_datetime(self, session: Session | None = None) -> Timestamp:\n if session is not None:\n return self.contents.get_end_datetime(session=session)\n elif self.is_open:\n with self.create_session() as session:\n return self.contents.get_end_datetime(session=session)\n else:\n raise IOError(\"File not open\")\n\n async def get_end_datetime_async(\n self,\n session: async_sessionmaker[AsyncSession] | AsyncSession | None = None,\n ) -> Timestamp:\n if session is not None:\n return await self.contents.get_end_datetime_async(session=session)\n elif self.is_open:\n return await self.contents.get_end_datetime_async(session=self.async_session_maker)\n else:\n raise IOError(\"File not open\")\n \n def get_contents_nanostamps(self, session: Session | None = None) -> tuple[tuple[int, int, int], ...]:\n if session is not None:\n return self.contents.get_all_nanostamps(session=session)\n elif self.is_open:\n with self.create_session() as session:\n return self.contents.get_all_nanostamps(session=session)\n else:\n raise IOError(\"File not open\")\n\n async def get_contents_nanostamps_async(\n self,\n session: async_sessionmaker[AsyncSession] | AsyncSession | None = None,\n ) -> tuple[tuple[int, int, int], ...]:\n if session is not None:\n return await self.contents.get_all_nanostamps_async(session=session)\n elif self.is_open:\n return await self.contents.get_all_nanostamps_async(session=self.async_session_maker)\n else:\n raise IOError(\"File not open\")\n","repo_name":"FongAnthonyM/python-cdfs","sub_path":"src/cdfs/contentsfile/sqlite/files/timecontentsfile.py","file_name":"timecontentsfile.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39550301352","text":"import serial\nimport sys\nimport array as buf_array\n# import time\n# from LogFile import LogFile\n\nUART_SIZE_PACKET = 32\n\nUCIP_TITLE_SIZE = 2 # 2 bytes - 'G' + 'R'\nUCIP_PACKET_SIZE = 2 # 2 bytes - size packet in bytes\nUCIP_CMD_SIZE = 1 # 1 byte - command/status\nUCIP_CRC_SIZE = 1 # 1 byte\nUCIP_DATA_SIZE = (UART_SIZE_PACKET - UCIP_TITLE_SIZE - UCIP_PACKET_SIZE - UCIP_CMD_SIZE - UCIP_CRC_SIZE) # 18 bytes\n\n\nUCIP_INDEX_TITLE = 0\nUCIP_INDEX_PACKET_SIZE = (UCIP_INDEX_TITLE + UCIP_TITLE_SIZE)\nUCIP_INDEX_CMD = (UCIP_INDEX_PACKET_SIZE + UCIP_PACKET_SIZE)\nUCIP_INDEX_DATA = 
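Each accessor above follows the same pattern: use an explicit `Session` if one is passed, otherwise open a session via `create_session()` while the file is open, else raise `IOError`. A hedged usage sketch; the constructor arguments and `open()`/`close()` calls are assumptions about the `ContentsFile` base class, not confirmed by this excerpt:

```python
# Hypothetical usage; assumes ContentsFile provides open()/close() and a path argument.
contents = TimeContentsFile(path="session.sqlite3")  # hypothetical constructor signature
contents.open()
try:
    start = contents.get_start_datetime()           # opens its own session internally
    with contents.create_session() as session:      # or reuse one session for several reads
        end = contents.get_end_datetime(session=session)
finally:
    contents.close()
```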
(UCIP_INDEX_CMD + UCIP_CMD_SIZE)\nUCIP_INDEX_STATE = UCIP_INDEX_DATA\nUCIP_INDEX_ERROR = UCIP_INDEX_DATA\n\n\nclass UartTerminal(object):\n def __init__(self):\n self.set_current = 0\n self.real_current = 0\n self.command = 0\n self.state = 0\n self.error = 0\n self.ComPort = None\n self.rx_data = buf_array.array('B')\n self.tx_data = buf_array.array('B')\n for i in range(UCIP_DATA_SIZE):\n self.rx_data.append(0)\n for i in range(UCIP_DATA_SIZE):\n self.tx_data.append(0)\n # self.set_current = 1128\n\n def get_set_current(self):\n return self.set_current\n\n def get_real_current(self):\n return self.real_current\n\n def get_command(self):\n return self.command\n\n def get_state(self):\n return self.state\n\n def get_error(self):\n return self.error\n\n def get_size_data(self):\n return UCIP_DATA_SIZE\n\n def get_rx_data(self):\n return self.rx_data\n\n def get_tx_data(self):\n return self.tx_data\n\n def open(self, com_port, baud_rate):\n # com_port = 'COM3'\n # baud_rate = 115200\n try:\n self.ComPort = serial.Serial(com_port, baud_rate, timeout=0.5)\n except serial.SerialException:\n print(\"Serial Exception:\")\n print(sys.exc_info())\n return 1\n print(self.ComPort.out_waiting)\n print(self.ComPort.get_settings())\n print(self.ComPort.reset_output_buffer())\n return 0\n\n def read_module(self):\n read_data = self.ComPort.read(UART_SIZE_PACKET)\n len_data = len(read_data)\n if len_data == 0: #\n return 1, read_data # 'No answer'\n\n crc = self.calc_crc(read_data, UART_SIZE_PACKET - 1)\n if crc != read_data[UART_SIZE_PACKET - 1]:\n return 2, read_data # 'CRC Error'\n\n self.command = read_data[UCIP_INDEX_CMD]\n self.state = read_data[UCIP_INDEX_CMD]\n self.error = read_data[UCIP_INDEX_DATA]\n for i in range(UCIP_DATA_SIZE):\n self.rx_data[i] = read_data[i + UCIP_INDEX_DATA]\n\n return 0, read_data # 'OK'\n\n def send_module(self, command, data):\n # self.set_current = 1128\n # command = 0x00\n buffer = buf_array.array('B', [0x47, 0x52]) # 'G', 'R'\n buffer.append(UART_SIZE_PACKET & 0xFF)\n buffer.append(0)\n buffer.append(command)\n for i in range(UCIP_DATA_SIZE):\n buffer.append(data[i])\n\n crc = self.calc_crc(buffer, UART_SIZE_PACKET - 1)\n buffer.append(crc)\n self.ComPort.write(buffer) # send command to module\n\n def calc_crc(self, buf_data, size_data):\n crc = 0\n for i in range(0, size_data):\n crc = crc + buf_data[i]\n crc = crc + 1\n crc = crc & 0xFF\n return crc\n\n\n # print(buffer)\n # print(len(buffer))\n\n","repo_name":"aenapple/Grizzly_py","sub_path":"UartTerminal_v6.py","file_name":"UartTerminal_v6.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74348667611","text":"import os\nfrom nexarClients.design.nexarDesignClient import NexarClient\n\nLOCATION_QUERY = '''\nquery WorkspaceLocations {\n desWorkspaceLocations {\n name\n apiServiceUrl\n }\n }'''\n\nWORKSPACES_QUERY = '''\nquery Workspaces {\n desWorkspaces {\n id\n name\n url\n }\n }'''\n\n\ndef get_available_regions(queryLocations, nexar):\n for index, location in enumerate(queryLocations):\n print(index + 1, \": \", location[\"name\"])\n\n region = input(\"\\n\" + \"Enter index to request to : \")\n region = int(region)\n\n if region <= 0 or region > len(queryLocations):\n print(\"\\n\" + \"Invalid response, try again... 
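The checksum above is a plain byte sum plus one, truncated to 8 bits, computed over the first 31 bytes of the 32-byte packet. A worked sketch of the same scheme on a full-size buffer:

```python
def calc_crc(buf, size):
    # Sum the first `size` bytes, add 1, keep the low 8 bits.
    return (sum(buf[:size]) + 1) & 0xFF

# title 'G','R' | size 32 | size high byte 0 | cmd 0x01 | 26 data bytes of zero
packet = bytearray(b'GR') + bytes([32, 0, 0x01]) + bytes(26)
crc = calc_crc(packet, len(packet))  # (0x47 + 0x52 + 32 + 0 + 1) + 1 = 187 = 0xBB
packet.append(crc)
assert len(packet) == 32
```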
\" + \"\\n\")\n get_available_regions(queryLocations, nexar)\n\n else:\n queryLocation = queryLocations[region - 1]\n regionUrl = queryLocation[\"apiServiceUrl\"]\n queryResult = nexar.get_query(WORKSPACES_QUERY, {\"nexar_url\": regionUrl})[\"desWorkspaces\"]\n print(queryResult)\n\n\nif __name__ == '__main__':\n\n clientId = os.environ[\"NEXAR_CLIENT_ID\"]\n clientSecret = os.environ[\"NEXAR_CLIENT_SECRET\"]\n nexar = NexarClient(clientId, clientSecret, [\"design.domain\", \"user.access\", \"offline_access\"])\n\n queryLocations = nexar.get_query(LOCATION_QUERY)[\"desWorkspaceLocations\"]\n\n print(\"\\n\" + \"Current available regions:\" + \"\\n\")\n get_available_regions(queryLocations, nexar)\n","repo_name":"NexarDeveloper/nexar-examples-py","sub_path":"examplePrograms/design_regionalization.py","file_name":"design_regionalization.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"70251372891","text":"import sys\n\nif(len(sys.argv) != 2):\n\tprint(\"\\nUso: 135_Imprimir_arquivo.py numeros.txt\\n\\n\")\nelse:\n\tnome=sys.argv[1]\n\tarquivo=open(nome,\"r\")\n\tfor linha in arquivo.readlines():\n\t\tprint (linha[:-1])\n\tarquivo.close()","repo_name":"fernandochimi/Intro_Python","sub_path":"Exercícios/135_Imprimir_arquivo.py","file_name":"135_Imprimir_arquivo.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35057269668","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 7 08:18:15 2018\n\n@author: jithin\n\nHW1 for 585\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n'''\n'''\ndef rho(x):\n return 1+x**2\n \ndef tridiag(a, b, c, k1=-1, k2=0, k3=1):\n return np.diag(a, k1) + np.diag(b, k2) + np.diag(c, k3)\n \ndef mat(thetavec,L):\n N = len(thetavec)\n h =L/N\n a1 = np.append([0.7],thetavec[:-1])\n a2 = np.append(thetavec[1:],[0.7])\n b = -2*thetavec+ a1+a2 + (h**2)* np.sin(thetavec)\n return b\n\n\ndef jacobian(thetavec,L):\n N = len(thetavec)\n h =L/N\n a = np.ones(N-1)\n b = -2 + (h**2)* np.cos(thetavec)\n return tridiag(a,b,a) \n\n\nN=500\nL = 70*np.pi\nx = np.linspace(0,L,N)\nthetavec = 0.7+np.sin(x/2)\nfor i in range(40):\n #print(thetavec)\n b = -mat(thetavec,L)\n A = jacobian(thetavec,L)\n delta = np.linalg.solve(A,b)\n thetavec += delta\nplt.plot(x,thetavec)\nplt.title(\"T = 70$\\pi$\" )\n\n","repo_name":"Dirivian/Current-Work","sub_path":"hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74882978332","text":"import os\nfrom typing import Optional, List\n\nimport typer\nfrom gymnasium.utils.play import play as gym_play\nfrom rich import print\n\nfrom crete.cli.cli_utils import _convert_to_key_value_list\nfrom crete.cli.list import list_app\nfrom crete.cli.module import module_app\nfrom crete.cli.concfile import concfile_app\nfrom crete.core import create_env_factory, create_agent, create_save_callback, load_extra_modules\nfrom crete.error import ProfilePropertyNotFound\nfrom crete.global_state import get_cli_state\nfrom crete.file.profile import read_profile, Profile\nfrom crete.file.concrete import ConcreteFile\nfrom crete.util import print_err, print_ex\n\napp = typer.Typer()\napp.add_typer(list_app, name=\"list\")\napp.add_typer(concfile_app, name=\"concfile\")\napp.add_typer(module_app, 
name=\"module\")\n\n__app_name__ = \"crete\"\n__version__ = \"0.2.0\"\n__app_logo__ = r\"\"\"\n _ \n ___ _ __ ___| |_ ___ \n / __| '__/ _ \\ __/ _ \\\n| (__| | | __/ || __/\n \\___|_| \\___|\\__\\___|\n \"\"\"\n\n\ndef crete_app():\n app(prog_name=__app_name__)\n\n\ndef _version_callback(value: bool) -> None:\n if value:\n typer.echo(__app_logo__)\n print(f\"[bold black]RL Training Assistant[/] [green]v{__version__}[/]\")\n raise typer.Exit()\n\n\n@app.callback()\ndef main(\n debug_mode: bool = typer.Option(\n False,\n \"--dbg\",\n help=\"Active debug mode. Triggers things like extra logging.\"\n ),\n version: Optional[bool] = typer.Option(\n None,\n \"--version\",\n \"-v\",\n help=\"Show the application's version and exit.\",\n callback=_version_callback,\n is_eager=True,\n )\n) -> None:\n get_cli_state().debug_mode = debug_mode\n\n\n@app.command()\ndef train(\n arg_profile_path: str,\n arg_target_profile: str = typer.Argument(\n ...,\n help=\"The name of the profile to use\"\n ),\n opt_out_dir: str = typer.Option(\n \".\",\n \"--out\",\n \"-o\",\n help=\"Directory to place trained agents.\"\n ),\n opt_as: str = typer.Option(\n None,\n \"--as\",\n help=\"Filename override to save as.\"\n ),\n opt_override: bool = typer.Option(\n False,\n \"--override\",\n \"-x\",\n help=\"If false, don't run retrain profiles that already have an file.\"\n )\n):\n \"\"\"\n Train an agent on an environment.\n \"\"\"\n\n load_extra_modules()\n\n # Load config.\n try:\n print(f\"Loading profiles `{arg_profile_path}`... \", end=\"\")\n profiles = read_profile(arg_profile_path)\n print(\"[bold green]success![/]\")\n except RuntimeError:\n print(\"[bold red]failure![/]\")\n raise typer.Abort()\n\n if arg_target_profile not in profiles:\n print(f\"Profile {arg_target_profile} doesn't exist in {arg_profile_path}! Choices are;\")\n print(profiles.keys())\n raise typer.Abort()\n\n target_profile = profiles[arg_target_profile]\n\n _train_with_profile(target_profile, halt=True, out_dir=opt_out_dir, save_path=opt_as, override=opt_override)\n\n\n@app.command()\ndef batch(\n profile_path: str,\n opt_out_dir: str = typer.Option(\n \".\",\n \"--out\",\n \"-o\",\n help=\"Directory to place trained agents.\"\n ),\n opt_override: bool = typer.Option(\n False,\n \"--override\",\n \"-x\",\n help=\"If false, don't run retrain profiles that already have an file.\"\n )\n):\n \"\"\"\n Train all configurations within a profile as a batch.\n \"\"\"\n\n load_extra_modules()\n\n # Load config.\n try:\n print(f\"Loading profiles `{profile_path}`... \", end=\"\")\n profiles = read_profile(profile_path)\n print(\"[bold green]success![/]\")\n except RuntimeError:\n print(\"[bold green]failure![/]\")\n raise typer.Abort()\n\n # Train all profiles.\n for _, target_profile in profiles.items():\n _train_with_profile(target_profile, halt=False, out_dir=opt_out_dir, override=opt_override)\n\n\n@app.command()\ndef play(\n arg_env: str = typer.Argument(\n \"CartPole-v1\",\n help=\"The environment to play in\"\n ),\n opt_wrapper: str = typer.Option(\n None,\n \"--wrapper\",\n \"-w\"\n ),\n opt_seed: int = typer.Option(\n None,\n \"--seed\",\n \"-s\"\n ),\n opt_env_args: List[str] = typer.Option(\n [],\n \"--env-arg\",\n ),\n opt_fps: int = typer.Option(\n None,\n \"--fps\"\n )\n):\n \"\"\"Play the environment as a human. 
(Not for procrastination!)\"\"\"\n\n load_extra_modules()\n\n opt_env_args = _convert_to_key_value_list(opt_env_args)\n\n env_factory = create_env_factory(arg_env, opt_wrapper, render_mode='rgb_array', env_args=opt_env_args)\n env = env_factory(opt_seed)\n gym_play(env, fps=opt_fps)\n\n\ndef _train_with_profile(\n target_profile: Profile,\n halt: bool = False,\n out_dir: str = \".\",\n save_path: str = None,\n override=False\n):\n\n if save_path is None:\n save_path = f\"{target_profile.name}.cnc\"\n\n path = os.path.join(out_dir, save_path)\n if override is False and os.path.exists(path):\n print(f\"Profile {target_profile.name} already exists! To overwrite, --override, or set an alias with --as\")\n return\n\n env_factory = create_env_factory(\n target_profile.env_id,\n target_profile.env_wrapper,\n env_args=target_profile.env_args\n )\n agent, training_wrapper = create_agent(env_factory, target_profile.agent_id)\n\n print(f\"\\nProceeding to train a {target_profile.agent_id} on {target_profile.env_id} with config values:\")\n print(target_profile.config.to_dict())\n\n if halt:\n if typer.confirm(\"Ready to proceed?\", default=True) is False:\n return\n\n training_artifacts = {}\n try:\n save_callback = create_save_callback(\n target_profile.agent_id,\n target_profile.config.to_dict(),\n target_profile.env_wrapper,\n target_profile.env_id,\n target_profile.env_args\n )\n\n training_wrapper(env_factory, agent, target_profile.config, training_artifacts, save_callback)\n except ProfilePropertyNotFound as ex:\n print_ex(ex)\n except KeyboardInterrupt:\n print(\"[bold red]Training interrupted[/bold red].\")\n\n if halt:\n if typer.confirm(\"Save agent to disk?\") is False:\n return\n\n try:\n print(f\"Saving agent to disk ([italic]{path}[/]) ...\")\n data = agent.save()\n concfile = ConcreteFile(\n id=target_profile.agent_id,\n env_name=target_profile.env_id,\n agent_data=data,\n training_artifacts=training_artifacts,\n used_wrappers=target_profile.env_wrapper,\n config=target_profile.config.to_dict(),\n env_args=target_profile.env_args\n )\n concfile.write(path)\n except OSError as ex:\n print(\"[bold red]Saving failed![/] \" + ex.strerror)","repo_name":"geist-2501/crete","sub_path":"src/crete/cli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73286357852","text":"# Write a program that asks the user for a list of names ending with a blank line. \n# program then outputs a neat list containing the following, each on their own line,\n# labelled: The first name alphabetically. 
The last name alphabetically, the shortest name,\n# and the longest name.\n\nname = input(\"Type in a name: \")\nothername = \"ZZZZZ\" \nlength = \"aaaaaaaaaaaa\"\nwhile name != \"\":\n if name > othername:\n aname = name\n if name < aname: \n bname = name\n if len(name) < len(length):\n shortname = name\n elif len(name) > len(shortname):\n longname = name\n name = input(\"Type in a name: \") # when asking for this name it also has to be inside the loop in order to not just stay in the loop without any repeats\n \nprint (\"First alphabetical name \", aname)\nprint (\"Last aplhabetical name \", bname)\nprint (\"The shortest name is \", shortname)\nprint (\"The longest name is \", longname)\n \n \n ","repo_name":"Herna7liela/New","sub_path":"names2.py","file_name":"names2.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17898252030","text":"\"\"\" \n# lambda function [similar concept like cpp lambda]\n# python map [c++ map stl and python map is not same :O]\n# c++ stl map is similar to dictionary!\n# python map works like sort or for_each function \n# where comparator or lambda function can be inserted.\n# it is also same. provide the comparator function and the array.\n# it will call the function for each element of array\n \"\"\"\n\ndef square(x) :\n return x*x\n\nresult = square(6)\nprint(result)\n\n# Using Lambda\n\nsquare_lambda = lambda x : x*x\nresult = square_lambda(6)\nprint(result)\n\nadd = lambda x,y : x+y\nsum = add(45, 56)\nprint(sum)\n\n\n# Use of python map and lambda function\nnumbers = [12, 11, 10, 9 , 50]\ndouble_it = lambda x : x * 2\ndoubled_num = map(double_it, numbers)\ndoubled_num2 = map(lambda x : x* 2, numbers)\n\nprint(f'numbers: {numbers}\\ndoubled_num : {list(doubled_num)}\\ndoubled_num2 : {list(doubled_num2)}')\n\n\nbigger_numbers = filter(lambda num : num< 50, numbers)\nprint(numbers)\nprint (list(bigger_numbers))\n\nplayers = [\n {'name' : 'shakib' , 'age':35},\n {'name' : 'Musfiq' , 'age':36},\n {'name' : 'Tamim' , 'age':34} \n ]\n\nsenior_players = filter (lambda players : players ['age'] >=35, players)\nprint(list(senior_players))\n","repo_name":"aa-maruf/Python_Learning_Journey","sub_path":"Week- 2/Module- 5/Lecture-7_lambda.py","file_name":"Lecture-7_lambda.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25046507203","text":"\"\"\"\r\nAuthor: Mike Zhang\r\n\r\nThe program shown prints out a lucky draw coupon as shown and allow a batch printing of coupons.\r\nUser can select reference to the first draw number and how many to print for the following.\r\n\r\nSample:\r\n Please enter the reference no for the voucher : 5678\r\n====================================================================================================\r\nNTU Annual Dinner & Dance Lucky Draw No : 5678\r\n====================================================================================================\r\n\r\n The Lucky Draw will be conducted after the dinner,\r\n please keep this coupon in exchange of your gift \r\n\"\"\"\r\n\r\ndef LuckyDraw():\r\n startVNo = int(input(\"Please enter the reference no for the first voucher : \"))\r\n nos = int(input(\"Please enter the number of vouchers required : \"))\r\n for i in range(nos):\r\n pgno = startVNo + i\r\n \r\n Left_Header = \"NTU\"\r\n Center_Header = f\"Annual Dinner & Dance\"\r\n DrawNo_Text = f\"Lucky Draw No : 
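The names program above crashes when no entered name sorts after "ZZZZZ" (`aname` is then unbound when printed) and compares lengths against `shortname`/`longname` variables that may not exist yet on the first iteration. A corrected sketch that collects the names first and uses `min`/`max`:

```python
names = []
name = input("Type in a name: ")
while name != "":
    names.append(name)
    name = input("Type in a name: ")

if names:
    print("First alphabetical name", min(names))
    print("Last alphabetical name", max(names))
    print("The shortest name is", min(names, key=len))
    print("The longest name is", max(names, key=len))
```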
{pgno}\"\r\n \r\n print(\"=\"*100)\r\n print(f\"{Left_Header:<20}{Center_Header:^60}{DrawNo_Text:>20}\")\r\n print(\"=\"*100)\r\n print('''\r\n The Lucky Draw will be conducted after the dinner,\r\n please keep this coupon in exchange of your gift \r\n ''')\r\n print(\"-\"*100)\r\n \r\nif __name__ == \"__main__\":\r\n LuckyDraw()","repo_name":"MikeZ6/Python_Workshops","sub_path":"Voucher_Generator.py","file_name":"Voucher_Generator.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2001885323","text":"import discord\nfrom discord.ext import commands\nfrom discord.commands import slash_command\n\nclass EmbedBuilder(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @slash_command(name=\"embed\", description=\"Erstelle ein Embed\")\n async def _embed(self, ctx: discord.ApplicationContext):\n await ctx.response.send_modal(EmbedMakerModal())\n\n\ndef setup(bot):\n bot.add_cog(EmbedBuilder(bot))\n\n\nclass FinishedOne(discord.ui.View):\n def __init__(self, embed: discord.Embed):\n super().__init__(timeout=None)\n self.embed = embed\n\n @discord.ui.button(label=\"Senden\", style=discord.ButtonStyle.green, custom_id=\"23345651\")\n async def send(self, button: discord.ui.Button, interaction: discord.Interaction):\n await interaction.channel.send(embed=self.embed)\n button.disabled = True\n await interaction.response.edit_message(view=self)\n\n\n @discord.ui.button(label=\"Fields hinzufügen\", style=discord.ButtonStyle.gray, custom_id=\"566875\")\n async def add_field(self, button: discord.ui.Button, interaction: discord.Interaction):\n await interaction.response.send_modal(FieldMakerModal(self.embed))\n\n\nclass EmbedMakerModal(discord.ui.Modal):\n def __init__(self):\n super().__init__(\n discord.ui.InputText(label=\"Embed Title\", placeholder=\"Title\", required=True, style=discord.InputTextStyle.short),\n discord.ui.InputText(label=\"Embed Description\", placeholder=\"Description\", required=True, style=discord.InputTextStyle.paragraph),\n discord.ui.InputText(label=\"Embed Color\", placeholder=\"Color\", required=True, style=discord.InputTextStyle.short),\n discord.ui.InputText(label=\"Embed Footer\", placeholder=\"Footer\", required=False, style=discord.InputTextStyle.short),\n discord.ui.InputText(label=\"Embed Image\", placeholder=\"https://cdn.discordapp.com/attachments/\", required=False, style=discord.InputTextStyle.short),\n title=\"Embed Maker\",\n )\n\n async def callback(self, interaction: discord.Interaction):\n\n try:\n color = int(self.children[2].value, 16)\n except ValueError:\n color = 0x000000\n\n\n embed = discord.Embed(\n title=self.children[0].value,\n description=self.children[1].value,\n color=color,\n )\n if self.children[3].value:\n embed.set_footer(text=self.children[3].value)\n\n if self.children[4].value:\n if self.children[4].value.startswith(\"https://cdn.discordapp.com/attachments/\"):\n embed.set_image(url=self.children[4].value)\n\n await interaction.response.send_message(embed=embed, view=FinishedOne(embed), ephemeral=True)\n\nclass Confirm(discord.ui.View):\n def __init__(self):\n super().__init__()\n self.value = None\n\n @discord.ui.button(label=\"Senden\", style=discord.ButtonStyle.green)\n async def confirm_callback(self, button: discord.ui.Button, interaction: discord.Interaction):\n self.value = True\n self.stop()\n\n @discord.ui.button(label=\"Abbrechen\", style=discord.ButtonStyle.red)\n async def cancel_callback(self, button: 
discord.ui.Button, interaction: discord.Interaction):\n self.value = False\n self.stop()\n\nclass FieldMakerModal(discord.ui.Modal):\n def __init__(self, embed: discord.Embed):\n super().__init__(\n discord.ui.InputText(label=\"Field 1 Name\", placeholder=\"Name\", required=True, style=discord.InputTextStyle.short),\n discord.ui.InputText(label=\"Field 1 Value\", placeholder=\"Value\", required=True, style=discord.InputTextStyle.paragraph),\n discord.ui.InputText(label=\"Field 2 Name\", placeholder=\"Name\", required=False, style=discord.InputTextStyle.short),\n discord.ui.InputText(label=\"Field 2 Value\", placeholder=\"Value\", required=False, style=discord.InputTextStyle.paragraph),\n title=\"Field Maker\",\n )\n self.embed = embed\n\n async def callback(self, interaction: discord.Interaction):\n embed = self.embed\n\n embed.add_field(name=self.children[0].value, value=self.children[1].value, inline=False)\n\n if self.children[2].value and self.children[3].value:\n embed.add_field(name=self.children[2].value, value=self.children[3].value, inline=False)\n\n view = Confirm()\n await interaction.response.send_message(\"Möchtest du das Embed senden?\", view=view, ephemeral=True)\n await view.wait()\n if view.value is None:\n await interaction.response.send_message(\"⏰ | Du hast zu lange gebraucht\", ephemeral=True)\n elif view.value:\n await interaction.channel.send(embed=embed)\n await interaction.response.send_message(\"✅ | Gesendet\", ephemeral=True)\n else:\n await interaction.response.send_message(\"❌ | Abgebrochen\", ephemeral=True)","repo_name":"ProPlayer3112/Torbon-Bot","sub_path":"Cogs/embed_maker.py","file_name":"embed_maker.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73434451610","text":"#!/usr/bin/env python3\n\nfrom sympy import *\nfrom mpmath import *\nfrom matplotlib.pyplot import *\n#init_printing() # make things prettier when we print stuff for debugging.\n\n\n# ************************************************************************** #\n# Magnetic Flow normed by current, copper coil with hollow copper cylinder #\n# inserted. #\n# ************************************************************************** #\n\n# All values are in standard SI units unless otherwise noted.\n\n\n# ---------------------------------------------------------#\n# Init, Define Variables and Constants #\n# ---------------------------------------------------------#\nmu0 = 4*pi*1e-7 # vacuum permeability\nsigma = 1.25e6 # fit parameter: adjust as needed\nr = 0 # radial position of measurement probe. Centered on axis\ndsp = 98e-3 # diameter of coil\nrsp = dsp / 2 # radius of coil\nr1 = 30e-3 # inner radius of copper cylinder\nr2 = 35e-3 # outer radius of copper cylinder\nB0 = 6.9e-2 # fit parameter: adjust this as needed\nN0 = 574 # number of turns of copper coil\nl = 500e-3 # length of copper coil\nnpts = 1e3\nfmin = 8e1\nfmax = 5e4\n # -----------------------------------------------------#\n # Create a list for convenient printing of vars to #\n # file, add LaTeX where necessary. 
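In `FieldMakerModal.callback` above, the branches after `view.wait()` call `interaction.response.send_message` a second time, but an interaction can only be responded to once; after the first response, further messages must go through `interaction.followup`. A corrected tail for that callback, assuming Pycord's followup webhook API:

```python
# after: await interaction.response.send_message("Möchtest du das Embed senden?", view=view, ephemeral=True)
await view.wait()
if view.value is None:
    await interaction.followup.send("⏰ | Du hast zu lange gebraucht", ephemeral=True)
elif view.value:
    await interaction.channel.send(embed=embed)
    await interaction.followup.send("✅ | Gesendet", ephemeral=True)
else:
    await interaction.followup.send("❌ | Abgebrochen", ephemeral=True)
```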
#\n # -----------------------------------------------------#\nparams = [\n ' ' + '$\\mu_0' + '$ & $' + '\\SI{' + str(mu0) + r'}{\\newton\\per\\ampere\\squared}' + r'$\\\\' + \"\\n\",\n ' ' + '$\\sigma' + '$ & $' + '\\SI{' + str(sigma) + r'}{\\ampere\\per\\volt\\per\\meter}' + r'$\\\\' + \"\\n\",\n ' ' + '$d_{Sp}' + '$ & $' + '\\SI{' + str(dsp) + r'}{\\meter}' + r'$\\\\' + \"\\n\",\n ' ' + '$r_{Sp}' + '$ & $' + '\\SI{' + str(rsp) + r'}{\\meter}' + r'$\\\\' + \"\\n\",\n ' ' + '$r_1' + '$ & $' + '\\SI{' + str(r1) + r'}{\\meter}' + r'$\\\\' + \"\\n\",\n ' ' + '$r_2' + '$ & $' + '\\SI{' + str(r2) + r'}{\\meter}' + r'$\\\\' + \"\\n\",\n ' ' + '$l' + '$ & $' + '\\SI{' + str(l) + r'}{\\meter}' + r'$\\\\' + \"\\n\",\n ' ' + '$NPTS' + '$ & $' + r'\\num{' + str(npts) + '}' + r'$\\\\' + \"\\n\",\n ' ' + '$f_{min}' + '$ & $' + '\\SI{' + str(fmin) + r'}{\\hertz}' + r'$\\\\' + \"\\n\",\n ' ' + '$f_{max}' + '$ & $' + '\\SI{' + str(fmax) + r'}{\\hertz}' + r'$\\\\' + \"\\n\",\n ]\nfont = {\n 'family' : 'serif',\n 'color' : 'black',\n 'weight' : 'normal',\n 'size' : 9,\n }\ntitlefont = {\n 'family' : 'serif',\n 'color' : 'black',\n 'weight' : 'normal',\n 'size' : 10,\n }\nplot_legend_fontsize = 9\nplot_color_fit = 'blue'\nplot_color_fit_approx = 'magenta'\nplot_scale_x = 'log'\nplot_label_fit = 'Fit-Funktion'\nplot_label_fit_approx = r'Fit-Funktion, N\\\"aherungsl\\\"osung'\nplot_label_x = 'Frequenz (Hz)'\nplot_1_label_y = r\"$\\displaystyle \\biggl| \\frac{\\Phi}{I} \\biggr|$ $\\biggl( \\displaystyle \\frac{Vs}{A} \\biggr)$\"\nplot_2_label_y = r\"$\\displaystyle arg\\biggl( \\frac{\\Phi}{I} \\biggr)$ (Grad)\"\nplot_1_title = r\"Betrag Magn. Fluss normiert auf Spulenstrom, Spule mit Stahlrohr\"\nplot_2_title = r\"Phase Magn. Fluss normiert auf Spulenstrom, Spule mit Stahlrohr\"\n\n\n# ---------------------------------------------------------#\n# Functions #\n# #\n# For the exact formulas, see formula 28 on p.15 of #\n# script, for the approximations see formula 30 on p.16 of #\n# script. #\n# #\n# NOTE: We use frequency f instead of angular frequency #\n# omega since that is what we actually set on the function #\n# generator. #\n# ---------------------------------------------------------#\n\nk = lambda f: sqrt((2*np.pi*f*mu0*sigma)/2)*(mpc(1,-1))\n\n# exact solution:\nenum1 = lambda f:(\n besselj(0,k(f)*r1)\n * bessely(2,k(f)*r1)\n - besselj(2,k(f)*r1)\n * bessely(0,k(f) * r1)\n )\ndenom1 = lambda f: (\n besselj(0,k(f)*r2)\n * bessely(2,k(f)*r1)\n - besselj(2,k(f)*r1)\n * bessely(0,k(f) * r2)\n )\nenum2 = lambda f:(\n r2 * (\n besselj(1,k(f)*r2)\n * bessely(2,k(f)*r1)\n - besselj(2,k(f)*r1)\n * bessely(1,k(f) * r2)\n )\n - r1 * (\n besselj(1,k(f)*r1)\n * bessely(2,k(f)*r1)\n - besselj(2,k(f)*r1)\n * bessely(1,k(f) * r1)\n )\n )\ndenom2 = lambda f: (\n besselj(0,k(f)*r2)\n * bessely(2,k(f)*r1)\n - besselj(2,k(f)*r1)\n * bessely(0,k(f) * r2)\n )\nterm3 = rsp ** 2 - r2**2\nprefactor = mu0 * pi * N0**2 / l\n\nphi_norm = lambda f:(\n prefactor * (\n r1**2 * enum1(f)/denom1(f)\n + 2/k(f) * enum2(f)/denom2(f)\n + term3\n )\n )\n\nphi_norm_abs = lambda f: abs(phi_norm(f))\nphi_norm_arg = lambda f: arg(phi_norm(f))\n\n# Approx. 
solution:\nu1 = lambda f: mpc(0,1) * k(f) * r1\nu2 = lambda f: mpc(0,1) * k(f) * r2\n\nenum_approx = lambda f: (\n (u1(f) / 2 + 1) * ((u2(f) - 1) * exp( u2(f) - u1(f)) - (u1(f) - 1))\n + (u1(f) / 2 - 1) * ((u2(f) + 1) * exp(-u2(f) + u1(f)) - (u1(f) + 1))\n )\ndenom_approx = lambda f: (\n (u1(f) / 2) * exp(u2(f) - u1(f)) - (u1(f) / 2 - 1) * exp( - u2(f) + u1(f))\n )\n\nphi_norm_approx = lambda f: (\n mu0 * pi * N0**2/l * (2 * r1**2 /denom_approx(f) - 2/k(f)**2 * enum_approx(f) / denom_approx(f) + (rsp**2 - r2**2))\n )\n\nphi_norm_approx_abs = lambda f: abs(phi_norm_approx(f))\nphi_norm_approx_arg = lambda f: arg(phi_norm_approx(f))\n\n\n# ---------------------------------------------------------#\n# Generate points for omega axis #\n# ---------------------------------------------------------#\n# See also separate file stuetzpunkte.py\nn = np.linspace(0,npts,npts)\nexpufunc = np.frompyfunc(exp,1,1)\nfrequency_vector = expufunc((1-n/npts)*log(fmin)) * expufunc(n*log(fmax)/npts)\n\n\n# ---------------------------------------------------------#\n# Numerically evaluate functions #\n# ---------------------------------------------------------#\nphi_norm_abs_ufunc = np.frompyfunc(phi_norm_abs,1,1)\nphi_norm_abs_num = phi_norm_abs_ufunc(frequency_vector)\nphi_norm_arg_ufunc = np.frompyfunc(phi_norm_arg,1,1)\nphi_norm_arg_num = phi_norm_arg_ufunc(frequency_vector)\n\nphi_norm_approx_abs_ufunc = np.frompyfunc(phi_norm_approx_abs,1,1)\nphi_norm_approx_abs_num = phi_norm_approx_abs_ufunc(frequency_vector)\nphi_norm_approx_arg_ufunc = np.frompyfunc(phi_norm_approx_arg,1,1)\nphi_norm_approx_arg_num = phi_norm_approx_arg_ufunc(frequency_vector)\n\n# ---------------------------------------------------------#\n# Unfortunately, the arg() function only delivers values #\n# between -pi and +pi for the angle of a complex number, #\n# which, while correct, is not suitable for pretty #\n# plotting, so we will shift the values larger then zero #\n# accordingly for a continuous curve. 
#\n# ---------------------------------------------------------#\nphi_norm_arg_num = np.unwrap(phi_norm_arg_num)\nphi_norm_approx_arg_num = np.unwrap(phi_norm_approx_arg_num)\n\n\n# ---------------------------------------------------------#\n# Scale values for improved legibility in plot #\n# ---------------------------------------------------------#\nphi_norm_abs_num = phi_norm_abs_ufunc(frequency_vector)\nphi_norm_abs_num = 1e3 * phi_norm_abs_num\nphi_norm_arg_num = 180 / pi * phi_norm_arg_num\n\nphi_norm_approx_arg_num = 180 / pi * phi_norm_approx_arg_num\nphi_norm_approx_abs_num = phi_norm_approx_abs_ufunc(frequency_vector)\nphi_norm_approx_abs_num = 1e3 * phi_norm_approx_abs_num\n\n\n# ---------------------------------------------------------#\n# Plot the Things #\n# ---------------------------------------------------------#\nmatplotlib.pyplot.rc('text', usetex=True)\nmatplotlib.pyplot.rc('font', family='serif')\n\nfig = figure(1)\naxes1 = fig.add_subplot(211)\naxes1.plot(frequency_vector,phi_norm_abs_num,color=plot_color_fit,label=plot_label_fit)\naxes1.plot(frequency_vector,phi_norm_approx_abs_num,color=plot_color_fit_approx,label=plot_label_fit_approx)\naxes1.set_xlim([fmin*0.9,fmax*1.1])\naxes1.set_xscale(plot_scale_x)\naxes1.set_xlabel(plot_label_x,fontdict=font)\naxes1.set_ylabel(plot_1_label_y,fontdict=font)\naxes1.set_title(plot_1_title,fontdict=titlefont)\naxes1.legend(fontsize=plot_legend_fontsize)\naxes1.tick_params(labelsize=9)\n\naxes2 = fig.add_subplot(212)\naxes2.plot(frequency_vector,phi_norm_arg_num,color=plot_color_fit,label=plot_label_fit)\naxes2.plot(frequency_vector,phi_norm_arg_num,color=plot_color_fit_approx,label=plot_label_fit_approx)\naxes2.set_xlim([fmin*0.9,fmax*1.1])\naxes2.set_xscale(plot_scale_x)\naxes2.set_xlabel(plot_label_x,fontdict=font)\naxes2.set_ylabel(plot_2_label_y,fontdict=font)\naxes2.set_title(plot_2_title,fontdict=titlefont)\naxes2.legend(fontsize=plot_legend_fontsize)\naxes2.tick_params(labelsize=9)\n\nfig.subplots_adjust(bottom=0.15,left=0.125,right=0.925,top=0.95,hspace=0.5)\n\nfig.savefig('plots-pgf/hollow--st--freq--phi-norm.pgf')\nfig.savefig('plots-pdf/hollow--st--freq--phi-norm.pdf')\n\n\n# ---------------------------------------------------------#\n# Save listing to file #\n# ---------------------------------------------------------#\ndumpfile = open('listings/hollow--st--phi-norm.tex', 'w')\n\ntable_opening = r\"\"\"\n{%\n \\begin{center}\n \\captionof{table}{%\n Parameterwerte f\\\"ur Fit-Funktion in Abbildung \\ref{fig:st:freq:phi}\n }\n \\label{tab:fitparams:st:phi}\n \\sisetup{%\n %math-rm=\\mathtt,\n scientific-notation=engineering,\n table-format = +3.2e+2,\n round-precision = 3,\n round-mode = figures,\n }\n \\begin{tabular}{lr}\n \\toprule\n\"\"\"\ntable_closing = r\"\"\"\n \\bottomrule\n \\end{tabular}\n \\end{center}\n}\n\n\"\"\"\n\ndumpfile.writelines(table_opening)\n\nfor line in params:\n dumpfile.writelines(line)\n\ndumpfile.writelines(table_closing)\ndumpfile.close()\n","repo_name":"alpenwasser/glaL3","sub_path":"versuche/skineffect/python/hohlzylinder_st_phi.py","file_name":"hohlzylinder_st_phi.py","file_ext":"py","file_size_in_byte":11028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24931827220","text":"#Questão 04\r\n###Faça um programa que solicite a nota das 4 provas de um aluno e responde a sua média.\r\n\r\nsoma = 0\r\nmédia = 0\r\n\r\nfor contador in range(1,5):\r\n nota = int(input(f\"digite a {contador} nota: \"))\r\n soma = soma + nota\r\n 
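The fit function assembled above implements the script's exact solution (its formula 28): the skin-effect wavenumber and the normalized flux are

```latex
k(f) = \sqrt{\pi f \mu_0 \sigma}\,(1 - i), \qquad
\frac{\Phi}{I}(f) = \frac{\mu_0 \pi N_0^2}{l}
\left( r_1^2 \frac{E_1(f)}{D_1(f)} + \frac{2}{k(f)} \frac{E_2(f)}{D_2(f)} + r_{Sp}^2 - r_2^2 \right),
```

where E1/D1 and E2/D2 are the Bessel-function ratios coded as `enum1/denom1` and `enum2/denom2`, and the phase is unwrapped with `np.unwrap` before conversion to degrees. Note that the second trace on the phase subplot passes `phi_norm_arg_num` again where `phi_norm_approx_arg_num` was presumably intended, so the approximate phase curve is never actually drawn.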
média = soma/contador\r\n\r\nprint(f\"a média é: \", média)","repo_name":"RenatoMC1/simulado161122","sub_path":"Questão04.py","file_name":"Questão04.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14724549497","text":"#2. W grze mag-mino wykorzystuje się klocki, które mają kształt prostokątów, na których obydwu końcach znajduje\n#się liczba oczek od 0 do 9. Na każdym klocku z dwóch jego końców liczba oczek jest inna. W komplecie liczącym 90\n#klocków do gry występują wszystkie kombinacje oczek i każda kombinacja występuje dokładnie jeden raz. Proszę\n#napisać funkcję, która dla danego zbioru N klocków wyznacza najdłuższy ciąg jaki można z nich ułożyć.\n#Na przykład dla zbioru 8 klocków: [2|8] [0|1] [2|3] [3|6] [2|6] [2|9] [3|4] [6|7]\n#najdłuższy ciąg jaki można ułożyć ma długość 5 i ma postać : [8|2] [2|3] [3|6] [6|2] [2|9]\n\n#1. Pomysł: Generwoanie tablic z krotkami i sprawdzanie czy prawa 1 kolcka - lewa 2 klocka da 0 i szukanie najdluzszego takiego ciagu\n\ndef mag_mino(magset, id, path, taken):\n longest = 0\n if id == len(magset):\n for i in range(len(path) - 1):\n if path[i][1] - path[i + 1][0] == 0:\n print(path)\n if taken > longest:\n return taken\n else:\n return longest\n\n\n cords = magset[id]\n # bierze klocek normalnie\n path[id][0] = cords[0]\n path[id][1] = cords[1]\n mag_mino(magset, id + 1, path, taken + 1)\n\n #bierze odwrocony o 180 klocek\n path[id][0] = cords[1]\n path[id][1] = cords[0]\n mag_mino(magset, id + 1, path, taken + 1)\n\n #nie bierze klocka\n mag_mino(magset, id + 1, path, taken)\n\ndef main():\n magset = [(2,8), (0,1), (2,3), (3,6), (2,6), (2,9), (3,4), (6,7)]\n path = [[0] * 2 for _ in range(10)]\n mag_mino(magset, 0, path, 0)\n\nmain()","repo_name":"mamikula/Introduction-to-Computer-Science","sub_path":"Kolosy/KP19-20P02.py","file_name":"KP19-20P02.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6383459462","text":"#! 
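The `mag_mino` search above discards its recursive results: `longest` is reset to 0 on every call, the recursive branches never capture their return values, and only the base case sometimes returns, so the function can never report a chain length. A corrected brute-force sketch that extends a chain tile by tile, trying both orientations of each unused tile:

```python
def longest_chain(tiles, last=None, used=frozenset()):
    """Length of the longest chain where each tile's left end matches the previous right end."""
    best = 0
    for i, (a, b) in enumerate(tiles):
        if i in used:
            continue
        for left, right in ((a, b), (b, a)):  # tile as-is, or rotated 180 degrees
            if last is None or left == last:
                best = max(best, 1 + longest_chain(tiles, right, used | {i}))
    return best

tiles = [(2, 8), (0, 1), (2, 3), (3, 6), (2, 6), (2, 9), (3, 4), (6, 7)]
print(longest_chain(tiles))  # 5, e.g. [8|2][2|3][3|6][6|2][2|9]
```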
python3\n\n\n# import libraries\nimport time\nfrom k0001func import initialise\nfrom k0001app import open_tlc\nfrom k0001rpifunc import start_func, set_gui_inputs, set_state, finally_func\n\n\n#\ndef run():\n #\n start_func()\n\n #\n step = 0\n # amber_state = False\n\n #\n initialise()\n\n #\n while True:\n #\n # if step % 5 == 0:\n # amber_state ^= True\n\n #\n set_gui_inputs()\n\n #\n open_tlc(step)\n # open_tlc(step, amber_state)\n\n #\n set_state()\n\n # Delay for a 10th of a second (0.1 seconds)\n time.sleep(0.1)\n\n #\n step += 1\n\n\nif __name__ == '__main__':\n try:\n run()\n finally:\n finally_func()\n print('TLC stopped...')\n","repo_name":"MartijnHarmenzon/openTLC","sub_path":"k0001rpi.py","file_name":"k0001rpi.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"39712196213","text":"from simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom simtk.openmm.app.element import *\nfrom simtk.openmm.app.internal.unitcell import computePeriodicBoxVectors\nfrom pdbfixer.pdbfixer import PDBFixer, proteinResidues, dnaResidues, rnaResidues, _guessFileFormat\n\nimport subprocess\nfrom timeit import default_timer as timer\n\nimport nanome\nfrom nanome.util import Logs\n\n# TEMP\nfrom nanome._internal._structure._bond import _Bond\nfrom nanome._internal._structure._io._pdb.save import Options as PDBOptions\n\nfrom .AdvancedSettings import AdvancedSettings\n\nnanometer = nano * meter\npicosecond = pico * second\nmetalElements = ['Al','As','Ba','Ca','Cd','Ce','Co','Cs','Cu','Dy','Fe','Gd','Hg','Ho','In','Ir','K','Li','Mg',\n 'Mn','Mo','Na','Ni','Pb','Pd','Pt','Rb','Rh','Sm','Sr','Te','Tl','V','W','Yb','Zn']\n\npdb_options = PDBOptions()\npdb_options.write_bonds = True\n\nclass MDSimulationProcess():\n def __init__(self, plugin):\n self.__plugin = plugin\n self.__forcefield = None\n\n @staticmethod\n def get_bond_type(kind):\n if kind == _Bond.Kind.CovalentSingle:\n return Single\n if kind == _Bond.Kind.CovalentDouble:\n return Double\n if kind == _Bond.Kind.CovalentTriple:\n return Triple\n return None\n\n @staticmethod\n def get_atom_symbol(name, atoms_nb):\n upper = name.upper()\n if upper.startswith('CL'):\n return chlorine\n elif upper.startswith('NA'):\n return sodium\n elif upper.startswith('MG'):\n return magnesium\n elif upper.startswith('BE'):\n return beryllium\n elif upper.startswith('LI'):\n return lithium\n elif upper.startswith('K'):\n return potassium\n elif upper.startswith('ZN'):\n return zinc\n elif (atoms_nb == 1 and upper.startswith('CA')):\n return calcium\n else:\n return Element.getBySymbol(upper[0])\n\n def set_stream(self, stream):\n self.__stream = stream\n\n def delete_alternate_atoms(self, topology, positions):\n modeller = Modeller(topology, positions)\n delete_atoms = []\n for chain in topology.chains():\n for indexInChain, residue in enumerate(chain.residues()):\n atom_names = []\n for atom in residue.atoms():\n if atom.name in atom_names:\n delete_atoms.append(atom)\n else:\n atom_names.append(atom.name)\n\n modeller.delete(delete_atoms)\n return (modeller.getTopology(), modeller.getPositions())\n\n def fix_complexes(self, complex_list):\n fixed_complexes = []\n for complex in complex_list:\n for residue in complex.residues:\n atoms = residue._atoms\n for i in range(len(atoms) - 1, -1, -1):\n if atoms[i].molecular.is_het == True:\n del atoms[i]\n\n complex.io.to_pdb(\"tmp.pdb\", pdb_options)\n\n fixer = 
PDBFixer(filename=\"tmp.pdb\")\n fixer.findMissingResidues()\n fixer.findNonstandardResidues()\n fixer.replaceNonstandardResidues()\n fixer.findMissingAtoms()\n fixer.addMissingAtoms()\n fixer.removeHeterogens(False)\n fixer.addMissingHydrogens(7.0)\n\n (topology, positions) = self.delete_alternate_atoms(fixer.topology, fixer.positions)\n with open('tmp.pdb', 'w') as pdb_file:\n PDBFile.writeFile(topology, positions, pdb_file)\n\n fixed_complex = nanome.structure.Complex.io.from_pdb(path=\"tmp.pdb\")\n fixed_complex.index = complex.index\n fixed_complex.position = complex.position\n fixed_complex.rotation = complex.rotation\n fixed_complex.molecular.name = complex.molecular.name\n fixed_complex.rendering.visible = True\n fixed_complexes.append(fixed_complex)\n\n return fixed_complexes\n\n def init_simulation(self, complex_list):\n settings = AdvancedSettings.instance\n self.__forcefield = settings.get_forcefield()\n # Create topology\n topology = Topology()\n added_atoms = dict()\n positions = []\n PDBFile._loadNameReplacementTables()\n self.__complex_list = complex_list\n min_x = max_x = min_y = max_y = min_z = max_z = None\n Logs.debug(\"Create topology\")\n for complex in complex_list:\n for molecule in complex.molecules:\n for chain in molecule.chains:\n sim_chain = topology.addChain()\n for residue in chain.residues:\n residueName = residue.molecular.name\n if residueName in PDBFile._atomNameReplacements:\n atomReplacements = PDBFile._atomNameReplacements[residueName]\n else:\n atomReplacements = {}\n sim_residue = topology.addResidue(residue.molecular.name, sim_chain)\n for atom in residue.atoms:\n molecular = atom.molecular\n symbol = MDSimulationProcess.get_atom_symbol(molecular.name, len(residue._atoms))\n atom_name = molecular.name\n if atom_name in atomReplacements:\n atom_name = atomReplacements[atom_name]\n sim_atom = topology.addAtom(atom_name, symbol, sim_residue)\n added_atoms[atom.index] = sim_atom\n position = molecular.position\n positions.append(Vec3(position.x * 0.1 * nanometer, position.y * 0.1 * nanometer, position.z * 0.1 * nanometer))\n if min_x == None or position.x < min_x:\n min_x = position.x\n if max_x == None or position.x > max_x:\n max_x = position.x\n if min_y == None or position.y < min_y:\n min_y = position.y\n if max_y == None or position.y > max_y:\n max_y = position.y\n if min_z == None or position.z < min_z:\n min_z = position.z\n if max_z == None or position.z > max_z:\n max_z = position.z\n\n topology.createStandardBonds()\n topology.createDisulfideBonds(positions)\n added_bonds = set(topology.bonds())\n for complex in complex_list:\n for molecule in complex.molecules:\n for chain in molecule.chains:\n for residue in chain.residues:\n for bond in residue.bonds:\n if bond.index in added_bonds:\n continue\n atom1 = added_atoms[bond.atom1.index]\n atom2 = added_atoms[bond.atom2.index]\n type = MDSimulationProcess.get_bond_type(bond.molecular.kind)\n topology.addBond(atom1, atom2, type)\n added_bonds.add(bond.index)\n\n # topology.setPeriodicBoxVectors(computePeriodicBoxVectors(max_x - min_x, max_y - min_y, max_z - min_z, 90, 90, 90))\n topology.setPeriodicBoxVectors(computePeriodicBoxVectors(49.163, 45.981, 38.869, 90.00, 90.00, 90.00))\n\n # Create simulation parameters\n # nonbondedMethod = PME\n [templates, residues] = self.__forcefield.generateTemplatesForUnmatchedResidues(topology)\n for index, residue in enumerate(residues):\n template = templates[index]\n print(\"unmatched residue:\", residue)\n # for index, template in enumerate(templates):\n 
# residue = residues[index]\n # residue_bonds = list(residue.internal_bonds())+list(residue.external_bonds())\n # print(\"residue bonds:\", residue_bonds)\n # (unique_res_bonds, unique_tmp_bonds) = ForceField.findMissingBonds(residue, template)\n # print(\"UNIQUE RESIDUE BONDS:\", unique_res_bonds)\n # print(\"UNIQUE TEMPLATE BONDS:\", unique_tmp_bonds)\n # print(f\"RESIDUE {residue.name}:\")\n # if len(unique_res_bonds) > 0:\n # print(\"Missing bonds:\")\n # for res_bond in unique_res_bonds:\n # print(f\"\\n{res_bond}\")\n # print(f\"TEMPLATE {template.name}:\")\n # if len(unique_tmp_bonds) > 0:\n # print(\"Missing bonds:\")\n # for tmp_bond in unique_tmp_bonds:\n # print(f\"\\n{tmp_bond}\")\n # print(\"-------------------------\")\n # for bond in unique_res_bonds:\n # template.name += '[' + ForceField.getAtomID(bond[0]) + '<-->' + ForceField.getAtomID(bond[1]) + ']'\n # if bond[0] in residue.atoms() and bond[1] in residue.atoms():\n # template.addBondByName(bond[0].name, bond[1].name)\n # else:\n # template.addExternalBondByName(bond[0].name)\n\n # if template.name not in self.__forcefield._templates:\n # self.__forcefield.registerResidueTemplate(template)\n # print(f\"registering template {template.name}\")\n # else:\n # print(f\"redundant template {template.name} ********************\")\n\n # system = self.__forcefield.createSystem(topology, nonbondedMethod = NoCutoff, nonbondedCutoff = 1 * nanometer, constraints = HBonds)\n system = settings.get_system(topology)\n\n # Set the simulation\n # integrator = LangevinIntegrator(300 * kelvin, 1 / picosecond, 0.002 * picosecond)\n integrator = settings.get_integrator()\n\n # 'is not' tested string identity here, not equality; use != for the sentinel\n if settings.system_thermostat != 'None':\n temp = settings.system_generation_temp\n col_rate = settings.integrator_collision_rate\n # AndersenThermostat comes from the star import of simtk.openmm;\n # the record never bound an 'mm' alias, so the qualified name would fail\n system.addForce(AndersenThermostat(temp*kelvin, col_rate/picoseconds))\n\n # simulation = Simulation(topology, system, integrator)\n self.__simulation = settings.get_simulation(positions)\n # Set reporting\n settings.attach_reporter(MDReporter, self.simulation_result)\n\n self.__simulation.context.setPositions(positions)\n if settings.simulation_minimize:\n self.__plugin.send_notification(nanome.util.enums.NotificationTypes.message, \"Minimizing...\")\n self.__simulation.minimizeEnergy()\n\n if settings.system_random_init_vel:\n self.__simulation.context.setVelocitiesToTemperature(settings.system_generation_temp*kelvin)\n eq_steps = settings.simulation_equilibrium_steps\n if eq_steps:\n self.__plugin.send_notification(nanome.util.enums.NotificationTypes.message, \"Equilibrating...\")\n self.simulate(complex_list, eq_steps)\n\n def simulate(self, complex_list, steps=None):\n self.__start = timer()\n self.__simulation.step(steps or AdvancedSettings.instance.simulation_reporter_interval)\n\n def simulation_result(self, positions, velocities=None, forces=None, energies=None):\n import math # local import: the record's module header never imports math\n end = timer()\n Logs.debug(\"Simulation:\", end - self.__start)\n self.__start = timer()\n new_positions = []\n for position in positions:\n coords = [c._value * 10 for c in position]\n if any(math.isnan(c) for c in coords):\n Logs.warning(\"Got a NaN value, ignoring it\")\n continue\n new_positions.extend(coords)\n self.__stream.update(new_positions, self.on_result_processed)\n\n def on_result_processed(self):\n if self.__plugin.running:\n self.__simulation.step(AdvancedSettings.instance.simulation_reporter_interval)\n\n# This class is a reporter for OpenMM Simulation class\nclass MDReporter(object):\n def __init__(self, settings, results_callback):\n 
self.__apply_results = results_callback\n self.__settings = settings\n self.__interval = self.__settings.simulation_reporter_interval\n self.__options = list(self.__settings.simulation_reporter_options.values())\n\n def describeNextReport(self, simulation):\n return (self.__interval, *self.__options , None)\n\n def report(self, simulation, state):\n use_velocities = self.__options[1]\n use_forces = self.__options[2]\n use_energies = self.__options[3]\n\n self.__apply_results(state.getPositions(), state.getVelocities() if use_velocities else None, state.getForces() if use_forces else None, state.getEnergies if use_energies else None)","repo_name":"nanome-ai/plugin-molecular-dynamics","sub_path":"nanome_molecular_dynamics/MDSimulationProcess.py","file_name":"MDSimulationProcess.py","file_ext":"py","file_size_in_byte":12858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10430349343","text":"from datetime import datetime\r\nimport csv\r\nimport os\r\nimport sys\r\nimport prompts\r\nimport backendIO\r\nimport email_helper\r\nimport time as timelib\r\ntry:\r\n import cv\r\nexcept Exception:\r\n pass\r\n\r\n\r\ndef load_spreadsheet(spreadsheet, prompt_type):\r\n '''\r\n Loads a spreadsheet (CSV file), trims the header line, and returns a dict of prompt objects\r\n Inputs:\r\n spreadsheet (string): Name of csv file to load in Student_Sentiment_Sensor/data/\r\n prompt_type (string): Options are\r\n \"question\" for question_prompt\r\n \"response\" for response_prompt\r\n Output:\r\n prompt_dict (dict):\r\n - Dictionary of prompts; follows format prompt_dict[prompt_id] = prompts.prompt_type(text, ids)\r\n '''\r\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), spreadsheet))\r\n prompt_dict = {}\r\n with open(file_path, 'r') as opened_file:\r\n csv_file = csv.reader(opened_file)\r\n next(csv_file) # Skips header line\r\n for row in csv_file:\r\n try:\r\n prompt_id = row[0]\r\n text = row[1]\r\n if prompt_type == 'question':\r\n response_ids = row[2:]\r\n prompt_dict[prompt_id] = prompts.question_prompt(text, response_ids)\r\n if prompt_type == 'response':\r\n question_id = row[2]\r\n prompt_dict[prompt_id] = prompts.response_prompt(text, question_id)\r\n if prompt_type == 'solution':\r\n emotion_approx = row[2]\r\n info_listing_ids = row[3:]\r\n prompt_dict[prompt_id] = prompts.solutions_prompt(text, emotion_approx, info_listing_ids)\r\n if prompt_type == 'info_listing':\r\n link = row[2]\r\n prompt_dict[prompt_id] = prompts.info_listing_prompt(text, link)\r\n except Exception as e:\r\n print('Error in load_spreadsheet: {} --> {}'.format(row, e))\r\n return prompt_dict\r\n\r\n\r\ndef get_response_texts(response_id_list, response_prompts_dict):\r\n response_texts = []\r\n for response_id in response_id_list:\r\n response_texts.append(response_prompts_dict[response_id].get_text())\r\n return response_texts\r\n\r\n\r\ndef get_current_u_id():\r\n '''\r\n Gets u_id from student_sentiment_sensor/data/stored_user_data\r\n - u_id is used to track runs. 
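# --------------------------------------------------------------------------
# Illustrative aside on the MDSimulationProcess record above (not part of any
# dataset record): fix_complexes() runs the standard PDBFixer cleanup chain.
# A minimal standalone sketch of the same sequence, assuming pdbfixer and
# OpenMM are installed; the file names here are placeholders.
from pdbfixer.pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile

fixer = PDBFixer(filename='input.pdb')
fixer.findMissingResidues()            # locate gaps in each chain
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()     # map non-standard residues to standard ones
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(False)          # False drops waters as well
fixer.addMissingHydrogens(7.0)         # protonation state for pH 7
with open('fixed.pdb', 'w') as out:
    PDBFile.writeFile(fixer.topology, fixer.positions, out)
# --------------------------------------------------------------------------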
Used to create log data in \"u_id.dat\" files\r\n\r\n Outputs:\r\n u_id (int): highest u_id currently stored in student_sentiment_sensor/data/stored_user_data/\"u_id.dat\"\r\n '''\r\n interaction_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../data/stored_user_data/\"))\r\n interaction_files = [f for f in os.listdir(interaction_dir) if os.path.isfile(os.path.join(interaction_dir, f))]\r\n file_list = []\r\n try:\r\n for i_file in interaction_files:\r\n file_list.append(int(i_file.split('.')[0])) # Extract number from each data file\r\n file_list.sort(reverse=True)\r\n # interaction_files = int(interaction_files.split('.'))\r\n # interaction_files.sort(reverse=True)\r\n # u_id = interaction_files[0] # Gets largest file_id\r\n u_id = file_list[0] # Gets largest file_id\r\n except Exception as e:\r\n print('Could not get u_id, returning u_id = 0: {}'.format(e))\r\n u_id = 0\r\n return u_id\r\n\r\n\r\ndef main():\r\n '''\r\n ARGS: backend.py QUESTION_PROMPT_NAME RESPONSE_PROMPT_NAME\r\n QUESTION_PROMPT_NAME and RESPONSE_PROMPT_NAME are optional args to configure the execution\r\n '''\r\n question_prompt_filename = 'decision_tree/question_prompts.csv'\r\n response_prompt_filename = 'decision_tree/response_prompts.csv'\r\n solution_prompt_filename = 'decision_tree/solution_prompts.csv'\r\n info_listing_prompt_filename = 'decision_tree/info_listing_prompts.csv'\r\n\r\n is_cv = True\r\n\r\n if is_cv:\r\n try:\r\n camera = cv.init_cam()\r\n except Exception:\r\n is_cv = False\r\n print('Disabled camera, is_cv = {}'.format(is_cv))\r\n import random\r\n pass\r\n\r\n if len(sys.argv) >= 3:\r\n question_prompt_filename = sys.argv[1]\r\n response_prompt_filename = sys.argv[2]\r\n\r\n question_prompts = load_spreadsheet('../data/' + question_prompt_filename, 'question') # dict\r\n response_prompts = load_spreadsheet('../data/' + response_prompt_filename, 'response') # dict\r\n solution_prompts = load_spreadsheet('../data/' + solution_prompt_filename, 'solution')\r\n info_listing_prompts = load_spreadsheet('../data/' + info_listing_prompt_filename, 'info_listing')\r\n\r\n prompt_id = '0'\r\n\r\n conversation = []\r\n interactions = []\r\n\r\n conversation.append(('Emotion', 'Null'))\r\n\r\n while True:\r\n if prompt_id == '0':\r\n interactions = []\r\n u_id = get_current_u_id() + 1\r\n print('Recording u_id: {}'.format(u_id))\r\n prompt_id = '6'\r\n elif prompt_id == '7':\r\n open_prompt = question_prompts[prompt_id]\r\n # Truncate time down to the centisecond\r\n time = datetime.now().strftime(\"%Y%m%d%H%M%S%f\")[0: -4]\r\n # print(backendIO.toJSON(open_prompt, response_prompts))\r\n backendIO.send_question_to_frontend(open_prompt, response_prompts, time)\r\n emotion = ''\r\n while emotion not in ['sad', 'fear', 'angry']:\r\n if is_cv:\r\n emotion = cv.get_emotion(camera)[0]\r\n else:\r\n timelib.sleep(5)\r\n emotion = random.choice(['sad', 'fear', 'angry'])\r\n\r\n if emotion == 'angry':\r\n prompt_id = '8'\r\n if emotion == 'fear':\r\n prompt_id = '9'\r\n if emotion == 'sad':\r\n prompt_id = '10'\r\n\r\n open_prompt = question_prompts[prompt_id]\r\n # Truncate time down to the centisecond\r\n time = datetime.now().strftime(\"%Y%m%d%H%M%S%f\")[0: -4]\r\n # print(backendIO.toJSON(open_prompt, response_prompts))\r\n backendIO.send_question_to_frontend(open_prompt, response_prompts, time)\r\n timelib.sleep(0.5)\r\n elif prompt_id == '2400':\r\n backendIO.store_conversation(conversation, time)\r\n backendIO.store_interaction(interactions, emotion, str(u_id))\r\n print('Storing u_id: 
{}'.format(u_id))\r\n conversation = []\r\n conversation.append(('Emotion', 'Null'))\r\n prompt_id = '0'\r\n elif prompt_id in question_prompts.keys():\r\n open_prompt = question_prompts[prompt_id]\r\n\r\n # Truncate time down to the centisecond\r\n time = datetime.now().strftime(\"%Y%m%d%H%M%S%f\")[0: -4]\r\n\r\n # print(backendIO.toJSON(open_prompt, response_prompts))\r\n backendIO.send_question_to_frontend(open_prompt, response_prompts, time)\r\n\r\n response_id, response = backendIO.read_from_frontend(time, response_prompts)\r\n\r\n conversation.append((prompt_id, response_id))\r\n interactions.append(['Question', prompt_id, open_prompt.get_text(), response_id, response.get_text()])\r\n\r\n prompt_id = response.get_question_id()\r\n\r\n elif prompt_id in solution_prompts.keys():\r\n open_prompt = solution_prompts[prompt_id]\r\n\r\n # Truncate time down to the centisecond\r\n time = datetime.now().strftime(\"%Y%m%d%H%M%S%f\")[0: -4]\r\n\r\n # print(backendIO.toJSON(open_prompt, response_prompts))\r\n backendIO.send_solution_to_frontend(open_prompt, info_listing_prompts, time)\r\n\r\n email_status, email_address = backendIO.read_from_frontend(time, response_prompts)\r\n\r\n rsolution = email_helper.solutionToRichSolution(open_prompt, info_listing_prompts)\r\n\r\n conversation.append((prompt_id, email_status))\r\n interactions.append(['Solution', prompt_id, open_prompt.get_text(), open_prompt.get_emotion_approx()])\r\n\r\n if email_status == 'emailTrue':\r\n email_helper.email_solutions(email_address, rsolution)\r\n\r\n prompt_id = '23'\r\n else:\r\n print('Bad prompt ID: {}'.format(prompt_id))\r\n\r\n # OLD CODE: CLI backend setup\r\n # response_id_list = open_prompt.get_response_ids()\r\n # print(open_prompt.get_text())\r\n # for response in response_id_list:\r\n # if response in response_prompts.keys():\r\n # print('{}: {}'.format(response_prompts[response].get_text(), response))\r\n # print('_____________________________')\r\n\r\n # selection = input('')\r\n # response_id = open_prompt.get_response_ids()[int(selection)]\r\n # response = response_prompts[response_id]\r\n # prompt_id = response.get_question_id()\r\n # print(prompt_id)\r\n # if prompt_id == '0':\r\n # do_end = input('Enter anything to exit')\r\n # if do_end != '':\r\n # print('')\r\n # break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"John-priv/student_sentiment_sensor","sub_path":"backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":9169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6269806961","text":"import json\n\nfrom google.appengine.ext import ndb\n\nfrom CONSTANTS import MOVE_TYPES\n\n\nclass InvalidMove(Exception):\n pass\n\n\nclass BaseModel(ndb.Model):\n created = ndb.DateTimeProperty(auto_now_add=True)\n modified = ndb.DateTimeProperty(auto_now=True)\n\n\nclass Pool(BaseModel):\n setup = ndb.JsonProperty(required=True)\n socket_id = ndb.StringProperty(required=True)\n\n\nclass Game(BaseModel):\n red_hash = ndb.StringProperty()\n blue_hash = ndb.StringProperty()\n join_hash = ndb.StringProperty()\n\n board = ndb.JsonProperty(default='''[\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]''')\n\n red_setup = 
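# --------------------------------------------------------------------------
# Illustrative aside on the backend.py record above (not part of any dataset
# record): load_spreadsheet() expects CSVs with a header row, an id column,
# a text column, and trailing columns of linked ids. A hedged sketch of that
# layout, inferred from the parsing code (the prompts module is not shown):
import csv, io

question_csv = io.StringIO(
    'id,text,response_ids\n'
    '6,How are you feeling today?,1,2\n'  # row[2:] holds the response ids
)
reader = csv.reader(question_csv)
next(reader)  # skip the header line, exactly as the loader does
for row in reader:
    prompt_id, text, response_ids = row[0], row[1], row[2:]
    print(prompt_id, text, response_ids)
# --------------------------------------------------------------------------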
ndb.JsonProperty()\n blue_setup = ndb.JsonProperty()\n\n moves = ndb.JsonProperty(repeated=True)\n\n # Who's turn is it currently? False = red, True = blue\n turn = ndb.BooleanProperty(default=False)\n\n # Is this game by invite only?\n private = ndb.BooleanProperty(default=True)\n\n game_state = ndb.IntegerProperty(default=0)\n\n grave_yard = ndb.JsonProperty(repeated=True)\n\n def set_red_setup(self, red_setup):\n if not self.red_setup:\n board = self.get_board()\n\n board[6] = red_setup[0]\n board[7] = red_setup[1]\n board[8] = red_setup[2]\n board[9] = red_setup[3]\n\n self.set_board(board)\n self.red_setup = json.dumps(red_setup)\n else:\n raise AttributeError('yeah see...')\n\n def set_blue_setup(self, blue_setup):\n if not self.blue_setup:\n board = self.get_board()\n\n # We store things from the perspective of red, so we need to reverse\n board[3] = blue_setup[0][::-1]\n board[2] = blue_setup[1][::-1]\n board[1] = blue_setup[2][::-1]\n board[0] = blue_setup[3][::-1]\n\n self.set_board(board)\n self.blue_setup = json.dumps(blue_setup)\n\n else:\n raise AttributeError('yeah see...')\n\n def set_blocks(self):\n self.set_piece({'x': 2, 'y': 4}, 1)\n self.set_piece({'x': 2, 'y': 5}, 1)\n self.set_piece({'x': 3, 'y': 4}, 1)\n self.set_piece({'x': 3, 'y': 5}, 1)\n\n self.set_piece({'x': 6, 'y': 4}, 1)\n self.set_piece({'x': 6, 'y': 5}, 1)\n self.set_piece({'x': 7, 'y': 4}, 1)\n self.set_piece({'x': 7, 'y': 5}, 1)\n\n def get_opponent_hash(self, player_hash):\n if player_hash == self.blue_hash:\n return self.red_hash\n elif player_hash == self.red_hash:\n return self.blue_hash\n\n def get_board(self):\n return json.loads(self.board)\n\n def get_piece(self, pos):\n board = self.get_board()\n\n return board[pos['y']][pos['x']]\n\n def set_board(self, new_board):\n self.board = json.dumps(new_board)\n\n def set_piece(self, pos, piece):\n board = self.get_board()\n\n board[pos['y']][pos['x']] = piece\n\n self.set_board(board)\n\n def flip_turn(self):\n self.turn = not self.turn\n\n def set_last_move(self, last_move):\n self.moves.append(json.dumps(last_move))\n\n def get_last_move(self):\n if self.moves:\n return json.loads(self.moves[-1])\n else:\n return {}\n\n def get_moves(self):\n moves = []\n for move in self.moves:\n moves.append(json.loads(move))\n\n return moves\n\n def will_violate_two_square_rule(self, fromPos, toPos):\n # Select all even/odd moves i.e. 
all red/blue moves\n moves = self.get_moves()[int(self.turn)::2]\n\n if len(moves) < 3:\n return False\n\n if not Game.check_moves_are_same_piece([\n {\n 'fromPos': moves[-3]['from']['position'],\n 'toPos': moves[-3]['to']['position']\n },\n {\n 'fromPos': moves[-2]['from']['position'],\n 'toPos': moves[-2]['to']['position']\n },\n {\n 'fromPos': moves[-1]['from']['position'],\n 'toPos': moves[-1]['to']['position']\n },\n {\n 'fromPos': fromPos,\n 'toPos': toPos\n }\n ]):\n return False\n\n move_1_cells = Game.get_cells_between_inclusive(\n moves[-3]['from']['position'],\n moves[-3]['to']['position']\n )\n move_2_cells = Game.get_cells_between_inclusive(\n moves[-2]['from']['position'],\n moves[-2]['to']['position']\n )\n move_3_cells = Game.get_cells_between_inclusive(\n moves[-1]['from']['position'],\n moves[-1]['to']['position']\n )\n move_4_cells = Game.get_cells_between_inclusive(\n fromPos,\n toPos\n )\n\n all_cells = move_1_cells + move_2_cells + move_3_cells + move_4_cells\n\n duplicate_cells = 0\n for move_4_cell in move_4_cells:\n if all_cells.count(move_4_cell) == 4:\n duplicate_cells += 1\n\n if duplicate_cells > 1:\n return True\n\n return False\n\n def has_ended(self):\n last_move = self.get_last_move()\n\n if last_move and last_move['type'] == 'capture':\n return True\n else:\n return False\n\n def move_piece(self, fromPos, toPos):\n board = self.get_board()\n piece = board[fromPos['y']][fromPos['x']]\n\n board[fromPos['y']][fromPos['x']] = 0\n\n board[toPos['y']][toPos['x']] = piece\n\n self.set_board(board)\n\n def delete_piece(self, pos):\n board = self.get_board()\n piece = board[pos['y']][pos['x']]\n\n board[pos['y']][pos['x']] = 0\n\n self.set_board(board)\n\n def check_move(self, fromPos, toPos):\n fromPiece = self.get_piece(fromPos)\n toPiece = self.get_piece(toPos)\n\n if fromPiece == 0 or fromPiece == 1:\n raise InvalidMove('No piece to move.')\n\n if not fromPiece['side'] == self.turn:\n raise InvalidMove('Not your turn')\n\n if self._cell_is_occupied(toPiece):\n if toPiece == 1:\n raise InvalidMove('Can not move onto an unmoveable block.')\n if fromPiece['side'] == toPiece['side']:\n raise InvalidMove('Can not move onto friendly piece.')\n\n # Bombs and flags can't move.\n if fromPiece['rank'] == 'B':\n raise InvalidMove('Bombs cannot be moved.')\n if fromPiece['rank'] == 'F':\n raise InvalidMove('Flags cannot be moved.')\n\n diff = {}\n diff['x'] = abs(fromPos['x'] - toPos['x'])\n diff['y'] = abs(fromPos['y'] - toPos['y'])\n\n if diff['x'] == 0 and diff['y'] == 0:\n raise InvalidMove('Position has not changed.')\n\n if self.will_violate_two_square_rule(fromPos, toPos):\n raise InvalidMove('That move violates the two-square rule.')\n\n # We're either moving one square or we're a scout moving in a straight\n # line.\n # We can't move diagonally\n if ((diff['x'] == 1) != (diff['y'] == 1) or (fromPiece['rank'] == '9')) and \\\n (diff['x'] == 0) != (diff['y'] == 0):\n\n # If we're a scout we need to verify there's nothing between from\n # and to\n if fromPiece['rank'] == '9' and self._is_piece_between(fromPos, toPos, diff):\n raise InvalidMove('Can not jump over pieces.')\n\n if self._cell_is_occupied(toPiece):\n return self._check_attack(fromPiece, toPiece)\n\n else:\n return MOVE_TYPES.MOVE\n\n else:\n raise InvalidMove('Illegal movement.')\n\n # We must know at this point that we're not moving on multiple axis\n def _is_piece_between(self, fromPos, toPos, diff):\n board = self.get_board()\n\n # We're moving on the x axis\n if diff['y'] is 0:\n coefficient = 1 if 
fromPos['x'] < toPos['x'] else -1\n for i in xrange(1, diff['x']):\n if self.get_piece({'x': fromPos['x'] + (i * coefficient), 'y': fromPos['y']}) != 0:\n return True\n\n return False\n\n # We're moving on the y axis\n else:\n coefficient = 1 if fromPos['y'] < toPos['y'] else -1\n for i in xrange(1, diff['y']):\n if self.get_piece({'x': fromPos['x'], 'y': fromPos['y'] + (i * coefficient)}) != 0:\n return True\n\n return False\n\n def _check_attack(self, fromPiece, toPiece):\n # Are we gonna draw?\n if fromPiece['rank'] == toPiece['rank']:\n return MOVE_TYPES.ATTACK_DRAW\n\n # Any movable piece can capture the flag.\n if toPiece['rank'] == 'F':\n return MOVE_TYPES.CAPTURE\n\n # Are we attacking a bomb?\n if toPiece['rank'] == 'B':\n if fromPiece['rank'] == '8':\n return MOVE_TYPES.ATTACK_WON\n else:\n return MOVE_TYPES.ATTACK_LOST\n\n # Everything wins attacking a spy.\n if toPiece['rank'] == 'S':\n return MOVE_TYPES.ATTACK_WON\n\n # Are we a spy?\n if fromPiece['rank'] == 'S':\n if toPiece['rank'] == '1':\n return MOVE_TYPES.ATTACK_WON\n else:\n return MOVE_TYPES.ATTACK_LOST\n\n fromRank = int(fromPiece['rank'])\n toRank = int(toPiece['rank'])\n\n if toRank > fromRank:\n return MOVE_TYPES.ATTACK_WON\n else:\n return MOVE_TYPES.ATTACK_LOST\n\n def _cell_is_occupied(self, piece):\n return not self._cell_is_empty(piece)\n\n def _cell_is_empty(self, piece):\n if piece == 0:\n return True\n else:\n return False\n\n @staticmethod\n def reverse_board(board):\n board = board[::-1]\n for i in xrange(0, len(board)):\n board[i] = board[i][::-1]\n\n return board\n\n @staticmethod\n def check_moves_are_same_piece(moves):\n try:\n assert moves[1]['fromPos'] == moves[0]['toPos']\n assert moves[2]['fromPos'] == moves[1]['toPos']\n assert moves[3]['fromPos'] == moves[2]['toPos']\n\n return True\n except:\n return False\n\n @staticmethod\n def get_cells_between_inclusive(fromPos, toPos):\n '''Will only work if we're moving on a single axis.'''\n if fromPos['x'] == toPos['x']:\n axis = 'y'\n op_axis = 'x'\n else:\n axis = 'x'\n op_axis = 'y'\n\n cells = []\n\n smallest = min(fromPos[axis], toPos[axis])\n biggest = max(fromPos[axis], toPos[axis])\n for i in xrange(smallest, biggest + 1):\n cell = {}\n cell[op_axis] = fromPos[op_axis] # Doesn't matter if it's fromPos or toPos here...\n cell[axis] = i\n\n cells.append(cell)\n\n return cells\n","repo_name":"benletchford/stratego.io","sub_path":"gae/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"32"} +{"seq_id":"33333805498","text":"# python 3.6\n\nimport os\nimport numpy as np\nfrom scipy.io import mmread, mmwrite\nfrom scipy.sparse import csr_matrix\nos.chdir(\"../data/processed/\") # set data directory\n\n\n### LOAD CELL X PEAK ###\nprint(\"Loading peak accesibility data\")\nfname = \"greenleaf_scATAC_peaks_filtered.mtx\"\npeaks = mmread(fname) # read in the sparse matrix\npeaks = peaks.tocsr()\n\nfname = \"greenleaf_scATAC_peaks_filtered.txt\"\nwith open(fname, \"r\") as f:\n peaknames = [p.strip(\"\\n\") for p in f.readlines()]\nf.close()\n\n\n### FILTER OUT NON-PRESENT PEAKS ###\nidxs = np.where(peaks.getnnz(axis=0) > 0)[0]\npeaks = peaks[:, idxs]\npeaks = csr_matrix(peaks)\n\n\n### CREATE A HIGHLY EXPRESSED SUBSET ###\nprint(\"Creating subset of peaks found in >= 1% of cells\")\nidxs = np.where(peaks.getnnz(axis=0) >= (peaks.shape[0] * 0.01))[0]\nsub1 = peaks[:, idxs]\nsub1 = csr_matrix(sub1)\n\nprint(\"Saving highly expressed 
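# --------------------------------------------------------------------------
# Illustrative aside on the stratego models.py record above (not part of any
# dataset record): the two-square rule check expands each single-axis move
# into the cells it crosses via get_cells_between_inclusive. A hedged,
# self-contained re-implementation of just that helper:
def cells_between_inclusive(from_pos, to_pos):
    axis, other = ('y', 'x') if from_pos['x'] == to_pos['x'] else ('x', 'y')
    lo, hi = sorted((from_pos[axis], to_pos[axis]))
    return [{other: from_pos[other], axis: i} for i in range(lo, hi + 1)]

# A scout moving from (x=2, y=0) to (x=5, y=0) touches four cells:
assert cells_between_inclusive({'x': 2, 'y': 0}, {'x': 5, 'y': 0}) == [
    {'x': 2, 'y': 0}, {'x': 3, 'y': 0}, {'x': 4, 'y': 0}, {'x': 5, 'y': 0}]
# --------------------------------------------------------------------------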
subset\")\nfname = \"greenleaf_scATAC_peaks_common.mtx\"\nmmwrite(fname, sub1)\n\nprint(\"Saving common peak names\")\nsub1names = list(np.array(peaknames)[idxs])\nfname = \"greenleaf_scATAC_peaks_common_labels.txt\"\nwith open(fname, \"w\") as f:\n for p in sub1names:\n f.write(\"{0}\\n\".format(p))\nf.close()\n\n### CREATE A MEDIUM EXPRESSED SUBSET ###\nprint(\"Creating subset of peaks found in 0.5%-1% of cells\")\nidxs = np.where((peaks.getnnz(axis=0) < (peaks.shape[0] * 0.01)) &\n (peaks.getnnz(axis=0) >= (peaks.shape[0] * 0.005)))[0]\nsub2 = peaks[:, idxs]\nsub2 = csr_matrix(sub2)\n\nprint(\"Saving lowly expressed subset\")\nfname = \"greenleaf_scATAC_peaks_medium.mtx\"\nmmwrite(fname, sub2)\n\nprint(\"Saving rare peak names\")\nsub2names = list(np.array(peaknames)[idxs])\nfname = \"greenleaf_scATAC_peaks_medium_labels.txt\"\nwith open(fname, \"w\") as f:\n for p in sub2names:\n f.write(\"{0}\\n\".format(p))\nf.close()\n\n### CREATE A LOWLY EXPRESSED SUBSET\nprint(\"Creating subset of peaks found in < 0.5% of cells\")\nidxs = np.where(peaks.getnnz(axis=0) < (peaks.shape[0] * 0.005))[0]\nsub3 = peaks[:, idxs]\nsub3 = csr_matrix(sub3)\n\nprint(\"Saving lowly expressed subset\")\nfname = \"greenleaf_scATAC_peaks_rare.mtx\"\nmmwrite(fname, sub3)\n\nprint(\"Saving rare peak names\")\nsub3names = list(np.array(peaknames)[idxs])\nfname = \"greenleaf_scATAC_peaks_rare_labels.txt\"\nwith open(fname, \"w\") as f:\n for p in sub3names:\n f.write(\"{0}\\n\".format(p))\nf.close()\n","repo_name":"ohlerlab/SEMITONES_paper","sub_path":"scripts/22_subset_peaks_for_enrichment_scoring.py","file_name":"22_subset_peaks_for_enrichment_scoring.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"11446937921","text":"\"\"\"\nhttps://leetcode.com/problems/course-schedule\n\"\"\"\nfrom collections import defaultdict\nfrom enum import Enum\nclass color(Enum):\n WHITE = \"white\"\n GREY = \"grey\"\n BLACK = \"black\"\n\nclass Solution:\n def dfs(self, node, graph, visited):\n result = True\n if visited[node] == color.GREY:\n result = False\n if visited[node] == color.WHITE:\n visited[node] = color.GREY\n for n in graph[node]:\n result = result and self.dfs(n, graph, visited)\n visited[node] = color.BLACK\n return result\n\n\n def canFinish(self, numCourses, prerequisites):\n graph = defaultdict(list)\n\n for u,v in prerequisites:\n graph[u].append(v)\n\n visited = [color.WHITE for i in range(numCourses)]\n result = True\n for n in range(numCourses):\n result = result and self.dfs(n, graph, visited)\n if result == False: return result\n return result\n\nprint(Solution().canFinish(5, [[0,1], [1,2], [3,2], [4,1], [2,4]]))\n\n\n","repo_name":"scarlettlite/hackathon","sub_path":"Graph/DFS/CourseScedule.py","file_name":"CourseScedule.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18477096805","text":"\"\"\" The Workflow Task Agent takes workflow tasks created in the\n transformation database and submits to the workload management system.\n\"\"\"\n\nfrom DIRAC import S_OK\n\nfrom DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations\nfrom DIRAC.TransformationSystem.Agent.TaskManagerAgentBase import TaskManagerAgentBase\n\n__RCSID__ = \"$Id$\"\n\nAGENT_NAME = 'Transformation/WorkflowTaskAgent'\n\nclass WorkflowTaskAgent( TaskManagerAgentBase ):\n \"\"\" An AgentModule class to submit workflow 
tasks\n \"\"\"\n def __init__( self, *args, **kwargs ):\n \"\"\" c'tor\n \"\"\"\n TaskManagerAgentBase.__init__( self, *args, **kwargs )\n\n self.transType = []\n\n def initialize( self ):\n \"\"\" Standard initialize method\n \"\"\"\n res = TaskManagerAgentBase.initialize( self )\n if not res['OK']:\n return res\n\n agentTSTypes = self.am_getOption( 'TransType', [] )\n if agentTSTypes:\n self.transType = agentTSTypes\n else:\n self.transType = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )\n\n return S_OK()\n","repo_name":"mcorvo/DIRAC","sub_path":"TransformationSystem/Agent/WorkflowTaskAgent.py","file_name":"WorkflowTaskAgent.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"26780064436","text":"# try hatasız ise else ve finallye gider. try hatalı ise except ve finally e gider\nwhile True:\n try:\n benimInt=int(input(\"numara giriniz: \"))\n except:\n print(\"yanlış girdi verdiniz\")\n continue\n else:\n print(\"teşekkürler\")\n break\n finally:\n print(\"finally çağrıldı\")\n ","repo_name":"semihuzunCE/Python","sub_path":"17-HatalarıEleAlmak.py","file_name":"17-HatalarıEleAlmak.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31564022132","text":"from brownie import config, accounts, network, chain, web3\n# addr 0x833514593c7798551A20Ac69f98D486e2A12dFe8\nprivate_key = config[\"wallets\"][\"from_key\"]\nwallet: network.account.LocalAccount = accounts.add(private_key)\n\n# default: {\"from\": wallet, \"gas_limit\": 1000000, \"priority_fee\": \"1 gwei\"}\nif network.chain.id == 1337:\n accounts[0].transfer(wallet, \"10 ether\")\nelse:\n network.priority_fee(\"3 gwei\")\naccounts.default = wallet\nnetwork.gas_limit(1000000)\n","repo_name":"MidnightLady/ethernaut_brownie","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16594091430","text":"import time\r\nfrom src.Game import Game, Player, Bomb, in_explosion_range\r\nfrom src.State import State\r\nfrom src.Algorithms import min_max, alpha_beta\r\n\r\n\r\ndef print_if_final(current_state):\r\n \"\"\"\r\n Print the winner or \"draw\" if the game is over\r\n :param current_state: object of type State\r\n :return: True if the game is over, or False\r\n \"\"\"\r\n final = current_state.game.final()\r\n if final:\r\n if final == \"draw\":\r\n print(\"Draw!\")\r\n elif final == Game.MIN_PLAYER:\r\n print(\"You won!\")\r\n else:\r\n print(\"you lost...\")\r\n print(f\"Your score is: {current_state.game.score(Game.MIN_PLAYER)}\")\r\n print(f\"PC's score is: {current_state.game.score(Game.MAX_PLAYER)}\")\r\n return True\r\n return False\r\n\r\n\r\ndef update_current_state(current_state):\r\n \"\"\"\r\n Update the current_state with the input that the player choose\r\n :param current_state: object of type State\r\n :return: True if the game will continue, False if the player choose to quit\r\n \"\"\"\r\n old_position = current_state.game.player_min.position\r\n new_position = None\r\n\r\n # Ask the player in which direction he wants to go\r\n valid_answer = False\r\n while not valid_answer:\r\n direction = input(\"Choose the direction you want to go or write 'exit' if you want to quit. 
(w for up, s for down, a for left, d for right)\\n\")\r\n if direction == 'exit':\r\n return False\r\n if direction in [\"w\", \"s\", \"a\", \"d\"]:\r\n if direction == \"w\":\r\n if current_state.game.the_map[old_position[0] - 1][old_position[1]] not in [\"#\", \"b\"]:\r\n valid_answer = True\r\n new_position = (old_position[0] - 1, old_position[1])\r\n else:\r\n print(\"There is a wall or a bomb there, please choose another direction\\n\")\r\n elif direction == \"s\":\r\n if current_state.game.the_map[old_position[0] + 1][old_position[1]] not in [\"#\", \"b\"]:\r\n valid_answer = True\r\n new_position = (old_position[0] + 1, old_position[1])\r\n else:\r\n print(\"There is a wall or a bomb there, please choose another direction\\n\")\r\n elif direction == \"a\":\r\n if current_state.game.the_map[old_position[0]][old_position[1] - 1] not in [\"#\", \"b\"]:\r\n valid_answer = True\r\n new_position = (old_position[0], old_position[1] - 1)\r\n else:\r\n print(\"There is a wall or a bomb there, please choose another direction\\n\")\r\n else:\r\n if current_state.game.the_map[old_position[0]][old_position[1] + 1] not in [\"#\", \"b\"]:\r\n valid_answer = True\r\n new_position = (old_position[0], old_position[1] + 1)\r\n else:\r\n print(\"There is a wall or a bomb there, please choose another direction\\n\")\r\n else:\r\n print(\"Please enter w, s, a or d\\n\")\r\n\r\n # Update the map\r\n current_state.game.the_map[old_position[0]][old_position[1]] = \" \"\r\n # Check if in the new position is a protection\r\n if current_state.game.the_map[new_position[0]][new_position[1]] == 'p':\r\n current_state.game.player_min.protection += 1\r\n current_state.game.the_map[new_position[0]][new_position[1]] = Game.MIN_PLAYER\r\n current_state.game.player_min.position = new_position\r\n\r\n # Check if the new position is in the range of an explosion (updates the protection and remove some bombs if necessary)\r\n check_explosion(current_state, current_state.game.player_min)\r\n\r\n # If the player didn't have any bomb, ask him if he wants to place one behind him\r\n if current_state.game.player_min.bomb is None:\r\n valid_answer = False\r\n while not valid_answer:\r\n answer = input(\"Do you want to place an inactive bomb behind you? Answer with y/n\\n\")\r\n if answer in ['y', 'n']:\r\n if answer == 'y':\r\n current_state.game.the_map[old_position[0]][old_position[1]] = \"b\"\r\n current_state.game.player_min.bomb = Bomb(\"inactive\", old_position)\r\n valid_answer = True\r\n else:\r\n print(\"You didn't choose the correct answer...\\n\")\r\n\r\n # If the player has an inactive bomb placed on the map, ask him if he wants to activate it\r\n elif current_state.game.player_min.bomb.status == \"inactive\":\r\n valid_answer = False\r\n while not valid_answer:\r\n answer = input(\"Do you want to activate the bomb? Answer with y/n\\n\")\r\n if answer in ['y', 'n']:\r\n if answer == 'y':\r\n current_state.game.player_min.bomb.status = \"active\"\r\n valid_answer = True\r\n else:\r\n print(\"You didn't choose the correct answer...\\n\")\r\n\r\n # If the player has an active bomb placed on the map, ask him if he wants to explode it and place another one behind him\r\n else:\r\n valid_answer = False\r\n while not valid_answer:\r\n answer = input(\"Do you want to place an inactive bomb behind you (the bomb you already have will explode)? 
Answer with y/n\\n\")\r\n if answer in ['y', 'n']:\r\n if answer == 'y':\r\n current_state.game.the_map[current_state.game.player_min.bomb.position[0]][current_state.game.player_min.bomb.position[1]] = \" \"\r\n current_state.game.the_map[old_position[0]][old_position[1]] = \"b\"\r\n current_state.game.player_min.bomb = Bomb(\"inactive\", old_position)\r\n valid_answer = True\r\n else:\r\n print(\"You didn't choose the correct answer...\\n\")\r\n\r\n return True\r\n\r\n\r\ndef initialize_game(path):\r\n \"\"\"\r\n :param path: string, representing the path of the file where the map is stored\r\n :return: object of type Game, representing the intial Game\r\n \"\"\"\r\n with open(path, \"r\") as f:\r\n lines = f.readlines()\r\n the_map = [[c for c in line if c != \"\\n\"] for line in lines]\r\n\r\n player_min = None\r\n player_max = None\r\n for i in range(len(the_map)):\r\n for j in range(len(the_map[i])):\r\n if the_map[i][j] == Game.MIN_PLAYER:\r\n player_min = Player(0, (i, j), None)\r\n elif the_map[i][j] == Game.MAX_PLAYER:\r\n player_max = Player(0, (i, j), None)\r\n\r\n return Game(player_min, player_max, the_map)\r\n\r\n\r\ndef check_explosion(current_state, player):\r\n \"\"\"\r\n Check if the \"player\" is in the range of an explosion, if yes decrease his protection and explode the bomb\r\n :param current_state: object of type State, representing the current state\r\n :param player: object of type Player, representing the player checked for the explosion\r\n \"\"\"\r\n explosion = in_explosion_range(current_state.game, player.position)\r\n if explosion[0]:\r\n player.protection -= 1\r\n if current_state.game.player_min.bomb and explosion[1] == current_state.game.player_min.bomb.position:\r\n current_state.game.player_min.bomb = None\r\n else:\r\n current_state.game.player_max.bomb = None\r\n current_state.game.the_map[explosion[1][0]][explosion[1][1]] = \" \"\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Here's where the magic happens\r\n \"\"\"\r\n\r\n # Ask what algorithm to use (Minimax or Alpha-Beta)\r\n algorithm_type = None\r\n valid_answer = False\r\n while not valid_answer:\r\n algorithm_type = input(\"What algorithm do you wanna use? Minimax(1) or Alpha-Beta(2)\\n\")\r\n if algorithm_type in ['1', '2']:\r\n valid_answer = True\r\n else:\r\n print(\"You didn't choose the correct answer...\\n\")\r\n\r\n # Ask the difficulty of the game\r\n valid_answer = False\r\n while not valid_answer:\r\n difficulty = input(\"Choose difficulty: hard / medium / easy\\n\")\r\n if difficulty == \"hard\":\r\n State.DEPTH_MAX = 9\r\n valid_answer = True\r\n elif difficulty == \"medium\":\r\n State.DEPTH_MAX = 6\r\n valid_answer = True\r\n elif difficulty == \"easy\":\r\n State.DEPTH_MAX = 3\r\n valid_answer = True\r\n else:\r\n print(\"You didn't choose the correct answer...\\n\")\r\n\r\n # Ask if he wants to be the first or the second\r\n valid_answer = False\r\n while not valid_answer:\r\n Game.MIN_PLAYER = input(\"Do you wanna be the first or the second player? 
Answer with 1 or 2\\n\")\r\n if Game.MIN_PLAYER in ['1', '2']:\r\n valid_answer = True\r\n else:\r\n print(\"You didn't choose the correct answer...\\n\")\r\n Game.MAX_PLAYER = '1' if Game.MIN_PLAYER == '2' else '2'\r\n\r\n print(\"Game starting...\")\r\n\r\n # Creating the initial state\r\n game = initialize_game(\"map.txt\")\r\n print(str(game))\r\n current_state = State(game, '1', State.DEPTH_MAX)\r\n\r\n min_player_moves = 0\r\n max_player_moves = 0\r\n\r\n while True:\r\n # at the start of any turn, check if any of the players are in the range of the explosion (and update the attributes)\r\n check_explosion(current_state, current_state.game.player_min)\r\n check_explosion(current_state, current_state.game.player_max)\r\n\r\n # Player's turn\r\n if current_state.current_player_symbol == Game.MIN_PLAYER:\r\n time_start = time.time()\r\n\r\n if update_current_state(current_state) is False:\r\n print(\"You quit the game...\")\r\n print(f\"Your score is: {current_state.game.score(Game.MIN_PLAYER)}\")\r\n print(f\"PC's score is: {current_state.game.score(Game.MAX_PLAYER)}\")\r\n break\r\n print(\"After your move:\\n\")\r\n print(str(current_state.game))\r\n\r\n time_stop = time.time()\r\n print(f\"It took {time_stop - time_start} for the player to make the move\\n\")\r\n if print_if_final(current_state):\r\n break\r\n current_state.current_player_symbol = current_state.opossite_player()\r\n min_player_moves += 1\r\n\r\n # PC's turn\r\n else:\r\n time_start = time.time()\r\n\r\n if algorithm_type == '1':\r\n updated_state = min_max(current_state)\r\n else:\r\n updated_state = alpha_beta(-500, 500, current_state)\r\n current_state.game = updated_state.chosen_state.game\r\n print(\"After pc's move:\\n\")\r\n print(str(current_state.game))\r\n\r\n time_stop = time.time()\r\n print(f\"It took {time_stop - time_start} for the pc to make the move\\n\")\r\n if print_if_final(current_state):\r\n break\r\n current_state.current_player_symbol = current_state.opossite_player()\r\n max_player_moves += 1\r\n\r\n print(f\"You made {min_player_moves} moves; The PC made {max_player_moves} moves.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n total_time_start = time.time()\r\n main()\r\n total_time_stop = time.time()\r\n print(f\"The game was running for {total_time_stop - total_time_start}\")\r\n","repo_name":"Nacu77/Bomberman","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":11148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"30013847936","text":"from itertools import permutations\n\ndef is_prime_num(n):\n if n == 1 or n == 0:\n return False\n for i in range(2, int(pow(n, 0.5))+1): # take int(sqrt(n)) and add 1 for the range bound\n if n % i == 0:\n return False\n return True\n\ndef solution(numbers):\n count = 0\n answer = []\n for i in range(len(numbers)):\n x = list(permutations(numbers, i+1))\n for j in range(len(x)):\n answer.append(int(\"\".join(list(x[j]))))\n answer = list(set(answer))\n \n for one in answer:\n if is_prime_num(one):\n print(one)\n count += 1\n\n\n\n return count\n\n\nprint(solution(\"17\")) # 3\nprint()\nprint(solution(\"011\")) # 2\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kangtae210/leetCode","sub_path":"p_lv2/find_prime_in_arr.py","file_name":"find_prime_in_arr.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"1894083575","text":"amount_wanted = float(input())\r\n\r\ncocktail = input()\r\n\r\nincome_amount = 
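# --------------------------------------------------------------------------
# Illustrative aside on the Bomberman record above (not part of any dataset
# record): it imports min_max and alpha_beta from src.Algorithms, which this
# slice does not include, and the real alpha_beta(-500, 500, state) returns a
# state carrying chosen_state. A generic alpha-beta pruning sketch over an
# abstract game, purely to show the technique (signatures are illustrative):
def alpha_beta(state, depth, alpha, beta, maximizing, moves, evaluate):
    children = moves(state)
    if depth == 0 or not children:
        return evaluate(state)
    if maximizing:
        value = float('-inf')
        for child in children:
            value = max(value, alpha_beta(child, depth - 1, alpha, beta, False, moves, evaluate))
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # beta cutoff: MIN would never allow this line
        return value
    value = float('inf')
    for child in children:
        value = min(value, alpha_beta(child, depth - 1, alpha, beta, True, moves, evaluate))
        beta = min(beta, value)
        if alpha >= beta:
            break  # alpha cutoff: MAX already has a better option
    return value
# --------------------------------------------------------------------------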
0\r\nwhile cocktail != 'Party!':\r\n number_of_cocktails = int(input())\r\n cocktail_len = len(cocktail)\r\n price = number_of_cocktails * cocktail_len\r\n if price % 2 != 0:\r\n price *= 0.75\r\n income_amount += price\r\n if amount_wanted <= income_amount:\r\n print(\"Target acquired.\")\r\n break\r\n cocktail = input()\r\ndiff = abs(amount_wanted - income_amount)\r\nif cocktail == 'Party!':\r\n print(f\"We need {diff:.2f} leva more.\")\r\nprint(f\"Club income - {income_amount:.2f} leva.\")","repo_name":"LazChu/SoftUni-projects","sub_path":"Programming Basics with Python/exams/exam_6_7_july/club.py","file_name":"club.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15642512176","text":"#!/usr/bin/env python\n#import os, sys\nimport sys\nfrom unittest import TestSuite\n\nfrom boot_django import boot_django\n\n# call the django setup routine\nboot_django()\n\n#from django.core.management import call_command\n#call_command('shell')\n\ndefault_labels = ['awl.tests', ]\n\ndef get_suite(labels=default_labels):\n from awl.waelsteng import WRunner\n runner = WRunner(verbosity=1)\n failures = runner.run_tests(labels)\n if failures:\n sys.exit(failures)\n\n # in case this is called from setup tools, return a test suite\n return TestSuite()\n\n\nif __name__ == '__main__':\n labels = default_labels\n if len(sys.argv[1:]) > 0:\n labels = sys.argv[1:]\n\n get_suite(labels)\n","repo_name":"cltrudeau/django-awl","sub_path":"load_tests.py","file_name":"load_tests.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"32"} +{"seq_id":"24353251257","text":"import os\r\nimport shutil\r\nimport customtkinter\r\n\r\ncustomtkinter.set_appearance_mode(\"system\")\r\ncustomtkinter.set_default_color_theme(\"dark-blue\")\r\n\r\nroot = customtkinter.CTk()\r\nroot.geometry(\"1000x500\")\r\ndef path():\r\n\r\n pathinput = entry1.get()\r\n print(pathinput)\r\n\r\n files = os.listdir(pathinput)\r\n\r\n for file in files:\r\n filename, extension = os.path.splitext(file)\r\n extension = extension[1:]\r\n\r\n if os.path.exists(pathinput + '/' + extension):\r\n shutil.move(pathinput + '/' + file, pathinput + '/' + extension + '/' + file)\r\n else:\r\n os.makedirs(pathinput + '/' + extension)\r\n shutil.move(pathinput + '/' + file, pathinput + '/' + extension + '/' + file)\r\n\r\n\r\nframe = customtkinter.CTkFrame(master=root)\r\nframe.pack(pady=20, padx=60, fill=\"both\", expand=True)\r\n\r\nlabel = customtkinter.CTkLabel(master=frame, text=\"File arranger\")\r\nlabel.pack(pady=12, padx=10)\r\n\r\nentry1 = customtkinter.CTkEntry(master=frame, placeholder_text=\"Enter path\")\r\nentry1.pack(pady=12, padx=80)\r\n\r\nbutton = customtkinter.CTkButton(master=frame, text=\"Submit\", command=path)\r\nbutton.pack(pady=12, padx=10)\r\n\r\nroot.mainloop()\r\n\r\n\r\n","repo_name":"Juls-123/Automated-File-Organizer-with-GUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40177248797","text":"from typing import List, Optional\n\nimport sqlalchemy.orm\nfrom data.package import Package\nfrom data.release import Release\nfrom sqlalchemy import func\nfrom sqlalchemy.future import select\n\nfrom data import db_session\n\n\nasync def release_count() -> int:\n async with db_session.create_async_session() as 
session:\n query = select(func.count(Release.id))\n results = await session.execute(query)\n\n return results.scalar()\n\n\nasync def package_count() -> int:\n async with db_session.create_async_session() as session:\n query = select(func.count(Package.id))\n results = await session.execute(query)\n\n return results.scalar()\n\n\nasync def latest_packages(limit: int = 5) -> List[Package]:\n async with db_session.create_async_session() as session:\n query = select(Release) \\\n .options(\n sqlalchemy.orm.joinedload(Release.package)) \\\n .order_by(Release.created_date.desc()) \\\n .limit(limit)\n\n results = await session.execute(query)\n releases = results.scalars()\n\n return list({r.package for r in releases})\n\n\nasync def get_package_by_id(package_name: str) -> Optional[Package]:\n async with db_session.create_async_session() as session:\n query = select(Package).filter(Package.id == package_name)\n result = await session.execute(query)\n\n return result.scalar_one_or_none()\n\n\nasync def get_latest_release_for_package(package_name: str) -> Optional[Release]:\n async with db_session.create_async_session() as session:\n query = select(Release) \\\n .filter(Release.package_id == package_name) \\\n .order_by(Release.created_date.desc())\n\n results = await session.execute(query)\n release = results.scalar()\n\n return release\n","repo_name":"talkpython/web-applications-with-fastapi-course","sub_path":"code/ch8-async-databases/services/package_service.py","file_name":"package_service.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":313,"dataset":"github-code","pt":"32"} +{"seq_id":"15219740840","text":"#!/usr/bin/env python\n\nfrom neuron import h\nimport numpy as np\nimport numpy.random as rnd\nimport ConfigParser as cp\nimport sys\nimport os\nimport tables as tbl\nimport time\n\nDEBUG = False\ndefault_configuration_file = 'simulator.cfg'\n\nclass SimulationDetails(tbl.IsDescription):\n neuron = tbl.StringCol(2)\n L = tbl.Float64Col()\n diam = tbl.Float64Col()\n Rin = tbl.Float64Col()\n T = tbl.Float64Col()\n Tdelay = tbl.Float64Col()\n dt = tbl.Float64Col()\n Ibase = tbl.Float64Col()\n Iperturb = tbl.Float64Col()\n durperturb = tbl.Float64Col()\n noisemu = tbl.Float64Col()\n noisesigma = tbl.Float64Col()\n noisetau = tbl.Float64Col()\n with_pid = tbl.Int32Col()\n\ndef savePRCData(filename, neuron_type, L, diam, Rin, T, Tdelay, dt, Ibase, Iperturb, \\\n durperturb, noise_mu, noise_sigma, noise_tau, tspikes, tperturb, with_pid):\n h5file = tbl.openFile(filename, mode='w', title='Simulations for PRC calculation')\n table = h5file.createTable(h5file.root, 'Details', SimulationDetails, 'Simulation info')\n details = table.row\n details['neuron'] = neuron_type\n details['L'] = L\n details['diam'] = diam\n details['Rin'] = Rin\n details['T'] = T\n details['Tdelay'] = Tdelay\n details['dt'] = dt\n details['Ibase'] = Ibase\n details['Iperturb'] = Iperturb\n details['durperturb'] = durperturb\n details['with_pid'] = with_pid\n details['noisemu'] = noise_mu\n details['noisesigma'] = noise_sigma\n details['noisesigma'] = noise_tau\n details.append()\n group = h5file.createGroup(h5file.root, 'Data', 'Spikes and perturbations times')\n h5file.createArray(group, 'spikes', tspikes, 'Spikes times')\n h5file.createArray(group, 'perturb', tperturb, 'Perturbation times')\n h5file.close()\n\ndef makeOutputFilename(prefix='', extension='.out'):\n filename = prefix\n if prefix != '' and prefix[-1] != '_':\n filename = filename + '_'\n now = 
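# --------------------------------------------------------------------------
# Illustrative aside on the package_service record above (not part of any
# dataset record): every service function follows the same pattern -- open an
# async session, build a 2.0-style select(), execute, unwrap scalars. A
# hedged usage sketch, assuming the record's module and a configured
# db_session (neither is reproduced here):
import asyncio

async def show_stats():
    packages = await package_count()   # coroutines defined in the record
    releases = await release_count()
    print(f'{packages:,} packages, {releases:,} releases')

# asyncio.run(show_stats())  # requires the database session to be set up first
# --------------------------------------------------------------------------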
time.localtime(time.time())\n filename = filename + '%d%02d%02d-%02d%02d%02d' % \\\n (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n if extension[0] != '.':\n extension = '.' + extension\n suffix = ''\n k = 0\n while os.path.exists(filename + suffix + extension):\n k = k+1\n suffix = '_%d' % k\n return filename + suffix + extension\n\ndef usage():\n print('')\n print('This script can be used to compute the PRC of a Purkinje cell or its CV in the presence')\n print('of channel noise.')\n print('')\n print('Usage:')\n print('')\n print(' %s prc [options] [configuration file]' % os.path.basename(sys.argv[0]))\n print(' %s cv [options] length diameter' % os.path.basename(sys.argv[0]))\n print(' %s [-h|--help] print this help message and exit.' % os.path.basename(sys.argv[0]))\n print('')\n print('If \"prc\" is specified as a first argument, the script will compute a PRC, using the options')\n print('contained in the (optional) configuration file passed as an argument. Alternatively, the script')\n print('will look for a file called %s.' % default_configuration_file)\n print('Additionally, the following options are accepted:')\n print('')\n print(' -o,--output specify the path of the file where the results will be saved.')\n print(' --with-pid use a PID controller to clamp the frequency and deliver the stimulation pulses.')\n print('')\n print('If \"cv\" is specified as a first argument, the script will simulate a model containing channel')\n print('noise for 3 seconds and will print the coefficient of variation of the inter-spike intervals.')\n print('As an example, a length of 60 um and a diameter of 50 um lead to a CV of approximately 0.1, while')\n print('a length of 110 um and a diameter of 100 um lead to a CV of approximately 0.05.')\n print('Additionally, the following options are accepted:')\n print('')\n print(' -d,--duration specify the duration of the simulation (in ms, default 5000).')\n print(' -t,--transient specify the duration of the transient (in ms, default 1000).')\n print(' -f,--firing-rate specify the firing rate of the cell (in Hz, default 30).')\n print(' --dt specify the timestep (in ms, default 0.001).')\n print('')\n print('Author: Daniele Linaro - danielelinaro@gmail.com')\n print('')\n\ndef parseArgs():\n import getopt\n\n if len(sys.argv) == 1:\n print('Type %s -h for help on how to use this program.' 
% os.path.basename(sys.argv[0]))\n sys.exit(1)\n\n if sys.argv[1] in ('-h','--help'):\n usage()\n sys.exit(0)\n\n if not sys.argv[1].lower() in ('prc','cv'):\n print('Mode must be either \"prc\" or \"cv\".')\n sys.exit(1)\n\n options = {'mode': sys.argv[1].lower()}\n if options['mode'] == 'prc':\n options['config_file'] = default_configuration_file\n options['output_file'] = makeOutputFilename('prc_', '.h5')\n options['use_pid'] = False\n opts,args = getopt.getopt(sys.argv[2:], 'o:', ['with-pid','output='])\n for o,a in opts:\n if o in ('-o','--output'):\n options['output_file'] = a\n elif o == '--with-pid':\n options['use_pid'] = True\n if len(args) == 1:\n options['config_file'] = args[0]\n elif len(args) > 1:\n print('You can specify only one configuration file.')\n sys.exit(1)\n else:\n opts,args = getopt.getopt(sys.argv[2:], 'd:t:f:', ['duration=','transient=','dt=','firing-rate='])\n options['duration'] = 5000\n options['transient'] = 1000\n options['dt'] = 0.001\n options['firing_rate'] = 30\n for o,a in opts:\n if o in ('-d','--duration'):\n options['duration'] = float(a)\n elif o in ('-t','--transient'):\n options['transient'] = float(a)\n elif o == '--dt':\n options['dt'] = float(a)\n elif o in ('-f','--firing-rate'):\n options['firing_rate'] = float(a)\n if len(args) != 2:\n print('You must specify length and diameter of the cell.')\n sys.exit(0)\n options['length'] = float(args[0])\n options['diameter'] = float(args[1])\n return options\n\ndef makeKR(L=20, diam=20, type='deterministic'):\n sec = h.Section()\n if type == 'deterministic':\n suffix = ''\n elif type == 'stochastic':\n suffix = '_cn'\n else:\n raise Exception('KR: no such mechanism.') \n sec.insert('naRsg' + suffix)\n sec.insert('kpkj' + suffix)\n sec.insert('kpkj2' + suffix)\n sec.insert('kpkjslow' + suffix)\n sec.insert('bkpkj' + suffix)\n sec.insert('hpkj' + suffix)\n sec.insert('cadiff')\n sec.insert('cap')\n sec.insert('lkpkj')\n if type == 'stochastic':\n import numpy.random as rnd\n rnd.seed(int(time.time()))\n for mech in sec(0.5):\n if 'cn' in mech.name():\n mech.seed = int(rnd.uniform() * 100000)\n print('%s>> seed: %ld' % (mech.name().split('_')[0],mech.seed))\n sec.L = L\n sec.diam = diam\n sec.ena = 60\n sec.ek = -88\n return sec\n\ndef computeInputResistance(segment, Irange, dur, delay, dt=0.005, plot=False):\n if plot:\n import pylab as p\n stim = makeIclamp(segment, dur, 0, delay)\n rec = makeRecorders(segment, {'v': '_ref_v'})\n ap = h.APCount(segment)\n ap.thresh = -20\n spks = h.Vector()\n ap.record(spks)\n I = []\n V = []\n h.load_file('stdrun.hoc')\n h.dt = dt\n h.celsius = 36\n h.tstop = dur+delay*2\n if plot:\n p.figure()\n p.subplot(1,2,1)\n for k,i in enumerate(np.arange(Irange[0],Irange[1],Irange[2])):\n spks.clear()\n ap.n = 0\n stim.amp = i\n h.run()\n spike_times = np.array(spks)\n if len(np.intersect1d(np.nonzero(spike_times>delay)[0], np.nonzero(spike_times delay+0.75*dur)[0], np.nonzero(t < delay+dur)[0])\n I.append(i)\n V.append(np.mean(v[idx]))\n else:\n print('The neuron emitted spikes at I = %g pA' % (stim.amp*1e3))\n if plot:\n p.plot(1e-3*t,v)\n V = np.array(V)*1e-3\n I = np.array(I)*1e-9\n poly = np.polyfit(I,V,1)\n if plot:\n ymin,ymax = p.ylim()\n p.plot([1e-3*(delay+0.75*dur),1e-3*(delay+0.75*dur)],[ymin,ymax],'r--')\n p.plot([1e-3*(delay+dur),1e-3*(delay+dur)],[ymin,ymax],'r--')\n p.xlabel('t (s)')\n p.ylabel('V (mV)')\n p.box(True)\n p.grid(False)\n p.subplot(1,2,2)\n x = np.linspace(I[0],I[-1],100)\n y = np.polyval(poly,x)\n p.plot(1e12*x,1e3*y,'k--')\n 
p.plot(1e12*I,1e3*V,'bo')\n p.xlabel('I (pA)')\n p.ylabel('V (mV)')\n p.show()\n return poly[0]\n \ndef optimizeF(segment, F, ftol=0.1, dur=5000, dt=0.025, amp=[0,0.2], delay=200, maxiter=50):\n from sys import stdout\n f = F + 2*ftol\n iter = 0\n stim = makeIclamp(segment, dur, amp[1], delay)\n spks = h.Vector()\n apc = h.APCount(segment)\n apc.thresh = -20\n apc.record(spks)\n rec = makeRecorders(segment, {'v': '_ref_v'})\n print('\\nStarting frequency optimization: target is F = %.2f.' % F)\n\n h.load_file('stdrun.hoc')\n h.dt = dt\n h.celsius = 36\n h.tstop = dur+2*delay\n h.run()\n\n f = float(apc.n)/(dur*1e-3)\n if f < F:\n print('[00] !! Increase maximal current !!')\n raise Exception('Required frequency out of current bounds')\n else:\n print('[00] I = %.4f -> F = %.4f Hz.' % (stim.amp, f))\n\n while abs(F - f) > ftol and iter < maxiter:\n iter = iter+1\n stim.amp = (amp[0]+amp[1])/2\n stdout.write('[%02d] I = %.4f ' % (iter, stim.amp))\n spks = h.Vector()\n apc.n = 0\n apc.record(spks)\n h.t = 0\n h.run()\n if len(spks) == 0:\n amp[0] = stim.amp\n stdout.write('no spikes.\\n')\n stdout.flush()\n continue\n f = float(apc.n) / (dur*1e-3)\n stdout.write('-> F = %.4f Hz.\\n' % f)\n stdout.flush()\n if f > F:\n amp[1] = stim.amp\n else:\n amp[0] = stim.amp\n I = stim.amp\n del apc\n del stim\n del spks\n return f,I\n\ndef makeIclamp(segment, dur, amp, delay=0):\n stim = h.IClamp(segment)\n stim.delay = delay\n stim.dur = dur\n stim.amp = amp\n return stim\n\ndef makeNoisyIclamp(segment, dur, dt, mu, sigma, tau, delay=0, seed=int(time.time())):\n np.random.seed(seed)\n if abs(tau) < 1e-12:\n nsteps = int(np.ceil((dur)/dt)) + 1\n I = mu + sigma*np.random.normal(size=nsteps)\n else:\n nsteps = int(np.ceil((dur)/dt)) + 1\n coeff = np.exp(-dt/tau)\n I = (1-np.exp(-dt/tau))*mu + sigma * np.sqrt(2*dt/tau) * np.random.normal(size=nsteps)\n I[0] = mu\n for i in range(1,nsteps):\n I[i] = I[i] + coeff*I[i-1]\n vec = h.Vector(I)\n stim = h.IClamp(segment)\n stim.dur = dur\n stim.delay = delay\n vec.play(stim._ref_amp,dt)\n return stim,vec\n\ndef makeRecorders(segment, labels, rec=None):\n if rec is None:\n rec = {'t': h.Vector()}\n rec['t'].record(h._ref_t)\n for k,v in labels.items():\n rec[k] = h.Vector()\n rec[k].record(getattr(segment, v))\n return rec\n\ndef computeCV(L,diam,dur,ttran,dt,firing_rate):\n n = makeKR(L,diam,'deterministic')\n n.push()\n ref_area = 50*50*np.pi\n interval = [-0.1,0.4*h.area(0.5)/ref_area]\n F = {}\n F['expected'],I0 = optimizeF(n(0.5),firing_rate,amp=interval)\n n = makeKR(L,diam,'stochastic')\n stim = makeIclamp(n(0.5),dur,I0,0)\n spks = h.Vector()\n apc = h.APCount(n(0.5))\n apc.thresh = -20\n apc.record(spks)\n rec = makeRecorders(n(0.5),{'v':'_ref_v'})\n h.load_file('stdrun.hoc')\n h.celsius = 36\n h.dt = dt\n h.tstop = dur\n h.run()\n spks = np.array(spks)\n isi = np.diff(spks[spks>=ttran])\n F['measured'] = (len(isi)+1)/(dur-ttran)*1e3\n return np.std(isi)/F['measured'],F,I0\n\ndef main():\n options = parseArgs()\n\n if options['mode'] == 'cv':\n CV,F,I0 = computeCV(options['length'],options['diameter'],\\\n options['duration'],options['transient'],options['dt'],options['firing_rate'])\n print(('Length = %g um.\\nDiameter = %g um.\\nExpected firing rate = %g Hz.\\n' + \\\n 'Measured firing rate = %g Hz.\\nI0 = %g nA.\\nCV = %g.') % \\\n (options['length'],options['diameter'],F['expected'],F['measured'],I0,CV))\n with open('CVs.txt','a') as fid:\n fid.write('%10.4f %10.4f %9.1f %9.1f %8.4f %8.4f %8.4f %8.4f %10.5f\\n' % \\\n 
(options['length'],options['diameter'],\\\n options['duration'],options['transient'],options['dt'],\\\n F['expected'],F['measured'],I0,CV))\n sys.exit(0)\n\n if not os.path.exists(options['config_file']):\n print('%s: no such file.' % options['config_file'])\n sys.exit(1)\n fid = open(options['config_file'],'r')\n config = cp.ConfigParser()\n config.readfp(fid)\n fid.close()\n\n neuron_type = config.get('Neuron','type')\n prop = {'L': config.getfloat('Neuron','length'),\n 'diam': config.getfloat('Neuron','diameter')}\n\n neuron_mode = 'deterministic'\n if neuron_type[-2:] == 'cn':\n neuron_mode = 'stochastic'\n neuron_type = neuron_type[:2]\n\n if neuron_type == 'KR':\n n = makeKR(prop['L'],prop['diam'],'deterministic')\n Rin = computeInputResistance(n(0.5), [-0.3,-0.12,0.04], 2000, 200, h.dt, False)*1e-6\n if not options['use_pid']:\n F,I0 = optimizeF(n(0.5),config.getfloat('FiringRate','target_rate'),\n amp=[float(s) for s in config.get('FiringRate','current_range').split(',')])\n del n\n soma = makeKR(prop['L'],prop['diam'],neuron_mode)\n else:\n print('Unknown neuron type: ' + neuron_type + '. Aborting.')\n sys.exit(1)\n\n ntrials = config.getint('Simulation','trials')\n Tdelay = config.getfloat('Simulation','Tdelay')\n\n if not options['use_pid']:\n stimulation_period = config.getfloat('Simulation','T')\n else:\n target_rate = config.getfloat('FiringRate','target_rate')\n spikes_per_perturb = 6\n stimulation_period = 1000. * spikes_per_perturb / target_rate\n\n Ttotal = Tdelay + ntrials * stimulation_period\n perturbation = {'dur': config.getfloat('Perturbation','duration'), \\\n 'amp': config.getfloat('Perturbation','amplitude')}\n\n try:\n dt = config.getfloat('Simulation','dt')\n except cp.NoOptionError:\n dt = h.dt\n\n if neuron_mode == 'deterministic':\n noise_mu = config.getfloat('Noise','mean')\n noise_sigma = config.getfloat('Noise','stddev')\n noise_tau = config.getfloat('Noise','tau')\n noise_stim,noise_vec = makeNoisyIclamp(soma(0.5), Ttotal, dt, noise_mu, noise_sigma, noise_tau)\n else:\n noise_mu = 0.\n noise_sigma = 0.\n noise_tau = -1.\n\n if not options['use_pid']:\n base = makeIclamp(soma(0.5), Ttotal, I0, 0)\n perturb = makeIclamp(soma(0.5), perturbation['dur'], perturbation['amp'], 0) \n else:\n perturb = h.SobolPulses(soma(0.5))\n perturb.delay = Tdelay\n perturb.dur = perturbation['dur']\n perturb.amp = perturbation['amp']\n perturb.F = target_rate\n perturb.spkCount = spikes_per_perturb\n perturb.gp = 0.001\n perturb.gi = 0.1\n nc = [h.NetCon(soma(0.5)._ref_v,perturb,sec=soma), h.NetCon(perturb._ref_i,None)]\n nc[0].delay = 0\n nc[1].delay = 0\n nc[0].threshold = -20\n nc[1].threshold = 0.9 * config.getfloat('Perturbation','amplitude')\n perturb_times = h.Vector()\n nc[1].record(perturb_times)\n\n apc = h.APCount(soma(0.5))\n apc.thresh = -20\n data = {'spks': h.Vector(), 'perturb': np.zeros(ntrials)}\n apc.record(data['spks'])\n\n if DEBUG:\n rec = makeRecorders(soma(0.5), {'v':'_ref_v'})\n if neuron_mode == 'deterministic':\n rec = makeRecorders(noise_stim, {'i':'_ref_i'}, rec)\n if options['use_pid']:\n rec = makeRecorders(perturb, {'pid':'_ref_i'}, rec)\n\n h.load_file('stdrun.hoc')\n h.celsius = 36\n h.dt = dt\n\n if options['use_pid']:\n h.tstop = Ttotal\n h.run()\n data['perturb'] = np.array(perturb_times)\n else:\n # ``Transient''\n h.tstop = Tdelay\n h.run()\n for k in range(ntrials):\n tperturb = stimulation_period/3 + rnd.uniform(0,stimulation_period/3)\n perturb.delay = h.t + tperturb\n data['perturb'][k] = perturb.delay\n tstop = h.t + 
stimulation_period\n h.continuerun(tstop)\n sys.stdout.write('\\rt = %.4f s [%.0f%%]' % (h.t/1000, round(h.t/Ttotal*100)))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n if DEBUG:\n import pylab as p\n p.plot(rec['t'],rec['v'],'k')\n for tp in data['perturb']:\n p.plot([tp,tp],[-80,40],'r--')\n if neuron_mode == 'deterministic':\n p.plot(rec['t'],rec['i'],'g')\n if options['use_pid']:\n p.plot(rec['t'],rec['pid'],'m')\n p.show()\n\n if not options['use_pid']:\n baseline_amplitude = base.amp\n else:\n baseline_amplitude = 0\n\n savePRCData(options['output_file'], neuron_type+neuron_mode, soma.L, soma.diam, Rin, stimulation_period, Tdelay, h.dt,\n baseline_amplitude, perturb.amp, perturb.dur, noise_mu, noise_sigma, noise_tau,\n np.array(data['spks']), data['perturb'], options['use_pid'])\n\nif __name__ == '__main__':\n main()\n","repo_name":"ModelDBRepository/155735","sub_path":"python/KR/KR_simulator.py","file_name":"KR_simulator.py","file_ext":"py","file_size_in_byte":18021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34489545175","text":"from aiogram.types import (\n ReplyKeyboardMarkup,\n KeyboardButton,\n InlineKeyboardMarkup,\n InlineKeyboardButton\n)\n\nfrom core.utils import keyboard_utils\n\nall_keyboards = {}\n\n\n# basic keyboards\n\n\ndef kb_student_menu():\n kb_student = ReplyKeyboardMarkup(resize_keyboard=True)\n kb_student.add(\n KeyboardButton(\"Курсы\"),\n KeyboardButton(\"Репетиторы\"),\n KeyboardButton(\"Занятия\"),\n KeyboardButton(\"Корзина\"),\n KeyboardButton(\"Материалы\")\n )\n return kb_student\n\n\ndef kb_menubasket():\n kb_student = ReplyKeyboardMarkup(resize_keyboard=True)\n kb_student.add(\n KeyboardButton(\"Курсы\"),\n KeyboardButton(\"Репетиторы\"),\n KeyboardButton(\"Занятия\"),\n KeyboardButton(\"Очистить корзину\"),\n KeyboardButton(\"Материалы\")\n )\n return kb_student\n\n\n# course keyboards\n\nasync def kb_course_select_with_desc(course_id):\n kb_student = InlineKeyboardMarkup()\n await keyboard_utils.add_course_packages_to_kb(kb_student, course_id)\n keyboard_utils.add_course_desc_to_kb(kb_student, course_id)\n return kb_student\n\n\nasync def kb_course_select_without_desc(course_id: int) -> None:\n kb_student = InlineKeyboardMarkup()\n await keyboard_utils.add_course_packages_to_kb(kb_student, course_id)\n return kb_student\n\n\ndef kb_course_desc(course_id):\n kb_student = InlineKeyboardMarkup()\n keyboard_utils.add_course_desc_to_kb(kb_student, course_id)\n return kb_student\n\n\n# personal keyboards\n\n\ndef kb_personal_desc(teacher_id: int, subject_name: str) -> InlineKeyboardMarkup:\n kb_show_desc = InlineKeyboardMarkup()\n keyboard_utils.add_personal_desc_to_kb(kb_show_desc, teacher_id, subject_name)\n return kb_show_desc\n\n\ndef kb_personal_select_with_desc(teacher_id: int, subject_name: str) -> InlineKeyboardMarkup:\n kb_add_lesson = InlineKeyboardMarkup()\n keyboard_utils.add_contact_to_kb(kb_add_lesson, teacher_id, subject_name)\n keyboard_utils.add_personal_desc_to_kb(kb_add_lesson, teacher_id, subject_name)\n return kb_add_lesson\n\n\ndef kb_personal_select_without_desc(teacher_id: int, subject_name: str) -> InlineKeyboardMarkup:\n kb_add_lesson = InlineKeyboardMarkup()\n keyboard_utils.add_contact_to_kb(kb_add_lesson, teacher_id, subject_name)\n return kb_add_lesson\n\n\n# materials keyboards\n\ndef kb_materials(tg_id: int):\n kb_student = InlineKeyboardMarkup()\n keyboard_utils.add_materials(kb_student, tg_id)\n return kb_student\n\n\ndef 
kb_show_webinar_materials(tg_id, course_id: int):\n kb_student = InlineKeyboardMarkup()\n keyboard_utils.add_webinar_materials(kb_student, tg_id, course_id)\n return kb_student\n\n\ndef kb_show_personal_materials(tg_id, teacher_id: int, subject_name: str):\n kb_student = InlineKeyboardMarkup()\n keyboard_utils.add_personal_materials(kb_student, tg_id, teacher_id, subject_name)\n return kb_student\n\n\ndef kb_show_group_materials(tg_id, course_id: int):\n kb_student = InlineKeyboardMarkup()\n keyboard_utils.add_group_materials(kb_student, tg_id, course_id)\n return kb_student\n\ndef kb_pass_homework(tg_id, id: int, tip: str, group_lesson_id=None):\n kb_student = InlineKeyboardMarkup()\n keyboard_utils.add_homework(kb_student, tg_id, id, tip, group_lesson_id)\n return kb_student\n\n\nasync def kb_choose_course_groups(tg_id, course_id: int):\n kb_student = InlineKeyboardMarkup()\n await keyboard_utils.add_course_groups(kb_student, tg_id, course_id)\n return kb_student\n\nall_keyboards[\"menu\"] = kb_student_menu\nall_keyboards[\"menubasket\"] = kb_menubasket\nall_keyboards[\"course_select_without_desc\"] = kb_course_select_without_desc\nall_keyboards[\"course_select_with_desc\"] = kb_course_select_with_desc\nall_keyboards[\"course_desc\"] = kb_course_desc\nall_keyboards[\"personal_select_with_desc\"] = kb_personal_select_with_desc\nall_keyboards[\"personal_select_without_desc\"] = kb_personal_select_without_desc\nall_keyboards[\"materials\"] = kb_materials\nall_keyboards[\"show_webinar_materials\"] = kb_show_webinar_materials\nall_keyboards[\"show_personal_materials\"] = kb_show_personal_materials\nall_keyboards[\"show_group_materials\"] = kb_show_group_materials\nall_keyboards[\"choose_course_groups\"] = kb_choose_course_groups\nall_keyboards[\"pass_homework\"] = kb_pass_homework","repo_name":"Mishlen337/OnlineSchoolBot","sub_path":"telegram/core/keyboards/student_keyboards.py","file_name":"student_keyboards.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11417977290","text":"import os\r\nimport re\r\nimport yaml\r\n\r\n\r\ndef main():\r\n # Read input directory from the config file\r\n with open(\"config/config.yaml\", \"r\") as yaml_file:\r\n config = yaml.safe_load(yaml_file)\r\n\r\n dir_path = os.path.join(config[\"map_qual_stats\"][\"dir\"], \"samtools/flagstat\")\r\n\r\n # Create an empty list to store the results\r\n results = []\r\n\r\n # Loop through all files in the directory\r\n for file in os.listdir(dir_path):\r\n if file.endswith(\".bam.flagstat.txt\"):\r\n with open(os.path.join(dir_path, file), \"r\") as f:\r\n content = f.read()\r\n\r\n # Extract sample ID\r\n sample_id = re.sub(r\"\\.bam\\.flagstat\\.txt$\", \"\", file)\r\n\r\n # Extract total reads\r\n total_reads = int(\r\n re.search(r\"^(\\d+) \\+ \\d+ in total\", content, re.MULTILINE).group(1)\r\n )\r\n\r\n # Extract total mapped reads\r\n mapped_reads = int(\r\n re.search(r\"^(\\d+) \\+ \\d+ mapped\", content, re.MULTILINE).group(1)\r\n )\r\n\r\n # Calculate total unmapped reads\r\n unmapped_reads = total_reads - mapped_reads\r\n\r\n # Extract total duplicates\r\n duplicates = int(\r\n re.search(r\"^(\\d+) \\+ \\d+ duplicates\", content, re.MULTILINE).group(\r\n 1\r\n )\r\n )\r\n\r\n # Append the results to the list\r\n results.append([sample_id, mapped_reads, duplicates, unmapped_reads])\r\n\r\n # Save the results to a file\r\n with open(\"stats_3_mapped_reads.tsv\", \"w\") as f:\r\n # Write the header\r\n 
f.write(\r\n \"sample_id\\treads_mapped_total\\treads_mapped_duplicates\\treads_mapped_unmapped\\n\"\r\n )\r\n\r\n # Write the results in a tab-separated format\r\n for result in results:\r\n f.write(\"\\t\".join(map(str, result)) + \"\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"kevin-wamae/variant-calling-with-Snakemake-and-GATK","sub_path":"workflow/scripts/get_bam_stats.py","file_name":"get_bam_stats.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39341616619","text":"#!/usr/bin/python2\n# -*- coding: utf-8 -*-\n\n# Module: default\n# Author: Roman V. M.\n# Created on: 28.11.2014\n# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html\n\nimport re\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n#import base64\n#import urllib2\n#import json\nimport sys\n#import HTMLParser\n#import re\n#from urlparse import urlparse\n\n\n\n\ndef play(url):\n uheaders = {}\n uheaders['referer'] = url\n uheaders['user-agent'] = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Safari/605.1.15\"\n ucookies = {}\n ucookies['__cfduid']= 'd7775a564e6be4cf770deb7165835eb1e1577592176'\n ucookies['Hm_lpvt_e2526426c8588c6ac00d82d501ff28d8'] = str(int(time.time()))\n ucookies['Hm_lvt_e2526426c8588c6ac00d82d501ff28d8'] = '1577499144,1577525770,1577587676,1577589524'\n s = requests.session()\n r = s.get(url,verify=False, headers=uheaders)\n soup = BeautifulSoup(r.text)\n videourl = soup.find('div', class_='stui-player__video').find('iframe')['src']\n #print(s.cookies.get_dict()) # 先打印一下,此时一般应该是空的。\n r = s.get(videourl, verify=False, headers=uheaders, cookies=ucookies)\n vpattern = re.compile(\"video src\\=\\\"([^\\\"]*)\\\"\")\n vplayurl = vpattern.findall(r.text)[0].strip()\n # print(vplayurl) # 先打印一下,此时一般应该是空的。\n return vplayurl\n# return vpattern.findall(r.text)\n\n\nif __name__ == '__main__':\n# play(\"https://1090ys.com/play/3726~0~0.html\")\n print(play(sys.argv[1]))\n","repo_name":"alycolas/mybin","sub_path":"1090ys.py","file_name":"1090ys.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72725994651","text":"\"\"\"Naive implementation of Dijkstra's algorithm that runs in O(nm) time\"\"\"\n\n\ndef build_graph(filepath):\n graph = {}\n with open(filepath, \"r\") as file:\n for line in file:\n node_and_edges = line.split()\n node = int(node_and_edges[0])\n edges = node_and_edges[1:]\n graph[node] = []\n for edge in edges:\n neighbor, weight = [int(x) for x in edge.split(\",\")]\n graph[node].append((neighbor, weight))\n return graph\n\n\ndef dijkstra(graph, s):\n def get_edges():\n \"\"\"Return a list of all edges where the tail of the edge has\n been visited and the head of the edge has not\n\n Each edge in the list is a 3-tuple of the form\n (tail, head, edge_weight)\n \"\"\"\n edges = []\n for tail in visited:\n for head, edge_weight in graph[tail]:\n if head not in visited:\n edges.append((tail, head, edge_weight))\n return edges\n\n def get_edges_with_greedy_score():\n \"\"\"Return a list of edges with their corresponding greedy score\"\"\"\n edges_with_scores = []\n for tail, head, edge_weight in get_edges():\n greedy_score = A[tail-1] + edge_weight\n edges_with_scores.append((tail, head, greedy_score))\n return edges_with_scores\n\n A = [1000000 for _ in range(len(graph))] # Computed shortest 
distances\n A[s-1] = 0 # Distance from s to s is 0; subtract 1 for zero-based index\n B = [[] for _ in range(len(graph))] # Node-to-node shortest paths\n visited = set([s]) # Nodes processed so far\n while len(visited) < len(graph):\n tail, head, greedy_score = min(get_edges_with_greedy_score(),\n key=lambda x: x[2])\n visited.add(head)\n A[head-1] = greedy_score\n B[head-1] = B[tail-1] + [head]\n return A\n\n\ndef main():\n target_nodes = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]\n g = build_graph(\"DijkstraData.txt\")\n shortest_path_distances = dijkstra(g, 1)\n distances = [shortest_path_distances[x-1] for x in target_nodes]\n print(\",\".join([str(x) for x in distances]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jeurymejia/coursera_ds_algo","sub_path":"2018_08_20/dijkstra_naive.py","file_name":"dijkstra_naive.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70715928410","text":"import pandas as pd\r\nimport re\r\nimport os\r\nimport pandas_profiling\r\nimport ydata_profiling as pdp\r\nfrom ydata_profiling import ProfileReport\r\n# Function to remove non-alphanumeric characters from a string\r\ndef remove_special_characters(text):\r\n return re.sub(r'\\W', '', str(text)) #Convert to string before applying regex\r\n\r\n#Function to remove all digits(numbers) from the text\r\n#def remove_digits(text):\r\n# return re.sub(r'\\d', '', str(text))\r\n\r\n#Function to remove whitespace characters(spaces, tabs, newlines) from the text\r\n#def remove_whitespace(text):\r\n# return re.sub(r'\\s', '', str(text))\r\n\r\n#Function removes a specific phrase\r\n#def remove_specific_phrase(text):\r\n# return re.sub(r'specific_phrase', '', str(text))\r\n\r\n#LAMBDA FUNCTIONS!!!! Remove Specific Words or Phrases, such as \"apple\"\r\n####### df['column_name'] = df['column_name'].apply(lambda x: re.sub(r'apple', '', x)) ###########\r\n\r\n# LAMBDA FUNCTION!!!! Remove all EMAIL ADDRESS with \"REDACTED\"\r\n####### df['column_name'] = df['column_name'].apply(lambda x: re.sub(r'\\S+@\\S+', 'REDACTED', x)) ######\r\n\r\n# Define the path of the file where BAD_DATA.xlsx exists in\r\n# 'r' = raw string literal\r\ndownloads_folder = r'C:\\Users\\ryanw\\Downloads'\r\n\r\n# Specify the file you want to reference in the downloads folder\r\nfile_to_reference = 'BAD_DATA.xlsx'\r\n\r\n# Create the full path to the file\r\nfile_path = os.path.join(downloads_folder, file_to_reference)\r\n\r\ndf = pd.read_excel(file_path) #If the first column is NOT part of the columns to be factored in, add \", index_col=0\"!!! 
\r\nprint(df.head(6))\r\n\r\nprint(df.dtypes)\r\n\r\n# run the profile report\r\nprofile = df.profile_report(title='Pandas Profiling Report')\r\nprofile2 = ProfileReport(df, title=\"New Profiling Report\")\r\n# save the report as html file\r\nprofile.to_file(output_file=\"pandas_profiling1.html\")\r\nprofile2.to_file(output_file=\"pandas_profiling1_v2.html\")\r\n# save the report as json file\r\nprofile.to_file(output_file=\"pandas_profiling2.json\")\r\nprofile2.to_file(output_file=\"pandas_profiling2_v2.json\")\r\n#Specify which columns to clean\r\ncolumns_to_clean = ['SYMBOL','AMOUNT','PRICE']\r\nfor column in columns_to_clean:\r\n if df[column].dtype == 'object': # Check if the column contains string data\r\n df[column]=df[column].apply(remove_special_characters)\r\n\r\n#If you want to apply for ALL COLUMNS:\r\n#for column in df.columns:\r\n # if df[column].dtype == 'object': # Check if the column contains string data\r\n # df[column] = df[column].apply(remove_special_characters) \r\n\r\n#Convert specific columns back to numeric:\r\ncolumns_to_convert_to_numeric = ['PRICE'] \r\nfor column in columns_to_convert_to_numeric:\r\n df[column] = pd.to_numeric(df[column], errors='coerce') # 'coerce' converts non-numeric values to NaN\r\n #POSSIBLE OPTIONS FOR ERROR TREATMENT: 1) IGNORE - IGNORES UNCONVERTIBLE VALUES AND LEAVES THEM AS THEY ARE IN THE RESULTING SERIES\r\n #2) COERCE - CONVERTS UNCONVERTIBLE VALUES to NaN(Not a Number)...3) RAISE - WILL RAISE AN ERROR IF IT ENCOUNTERS ANY UNCONVERTIBLE VALUES\r\n\r\nprint(df.dtypes)\r\n\r\nprint(df.head(6))\r\n\r\ndf.to_excel('GOOD_DATA.xlsx') # to_excel writes the file and returns None\r\n\r\ndf2 = pd.read_excel('GOOD_DATA.xlsx') # Reload the cleaned file to inspect it\r\n\r\nprint(df2.dtypes)\r\n\r\nprint(df2)\r\n\r\n","repo_name":"DataSolutions360/DATA-QUALITY","sub_path":"BAD_DATA.py","file_name":"BAD_DATA.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5335529697","text":"import csv\nimport os\nfrom pathlib import Path\nfrom i2b2_cdi.common.utils import *\nfrom datetime import datetime as DateTime\nfrom Mozilla.exception.mozilla_cdi_max_err_reached import MaxErrorCountReachedError\nfrom Mozilla.exception.mozilla_cdi_csv_conversion_error import CsvToBcpConversionError\nfrom i2b2_cdi.log import logger\nfrom i2b2_cdi.config.config import Config\n\n\nclass TransformFile:\n \"\"\"The class provides the various methods for transforming csv data to bcp file\"\"\"\n\n def __init__(self):\n self.float_precision_digits = 10\n self.write_batch_size = 100\n self.error_count = 0\n self.error_count_max = 100\n self.numeric_concept_types = ['posinteger','float','integer','posfloat']\n now = DateTime.now()\n self.import_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n self.bcp_header = ['LINE_NUM', 'EncounterID', 'PatientID', 'ConceptCD', 'ProviderID', 'StartDate', 'ModifierCD', 'InstanceNum', 'VALTYPE_CD', 'TVAL_CHAR', 'NVAL_NUM', 'VALUEFLAG_CD', 'QUANTITY_NUM', 'UnitCD',\n 'END_DATE', 'LOCATION_CD', 'OBSERVATION_BLOB', 'CONFIDENCE_NUM', 'UPDATE_DATE', 'DOWNLOAD_DATE', 'IMPORT_DATE', 'SOURCESYSTEM_CD', 'UPLOAD_ID', 'TEXT_SEARCH_INDEX']\n\n def csv_to_bcp(self, csv_file_path, input_csv_delimiter, bcp_file_path, output_bcp_delimiter):\n \"\"\"This method transforms csv file to bcp, Error records will be logged to log file\n\n Args:\n csv_file_path (:obj:`str`, mandatory): Path to the input csv file which needs to be converted to bcp file\n input_csv_delimiter (:obj:`str`, mandatory): Delimiter of the input csv file, which will be used while reading csv file.\n bcp_file_path (:obj:`str`, 
mandatory): Path to the output bcp file.\n output_bcp_delimiter (:obj:`str`, mandatory): Delimiter of the output bcp file, which will be used while writing bcp file.\n\n \"\"\"\n # MS:: input arg data check\n \n \n _valid_rows_arr = []\n max_line = file_len(csv_file_path) - 1\n try:\n print('\\n')\n # Read input csv file\n with open(csv_file_path, mode='r') as csv_file:\n csv_reader = csv.DictReader(\n csv_file, delimiter=input_csv_delimiter)\n row_number = 0\n \n with alive_bar(max_line, bar='smooth') as bar:\n \n for row in csv_reader:\n #if csv_file_path!=\"tmp/api_reserved_dir/Demo/deid/tableAccess_concepts.csv\":\n #if csv_file_path!=\"tmp/api_reserved_dir/Demo/tableAccess_concepts.csv\":\n try:\n listOfValues=list(row.values())\n _valid_rows_arr.append(listOfValues)\n except Exception as e:\n logger.error(e)\n self.error_count += 1\n if self.error_count > self.error_count_max:\n raise MaxErrorCountReachedError(\"Exiting function as max errors reached :\" + self.error_count_max)\n\n # Write valid records to file, if batch size reached.\n if len(_valid_rows_arr) == self.write_batch_size:\n self.write_to_bcp_file(\n _valid_rows_arr, bcp_file_path, output_bcp_delimiter)\n _valid_rows_arr = []\n self.write_to_bcp_file(\n _valid_rows_arr, bcp_file_path, output_bcp_delimiter)\n except MaxErrorCountReachedError:\n raise\n except Exception as e:\n logger.error(\"Error while bcp conversion : {}\", e)\n raise CsvToBcpConversionError(\n \"Error while bcp conversion : \" +str(e))\n\n def write_to_bcp_file(self, _valid_rows_arr, bcp_file_path, bcp_delimiter):\n \"\"\"This method writes the list of rows to the bcp file using csv writer\n\n Args:\n _valid_rows_arr (:obj:`str`, mandatory): List of valid facts to be written into bcp file.\n bcp_file_path (:obj:`str`, mandatory): Path to the output bcp file.\n bcp_delimiter (:obj:`str`, mandatory): Delimeter to be used in bcp file.\n\n \"\"\"\n try:\n with open(bcp_file_path, 'a+') as csvfile:\n for _arr in _valid_rows_arr:\n csvfile.write(bcp_delimiter.join(_arr) + \"\\n\")\n except Exception as e:\n raise e\n \n def getValType(self, x):\n \"\"\"Returns the type of value provided\n\n Args:\n x (type): value/instance \n\n Returns:\n type: provide the type of instance/value \n\n \"\"\"\n try:\n if float(x):\n return 'float'\n except BaseException:\n return 'str'\n\ndef csv_to_bcp(csv_file_path,csv_file_path_name):\n \"\"\"Convert the csv file to bcp file and provide the path to the bcp file\n\n Args:\n _file (str): path to the csv file\n\n Returns:\n str: path to the bcp file\n\n \"\"\"\n\n if os.path.exists(csv_file_path):\n logger.info('converting csv to bcp : {}', csv_file_path)\n T = TransformFile()\n bcp_file_path = os.path.join(\n Path(csv_file_path).parent, csv_file_path_name +\"_\"+'concepts.bcp')\n # Delete bcp and error file if already exists\n delete_file_if_exists(bcp_file_path)\n mkParentDir(bcp_file_path)\n input_csv_delimiter = str(Config.config.csv_delimiter) # not using \n output_bcp_delimiter = str(Config.config.bcp_delimiter)\n T.csv_to_bcp(csv_file_path, input_csv_delimiter,\n bcp_file_path, output_bcp_delimiter)\n\n return bcp_file_path\n else:\n logger.error('File does not exist : {}', csv_file_path)\n","repo_name":"i2b2/i2b2-etl","sub_path":"i2b2_cdi/concept/transform_file.py","file_name":"transform_file.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"14589205015","text":"import json\n\nfrom flask import make_response, 
current_app\n\nfrom ..database import redis\n\n\nclass Menu(object):\n def __init__(self, session_id, session, user, user_response, phone_number=None, level=None):\n self.session = session\n self.session_id = session_id\n self.user = user\n self.user_response = user_response\n self.phone_number = phone_number\n self.level = level\n\n def execute(self):\n raise NotImplementedError\n\n def ussd_proceed(self, menu_text):\n redis.set(self.session_id, json.dumps(self.session))\n menu_text = \"CON {}\".format(menu_text)\n response = make_response(menu_text, 200)\n response.headers['Content-Type'] = \"text/plain\"\n return response\n\n def ussd_end(self, menu_text):\n redis.delete(self.session_id)\n menu_text = \"END {}\".format(menu_text)\n response = make_response(menu_text, 200)\n response.headers['Content-Type'] = \"text/plain\"\n return response\n\n def home(self):\n \"\"\"serves the home menu\"\"\"\n menu_text = \"Hello {}, welcome to {},\\n Choose a service\\n\".format(self.user.username,\n current_app.config['APP_NAME'])\n menu_text += \" 1. Deposit Money\\n\"\n menu_text += \" 2. Withdraw Money\\n\"\n menu_text += \" 3. Buy Airtime\\n\"\n menu_text += \" 4. Check Wallet Balance\\n\"\n self.session['level'] = 1\n # print the response on to the page so that our gateway can read it\n return self.ussd_proceed(menu_text)\n","repo_name":"DanNyongesa/USSD-Python-Demo","sub_path":"app/ussd/base_menu.py","file_name":"base_menu.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"32"} +{"seq_id":"71997243931","text":"import os\n#En este codigo se encuetran varios algortimos de la pagina https://www.codewars.com/dashboard nivel Basico\n#poner comentario control k c... quitar comentario control k u\n\ndef clear():\n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n\n\ndef main():\n hacer=int(input(\"Ingrese que quieres hacer: \"))\n while True:\n clear()\n\n if hacer == 0:\n break\n\n elif hacer == 1:\n palabra=input(\"Ingrese una palabra para saber su composicion: \")\n repetir(palabra)\n \n elif hacer == 2:\n palabra=input(\"Ingrese una palabra para saber la palabra que se repite: \")\n print(saber_repetidas(palabra))\n \n elif hacer == 3:\n lista=anadir_nombre()\n print(friend(lista))\n\n elif hacer == 4:\n nume=int(input(\"Ingrese un numero para sumar hasta que quede un digito: \"))\n print(digital_root(nume ))\n \n elif hacer == 5:\n palabra=input(\"Aqui la letra se pondra mayuscula si hay un - o un _: \")\n print(to_camel_case(palabra))\n \n elif hacer == 6:\n print(xo(\"xooxx\"))\n print(xo(\"ooxXm\"))\n print(xo(\"ooxXm\"))\n print(xo(\"zzoo\"))\n \n elif hacer == 7:\n lista=[1,2,'aasf','1','123',123,14] \n print(sacar_numero(lista))\n \n elif hacer == 8:\n numero=int(input(\"Ingrese un numero para comprobar si es primo: \"))\n print(divisors(numero))\n \n elif hacer == 9:\n lista=input(\"Ingrese numeros dejando un espacio para luego buscar el mas alto y el mas bajo: \")\n print(high_and_low(lista))\n\n elif hacer == 10:\n a=int(input(\"Ingrese la poblacion inicial: \"))\n b=int(input(\"Ingrese el porcentaje de incremento: \"))\n c=int(input(\"Ingrese la poblacion anual: \"))\n d=int(input(\"Ingrese la poblacion deseada: \"))\n #print(nb_year(1500, 5, 100, 5000))\n print(f\"Necesita de: {nb_year(a,b,c,d)} años\")\n\n elif hacer ==11:\n palabra=input(\"Ingrese una frase: \")\n print(reverse_words(palabra))\n\n elif hacer == 12:\n seconds=int(input(\"Ingrese un numero para calcular 
el tiempo: \"))\n print(make_readable(seconds))\n\n elif hacer == 13:\n ini=input(\"Ingrese una palabra: \")\n encontrar= input(\"Ingrese palabra a encontrar:\")\n print(solution(ini,encontrar))\n\n hacer=int(input(\"Ingrese que quieres hacer: \"))\n\n \n\ndef anadir_nombre():\n lista=[]\n contador=int(input(\"Cuanto nombres quieres anadir: \"))\n for i in range(contador):\n nombre=input(\"Ingrese un nombre: \")\n lista.append(nombre)\n return lista\n\ndef numeros_al100():\n i=0\n while i<100:\n i=i+1\n print(f\"El numero es: {i}\")\n\ndef impimir():\n hola=\"Hola Mundo\"\n for i in hola:\n print(i)\n\ndef is_number(n):\n \n try:\n\t entero = int(n)\n\t return True\n except ValueError:\n\t return False\n\ndef sacar_numero(lista):\n lista2=[]\n \n for i in lista:\n \n if is_number(i) == True:\n lista2.append(int(i))\n\n return list(set(lista2))\n #return lista2\n\n#Aqui una funcion que si tiene x y o, igual devuelve True en cambio devuelve False, y si no tiene ni x ni o devuelve True\ndef xo(s):\n s=s.lower();o=0;x=0\n\n for i in s: \n if i == \"x\": \n x=x+1 \n \n elif i == \"o\": \n o=o+1\n \n if x == o:\n return True\n\n else:\n if \"x\" not in s:\n if \"o\" not in s:\n return True\n return False\n\n#Aqui una funcion que si hay un - o un _ , la siguiente letra sera mayuscula \ndef to_camel_case(s):\n nueva=[]\n mayus=False\n for i in s:\n \n if i == \"-\" or i ==\"_\":\n mayus=True\n\n else:\n\n if mayus==False:\n nueva.append(i)\n elif mayus==True:\n nueva.append(i.capitalize())\n mayus=False\n\n return \"\".join(nueva)\n\n#Esta funcion suma los numeros de un string, y lo convierte a int el string\ndef sumar_numero(lista):\n sumatoria=0\n for i in lista:\n sumatoria=sumatoria+int(i)\n return sumatoria\n\n#Esta funcion se encarga de sumar los numeros, hasta que quede solo un numero Ej: 493193 --> 4 + 9 + 3 + 1 + 9 + 3 = 29 --> 2 + 9 = 11 --> 1 + 1 = 2\ndef digital_root(n):\n\n detener=len(str(n))\n solo=str(n)\n \n while detener != 1:\n solo=sumar_numero(str(solo))\n detener=len(str(solo))\n \n return solo\n\n#Esta funcion sacas solo la palabras de 4 digitos de una lista\ndef friend(x):\n nueva=[]\n for i in x:\n saber=len(i)\n if saber == 4:\n nueva.append(i)\n return nueva\n\n#Esta funcion va a descomponer una palabra y decir cuantas veces se repite, en caso de que se repita\ndef repetir(palabra):\n nueva=list(palabra.lower())\n palabra2=\"\"\n\n print(f\"La palabra que ingreso es: {palabra}\")\n for i in palabra:\n\n if i not in palabra2:\n palabra2=palabra2.lower()+i\n \n for i in palabra2:\n \n saber_si_es_una=nueva.count(i)\n if saber_si_es_una == 1:\n print(f\"La letra {i} se encuentra {nueva.count(i)} vez\")\n else:\n print(f\"La letra {i} se encuentra {nueva.count(i)} veces\")\n\n#Este algortimo, solo imprime si una palabra se repite, e indica cual palabra se repitio\ndef saber_repetidas(palabra):\n nueva=list(palabra.lower())\n salida=\"\"\n\n repetida=0\n\n for i in palabra:\n saber=nueva.count(i)\n if saber>=2:\n \n if i not in salida:\n repetida=repetida+1\n salida=salida+i\n\n if salida==\"\":\n return repetida\n else: \n return repetida,salida\n\n#Este programa sca todos los divisodres de un primo menos el 1 y el mismo numero, y si es primo indica que es primo\ndef divisors(numero):\n primos=[]\n\n for i in range (2,numero):\n primo=numero%i\n if primo == 0:\n primos.append(i)\n \n if len(primos)==0:\n numero=str(numero)\n cadena=numero+\" is prime\"\n return cadena\n else:\n return primos\n\n#En esta pequeña tarea, se le da una serie de números separados por espacios y 
debe devolver el número más alto y el más bajo.\ndef high_and_low(numeros):\n numeros=numeros.split()\n salida=[]\n for i in numeros:\n ok=int(i)\n salida.append(ok)\n maximo=str(max(salida))\n minimo=str(min(salida))\n salida=(f\"{maximo} {minimo}\")\n return salida\n\n#En una ciudad pequeña, la población es p0 = 1000 al comienzo de un año. La población aumenta regularmente en un 2 por ciento anual y, además, más de 50 nuevos habitantes por año vienen a vivir a la ciudad. ¿Cuántos años necesita la ciudad para que su población sea mayor o igual ap = 1200 habitantes?\ndef nb_year(p0, percent, aug, p):\n anos=0\n while p0<=p: \n p0=(p0+((p0*percent)/100)+aug)\n anos=anos+1\n return anos\n\n#Complete la función que acepta un parámetro de cadena e invierte cada palabra en la cadena. Todos los espacios en la cadena deben conservarse.\ndef reverse_words(text):\n nueva=\"\"\n lista=[]\n for i in text:\n \n if i != \" \":\n nueva=nueva+i\n\n else:\n if len(nueva) != 0:\n nueva=\"\".join(reversed(nueva))\n lista.append(nueva)\n nueva=\"\"\n\n if i==\" \":\n lista.append(i)\n\n if nueva != \"\":\n nueva=\"\".join(reversed(nueva))\n lista.append(nueva)\n\n lista = \"\".join(lista)\n return lista\n\n\ndef make_readable(seconds):\n\n horas=seconds//3600\n minutes=-((horas*60)-(seconds//60))\n seconds=-((horas*3600)+(minutes*60)-(seconds))\n\n if horas<10:\n horas=\"0{}\".format(horas)\n\n if minutes<10:\n minutes=\"0{}\".format(minutes)\n if seconds<10:\n seconds=\"0{}\".format(seconds)\n \n\n return \"{}:{}:{}\".format(horas,minutes,seconds)\n\ndef make_readablee(s):\n return '{:02}:{:02}:{:02}'.format(s / 3600, s / 60 % 60, s % 60)\n\n#Aqui imprimo verdadero si la palabra terminar en la palabra que quiero, \ndef solution(string, ending):\n if string.endswith(ending):\n return True\n else:\n return False\n\n\n\nmain()","repo_name":"carlosdiazz/python","sub_path":"Videos Youtube/funcion.py","file_name":"funcion.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30945486394","text":"from . 
import views\nfrom django.urls import path\nfrom django.urls import include\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\nrouter.register('buildings', views.BuildingViewSet)\nrouter.register('paymentTypes', views.PaymentTypeViewSet)\nrouter.register('membershipTypes', views.MembershipTypeViewSet)\nrouter.register('billings', views.BillingViewSet, basename='Billing')\nrouter.register('tenants', views.TenantViewSet, basename='Tenant')\nrouter.register('contacts', views.ContactViewSet, basename='Contact')\n\nurlpatterns = [\n path('', include(router.urls)),\n path('register_user/', views.register_user),\n path('login_user/', views.login_user),\n path('available-rooms//', views.available_building_rooms),\n path('add-floor/', views.add_floor),\n path('floors//', views.get_floors_of_building),\n path('floor-details//', views.FloorDetailAPIView.as_view()),\n path('add-block/', views.add_block),\n path('blocks//', views.get_blocks_of_building),\n path('block-details//', views.BlockDetailAPIView.as_view()),\n path('add-room/', views.add_room),\n path('rooms//', views.get_rooms_of_building),\n path('room-details//', views.RoomDetailAPIView.as_view()),\n path('add-booking/', views.add_booking),\n path('pending-bookings//',\n views.get_pending_bookings_of_building),\n path('confirmed-bookings//',\n views.get_confirmed_bookings_of_building),\n path('booking-details//',\n views.BookingDetailAPIView.as_view()),\n path('count-totals///', views.count_totals),\n path('test/', views.test),\n]\n","repo_name":"uwevanopfern/BMS-Full-project-","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"34758141581","text":"#Circuit Python capacitive touch\nimport pulseio\nimport time\nimport board\nimport touchio\nfrom adafruit_motor import servo\npwm = pulseio.PWMOut(board.D13, duty_cycle=2 ** 15, frequency = 50)\nmy_servo = servo.Servo(pwm)\ntouch_A1 = touchio.TouchIn(board.A1) # Not a touch pin on Trinket M0!\ntouch_A2 = touchio.TouchIn(board.A2) # Not a touch pin on Trinket M0!\ni = 5\nj = 5\nwhile True:\n my_servo.angle = i\n if touch_A1.value and i < 180:\n i = i + j\n print(\"Touched A1!\")\n\n if touch_A2.value and i > 0:\n i = i - j\n print(\"Touched A2!\")\n time.sleep(.01)","repo_name":"sosman83/CircuitPython","sub_path":"servo.py","file_name":"servo.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5105741823","text":"from chatterbot.logic import LogicAdapter\nfrom chatterbot.conversation import Statement\nfrom datetime import datetime\nimport requests\nimport re\n\nclass TemperatureAdapter(LogicAdapter):\n def __init__(self, chatbot, **kwargs):\n super().__init__(chatbot, **kwargs)\n\n def can_process(self, statement):\n words = ['what', 'is', 'temperature']\n words1 = ['what', 'is', 'temperature', 'in']\n text=re.sub('[!@#$?]', '', statement.text)\n text=text.lower()\n res= all(x in text.split() for x in words)\n if res:\n return True\n else:\n return False\n\n def process(self, statement, additional_response_selection_parameters=None):\n try:\n response = requests.get('http://samples.openweathermap.org/data/2.5/weather?q=London,uk&appid=b6907d289e10d714a6e88b30761fae22')\n data = response.json()\n \n if response.status_code == 200:\n confidence = 1\n else:\n confidence = 0\n\n temperature =data[\"main\"][\"temp\"]\n response 
= Statement('The current temperature is {}'.format(temperature))\n \n except Exception as err:\n confidence = 0\n response = Statement('The current temperature is unavailable')\n\n response.confidence = confidence\n return response\n","repo_name":"itcodium/chatter","sub_path":"server/src/api/adapters/TemperatureAdapter.py","file_name":"TemperatureAdapter.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12486840894","text":"import sys\nfrom collections import deque\nread = sys.stdin.readline\n\nn, m = map(int, read().split())\ngraph = [[]for _ in range(n+1)]\n\nfor i in range(m):\n a, b = map(int, read().split())\n graph[a].append(b)\n graph[b].append(a)\n\n\ndef dfs(cur, find):\n visited = [0] * (n+1)\n q = deque()\n q.append((cur, 0))\n\n while q:\n node, depth = q.popleft()\n #print('node: ', node, 'depth: ', depth)\n if node == find:\n break\n for i in range(len(graph[node])):\n if visited[graph[node][i]] == 0:\n q.append((graph[node][i], depth+1))\n visited[graph[node][i]] = depth + 1\n return depth\n\n\nresult = 9876543210\nanswer = 0\nfor i in range(1, n+1):\n cur = i\n cnt = 0\n for j in range(1, n+1):\n if j == i:\n continue\n cnt += dfs(cur, j)\n if result > cnt:\n result = cnt\n answer = cur\n\nprint(answer)\n","repo_name":"HyunJungJo98/Algorithm-Study","sub_path":"DFS, BFS/1389 - 케빈 베이컨의 6단계 법칙.py","file_name":"1389 - 케빈 베이컨의 6단계 법칙.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35142927340","text":"with open('input/day15-input.txt', 'r') as f:\n risk = [[int(n) for n in x[:-1]] for x in f.readlines()]\n\ndim=100\nbig_risk=[]\nfor r in range(500):\n row=[]\n for c in range(500):\n new_risk = (risk[r % 100][c % 100] + r//100 + c//100) % 9\n row.append(9 if new_risk==0 else new_risk )\n big_risk.append(row)\n\nrisk=big_risk ### comment out these lines to run part 1\ndim=500 ### comment out these lines to run part 1\n\ndef adj_coords(coord):\n i=coord[0]\n j=coord[1]\n adj_coords=[(i-1,j),(i+1,j),(i,j-1),(i,j+1)]\n adj_coords=[c for c in adj_coords if 0 <= c[0] < dim and 0 <= c[1] < dim]\n return adj_coords\n\ndistances=[[0 for i in range(dim)] for j in range(dim)]\n\nfor d in range(1,(2*dim)-1):\n for i in range(max(0,d-(dim-1)),min(dim,d+1)):\n j=d-i\n if i==0:\n distances[i][j] = distances[i][j-1] + risk[i][j]\n elif j==0:\n distances[i][j] = distances[i-1][j] + risk[i][j]\n else:\n distances[i][j] = min(distances[i-1][j],distances[i][j-1]) + risk[i][j]\n\ndef refine_distances(risk,distances):\n new_best = [[0 for i in range(dim)] for j in range(dim)]\n for i in range(dim):\n for j in range(dim):\n adjs=adj_coords((i,j))\n best_dist = min(distances[c[0]][c[1]] for c in adjs) + risk[i][j]\n new_best[i][j] = min(best_dist,distances[i][j])\n return new_best\n\nbetter_found=True\nwhile better_found:\n better_found=False\n new_dists = refine_distances(risk, distances)\n if sum(sum(dl) for dl in new_dists) < sum(sum(dl) for dl in distances):\n better_found=True\n distances = new_dists\n\nprint(distances[-1][-1])\n","repo_name":"aPaulTaylor/advent-of-code-2020","sub_path":"aoc-2021/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33779005965","text":"from marvin.cloudstackTestCase import cloudstackTestCase\nfrom marvin.lib.utils import 
(cleanup_resources,\n validateList,\n get_hypervisor_type)\nfrom marvin.lib.base import (Account,\n VirtualMachine,\n ServiceOffering,\n Volume,\n DiskOffering,\n VmSnapshot,\n Template,\n listConfigurations)\nfrom marvin.lib.common import (get_domain,list_isos,\n get_zone,\n get_template)\nfrom nose.plugins.attrib import attr\nfrom ast import literal_eval\nfrom marvin.codes import PASS\nfrom marvin.cloudstackException import CloudstackAPIException\n\nclass TestVMware(cloudstackTestCase):\n\n @classmethod\n def setUpClass(cls):\n try:\n cls._cleanup = []\n cls.testClient = super(TestVMware, cls).getClsTestClient()\n cls.api_client = cls.testClient.getApiClient()\n cls.services = cls.testClient.getParsedTestDataConfig()\n cls.hypervisor = cls.testClient.getHypervisorInfo()\n # Get Domain, Zone, Template\n cls.domain = get_domain(cls.api_client)\n cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())\n cls.template = get_template(\n cls.api_client,\n cls.zone.id,\n cls.services[\"ostype\"]\n )\n if cls.zone.localstorageenabled:\n cls.storagetype = 'local'\n cls.services[\"service_offerings\"][\"tiny\"][\"storagetype\"] = 'local'\n cls.services[\"disk_offering\"][\"storagetype\"] = 'local'\n else:\n cls.storagetype = 'shared'\n cls.services[\"service_offerings\"][\"tiny\"][\"storagetype\"] = 'shared'\n cls.services[\"disk_offering\"][\"storagetype\"] = 'shared'\n\n cls.services['mode'] = cls.zone.networktype\n cls.services[\"virtual_machine\"][\"hypervisor\"] = cls.testClient.getHypervisorInfo()\n cls.services[\"virtual_machine\"][\"zoneid\"] = cls.zone.id\n cls.services[\"virtual_machine\"][\"template\"] = cls.template.id\n cls.services[\"custom_volume\"][\"zoneid\"] = cls.zone.id\n # Creating Disk offering, Service Offering and Account\n cls.disk_offering = DiskOffering.create(\n cls.api_client,\n cls.services[\"disk_offering\"]\n )\n cls.service_offering = ServiceOffering.create(\n cls.api_client,\n cls.services[\"service_offerings\"][\"tiny\"]\n )\n cls.account = Account.create(\n cls.api_client,\n cls.services[\"account\"],\n domainid=cls.domain.id\n )\n # Getting authentication for user in newly created Account\n cls.user = cls.account.user[0]\n cls.userapiclient = cls.testClient.getUserApiClient(cls.user.username, cls.domain.name)\n cls._cleanup.append(cls.disk_offering)\n cls._cleanup.append(cls.service_offering)\n cls._cleanup.append(cls.account)\n except Exception as e:\n cls.tearDownClass()\n raise Exception(\"Warning: Exception in setup : %s\" % e)\n return\n\n def setUp(self):\n\n self.apiClient = self.testClient.getApiClient()\n self.cleanup = []\n\n def tearDown(self):\n #Clean up, terminate the created volumes\n cleanup_resources(self.apiClient, self.cleanup)\n return\n\n @classmethod\n def tearDownClass(cls):\n try:\n cleanup_resources(cls.api_client, cls._cleanup)\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n\n @attr(tags=[\"advanced\"], required_hardware=\"true\")\n def test_01_attach_volume_ide(self):\n \"\"\"\n @desc: Exception when attaching data disk to RHEL VM on vSphere\n Step1: Confirm that vmware.root.disk.controller = \"ide\" in Global Settings.\n Step2: Register RHEl 6.0 template and deploy a VM.\n Step3: Note that the root disk is attached to IDE.\n Step4: Create new DATA disk and attempt to attach it to the VM.\n Verify that step4 succeeds without any exception\n \"\"\"\n self.hypervisor = str(get_hypervisor_type(self.api_client)).lower()\n if self.hypervisor != \"vmware\":\n self.skipTest(\"This 
test can be run only on vmware\")\n cmd = listConfigurations.listConfigurationsCmd()\n cmd.name = \"vmware.root.disk.controller\"\n cmd.listAll = True\n try:\n config_descs = self.api_client.listConfigurations(cmd)\n except Exception as e:\n raise Exception(\"Failed to fetch configurations: %s\" % e)\n if not isinstance(config_descs, list):\n raise Exception(\"List configs didn't returned a valid data\")\n config_desc = config_descs[0]\n if str(config_desc.value).lower() != \"ide\":\n self.skipTest(\"This test is invalid if {} is not set to ide\".format(config_desc.name))\n \"\"\"\n Register RHEL 6.0 template and deploy vm\n \"\"\"\n template = Template.register(\n self.userapiclient,\n self.services[\"rhel60template\"],\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.account.domainid,\n hypervisor=self.hypervisor\n )\n self.assertIsNotNone(template,\"Failed to register Rhel6 template\")\n self.debug(\n \"Registered a template with format {} and id {}\".format(\n self.services[\"rhel60template\"][\"format\"],template.id)\n )\n template.download(self.userapiclient)\n self.cleanup.append(template)\n vm = VirtualMachine.create(\n self.userapiclient,\n self.services[\"virtual_machine\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=template.id,\n zoneid=self.zone.id\n )\n self.assertIsNotNone(vm,\"Failed to deploy virtual machine\")\n self.cleanup.append(vm)\n response = VirtualMachine.list(self.userapiclient,id=vm.id)\n status = validateList(response)\n self.assertEqual(status[0],PASS,\"list vm response returned invalid list\")\n \"\"\"\n list root disk of the vm created above and make sure that device type is ide\n \"\"\"\n volume_res = Volume.list(\n self.userapiclient,\n virtualmachineid=vm.id,\n type=\"root\",\n listAll=\"true\"\n )\n self.assertEqual(validateList(volume_res)[0],PASS,\"list vm response returned invalid list\")\n chaininfo = volume_res[0].chaininfo\n device_Bus = literal_eval(chaininfo)[\"diskDeviceBusName\"]\n if \"ide\" not in device_Bus:\n self.fail(\"Root disk is not created with device type IDE\")\n disk = Volume.create(\n self.userapiclient,\n self.services[\"volume\"],\n zoneid=self.zone.id,\n diskofferingid=self.disk_offering.id\n )\n self.assertIsNotNone(disk,\"Failed to create custom volume\")\n self.cleanup.append(disk)\n try:\n vm.attach_volume(self.userapiclient,disk)\n list_volumes = Volume.list(\n self.userapiclient,\n listall=self.services[\"listall\"],\n id=disk.id\n )\n attached_volume = list_volumes[0]\n self.assertEqual(\n disk.id,\n attached_volume.id,\n \"list volume response does not match with the volume created and attached to vm\"\n )\n except Exception as e:\n self.fail(\"Failed to attach data disk to RHEL vm whose root disk type is IDE\")\n return\n\n # @attr(tags=[\"advanced\", \"basic\"], required_hardware=\"true\")\n @attr(tags=[\"TODO\"], required_hardware=\"true\")\n def test_02_attach_ISO_in_CentOSVM(self):\n \"\"\"\n @desc:Incorrect guest os mapping in vmware for CentOS 5.9 and above\n Step1 :Register an CentOS 6.3 template\n Step2 :Launch a VM\n Step3: Try to attach VMware Tools ISO\n Step4: Verify VMware tools ISO attached correctly\n \"\"\"\n self.hypervisor = str(get_hypervisor_type(self.api_client)).lower()\n if self.hypervisor != \"vmware\":\n self.skipTest(\"This test can be run only on vmware\")\n template = Template.register(\n self.userapiclient,\n self.services[\"CentOS6.3template\"],\n zoneid=self.zone.id,\n 
account=self.account.name,\n domainid=self.account.domainid,\n hypervisor=self.hypervisor\n )\n self.debug(\n \"Registered a template with format {} and id {}\".format(\n self.services[\"CentOS6.3template\"][\"format\"],template.id)\n )\n template.download(self.userapiclient)\n self.cleanup.append(template)\n vm = VirtualMachine.create(\n self.userapiclient,\n self.services[\"virtual_machine\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=template.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(vm)\n response = VirtualMachine.list(self.userapiclient,id=vm.id)\n status = validateList(response)\n self.assertEqual(status[0],PASS,\"list vm response returned invalid list\")\n list_default_iso_response = list_isos(\n self.api_client,\n name=\"vmware-tools.iso\",\n account=\"system\",\n isready=\"true\"\n )\n status = validateList(list_default_iso_response)\n self.assertEqual(\n PASS,\n status[0],\n \"ISO list is empty\")\n self.debug(\n \"Registered a ISO with name {}\".format(list_default_iso_response[0].name))\n try:\n vm.attach_iso(self.userapiclient,list_default_iso_response[0])\n except CloudstackAPIException as e:\n self.fail(\"Attached ISO failed : %s\" % e)\n response = VirtualMachine.list(self.userapiclient, id=vm.id)\n status = validateList(response)\n self.assertEqual(status[0], PASS,\"list vm response returned invalid list\")\n attachedIsoName=response[0].isoname;\n self.assertEqual(attachedIsoName, \"vmware-tools.iso\", \"vmware-tools.iso not attached\")\n return\n\n # @attr(tags=[\"advanced\", \"basic\"], required_hardware=\"true\")\n @attr(tags=[\"TODO\"], required_hardware=\"true\")\n def test_03_attach_ISO_in_RHEL7OSVM(self):\n \"\"\"\n @desc:Incorrect guest os mapping in vmware for Rhel7. 
Add a valid RHEL7 URL to execute this test case\n Step1 :Register an RHEL 7 template\n Step2 :Launch a VM\n Step3: Try to attach VMware Tools ISO\n Step4: Verify VMware tools ISO attached correctly\n \"\"\"\n self.hypervisor = str(get_hypervisor_type(self.api_client)).lower()\n if self.hypervisor != \"vmware\":\n self.skipTest(\"This test can be run only on vmware\")\n self.services[\"Rhel7template\"][\"url\"]=\"http://10.147.28.7/templates/rhel71.ova\",\n template = Template.register(\n self.userapiclient,\n self.services[\"Rhel7template\"],\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.account.domainid,\n hypervisor=self.hypervisor\n )\n self.debug(\n \"Registered a template with format {} and id {}\".format(\n self.services[\"Rhel7template\"][\"format\"],template.id)\n )\n template.download(self.userapiclient)\n self.cleanup.append(template)\n vm = VirtualMachine.create(\n self.userapiclient,\n self.services[\"virtual_machine\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=template.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(vm)\n response = VirtualMachine.list(self.userapiclient,id=vm.id)\n status = validateList(response)\n self.assertEqual(status[0],PASS,\"list vm response returned invalid list\")\n list_default_iso_response = list_isos(\n self.api_client,\n name=\"vmware-tools.iso\",\n account=\"system\",\n isready=\"true\"\n )\n status = validateList(list_default_iso_response)\n self.assertEqual(\n PASS,\n status[0],\n \"ISO list is empty\")\n self.debug(\n \"Registered a ISO with name {}\".format(list_default_iso_response[0].name))\n try:\n vm.attach_iso(self.userapiclient,list_default_iso_response[0])\n except CloudstackAPIException as e:\n self.fail(\"Attached ISO failed : %s\" % e)\n response = VirtualMachine.list(self.userapiclient, id=vm.id)\n status = validateList(response)\n self.assertEqual(status[0], PASS,\"list vm response returned invalid list\")\n attachedIsoName=response[0].isoname;\n self.assertEqual(attachedIsoName, \"vmware-tools.iso\", \"vmware-tools.iso not attached\")\n return\n\n @attr(tags=[\"advanced\", \"basic\"], required_hardware=\"true\")\n def test_04_check_vm_snapshot_creation_after_Instance_creation(self):\n \"\"\"\n @summary: Test if Snapshot creation is successful\n after VM deployment\n CLOUDSTACK-8830 : VM snapshot creation fails for 12 min\n\n Step1: Create a VM with any Service offering\n Step2: Create a VM snapshot\n Step3: Verify is VM SS creation is failed\n \"\"\"\n\n if self.hypervisor.lower() not in ['vmware']:\n self.skipTest(\"This test case is only for vmware. 
Hence, skipping the test\")\n vm = VirtualMachine.create(\n self.userapiclient,\n self.services[\"virtual_machine\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=self.template.id,\n zoneid=self.zone.id\n )\n\n snapshot_created_1 = VmSnapshot.create(\n self.userapiclient,\n vm.id\n )\n self.assertIsNotNone(\n snapshot_created_1,\n \"VM Snapshot creation failed\"\n )\n","repo_name":"apache/cloudstack","sub_path":"test/integration/component/test_escalations_vmware.py","file_name":"test_escalations_vmware.py","file_ext":"py","file_size_in_byte":14718,"program_lang":"python","lang":"en","doc_type":"code","stars":1557,"dataset":"github-code","pt":"32"} +{"seq_id":"26933206138","text":"from enum import Enum, auto\nfrom typing import Dict\n\nfrom core.patterns.robotics import Robotic\n\n\nclass CoreMovement(Robotic):\n class Kinds(Enum):\n COVERAGE = auto()\n SURVEILLANCE = auto()\n\n patterns: Dict[Kinds, str] = {\n Kinds.COVERAGE: \"TODO description...\",\n Kinds.SURVEILLANCE: \"TODO description...\",\n }\n\n def __init__(self, formula: str, kind: Kinds):\n self.__formula: str = formula\n self.__kind: CoreMovement.Kinds = kind\n super().__init__(formula, Robotic.Kinds.CORE_MOVEMENT)\n","repo_name":"pierg/crome","sub_path":"src/core/patterns/robotics/coremovement/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20063516331","text":"from django.shortcuts import render, HttpResponse, get_object_or_404, redirect\nfrom products.models import Product\nfrom decimal import Decimal\nfrom cart.utils import get_cart_items_and_total\n# Create your views here.\n\n \ndef add_to_cart(request):\n id = request.POST['id']\n quantity = int(request.POST['quantity'])\n\n cart = request.session.get('cart', {})\n cart[id] = int(cart.get(id, 0)) + quantity\n \n request.session['cart'] = cart \n\n return redirect('product_list')\n \ndef view_cart(request):\n \n cart = request.session.get('cart', {})\n print(cart)\n print(cart.items())\n context = get_cart_items_and_total(cart)\n return render(request, 'cart/view_cart.html', context)\n \ndef remove_from_cart(request, id):\n cart = request.session.get('cart', {})\n del cart[id]\n request.session['cart'] = cart \n return redirect('view_cart') \n \n \n \n \n \n \n ","repo_name":"ColmHughes/day-36-40---Django-Ecommerce","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33778847255","text":"from nose.plugins.attrib import attr\nfrom marvin.cloudstackTestCase import cloudstackTestCase\nfrom marvin.lib.utils import (cleanup_resources,\n validateList)\nfrom marvin.lib.base import (Account,\n ServiceOffering,\n VirtualMachine,\n Host\n )\nfrom marvin.lib.common import (get_domain,\n get_zone,\n get_template,\n list_virtual_machines,\n list_ssvms,\n list_routers\n )\n\n\nfrom marvin.cloudstackAPI import (updateHypervisorCapabilities,\n listHypervisorCapabilities)\n\nfrom marvin.codes import PASS\n\n\nclass TestMaxHyperviosrLimit(cloudstackTestCase):\n\n @classmethod\n def setUpClass(cls):\n testClient = super(TestMaxHyperviosrLimit, cls).getClsTestClient()\n cls.apiclient = testClient.getApiClient()\n cls.testdata = testClient.getParsedTestDataConfig()\n\n cls.hypervisor = cls.testClient.getHypervisorInfo()\n # Get 
Zone, Domain and templates\n\n        cls.domain = get_domain(cls.apiclient)\n        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())\n\n        cls.template = get_template(\n            cls.apiclient,\n            cls.zone.id,\n            cls.testdata[\"ostype\"])\n\n        cls._cleanup = []\n        try:\n            cls.skiptest = False\n            if cls.hypervisor.lower() not in ['xenserver']:\n                cls.skiptest = True\n                return\n\n            # Create an account\n            cls.account = Account.create(\n                cls.apiclient,\n                cls.testdata[\"account\"],\n                domainid=cls.domain.id\n            )\n\n            # Create user api client of the account\n            cls.userapiclient = testClient.getUserApiClient(\n                UserName=cls.account.name,\n                DomainName=cls.account.domain\n            )\n            # Create Service offering\n            cls.service_offering = ServiceOffering.create(\n                cls.apiclient,\n                cls.testdata[\"service_offering\"],\n                hosttags=\"host1\"\n            )\n\n            cls._cleanup = [\n                cls.account,\n                cls.service_offering,\n            ]\n        except Exception as e:\n            cls.tearDownClass()\n            raise e\n        return\n\n    @classmethod\n    def tearDownClass(cls):\n        try:\n            cleanup_resources(cls.apiclient, cls._cleanup)\n        except Exception as e:\n            raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n\n    def setUp(self):\n        self.cleanup = []\n        if self.skiptest:\n            self.skipTest(\"This test is to be checked on xenserver \\\n                    only. Hence, skipping for %s\" % self.hypervisor)\n\n        self.apiclient = self.testClient.getApiClient()\n        self.dbclient = self.testClient.getDbConnection()\n\n    def tearDown(self):\n        try:\n\n            cmd = updateHypervisorCapabilities.updateHypervisorCapabilitiesCmd()\n            cmd.id = self.hostCapId\n            cmd.maxguestslimit = self.originalLimit\n            self.apiclient.updateHypervisorCapabilities(cmd)\n\n            cleanup_resources(self.apiclient, self.cleanup)\n        except Exception as e:\n            raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n        return\n\n    @attr(tags=[\"advanced\", \"basic\"], required_hardware=\"false\")\n    def test_check_hypervisor_max_limit_effect(self):\n        \"\"\" Test hypervisor max limits effect\n\n        # 1. Read existing count of VM's on the host including SSVM and VR\n        and modify maxguestslimit accordingly\n        # 2. Deploy a VM\n        # 3. Try to deploy another VM\n        # 4. 
Verify that second VM\n deployment fails (2 SSVMs 1 VR VM and 1 deployed VM)\n \"\"\"\n\n hostList = Host.list(\n self.apiclient,\n zoneid=self.zone.id,\n type=\"Routing\")\n event_validation_result = validateList(hostList)\n self.assertEqual(\n event_validation_result[0],\n PASS,\n \"host list validation failed due to %s\" %\n event_validation_result[2])\n\n self.host = Host(hostList[0])\n Host.update(self.apiclient, id=self.host.id, hosttags=\"host1\")\n\n # Step 1\n # List VM's , SSVM's and VR on selected host\n listVm = list_virtual_machines(self.apiclient,\n hostid=self.host.id)\n\n listssvm = list_ssvms(self.apiclient,\n hostid=self.host.id)\n\n listvr = list_routers(self.apiclient,\n hostid=self.host.id)\n\n newValue = 1\n if listVm is not None:\n newValue = len(listVm) + newValue\n\n if listssvm is not None:\n newValue = len(listssvm) + newValue\n\n if listvr is not None:\n newValue = len(listvr) + newValue\n\n qresultset = self.dbclient.execute(\n \"select hypervisor_version from host where uuid='%s'\" %\n self.host.id)\n\n event_validation_result = validateList(qresultset)\n self.assertEqual(\n event_validation_result[0],\n PASS,\n \"event list validation failed due to %s\" %\n event_validation_result[2])\n\n cmdList = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()\n cmdList.hypervisor = self.hypervisor\n config = self.apiclient.listHypervisorCapabilities(cmdList)\n\n for host in config:\n if host.hypervisorversion == qresultset[0][0]:\n self.hostCapId = host.id\n self.originalLimit = host.maxguestslimit\n break\n else:\n self.skipTest(\"No hypervisor capabilities found for %s \\\n with version %s\" % (self.hypervisor, qresultset[0][0]))\n\n cmdUpdate = updateHypervisorCapabilities.\\\n updateHypervisorCapabilitiesCmd()\n cmdUpdate.id = self.hostCapId\n cmdUpdate.maxguestslimit = newValue\n self.apiclient.updateHypervisorCapabilities(cmdUpdate)\n\n # Step 2\n vm = VirtualMachine.create(\n self.userapiclient,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n )\n\n self.cleanup.append(vm)\n # Step 3\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.userapiclient,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n )\n","repo_name":"apache/cloudstack","sub_path":"test/integration/component/maint/test_hypervisor_limit.py","file_name":"test_hypervisor_limit.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":1557,"dataset":"github-code","pt":"32"} +{"seq_id":"37701574723","text":"import copy\nimport datetime\nimport multiprocessing as mp\nimport time\nfrom queue import Empty\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom config import CV_SPLITS, Path\nfrom models.dataset_collection import DatasetCollection\nfrom models.experiment import Experiment\nfrom models.report import Report\nfrom utils.enums import DSType\nfrom utils.prints import Print\nfrom utils.utils import datestamp_str, flatten_dict\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n# <--- PARAMETER GRIDS --->\n\n\nparam_grid = {\n \"window_length\": [50, 100, 250],\n \"dataset_type\": [str(t) for t in DSType.variants()],\n \"sample_trim\": [\"0;0\", 
\"0;1\", \"0;2\", \"0;3\"],\n \"ds_split_by\": [\"session\", \"user\", \"random\"],\n \"classifier\": [\"svm\", \"lda\", \"rfc\", \"bagging\", \"tree\", \"knn\", \"gaussian\"],\n \"preprocessor\": [\n \"filter;csp;mean_power\",\n \"emd;csp;mean_power\",\n \"csp;mean_power\",\n \"emd;stats\",\n \"stats\",\n \"mean_power\",\n \"dwt;stats\",\n \"dwt;mean_power\",\n \"dwt;csp;mean_power\"\n ]\n}\n\nconditional_param_grid = {\n \"svm\": {\n \"kernel\": [\"linear\", \"rbf\", \"sigmoid\"]\n },\n \"nn\": {\n \"verbose\": 0,\n \"epochs\": [10, 50, 100, 200],\n \"loss\": [\"mean_squared_error\", \"binary_crossentropy\"],\n \"n_layers\": [3, 5, 10],\n \"start_units\": [100, 50, 20, 10]\n },\n \"rfc\": {\n \"n_estimators\": [1, 10, 100],\n \"criterion\": [\"gini\", \"entropy\"]\n },\n \"filter\": {\n \"kernel\": [\"mne\", \"custom\"],\n \"l_freq\": [7, 10, 15, 20],\n \"h_freq\": [30, 50, None],\n \"band\": [\"delta\", \"theta\", \"alpha\", \"beta\", \"gamma\"]\n },\n \"csp\": {\n \"kernel\": [\"mne\", \"custom\"],\n \"n_components\": [1, 2, 4],\n \"mode\": [\"1vall\", \"1v1\"]\n },\n \"lda\": {\n \"solver\": \"lsqr\"\n },\n \"mean_power\": {\n \"log\": [True, False]\n },\n \"emd\": {\n \"mode\": [\"set_max\", \"minkowski\"],\n \"n_imfs\": [1, 2],\n \"max_iter\": [10, 500, 2000],\n \"subtract_residue\": [True, False]\n },\n \"stats\": {\n \"features\": [\"__all__\", \"__fast__\"],\n \"splits\": 1\n },\n \"dwt\": {\n \"dim\": [1, 2],\n # \"wavelet\": pywt.wavelist(kind=\"discrete\"),\n \"wavelet\": [\"db1\", \"rbio6.8\", \"rbio2.6\", \"sym2\", \"db2\", \"bior2.4\", \"sym5\"]\n }\n}\n\n\nclass ExperimentSet:\n def __init__(self, description=\"\", hypothesis=\"\", cv_splits=CV_SPLITS, **kwargs):\n self.params = dict(kwargs)\n self.init_time = datetime.datetime.now()\n self.exp_params_list = []\n self.exp_reports = []\n self.best_exp = None\n\n self.description = description\n self.hypothesis = hypothesis\n\n self.run_time = None\n self.multiprocessing = None\n self.save_best = kwargs.get('save_best', False)\n\n self.cv_splits = cv_splits\n\n self.relevant_keys = []\n self.pipeline_items = []\n\n self.create_experiment_params()\n\n def filename(self, prefix, suffix):\n return \"{}_{}.{}\".format(prefix, datestamp_str(self.init_time, file=True), suffix)\n\n def reproduction_params(self, as_string=False):\n params = {}\n\n for key in param_grid.keys():\n if key in self.params:\n params[key] = self.params[key]\n else:\n params[key] = param_grid[key]\n\n for key in conditional_param_grid.keys():\n if key in self.pipeline_items:\n params[key] = {}\n\n if key in self.params:\n for inner_key in conditional_param_grid[key]:\n if inner_key in self.params[key]:\n params[key][inner_key] = self.params[key][inner_key]\n else:\n params[key][inner_key] = conditional_param_grid[key][inner_key]\n else:\n params[key] = conditional_param_grid[key]\n\n return params\n\n # <--- EXPERIMENT GENERATION --->\n\n def create_experiment_params(self):\n Print.point(\"Generating Experiments\")\n for key in param_grid.keys():\n if key not in self.params:\n self.params[key] = param_grid[key]\n\n exp_params_list = self.recurse_flatten(self.params)\n\n for params in exp_params_list:\n pipeline_items = params[\"preprocessor\"].split(\";\")\n pipeline_items.append(params[\"classifier\"])\n self.pipeline_items = list(set(self.pipeline_items + pipeline_items))\n\n for key, val in conditional_param_grid.items():\n key = key\n if key in pipeline_items:\n if isinstance(val, dict):\n for val_key, val_val in val.items():\n if key in self.params:\n 
if val_key in self.params[key]:\n params[key][val_key] = self.params[key][val_key]\n else:\n params[key][val_key] = val_val\n else:\n params[key] = val\n else:\n params[key] = self.params[key] if key in self.params else val\n else:\n if key in params:\n del params[key]\n\n exp_params_list = self.recurse_flatten(exp_params_list)\n\n # The following two lines remove duplicate configurations\n\n out = []\n for v in exp_params_list:\n if v not in out:\n out.append(v)\n\n exp_params_list = out\n # set_of_jsons = {json.dumps(d, sort_keys=True) for d in exp_params_list}\n # exp_params_list = [json.loads(t) for t in set_of_jsons]\n\n Print.start(\"\")\n print(pd.DataFrame([flatten_dict(e) for e in exp_params_list]))\n print(\"\\n\\n\")\n\n self.exp_params_list = exp_params_list\n\n def recurse_flatten(self, params):\n res = []\n if isinstance(params, list):\n for item in params:\n res += self.recurse_flatten(item)\n else:\n found_list = False\n for key, val in params.items():\n if isinstance(val, list):\n self.relevant_keys.append(key)\n found_list = True\n for option in val:\n new_params = copy.deepcopy(params)\n new_params[key] = option\n res += self.recurse_flatten(new_params)\n break\n elif isinstance(val, dict):\n for val_key, val_val in val.items():\n if isinstance(val_val, list):\n if key in self.pipeline_items:\n self.relevant_keys.append(\"{}__{}\".format(key, val_key))\n found_list = True\n for option in val_val:\n new_params = copy.deepcopy(params)\n new_params[key][val_key] = option\n res += self.recurse_flatten(new_params)\n break\n\n if not found_list:\n res.append(params)\n\n return res\n\n # <--- EXPERIMENT EXECUTION --->\n\n def run_experiments(self, fast_datasets=False):\n time.sleep(1)\n start_run_time = time.time()\n\n ds_collection = DatasetCollection.from_params(self.params, self.cv_splits, fast=fast_datasets)\n\n if self.multiprocessing == \"exp\":\n self.run_multi(ds_collection)\n else:\n for i, exp_params in enumerate(tqdm(self.exp_params_list, desc=\"Running Experiments\")):\n exp = Experiment.from_params(exp_params)\n exp.cv_splits = self.cv_splits\n exp.index = i\n exp.set_datasets(ds_collection)\n\n exp.multiprocessing = (self.multiprocessing == \"cv\")\n\n exp.run()\n\n if self.best_exp is None or exp.report[\"accuracy\"] > self.best_exp.report[\"accuracy\"]:\n Print.good(\"New best: {}\".format(np.round(exp.report[\"accuracy\"], 3)))\n self.best_exp = exp\n\n self.exp_reports.append(exp.report)\n\n self.run_time = time.time() - start_run_time\n self.generate_report()\n\n if self.save_best:\n from sklearn.externals import joblib\n fp = Path.classifiers + '/' + \"classifier1.pkl\"\n\n joblib.dump(self.best_exp.pipeline, fp)\n # notify(\"ExperimentSet finished running\", \"\")\n\n def generate_report(self):\n print(\"\\n\")\n Print.success(\"Generating Report\")\n\n report = Report(self, self.exp_reports)\n report.generate()\n\n @staticmethod\n def worker(i, working_queue, output_q, cv_splits, ds_collection):\n while True:\n try:\n exp_params = working_queue.get_nowait()\n exp = Experiment.from_params(exp_params)\n exp.cv_splits = cv_splits\n exp.set_datasets(ds_collection)\n\n Print.progress(\"{}: Running Experiment\".format(i))\n exp.run()\n output_q.put(exp.report)\n except Empty:\n Print.info(\"Queue Empty\")\n break\n\n return\n\n def run_multi(self, ds_collection):\n working_q = mp.Queue()\n output_q = mp.Queue()\n\n for exp_params in self.exp_params_list:\n working_q.put(exp_params)\n\n n_workers = np.min([mp.cpu_count(), len(self.exp_params_list)])\n\n 
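# cap the pool at cpu_count and at the experiment count, so no worker is\n        # spawned only to find the queue already empty\n        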
Print.info(\"Using {} workers\".format(n_workers))\n processes = [mp.Process(target=self.worker, args=(i, working_q, output_q, self.cv_splits, ds_collection)) for i\n in\n range(n_workers)]\n\n for proc in processes:\n proc.start()\n\n for proc in processes:\n proc.join()\n\n while True:\n try:\n self.exp_reports.append(output_q.get_nowait())\n except Empty:\n break\n\n\n# <--- RUN CODE --->\n\n\nif __name__ == '__main__':\n params = {\n \"window_length\": 100,\n \"dataset_type\": \"LA-RA-LF-RF\",\n \"sample_trim\": \"0;3\",\n \"ds_split_by\": \"session\",\n \"classifier\": \"rfc\",\n \"preprocessor\": [\"mean_power\", \"stats\", \"csp;mean_power\", \"filter;mean_power\", \"filter;stats\", \"emd;stats\",\n \"dwt;stats\", \"filter;csp;mean_power\"],\n \"svm\": {\n \"kernel\": \"rbf\"\n },\n \"lda\": {\n \"solver\": \"lsqr\"\n },\n \"rfc\": {\n \"n_estimators\": 100,\n \"criterion\": \"entropy\"\n },\n \"filter\": {\n \"kernel\": \"mne\",\n \"l_freq\": 20,\n \"h_freq\": None,\n \"band\": None\n },\n \"csp\": {\n \"kernel\": \"custom\",\n \"n_components\": 4,\n \"mode\": \"1vall\"\n },\n \"emd\": {\n \"mode\": \"set_max\",\n \"n_imfs\": 1,\n \"max_iter\": 10,\n \"subtract_residue\": True\n },\n \"dwt\": {\n \"dim\": 2,\n \"wavelet\": \"bior2.4\"\n },\n \"mean_power\": {\n \"log\": True,\n },\n \"stats\": {\n \"features\": \"__env__\",\n \"splits\": 1\n }\n }\n\n exp_set = ExperimentSet(cv_splits=24, **params)\n exp_set.multiprocessing = \"cv\"\n exp_set.run_experiments(fast_datasets=False)\n","repo_name":"olavblj/thesis-experiments","sub_path":"models/experiment_set.py","file_name":"experiment_set.py","file_ext":"py","file_size_in_byte":11600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27282618219","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDatei von Server herunterladen\n\nQuelle: https://developers.google.com/edu/python/utilities\n\n@author: Christian Wichmann\n@license: GNU GPL\n\"\"\"\n\nimport urllib\nimport urllib.request\n\n\ndef print_url_content(url):\n try:\n request = urllib.request.urlopen(url)\n print(request.read())\n except IOError:\n print('Problem reading url: ', url)\n \n\ndef save_url_content(url, filename):\n try:\n request = urllib.request.urlopen(url)\n local_file = open(filename, 'w')\n local_file.write(request.read().decode('utf-8'))\n except IOError:\n print('Problem reading url: ', url)\n\n\ndef download_file(url, filename):\n try:\n # request data from url\n request = urllib.request.urlopen(url)\n header = request.info()\n \n # writing data to local file...\n print('Loading file from url.', end='')\n chunk_size = 1024\n local_file = open(filename, 'wb')\n while True:\n chunk = request.read(chunk_size)\n # ...as long there is data\n if not chunk:\n break\n local_file.write(chunk)\n print('.', end='')\n except IOError:\n print('Problem reading url: ', url)\n print('')\n\n\nif __name__ == '__main__':\n print_url_content('http://www.bbs-os-brinkstr.de')\n save_url_content('http://www.bbs-os-brinkstr.de', 'temp.html')\n download_file('http://www.bbs-os-brinkstr.de/fileadmin/0_intern/Schulleitung/Terminrahmenplan_public/Terminrahmenplan_2013_2014.pdf', 'temp.pdf')\n\n\n","repo_name":"wichmann/PythonExamples","sub_path":"network/download_file.py","file_name":"download_file.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"13399380357","text":"\"\"\"\nTo be used in the folder 
where you compute something.\nCompare mobile carrier fraction curves across runs.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport extract\nimport numpy as np\nimport matplotlib\nmatplotlib.use('pdf')\nimport matplotlib.pyplot as plt\nimport os\nfrom shutil import copyfile\n\n\nNames = ['.', 'replay_100nm', 'replay_longer']\n\nn_r = 30\n\nfor i in range(len(Names)):\n    mobile_fraction = np.loadtxt('{}/postprocessing/mobile_fraction.dat'.format(Names[i]))\n    std_err_mo = np.loadtxt('{}/postprocessing/std_err_mo.dat'.format(Names[i])) # in CI\n    dop = np.loadtxt('{}/postprocessing/dop.dat'.format(Names[i])) # dop molar fraction\n    plt.errorbar(dop[0:len(mobile_fraction)], mobile_fraction, yerr=std_err_mo*np.sqrt(n_r), label=Names[i], capsize=3, alpha=1, color='C{}'.format(i), fmt='-', ecolor='C{}'.format(i))\n    #plt.errorbar(dop[0:len(mobile_fraction)], mobile_fraction, yerr=std_err_mo*np.sqrt(n_r), label=Names[i], capsize=3, alpha=1, color='C{}'.format(i), fmt='-', ecolor='grey')\n\n#Fig 1\nplt.xscale('log')\nplt.ylabel('Mobile Carrier Fraction')\nplt.xlabel('Doping')\nplt.legend()\nplt.xlim([1E-3, 1E-1])\nplt.ylim([0, 1.2])\nplt.savefig(\"CompareMobFraction.png\", dpi=600)\nplt.close()\n\n# Fig 2 || Normalized std\nfor i in range(len(Names)):\n    dop = np.loadtxt('{}/postprocessing/dop.dat'.format(Names[i])) # dop molar fraction\n    mobile_fraction = np.loadtxt('{}/postprocessing/mobile_fraction.dat'.format(Names[i]))\n    std_err_mo = np.loadtxt('{}/postprocessing/std_err_mo.dat'.format(Names[i])) # in CI\n    plt.plot(dop[0:len(mobile_fraction)], std_err_mo*np.sqrt(n_r)/mobile_fraction, label=Names[i])\nplt.xscale('log')\nplt.ylabel('std Mobile Carrier Fraction')\nplt.xlabel('Doping')\nplt.legend()\nplt.xlim([1E-3, 1E-1])\nplt.savefig(\"CompareStdMobFraction.png\", dpi=600)\nplt.close()","repo_name":"ArtemFediai/postprocessing","sub_path":"compare_depl.py","file_name":"compare_depl.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"39403997633","text":"import sys, argparse\r\nfrom gooey import Gooey, GooeyParser\r\n\r\nCL_Flag = False \r\ninit_length = len(sys.argv)\r\nif init_length >= 2:\r\n    if not '--ignore-gooey' in sys.argv:\r\n        sys.argv.append('--ignore-gooey')\r\n        CL_Flag = True\r\nelse:\r\n    CL_Flag = False \r\n\r\n@Gooey(program_name=\"Count\", program_description='Stand Alone Profile Motifs Routine')\r\ndef main():\r\n\r\n\r\n    if CL_Flag == False:\r\n        parser = GooeyParser(conflict_handler='resolve', description=\"Enter DNA or select or type a filename\")\r\n        \r\n        parser.add_argument('input0', type=int, nargs='?', help='t') \r\n        parser.add_argument('input1', type=int, nargs='?', help='K') \r\n        parser.add_argument('input2', type=str, nargs='*', help='DNA without \\' \\\" or , (s) or FileName of a properly formatted data dictionary' , widget=\"FileChooser\") \r\n        \r\n        \r\n    if CL_Flag == True:\r\n        parser = argparse.ArgumentParser(conflict_handler='resolve') \r\n        parser.add_argument('input0', type=int, nargs='?', help=\"t\" ) \r\n        parser.add_argument('input1', type=int, nargs='?', help='K') \r\n        parser.add_argument('input2', type=str, nargs='?', help=\"DNA file or raw\" )\r\n        \r\n    args = parser.parse_args()\r\n
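    # crude filename heuristic below: if the argument contains any of the\r\n    # characters '/.tx' it is treated as a file to read, otherwise as raw DNA\r\n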
    valid_File = '/.tx'\r\n    Flag_1 = False\r\n    while Flag_1 == False:\r\n        if args.input2 != None: \r\n            s1 = args.input2\r\n            if validSequence(s1) == True:\r\n                Text = str(args.input2)\r\n                Flag_1 = True\r\n            if any(i in valid_File for i in str(args.input2)) == True:\r\n                filename = str(args.input2[0])\r\n                file = open(filename, \"r\")\r\n                Text = file.read()\r\n                Flag_1 = True\r\n\r\n    # Clean it up if it's already in data dictionary format\r\n    Text = Text.replace(\"{[\",\"[\")\r\n    Text = Text.replace(\"]}\",\"]\")\r\n    Text = Text.replace(\"[\\\"[\\'\",\"[\\'\")\r\n    Text = Text.replace(\"\\']\\\"]\",\"\\']\")\r\n    Text = Text.replace(\"\\',\\\",\",\"\\',\")\r\n    Text = Text.replace(\"\\\"\\'\",\"\\'\")\r\n    Dna = str(Text)\r\n    k = int(args.input1)\r\n    t = int(args.input0)\r\n    \r\n    result = GreedyMotifSearch(Dna, k, t)\r\n    print(result)\r\n    return result\r\n\r\n# \r\n# Actual code from here on: https://stepik.org/lesson/23066/step/5?unit=6799\r\n#\r\n\r\n\r\ndef Pr(Text, Profile):\r\n    p = 1\r\n    for i in range(len(Text)):\r\n        p = p * Profile[Text[i]][i]\r\n    return p\r\n\r\ndef validSequence(s1):\r\n    valid = 'ACTGU'\r\n    for letter in s1:\r\n        if letter not in valid:\r\n            return False\r\n    return True\r\n\r\ndef Count(Motifs):\r\n    count = {} # initializing the count dictionary\r\n    k = len(Motifs[0])\r\n    for symbol in \"ACGT\":\r\n        count[symbol] = []\r\n        for j in range(k):\r\n            count[symbol].append(0)\r\n    # tally each symbol at each position across all motifs\r\n    t = len(Motifs)\r\n    for i in range(t):\r\n        for j in range(k):\r\n            symbol = Motifs[i][j]\r\n            count[symbol][j] += 1\r\n    return count\r\n\r\ndef Consensus(Motifs):\r\n    k = len(Motifs[0])\r\n    count = Count(Motifs)\r\n    consensus = \"\"\r\n    for j in range(k):\r\n        m = 0\r\n        frequentSymbol = \"\"\r\n        for symbol in \"ACGT\":\r\n            if count[symbol][j] > m:\r\n                m = count[symbol][j]\r\n                frequentSymbol = symbol\r\n        consensus += frequentSymbol\r\n    return consensus\r\n\r\ndef Profile(Motifs):\r\n    count = {} # initializing the count dictionary\r\n    profile = {}\r\n    k = len(Motifs[0])\r\n    for symbol in \"ACGT\":\r\n        count[symbol] = []\r\n        for j in range(k):\r\n            count[symbol].append(0)\r\n\r\n    t = len(Motifs)\r\n    for i in range(t):\r\n        for j in range(k):\r\n            symbol = Motifs[i][j]\r\n            count[symbol][j] += 1\r\n    ## divide the number of motif strings to get frequency\r\n    for letter in count.keys():\r\n        profile[letter] = [x/ float(t) for x in count[letter]]\r\n    return profile\r\n\r\ndef ProfileMostProbableKmer(Text, k, Profile):\r\n    p_dict = {}\r\n    for i in range(len(Text)- k +1):\r\n        p = Pr(Text[i: i+k], Profile)\r\n        p_dict[i] = p\r\n    m = max(p_dict.values())\r\n    keys = [k for k,v in p_dict.items() if v == m]\r\n    ind = keys[0]\r\n    return Text[ind: ind +k]\r\n\r\ndef Score(Motifs):\r\n    consensus = Consensus(Motifs)\r\n    t = len(Motifs)\r\n    k = len(Motifs[0])\r\n    score = 0\r\n    for i in range(k):\r\n        FrequentSymbol = consensus[i]\r\n        for j in range(t):\r\n            if Motifs[j][i] != FrequentSymbol:\r\n                score = score + 1\r\n    return score\r\n\r\ndef GreedyMotifSearch(Dna, k, t):\r\n    BestMotifs = []\r\n    for i in range(0, t):\r\n        BestMotifs.append(Dna[i][0:k])\r\n    n = len(Dna[0])\r\n    for i in range(n-k+1):\r\n        Motifs = []\r\n        Motifs.append(Dna[0][i:i+k])\r\n        for j in range(1, t):\r\n            P = Profile(Motifs[0:j])\r\n            Motifs.append(ProfileMostProbableKmer(Dna[j], k, P))\r\n        if Score(Motifs) < Score(BestMotifs):\r\n            BestMotifs = Motifs\r\n    return BestMotifs\r\n\r\nif __name__ == '__main__':\r\n    main() ","repo_name":"marlinpohlman/bioinformatics","sub_path":"GreedyMotif.py","file_name":"GreedyMotif.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"14807094837","text":"import discord\nfrom discord.ext import commands\n\nfrom discord_bot_owners import DiscordBotOwners\n\n\nclass AutoRolesView(discord.ui.View):\n\n    def __init__(self):\n        super().__init__(timeout=None)\n
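\n    # NOTE: timeout=None plus the fixed custom_id on each button is what makes\n    # this view persistent; after a restart the cog below re-registers it with\n    # client.add_view(AutoRolesView(), message_id=...).\n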
\n    @discord.ui.button(label=\"Announcements\", style=discord.ButtonStyle.blurple, custom_id=\"persistent:announcements\")\n    async def announcements(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:\n        announcements_role = interaction.guild.get_role(interaction.client.config[\"role_id\"][\"announcements\"])\n\n        if announcements_role in interaction.user.roles:\n            await interaction.user.remove_roles(announcements_role)\n            await interaction.response.send_message(\n                \"You will no longer be pinged when an announcement is posted.\", ephemeral=True\n            )\n        else:\n            await interaction.user.add_roles(announcements_role)\n            await interaction.response.send_message(\n                \"You will now be pinged when an announcement is posted.\", ephemeral=True\n            )\n\n    @discord.ui.button(label=\"Events\", style=discord.ButtonStyle.blurple, custom_id=\"persistent:events\")\n    async def events(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:\n        events_role = interaction.guild.get_role(interaction.client.config[\"role_id\"][\"events\"])\n\n        if events_role in interaction.user.roles:\n            await interaction.user.remove_roles(events_role)\n            await interaction.response.send_message(\n                \"You will no longer be pinged when an event is starting.\", ephemeral=True\n            )\n        else:\n            await interaction.user.add_roles(events_role)\n            await interaction.response.send_message(\n                \"You will now be pinged when an event is starting.\", ephemeral=True\n            )\n\n    @discord.ui.button(label=\"Polls\", style=discord.ButtonStyle.blurple, custom_id=\"persistent:polls\")\n    async def polls(self, interaction: discord.Interaction, button: discord.ui.Button) -> None:\n        polls_role = interaction.guild.get_role(interaction.client.config[\"role_id\"][\"polls\"])\n\n        if polls_role in interaction.user.roles:\n            await interaction.user.remove_roles(polls_role)\n            await interaction.response.send_message(\n                \"You will no longer be pinged when a poll is posted.\", ephemeral=True\n            )\n        else:\n            await interaction.user.add_roles(polls_role)\n            await interaction.response.send_message(\n                \"You will now be pinged when a poll is posted.\", ephemeral=True\n            )\n\n\nclass AutoRoles(commands.Cog):\n    \"\"\"The cog to manage the auto roles system.\"\"\"\n\n    def __init__(self, client: DiscordBotOwners):\n        self.client = client\n\n    async def cog_load(self) -> None:\n        self.client.loop.create_task(self.after_ready())\n\n    async def after_ready(self) -> None:\n        await self.client.wait_until_ready()\n\n        guild_data = await self.client.mongo.fetch_guild_data()\n        if guild_data[\"auto_roles_message_id\"] is None:\n            return\n\n        self.client.add_view(AutoRolesView(), message_id=guild_data[\"auto_roles_message_id\"])\n\n    async def send_auto_roles_view(self, channel, **kwargs) -> None:\n        auto_roles_embed = discord.Embed(\n            title=\"Auto Roles\",\n            description=\"Select the roles you want to get by clicking the buttons below.\",\n            color=self.client.color\n        )\n\n        msg = await channel.send(embed=auto_roles_embed, view=AutoRolesView(), **kwargs)\n        await self.client.mongo.update_guild_data_document(\n            {\"$set\": {\"auto_roles_message_id\": msg.id, \"auto_roles_channel_id\": channel.id}}\n        )\n        await self.client.reload_extension(\"cogs.auto_roles\")\n\n\nasync def setup(client):\n    await client.add_cog(AutoRoles(client))\n","repo_name":"AmeyWale/discord-bot-owners","sub_path":"cogs/auto_roles.py","file_name":"auto_roles.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
{"seq_id":"30887269329","text":"#to run streamlit, type in the terminal the command: streamlit run FileName\n
import pandas as pd\nimport streamlit as st\nimport numpy as np\nimport folium\nfrom streamlit_folium import folium_static\nfrom folium.plugins import MarkerCluster\nimport plotly.express as px\nfrom datetime import datetime\n\nst.set_page_config(layout='wide') #so the page content fills the whole screen\n\n@st.cache(allow_output_mutation=True) #to optimize code performance\ndef get_data(path):\n    df = pd.read_csv(path)\n    return df\n\ndef set_feature(data):\n    # convert the lot-size variable from feet to meters\n    data['sqft_lot_m'] = data['sqft_lot'] * (0.3048)\n    #add new feature\n    data['price_m2'] = data['price'] / data['sqft_lot_m']\n    return data\n\ndef overview_data(data):\n    # data overview\n    f_attribute = st.sidebar.multiselect('Enter columns',\n                                         data.columns)  # Q2 - filter that allows choosing one or more variables to display (Q2)\n    f_zipcode = st.sidebar.multiselect('Enter ZipCode', data['zipcode'].unique())  # Q1 - filter to view the properties of one or several regions (Q1)\n\n    st.title('Data Overview')  # page title\n\n    # if (f_zipcode != []) & (f_attribute != []):\n    #     data = data.loc[data['zipcode'].isin(f_zipcode), f_attribute]\n    # elif (f_zipcode != []) & (f_attribute == []):\n    #     data = data.loc[data['zipcode'].isin(f_zipcode), :]\n    # elif (f_zipcode == []) & (f_attribute != []):\n    #     data = data.loc[:, f_attribute]\n    # else:\n    #     data = data.copy()\n\n    if (f_attribute == []):\n        if f_zipcode != []:\n            data = data.loc[data['zipcode'].isin(f_zipcode), :]\n            data2 = data.loc[data['zipcode'].isin(f_zipcode), :]\n        else: #f_zipcode == []\n            data = data.copy()\n            data2 = data.copy()\n    else: #f_attribute != []\n        if f_zipcode != []:\n            data2 = data.loc[data['zipcode'].isin(f_zipcode), f_attribute]\n            data = data.loc[data['zipcode'].isin(f_zipcode), :]\n        else: #f_zipcode == []\n            data2 = data.loc[:, f_attribute]\n            data = data.copy()\n\n\n    st.dataframe(data2)\n\n    c1, c2 = st.columns((1, 1))  # to place one table next to the other\n\n    # average metrics\n    # Q3 - Show the total number of properties, the average price, the average living area\n    # and also the average price per square meter for each zip code.\n    # data2 = get_data(path)\n\n    df1 = data[['id', 'zipcode']].groupby('zipcode').count().reset_index()  # total number of properties\n    df2 = data[['price', 'zipcode']].groupby('zipcode').mean().reset_index()  # average price\n    df3 = data[['sqft_living', 'zipcode']].groupby('zipcode').mean().reset_index()  # average living area\n    df4 = data[['price_m2', 'zipcode']].groupby('zipcode').mean().reset_index()  # average price per square meter\n\n    # merge\n    m1 = pd.merge(df1, df2, on='zipcode', how='inner')\n    m2 = pd.merge(m1, df3, on='zipcode', how='inner')\n    df = pd.merge(m2, df4, on='zipcode', how='inner')\n\n    df.columns = ['ZIPCODE', 'TOTAL HOUSES', 'PRICE', 'SQFT LIVING', 'PRICE/M2']\n\n    c1.header('Average Values')\n    c1.dataframe(df, height=600)\n\n    # Descriptive statistics\n    # Q4 - Analyze each of the columns in a more descriptive way.\n    num_attributes = data.select_dtypes(include=['int64', 'float64'])\n    media = pd.DataFrame(num_attributes.apply(np.mean))\n    mediana = pd.DataFrame(num_attributes.apply(np.median))\n    std = pd.DataFrame(num_attributes.apply(np.std))\n    max_ = pd.DataFrame(num_attributes.apply(np.max))\n    min_ = pd.DataFrame(num_attributes.apply(np.min))\n\n    df1 = pd.concat([max_, min_, media, mediana, std], axis=1).reset_index()\n    df1.columns = ['attributes', 'max', 'min', 'mean', 'median', 'std']\n\n    
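# num_attributes.describe().T could produce these statistics in one call;\n    # the explicit apply() calls keep the exact column set under control\n    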
c2.header('Descriptive analysis')\n    c2.dataframe(df1, height=700)\n\n    return None\n\ndef portfolio_density(data):\n    # portfolio density\n    st.title('Region Overview')\n\n    c1, c2 = st.columns((1, 1))\n    c1.header('Portfolio Density')\n\n    df = data.sample(10)\n\n    # base Map - folium\n    density_map = folium.Map(location=[data['lat'].mean(), data['long'].mean()],\n                             default_zoom_start=15)\n\n    marker_cluster = MarkerCluster().add_to(density_map)  # markers on the map\n\n    for name, row in df.iterrows():\n        folium.Marker([row['lat'], row['long']],\n                      popup='Sold RS{0} on: {1}. Features: {2} sqft, {3} bedrooms, {4} bathrooms, year built: {5}'.format(\n                          row['price'],\n                          row['date'],\n                          row['sqft_living'],\n                          row['bedrooms'],\n                          row['bathrooms'],\n                          row['yr_built'])).add_to(marker_cluster)\n\n    with c1:\n        folium_static(density_map)\n\n    return None\n\ndef commercial_distribution(data):\n    # Distribution of properties by commercial category\n    st.sidebar.title('Commercial Options')\n    st.title('Commercial Attributes')\n\n    # Check the yearly price variation.\n    # Average Price per Year\n    data['date'] = pd.to_datetime(data['date']).dt.strftime('%Y-%m-%d')\n\n    # filter\n    min_year_built = int(data['yr_built'].min())\n    max_year_built = int(data['yr_built'].max())\n\n    st.sidebar.subheader('Select Max Year Built')\n    f_year_built = st.sidebar.slider('Year Built', min_year_built,\n                                     max_year_built,\n                                     min_year_built)\n    st.header('Average Price per Year Built')\n\n    # data select\n    df = data.loc[data['yr_built'] < f_year_built]\n    df = df[['yr_built', 'price']].groupby('yr_built').mean().reset_index()\n\n    # plot\n    fig = px.line(df, x='yr_built', y='price')\n    st.plotly_chart(fig, use_container_width=True)\n\n    # Check the daily price variation.\n    # Average Price per Day\n    st.header('Average Price per Day')\n    st.sidebar.subheader('Select Max Date')\n\n    # filter\n    min_date = datetime.strptime(data['date'].min(), '%Y-%m-%d')\n    max_date = datetime.strptime(data['date'].max(), '%Y-%m-%d')\n\n    f_date = st.sidebar.slider('Date', min_date, max_date, min_date)\n\n    # data select\n    data['date'] = pd.to_datetime(data['date'])\n    df = data.loc[data['date'] < f_date]\n    df = df[['date', 'price']].groupby('date').mean().reset_index()\n\n    # plot\n    fig = px.line(df, x='date', y='price')\n    st.plotly_chart(fig, use_container_width=True)\n\n    # histogram\n    st.header('Price Distribution')\n    st.sidebar.subheader('Select Max Price')\n\n    # filter\n    min_price = int(data['price'].min())\n    max_price = int(data['price'].max())\n    avg_price = int(data['price'].mean())\n\n    # data filtering\n    f_price = st.sidebar.slider('Price', min_price, max_price, avg_price)\n    df = data.loc[data['price'] < f_price]\n\n    # plot\n    fig = px.histogram(df, x='price', nbins=50)\n    st.plotly_chart(fig, use_container_width=True)\n\n    return None\n\ndef attributes_distribution(data):\n    # distribution of properties by physical category\n    st.sidebar.title('Attributes Options')\n    st.title('House Attributes')\n\n    # filter\n    f_bedrooms = st.sidebar.selectbox('Max number of bedrooms', sorted(set(data['bedrooms'].unique())))\n\n    f_bathrooms = st.sidebar.selectbox('Max number of bathrooms', sorted(set(data['bathrooms'].unique())))\n\n    c1, c2 = st.columns(2)\n\n    # house per bedrooms\n    c1.header('Houses per bedrooms')\n    df = data[data['bedrooms'] < f_bedrooms]\n    # plot\n    fig = px.histogram(df, x='bedrooms', nbins=19)\n    c1.plotly_chart(fig, use_container_width=True)\n\n    # house per bathrooms\n    c2.header('Houses per bathrooms')\n    df = data[data['bathrooms'] < f_bathrooms]\n    # plot\n    fig = 
px.histogram(df, x='bathrooms', nbins=19)\n    c2.plotly_chart(fig, use_container_width=True)\n\n    # filters\n    f_floors = st.sidebar.selectbox('Max number of floor', sorted(data['floors'].unique()))\n    f_waterview = st.sidebar.checkbox('Only Houses with Water View')\n\n    c1, c2 = st.columns(2)\n\n    # house per floors\n    c1.header('Houses per floor')\n    df = data[data['floors'] < f_floors]\n    # plot\n    fig = px.histogram(df, x='floors', nbins=19)\n    c1.plotly_chart(fig, use_container_width=True)\n\n    # house per water view\n    if f_waterview:\n        df = data[data['waterfront'] == 1]\n    else:\n        df = data.copy()\n\n    c2.header('Houses with Water View')\n    fig = px.histogram(df, x='waterfront', nbins=10)\n    c2.plotly_chart(fig, use_container_width=True)\n\n    return None\n\nif __name__ == \"__main__\":\n    # ETL\n    #data extraction\n    path = 'datasets/kc_house_data.csv'\n    data = get_data(path)\n\n    #transformation\n    data = set_feature(data)\n\n    overview_data(data)\n\n    portfolio_density(data)\n\n    commercial_distribution(data)\n\n    attributes_distribution(data)\n","repo_name":"laaisfmaia/Projeto-Insights-House-Rocket","sub_path":"cursoDS/codigo sem um mapa.py","file_name":"codigo sem um mapa.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"36157568443","text":"#######################REPORT PARAMETERS###################################\r\n\r\n#Report start date\r\nSTART_DATE = '2019-03-01'\r\n\r\n#Report end date\r\nFINISH_DATE = '2019-03-30'\r\n\r\n#Which project to report on\r\nPROJECT = 'da-vita.ru'\r\n\r\n\r\n\r\n#LIST OF AVAILABLE PROJECTS (MUST BE ENTERED EXACTLY LIKE THIS!!!):\r\n\r\n# elitewheels.ru\r\n# mentalshop.ru\r\n# new-tel.net\r\n# gdesvet.ru\r\n#\tda-vita.ru\r\n#\tlampabar.ru\r\n#\tmegaves.su\r\n#\tmuzokon.ru\r\n#\tбольшие-плюшевые-медведи.рф\r\n#\ttsvetomania.ru\r\n#\tcvetyopt.ru\r\n#\tcertex.spb.ru\r\n#\tfrutoss.ru\r\n#\tthe-koleso.ru\r\n#\tda-vita.com\r\n#\topt.da-vita.ru\r\n#\trezkalazerom.ru\r\n\r\n\r\n#NEW PROJECTS CAN BE ADDED IN THE FILE global_settings.py; THE SEGMENTATION\r\n#INFORMATION FOR EACH PROJECT CAN ALSO BE CONFIGURED THERE\r\n\r\n\r\n\r\n\r\n","repo_name":"kkomissarov/metrika-reports","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"15242761862","text":"#Implement a function that receives two IPv4 addresses, and returns the number of addresses between them (including the first one, excluding the last one).\n\ndef ips_between(start, end):\n    start = start.split('.'); start = list(map(int, start))\n    end = end.split('.'); end = list(map(int, end))\n    a = 16777216; b = 65536; c = 256; d = 1\n    \n    ips = 0\n    ips += (end[3] - start[3])\n    ips += ((end[2] * c) - (start[2] * c))\n    ips += ((end[1] * b) - (start[1] * b))\n    ips += ((end[0] * a) - (start[0] * a))\n    return ips\n","repo_name":"lessercreator/kata","sub_path":"count-ip.py","file_name":"count-ip.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"12423986273","text":"\"\"\"\nDATE: 2020-04-16 13:16:57\n\nrequirements:\n\n1. aiohttp\n2. lxml\n\"\"\"\n
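# e.g. install the two dependencies with: pip install aiohttp lxml\n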
\nimport asyncio\nimport pickle\nimport time\n\nimport aiohttp\nfrom lxml import etree\n\n# Conclusion:\n# an aiohttp.ClientSession object automatically stores the cookies returned by requests; no need to add them manually\n\nclass Downloader(object):\n    COOKIE_FILE = 'cookie.txt'\n\n    def __init__(self, username, password, max_corou=5):\n        \"\"\"WALLHAVEN downloader\n\n        Entering a WALLHAVEN username and password allows downloading NSFW images.\n        The maximum concurrency can be configured.\"\"\"\n\n        self.username = username\n        self.password = password\n        self.max_corou = max_corou\n\n        self.uri_counter = 0\n        self.url_queue = asyncio.Queue(maxsize=24*10)\n\n    async def __login(self, session):\n        \"\"\"Log in and save the cookies for direct reuse next time\n\n        TODO: check whether the login succeeded\"\"\"\n\n        print('Logging in with the account')\n\n        login_page = 'https://wallhaven.cc/login'\n        login_url = 'https://wallhaven.cc/auth/login'\n\n        token_xpath = '//input[@name=\"_token\"]/@value'\n\n        async with session.get(login_page) as resp:\n            selector = etree.HTML(await resp.text())\n            token = selector.xpath(token_xpath)[0]\n\n        field = {\n            'username': self.username,\n            'password': self.password,\n            '_token': token\n        }\n\n        cookies = {}\n\n        async with session.post(login_url, data=field, allow_redirects=False) as resp:\n            resp.raise_for_status()\n            cookies = resp.cookies\n            print(cookies)\n\n        remember = None\n        for k in cookies.keys():\n            if k.startswith('remember'):\n                remember = {k: cookies[k]}\n\n        with open(Downloader.COOKIE_FILE, 'wb') as f:\n            pickle.dump(remember, f)\n\n        return remember\n\n    async def __get_cookies(self, session):\n        \"\"\"Fetch the cookies and return them\n\n        Try the cookie file first; if it does not exist, log in with the given account and return the cookies\"\"\"\n\n        cookies = {}\n\n        try:\n            with open(Downloader.COOKIE_FILE, 'rb') as f:\n                cookies = pickle.load(f)\n\n            print('Trying to log in with saved cookies...')\n        except IOError:\n            print('Cookie file not found, trying to log in...')\n            cookies = await self.__login(session)\n\n        return cookies\n\n    async def __create_tasks(self, session, url_list):\n        \"\"\"Create coroutines according to the configured concurrency\"\"\"\n\n        headers = {'User-Agent': 'wasp'}\n        cookies = await self.__get_cookies(session)\n\n        for url in url_list:\n            self.url_queue.put_nowait(url)\n\n        tasks = []\n\n        for corou in range(self.max_corou):\n            tasks.append(asyncio.create_task(self.__request_url(session, headers, cookies)))\n\n        await self.url_queue.join()\n\n        for task in tasks:\n            task.cancel()\n\n        await asyncio.gather(*tasks, return_exceptions=True)\n\n    async def __request_url(self, session, headers, cookies):\n        \"\"\"Coroutine consumer method\n\n        URIs are processed inside this method\n        \"\"\"\n\n        while True:\n            url = await self.url_queue.get()\n\n            try:\n                async with session.get(url, headers=headers, cookies=cookies) as resp:\n                    selector = etree.HTML(await resp.text())\n                    uris = selector.xpath('//img[@class=\"lazyload\"]/@data-src')\n\n                    # process the URIs here\n                    for uri in uris:\n                        # increment the URI counter by 1\n                        self.uri_counter += 1\n                        print(uri)\n\n            except asyncio.CancelledError:\n                raise\n            except asyncio.TimeoutError:\n                print(f'ERROR: Get {url} timeout')\n            except Exception as error:\n                print(f'ERROR: {error} TYPE: {type(error)}')\n            finally:\n                self.url_queue.task_done()\n\n    async def download(self, tag, login=False, start=1, end=1, timeout=60):\n        \"\"\"Download the content for the given TAG\n\n        The start and end pages can be set; currently there is no check that downloadable content actually exists\n        \"\"\"\n\n        timeout = aiohttp.ClientTimeout(total=timeout)\n\n        url_list = [\n            f'https://wallhaven.cc/search?q={tag}&purity=001&page={idx}'\n            for idx in range(start,end+1)]\n\n        start = time.time()\n\n        async with aiohttp.ClientSession(timeout=timeout) as session:\n            if login:\n                await self.__login(session)\n\n            await self.__create_tasks(session, url_list)\n\n        print('\\n'*2)\n        print(f'Finished crawling tag {tag}.')\n        print(f'Took {round(time.time()-start, 2)} s')\n        print(f'Processed {self.uri_counter} URIs')\n
","repo_name":"magicFeirl/Crawlers","sub_path":"wallhaven/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"}
{"seq_id":"7466243640","text":"\"\"\"Creating models for database\"\"\"\nfrom pgvector.psycopg2 import register_vector\nfrom connect_database import connections\n\nconn = connections()\ncur = conn.cursor()\n\ncur.execute(\"CREATE EXTENSION IF NOT EXISTS vector\")\nconn.commit()\n\nregister_vector(conn)\n\ntable_create_command = \"\"\"\nCREATE TABLE embeddings (\n            id bigserial primary key, \n            embedding vector(1536)\n            );\n            \"\"\"\n\ncur.execute(table_create_command)\ncur.close()\nconn.commit()\n\n","repo_name":"Lakshya-E/ALSA-iMeet","sub_path":"Database/create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"40382762289","text":"\"\"\"Helper functions to train a model on one epoch, decode the val subset, greedy decode, beam search decode and score predicted captions\"\"\"\n\nimport time\nimport torch\n\nfrom utils import captions2index\n\n__author__ = \"Thomas Pellegrini - 2020\"\n\ndef train(model, train_loader, criterion, optimizer, epoch, pretrain_decoder=False, use_gumbel_noise=False, device='cpu'):\n\n    start = time.time()\n\n    model.train()\n\n    train_loss_avg = 0\n\n    for batch_idx, train_batch in enumerate(train_loader):\n\n        # print(\"batch nb\", batch_idx)\n\n        # Use torch.autograd.set_detect_anomaly(True) to get notices about gradient explosion\n        torch.autograd.set_detect_anomaly(True)\n\n        # Set the inputs to the device\n        speech_batch, text_batch, speech_lengths, text_lengths, ids_batch = train_batch\n        speech_batch = speech_batch.to(device)\n        text_batch = text_batch.to(device)\n        speech_lengths = speech_lengths.to(device)\n        text_lengths = text_lengths.to(device)\n\n        # Initialising gradients to zero\n        optimizer.zero_grad()\n\n        # Pass the inputs, and length of speech into the model\n        probs = model(speech_batch, speech_lengths, text_input=text_batch, pretrain_decoder=pretrain_decoder, use_gumbel_noise=use_gumbel_noise, isTrain=True)\n        torch.cuda.empty_cache()\n\n        # text_batch: B,padded_length\n        # we want to predict the next word so text_batch[:,1:] and text_lengths-1\n        loss = criterion(probs, text_batch[:,1:].contiguous(), text_lengths-1, device)\n        \n        # Run the backward pass on the masked loss\n        loss.backward()\n\n        # Clip the gradients\n        torch.nn.utils.clip_grad_norm_(model.parameters(), 2)\n\n        # Update weights and optimizer params\n        optimizer.step()\n\n        # Add masked loss\n        train_loss_avg += loss.item()\n\n        # Print the training loss after every N batches\n        if batch_idx % 50 == 0: print(\"  batch %d\\tloss: %.5f\"%(batch_idx, loss))\n\n    end = time.time()\n    train_loss_avg /= batch_idx\n    print(\" train epoch %d:\\ttrain_loss: %.5f\\ttook %.1f sec\"%(epoch, train_loss_avg, end-start))\n    return train_loss_avg\n\n\ndef val(model, val_loader, criterion, epoch, pretrain_decoder=False, use_gumbel_noise=False, print_captions=False, index2word=None, device='cpu'):\n    model.eval()\n    start = time.time()\n\n    val_loss_avg = 0\n    batch_idx = 0\n\n    for batch_idx, val_batch in enumerate(val_loader):\n        with torch.no_grad():\n\n            speech_batch, text_batch, speech_lengths, text_lengths, ids_batch = val_batch\n            speech_batch = speech_batch.to(device)\n            text_batch = text_batch.to(device)\n            speech_lengths = speech_lengths.to(device)\n            
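# (batch tensors typically arrive on CPU from the dataloader and must be\n            # moved to the model's device before the forward pass)\n            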
text_lengths = text_lengths.to(device)\n\n if batch_idx == 0:\n first_batch_text = text_batch.clone().detach().cpu()\n\n if print_captions:\n probs = model(speech_batch, speech_lengths, text_input=None, pretrain_decoder=pretrain_decoder,\n use_gumbel_noise=use_gumbel_noise, isTrain=False) # size: B, T, Vocab\n\n preds_words = greedy_captioning(probs, index2word)\n print(\" len(predictions)\", len(preds_words))\n for i in range(20):\n print_predicted_and_gt_utterance(preds_words, first_batch_text, i, index2word)\n print_captions=False\n\n probs = model(speech_batch, speech_lengths, text_input=text_batch, pretrain_decoder=pretrain_decoder, use_gumbel_noise=use_gumbel_noise, isTrain=True)\n\n torch.cuda.empty_cache()\n\n loss = criterion(probs, text_batch[:,1:].contiguous(), text_lengths-1, device)\n\n val_loss_avg += loss.item()\n\n batch_idx += 1\n \n end = time.time()\n val_loss_avg /= batch_idx\n print(\" val epoch %d:\\tval_loss: %.5f\\ttook %.1f sec\"%(epoch, val_loss_avg, end-start))\n\n return val_loss_avg\n\n\ndef print_predicted_and_gt_utterance(prediction_char, gt_text_batch, index, index2word, is_beamsearch=True):\n print(\" hyp :\", prediction_char[index])\n gt = ''\n is_not_eos = True\n # print(index, gt_text_batch)\n i = 0\n if is_beamsearch:\n while is_not_eos and i < gt_text_batch[index].shape[0]:\n c = gt_text_batch[index][i]\n c = index2word[c.item()]\n gt += c\n gt += ' '\n is_not_eos = c != ''\n i += 1\n else:\n while is_not_eos and i < gt_text_batch[index].shape[0]:\n c = gt_text_batch[index, i]\n c = index2word[c.item()]\n gt += c\n gt += ' '\n is_not_eos = c != ''\n i += 1\n\n print(\" gt:\", gt, \"\\n\")\n\n\ndef greedy_captioning(probs_tensor, index2word):\n \"\"\"greedy decoding on a probability pytorch tensor\"\"\"\n\n preds = torch.argmax(probs_tensor, dim=-1).detach().cpu().numpy()\n preds_word = []\n\n # print(index2word)\n\n for i in range(preds.shape[0]):\n t = 0\n is_not_eos = True\n pred_utt = ' '\n while is_not_eos and t < preds.shape[1]:\n c = index2word[preds[i, t]]\n # if c!='' : pred_utt += c\n pred_utt += c\n pred_utt += ' '\n is_not_eos = c != ''\n t += 1\n preds_word.append(pred_utt.replace(' ', '').replace(' ', ''))\n return preds_word\n\n\ndef decode_val(model, data_loader, criterion, index2word, word2index, decode_first_batch_only = False, plot_att=False, use_gumbel_noise=False, device='cpu'):\n\n model.eval()\n start = time.time()\n\n if decode_first_batch_only:\n val_batch = next(iter(data_loader))\n val_att_masks = []\n\n with torch.no_grad():\n\n speech_batch, text_batch, speech_lengths, text_lengths, ids_batch = val_batch\n speech_batch = speech_batch.to(device)\n text_batch = text_batch.to(device)\n first_batch_text = text_batch.clone().detach().cpu()\n speech_lengths = speech_lengths.to(device)\n text_lengths = text_lengths.to(device)\n\n if plot_att:\n print(\"Val: getting att_masks\")\n print(\"size speech_batch\", speech_batch.size())\n print(\"size text_batch\", text_batch.size())\n probs, att_masks = model(speech_batch, speech_lengths, text_input=None,\n use_gumbel_noise=use_gumbel_noise, isTrain=False, return_attention_masks=True)\n val_att_masks.append(att_masks)\n\n else:\n probs = model(speech_batch, speech_lengths, text_input=None,\n use_gumbel_noise=use_gumbel_noise, isTrain=False) # size: B, T, Vocab\n\n\n preds_words = greedy_captioning(probs, index2word)\n print(\" len(predictions)\", len(preds_words))\n # for i in range(50):\n # print_predicted_and_gt_utterance(preds_words, first_batch_text, i, index2word)\n\n if 
len(val_att_masks)>0:\n return val_att_masks, first_batch_text, preds_words\n\n else:\n # decode the whole validation subset\n all_preds_words = []\n gt_words_indices = []\n all_ids_str = []\n\n for batch_idx, val_batch in enumerate(data_loader):\n\n with torch.no_grad():\n speech_batch, text_batch, speech_lengths, text_lengths, ids_batch = val_batch\n\n if batch_idx == 0:\n first_batch_text = text_batch\n\n gt_words_indices.extend(text_batch.clone().detach().cpu())\n\n # append file ids for scoring later (outside this function)\n all_ids_str.extend(ids_batch)\n\n speech_batch = speech_batch.to(device)\n text_batch = text_batch.to(device)\n speech_lengths = speech_lengths.to(device)\n # text_lengths = text_lengths.to(device)\n\n probs = model(speech_batch, speech_lengths, text_input=None,\n use_gumbel_noise=use_gumbel_noise, isTrain=False) # size: B, T, Vocab\n\n # ind1 = torch.argmax(probs[0][0]).item()\n # ind2 = torch.argmax(probs[0][1]).item()\n\n # greedy search\n preds_words = greedy_captioning(probs, index2word)\n all_preds_words.extend(preds_words)\n\n torch.cuda.empty_cache()\n\n print(\" len(all_preds_words)\", len(all_preds_words))\n\n # for i in range(0,50,5):\n # print_predicted_and_gt_utterance(all_preds_words, first_batch_text, i, index2word)\n\n\n end = time.time()\n print(\" took %.1f sec\"%(end-start))\n return all_preds_words, gt_words_indices, all_ids_str\n\n\ndef decode_test(model, data_loader, index2word, use_gumbel_noise=False, device='cpu'):\n\n model.eval()\n start = time.time()\n\n # decode the whole test subset\n all_preds_words = []\n all_ids_str = []\n\n for batch_idx, test_batch in enumerate(data_loader):\n\n with torch.no_grad():\n speech_batch, speech_lengths, ids_batch = test_batch\n\n # append file ids for scoring later (outside this function)\n all_ids_str.extend(ids_batch)\n\n speech_batch = speech_batch.to(device)\n speech_lengths = speech_lengths.to(device)\n\n probs = model(speech_batch, speech_lengths, text_input=None,\n use_gumbel_noise=use_gumbel_noise, isTrain=False) # size: B, T, Vocab\n\n # greedy search\n preds_words = greedy_captioning(probs, index2word)\n all_preds_words.extend(preds_words)\n\n torch.cuda.empty_cache()\n # if batch_idx == 0: break\n # if batch_idx % 2 == 0: print(\" batch %d\\tloss: %.3f\"%(batch_idx, loss))\n\n end = time.time()\n print(\" took %.1f sec\"%(end-start))\n return all_preds_words, all_ids_str\n\n\ndef score_test_captions(model, criterion, data_loader, captions_dict_pred, index2word, word2index, use_gumbel_noise=False, device='cpu'):\n\n model.eval()\n start = time.time()\n\n # decode the whole test subset\n all_ids_str = []\n test_losses = []\n\n for batch_idx, test_batch in enumerate(data_loader):\n\n with torch.no_grad():\n speech_batch, speech_lengths, ids_batch = test_batch\n\n pseudo_gt_captions = [captions_dict_pred[fid] for fid in ids_batch]\n text_batch, text_lengths = captions2index(pseudo_gt_captions, word2index)\n # print(text_batch)\n # print(\"text_lengths\", text_lengths)\n\n\n\n speech_batch = speech_batch.to(device)\n speech_lengths = speech_lengths.to(device)\n text_batch = text_batch.to(device)\n text_lengths = text_lengths.to(device)\n\n probs = model(speech_batch, speech_lengths, text_input=text_batch, pretrain_decoder=False,\n use_gumbel_noise=use_gumbel_noise, isTrain=True)\n\n losses = criterion(probs, text_batch[:,1:].contiguous(), text_lengths-1, device)\n\n test_losses.extend(losses.tolist())\n # append file ids for scoring later (outside this function)\n 
all_ids_str.extend(ids_batch)\n\n            torch.cuda.empty_cache()\n            # if batch_idx == 4: break\n            if batch_idx % 1 == 0: print(\" batch %d\"%batch_idx)\n\n    end = time.time()\n    print(\" took %.1f sec\"%(end-start))\n    return test_losses, all_ids_str\n\n\n\ndef bs_decode_val(model, data_loader, index2word, use_gumbel_noise=False, device='cpu'):\n\n    model.eval()\n    start = time.time()\n\n    all_preds_words = []\n    gt_words_indices = []\n    all_ids_str = []\n    # decode the whole validation subset\n\n    for batch_idx, val_batch in enumerate(data_loader):\n\n        with torch.no_grad():\n            speech_batch, text_batch, speech_lengths, text_lengths, ids_batch = val_batch\n\n            gt_words_indices.extend(text_batch.clone().detach().cpu())\n\n            # append file ids for scoring later (outside this function)\n            all_ids_str.extend(ids_batch)\n\n            speech_batch = speech_batch.to(device)\n            speech_lengths = speech_lengths.to(device)\n\n            hyps = model(speech_batch, speech_lengths, text_input=None,\n                         use_gumbel_noise=use_gumbel_noise, isTrain=False)  # size: B, T, Vocab\n\n            hyp_words = [\" \".join([index2word[ind] for ind in hyp]).replace(\" \", \"\") for hyp in hyps]\n            del hyps\n\n            all_preds_words.append(hyp_words[0])\n\n            if batch_idx % 100 == 0: print(\"BS for batch i:\", batch_idx)\n\n            torch.cuda.empty_cache()\n\n    print(\" len(all_preds_words)\", len(all_preds_words))\n    print(\" len(gt_words_indices)\", len(gt_words_indices))\n    # print(\"first_batch_text\", first_batch_text)\n\n    # for i in range(0,50,5):\n    #     # print(i)\n    #     # print(gt_words_indices[i])\n    #     print_predicted_and_gt_utterance(all_preds_words, gt_words_indices, i, index2word, is_beamsearch=True)\n\n\n    end = time.time()\n    print(\" took %.1f sec\"%(end-start))\n\n    return all_preds_words, gt_words_indices, all_ids_str\n\n\ndef bs_decode_test(model, data_loader, index2word, use_gumbel_noise=False, device='cpu'):\n\n    model.eval()\n    start = time.time()\n\n    all_preds_words = []\n    all_ids_str = []\n    # decode the whole test subset w beamsearch\n\n    for batch_idx, val_batch in enumerate(data_loader):\n\n        with torch.no_grad():\n            speech_batch, speech_lengths, ids_batch = val_batch\n\n            # append file ids for scoring later (outside this function)\n            all_ids_str.extend(ids_batch)\n\n            speech_batch = speech_batch.to(device)\n            speech_lengths = speech_lengths.to(device)\n\n            hyps = model(speech_batch, speech_lengths, text_input=None,\n                         use_gumbel_noise=use_gumbel_noise, isTrain=False)  # size: B, T, Vocab\n            # print(\"hyps\", hyps)\n\n            hyp_words = [\" \".join([index2word[ind] for ind in hyp]).replace(\" \", \"\") for hyp in hyps]\n            del hyps\n\n            all_preds_words.append(hyp_words[0])\n\n            if batch_idx % 100 == 0: print(\"BS for batch i:\", batch_idx)\n\n            torch.cuda.empty_cache()\n\n    print(\" len(all_preds_words)\", len(all_preds_words))\n\n    end = time.time()\n    print(\" took %.1f sec\"%(end-start))\n\n    return all_preds_words, all_ids_str\n","repo_name":"topel/listen-attend-tell","sub_path":"utils_train_val_test.py","file_name":"utils_train_val_test.py","file_ext":"py","file_size_in_byte":14411,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"}
{"seq_id":"8055022677","text":"import RPi.GPIO as GPIO\n\n\nbutton_pin = 27\nled_pin = 22\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(button_pin, GPIO.IN)\nGPIO.setup(led_pin, GPIO.OUT)\n\nflag_led = 0\ncnt = 0\n\nbuttonInputPrev = 0\n\ntry:\n    while True:\n        buttonInput = GPIO.input(button_pin)\n\n        # software edge detection: compare the current sample with the previous one\n        if not buttonInputPrev and buttonInput:\n            print(\"rising edge\")\n            if flag_led == 0: \n                GPIO.output(led_pin, True)\n                flag_led = 1\n            else:\n
                GPIO.output(led_pin, False)\n                flag_led = 0\n\n        elif buttonInputPrev and not buttonInput:\n            print(\"falling edge\")\n\n        buttonInputPrev = buttonInput\n\nfinally:\n    GPIO.cleanup()\n    print(\"GPIO cleaned!\")","repo_name":"SeoDongHyeon0227/RaspberryPi_Ex","sub_path":"10_GPIO_SW_LED_interrupt.py","file_name":"10_GPIO_SW_LED_interrupt.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"37836620618","text":"from flask import Flask, render_template, request, redirect, url_for, send_file\r\nimport pandas as pd\r\n\r\napp = Flask(__name__)\r\n\r\ndef process_excel(input_file, output_file, starting_number):\r\n    # read the Excel data\r\n    data = pd.read_excel(input_file)\r\n\r\n    # define the sort order\r\n    position_order = {'骨干': 1, '其他研究人员': 2}\r\n    title_order = {'正高': 1, '副高': 2, '中级': 3, '初级': 4, '学生': 5}\r\n    education_order = {'博士': 1, '硕士': 2, '本科': 3, '大专': 4}\r\n    age_order = data['年龄'].max() - data['年龄']\r\n\r\n    # add helper columns\r\n    data['Position_Order'] = data['职务'].map(position_order)\r\n    data['Title_Order'] = data['职称'].map(title_order)\r\n    data['Education_Order'] = data['学历'].map(education_order)\r\n    data['Age_Order'] = age_order\r\n\r\n    # sort according to the rules\r\n    sorted_data = data.sort_values(by=['Position_Order', 'Title_Order', 'Education_Order', 'Age_Order'],\r\n                                   ascending=[True, True, True, False])\r\n\r\n    # remove the helper columns\r\n    sorted_data = sorted_data.drop(['Position_Order', 'Title_Order', 'Education_Order', 'Age_Order'], axis=1)\r\n\r\n    # add the sort rank column\r\n    sorted_data['排序序号'] = range(starting_number, starting_number + len(sorted_data))\r\n\r\n    # merge the rank numbers back into the original data\r\n    result = pd.merge(data, sorted_data[['排序序号']], left_index=True, right_index=True, how='left')\r\n\r\n    # save the result to Excel\r\n    result.to_excel(output_file, index=False)\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n    if request.method == 'POST':\r\n        input_file = request.files['input_file']\r\n        starting_number = int(request.form['starting_number'])\r\n\r\n        if input_file and input_file.filename.endswith('.xlsx'):\r\n            input_filename = 'uploaded_file.xlsx'\r\n            input_file.save(input_filename)\r\n\r\n            output_filename = 'sorted_with_number.xlsx'\r\n            process_excel(input_filename, output_filename, starting_number)\r\n\r\n            return redirect(url_for('download', filename=output_filename))\r\n\r\n    return render_template('index.html')\r\n\r\n@app.route('/download/<filename>')\r\ndef download(filename):\r\n    return send_file(filename, as_attachment=True)\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","repo_name":"Mengyu-Messic/person_sort","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"1310504220","text":"\"\"\"bquotepad URL Configuration\"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth import views as auth_views\nfrom quotepad.views import home, register, change_password, landing\nfrom quotepad.forms import FormStepOne, FormStepTwo, FormStepThree, FormStepFour, FormStepFive, FormStepSix, FormStepSeven, FormStepEight, FormStepNine\nfrom quotepad.views import BoilerFormWizardView \nfrom quotepad.views import model_form_upload\nfrom django.contrib.auth.decorators import login_required\n\nfrom quotepad.views import edit_profile_details, show_uploaded_files, 
quote_generated, test_quote_generated, quote_emailed, quote_not_possible, quotepad_template_help\nfrom quotepad.views import ProductPriceList, ProductPriceCreate, ProductPriceUpdate, ProductPriceDelete\nfrom quotepad.views import ProductComponentList, ProductComponentCreate, ProductComponentUpdate, ProductComponentDelete\nfrom quotepad.views import OptionalExtraList, OptionalExtraCreate, OptionalExtraUpdate, OptionalExtraDelete\nfrom quotepad.views import generate_quote_from_file, edit_quote_template, list_quote_archive, pdf_view, edit_quote_data\n\n# Imports for Westchem\n#from quotepad.forms import CustomerProductForm, KitchenChecksForm, LaundryChecksForm, WaterSoftenerChecksForm, ProductsUsedForForm, CommentsForm, ProductOrderForm\n#from quotepad.views import WestChemFormWizardView, pdf_viewWC, cust_ord_pdf_viewWC, report_generated, list_report_archive, order_report_generated\n#from quotepad.views import generate_quote_from_fileWC\n#from quotepad.views import customer_order\n\n# Imports for Yourheat\nfrom quotepad.views import BoilerFormWizardView_yh,generate_quote_from_file_yh\nfrom quotepad.forms import FormStepOne_yh, FormStepTwo_yh, FormStepThree_yh, FormStepFour_yh, FormStepFive_yh, FormStepSix_yh, FormStepSeven_yh, FormStepEight_yh, FormStepNine_yh, FinanceForm_yh\nfrom quotepad.views import list_quote_archive_yh, upload_for_reprint_yh, QuoteAccepted\nfrom quotepad.views import ssCustomerSelect, ssPostSurveyQuestions, ss_customer_comms_yh, ssGetPhotosForUpload\n\n# Imports for Yourheat admin\nfrom quotepad.views import admin_home, customer_comms, list_customers_for_comms, emails_sent_to_customers, confirm_calendar_appointment, get_survey_appointment, get_installation_appointment, get_job_parts, get_special_offer, get_heat_plan, get_guarantee, generate_guarantee_pdf\nfrom quotepad.views import processing_cancelled, preview_comms, display_comms, email_comms, email_sent_to_merchant\n\n# Imports for Hub\nfrom quotepad.views import hub_home, recommend_a_friend, preview_recommend_a_friend, email_recommend_a_friend, confirmation_page, view_invoice_pdf, view_receipt_pdf\n\n# Imports for Customer Pages\nfrom quotepad.views import customer_acceptance, customer_acceptance_email, customer_enquiry_form\n\nfrom quotepad.views import TestForm, test_gmail\nfrom quotepad.views import engineer_hub, engineer_calendar_change, engineer_calendar_delete, engineer_hub_job, engineer_hub_photo_select, engineer_hub_photo_upload, engineer_hub_ok, engineer_hub_get_ss_attachments, engineer_hub_get_serial_numbers, engineer_update_serial_numbers, engineer_hub_latest_PO_details, engineer_hub_get_job_completion, engineer_hub_job_completion\n\nfrom quotepad.views import XeroInitialAuthorisation, XeroInitialRefreshToken, XeroInvoicePost, XeroCustomerCreate, XeroCreateDepositCustomer, XeroCreateBalanceInvoice\n\nurlpatterns = [\n \n url(r'^admin/', admin.site.urls),\n # Patterns in views/core.py\n path('logout/', auth_views.LogoutView.as_view()),\n path('', landing, name='landing'),\n path('login/', auth_views.LoginView.as_view()),\n path('passwordreset/', auth_views.PasswordResetView.as_view()),\n path('register/', register),\n\n path('quotepad/', include('django.contrib.auth.urls')),\n\n path('quotegenerated/', quote_generated, name = 'quote_generated'),\n\tpath('quoteemailed/', quote_emailed, name = 'quote_emailed'),\n path('quotenotpossible/', quote_not_possible, name = 'quote_not_possible'),\n\tpath('quotepadtemplatehelp/', quotepad_template_help, name = 'quotepad_template_help'),\n path('testquotegenerated/', 
test_quote_generated, name = 'test_quote_generated'),\n\n path('loginredirect/', home, name = 'home'),\n path('changepassword/', change_password, name = 'change_password'),\n path('home/', home, name = 'home'),\n path('landing/', landing, name = 'landing'),\n \n path('fileupload/', model_form_upload, name = 'file_upload'),\n path('showuploadedfiles/', show_uploaded_files, name = 'show_uploaded_files'),\n path('editquotetemplate/', edit_quote_template, name = 'editquotetemplate'),\n path('editcurrentquotedata/', edit_quote_data, name = 'editcurrentquotedata'),\n\tpath('listquotearchive/', list_quote_archive, name = 'listquotearchive'),\n\tpath('pdfview/', pdf_view, name = 'pdfview'),\n\n # Patterns in views/generic_boiler.py\n path('boilerform/', login_required(BoilerFormWizardView.as_view([FormStepOne,FormStepTwo,FormStepThree, FormStepFour, FormStepFive, FormStepSix, FormStepSeven, FormStepEight, FormStepNine])), name = 'boilerform'),\n path('generatequotefromfile//', generate_quote_from_file, name = 'generate_quote_from_file'),\n\n path('editprofiledetails/', edit_profile_details, name = 'editprofiledetails'),\n path('editquotetemplate/', edit_quote_template, name = 'editquotetemplate'),\n\n path('productpricelist/', login_required(ProductPriceList.as_view()), name = 'productpricelist'),\n path('productpricecreate/', ProductPriceCreate, name = 'productpricecreate'),\n\tpath('productpriceupdate//', ProductPriceUpdate, name = 'productpriceupdate'),\n\tpath('productpricedelete//', login_required(ProductPriceDelete.as_view()), name = 'productpricedelete'),\n\n path('productcomponentlist/', login_required(ProductComponentList.as_view()), name = 'productcomponentlist'),\n path('productcomponentcreate/', ProductComponentCreate, name = 'productcomponentcreate'),\n\tpath('productcomponentupdate//', ProductComponentUpdate, name = 'productcomponentupdate'),\n\tpath('productcomponentdelete//', login_required(ProductComponentDelete.as_view()), name = 'productcomponentdelete'),\n\n path('optionalextralist/', login_required(OptionalExtraList.as_view()), name = 'optionalextralist'),\n path('optionalextracreate/', OptionalExtraCreate, name = 'optionalextracreate'),\n\tpath('optionalextraupdate//', OptionalExtraUpdate, name = 'optionalextraupdate'),\n\tpath('optionalextradelete//', login_required(OptionalExtraDelete.as_view()), name = 'optionalextradelete'),\n\n path('quotepadtemplatehelp/', quotepad_template_help, name = 'quotepad_template_help'),\n\n # Patterns in views/WestChem.py\n #path('WestChemform/', login_required(WestChemFormWizardView.as_view([CustomerProductForm,KitchenChecksForm,LaundryChecksForm, WaterSoftenerChecksForm, ProductsUsedForForm, CommentsForm, ProductOrderForm])), name = 'WestChemform'),\n #path('generatequotefromfileWC//', generate_quote_from_fileWC, name = 'generate_quote_from_fileWC'),\n #path('reportgenerated/', report_generated, name = 'report_generated'),\n #path('orderreportgenerated/', order_report_generated, name = 'order_report_generated'),\n #path('listreportarchive/', list_report_archive, name = 'listreportarchive'),\n #path('pdfviewWC/', pdf_viewWC, name = 'pdfviewWC'),\n #path('custordpdfviewWC/', cust_ord_pdf_viewWC, name = 'custordpdfviewWC'),\n #path('customerorder/', customer_order, name='customer_order'),\n\n # Patterns in views/yourheat.py\n path('boilerform_yh/', login_required(BoilerFormWizardView_yh.as_view([FormStepOne_yh,FormStepTwo_yh,FormStepThree_yh, FormStepFour_yh, FormStepFive_yh, FormStepSix_yh, FormStepSeven_yh, FormStepEight_yh, FormStepNine_yh, 
FinanceForm_yh])), name = 'boilerform_yh'),\n path('generatequotefromfile_yh//', generate_quote_from_file_yh, name = 'generate_quote_from_file_yh'),\n #path('financeform/', FinanceFormWizardView_yh.as_view([FinanceForm_yh]), name = 'finance_form'),\n #path('quotegenerated_yh/', quote_generated_yh, name = 'quote_generated_yh'),\n path('listquotearchive_yh/', list_quote_archive_yh, name = 'listquotearchive_yh'),\n path('uploadforreprint_yh/', upload_for_reprint_yh, name = 'uploadforreprint_yh'),\n path('editcurrentquotedata/', edit_quote_data, name = 'editcurrentquotedata'),\n\n #path('getsmartsheet/', get_smartsheet, name='getsmartsheet'),\n path('ssCustomerSelect/', login_required(ssCustomerSelect.as_view()), name='ssCustomerSelect'),\n #path('quote_sent_to_Smartsheet_yh/', quote_sent_to_Smartsheet_yh, name = 'quotesenttoSmartsheet_yh'),\n #path('emailsSentToCustomers_yh/', emails_sent_to_customers_yh, name = 'emailsSentToCustomers_yh'),\n path('ssPostSurveyQuestions/', login_required(ssPostSurveyQuestions.as_view()), name='ssPostSurveyQuestions'),\n #path('ssGenerateCustomerComms_yh//', ss_generate_customer_comms_yh, name = 'ssGenerateCustomerComms_yh'),\n #path('ssGenerateCustomerComms_yh///', ss_generate_customer_comms_yh, name = 'ssGenerateCustomerComms_yh'),\n #path('ssListCustomersForComms_yh//', ss_list_customers_for_comms_yh, name = 'ssListCustomersForComms_yh'),\n #path('ssListCustomersForComms_yh///', ss_list_customers_for_comms_yh, name = 'ssListCustomersForComms_yh'),\n path('ssCustomerComms_yh/', ss_customer_comms_yh, name = 'ssCustomerComms_yh'),\n #path('quoteready_yh/', quote_ready_yh, name = 'quote_ready_yh'),\n #path('quoteemailed_yh/', quote_emailed_yh, name = 'quote_emailed_yh'),\n path('ssGetPhotosForUpload/', login_required(ssGetPhotosForUpload.as_view()), name='ssGetPhotosForUpload'),\n #path('photosSentToSmartsheet_yh/', photos_sent_to_smartsheet_yh, name = 'photosSentToSmartsheet_yh'),\n path('QuoteAccepted/', login_required(QuoteAccepted.as_view()), name='QuoteAccepted'),\n\n # Patterns for Hub\n path('HubHome/', hub_home, name = 'HubHome'),\n path('RecommendAFriend/', recommend_a_friend, name = 'RecommendAFriend'),\n path('PreviewRecommendAFriend//', preview_recommend_a_friend, name = 'PreviewRecommendAFriend'),\n path('EmailRecommendAFriend/', email_recommend_a_friend, name = 'EmailRecommendAFriend'),\n path('ConfirmationPage////', confirmation_page, name = 'ConfirmationPage'),\n \n\n # Patterns in views/yh_admin.py\n path('adminhome/', admin_home, name = 'adminhome'),\n path('CustomerComms/', customer_comms, name = 'CustomerComms'),\n path('ListCustomersForComms///', list_customers_for_comms, name = 'ListCustomersForComms'),\n path('ListCustomersForComms//', list_customers_for_comms, name = 'ListCustomersForComms'),\n #path('GenerateCustomerComms///', generate_customer_comms, name = 'GenerateCustomerComms'),\n #path('GenerateCustomerComms//', generate_customer_comms, name = 'GenerateCustomerComms'),\n path('EmailsSentToCustomers/', emails_sent_to_customers, name = 'EmailsSentToCustomers'),\n path('EmailSentToMerchant/', email_sent_to_merchant, name = 'EmailSentToMerchant'),\n path('ConfirmCalendarAppointment///', confirm_calendar_appointment, name = 'ConfirmCalendarAppointment'),\n path('GetSurveyAppointment//', get_survey_appointment.as_view(), name='GetSurveyAppointment'),\n path('GetSurveyAppointment/', get_survey_appointment.as_view(), name='GetSurveyAppointment'),\n path('GetInstallationAppointment//', get_installation_appointment.as_view(), 
name='GetInstallationAppointment'),\n path('GetJobParts//', get_job_parts.as_view(), name='GetJobParts'),\n path('GetSpecialOffer//', get_special_offer.as_view(), name='GetSpecialOffer'),\n path('GetHeatPlan//', get_heat_plan.as_view(), name='GetHeatPlan'),\n path('GetHeatPlan/', get_heat_plan.as_view(), name='GetHeatPlan'),\n path('GetGuarantee//', get_guarantee.as_view(), name='GetGuarantee'),\n path('ProcessingCancelled/', processing_cancelled, name='ProcessingCancelled'),\n path('PreviewComms///', preview_comms, name = 'PreviewComms'),\n path('DisplayComms///', display_comms, name = 'DisplayComms'),\n path('EmailComms///', email_comms, name = 'EmailComms'),\n\n path('TestForm/', login_required(TestForm.as_view()), name='TestForm'),\n path('TestGmail/', test_gmail, name='TestGmail'),\n\n path('ViewInvoicePDF///', view_invoice_pdf, name = 'ViewInvoicePDF'),\n path('ViewReceiptPDF///', view_receipt_pdf, name = 'ViewReceiptPDF'),\n\n path('GenerateGuaranteePDF//', generate_guarantee_pdf, name = 'GenerateGuaranteePDF'),\n \n path('CustomerAcceptance/////', customer_acceptance, name = 'CustomerAcceptance'),\n path('CustomerAcceptanceEmail/////', customer_acceptance_email, name = 'CustomerAcceptanceEmail'),\n path('CustomerEnquiry/////', customer_enquiry_form.as_view(), name='CustomerEnquiry'),\n\n path('EngineerHub//', engineer_hub, name = 'EngineerHub'),\n path('EngineerCalendarChange///', engineer_calendar_change, name = 'EngineerCalendarChange'),\n path('EngineerCalendarDelete///', engineer_calendar_delete, name = 'EngineerCalendarDelete'),\n path('EngineerHubJob///', engineer_hub_job, name = 'EngineerHubJob'),\n path('EngineerHubPhotoSelect///', engineer_hub_photo_select, name = 'EngineerHubPhotoSelect'),\n path('EngineerHubPhotoUpload/////', engineer_hub_photo_upload, name = 'EngineerHubPhotoUpload'),\n path('EngineerHubOk////', engineer_hub_ok, name = 'EngineerHubOk'),\n path('EngineerHubGetSSAttachments///', engineer_hub_get_ss_attachments, name = 'EngineerHubGetSSAttachments'),\n path('EngineerHubGetSerialNumbers///', engineer_hub_get_serial_numbers, name = 'EngineerHubGetSerialNumbers'),\n path('EngineerHubLatestPODetails///', engineer_hub_latest_PO_details, name = 'EngineerHubLatestPODetails'),\n path('EngineerUpdateSerialNumbers////', engineer_update_serial_numbers, name = 'EngineerUpdateSerialNumbers'),\n path('EngineerHubGetJobCompletion///', engineer_hub_get_job_completion, name = 'EngineerHubGetJobCompletion'),\n path('EngineerHubJobCompletion///', engineer_hub_job_completion, name = 'EngineerHubJobCompletion'),\n\n path('XeroInit/', XeroInitialAuthorisation, name = 'XeroInit'),\n path('XeroRedirect/', XeroInitialRefreshToken, name = 'XeroRedirect'),\n path('XeroInvoicePost//', XeroInvoicePost, name = 'XeroInvoicePost'),\n path('XeroCustomerCreate//', XeroCustomerCreate, name = 'XeroCustomerCreate'),\n path('XeroCreateDepositCustomer//', XeroCreateDepositCustomer, name = 'XeroCreateDepositCustomer'),\n path('XeroCreateBalanceInvoice//', XeroCreateBalanceInvoice, name = 'XeroCreateBalanceInvoice'),\n\n\tpath('', include('payments.urls')),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"owngoal63/qp3","sub_path":"quotepadproj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":16296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24506156150","text":"# Databricks notebook source\n# MAGIC %md # MLflow Spark ML Training Tutorial\n# MAGIC \n# MAGIC **Overview**\n# MAGIC 
* Train a SparkML model several times with different `maxDepth` hyperparameters\n# MAGIC * Algorithm is DecisionTreeRegressor with wine quality dataset\n# MAGIC * Show different ways to view runs:\n# MAGIC * [MlflowClient.list_run_infos](https://mlflow.org/docs/latest/python_api/mlflow.tracking.html#mlflow.tracking.MlflowClient.list_run_infos)\n# MAGIC * [MlflowClient.search_runs](https://mlflow.org/docs/latest/python_api/mlflow.tracking.html#mlflow.tracking.MlflowClient.search_runs)\n# MAGIC * [mlflow.search_runs](https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.search_runs)\n# MAGIC * Experiment data source\n# MAGIC * Find the best run for the experiment\n# MAGIC * Show how to score the model with different flavors:\n# MAGIC * [Spark flavor](https://mlflow.org/docs/latest/python_api/mlflow.spark.html) \n# MAGIC * [Pyfunc flavor](https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html) \n# MAGIC * [MLeap flavor](https://mlflow.org/docs/latest/python_api/mlflow.mleap.html) (using SparkBundle)\n# MAGIC \n# MAGIC **MLeap**\n# MAGIC * MLeap: common serialization format and execution engine for machine learning pipelines\n# MAGIC * https://mleap-docs.combust.ml\n# MAGIC * Databricks MLeap documentation:\n# MAGIC * [MLeap ML Model Export](https://docs.databricks.com/applications/machine-learning/model-export-import/mleap-model-export.html#mleap-ml-model-export)\n# MAGIC * [Train a PySpark model and save in MLeap format](https://docs.databricks.com/applications/mlflow/tracking-ex-pyspark.html#train-a-pyspark-model-and-save-in-mleap-format) - Databricks documentation notebook\n\n# COMMAND ----------\n\n# MAGIC %md ### Setup\n\n# COMMAND ----------\n\n# MAGIC %run ./Common\n\n# COMMAND ----------\n\nimport mlflow\nclient = mlflow.tracking.MlflowClient()\n\n# COMMAND ----------\n\nexperiment_id, experiment_name = init()\n\n# COMMAND ----------\n\n# MAGIC %md ### Delete any existing runs\n\n# COMMAND ----------\n\ndelete_runs(experiment_id)\n\n# COMMAND ----------\n\n# MAGIC %md ## Prepare data\n\n# COMMAND ----------\n\ndata_path = download_data()\n\n# COMMAND ----------\n\ndata = spark.read.format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .load(data_path.replace(\"/dbfs\",\"dbfs:\")) \n(trainData, testData) = data.randomSplit([0.7, 0.3], 42)\n\n# COMMAND ----------\n\ndisplay(data)\n\n# COMMAND ----------\n\n# MAGIC %md ### Training Pipeline\n\n# COMMAND ----------\n\nimport pyspark\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.regression import DecisionTreeRegressor\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.feature import VectorAssembler\n\n# COMMAND ----------\n\nimport mlflow.spark\nimport mlflow.mleap\n\ndef train(maxDepth):\n with mlflow.start_run() as run: \n # Set MLflow tags\n mlflow.set_tag(\"mlflow_version\", mlflow.__version__)\n mlflow.set_tag(\"spark_version\", spark.version)\n mlflow.set_tag(\"pyspark_version\", pyspark.__version__)\n \n # Log MLflow parameters\n mlflow.log_param(\"maxDepth\", maxDepth)\n \n # Create model\n dt = DecisionTreeRegressor(labelCol=colLabel, featuresCol=colFeatures, \\\n maxDepth=maxDepth)\n \n # Create pipeline\n assembler = VectorAssembler(inputCols=data.columns[:-1], outputCol=colFeatures)\n pipeline = Pipeline(stages=[assembler, dt])\n \n # Fit model\n model = pipeline.fit(trainData)\n \n # Predict on test data\n predictions = model.transform(testData)\n \n # Log MLflow evaluation metric (computed on the test split)\n metric = \"rmse\"\n 
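# evaluate hold-out RMSE and log it to the active run; the search_runs calls\n # further down rank runs by this metric\n 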
evaluator = RegressionEvaluator(labelCol=colLabel, predictionCol=colPrediction, metricName=metric)\n v = evaluator.evaluate(predictions)\n mlflow.log_metric(metric, v)\n print(f\"{v:5.3f} {maxDepth:2d} {run.info.run_id} {run.info.experiment_id}\")\n \n # Log MLflow model as Spark ML\n mlflow.spark.log_model(model, \"spark-model\")\n \n # Log MLflow model as MLeap\n mlflow.mleap.log_model(spark_model=model, sample_input=testData, artifact_path=\"mleap-model\")\n\n# COMMAND ----------\n\n# MAGIC %md ### Train with different hyperparameters\n\n# COMMAND ----------\n\nparams = [1, 2, 4, 16]\nfor p in params:\n train(p)\n\n# COMMAND ----------\n\n# MAGIC %md ### Different ways to show an experiment's runs\n\n# COMMAND ----------\n\n# MAGIC %md #### MlflowClient.list_run_infos\n# MAGIC * [mlflow.tracking.MlflowClient.list_run_infos](https://mlflow.org/docs/latest/python_api/mlflow.tracking.html#mlflow.tracking.MlflowClient.list_run_infos)\n# MAGIC * Returns a list of [RunInfo](https://mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.RunInfo) objects\n\n# COMMAND ----------\n\ninfos = client.list_run_infos(experiment_id)\nfor info in infos:\n print(info.run_id, info.experiment_id, info.status)\n\n# COMMAND ----------\n\n# MAGIC %md #### MLflowClient.search_runs\n# MAGIC * [mlflow.tracking.MlflowClient.search_runs](https://mlflow.org/docs/latest/python_api/mlflow.tracking.html#mlflow.tracking.MlflowClient.search_runs)\n# MAGIC * Returns a list of [Run](https://mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.Run) objects\n# MAGIC * Allows for paging when you have a very large number of runs\n# MAGIC * Sorted by best metrics `rmse`\n\n# COMMAND ----------\n\nruns = client.search_runs(experiment_id, order_by=[\"metrics.rmse ASC\"])\nfor run in runs:\n print(run.info.run_id, run.data.metrics[\"rmse\"], run.data.params)\n\n# COMMAND ----------\n\n# MAGIC %md #### mlflow.search_runs\n# MAGIC * [mlflow.search_runs](https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.search_runs)\n# MAGIC * Returns a Pandas dataframe\n# MAGIC * All `data` attributes are exploded into one flat column name space\n# MAGIC * Sorted by best metrics `rmse`\n\n# COMMAND ----------\n\nruns = mlflow.search_runs(experiment_id)\nruns = runs.sort_values(by=['metrics.rmse'])\nruns\n\n# COMMAND ----------\n\nruns[[\"run_id\",\"metrics.rmse\",\"params.maxDepth\"]]\n\n# COMMAND ----------\n\n# MAGIC %md #### Experiment data source\n# MAGIC * Returns a Spark dataframe of all runs\n# MAGIC * Run `data` elements such as `params`, `metrics` and `tags` are nested.\n# MAGIC * Background Documentation:\n# MAGIC * Databricks documentation:\n# MAGIC * [MLflow Experiment Data Source](https://docs.databricks.com/data/data-sources/mlflow-experiment.html#mlflow-exp-datasource)\n# MAGIC * [Analyze MLflow runs using DataFrames\n# MAGIC ](https://docs.databricks.com/applications/mlflow/tracking.html#analyze-mlflow-runs-using-dataframes)\n# MAGIC * [Analyzing Your MLflow Data with DataFrames](https://databricks.com/blog/2019/10/03/analyzing-your-mlflow-data-with-dataframes.html) - blog - 2019-10-03\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import *\ndf_runs = spark.read.format(\"mlflow-experiment\").load(experiment_id)\ndf_runs.createOrReplaceTempView(\"runs\")\n\n# COMMAND ----------\n\n# MAGIC %md ##### Query with Spark DataFrame API\n\n# COMMAND ----------\n\ndf_runs = df_runs.sort(asc(\"metrics.rmse\"))\ndisplay(df_runs)\n\n# COMMAND ----------\n\ndisplay(df_runs.select(\"run_id\", 
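# round the metric to 3 decimals for display; params is kept as a nested column\n 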
round(\"metrics.rmse\",3).alias(\"rmse\"),\"params\"))\n\n# COMMAND ----------\n\n# MAGIC %md ##### Query as SQL\n\n# COMMAND ----------\n\n# MAGIC %sql select run_id, metrics.rmse, params from runs order by metrics.rmse asc\n\n# COMMAND ----------\n\n# MAGIC %sql select run_id, metrics.rmse, params from runs order by metrics.rmse asc limit 1\n\n# COMMAND ----------\n\n# MAGIC %md ### Find the best run\n\n# COMMAND ----------\n\nruns = client.search_runs(experiment_id, order_by=[\"metrics.rmse ASC\"], max_results=1)\nbest_run = runs[0]\nbest_run\n\n# COMMAND ----------\n\ndisplay_run_uri(experiment_id, best_run.info.run_id)\n\n# COMMAND ----------\n\nbest_run.info.run_id, best_run.data.metrics[\"rmse\"]\n\n# COMMAND ----------\n\n# MAGIC %md ### Score\n# MAGIC \n# MAGIC Several ways to score:\n# MAGIC * Spark ML flavor\n# MAGIC * Pyfunc flavor\n# MAGIC * MLeap (SparkBundle) flavor\n\n# COMMAND ----------\n\nmodel_uri = f\"runs:/{best_run.info.run_id}/spark-model\"\nmodel_uri\n\n# COMMAND ----------\n\n# MAGIC %md #### Score with Spark ML flavor\n\n# COMMAND ----------\n\nmodel = mlflow.spark.load_model(model_uri)\ntype(model)\n\n# COMMAND ----------\n\npredictions = model.transform(data)\ndisplay(predictions.select(colPrediction, colLabel, colFeatures))\n\n# COMMAND ----------\n\n# MAGIC %md #### Score with Pyfunc flavor\n\n# COMMAND ----------\n\nmodel = mlflow.pyfunc.load_model(model_uri)\ntype(model)\n\n# COMMAND ----------\n\npredictions = model.predict(data.toPandas())\ntype(predictions)\n\n# COMMAND ----------\n\npredictions[:10]\n\n# COMMAND ----------\n\n# MAGIC %md #### Score with MLeap flavor \n# MAGIC * Uses SparkBundle\n# MAGIC * There is no MLflow MLeap `load_model` method so we have to:\n# MAGIC * Manually construct the model artifact URI\n# MAGIC * Use low-level MLeap methods to load the model\n\n# COMMAND ----------\n\nrun = client.get_run(best_run.info.run_id)\nrun.info.artifact_uri\n\n# COMMAND ----------\n\nbundle_path = f\"file:{run.info.artifact_uri}/mleap-model/mleap/model\".replace(\"dbfs:\",\"/dbfs\")\nbundle_path\n\n# COMMAND ----------\n\nfrom pyspark.ml import PipelineModel\nfrom mleap.pyspark.spark_support import SimpleSparkSerializer\nmodel = PipelineModel.deserializeFromBundle(bundle_path)\ntype(model)\n\n# COMMAND ----------\n\npredictions = model.transform(data)\ntype(predictions)\n\n# COMMAND ----------\n\ndisplay(predictions.select(colPrediction, colLabel, colFeatures))","repo_name":"0xKoios/mlflow-examples","sub_path":"databricks/notebooks/mlflow_tutorial/01_MLflow_SparkML_Tutorial.py","file_name":"01_MLflow_SparkML_Tutorial.py","file_ext":"py","file_size_in_byte":9600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"72424820571","text":"from random import seed\nfrom game_state import GameResult\n\nseed(6)\n\nclass TicTacToeBoard():\n\n def __init__(self, rows=3, column=3):\n self.rows = rows\n self.col = column\n self.board = []\n self.win_value = 1\n self.lose_value = -1\n self.draw_value = .5\n self.transition_value = 0\n self.random_action = False\n self.abc = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n\n for _ in range(rows):\n row = []\n for _ in range(column):\n row.append(\"-\")\n self.board.append(row)\n\n def print_board(self):\n print(self.board)\n\n def get_board_state(self):\n return \"\".join([ \"\".join(r) for r in self.board])\n\n def get_available_moves(self, marker):\n moves_hash = []\n moves = []\n for r in range(self.rows):\n for c in range(self.col):\n if self.board[r][c] == 
'-':\n self.board[r][c] = marker \n moves_hash.append(\"\".join([\"\".join(r) for r in self.board]))\n self.board[r][c] = '-'\n moves.append((r,c))\n return moves, moves_hash\n\n def set_tile(self, row, col, marker):\n # Defensive coding to make sure none of our updates do anything funny\n if self.board[row][col] != '-':\n raise Exception(\"Issue sending invalid row col for row: \" + str(row) + \\\n \"column: \" + str(col) + \" for marker \" + marker)\n self.board[row][col] = marker\n\n def find_winner(self, marker):\n # win by rows\n marks = [marker] * self.rows\n for r in self.board:\n if r == marks:\n return marker\n\n # Win by column\n for ind in range(self.col):\n col = [row[ind] for row in self.board]\n if col == marks:\n return marker\n \n cross = [self.board[i][i] for i in range(self.col)]\n if cross == marks:\n return marker\n\n cross = [self.board[self.col-1-i][i] for i in range(self.col)]\n if cross == marks:\n return marker\n\n if \"-\" not in [xrow for row in self.board for xrow in row]:\n return GameResult.DRAW\n else:\n return None\n\n def get_reward(self, marker):\n winner = self.find_winner(marker)\n if not winner:\n return self.transition_value\n if winner == GameResult.DRAW:\n return self.draw_value\n if winner == marker:\n return self.win_value\n else:\n return self.lose_value # we lost\n\n def is_valid_move(self, row, col):\n if self.board[row][col] == '-':\n return True\n return False\n \n def abc_to_row_col(self, abc):\n ind = self.abc.index(abc)\n col = ind % self.col\n row = int(ind / self.col)\n return row, col\n\n def pretty_print_board(self, alpha=True):\n counter = 0\n print(\"_____________\")\n for row in self.board:\n row_vals = []\n for col in row:\n if col == \"-\":\n row_vals.append(self.abc[counter])\n else:\n row_vals.append(col)\n counter += 1\n print(\"| \" + \" | \".join(row_vals) + \" |\")\n print(\"_____________\")\n\n def moves_left(self):\n return self.get_board_state().count(\"-\")\n\n def check_win_condition(self, mark):\n board = self.board\n for i in range(len(board)):\n row = board[i]\n if row[0] == mark and row[1] == mark and row[2] == '-':\n return (i, 2)\n if row[1] == mark and row[2] == mark and row[0] == '-':\n return (i, 0)\n if row[0] == mark and row[2] == mark and row[1] == '-':\n return (i, 1)\n\n # Check for column win\n for i in range(len(board)):\n if board[0][i] == mark and board[1][i] == mark and board[2][i] == '-':\n return (2, i)\n if board[1][i] == mark and board[2][i] == mark and board[0][i] == '-':\n return (0, i)\n if board[2][i] == mark and board[0][i] == mark and board[1][i] == '-':\n return (1, i)\n \n # Check for cross win\n if board[0][0] == mark and board[1][1] == mark and board[2][2] == '-':\n return (2, 2)\n if board[1][1] == mark and board[2][2] == mark and board[0][0] == '-':\n return (0, 0)\n if board[0][0] == mark and board[2][2] == mark and board[1][1] == '-':\n return (1, 1)\n \n if board[0][2] == mark and board[1][1] == mark and board[2][0] == '-':\n return (2, 0)\n if board[2][0] == mark and board[1][1] == mark and board[0][2] == '-':\n return (0, 2)\n if board[0][2] == mark and board[2][0] == mark and board[1][1] == '-':\n return (1, 1)\n\n return None","repo_name":"rlhull6/tic-tac-toe-rl","sub_path":"tic_tac_toe_board.py","file_name":"tic_tac_toe_board.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1163393412","text":"# Project imports\nfrom manager.Command.Utils import *\n\n\nclass 
Chatango(CommandUtils):\n # profile\n def command_profile(self):\n if not self.args:\n user = self.user\n else:\n username = self._get_user_uid(self.args[0])\n self._validate_username(username)\n user = get_user(username)\n\n try:\n stuff = str(request.urlopen('http://{0}.chatango.com'.format(user.uid)).read())\n\n age = stuff.split(\n 'Age:',\n 1\n )[1].split(\n '
<br',  # assumed '<br' delimiter; the original markup was stripped\n 1\n )[0]\n\n gender = stuff.split(\n 'Gender:',\n 1\n )[1].split(\n '<br
',\n 1\n )[0]\n\n location = stuff.split(\n 'Location:',\n 1\n )[1].split(\n '<br
',\n 1\n )[0]\n\n picture = 'http://pop.sorch.info/pic/{0}.png'.format(user.uid)\n url = 'http://{0}.chatango.com'.format(user.uid)\n\n if self.pm:\n picture = ''.format(picture)\n\n profile_data = self._lang('PROFILE_DATA').format(\n url=url,\n age=age,\n gender=gender,\n location=location,\n picture=picture\n )\n self._message(profile_data)\n except:\n self._message(self._lang('ERROR_CH_USER_NOT_FOUND').format(\n self._user_color(user.uid)\n ))\n if self.log_level > 0:\n raise\n\n # bg\n def command_bg(self):\n if not self.args:\n username = self.user.uid\n else:\n username = self._get_user_uid(self.args[0])\n self._validate_username(username)\n\n picture = 'http://st.chatango.com/profileimg/{0}/{1}/{2}/msgbg.jpg'.format(\n username[0],\n username[1] if 1 != len(username) else username[0],\n username\n )\n\n if self.pm:\n picture = ''.format(picture)\n\n self._message(picture)\n\n # bgtime\n def command_bgtime(self):\n if not self.args:\n uid = self.user.uid\n else:\n uid = self._get_user_uid(self.args[0])\n self._validate_username(uid)\n\n try:\n bg_time = get_bg_time(uid)\n\n if bg_time < time.time():\n self._message(self._lang('PREMIUM_HAD').format(\n self._user_color(uid),\n highlight(self._format_seconds(int(time.time()) - bg_time), 'Blue')\n ))\n else:\n self._message(self._lang('PREMIUM_HAS').format(\n self._user_color(uid),\n highlight(self._format_seconds(bg_time - int(time.time())), 'Blue')\n ))\n except:\n self._message(self._lang('PREMIUM_NEVER').format(\n self._user_color(uid)\n ))\n if self.log_level > 0:\n raise\n\n # mini\n def command_mini(self):\n if not self.args:\n uid = self.user.uid\n else:\n uid = self._get_user_uid(self.args[0])\n self._validate_username(uid)\n\n try:\n stuff = str(\n request.urlopen(\n 'http://{0}.chatango.com'.format(uid)\n ).read().decode('utf-8')\n )\n stuff = everything_between(\n stuff,\n '',\n ''\n )\n\n if len(stuff.strip()) > 0:\n self._message(stuff)\n except:\n self._message(self._lang('ERROR_CH_USER_NOT_FOUND').format(\n self._user_color(uid)\n ))\n if self.log_level > 0:\n raise\n\n # minihtml\n def command_minihtml(self):\n if not self.args:\n uid = self.user.uid\n else:\n uid = self._get_user_uid(self.args[0])\n self._validate_username(uid)\n\n try:\n stuff = str(\n request.urlopen(\n 'http://{0}.chatango.com'.format(uid)\n ).read().decode('utf-8')\n )\n stuff = everything_between(\n stuff,\n '',\n ''\n )\n\n if len(stuff.strip()) > 0:\n self._message(stuff, html=False)\n except:\n self._message(self._lang('ERROR_CH_USER_NOT_FOUND').format(\n self._user_color(uid)\n ))\n if self.log_level > 0:\n raise","repo_name":"mircea-dinoiu/edwinbot-py","sub_path":"commands/Users/Information/Chatango/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74177893530","text":"#!/usr/bin/env python\n\n# FIXME can we avoid having globals?\nINPUT_VALUES = [0]\nOUTPUT_VALUES = []\n\nrelative_base = 0\n\n\ndef main():\n values = read_input()\n values[0] = 2\n tiles = {}\n\n ball = None\n paddle = None\n score = None\n\n position = 0\n while position != -1:\n if ball is None or paddle is None or ball[0] == paddle[0]:\n new_direction = 0\n elif ball[0] > paddle[0]:\n new_direction = 1\n elif ball[0] < paddle[0]:\n new_direction = -1\n if INPUT_VALUES:\n INPUT_VALUES.pop()\n INPUT_VALUES.append(new_direction)\n\n position = do_turn(values, position)\n if len(OUTPUT_VALUES) == 3:\n tile_id, y, x = (\n 
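# the program emits x, y, tile_id in that order; popping from the end of\n # OUTPUT_VALUES therefore yields tile_id first\n 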
OUTPUT_VALUES.pop(),\n OUTPUT_VALUES.pop(),\n OUTPUT_VALUES.pop(),\n )\n if (x, y) == (-1, 0):\n score = tile_id\n tiles[(x, y)] = tile_id\n if tile_id == 3:\n paddle = (x, y)\n elif tile_id == 4:\n ball = (x, y)\n\n return score\n\n\ntile_types = {\n 0: \" \",\n 1: \"|\",\n 2: \"X\",\n 3: \"_\",\n 4: \"O\",\n}\n\n\ndef draw_tiles(tiles):\n lines = []\n min_x, max_x, min_y, max_y = get_bounds(tiles)\n for y in range(min_y, max_y):\n lines.append([tiles.get((x, y), 0) for x in range(min_x, max_x + 1)])\n for line in lines:\n print(\"\".join(tile_types[l] for l in line))\n\n\ndef get_bounds(tiles):\n xs, ys = zip(*tiles)\n return min(xs), max(xs), min(ys), max(ys)\n\n\ndef extend_if_required(values, pos):\n if pos + 1 > len(values):\n values.extend([0] * (pos + 1 - len(values)))\n\n\ndef get_pos(values, mode, position):\n if mode == 1:\n pos = position\n elif mode == 0:\n pos = values[position]\n elif mode == 2:\n pos = relative_base + values[position]\n\n extend_if_required(values, pos)\n return pos\n\n\ndef do_add(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n pos_b = get_pos(values, modes[1], start_position + 2)\n pos_destination = get_pos(values, modes[2], start_position + 3)\n\n values[pos_destination] = values[pos_a] + values[pos_b]\n return start_position + 4\n\n\ndef do_multiply(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n pos_b = get_pos(values, modes[1], start_position + 2)\n pos_destination = get_pos(values, modes[2], start_position + 3)\n\n values[pos_destination] = values[pos_a] * values[pos_b]\n return start_position + 4\n\n\ndef do_input(values, modes, start_position):\n # should always be in position mode\n assert modes[0] != 1\n pos_destination = get_pos(values, modes[0], start_position + 1)\n\n values[pos_destination] = INPUT_VALUES.pop(0)\n return start_position + 2\n\n\ndef do_output(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n\n output = values[pos_a]\n OUTPUT_VALUES.append(output)\n return start_position + 2\n\n\ndef do_jump_if_true(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n pos_b = get_pos(values, modes[1], start_position + 2)\n\n if values[pos_a]:\n return values[pos_b]\n else:\n return start_position + 3\n\n\ndef do_jump_if_false(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n pos_b = get_pos(values, modes[1], start_position + 2)\n\n if not values[pos_a]:\n return values[pos_b]\n else:\n return start_position + 3\n\n\ndef do_less_than(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n pos_b = get_pos(values, modes[1], start_position + 2)\n pos_destination = get_pos(values, modes[2], start_position + 3)\n\n new_value = 1 if values[pos_a] < values[pos_b] else 0\n values[pos_destination] = new_value\n return start_position + 4\n\n\ndef do_equals(values, modes, start_position):\n pos_a = get_pos(values, modes[0], start_position + 1)\n pos_b = get_pos(values, modes[1], start_position + 2)\n pos_destination = get_pos(values, modes[2], start_position + 3)\n\n new_value = 1 if values[pos_a] == values[pos_b] else 0\n values[pos_destination] = new_value\n return start_position + 4\n\n\ndef do_relative_base(values, modes, start_position):\n global relative_base\n pos_a = get_pos(values, modes[0], start_position + 1)\n relative_base += values[pos_a]\n return start_position + 2\n\n\ndef get_modes(instruction):\n zero_padded 
= \"%05d\" % instruction\n return [int(x) for x in zero_padded[-3::-1]]\n\n\ndef do_turn(values, start_position):\n instruction = values[start_position]\n opcode = instruction % 100\n modes = get_modes(instruction)\n if opcode == 99:\n return -1\n\n if opcode == 1:\n position = do_add(values, modes, start_position)\n elif opcode == 2:\n position = do_multiply(values, modes, start_position)\n elif opcode == 3:\n position = do_input(values, modes, start_position)\n elif opcode == 4:\n position = do_output(values, modes, start_position)\n elif opcode == 5:\n position = do_jump_if_true(values, modes, start_position)\n elif opcode == 6:\n position = do_jump_if_false(values, modes, start_position)\n elif opcode == 7:\n position = do_less_than(values, modes, start_position)\n elif opcode == 8:\n position = do_equals(values, modes, start_position)\n elif opcode == 9:\n position = do_relative_base(values, modes, start_position)\n else:\n raise ValueError(\"Unexpected opcode: %s\", opcode)\n\n return position\n\n\ndef read_input():\n with open(\"day13.txt\") as f:\n return [int(x) for x in f.read().split(\",\")]\n\n\nif __name__ == \"__main__\":\n score = main()\n print(f\"Final score: {score}\")\n","repo_name":"alasdairnicol/advent-of-code-2019","sub_path":"day13b.py","file_name":"day13b.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6195945024","text":"\"\"\"\nThis module contains the command line interface for snakeviz.\n\n\"\"\"\nfrom __future__ import print_function\n\nimport optparse\nimport os\nimport random\nimport socket\nimport sys\nimport threading\nimport webbrowser\n\ntry:\n from urllib.parse import quote_plus\nexcept ImportError:\n from urllib import quote_plus\n\n\n# As seen in IPython:\n# https://github.com/ipython/ipython/blob/8be7f9abd97eafb493817371d70101d28640919c/IPython/html/notebookapp.py\n# See the IPython license at:\n# https://github.com/ipython/ipython/blob/master/COPYING.rst.\ndef random_ports(port, n):\n \"\"\"Generate a list of n random ports near the given port.\n The first 5 ports will be sequential, and the remaining n-5 will be\n randomly selected in the range [port-2*n, port+2*n].\n \"\"\"\n for i in range(min(5, n)):\n yield port + i\n for i in range(n-5):\n yield max(1, port + random.randint(-2*n, 2*n))\n\n\ndef main(argv=sys.argv[1:]):\n parser = optparse.OptionParser(\n usage='%prog [options] filename'\n )\n parser.add_option('-H', '--hostname', metavar='ADDR', default='127.0.0.1',\n help='hostname to bind to (default: 127.0.0.1')\n\n parser.add_option('-p', '--port', type='int', metavar='PORT', default=8080,\n help='port to bind to; if this port is already in use a '\n 'free port will be selected automatically '\n '(default: %default)')\n\n parser.add_option('-b', '--browser', metavar='PATH',\n help=\"path to the web browser executable to use to open \"\n \"the visualization; uses the same default as \"\n \"Python's webbrowser module, which can also be \"\n \"overridden with the BROWSER environment variable\")\n\n options, args = parser.parse_args(argv)\n\n if len(args) != 1:\n parser.error('please provide the path to a profiler output file to '\n 'open')\n\n filename = os.path.abspath(args[0])\n if not os.path.exists(filename):\n parser.error('the file %s does not exist' % filename)\n\n try:\n open(filename)\n except IOError as e:\n parser.error('the file %s could not be opened: %s'\n % (filename, str(e)))\n\n filename = quote_plus(filename)\n\n 
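# quote_plus-encoded, the path survives as the /snakeviz/<filename> URL segment\n # that the browser thread opens once the server is listening\n 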
hostname = options.hostname\n port = options.port\n\n if not 0 <= port <= 65535:\n parser.error('invalid port number %d: use a port between 0 and 65535'\n % port)\n\n try:\n browser = webbrowser.get(options.browser)\n except webbrowser.Error as e:\n parser.error('no web browser found: %s' % e)\n\n # Go ahead and import the tornado app and start it; we do an inline import\n # here to avoid the extra overhead when just running the cli for --help and\n # the like\n from .main import app\n import tornado.ioloop\n\n # As seen in IPython:\n # https://github.com/ipython/ipython/blob/8be7f9abd97eafb493817371d70101d28640919c/IPython/html/notebookapp.py\n # See the IPython license at:\n # https://github.com/ipython/ipython/blob/master/COPYING.rst.\n for p in random_ports(port, 10):\n try:\n app.listen(p, address=hostname)\n except socket.error as e:\n print('Port {0} in use, trying another.'.format(p))\n else:\n port = p\n break\n else:\n print('No available port found.')\n return 1\n\n print(('snakeviz web server started on %s:%d; enter Ctrl-C to exit' %\n (hostname, port)))\n\n # Launch the browser in a separate thread to avoid blocking the ioloop from\n # starting\n bt = lambda: browser.open('http://%s:%d/snakeviz/%s' %\n (hostname, port, filename), new=2)\n threading.Thread(target=bt).start()\n\n try:\n tornado.ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n # TODO: Cheap KeyboardInterrupt handler for now; iPython has some nicer\n # stuff for handling SIGINT and SIGTERM that might be worth borrowing\n tornado.ioloop.IOLoop.instance().stop()\n print('\\nBye!')\n\n return 0\n","repo_name":"vp29/market_analyzer","sub_path":"build/snakeviz/snakeviz/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22379221571","text":"from github import Github\nfrom jinja2 import Environment, Template\nimport os\nimport requests\nimport signal\nimport stat\nimport subprocess\n\ngh = Github()\n\nlatest_release = gh.get_repo('squili/makita').get_latest_release()\n\nprint(f'[makita-railway] Makita version {latest_release.tag_name[1:]}')\n\ndownload_url = next(filter(lambda asset: asset.name == 'makita', latest_release.get_assets())).browser_download_url\n\nprint(f'[makita-railway] Downloading {download_url}...')\n\nr = requests.get(download_url, stream=True)\nr.raise_for_status()\n\nwith open('makita', 'wb') as f:\n for block in r.iter_content(chunk_size=None):\n f.write(block)\n\nprint('[makita-railway] Configuring makita...')\n\nos.chmod('makita', stat.S_IEXEC)\n\nwith open('config.jinja') as source, open('config.ron', 'w') as target:\n target.write(Environment().from_string(source.read()).render(\n token=os.environ['TOKEN'],\n client_id=os.environ['CLIENT_ID'],\n client_secret=os.environ['CLIENT_SECRET'],\n database_url=os.environ['DATABASE_URL'],\n port=os.environ['PORT'],\n owner_id=os.environ['OWNER_ID'],\n manager_guild=os.environ['MANAGER_GUILD'],\n github_webhook_secret=os.environ['GITHUB_WEBHOOK_SECRET'],\n ))\n\nprint('[makita-railway] Starting makita...')\n\nchild = subprocess.Popen(['./makita', 'run'], env=os.environ)\nsignal.signal(signal.SIGINT, lambda: child.kill())\nchild.wait()\n","repo_name":"squili/makita-railway","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74689791450","text":"from win32com import 
client\nfrom enum import IntEnum\nfrom collections import namedtuple\n\n\nRawVariableInfo = namedtuple('VariableInfo', 'name label data_type categories')\nMasterVariableInfo = namedtuple('VariableInfo', 'name data_type v_new v_dropped c_new c_dropped v_label c_label')\n\nclass DataTypeConstants(IntEnum):\n mtNone = 0\n mtLong = 1\n mtText = 2\n mtCategorical = 3\n mtObject = 4\n mtDate = 5\n mtDouble = 6\n mtBoolean = 7\n\nclass openConstants(IntEnum):\n oREAD = 1\n oREADWRITE = 2\n oNOSAVE = 3\n\ndef get_mdd_data(mdd_path):\n\n mdd = client.Dispatch('MDM.Document')\n mdd.Open(mdd_path, mode=openConstants.oREAD)\n\n variables = {}\n for v in mdd.Variables:\n name = v.fullname\n label = str(v.fulllabel)\n data_type=str(DataTypeConstants(v.datatype)).split('.')[1]\n categories = {}\n for c in v.Categories:\n categories[c.Name] = c.Label\n variables[name] = RawVariableInfo(name, label, data_type, categories)\n \n mdd.Close()\n return variables\n\ndef create_excel_comparison(old_mdd, new_mdd, xl_output):\n old_wave_variables = get_mdd_data(old_mdd)\n new_wave_variables = get_mdd_data(new_mdd)\n\n # filling master variables\n master_variables = []\n\n # 1. new variables\n for name, variable in new_wave_variables.items():\n if name not in old_wave_variables:\n master_variables.append(\n MasterVariableInfo(\n name, variable.data_type,\n v_new=True,\n v_dropped=False,\n c_new=False,\n c_dropped=False,\n v_label=False,\n c_label=False)\n )\n\n # 2. dropped variables\n for name, variable in old_wave_variables.items():\n if name not in new_wave_variables:\n master_variables.append(\n MasterVariableInfo(\n name, variable.data_type,\n v_new=False,\n v_dropped=True,\n c_new=False,\n c_dropped=False,\n v_label=False,\n c_label=False)\n )\n\n # 3. changed variables\n\n for new_variable in new_wave_variables.values():\n old_variable = old_wave_variables.get(new_variable.name)\n\n if old_variable:\n\n # 3.1 check variable labels\n v_label = old_variable.label != new_variable.label\n\n # 3.2 check categories\n for new_name, new_label in new_variable.categories.items():\n old_label = old_variable.categories.get(new_name)\n if old_label and new_label != old_label:\n c_label = True\n break\n else:\n c_label = False\n\n # 3.3 new categories\n c_new = bool([c for c in new_variable.categories if c not in old_variable.categories])\n c_dropped = bool([c for c in old_variable.categories if c not in new_variable.categories])\n\n if v_label or c_label or c_new or c_dropped:\n master_variables.append(\n MasterVariableInfo(\n name=new_variable.name,\n data_type=new_variable.data_type,\n v_new=False,\n v_dropped=False,\n c_new=c_new,\n c_dropped=c_dropped,\n v_label=v_label,\n c_label=c_label)\n )\n\n # export in excel\n\n from openpyxl import Workbook\n from openpyxl.styles import Alignment, Border, Side, PatternFill\n from openpyxl.formatting.rule import FormulaRule\n from openpyxl.styles import colors\n from openpyxl.styles import Font, Color\n\n wb = Workbook()\n\n ws1 = wb.active\n ws1.title = \"overview\"\n\n # report header (row 1-2)\n\n ws1.append(['New', new_mdd])\n ws1.append(['Old', old_mdd])\n ws1.append([])\n\n if master_variables:\n\n # variable header (row 4-5)\n ws1.append(['name', 'data_type', 'variable', '', 'categories', '', 'label(s) changed', '', 'Remarks'])\n ws1.append(['', '', 'new', 'dropped', 'new', 'dropped', 'variable', 'categories', ''])\n\n for v in master_variables:\n booleans = map(lambda x: 'x' if x else '', v[2:])\n # booleans = ['x' if x else '' for x in v[2:]]\n ws1.append((v.name, 
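# row layout: name, data_type, then the six x/blank change-flag columns\n 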
v.data_type, *booleans))\n\n #formatting\n\n ws1.column_dimensions['A'].width = 30\n ws1.column_dimensions['B'].width = 20\n\n # alignment - header\n for row in range(4, 6):\n for col in range(1, 12):\n ws1.cell(column=col, row=row).alignment = Alignment(horizontal='center', vertical='center')\n\n ws1.merge_cells('A4:A5')\n ws1.merge_cells('B4:B5')\n ws1.merge_cells('C4:D4')\n ws1.merge_cells('E4:F4')\n ws1.merge_cells('G4:H4')\n ws1.merge_cells('I4:I5')\n\n # borders\n thin_border = Border(left=Side(style='thin'), \n right=Side(style='thin'), \n top=Side(style='thin'), \n bottom=Side(style='thin'))\n \n # create fill\n colour_fill = PatternFill(start_color='FFFF7F',\n end_color='FFFF7F',\n fill_type='solid') \n \n for row in range(4, len(master_variables) + 6):\n for col in range(1, len(master_variables[0]) + 2):\n ws1.cell(row=row, column=col).border = thin_border\n # alignment - table \n for col in range(3, len(master_variables[0]) + 2):\n ws1.cell(row=row, column=col).alignment = Alignment(horizontal='center', vertical='center')\n\n # freeze header\n ws1.freeze_panes = ws1.cell(row=6, column=1)\n\n # conditional formatting\n ws1.conditional_formatting.add(f'B6:B{len(master_variables) + 5}',\n FormulaRule(formula=['B6=\"mtText\"'], stopIfTrue=True, fill=colour_fill))\n\n # auto filter\n ws1.auto_filter.ref = f'A5:I{len(master_variables) + 4}'\n\n else:\n ws1.append(['... no changes in variables'])\n ws1.append(['... check routing, use toolbox.bat'])\n\n a1 = ws1['A4']\n a2 = ws1['A5']\n ft = Font(color=colors.RED)\n a1.font=ft\n a2.font=ft\n\n wb.save(filename = xl_output)","repo_name":"nz25/codeplans","sub_path":"diagnose.py","file_name":"diagnose.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74633416731","text":"import os\nimport sys\nfrom functools import reduce\nfrom string import ascii_letters\n\nROOT_DIR = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(ROOT_DIR, \"..\")))\n\nfrom common import iter_cleaned_lines\n\n\ndef get_section_span_pairs(src):\n for line in iter_cleaned_lines(src):\n sections_a, sections_b = line.split(\",\")\n yield _parse_sections(sections_a), _parse_sections(sections_b)\n\ndef _parse_sections(sections: str):\n start, end = sections.split(\"-\")\n return int(start), int(end)\n\ndef has_overlap(span_a, span_b):\n return span_a[0] <= span_b[1] and span_a[1] >= span_b[0]\n\n\ndef part2():\n counter = 0\n for sections_a, sections_b in get_section_span_pairs(os.path.join(ROOT_DIR, \"input.txt\")):\n if has_overlap(sections_a, sections_b):\n counter += 1\n print(counter)\n\n\npart2()\n","repo_name":"ignir/advent-of-code","sub_path":"2022/day 4/4-2.py","file_name":"4-2.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4981654817","text":"import pygame\r\nimport functions\r\nimport time\r\nfrom player import Player\r\nfrom settings import Settings\r\nfrom background import Background\r\nfrom main_screen import Main_screen\r\nfrom graphics import Graphics\r\nfrom hiscores import Hiscores\r\nfrom gamecontrol import Gamecontrol\r\n\r\ndef start():\r\n\tfps = pygame.time.Clock()\r\n\tpygame.init()\r\n\tpygame.key.set_repeat(50, 100)\r\n\t\r\n\tsettings = Settings()\r\n\t#icon\r\n\tico = pygame.image.load('graphics\\\\windowico\\\\ico.png')\r\n\tpygame.display.set_icon(ico)\r\n\tdisplay = pygame.display.set_mode((settings.screen_w, 
settings.screen_h), pygame.DOUBLEBUF, 16)\r\n\t\r\n\thiscores = Hiscores()\r\n\tpygame.display.set_caption('Greedy Miner')\r\n\tmain_screen = Main_screen(display, hiscores)\r\n\tgraphics = Graphics(settings)\r\n\ttiles = functions.prepare_random_tiles(settings)\r\n\r\n\t#groups\r\n\tdirt_group = pygame.sprite.Group()\r\n\tstone_group = pygame.sprite.Group()\r\n\tdiamond_group = pygame.sprite.Group()\r\n\tdynamite_group = pygame.sprite.Group()\r\n\tbat_group = pygame.sprite.Group()\r\n\r\n\tfunctions.generate_tile_objects(tiles, settings, dirt_group, stone_group, diamond_group, bat_group, graphics)\r\n\t\r\n\tplayer = Player(settings, dirt_group, graphics)\r\n\tgamecontrol = Gamecontrol(display, settings, hiscores, diamond_group)\r\n\tbackground = Background(settings, player, graphics)\r\n\t\r\n\t\r\n\twhile True:\r\n\t\tif settings.is_active == True:\r\n\t\t\t#draw tiles on screen\r\n\t\t\tfunctions.update_tiles_draw(display, player, background, dirt_group, stone_group, diamond_group, dynamite_group, bat_group, gamecontrol)\r\n\t\t\t#read keys\r\n\t\t\tfunctions.key_control(settings, player)\r\n\t\t\t#act player\r\n\t\t\tfunctions.check_next_tiles(settings, player, diamond_group, stone_group, dirt_group, hiscores, bat_group)\r\n\t\t\t#update logic\r\n\t\t\tfunctions.update_tiles(settings, display, player, background, dirt_group, stone_group, diamond_group, dynamite_group, bat_group, graphics, gamecontrol)\r\n\t\t\t#player\r\n\t\t\tfunctions.player(settings, player)\r\n\t\t\t#generate next level\r\n\t\t\tif gamecontrol.check_diamonds() and player.alive == True:\r\n\t\t\t\tdel player\r\n\t\t\t\tfunctions.erase_old_game(settings, dirt_group, stone_group, diamond_group, dynamite_group, bat_group)\r\n\t\t\t\tdel tiles\r\n\t\t\t\ttiles = functions.prepare_random_tiles(settings)\r\n\t\t\t\tfunctions.generate_tile_objects(tiles, settings, dirt_group, stone_group, diamond_group, bat_group, graphics)\r\n\t\t\t\tplayer = Player(settings, dirt_group, graphics)\r\n\t\telse:\r\n\t\t\tif settings.need_clean == True:\r\n\t\t\t\tgamecontrol.reset()\r\n\t\t\t\tfunctions.erase_old_game(settings, dirt_group, stone_group, diamond_group, dynamite_group, bat_group)\r\n\t\t\t\tdel player\r\n\t\t\t\t#count point\r\n\t\t\t\thiscores.check_hiscores_reset_old()\r\n\t\t\t\tdel tiles\r\n\t\t\t\ttiles = functions.prepare_random_tiles(settings)\r\n\t\t\t\tfunctions.generate_tile_objects(tiles, settings, dirt_group, stone_group, diamond_group, bat_group, graphics)\r\n\t\t\t\tplayer = Player(settings, dirt_group, graphics)\r\n\t\t\t\tsettings.need_clean = False\r\n\r\n\t\t\t#read keys\r\n\t\t\tfunctions.key_control(settings, player)\r\n\t\t\t#display main screen\r\n\t\t\tfunctions.main_screen(main_screen)\r\n\r\n\t\tfps.tick(30)\r\n\t\tpygame.display.flip()\r\n\r\nif __name__ == '__main__':\r\n\tstart()\r\n","repo_name":"InternalCode/GreedyMiner","sub_path":"main_bd.py","file_name":"main_bd.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26531110230","text":"from xmlrpc.client import Boolean\nfrom main_functions import *\nfrom prettytable import PrettyTable\n\ndef display_all():\n staked_cryptos = {}\n lended_cryptos = {}\n spot_cryptos = {}\n total_cryptos = {}\n get_total(staked_cryptos, lended_cryptos, spot_cryptos, total_cryptos)\n\n if len(staked_cryptos) > 0:\n print(\"Staked:\")\n staked_cryptos_list = seperate_from_dict(staked_cryptos)\n display_wallet(staked_cryptos_list)\n print(f'Value in fiat: 
{round(sum(crypto.value_in_fiat for crypto in staked_cryptos_list), 2)}')\n print()\n\n if len(lended_cryptos) > 0:\n print(\"Lended:\")\n lended_cryptos_list = seperate_from_dict(lended_cryptos)\n display_wallet(lended_cryptos_list)\n print(f'Value in fiat: {round(sum(crypto.value_in_fiat for crypto in lended_cryptos_list), 2)}')\n print()\n\n if len(spot_cryptos) > 0:\n print(\"Spot:\")\n spot_cryptos_list = seperate_from_dict(spot_cryptos)\n display_wallet(spot_cryptos_list)\n print(f'Value in fiat: {round(sum(crypto.value_in_fiat for crypto in spot_cryptos_list), 2)}')\n print()\n\n if len(total_cryptos) > 0:\n print(\"Total:\")\n total_cryptos_list = seperate_from_dict(total_cryptos)\n display_wallet(total_cryptos_list)\n print(f'Value in fiat: {round(sum(crypto.value_in_fiat for crypto in total_cryptos_list), 2)}')\n print()\n\ndef display_wallet(data: list):\n x = PrettyTable()\n for crypto in data:\n x.field_names = [\"Crypto\", \"Amount\", \"Value\"]\n x.add_row([crypto.short_name, round(crypto.value, 8), round(crypto.value_in_fiat, 2)])\n print(x)\n\ndef display_exchange(cryptos: list[Crypto]):\n staked_cryptos = {}\n lended_cryptos = {}\n spot_cryptos = {}\n total_cryptos = {}\n get_all_wallets(staked_cryptos, lended_cryptos, spot_cryptos, total_cryptos, cryptos)\n \n if len(staked_cryptos) > 0:\n print(\"Staked:\")\n display_wallet(seperate_from_dict(staked_cryptos))\n\n if len(lended_cryptos) > 0:\n print(\"Lended:\")\n display_wallet(seperate_from_dict(lended_cryptos))\n\n if len(spot_cryptos) > 0:\n print(\"Spot:\")\n display_wallet(seperate_from_dict(spot_cryptos))\n\n if len(total_cryptos) > 0:\n print(\"Total:\")\n display_wallet(seperate_from_dict(total_cryptos))\n\ndef display_asset(asset_name: str):\n staked_cryptos = {}\n lended_cryptos = {}\n spot_cryptos = {}\n total_cryptos = {}\n final_values = []\n name = [crypto_name.name for crypto_name in CryptoNames if asset_name == crypto_name.value][0]\n get_total(staked_cryptos, lended_cryptos, spot_cryptos, total_cryptos)\n\n table = PrettyTable()\n table.field_names = [\"Crypto\", \"Spot\", \"Staked\", \"Lended\", \"Total\", \"Value\"]\n final_values.append(name)\n\n # Add a value for each one of the categories\n add_value_from_asset(spot_cryptos, final_values, name)\n add_value_from_asset(staked_cryptos, final_values, name)\n add_value_from_asset(lended_cryptos, final_values, name)\n add_value_from_asset(total_cryptos, final_values, name, True)\n\n table.add_row(final_values)\n print(table)\n\n# Get value from asset in the specific wallet\ndef add_value_from_asset(wallet: dict[Crypto], final_values: list, name: str, total: Boolean = False):\n exists = False\n for _, value in wallet.items():\n if value.short_name == name:\n final_values.append(value.value)\n if total:\n final_values.append(round(value.value_in_fiat, 2))\n exists = True\n if exists == False: final_values.append(0)\n # TODO: This should be removed when I display only the assets that actually exists, thus, get them from a DB instead of a hashmap\n # Add an extra zero in case there is no asset with that name\n if len(final_values) != 6 and total: final_values.append(0)","repo_name":"dimitris23bp/asset-watch","sub_path":"view_functions.py","file_name":"view_functions.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34226151041","text":"import HandTrackingModule as htm\nimport cv2 \nimport time\nimport os\nimport numpy as np \nimport 
math\n\nwCam=352\nhCam=640\npTime=0\n\ncap=cv2.VideoCapture(0)\ncap.set(3,wCam)\ncap.set(4,hCam)\n\nfolderPath=\"fingers\"\nmyList= os.listdir(folderPath)\noverlayList=[]\nfor imPath in myList:\n image=cv2.imread(f'{folderPath}/{imPath}')\n image=cv2.resize(image,(150,150))\n overlayList.append(image)\n\ndetector=htm.handDetector(detectionConf=0.8)\ntipIds=[4,8,12,16,20]\n\nwhile True:\n success,img=cap.read() \n img=detector.findHands(img)\n lmList=detector.findPosition(img,draw=False)\n if len(lmList)!=0:\n fingers=[]\n if lmList[4][1] < lmList[3][1]:\n fingers.append(1)\n else:\n fingers.append(0)\n for tip in tipIds[1:]:\n if lmList[tip][2] < lmList[tip-2][2]:\n fingers.append(1)\n else:\n fingers.append(0)\n count=fingers.count(1)\n img[10:160,10:160]=overlayList[count]\n cv2.rectangle(img,(180,50),(230,100),(50,50,50),cv2.FILLED)\n cv2.putText(img,str(count),(185,95),cv2.FONT_HERSHEY_PLAIN,4,(200,200,200),2)\n print(count)\n\n\n cTime=time.time()\n fps=int(1/(cTime-pTime))\n pTime=cTime\n cv2.putText(img,\"fps: \"+str(fps),(240,40),cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),2)\n cv2.imshow(\"Image\",img)\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n break","repo_name":"jhpiedrahitao/MPHandTrackingProjects","sub_path":"FingerCounter.py","file_name":"FingerCounter.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26334495344","text":"import os\nimport string\nimport Data\n\nimport NTFS\n\n\"\"\"FAT32\"\"\"\ndef ReadInfoFromBootSector(drive):\n BOOT = Data.BootSectorFAT32()\n BOOT.ReadBootSector(drive) # Read Boot Sector\n return BOOT\n\ndef ReadInfoRDET(drive, BOOT, FirstClusterDATA):\n \n # Read Root Directory Entry Table\n RDET = Data.RDET()\n res = RDET.ReadRDET(FirstClusterDATA, drive)\n \n \"\"\" Read all directory in RDET \"\"\"\n for x in RDET.RootEntry.ListEntry:\n \n # 1 Of Entry in RDET contain the information of the disk, so we need to get it\n str = x.name.split('\\x00')[0]\n if (str == 'System Volume Information'): \n RDET.RootEntry.attr = x.attr\n RDET.RootEntry.createDate = x.createDate\n RDET.RootEntry.createTime = x.createTime\n \n # Besides, We still continute to the full Information of Each Entry in Root Directory Entry Table\n ReadAllDirectory_FromRDET(x, FirstClusterDATA, BOOT, drive)\n \n return RDET # Return Value\n\ndef ReadAllDirectory_FromRDET(Entry, FirstClusterDATA, bootSector, drive, depth = 0):\n if depth >= 10: return None # The Depth can be adjusted to fit the requirement\n \n # Goal: Read data in the Directory Entry\n # We just use Recursive to go further in the DataZone If only the Entry is a Regular Directory with no extra attributes\n if Entry.attr[3] == 'DIRECTORY' and Entry.attr[4] == 'NULL' and Entry.attr[5] == 'NULL' and Entry.attr[6] == 'NULL' and Entry.attr[7] == 'NULL': \n \n # Locate the Cluster contain the chosen Entry data\n EntryInsideDir_Address = (Entry.startCluster - 2 ) * bootSector.sectorPerCluster * bootSector.bytePerSector + FirstClusterDATA\n Entry.ReadDET(EntryInsideDir_Address, drive) # Read it\n \n for x in Entry.ListEntry:\n if x.attr[3] == 'DIRECTORY' and x.attr[4] == 'NULL' and x.attr[5] == 'NULL' and x.attr[6] == 'NULL' and x.attr[7] == 'NULL': \n ReadAllDirectory_FromRDET(x, FirstClusterDATA, bootSector, drive, depth + 1)\n \n return None\n\ndef Load_FAT_DATA(Entry, isROOT = False):\n \n path = [] # List of Children\n \n if isROOT == True: # If the Entry is the Root Directory, we load all info of it even it is a hidden file or 
system file\n for x in Entry.ListEntry:\n res =Load_FAT_DATA(x)\n if res != '': #If the Entry is not a hidden file or system file, we load it\n path.append(res) \n \n dict_path = {} # Dictionary of the Entry, Help located the data easier with calling the key\n dict_path[\"Name\"] = Entry.name\n \n str = ''\n for i in range(len(Entry.attr)): \n if Entry.attr[i] != \"NULL\" and i != 4 and i != 5 and i != 6: # Only load the attribute that is not Hidden File or System File\n if str != '': str += ',' + Entry.attr[i]\n else: str += Entry.attr[i]\n \n dict_path[\"Attribute\"] = str\n dict_path[\"Date_Created\"] = Entry.createDate\n dict_path[\"Time_Created\"] = Entry.createTime\n dict_path[\"Size\"] = Entry.size\n dict_path[\"Children\"] = path\n dict_path[\"Type\"] = \"FAT32\"\n \n else:\n if Entry.attr[3] == 'DIRECTORY' and Entry.attr[4] == 'NULL' and Entry.attr[5] == 'NULL' and Entry.attr[6] == 'NULL' and Entry.attr[7] == 'NULL' :\n \n for x in Entry.ListEntry:\n res =Load_FAT_DATA(x)\n if res != '': # If the Entry is not a hidden file or system file, we load it\n path.append(res) \n \n dict_path = {} #Dictionary of the Entry, Help located the data easier with calling the key\n dict_path[\"Name\"] = Entry.name\n \n str = ''\n for i in range(len(Entry.attr)): \n if Entry.attr[i] != \"NULL\": #Only load the attribute that is not Hidden File or System File\n if str != '': str += ',' + Entry.attr[i]\n else: str += Entry.attr[i]\n \n dict_path[\"Attribute\"] = str\n dict_path[\"Date_Created\"] = Entry.createDate\n dict_path[\"Time_Created\"] = Entry.createTime\n dict_path[\"Size\"] = Entry.size\n dict_path[\"Children\"] = path\n dict_path[\"Type\"] = \"FOLDER\"\n \n elif Entry.attr[4] == 'VOLUME LABEL' or Entry.attr[5] == 'SYSTEM FILE' or Entry.attr[6] == 'HIDDEN FILE': #If the Entry is a Hidden File or System File\n return ''\n else: #If the Entry is a File\n dict_path = {} #Dictionary of the Entry, Help located the data easier with calling the key\n dict_path[\"Name\"] = Entry.name\n \n str = ''\n for i in range(len(Entry.attr)): \n if Entry.attr[i] != \"NULL\": #Only load the attribute that is not Hidden File or System File\n if str != '': str += ',' + Entry.attr[i]\n else: str += Entry.attr[i]\n \n dict_path[\"Attribute\"] = str\n dict_path[\"Size\"] = Entry.size\n dict_path[\"Date_Created\"] = Entry.createDate\n dict_path[\"Time_Created\"] = Entry.createTime\n dict_path[\"Size\"] = Entry.size\n TypeofFile = Entry.name.split('.')[-1]\n dict_path[\"Type\"] = TypeofFile + \" FILE\"\n \n return dict_path\n \n return dict_path\n\ndef Push_To_GUI(Entry, TypePartition, file_path):\n \n if TypePartition == 'FAT32':\n file_path.append(Load_FAT_DATA(Entry,True))\n else:\n file_path.append(Load_NTFS_DATA(Entry,True))\n \n\n\"\"\"NTFS\"\"\"\ndef LocatedRoot(MFT):\n\n for i in range(len(MFT.MFT)):\n if (MFT.MFT[i].isROOT == False):\n for j in range(len(MFT.MFT[i].attributes)):\n attr = MFT.MFT[i].attributes[j]\n if(attr.typeHeader == 'FILE_NAME'):\n IdParent = attr.content.file_name.IdRootParentDirectory\n MFT.Dictionary[IdParent].listEntry.append(MFT.MFT[i])\n \ndef CheckIsFolder(MFT):\n for i in range(len(MFT.MFT)):\n if (MFT.MFT[i].isROOT == False and MFT.MFT[i].listEntry != []):\n for j in range(len(MFT.MFT[i].attributes)):\n if(MFT.MFT[i].attributes[j].typeHeader == 'FILE_NAME'):\n if(MFT.MFT[i].attributes[j].content.file_name.attr[1] == \"NULL\" and MFT.MFT[i].attributes[j].content.file_name.attr[2] == \"NULL\"):\n print(MFT.MFT[i].attributes[j].content.file_name.Name)\n\ndef Load_NTFS_DATA(Entry, 
isROOT = False):\n \n path = [] #List of Children\n NTFS_CreateTime = \"\" #Create Time of the Entry\n Check_Is_Folder = False #Check if the Entry is a Folder\n \n for j in range(len(Entry.attributes)): #Scan all the attribute of the Entry\n if(Entry.attributes[j].typeHeader == 'STANDARD_INFORMATION' ): \n NTFS_CreateTime = Entry.attributes[j].content.standard_information.create_time\n \n if(Entry.attributes[j].typeHeader == 'FILE_NAME'):#If the attribute contain important information about the Entry\n if(Entry.attributes[j].content.file_name.attr[1] != \"NULL\" or Entry.attributes[j].content.file_name.attr[2] != \"NULL\"):\n if(isROOT != True): return \"\" \n \n dict_path = {} # Dictionary of the Entry, Help located the data easier with calling the key\n dict_path[\"Name\"] = Entry.attributes[j].content.file_name.Name #Name of the Entry\n \n str = ''\n for x in range(len(Entry.attributes[j].content.file_name.attr)):\n if Entry.attributes[j].content.file_name.attr[x] != \"NULL\" and x != 1 and x != 2: #Only load the attribute that is not Hidden File or System File\n if str != '': str += ',' + Entry.attributes[j].content.file_name.attr[x]\n else : str += Entry.attributes[j].content.file_name.attr[x]\n \n dict_path[\"Attribute\"] = str #Attribute of the Entry\n dict_path[\"Date_Created\"] = NTFS_CreateTime.split(\" \")[0] #Date of the Entry\n dict_path[\"Time_Created\"] = NTFS_CreateTime.split(\" \")[1] #Time of the Entry\n dict_path[\"Size\"] = Entry.SizeofusedMFTE #Size of the Entry\n dict_path[\"Type\"] = \"NTFS\"\n Check_Is_Folder = True\n \n else: \n dict_path = {} # Dictionary of the Entry, Help located the data easier with calling the key\n dict_path[\"Name\"] = Entry.attributes[j].content.file_name.Name #Name of the Entry\n \n str = '' #Attribute of the Entry\n for x in range(len(Entry.attributes[j].content.file_name.attr)):\n if Entry.attributes[j].content.file_name.attr[x] != \"NULL\": #Only load the attribute that is not Hidden File or System File\n if str != '': str += ',' + Entry.attributes[j].content.file_name.attr[x]\n else : str += Entry.attributes[j].content.file_name.attr[x]\n \n dict_path[\"Attribute\"] = str #Attribute of the Entry\n dict_path[\"Date_Created\"] = NTFS_CreateTime.split(\" \")[0] #Date of the Entry\n dict_path[\"Time_Created\"] = NTFS_CreateTime.split(\" \")[1] #Time of the Entry\n dict_path[\"Size\"] = Entry.SizeofusedMFTE #Size of the Entry\n \n if Entry.attributes[j].content.file_name.attr[4] != \"NULL\": \n Check_Is_Folder = True #Check if the Entry is a Folder\n dict_path[\"Type\"] = \"FOLDER\"\n else: dict_path[\"Type\"] = \"FILE\"\n break\n \n \n for x in Entry.listEntry:\n res = Load_NTFS_DATA(x)\n if res != '': #If data is not empty\n path.append(res)\n \n if len(path) == 0: path = \"\" \n if Check_Is_Folder == True: #If the Entry is a Folder\n dict_path[\"Children\"] = path\n \n return dict_path\n\ndef Get_InFo_From_All_Disk():\n \n \"\"\" Detect all drive \"\"\"\n drives = ['%s:' % d for d in string.ascii_uppercase if os.path.exists('%s:' % d)]\n\n \"\"\" File Path\"\"\"\n file_path = []\n\n \"\"\" Drive Path \"\"\"\n for d in drives: # LOOP Through All Available Drive\n \n drive = r\"\\\\.\\{}\".format(d) # Format The Name That Will Be Used To Open The Drive\n \n \"\"\" Boot Sector \"\"\"\n BOOT = ReadInfoFromBootSector(drive)\n \n if BOOT.FATtype.split(\" \")[0] == 'FAT32': \n \n \"\"\" Address of FAT1, FAT2, DATA, RDET (Including Data part) \"\"\"\n FAT1_Address = BOOT.sectorBeforeFAT * BOOT.bytePerSector\n FAT2_Address = (BOOT.sectorBeforeFAT 
+ BOOT.sectorPerFAT) * BOOT.bytePerSector \n FirstCluster_Data_Address = (BOOT.sectorBeforeFAT + BOOT.sectorPerFAT * 2) * BOOT.bytePerSector\n RDET_Address = (BOOT.firstClusterinRDET- 2 ) * BOOT.sectorPerCluster * BOOT.bytePerSector + FirstCluster_Data_Address\n \n \"\"\" RDET \"\"\"\n RDET = ReadInfoRDET(drive, BOOT, RDET_Address)\n \n \"\"\" Print Directory Tree \"\"\"\n RDET.RootEntry.name = drive[4:]\n \n #Directory Tree Gui\n Push_To_GUI(RDET.RootEntry, \"FAT32\", file_path)\n\n else: \n if drive[4:] == 'C:' : continue\n with open (drive,'rb') as fp:\n NTFS_BOOT = NTFS.VBR()\n NTFS_BOOT.ReadVBR(drive,fp)\n #NTFS_BOOT.PrintVBR()\n \n MFTAddress = NTFS_BOOT.FirstClusterInMFT * NTFS_BOOT.BytesPerSector * NTFS_BOOT.SectorPerCluster\n MFT = NTFS.MFT()\n MFT.ReadMFT(drive,fp,MFTAddress,NTFS_BOOT.BytesPerSector*NTFS_BOOT.SectorPerCluster)\n #MFT.PrintMFT()\n \n LocatedRoot(MFT)\n \n for i in range(len(MFT.MFT)):\n if (MFT.MFT[i].isROOT == True):\n Push_To_GUI(MFT.MFT[i], \"NTFS\", file_path)\n break\n\n \"\"\" Display GUI\"\"\"\n y = 1\n return file_path\n","repo_name":"ChuBaoChamChi/OS_Project","sub_path":"21127013_21127168_21127635/Read_Disk_Information.py","file_name":"Read_Disk_Information.py","file_ext":"py","file_size_in_byte":12292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27800123110","text":"import cv2\r\nfrom imutils.video import VideoStream\r\nfrom imutils.video import FPS\r\nimport numpy as np\r\nimport argparse\r\nimport imutils\r\nimport time\r\nclass resi:\r\n\r\n def _init_(self):\r\n pass\r\n def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):\r\n # initialize the dimensions of the image to be resized and\r\n # grab the image size\r\n dim = None\r\n (h, w) = image.shape[:2]\r\n\r\n # if both the width and height are None, then return the\r\n # original image\r\n if width is None and height is None:\r\n return image\r\n\r\n # check to see if the width is None\r\n if width is None:\r\n # calculate the ratio of the height and construct the\r\n # dimensions\r\n r = height / float(h)\r\n dim = (int(w * r), height)\r\n\r\n # otherwise, the height is None\r\n else:\r\n # calculate the ratio of the width and construct the\r\n # dimensions\r\n r = width / float(w)\r\n dim = (width, int(h * r))\r\n\r\n # resize the image\r\n resized = cv2.resize(image, dim, interpolation = inter)\r\n\r\n # return the resized image\r\n return resized\r\n\r\ntmp=resi()\r\nimg=cv2.imread(\"img2.jpg\");\r\nimg=tmp.image_resize(img,1366,768)\r\ncv2.imshow(\"P1\",img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"DeepikaSingh1998/Tooniser","sub_path":"resizer.py","file_name":"resizer.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29994896804","text":"# Given a string, find the length of the longest substring in it with no more than K distinct characters.\n# Example:\n# Input: String=\"araaci\", K=2\n# Output: 4\n# Explanation: The longest substring with no more than '2' distinct characters is \"araa\".\n\n# Input: String=\"cbbebi\", K=3\n# Output: 5\n# Explanation: The longest substrings with no more than '3' distinct characters are \"cbbeb\" & \"bbebi\".\n\n# sliding window: O(N) space: O(K)\ndef longest_substring_with_k_distinct_characters(str, k):\n window_start = 0\n result = 0\n char_frequency = dict()\n for window_end in range(len(str)):\n right_char = str[window_end]\n if right_char not 
in char_frequency:\n            char_frequency[right_char] = 0\n        char_frequency[right_char] += 1\n        while(len(char_frequency)) > k:\n            left_char = str[window_start]\n            char_frequency[left_char] -= 1\n            if char_frequency[left_char] == 0:\n                del char_frequency[left_char]\n            window_start += 1\n        result = max(result, window_end - window_start + 1)\n    return result\n\nprint(longest_substring_with_k_distinct_characters(\"araaci\", 2))\nprint(longest_substring_with_k_distinct_characters(\"araaci\", 1))\nprint(longest_substring_with_k_distinct_characters(\"cbbebi\", 3))","repo_name":"JoanWu5/Grokking-the-coding-interview","sub_path":"sliding window/longest substring with K distinct characters.py","file_name":"longest substring with K distinct characters.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"610146574","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nimport torch\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nfinetuned_model_path = \"./model/finetunedM\"\n\nwith open('./data/label_to_id.json', 'r') as f:\n    label_to_id = json.load(f)\nid_to_label = {v: k for k, v in label_to_id.items()}\n\n#\n# Model testing: load the fine-tuned checkpoint and run a quick inference check\nfinetunedM = AutoModelForSequenceClassification.from_pretrained(finetuned_model_path, device_map={\"\": \"cuda:1\"})\ntokenizerM = AutoTokenizer.from_pretrained(finetuned_model_path)\n\nsequences = [\"张三的电话是多少啊?\", \"SSE的责任人是谁?\"]\ntokens = tokenizerM(sequences, padding=\"max_length\", truncation=True, return_tensors=\"pt\")\noutputs = finetunedM(**tokens)\npredictions = torch.nn.functional.softmax(outputs.logits, dim=-1)\nscores, predicted_labels = torch.max(predictions, dim=-1)\nprint(scores.tolist(), [id_to_label[i] for i in predicted_labels.tolist()])\n","repo_name":"qianshuang/FH_text_classify","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39343946159","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('', views.month, name='month'),\n path('signin/', views.signin, name='signin'),\n path('signin/new/', views.new, name='new'),\n path('signin/change_month//', views.change_month, name='change_month'),\n]\n","repo_name":"noahrossi/moe-attendance","sub_path":"attendance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73987615130","text":"from transformers import BertTokenizer, BertForSequenceClassification\nimport torch\nimport pandas as pd\nimport numpy as np\nfrom torch.optim.lr_scheduler import ExponentialLR\nfrom torchsummary import summary\nfrom sklearn.model_selection import train_test_split\nfrom dataloader import CustomDataset\nfrom model import BERTClass\nfrom ckpoint import *\nfrom train_utils import predict, flat_accuracy, train_model\n\nnew_target_list = [\n 0, 1, 2, 3, 4, 5, 6, 7\n]\n\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\nMAX_LEN = 256\nTRAIN_BATCH_SIZE = 32\nVALID_BATCH_SIZE = 32\nEPOCHS = 4\nLEARNING_RATE = 1e-5\n\norig_train_df = pd.read_csv(\"./data/train.csv\")\n\nX, y = orig_train_df.transcription, orig_train_df.new_labels\n\nX_train, X_val, y_train, y_val = train_test_split(X, y, stratify=y, test_size=0.2, random_state=200)\ntrain_df = pd.concat([X_train, y_train], axis=1)\nval_df = pd.concat([X_val, y_val], axis=1)\n\ntrain_medical_df = pd.get_dummies(train_df.new_labels)\nval_medical_df = pd.get_dummies(val_df.new_labels)\n\ntrain_df = pd.concat([train_df, train_medical_df], axis=1)\nval_df = pd.concat([val_df, val_medical_df], axis=1)\n# class_weights = class_weight.compute_class_weight(class_weight='balanced', classes=np.unique(target_list), y=train_df.medical_specialty)\n# weights= torch.tensor(class_weights,dtype=torch.float)\n\n# train_size = 0.8\n# train_df = orig_train_df.sample(frac=train_size, random_state=200)\n# train_df.drop('medical_specialty', axis=1, inplace=True)\n# val_df.drop('medical_specialty', axis=1, inplace=True)\n\n# val_df = orig_train_df.drop(train_df.index).reset_index(drop=True)\ntrain_df = train_df.reset_index(drop=True)\nval_df = val_df.reset_index(drop=True)\n\ntrain_dataset = CustomDataset(train_df, tokenizer, MAX_LEN, new_target_list)\nval_dataset = CustomDataset(val_df, tokenizer, MAX_LEN, new_target_list)\n\ntrain_data_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size =TRAIN_BATCH_SIZE, num_workers = 0)\nval_data_loader = torch.utils.data.DataLoader(val_dataset, shuffle=False, batch_size =VALID_BATCH_SIZE, num_workers = 0)\n\n# GPU usage\nif torch.cuda.is_available(): \n device = torch.device(\"cuda\")\n print('GPU:', torch.cuda.get_device_name(0))\nelse:\n device = torch.device(\"cpu\")\n print('CPU exists.')\n\n# weights = weights.to(device)\n\n\nmodel = BERTClass(len(new_target_list))\nmodel.to(device)\n\noptimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)\n\nckpt_path = \"data/curr_ckpt.pt\"\nmodel_path = \"data/best_model_class.pt\"\n\ntrained_model = train_model(EPOCHS, train_data_loader, val_data_loader, model, optimizer, ckpt_path, model_path, device)\n","repo_name":"soyeonjangg/watermelon-intact-datathon","sub_path":"big_main.py","file_name":"big_main.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15415459955","text":"import csv\nfrom 
.decorators import request_time_used\n# movies: movieId,title,genres\n# ratings: userId,movieId,rating,timestamp\n\n_DEBUG = True\n\n@request_time_used(_DEBUG)\ndef make_cache(config: dict) -> None:\n '''Maps every movie id to it's average rating and writes it in csv file.'''\n with open(config['movies_path'], 'r', encoding='utf-8') as movies_file:\n movie_rating_dict = {}\n next(movies_file)\n for movie in movies_file:\n movie_id = int(movie.split(',')[0])\n movie_rating_dict[movie_id] = [0, 0]\n\n with open(config['ratings_path'], 'r') as ratings_file:\n next(ratings_file)\n for line in ratings_file:\n line = line.split(',')\n movie_id = int(line[1])\n movie_rating = float(line[2])\n movie_rating_dict[movie_id][0] += movie_rating # sum of all ratings\n movie_rating_dict[movie_id][1] += 1 # count of these ratings\n\n for record in movie_rating_dict.values():\n rating_sum = record[0]\n rating_count = record[1] if record[1] else 1\n average_rating = round(rating_sum/rating_count, 1)\n record[0] = average_rating\n\n with open(config['cache_path'], 'w', newline='') as cache:\n writer = csv.writer(cache)\n writer.writerow(('movieid', 'rating'))\n for item in movie_rating_dict.items():\n movie_id = item[0]\n average_rating = item[1][0]\n writer.writerow((movie_id, average_rating))\n","repo_name":"lce-Cream/issoft_big_data_course","sub_path":"task_2/utility/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73560795290","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os,sys\nimport ctypes\nimport numpy as np\nfrom .hmatrix import _C_HMatrix, HMatrix\n\n\nclass _C_MultiHMatrix(ctypes.Structure):\n \"\"\"Holder for the raw data from the C++ code.\"\"\"\n pass\n\n\nclass AbstractMultiHMatrix:\n \"\"\"Common code for the two actual MultiHMatrix classes below.\"\"\"\n\n ndim = 2 # To mimic a numpy 2D array\n\n def __init__(self, c_data: _C_MultiHMatrix, **params):\n # Users should use one of the two constructors below.\n\n self.c_data = c_data\n self.shape = (self.lib.multi_nbrows(c_data), self.lib.multi_nbcols(c_data))\n self.size = self.lib.nbhmats(c_data)\n\n\n self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)\n self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]\n\n self.hmatrices = []\n for l in range(0,self.size):\n c_data_hmatrix = self.lib.getHMatrix(self.c_data,l)\n self.hmatrices.append(HMatrix(c_data_hmatrix,**params))\n\n\n self.params = params.copy()\n\n @classmethod\n def from_coefs(cls, getcoefs, nm, points_target, points_source=None, **params):\n \"\"\"Construct an instance of the class from a evaluation function.\n\n Parameters\n ----------\n getcoefs: Callable\n A function evaluating an array of matrices at given coordinates.\n points_target: np.ndarray of shape (N, 3)\n The coordinates of the target points. 
If points_source=None, also the coordinates of the target points\n points_source: np.ndarray of shape (N, 3)\n If not None; the coordinates of the source points.\n epsilon: float, keyword-only, optional\n Tolerance of the Adaptive Cross Approximation\n eta: float, keyword-only, optional\n Criterion to choose the blocks to compress\n minclustersize: int, keyword-only, optional\n Minimum shape of a block\n maxblocksize: int, keyword-only, optional\n Maximum number of coefficients in a block\n\n Returns\n -------\n MultiHMatrix or ComplexMultiHMatrix\n \"\"\"\n # Set params.\n cls._set_building_params(**params)\n \n # Boilerplate code for Python/C++ interface.\n _getcoefs_func_type = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double))\n if points_source is None:\n cls.lib.MultiHMatrixCreateSym.restype = ctypes.POINTER(_C_MultiHMatrix)\n cls.lib.MultiHMatrixCreateSym.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),\n ctypes.c_int,\n _getcoefs_func_type,\n ctypes.c_int\n ]\n\n # Call the C++ backend.\n c_data = cls.lib.MultiHMatrixCreateSym(points_target, points_target.shape[0], _getcoefs_func_type(getcoefs),nm)\n\n else:\n cls.lib.MultiHMatrixCreate.restype = ctypes.POINTER(_C_MultiHMatrix)\n cls.lib.MultiHMatrixCreate.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),\n ctypes.c_int,\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),\n ctypes.c_int,\n _getcoefs_func_type,\n ctypes.c_int\n ]\n\n # Call the C++ backend. \n c_data = cls.lib.MultiHMatrixCreate(points_target,points_target.shape[0],points_source, points_source.shape[0], _getcoefs_func_type(getcoefs),nm)\n\n return cls(c_data, **params)\n\n\n @classmethod\n def from_submatrices(cls, getsubmatrix, nm, points_target, points_source=None, **params):\n \"\"\"Construct an instance of the class from a evaluation function.\n\n Parameters\n ----------\n points: np.ndarray of shape (N, 3)\n The coordinates of the points.\n getsubmatrix: Callable\n A function evaluating the matrix in a given range.\n epsilon: float, keyword-only, optional\n Tolerance of the Adaptive Cross Approximation\n eta: float, keyword-only, optional\n Criterion to choose the blocks to compress\n minclustersize: int, keyword-only, optional\n Minimum shape of a block\n maxblocksize: int, keyword-only, optional\n Maximum number of coefficients in a block\n\n Returns\n -------\n HMatrix or ComplexHMatrix\n \"\"\"\n # Set params.\n cls._set_building_params(**params)\n\n # Boilerplate code for Python/C++ interface.\n _getsumatrix_func_type = ctypes.CFUNCTYPE(\n None, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),\n ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double)\n )\n if points_source is None:\n cls.lib.MultiHMatrixCreatewithsubmatSym.restype = ctypes.POINTER(_C_MultiHMatrix)\n cls.lib.MultiHMatrixCreatewithsubmatSym.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),\n ctypes.c_int,\n _getsumatrix_func_type,\n ctypes.c_int\n ]\n\n # Call the C++ backend.\n c_data = cls.lib.MultiHMatrixCreatewithsubmatSym(points_target, points_target.shape[0], _getsumatrix_func_type(getsubmatrix),nm)\n else:\n cls.lib.MultiHMatrixCreatewithsubmat.restype = ctypes.POINTER(_C_MultiHMatrix)\n cls.lib.MultiHMatrixCreatewithsubmat.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),\n ctypes.c_int,\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),\n 
ctypes.c_int,\n _getsumatrix_func_type,\n ctypes.c_int\n ]\n\n # Call the C++ backend.\n c_data = cls.lib.MultiHMatrixCreatewithsubmat(points_target,points_target.shape[0],points_source, points_source.shape[0], _getsumatrix_func_type(getsubmatrix),nm)\n\n return cls(c_data, **params)\n\n @classmethod\n def _set_building_params(cls, *, eta=None, minclustersize=None, epsilon=None, maxblocksize=None):\n \"\"\"Put the parameters in the C++ backend.\"\"\"\n if epsilon is not None:\n cls.lib.setepsilon.restype = None\n cls.lib.setepsilon.argtypes = [ ctypes.c_double ]\n cls.lib.setepsilon(epsilon)\n\n if eta is not None:\n cls.lib.seteta.restype = None\n cls.lib.seteta.argtypes = [ ctypes.c_double ]\n cls.lib.seteta(eta)\n\n if minclustersize is not None:\n cls.lib.setminclustersize.restype = None\n cls.lib.setminclustersize.argtypes = [ ctypes.c_int ]\n cls.lib.setminclustersize(minclustersize)\n\n if maxblocksize is not None:\n cls.lib.setmaxblocksize.restype = None\n cls.lib.setmaxblocksize.argtypes = [ ctypes.c_int ]\n cls.lib.setmaxblocksize(maxblocksize)\n\n def __str__(self):\n return f\"{self.__class__.__name__}(shape={self.shape})\"\n\n def __getitem__(self, key):\n\n # self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)\n # self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]\n # c_data_hmatrix = self.lib.getHMatrix(self.c_data,key)\n # return HMatrix(c_data_hmatrix,**self.params)\n return self.hmatrices[key]\n\n def matvec(self, l , vector):\n \"\"\"Matrix-vector product (interface for scipy iterative solvers).\"\"\"\n\n assert self.shape[1] == vector.shape[0], \"Matrix-vector product of matrices of wrong shapes.\"\n\n # Boilerplate for Python/C++ interface\n self.lib.MultiHMatrixVecProd.argtypes = [\n ctypes.POINTER(_C_MultiHMatrix),\n ctypes.c_int,\n np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS')\n ]\n\n # Initialize vector\n result = np.zeros((self.shape[0],), dtype=self.dtype)\n\n # Call C++ backend\n self.lib.MultiHMatrixVecProd(self.c_data,l , vector, result)\n return result\n\n\nclass MultiHMatrix(AbstractMultiHMatrix):\n \"\"\"A real-valued hierarchical matrix based on htool C++ library.\n Create with HMatrix.from_coefs or HMatrix.from_submatrices.\n\n Attributes\n ----------\n c_data:\n Pointer to the raw data used by the C++ library.\n shape: Tuple[int, int]\n Shape of the matrix.\n nb_dense_blocks: int\n Number of dense blocks in the hierarchical matrix.\n nb_low_rank_blocks: int\n Number of sparse blocks in the hierarchical matrix.\n nb_blocks: int\n Total number of blocks in the decomposition.\n params: dict\n The parameters that have been used to build the matrix.\n \"\"\"\n libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared')\n if 'linux' in sys.platform:\n lib = ctypes.cdll.LoadLibrary(libfile+'.so')\n elif sys.platform == 'darwin':\n lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')\n elif sys.platform == 'win32':\n lib = ctypes.cdll.LoadLibrary(libfile+'.dll')\n dtype = ctypes.c_double\n\n\nclass ComplexMultiHMatrix(AbstractMultiHMatrix):\n \"\"\"A complex-valued hierarchical matrix based on htool C++ library.\n Create with ComplexHMatrix.from_coefs or ComplexHMatrix.from_submatrices.\n\n Attributes\n ----------\n c_data:\n Pointer to the raw data used by the C++ library.\n shape: Tuple[int, int]\n Shape of the matrix.\n nb_dense_blocks: int\n Number of dense blocks in the hierarchical matrix.\n nb_low_rank_blocks: int\n Number of sparse blocks in the 
hierarchical matrix.\n nb_blocks: int\n Total number of blocks in the decomposition.\n params: dict\n The parameters that have been used to build the matrix.\n \"\"\"\n libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared_complex')\n if 'linux' in sys.platform:\n lib = ctypes.cdll.LoadLibrary(libfile+'.so')\n elif sys.platform == 'darwin':\n lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')\n elif sys.platform == 'win32':\n lib = ctypes.cdll.LoadLibrary(libfile+'.dll')\n dtype = np.complex128\n\n","repo_name":"jcavieresg/tps_model","sub_path":"htps/htool/interface/htool/multihmatrix.py","file_name":"multihmatrix.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32702432997","text":"\"\"\"\nDisplay an image URL as an embedded image in some clients like Conversations.\nUses: https://xmpp.org/extensions/xep-0066.html#x-oob\n\nUsage\n-----\n\n.. glossary::\n\n /embed \n\n Run this command to send the as an\n embedded image in your contact's client.\n\"\"\"\n\nfrom poezio import tabs\nfrom poezio.plugin import BasePlugin\nfrom poezio.theming import get_theme\n\n\nclass Plugin(BasePlugin):\n def init(self):\n for tab_t in [tabs.MucTab, tabs.StaticConversationTab, tabs.DynamicConversationTab, tabs.PrivateTab]:\n self.api.add_tab_command(\n tab_t,\n 'embed',\n self.embed_image_url,\n help='Embed an image url into the contact\\'s client',\n usage='')\n\n def embed_image_url(self, args):\n tab = self.api.current_tab()\n message = self.core.xmpp.make_message(tab.name)\n message['body'] = args\n message['oob']['url'] = args\n if isinstance(tab, tabs.MucTab):\n message['type'] = 'groupchat'\n else:\n message['type'] = 'chat'\n tab.add_message(\n message['body'],\n nickname=tab.core.own_nick,\n nick_color=get_theme().COLOR_OWN_NICK,\n identifier=message['id'],\n jid=tab.core.xmpp.boundjid,\n typ=1,\n )\n message.send()\n","repo_name":"mathieui/poezio","sub_path":"plugins/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"32"} +{"seq_id":"70061274972","text":"from flask_jwt_extended import current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.db import safe_query, get_count, save_to_db\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\nfrom app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.sessions import SessionSchema\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.track import Track\nfrom app.models.user import User\nfrom app.models.session_speaker_link import SessionsSpeakersLink\nfrom app.settings import get_settings\nfrom app.api.helpers.files import make_frontend_url\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check 
for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'track'], data)\n data['creator_id'] = current_user.id\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:\n raise ForbiddenException({'pointer': ''}, \"Sessions are disabled for this Event\")\n\n def after_create_object(self, session, data, view_kwargs):\n \"\"\"\n method to send email for creation of new session\n mails session link to the concerned user\n :param session:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.event.get_owner():\n event_name = session.event.name\n owner = session.event.get_owner()\n owner_email = owner.email\n frontend_url = get_settings()['frontend_url']\n event = session.event\n link = make_frontend_url(\"/events/{}/sessions/{}\"\n .format(event.identifier, session.id))\n send_email_new_session(owner_email, event_name, link)\n send_notif_new_session_organizer(owner, event_name, link, session.id)\n\n for speaker in session.speakers:\n session_speaker_link = SessionsSpeakersLink(session_state=session.state,\n session_id=session.id,\n event_id=session.event.id,\n speaker_id=speaker.id)\n save_to_db(session_speaker_link, \"Session Speaker Link Saved\")\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'after_create_object': after_create_object\n }}\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for SessionList class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n if view_kwargs.get('user_id') is not None:\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n query_ = query_.join(User)\\\n .join(Speaker).filter((User.id == user.id or Session.speakers.any(Speaker.user_id == user.id)))\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'query': query\n }}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = 
event.id\n\n def before_update_object(self, session, data, view_kwargs):\n \"\"\"\n before update method to verify if session is locked before updating session object\n :param event:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('is_locked') != session.is_locked:\n if not (has_access('is_admin') or has_access('is_organizer', event_id=session.event_id)):\n raise ForbiddenException({'source': '/data/attributes/is-locked'},\n \"You don't have enough permissions to change this property\")\n\n if session.is_locked and data.get('is_locked') == session.is_locked:\n raise ForbiddenException({'source': '/data/attributes/is-locked'}, \"Locked sessions cannot be edited\")\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n\n if 'state' in data and data.get('send_email', None) and (session.state == 'accepted' or\n session.state == 'rejected'):\n\n event = session.event\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, event.identifier, session.id)\n if not speaker.is_email_overridden:\n send_email_session_accept_reject(speaker.email, session, link)\n send_notif_session_accept_reject(speaker, session.title, session.state, link, session.id)\n\n # Email for owner\n if session.event.get_owner():\n owner = session.event.get_owner()\n owner_email = owner.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, event.identifier, session.id)\n send_email_session_accept_reject(owner_email, session,\n link)\n send_notif_session_accept_reject(owner, session.title,\n session.state, link, session.id)\n if 'state' in data:\n entry_count = SessionsSpeakersLink.query.filter_by(session_id=session.id)\n if entry_count.count() == 0:\n is_patch_request = False\n else:\n is_patch_request = True\n\n if is_patch_request:\n for focus_session in entry_count:\n focus_session.session_state = session.state\n db.session.commit()\n else:\n current_session = Session.query.filter_by(id=session.id).first()\n for speaker in current_session.speakers:\n session_speaker_link = SessionsSpeakersLink(session_state=session.state,\n session_id=session.id,\n event_id=session.event.id,\n speaker_id=speaker.id)\n save_to_db(session_speaker_link, \"Session Speaker Link Saved\")\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'before_update_object': before_update_object,\n 'before_get_object': before_get_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session,\n 'model': 
Session}\n","repo_name":"akashtalole/python-flask-restful-api","sub_path":"app/api/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":10077,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"36092828208","text":"class Solution:\n    def isPalindrome(self, s: str) -> bool:\n        '''\n        Problem: Valid Palindrome. Given a string, determine whether it is a palindrome, considering only alphanumeric characters and ignoring letter case.\n        '''\n        l, r = 0, len(s)-1\n        while l <= r:\n            # Skip non-alphanumeric characters at the left pointer\n            if ord(s[l]) < 48 or 57 < ord(s[l]) < 65 or 90 < ord(s[l]) < 97 or ord(s[l]) > 122:\n                l += 1\n                continue\n            # Skip non-alphanumeric characters at the right pointer\n            if ord(s[r]) < 48 or 57 < ord(s[r]) < 65 or 90 < ord(s[r]) < 97 or ord(s[r]) > 122:\n                r -= 1\n                continue\n            # Both pointers are on digits, so the characters must be equal\n            if 48 <= ord(s[l]) <= 57 and s[l] != s[r]:\n                return False\n            # Left pointer is a letter while the right pointer is a digit\n            elif 65 <= ord(s[l]) <= 122 and 48 <= ord(s[r]) <= 57:\n                return False\n            elif 65 <= ord(s[l]) <= 122 and (ord(s[l]) != ord(s[r]) and abs(ord(s[l])-ord(s[r])) != 32):\n                return False\n            l += 1\n            r -= 1\n        # note: str.isalnum() would be a simpler filter than the manual ord() range checks\n        return True\n\n\n# 48 57 65 90 97 122\n# 0 9 A Z a z\ns = Solution()\nprint('res', s.isPalindrome(\"ab_a\"))\n","repo_name":"MrLW/algorithm","sub_path":"05_doublepoint/easy_125_isPalindrome.py","file_name":"easy_125_isPalindrome.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17595169549","text":"class Game_Over:\n    \"\"\"\n    This class checks for game ending for a player or all players\n    \"\"\"\n\n    # check for game ending for a player or all players\n    def game_over(self, player_n, dic_players, list_players):\n        mes = '\\nCaptain {} sunk along with the last sub!'.format(player_n,)\n        mes += '\\nGame over for Captain {}! Bye Bye! Bon Voyage!'.format(player_n,)\n        print(mes)\n        dic_players.pop(player_n)\n        list_players.remove(player_n)\n        if len(list_players) == 1:\n            mes = '\\nCaptain {} is the Winner of this game!'.format(list_players[0],)\n            mes += '\\nCongratulations Captain {}! Well done!'.format(list_players[0],)\n            mes += '\\nGame Over! 
Thanks for playing!\\n'\n print(mes)\n","repo_name":"jamil-said/code-samples","sub_path":"Python/Python_OOP_game_battle_of_submarines/game_over.py","file_name":"game_over.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"32917471417","text":"import subprocess, os, pydig, sys, getopt, yaml \nipa = sys.argv[1]\nfile_path = \"/Users/Marinadin.Grin/repositories/ivaldi-ansible/host_vars/\"\nfile_name = sys.argv[2]\nfull_path = file_path + file_name\ncontent = None\n\nwith open(full_path, \"r\") as stream:\n try:\n content = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\ndomain_list = []\nfor field_key in content:\n for env in content[field_key]:\n if 'server_names' in env:\n domain_list += env['server_names']\n#print(domain_list)\n\nprint(\"================================\\n================================\\n================================\")\n\ndef domain_checker():\n for d in domain_list:\n output = pydig.query(d, 'A')\n #import pdb; pdb.set_trace()\n if ipa not in output:\n print(f\"The domain {d} does not match the IP address\") \n\ndomain_checker()\n","repo_name":"devopsinthecloud/python_scripts","sub_path":"domain-checker-parser.py","file_name":"domain-checker-parser.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"31529036164","text":"from langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import Chroma\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.document_loaders import DirectoryLoader\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.chains import RetrievalQA\nfrom dotenv import load_dotenv\nimport json\n\nload_dotenv()\n\n\nclass Bot:\n def __init__(self, temp: str):\n self.path = temp\n self.llm = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)\n self.qa_chain = None\n\n def load(self):\n loader = DirectoryLoader(self.path)\n documents = loader.load()\n\n text_splitter = CharacterTextSplitter(chunk_size=1500, chunk_overlap=0)\n texts = text_splitter.split_documents(documents)\n\n embeddings = OpenAIEmbeddings()\n vectorstore = Chroma.from_documents(texts, embeddings)\n retriever = vectorstore.as_retriever()\n self.qa_chain = RetrievalQA.from_chain_type(self.llm, retriever=retriever)\n\n def generate(\n self,\n num_flash_cards: int = 5,\n max_attempts: int = 10,\n optional_instructions: str = \"\",\n ):\n if len(optional_instructions) > 0:\n query = f\"\"\"Based on the document, I want you to create exactly {num_flash_cards} question-answering pairs for me in the following format. 
In\n particular, I want you to emphasis on the following instruction: {optional_instructions}\n \n if there are 3 questions-answering pairs, it would look like this:\n \n [\n {{\"question\": \"What is the capital of France\", \"answer\": \"The capital of France is Paris\"}},\n {{\"question\": \"Did World War II end in 1945\", \"answer\": \"True\"}},\n {{\"question\": \"What is an array in computer science\", \"answer\": \"An array is a collection of items of same data type stored at contiguous memory locations\"}},\n ],\n\n do not include anything other than the json, should return just like the example provided\n \"\"\"\n else:\n query = f\"\"\"Based on the document, I want you to create exactly {num_flash_cards} question-answering pairs for me in the following format.\n \n if there are 3 questions-answering pairs, it would look like this:\n \n [\n {{\"question\": \"What is the capital of France\", \"answer\": \"The capital of France is Paris\"}},\n {{\"question\": \"Did World War II end in 1945\", \"answer\": \"True\"}},\n {{\"question\": \"What is an array in computer science\", \"answer\": \"An array is a collection of items of same data type stored at contiguous memory locations\"}},\n ],\n\n do not include anything other than the json, should return just like the example provided\n \"\"\"\n\n for attempt in range(1, max_attempts + 1):\n try:\n res = self.qa_chain({\"query\": query})\n json_res = json.loads(res[\"result\"].strip())\n return json_res\n except json.JSONDecodeError as e:\n print(\n f\"Failed to generate valid JSON on attempt {attempt}/{max_attempts}: {e}\"\n )\n # if we can't generate valid JSON after max_attempts, return an error\n error_response = {\n \"error\": \"Failed to generate valid JSON\",\n \"message\": f\"Max attempts ({max_attempts}) reached\",\n }\n\n return error_response\n","repo_name":"therealcyberlord/Flashily.AI","sub_path":"backend/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"24293371950","text":"import matplotlib.pyplot as plt\nimport pylab\n\n\ndef plot_loss(train_loss=[], val_loss=[], test_loss=[], file_name='loss.jpg',\n y_label='cross-entropy loss'):\n plt.clf()\n plt.plot(train_loss)\n plt.plot(val_loss)\n plt.plot(test_loss)\n plt.legend(('train loss', 'validation loss', 'test loss'), loc='upper right')\n plt.title('Losses during training of LSTM->LSTM Model')\n plt.xlabel('#epochs')\n plt.ylabel(y_label)\n # plt.show()\n pylab.savefig(file_name)\n","repo_name":"emalgorithm/ncRNA-family-prediction","sub_path":"src/util/visualization_util.py","file_name":"visualization_util.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"41367226788","text":"from fiscalsim_us.model_api import *\n\n\nclass vt_capital_gains_exclusion(Variable):\n value_type = float\n entity = TaxUnit\n label = \"Vermont capital gains exclusion\"\n unit = USD\n documentation = \"Vermont excludes a portion of capital gains, calculated either as a flat amount or as a fraction of adjusted net capital gains, and limited by a fraction of federal taxable income.\"\n definition_period = YEAR\n defined_for = StateCode.VT\n reference = (\n \"https://tax.vermont.gov/sites/tax/files/documents/IN-153-2022.pdf#page=1\" # 2022 Schedule IN-153 Vermont Capital Gains Exclusion Calculation\n \"https://legislature.vermont.gov/statutes/section/32/151/05811\" # 
Titl. 32 V.S.A. § 5811(21)(B)(ii)\n \"https://tax.vermont.gov/sites/tax/files/documents/IN-153%20Instr-2022.pdf\"\n )\n\n def formula(tax_unit, period, parameters):\n # Get adjusted net capital gains, which is capped at 0\n adjusted_net_capital_gain = tax_unit(\n \"adjusted_net_capital_gain\", period\n )\n p = parameters(\n period\n ).gov.states.vt.tax.income.agi.exclusions.capital_gain\n # The flat exclusion is the less of a capped amount\n # or the actual amount of net adjusted capital gains\n flat_exclusion = min_(adjusted_net_capital_gain, p.flat.cap)\n # Get percentage exclusion\n percentage_exclusion = tax_unit(\n \"vt_percentage_capital_gains_exclusion\", period\n )\n # Filer can choose from flat or percentage exclusion.\n # Assume the filer will always choose the larger one\n chosen_exclusion = max_(flat_exclusion, percentage_exclusion)\n # The chosen exclusion should not exceed 40% of federal taxable income\n federal_taxable_income = tax_unit(\"taxable_income\", period)\n cap = federal_taxable_income * p.income_share_cap\n return min_(chosen_exclusion, cap)\n","repo_name":"TheCGO/fiscalsim-us","sub_path":"fiscalsim_us/variables/gov/states/vt/tax/income/adjusted_gross_income/subtractions/vt_capital_gain_exclusion/vt_capital_gains_exclusion.py","file_name":"vt_capital_gains_exclusion.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"27921348045","text":"from switchboard.config import CONFIG, SETTINGS\nfrom switchboard.switchboard_logging import LOGGER\nfrom switchboard import config_osc as osc\nfrom switchboard.devices.device_base import Device, DeviceStatus\nfrom switchboard.devices.device_widget_base import AddDeviceDialog, DeviceWidget\nimport switchboard.switchboard_utils as utils\n\nfrom PySide2 import QtCore, QtGui\n\nimport importlib\nimport inspect\nimport os\nimport traceback\n\n\nDEVICE_PLUGIN_PACKAGE = \"switchboard.devices\"\nDEVICE_PLUGIN_PATH = os.path.join(os.path.dirname(__file__))\n\nclass DeviceManager(QtCore.QObject):\n signal_device_added = QtCore.Signal(object)\n signal_device_removed = QtCore.Signal(object, str, str, bool)\n\n def __init__(self):\n super().__init__()\n\n self._plugins, self._plugin_widgets, self._plugin_icons = self.find_available_device_plugins()\n self._devices = {}\n self._device_name_validator = DeviceNameValidator(self)\n\n def add_devices(self, device_config):\n for device_type, devices in device_config.items():\n for device in devices:\n self._add_device(device_type, device[\"name\"], device[\"ip_address\"], **device[\"kwargs\"])\n\n def _add_device(self, device_type, name, ip, **kwargs):\n device_cls_name = device_type\n if device_cls_name not in self._plugins:\n LOGGER.error(f\"Could not find plugin for {device_type} in {DEVICE_PLUGIN_PACKAGE}\")\n return None\n\n device = self._plugins[device_cls_name](name, ip, **kwargs)\n widget_class = self._plugin_widgets[device_cls_name]\n icons = self._plugin_icons[device_cls_name] if device_cls_name in self._plugin_icons.keys() else None\n device.init(widget_class, icons)\n self._devices[device.device_hash] = device\n\n device.widget.set_name_validator(self._device_name_validator)\n\n # Notify the plugin\n device.__class__.added_device(device)\n\n self.signal_device_added.emit(device)\n return device\n\n def find_available_device_plugins(self):\n\n plugin_modules = self._find_plugin_modules()\n\n found_plugins = {}\n plugin_widgets = {}\n\n for plugin in plugin_modules:\n\n try:\n 
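# Import the plugin module by its dotted path, e.g. switchboard.devices.<subdir>.plugin_<subdir>;\n                # a plugin that fails to import is logged with its traceback and skipped.\n                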
plugin_module = importlib.import_module(plugin)\n except:\n LOGGER.error(f\"Error while loading plugin: {plugin}\\n\\n=== Traceback BEGIN ===\\n{traceback.format_exc()}=== Traceback END ===\\n\")\n continue\n\n members = inspect.getmembers(plugin_module, inspect.isclass)\n\n for (name, c) in members:\n # only add classes that are a sub-class of Device but not Device itself\n if issubclass(c, Device) and (c is not Device):\n display_name = utils.remove_prefix(name, \"Device\")\n found_plugins[display_name] = c\n elif issubclass(c, DeviceWidget) and (c is not DeviceWidget):\n display_name = utils.remove_prefix(name, \"DeviceWidget\")\n plugin_widgets[display_name] = c\n\n plugin_icons = {}\n \n for name, plugin in found_plugins.items():\n plugin_icons[name] = plugin.load_plugin_icons()\n\n return found_plugins, plugin_widgets, plugin_icons\n\n def _find_plugin_modules(self):\n\n plugin_modules = []\n device_subdirs = next(os.walk(DEVICE_PLUGIN_PATH))[1]\n\n for subdir in device_subdirs:\n\n module_name = f\"plugin_{subdir}\"\n path = os.path.join(DEVICE_PLUGIN_PATH, subdir, module_name + \".py\")\n\n if os.path.exists(path):\n module = \".\".join([DEVICE_PLUGIN_PACKAGE, subdir, module_name])\n plugin_modules.append(module)\n\n return plugin_modules\n\n def get_device_add_dialog(self, device_type):\n\n if self._plugins[device_type].add_device_dialog is None:\n dialog = AddDeviceDialog(device_type, self.devices())\n else:\n dialog = self._plugins[device_type].add_device_dialog(self.devices())\n\n dialog.add_name_validator(self._device_name_validator)\n return dialog\n\n def remove_device(self, device, update_config=True):\n # Remove the device from the dict\n self._devices.pop(device.device_hash)\n\n # Disconnect the device\n device.disconnect_listener()\n\n # Set status to delete\n device.status = DeviceStatus.DELETE\n\n # Notify the plugin\n device.__class__.removed_device(device)\n\n self.signal_device_removed.emit(device.device_hash, device.device_type, device.name, update_config)\n\n def remove_device_by_hash(self, device_hash):\n device = self.device_with_hash(device_hash)\n self.remove_device(device)\n\n def clear_device_list(self):\n ''' Removes all device instances. 
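Each device goes through remove_device() with update_config=False, so its listener is disconnected and the plugin's removed_device() hook still runs.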
\n '''\n devices_being_removed = list(self._devices.values())\n\n for device in devices_being_removed:\n self.remove_device(device, update_config=False)\n\n if len(self._devices):\n LOGGER.error(f\"{inspect.currentframe().f_code.co_name} failed to remove all devices one by one\")\n self._devices.clear()\n\n def reset_plugins_settings(self, config):\n ''' Resets all plugins' settings, including their values and overrides.\n This function should be called right after a new config is being loaded or created.\n '''\n for plugin in self._plugins.values():\n plugin.reset_csettings()\n\n for name, plugin in self._plugins.items():\n plugin_settings = plugin.plugin_settings()\n config.register_plugin_settings(name, plugin_settings)\n config.load_plugin_settings(name, plugin_settings)\n\n def available_device_plugins(self):\n return self._plugins.keys()\n\n def plugin_settings(self, device_type):\n return self._plugins[device_type].plugin_settings()\n\n def plugin_icons(self, device_type):\n return self._plugin_icons[device_type]\n\n def auto_connect(self):\n for device in self._devices.values():\n # Auto connect any devices\n if device.auto_connect:\n device.connect_listener()\n\n def devices(self):\n return list(self._devices.values())\n\n def devices_of_type(self, device_type):\n return [device for device in self._devices.values() if device.device_type == device_type]\n\n def device_with_hash(self, device_hash):\n if device_hash not in self._devices:\n return None\n\n return self._devices[device_hash]\n\n def device_with_ip_address(self, ip_address):\n for device in self._devices.values():\n if device.ip_address == ip_address:\n return device\n return None\n\n def device_with_name(self, name):\n for device in self._devices.values():\n if device.name == name:\n return device\n return None\n\n def is_name_unique(self, name):\n for device in self._devices.values():\n if device.name.lower() == name.lower():\n return False\n return True\n\n def plug_into_ui(self, menubar, tabs):\n for plugin in self._plugins.values():\n plugin.plug_into_ui(menubar, tabs)\n\n\nclass DeviceNameValidator(QtGui.QValidator):\n def __init__(self, device_manager, parent=None):\n super().__init__(parent)\n self.device_manager = device_manager\n\n def validate(self, input, pos):\n if self.device_manager.is_name_unique(input):\n return QtGui.QValidator.Acceptable\n return QtGui.QValidator.Invalid\n","repo_name":"chenyong2github/UnrealEngine","sub_path":"Engine/Plugins/VirtualProduction/Switchboard/Source/Switchboard/switchboard/devices/device_manager.py","file_name":"device_manager.py","file_ext":"py","file_size_in_byte":7590,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"30082141477","text":"\"\"\"\r\n\n\nCreate a function that takes a list of students and returns an dictionary\nrepresenting their notes distribution. Have in mind that all invalid notes\nshould not be count in the distribution. 
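A note such as -1, 0 or 6 is simply skipped rather than counted. 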
Valid notes are: `1, 2, 3, 4, 5`\n\n### Example\n\n    get_notes_distribution([\n      {\n        \"name\": \"Steve\",\n        \"notes\": [5, 5, 3, -1, 6]\n      },\n      {\n        \"name\": \"John\",\n        \"notes\": [3, 2, 5, 0, -3]\n      }\n    ]) ➞ {\n      5: 3,\n      3: 2,\n      2: 1\n    }\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef get_notes_distribution(students):\n    A=[]\n    for x in students:\n        A+=x['notes']\n    D={}\n    for i in [1,2,3,4,5]:\n        if A.count(i)>0:\n            D[i]=A.count(i)\n    return D\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"MhtcQNMbkP82ZKJpm_4.py","file_name":"MhtcQNMbkP82ZKJpm_4.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3213315982","text":"from glob import glob\nimport os\nfrom ovirt.node.utils import process, fs, system\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\nPLUGIN_DIR = \"/etc/ovirt-plugins.d/\"\nPLUGIN_XML_OUT = \"/etc/firewalld/services/node-plugin.xml\"\nplugin_files = []\nfw_conf = []\n\nFIREWALLD_PORT_XML = \"\"\"\\n    <port protocol=\"%(proto)s\" port=\"%(port)s\"/>\"\"\"\n\nFIREWALLD_XML_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<service>\n    <short>firewall plugin</short>\n    <description>necessary ports for ovirt-node plugin operations</description>\n    %(port_section)s\n</service>\n\"\"\"\n\n\ndef is_firewalld_available():\n    \"\"\"Check if firewalld is installed\n    \"\"\"\n    return os.path.exists(\"/etc/firewalld\")\n\n\ndef is_firewalld_started():\n    \"\"\"Check if firewalld is started\n    \"\"\"\n    is_started = False\n    try:\n        system.service(\"firewalld\", \"status\")\n        is_started = True\n    except Exception as e:\n        LOGGER.debug(\"Firewalld service status: %s\" % e)\n    return is_started\n\n\ndef open_port(port, proto):\n    if is_firewalld_available() and is_firewalld_started():\n        _setup_firewalld(port, proto)\n    else:\n        setup_iptables(port, proto)\n\n\ndef setup_iptables(port, proto):\n    rules = \"/etc/sysconfig/iptables\"\n\n    def is_open():\n        pat = \"%s dpt:%s\" % (proto, port)\n        for rule in process.check_output([\"iptables\", \"-L\", \"-n\"]).split(\"\\n\"):\n            if rule.strip().endswith(pat):\n                return True\n        return False\n\n    def open_port():\n        cmd = [\"iptables\", \"-I\", \"INPUT\", \"1\", \"-p\", proto,\n               \"--dport\", port, \"-j\", \"ACCEPT\"]\n        process.check_call(cmd)\n\n    def load_rules():\n        process.check_call(\"iptables-restore -c < %s\" % rules,\n                           shell=True)\n\n    def save_rules():\n        process.check_call(\"iptables-save -c > %s\" % rules,\n                           shell=True)\n\n    fs.Config().persist(rules)\n\n    # We need to load the rules before, to prevent overwriting them\n    # when they weren't loaded.\n    load_rules()\n    if not is_open():\n        open_port()\n    save_rules()\n\n\ndef _setup_firewalld(port, proto):\n    port_conf = \"\"\n    rule_dict = {\"port\": port,\n                 \"proto\": proto\n                 }\n\n    port_conf += FIREWALLD_PORT_XML % rule_dict\n    port_dict = {\"port_section\": port_conf}\n    with open(PLUGIN_XML_OUT, \"w\") as f:\n        f.write(FIREWALLD_XML_TEMPLATE % port_dict)\n\n    process.call([\"firewall-cmd\", \"--reload\"])\n    process.call([\"firewall-cmd\", \"--permanent\", \"--add-service\",\n                  \"node-plugin\"])\n    process.check_call([\"firewall-cmd\", \"--reload\"])\n\n\ndef process_plugins():\n    LOGGER.debug(\"Handling plugin firewall rules\")\n    for plugin in glob(PLUGIN_DIR + \"*.firewall\"):\n        plugin_files.append(plugin)\n\n    for f in plugin_files:\n        LOGGER.debug(\"Parsing firewall rules: %s\" % f)\n        with open(f) as i:\n            conf = i.readlines()\n        for line in conf:\n            if not line.startswith(\"#\"):\n                port, proto = line.strip().split(\",\")\n                fw_conf.append((port, proto))\n\n    for i in fw_conf:\n        LOGGER.debug(\"Opening firewall ports: %s\" % str(i))\n        port, proto = i\n        open_port(port, 
proto)\n\n\nif __name__ == \"__main__\":\n    process_plugins()\n","repo_name":"oVirt/ovirt-node","sub_path":"src/ovirt/node/utils/firewall.py","file_name":"firewall.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"} +{"seq_id":"17734928498","text":"import os\nimport tensorflow as tf\nimport skimage\nimport skimage.io\nimport skimage.color\nimport skimage.measure\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# Task specification.\nflags.DEFINE_string('hr_flist', '',\n                    'file_list containing the training data.')\nflags.DEFINE_string('prediction_dir', '',\n                    'directory containing the predicted images.')\n\n\ndef compute_psnr(prediction, ground_truth):\n    pred_y = skimage.color.rgb2ycbcr(prediction)[:,:,0:1]\n    gt_y = skimage.color.rgb2ycbcr(ground_truth)[:,:,0:1]\n    return skimage.measure.compare_psnr(pred_y, gt_y, data_range=255)\n\ndef main(_):\n    flist = open(FLAGS.hr_flist, 'r').read().splitlines()\n    total_images = 0\n    total_psnr = .0\n    for fname in flist:\n        pred_fname = os.path.join(FLAGS.prediction_dir,\n                                  os.path.basename(fname))\n        gt_image = skimage.io.imread(fname)\n        pred_image = skimage.io.imread(pred_fname)\n        psnr = compute_psnr(pred_image, gt_image)\n        print(\"Image name: %s, PSNR=%f\" % (os.path.basename(fname), psnr))\n        total_images += 1\n        total_psnr += psnr\n\n    print(\"Average PSNR is %f\" % (total_psnr/total_images))\n\nif __name__ == '__main__':\n    tf.app.run()\n","repo_name":"WeiHan3/dsrn","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"32"} +{"seq_id":"30008153027","text":"\ndef calculate_damage(your_type, opponent_type, attack, defense):\n    win={'fire':['water'],'grass':['fire'],'water':['grass','electric'],'electric':[]}\n    if your_type in win[opponent_type]:\n        effectiveness=2\n    elif opponent_type in win[your_type]:\n        effectiveness=0.5\n    else:\n        effectiveness=1\n    return 50 * (attack / defense) * effectiveness\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"pzQXHMqizBmaLDCHc_7.py","file_name":"pzQXHMqizBmaLDCHc_7.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14868456606","text":"import pook\nimport os\nfrom page_loader.html_loader import download\n\n\nclass FakeClient:\n    def __init__(self, data):\n        self.text = data\n        self.content = data\n\n    def get(self, url):\n        return self\n\n\ndef test_download(tmpdir):\n    file_name = 'tests/fixtures/test_file.html'\n    with open(file_name) as file:\n        data = file.read()\n    client = FakeClient(data)\n    test_dir = tmpdir.mkdir(\"sub\")\n    file_path = download('https://www.test.ru', test_dir, client=client)\n    with open(file_path) as new_file:\n        new_data = new_file.read()\n    assert new_data != data\n    assert file_path == os.path.join(test_dir, 'www-test-ru.html')\n    assert 'www-test-ru_files' in os.listdir(test_dir)\n    assert 'www-test-ru-image.jpeg' in os.listdir(os.path.join(test_dir, 'www-test-ru_files'))\n\n\n@pook.on\ndef test_download_http(tmpdir):\n    pook.get(\n        'https://www.google.com',\n        reply=200,\n        response_json='Nice'\n    )\n    test_dir = tmpdir.mkdir(\"sub\")\n    file_path = download('https://www.google.com', test_dir)\n    with open(file_path) as new_file:\n        new_data = new_file.read()\n    assert new_data == 
'Nice\\n'\n","repo_name":"IoninMark/python-project-lvl3","sub_path":"tests/test_download.py","file_name":"test_download.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19424353409","text":"import socket \n\nIP = \"127.0.0.1\" \nPORT = 20001\nBUFFER = 1024\n\nsock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\nsock.bind((IP, PORT))\n\nprint(\"UDP server up and listening on \" + IP + \":\" + str(PORT)) \n\nwhile(True):\n bytesAddressPair = sock.recvfrom(BUFFER)\n message = bytesAddressPair[0]\n address = bytesAddressPair[1]\n clientMsg = \"Message from Client:{}\".format(message) \n clientIP = \"Client IP Address:{}\".format(address) \n print(clientMsg) \n print(clientIP)\n sock.sendto(str.encode(\"aAAAAAAAAAAAA\"), address) \n","repo_name":"benthic-mmo/metaverse_client","sub_path":"crates/session/tests/test_udp/test_udp.py","file_name":"test_udp.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"16990116096","text":"def SwapFileData():\r\n file1=input(\"Enter your file name : \")\r\n file2=input(\"Enter your file name : \")\r\n\r\n with open(file1,'r') as sampleA:\r\n data_sampleA=sampleA.read()\r\n\r\n with open(file2, 'r') as sampleB:\r\n data_sampleB=sampleB.read()\r\n\r\n with open(file1, 'w') as sampleA:\r\n sampleA.write(data_sampleB)\r\n\r\n with open(file2, 'w') as sampleB:\r\n sampleB.write(data_sampleA)\r\n\r\n \r\nSwapFileData() \r\n","repo_name":"Taimoor23/C98-functions","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4366561930","text":"# Ankit Bagde - 17cs30009\n# Task 1 - Dataset Generation\n\nimport pandas as pd \nimport numpy as np\n\n# read csv\ndata_a = pd.read_csv(\"../data/winequality-red.csv\", sep=';')\ndata_b = pd.read_csv(\"../data/winequality-red.csv\", sep=';')\nprint(\"data size is : \"+str(data_a.shape))\n\n######## dataset A\n\n# quality attribute\ndata_a['quality'] = [0 if x<=6 else 1 for x in data_a['quality']]\n\n# min-max scaling\nfor i in data_a:\n\tif(i!='quality'):\n\t\tmin_ele = np.min(data_a[i])\n\t\tmax_ele = np.max(data_a[i])\n\t\tdata_a[i] = (data_a[i]-min_ele)/(max_ele - min_ele)\n\n# save csv as dataset_A.csv\ndata_a.to_csv('../data/dataset_A.csv', index=False)\nprint(\"Dataset A generated. 
File saved as dataset_A.csv in 'data' folder\")\n\n######## dataset B\n\n# quality attribute\ndata_b['quality'] = [0 if x<5 else 1 if x==5 or x==6 else 2 for x in data_b['quality']]\n\n# Z-score normalization\nfor i in data_b:\n\tif(i!='quality'):\n\t\tmean = np.mean(data_b[i])\n\t\tstd = np.std(data_b[i])\n\t\tdata_b[i] = (data_b[i]-mean)/std\n\n# bin segregation\nfor i in data_b:\n\tif(i!='quality'):\n\t\tmin_ele = np.min(data_b[i])\n\t\tmax_ele = np.max(data_b[i])\n\t\tdiff = (max_ele - min_ele)/4\n\t\tdata_b[i] = [0 if x 80:\n            index_list.append(j)\n            added_index.append(j)\n    \n    print(index_list)\n    \n    \n    for index in index_list:\n        copy_list[index]=count\n        \n    count+=1\n    \n    \n    return copy_list\n    \n    \n    \n    \n\n\n\n\n\n\n\ndef get_cosine(vec1, vec2):\n    intersection = set(vec1.keys()) & set(vec2.keys())\n    numerator = sum([vec1[x] * vec2[x] for x in intersection])\n\n    sum1 = sum([vec1[x]**2 for x in vec1.keys()])\n    sum2 = sum([vec2[x]**2 for x in vec2.keys()])\n    denominator = math.sqrt(sum1) * math.sqrt(sum2)\n\n    if not denominator:\n        return 0.0\n    else:\n        return float(numerator) / denominator\n\ndef text_to_vector(text):\n    words = WORD.findall(text)\n    return Counter(words) \n    \n\n\n\n\n\n\ndef calc_distance(x1,y1,a,b,c):\n    \n    d= abs((a*x1 + b*y1 +c)) / (math.sqrt(a*a + b*b))\n    return d\n\n\n\n\n\ncapability_dict= {'Purchase Order': 13, 'Slot Assignment': 11} #Input Dict for mapping capability\nWORD = re.compile(r'\\w+')\n\n\n\n\n\n\n\n#Getting all the execution Block Structures\ndataframe= pd.read_csv(r'C:\\Users\\u65988\\cluster_latest.csv')\ndataframe= dataframe[['Block ID', 'Expression Type ID']]\n#dataframe.drop_duplicates(keep='first', inplace= True)\ndataframe = dataframe.dropna(axis=0, subset=['Block ID'])\ndataframe= dataframe.fillna('')\nstructure_list=[]\nstructure=''\nstructure_dict={}\n\nfor index in range(0,len(dataframe)):\n    structure_dict[dataframe['Block ID'].iloc[index]]=''\n\nfor index in range(0,len(dataframe)-1):\n    if dataframe['Block ID'].iloc[index]== dataframe['Block ID'].iloc[index+1]:\n        structure += \" \" + str((dataframe['Expression Type ID'].iloc[index]))\n    else:\n        structure += \" \" + str((dataframe['Expression Type ID'].iloc[index]))\n        structure_list.append(structure)\n        structure_dict[dataframe['Block ID'].iloc[index]]= structure\n        structure=''\n\nprint(\"There are \", len(structure_list),\" no of Execution Blocks\")\n\n\n\n\n\n#Mapping Blocks with Data Operations\ndataframe= pd.read_csv(r'C:\\Users\\u65988\\cluster_latest.csv')\ndataframe= dataframe.sort_values(by= ['Block ID'])\ndataframe= dataframe.fillna('')\ndata_operation=''\ndata_source_name=''\nBlock_Data_Map= {}\nBlock_Data_Name_Map= {}\n\nfor index in range(0,len(dataframe)-1):\n    \n    if dataframe['Block ID'].iloc[index]== dataframe['Block ID'].iloc[index+1]:\n        \n        if dataframe['Data Operation Type'].iloc[index]!= '':\n            \n            data_operation+= dataframe['Data Operation Type'].iloc[index]+ \", \"\n            data_source_name+= dataframe['Data Source Name'].iloc[index]+ \", \"\n        \n    else:\n        \n        data_operation+= dataframe['Data Operation Type'].iloc[index]\n        data_source_name+= dataframe['Data Source Name'].iloc[index]\n        Block_Data_Map[dataframe['Block ID'].iloc[index]]= data_operation\n        Block_Data_Name_Map[dataframe['Block ID'].iloc[index]]= data_source_name\n        data_operation=''\n        data_source_name=''\n\n\n\n\n\n    \ndataframe= pd.DataFrame(index= range(0,len(Block_Data_Map)), columns=['Block ID', 'Data Operation','Data Source Name', 'Block Structure'])\nBlock_List= list(Block_Data_Map.keys())\nfor index in range(0, len(Block_Data_Map)):\n    
\n block_id= Block_List[index]\n dataframe['Block ID'].iloc[index]= block_id\n dataframe['Data Operation'].iloc[index]= Block_Data_Map[block_id]\n dataframe['Block Structure'].iloc[index]= structure_dict[block_id]\n dataframe['Data Source Name'].iloc[index]= Block_Data_Name_Map[block_id]\n\n\n\n\n\n#Label Encoding\ndataframe2= copy.deepcopy(dataframe)\ndataframe= dataframe[['Block ID', 'Data Operation','Data Source Name', 'Block Structure']]\n#dataframe= dataframe.drop_duplicates()\n#dataframe= dataframe.reset_index(drop= True)\ndf= copy.deepcopy(dataframe)\nn= copy.deepcopy(df)\nlabel_encoder = preprocessing.LabelEncoder() \ndf['Data Operation']= label_encoder.fit_transform(df['Data Operation'])\ndf['Block Structure']= label_encoder.fit_transform(df['Block Structure'])\ndf['Data Source Name']= label_encoder.fit_transform(df['Data Source Name'])\n\n\n\n \ndata= df.values\n\n\n\n\n \n\n\n\n\n#Finding Optimum K value\n# =============================================================================\n# import matplotlib.pyplot as plt\n# import math\n# \n# max_length= len(df)-1\n# dist_points_from_cluster_center=[]\n# K = range(1,max_length)\n# for no_of_clusters in K:\n# k_model= KMeans(n_clusters= no_of_clusters)\n# k_model.fit(data)\n# dist_points_from_cluster_center.append(k_model.inertia_)\n# \n# plt.plot(K, dist_points_from_cluster_center)\n# plt.plot([K[0], K[max_length-2]], [dist_points_from_cluster_center[0], dist_points_from_cluster_center[max_length-2]], 'ro-' )\n# \n# a= dist_points_from_cluster_center[0]- dist_points_from_cluster_center[max_length-2]\n# b= K[max_length-2] - K[0]\n# c1= K[0] * dist_points_from_cluster_center[max_length-2]\n# c2= K[max_length-2] * dist_points_from_cluster_center[0]\n# c= c1-c2\n# \n# distance_of_points_from_line = []\n# for k in range(max_length-1):\n# distance_of_points_from_line.append(calc_distance(K[k], dist_points_from_cluster_center[k], a, b, c ))\n# \n# plt.plot(K, distance_of_points_from_line)\n# \n# print(\"Optimum Value of K is \", int(distance_of_points_from_line.index(max(distance_of_points_from_line))+1))\n# k = int(distance_of_points_from_line.index(max(distance_of_points_from_line))+1)\n# \n# =============================================================================\n\n\n\n\n\n\n\n\n\n# KMeans Clustering\n\n\n# =============================================================================\n# model = KMeans()\n# visualizer = KElbowVisualizer(\n# model, k=(20,30), metric='calinski_harabasz', timings=False, locate_elbow=False\n# ) \n# visualizer.fit(data)\n# \n# print(visualizer.elbow_value_)\n# =============================================================================\ndataframe['Cluster Center']=np.nan \ndataframe2['Cluster Center']=np.nan \n \n\nmodel = KMeans(n_clusters=int(len(df)/50), random_state=0).fit(data) \nfor index in range(0,len(df)):\n \n cluster_center = model.predict(data[index].reshape(1,-1))\n #dataframe['Cluster Center'].loc[index]= int(cluster_center) \n dataframe2['Cluster Center'].loc[index]= int(cluster_center) \n# =============================================================================\n# X=data\n# sil_score_max = -1\n# from sklearn.metrics import silhouette_score\n# for n_clusters in range(2,157):\n# model = KMeans(n_clusters = n_clusters, init='k-means++', max_iter=100, n_init=1)\n# labels = model.fit_predict(X)\n# sil_score = silhouette_score(X, labels)\n# print(\"The average silhouette score for %i clusters is %0.2f\" %(n_clusters,sil_score))\n# if sil_score > sil_score_max:\n# sil_score_max = 
sil_score\n#             best_n_clusters = n_clusters\n# =============================================================================\n\n\n\n\n\n\n\n#Capability Association\ndataframe2['Capability']= np.nan\nCU_Cluster_map= {}\n\n\n\nfor index in range(0,len(dataframe2)):\n    \n    CU_Cluster_map[dataframe2['Block ID'].iloc[index]]= dataframe2['Cluster Center'].iloc[index]\n    \nfor item in capability_dict.keys():\n    \n    cluster_center= CU_Cluster_map[capability_dict[item]]\n    \n    for index in range(0, len(dataframe2)):\n        \n        if dataframe2['Cluster Center'].iloc[index]== cluster_center:\n            \n            dataframe2['Capability'].iloc[index]= item\n            \n\n\nprint(\"Hey\")\n\n\n","repo_name":"rahulmonish/Python-Scripts","sub_path":"notusedmuch/Block_Clustering.py","file_name":"Block_Clustering.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"707548612","text":"import json\nfrom django.http.response import JsonResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom .models import Usuario\nfrom django.views.decorators.csrf import csrf_exempt\n\nclass UsuarioView(View):\n\n    @method_decorator(csrf_exempt)\n    def dispatch(self, request, *args, **kwargs):\n        return super().dispatch(request, *args, **kwargs)\n\n    def get(self,request,id_usuario=0):\n        if (id_usuario>0):\n            usuarios=list(Usuario.objects.filter(id_usuario=id_usuario).values())\n            if len(usuarios) > 0:\n                usuario=usuarios[0]\n                datos={'message':\"Success\",'usuarios':usuario}\n            else:\n                datos={'message':\"usuarios no encontrados...\"}\n            return JsonResponse(datos)\n        else:\n            usuarios=list(Usuario.objects.values())\n            if len(usuarios)>0:\n                datos={'message':\"Success\",'usuarios':usuarios}\n            else:\n                datos={'message':\"usuarios no encontrados...\"}\n            return JsonResponse(datos)\n    \n    def post(self, request):\n        #print(request.body)\n        jd=json.loads(request.body)\n        #print(jd)\n        Usuario.objects.create(\n            usuario=jd['usuario'],\n            contrasena=jd['contrasena'],\n            emp_id_emp_id=jd['emp_id_emp_id'],\n        )\n        datos={'message':\"Success\"}\n        return JsonResponse(datos)\n\n    def put(self,request,id_usuario):\n        jd=json.loads(request.body)\n        usuarios = list(Usuario.objects.filter(id_usuario=id_usuario).values())\n        if len(usuarios) > 0:\n            usuario=Usuario.objects.get(id_usuario=id_usuario)\n            usuario.usuario=jd['usuario']\n            usuario.contrasena=jd['contrasena']\n            usuario.emp_id_emp=jd['emp_id_emp']\n            usuario.save()\n            datos = {'message':\"Success\"}\n        else:\n            datos = {'message':\"usuario not found\"}\n        return JsonResponse(datos)\n\n    def delete(self,request, id_usuario):\n        usuarios = list(Usuario.objects.filter(id_usuario=id_usuario).values())\n        if len(usuarios) > 0:\n            Usuario.objects.filter(id_usuario=id_usuario).delete()\n            datos = {'message' : \"Success\"}\n        else:\n            datos = {'message' : \"usuario no encontrado\"}\n        return JsonResponse(datos)","repo_name":"lucasvasquezr21/petplus_api","sub_path":"app_usuario/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39617274186","text":"from pyitab.analysis.states.gsbs import GSBS\nfrom pyitab.io.loader import DataLoader\n\nfrom pyitab.analysis.configurator import AnalysisConfigurator\nfrom pyitab.preprocessing import SampleSlicer, FeatureSlicer\nfrom pyitab.analysis.roi import RoiAnalyzer\nimport os\nimport numpy as np\n\n\nconf_file = 
\"/home/robbis/mount/permut1/sherlock/bids/bids.conf\"\nloader = DataLoader(configuration_file=conf_file, \n                    loader='bids', task='preproc', \n                    bids_task=['day1'])\n\nsubjects = ['marcer', 'matsim', 'simpas']\nfor s in subjects:\n\n    ds = loader.fetch(subject_names=[s], \n                      prepro=[SampleSlicer(trial_type=np.arange(1, 32))])\n\n    roi_analyzer = RoiAnalyzer(analysis=GSBS())\n    roi_analyzer.fit(ds, roi=['aal'], kmax=50)\n\n    roi_analyzer.save()\n\n\n\n################## Resting state ##########################\nconf_file = path = \"/home/robbis/mount/permut1/sherlock/bids/bids.conf\"\nloader = DataLoader(configuration_file=conf_file,\n                    data_path=\"/home/robbis/mount/permut1/sherlock/bids/\",\n                    subjects='participants.tsv',\n                    loader='bids', task='preproc', \n                    bids_task=['day1'])\n\nsubjects = ['marcer', 'matsim', 'simpas']\nfor s in subjects:\n\n    ds = loader.fetch(subject_names=[s])","repo_name":"robbisg/mvpa_itab_wu","sub_path":"scripts/carlo/sherlock/gsbs.py","file_name":"gsbs.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4655312085","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Data Cleaning\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\n\n# In[2]:\n\n\n# Set the working directory & Read the data\nos.chdir(\"C:\\\\Sunder\\\\DataScience\\\\MachineLearning\\\\Projects\\\\DataAnalytics\\\\FinancialInvestment\")\n\n\n# In[3]:\n\n\n# Read the .csv files\nrounds = pd.read_csv(\"rounds2.csv\", encoding = \"ISO-8859-1\")\ncompanies = pd.read_csv(\"companies.txt\", sep=\"\\t\", encoding = \"ISO-8859-1\")\n\n\n# In[4]:\n\n\n# Number of records in rounds\nprint(rounds.shape)\n\n\n# In[5]:\n\n\n# Number of records in companies\nprint(companies.shape)\n\n\n# In[6]:\n\n\n# Column information of rounds\nrounds.info()\n\n\n# There are null values in **funding_round_code** and **raised_amount_usd** columns \n\n# In[7]:\n\n\n# Column information for companies\ncompanies.info()\n\n\n# In[8]:\n\n\n# identify the unique number of permalinks in companies which could be the unique key (Primary key) for each record\nlen(companies.permalink.unique())\n\n\n# In[9]:\n\n\n# converting all permalinks to lowercase\ncompanies['permalink'] = companies['permalink'].str.lower()\ncompanies.head()\n\n\n# Let's check whether these permalink values are present in the rounds dataframe\n\n# In[10]:\n\n\n# find the number of unique permalink values in the rounds dataframe\nlen(rounds.company_permalink.unique())\n\n\n# There are more unique permalinks than 66368 which shows that they are either additional permalink data \n# or they could be due to lowercase/uppercase combinations of a given permalink\n\n# In[11]:\n\n\n# Convert permalink to lowercase\nrounds['company_permalink'] = rounds['company_permalink'].str.lower()\nlen(rounds.company_permalink.unique())\n\n\n# In[12]:\n\n\n# Identify the two extra permalinks in rounds\nrounds.loc[~rounds['company_permalink'].isin(companies['permalink']), :]\n\n\n# From above, it can be seen that there are some weird characters in the **company_permalink** field. 
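# [Editor's note] A compact, self-contained sketch of the join-key cleaning pattern this
# notebook applies (lowercase, then drop non-ascii bytes, then check membership with isin).
# The frames, the column name `key`, and the sample values below are illustrative
# assumptions, not taken from the rounds2/companies data.
import pandas as pd

left = pd.DataFrame({"key": ["/Organization/Acme", "/organization/Beta\xc2\xa0"]})
right = pd.DataFrame({"key": ["/organization/acme", "/organization/beta"]})

# lowercase, then strip any non-ascii bytes so the keys compare equal
for frame in (left, right):
    frame["key"] = (frame["key"].str.lower()
                    .str.encode("utf-8").str.decode("ascii", "ignore"))

# rows in `left` whose key is missing from `right` (empty after cleaning)
print(left.loc[~left["key"].isin(right["key"])])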
We need to determine \n# the character encoding while reading the .csv file \n\n# In[14]:\n\n\nimport chardet\n\nrawdata = open('rounds2.csv', 'rb').read()\nresult = chardet.detect(rawdata)\nprint(result['encoding'])\n\n\n# We will try with utf-8 encoding and then decode with ascii\n\n# In[15]:\n\n\nrounds['company_permalink'] = rounds.company_permalink.str.encode('utf-8').str.decode('ascii', 'ignore')\nrounds.loc[~rounds['company_permalink'].isin(companies['permalink']), :]\n\n\n# In[16]:\n\n\n# Let's check for unique company_permalink in rounds df\nlen(rounds.company_permalink.unique())\n\n\n# There is the same number of unique companies in both the rounds and companies df. We can check \n# if the encoding problem exists in the **companies** df \n\n# In[17]:\n\n\ncompanies.loc[~companies['permalink'].isin(rounds['company_permalink']), :]\n\n\n# In[18]:\n\n\n# Removing the special characters from **companies** df\n\ncompanies['permalink'] = companies.permalink.str.encode('utf-8').str.decode('ascii', 'ignore')\n\n\n# In[19]:\n\n\ncompanies.loc[~companies['permalink'].isin(rounds['company_permalink']), :]\n\n\n# The data is now clean for these 2 dataframes and we shall write it out to a file before proceeding for further cleansing\n\n# In[20]:\n\n\n# write rounds file\nrounds.to_csv(\"rounds_clean.csv\", sep=',', index=False)\n\n# write companies file\ncompanies.to_csv(\"companies_clean.csv\", sep='\\t', index=False)\n\n\n# # Data Cleaning (Part-2)\n\n# In[22]:\n\n\n# read the new, decoded csv files\nrounds = pd.read_csv(\"rounds_clean.csv\", encoding = \"ISO-8859-1\")\ncompanies = pd.read_csv(\"companies_clean.csv\", sep=\"\\t\", encoding = \"ISO-8859-1\")\n\n# Check for the unique permalink values\nprint(\"Companies in companies df \", len(companies.permalink.unique()))\nprint(\"Companies in rounds df \",len(rounds.company_permalink.unique()))\n\n# Companies in rounds and not in companies df (Ideally should be zero)\nprint(\"Companies in rounds but not in companies df \", len(rounds.loc[~rounds['company_permalink'].isin(companies['permalink']), :]))\n\n\n# ## Identifying Missing Values\n\n# In[23]:\n\n\n# missing values in companies df\ncompanies.isnull().sum()\n\n\n# In[24]:\n\n\n# missing values in rounds df\nrounds.isnull().sum()\n\n\n# In[25]:\n\n\n# We shall merge the two dataframes based on permalink and company_permalink\nmaster = pd.merge(companies, rounds, how=\"inner\", left_on=\"permalink\", right_on=\"company_permalink\")\nmaster.head()\n\n\n# In[26]:\n\n\n# drop the duplicate column \"company_permalink\"\nmaster = master.drop(['company_permalink'], axis=1) \n\n\n# In[28]:\n\n\n# Identifying the null values in columns\nmaster.isnull().sum()\n\n\n# In[29]:\n\n\n# summing up the missing values (column-wise) and displaying percentage of NaNs \nround(100*(master.isnull().sum()/len(master.index)), 2)\n\n\n# The column ```funding_round_code``` is useless (with about 73% missing values). 
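# [Editor's note] An aside on the "mostly missing, so drop it" rule used just above: it can
# be expressed generically as a missing-share threshold on columns. The frame `df_demo` and
# the 0.5 cutoff are illustrative assumptions, not part of the original analysis.
import numpy as np
import pandas as pd

df_demo = pd.DataFrame({"a": [1, np.nan, np.nan, np.nan],
                        "b": [1, 2, 3, np.nan]})
threshold = 0.5
kept = df_demo.loc[:, df_demo.isnull().mean() <= threshold]
print(kept.columns.tolist())  # ['b'] -- 'a' is 75% missing and gets dropped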
\n# Also, from the business objectives given, the columns ```homepage_url```, ```founded_at```, \n# ```state_code```, ```region``` and ```city``` need not be used and we shall drop these columns.\n\n# In[30]:\n\n\ndrop_columns = ['funding_round_code', 'homepage_url', 'founded_at', 'state_code', 'region', 'city']\nmaster = master.drop(drop_columns, axis=1)\n\n\n# In[31]:\n\n\nmaster.head()\n\n\n# In[32]:\n\n\n# summing up the missing values (column-wise) and displaying percentage of NaNs for the remaining columns\nround(100*(master.isnull().sum()/len(master.index)), 2)\n\n\n# - The column ```raised_amount_usd``` is an important column, since that is the number we want to analyse (compare means, sums, etc.), \n# and hence this column needs to be carefully treated \n# - The column ```country_code``` will be used for country-wise analysis, and \n# - The column ```category_list``` will be used to merge the dataframe with the main categories.\n\n# Identifying the missing values in ```raised_amount_usd```.\n\n# In[33]:\n\n\n# summary stats of raised_amount_usd\nmaster['raised_amount_usd'].describe()\n\n\n# - The mean is somewhere around USD 10 million, while the median is only about USD 100m. \n# - The min and max values are also quite wide apart (22 billion)\n# - Hence, we will not be able to impute and hence removing the NaN values\n\n# In[34]:\n\n\n# removing NaNs in raised_amount_usd\nmaster = master[~np.isnan(master['raised_amount_usd'])]\nround(100*(master.isnull().sum()/len(master.index)), 2)\n\n\n# In[35]:\n\n\ncountry_codes = master['country_code'].astype('category')\n\n# displaying frequencies of each category\ncountry_codes.value_counts()\n\n\n# Now, we can either delete the rows having country_code missing (about 6% rows), or we can impute them by USA. \n# Since this share (about 6%) is quite small, it is better to just remove the rows.\n\n# In[36]:\n\n\n# removing rows with missing country_codes\nmaster = master[~pd.isnull(master['country_code'])]\n\n# look at missing values\nround(100*(master.isnull().sum()/len(master.index)), 2)\n\n\n# Since the category_list has only a very small % of missing values, we can remove those rows\n\n# In[37]:\n\n\n# removing rows with missing category_list values\nmaster = master[~pd.isnull(master['category_list'])]\n\n# look at missing values\nround(100*(master.isnull().sum()/len(master.index)), 2)\n\n\n# In[38]:\n\n\n# Write the cleaned data frame to a .csv file\nmaster.to_csv(\"master_df.csv\", sep=',', index=False)\n\n\n# In[39]:\n\n\nmaster.info()\n\n\n# In[40]:\n\n\n# Finding how many rows have been retained\n100*(len(master.index) / len(rounds.index))\n\n\n# # Funding Type Analysis\n\n# We shall compare the funding amounts across the funding types. We would also need to impose the constraint that the investment amount should be between 5 and 15 million USD.\n# We will choose the funding type such that the average investment amount falls in this range.\n\n# In[43]:\n\n\nmaster['funding_round_type'].unique()\n\n\n# Among the above funding types, we shall be considering only 4 of them (**venture**, **angel**, **seed**, **private_equity**)\n\n# In[44]:\n\n\ndf = master[(master.funding_round_type == \"venture\") | \n        (master.funding_round_type == \"angel\") | \n        (master.funding_round_type == \"seed\") | \n        (master.funding_round_type == \"private_equity\") ]\n\n\n# We have to compute a representative value of the funding amount for each type of investment. 
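# [Editor's note] A toy illustration of the upcoming mean-vs-median choice: a few extreme
# values pull the mean up while leaving the median nearly untouched. The numbers below are
# made up for demonstration and are not from the investment data.
import pandas as pd

toy = pd.DataFrame({"funding_round_type": ["seed", "seed", "seed", "venture", "venture"],
                    "raised_amount_usd": [1e5, 2e5, 5e7, 8e6, 1.2e7]})
print(toy.groupby("funding_round_type")["raised_amount_usd"].agg(["mean", "median"]))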
\n# We can either choose the mean or the median; we shall take that call after we have a look at the\n# distribution of **raised_amount_usd** to get a sense of how the data is spread.\n\n# In[45]:\n\n\n# distribution of raised_amount_usd\nsns.boxplot(y=df['raised_amount_usd'])\nplt.yscale('log')\nplt.show()\n\n\n# There are a few extreme values. We shall find the actual median and mean values through summary metrics\n\n# In[47]:\n\n\n# Summary Metrics\ndf['raised_amount_usd'].describe()\n\n\n# The mean is 9.5 million and the median is 2.0 million, which is a significant difference. We shall check the \n# mean and median for each individual **funding type**\n\n# In[48]:\n\n\n# comparing summary stats across four categories\nsns.boxplot(x='funding_round_type', y='raised_amount_usd', data=df)\nplt.yscale('log')\nplt.show()\n\n\n# In[49]:\n\n\n# compare the mean and median values across categories\ndf.pivot_table(values='raised_amount_usd', columns='funding_round_type', aggfunc=[np.median, np.mean])\n\n\n# The mean and median for each funding_type vary widely. For eg:- For **Private Equity**, the mean is 74 million \n# and the median is 20 million. \n\n# We shall go ahead with the **median** as the statistic, since there are many values which are extreme on the \n# higher end, thereby pulling up the mean\n\n# In[51]:\n\n\n# compare the median investment amount across the types\ndf.groupby('funding_round_type')['raised_amount_usd'].median().sort_values(ascending=False)\n\n\n# Among the various funding types, only one of them, **Venture**, falls in the range of 5 million to 15 million, \n# which is the required range\n\n# ## Country Analysis\n# \n# We shall compare the total investment amounts across countries. We will filter the data for only the **venture** type investments and then compare the **total investment** across countries\n\n# In[53]:\n\n\n# filter the df for venture type investments\ndf = df[df.funding_round_type==\"venture\"]\n\n# group by country codes and compare the total funding amounts\ncountry_wise_total = df.groupby('country_code')['raised_amount_usd'].sum().sort_values(ascending=False)\nprint(country_wise_total)\n\n\n# We shall take the top 5 countries and in that we will filter out non-English speaking countries\n\n# In[54]:\n\n\ntop5countries = country_wise_total[:5]\ntop5countries\n\n\n# Among the above, China is a non-English speaking country and hence, we shall consider the other top 3. \n# They are **US**, **GBR** and **India**\n\n# In[55]:\n\n\n# filtering for the top three countries\ndf = df[(df.country_code=='USA') | (df.country_code=='GBR') | (df.country_code=='IND')]\ndf.head()\n\n\n# In[56]:\n\n\n# boxplot to see distributions of funding amount across countries\nplt.figure(figsize=(10, 10))\nsns.boxplot(x='country_code', y='raised_amount_usd', data=df)\nplt.yscale('log')\nplt.show()\n\n\n# ## Sector Analysis\n\n# We need to extract the main sector using the column category_list. We shall create a new column \"main category\". As stated in the document, the first token before the separator \"|\" shall be considered as the main category\n\n# In[58]:\n\n\n# extracting the main category\ndf.loc[:, 'main_category'] = df['category_list'].apply(lambda x: x.split(\"|\")[0])\ndf.head()\n\n\n# In[59]:\n\n\n# drop the category_list column\ndf = df.drop('category_list', axis=1)\ndf.head()\n\n\n# Now, we'll read the ```mapping.csv``` file and merge the main categories with its corresponding column. 
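# [Editor's note] The main category was extracted above with an apply/lambda; a vectorized
# equivalent (same result, assuming no NaNs in the column) is shown below on a toy Series.
import pandas as pd

s = pd.Series(["Analytics|Big Data", "Finance", "Health|Wellness|Apps"])
main_category = s.str.split("|").str[0]
print(main_category.tolist())  # ['Analytics', 'Finance', 'Health']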
\n\n# In[61]:\n\n\n# read mapping file\nmapping = pd.read_csv(\"mapping.csv\", sep=\",\")\nmapping.head()\n\n\n# In[62]:\n\n\n# missing values in mapping file\nmapping.isnull().sum()\n\n\n# In[63]:\n\n\n# remove the row with missing values\nmapping = mapping[~pd.isnull(mapping['category_list'])]\nmapping.isnull().sum()\n\n\n# Since we need to merge the mapping file with the main dataframe (df), we shall convert the common column to lowercase in both.\n\n# In[64]:\n\n\n# converting common columns to lowercase\nmapping['category_list'] = mapping['category_list'].str.lower()\ndf['main_category'] = df['main_category'].str.lower()\n\n\n# To be able to merge all the ```main_category``` values with the mapping file's ```category_list``` column, all the values in the ```main_category``` column should be present in the ```category_list``` column of the mapping file.\n\n# In[65]:\n\n\n# values in main_category column in df which are not in the category_list column in mapping file\ndf[~df['main_category'].isin(mapping['category_list'])]\n\n\n# Ideally, the above value should have been zero. We shall look at the values which are present in the mapping file but not in the main dataframe df.\n\n# In[66]:\n\n\n# values in the category_list column which are not in main_category column \nmapping[~mapping['category_list'].isin(df['main_category'])]\n\n\n# From above we see that the value **analytics** is misspelled as **a0lytics**. Similarly for alternative medicine also\n\n# In[67]:\n\n\n# replacing '0' with 'na'\nmapping['category_list'] = mapping['category_list'].apply(lambda x: x.replace('0', 'na'))\nprint(mapping['category_list'])\n\n\n# Once again check for missing values between df and mapping file (for categories)\n\n# In[129]:\n\n\ndf[~df['main_category'].isin(mapping['category_list'])]\n\n\n# We shall be dropping the above 6 records and proceed ahead with the rest of the records and merge the dataframes (df & mapping)\n\n# In[138]:\n\n\n# merge the dfs\ndf = pd.merge(df, mapping, how='inner', left_on='main_category', right_on='category_list')\ndf.head()\n\n\n# In[139]:\n\n\n# let's drop the category_list column since it is the same as main_category\ndf = df.drop('category_list', axis=1)\ndf.head()\n\n\n# In[140]:\n\n\ndf.info()\n\n\n# We shall now merge the last 9 columns to a single column \"Sub Category\"\n\n# In[141]:\n\n\n# store the value and id variables in two separate arrays\n\n# store the value variables in one Series\nvalue_vars = df.columns[9:18]\n\n# take the setdiff() to get the rest of the variables\nid_vars = np.setdiff1d(df.columns, value_vars)\n\nprint(value_vars, \"\\n\")\nprint(id_vars)\n\n\n# In[142]:\n\n\n# convert into long\nlong_df = pd.melt(df, \n        id_vars=list(id_vars), \n        value_vars=list(value_vars))\n\nlong_df.head()\n\n\n# Since the value of \"0\" is not useful for us, we shall only use those rows where the value is \"1\".\n# Later we shall drop the value column \n\n# In[143]:\n\n\nlong_df = long_df[long_df['value']==1]\nlong_df = long_df.drop('value', axis=1)\n\n\n# In[144]:\n\n\nlen(long_df)\n\n\n# In[145]:\n\n\n# renaming the 'variable' column\nlong_df = long_df.rename(columns={'variable': 'sector'})\n\n\n# The dataframe now contains only venture type investments in countries USA, IND and GBR, and \n# we have mapped each company to one of the eight main sectors (named 'sector' in the dataframe). 
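# [Editor's note] A minimal reproduction of the wide-to-long step above: one-hot sector
# columns are melted into a single `sector` column and only the value==1 rows are kept.
# The toy columns and values here are assumptions for illustration only.
import pandas as pd

wide = pd.DataFrame({"permalink": ["/a", "/b"], "Cleantech": [1, 0], "Health": [0, 1]})
long = pd.melt(wide, id_vars=["permalink"], value_vars=["Cleantech", "Health"],
               var_name="sector")
long = long[long["value"] == 1].drop("value", axis=1)
print(long)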
\n# \n# We can now compute the sector-wise number and the amount of investment in the three countries.\n\n# In[146]:\n\n\n# We are interested only in the investment range between 5 and 15 million USD. Hence filtering for it\ndf = long_df[(long_df['raised_amount_usd'] >= 5000000) & (long_df['raised_amount_usd'] <= 15000000)]\n\n\n# In[147]:\n\n\n# groupby country, sector and compute the count and sum\ndf.groupby(['country_code', 'sector']).raised_amount_usd.agg(['count', 'sum'])\n\n\n# In[155]:\n\n\n# plotting sector-wise count and sum of investments in the three countries\nplt.figure(figsize=(16, 25))\n\nplt.subplot(2, 1, 1)\ndf['raised_amount_usd_million'] = df['raised_amount_usd'].div(1000000)\np = sns.barplot(x='sector', y='raised_amount_usd_million', hue='country_code', data=df, estimator=np.sum)\np.set_xticklabels(p.get_xticklabels(),rotation=30)\nplt.title('Total Invested Amount (USD) (million)')\n\nplt.subplot(2, 1, 2)\nq = sns.countplot(x='sector', hue='country_code', data=df)\nq.set_xticklabels(q.get_xticklabels(),rotation=45)\nplt.title('Number of Investments')\n\n\nplt.show()\n\n\n# Thus, the top country in terms of the number of investments (and the total amount invested) is the USA. \n# The sectors 'Others', 'Social, Finance, Analytics and Advertising' and 'Cleantech/Semiconductors' are the most heavily invested ones.\n# \n# In case you don't want to consider 'Others' as a sector, 'News, Search and Messaging' is the next best sector.\n\n# ## Rough Work Area\n\n# In[136]:\n\n\nmapping[mapping['category_list'].str.contains(\"rac\")]\n\n\n# In[127]:\n\n\nmapping.loc[(mapping.category_list == 'enterprise 2.na'),'category_list']='enterprise 2.0'\n\n\n# In[128]:\n\n\ndf.loc[(df.main_category == 'specialty retail'),'main_category'] = 'custom retail'\n\n","repo_name":"SunderRaman/MachineLearningProjects","sub_path":"DataAnalytics/FinancialInvestmentAnalysis/FinancialInvestmentAnalysis.py","file_name":"FinancialInvestmentAnalysis.py","file_ext":"py","file_size_in_byte":16325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"726002104","text":"\"\"\"script by John Wigg\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\nimport datetime\r\nimport time\r\nimport ephem\r\n\r\ndef get_state(planet):\r\n    \"\"\" returns the next rising, setting and current status of a given planet\"\"\"\r\n    data = [0] * 3\r\n    obs.date = datetime.datetime.utcnow()\r\n    data[1] = obs.next_rising(planet)\r\n    data[2] = obs.next_setting(planet)\r\n    data[0] = data[1] > data[2]\r\n    return data\r\n\r\nobs = ephem.Observer()\r\nobs.lat = '50.9'\r\nobs.long = '11.6'\r\n\r\nmercury = ephem.Mercury()\r\nvenus = ephem.Venus()\r\nmars = ephem.Mars()\r\njupiter = ephem.Jupiter()\r\nsaturn = ephem.Saturn()\r\nuranus = ephem.Uranus()\r\nneptune = ephem.Neptune()\r\n\r\nwhile 1:\r\n    print(obs.date, \"UTC+0\")\r\n    print(get_state(mercury))\r\n    print(get_state(venus))\r\n    print(get_state(mars))\r\n    print(get_state(jupiter))\r\n    print(get_state(saturn))\r\n    print(get_state(uranus))\r\n    print(get_state(neptune))\r\n    print(\"-------------------------------------------------------------------\")\r\n    time.sleep(10)\r\n","repo_name":"CaptainProton42/planet-pi","sub_path":"planets.py","file_name":"planets.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6565564392","text":"import json\nimport pygame\n\nfrom player import Player\nfrom game_stuff import *\n\n\npygame.init()\n\n# Доступные для 
размещения на уровне объекты\navailable_objects = {\n \"Player\": [Player, \"all_the_groups\"],\n \"Box\": [Box, \"walls\"],\n \"PlatformLeft\": [PlatformLeft, \"walls\"],\n \"PlatformRight\": [PlatformRight, \"walls\"],\n \"PlatformMiddle\": [PlatformMiddle, \"walls\"]\n}\n\n\ndef get_available_objects():\n return available_objects\n\n\n# Получение матрицы уровня\ndef get_level(path):\n l_level = None\n with open(path, \"r\") as level:\n l_level = json.load(level)\n level.close()\n return l_level\n\n\n# Запись изменения уровня\ndef change_level(path, level_data):\n with open(path, \"w\") as level:\n json.dump(level_data, level)\n level.close()\n\n\n# Загрузка уровня\ndef load_level(path, groups_data):\n level_data = get_level(path)\n for key in level_data:\n # Объект\n obj = available_objects[level_data[key][0]][0]\n # Группа или группы\n obj_group = groups_data \\\n if available_objects[level_data[key][0]][1] == \"all_the_groups\" else \\\n groups_data[available_objects[level_data[key][0]][1]]\n # Координаты\n obj_coord = (int(key.split()[0]), int(key.split()[1]))\n # Именованные аргументы\n obj_kwargs = level_data[key][1]\n # Создание объекта\n obj(obj_group, obj_coord, **obj_kwargs)\n","repo_name":"Lucian-y02/Project-A","sub_path":"Silver_Box/level_updater.py","file_name":"level_updater.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"23334332966","text":"#!/usr/bin/python\nfrom ansible.module_utils.basic import AnsibleModule\nimport subprocess\n\n\ndef run(command):\n result = {}\n try:\n output = subprocess.check_output(command, shell=True, text=True)\n result['stdout'] = output\n result['changed'] = True\n except subprocess.CalledProcessError as e:\n result['stderr'] = str(e)\n result['failed'] = True\n\n return result\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n key_url=dict(required=True, type='str'),\n repo_url=dict(required=True, type='str'),\n distribution_codename=dict(required=True, type='str'),\n repo_name = dict(required=True, type='str')\n )\n )\n\n key_url = module.params['key_url']\n repo_url = module.params['repo_url']\n codename = module.params['distribution_codename']\n repo_name = module.params['repo_name']\n\n commands = [\n f\"curl {key_url} | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/apt.custom_key.gpg >/dev/null\",\n f\"sudo sh -c 'echo \\\"deb {repo_url} {codename}-{repo_name} main\\\" > /etc/apt/sources.list.d/{repo_name}.list'\",\n \"sudo apt update\"\n ]\n\n results = []\n for command in commands:\n result = run(command)\n results.append(result)\n\n module.exit_json(results=results)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jannik99/install_apt_key_and_repo-ansible-module","sub_path":"library/install_apt_key_and_repo.py","file_name":"install_apt_key_and_repo.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9413499674","text":"# -*- coding: utf-8 -*-\n\"\"\"Модуль с фикстурами для теста, проверяющего, что пользователь находится на странице Opencart\"\"\"\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver import ChromeOptions, FirefoxOptions\n\n\ndef pytest_addoption(parser):\n \"\"\"Функция, содержащая в себе параметры для передачи в командную строку\"\"\"\n parser.addoption(\n \"--browser\",\n action=\"store\",\n default=\"Internet Explorer\",\n help=\"Browser name\")\n\n 
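# [Editor's note] The Ansible helper module above shells out with `shell=True`, which its
# curl|gpg|tee pipelines genuinely require; for commands that do not need a shell pipeline,
# a sketch of the safer list-argument form (illustrative command, not from the module) is:
import subprocess

result = subprocess.run(["echo", "hello"], capture_output=True, text=True, check=False)
print(result.returncode, result.stdout.strip())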
parser.addoption(\n \"--url\",\n action=\"store\",\n default=\"http://localhost\",\n help=\"This is request url\")\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef driver(request):\n \"\"\"Функция, запускающая разные браузеры, в зависимости от того, какой будет выбран через командную строку\"\"\"\n browser = request.config.getoption(\"--browser\")\n if browser == 'firefox':\n capabilities = webdriver.DesiredCapabilities.FIREFOX\n options = FirefoxOptions()\n options.add_argument(\"--headless\")\n wd = webdriver.Firefox(capabilities=capabilities, options=options)\n elif browser == 'chrome':\n options = ChromeOptions()\n options.add_argument(\"--headless\")\n wd = webdriver.Chrome(options=options)\n else:\n capabilities = webdriver.DesiredCapabilities.INTERNETEXPLORER\n wd = webdriver.Ie(capabilities=capabilities)\n wd.maximize_window()\n request.addfinalizer(wd.quit)\n return wd\n\n\n@pytest.fixture\ndef url_opencart(request):\n \"\"\"Функция для передачи url через командную строку\"\"\"\n base_url = request.config.getoption(\"--url\")\n return base_url\n","repo_name":"procyon-x/ui_tests","sub_path":"first_opencart_test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16350164918","text":"\n\nTEST_DATA = \"\"\"[({(<(())[]>[[{[]{<()<>>\n[(()[<>])]({[<{<<[]>>(\n{([(<{}[<>[]}>{[]{[(<()>\n(((({<>}<{<{<>}{[]{[]{}\n[[<[([]))<([[{}[[()]]]\n[{[{({}]{}}([{[{{{}}([]\n{<[[]]>}<{[{[{[]{()[[[]\n[<(<(<(<{}))><([]([]()\n<{([([[(<>()){}]>(<<{{\n<{([{{}}[<[[[<>{}]]]>[]]\"\"\"\n\nSCORE_MAP = {\n ')': 3,\n ']': 57,\n '}': 1197,\n '>': 25137\n}\n\nCOMPLETION_MAP = {\n ')': 1,\n ']': 2,\n '}': 3,\n '>': 4\n}\n\nBRACKETS_MAP = {\n '(': ')',\n '[': ']',\n '{': '}',\n '<': '>'\n}\n\nOPENING_BRACKETS = ['(', '[', '{', '<']\nCLOSING_BRACKETS = [')', ']', '}', '>']\n\n\ndef find_corruption_score(input_str):\n data_list = input_str.splitlines()\n stack = [[] for _ in range(len(data_list))]\n score = 0\n for idx, line in enumerate(data_list):\n for item in line:\n if item in OPENING_BRACKETS:\n stack[idx].append(item)\n elif item in CLOSING_BRACKETS:\n popped = stack[idx].pop()\n if BRACKETS_MAP[popped] != item:\n score += SCORE_MAP[item]\n\n return score\n\n\ndef find_median_score(input_str):\n data_list = input_str.splitlines()\n stack = [[] for _ in range(len(data_list))]\n corrupt_indices = []\n scores_list = []\n for idx, line in enumerate(data_list):\n for item in line:\n if item in OPENING_BRACKETS:\n stack[idx].append(item)\n elif item in CLOSING_BRACKETS:\n popped = stack[idx].pop()\n if BRACKETS_MAP[popped] != item:\n corrupt_indices.append(idx)\n\n for idx, item in enumerate(stack):\n if idx in corrupt_indices:\n continue\n score = 0\n for chr in item[::-1]:\n score = (score * 5) + (COMPLETION_MAP[BRACKETS_MAP[chr]])\n scores_list.append(score)\n return (sorted(scores_list))[int(len(scores_list)/2)]\n # return score\n\n\n# print(find_median_score(TEST_DATA))\nwith open('input.in', 'r') as f:\n print(find_median_score(f.read()))\n","repo_name":"DawoudSheraz/advent-of-code-2021","sub_path":"Day 10/syntax_scoring.py","file_name":"syntax_scoring.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23477287816","text":"'''\nhttps://www.acmicpc.net/problem/2579\n계단 오르기\n[풀이]\n1. 현재 시점 i에서 i-1번째 시점과 i-2번째 시점의 값을 비교한다.\n2. 
i-1번째 시점애서 올 때는 i-2번째 시점을 지나지 않았다는 가정이 필요하다.\n=> 따라서, dp[i-1] 이 아닌, dp[i-3]부터 따지게 된다.\n'''\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nstairs = [0] + [int(input()) for _ in range(n)]\ndp = [0] + [sum(stairs[:2])] + [sum(stairs[:3])] + [0] * (n-2)\nfor i in range(3, n+1):\n a = dp[i-3] + stairs[i-1]\n b = dp[i-2]\n dp[i] = max(a, b) + stairs[i]\nprint(dp[n])\n","repo_name":"sangmandu/SangSangPlus","sub_path":"Algorithm/SANGMIN/2579.py","file_name":"2579.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"17785177788","text":"from sqlite3 import IntegrityError\nfrom App.models import Word\nfrom App.database import db\nfrom random import randint\n\n\ndef get_all_words_as_list():\n words = Word.query.all()\n if not words:\n return []\n return words\ndef wordify(word):\n return Word(word,None,None)\n\ndef word_exists(word):\n database = get_all_words_as_list()\n w = wordify(word)\n for word in database:\n if(word.__eq__(word)):\n return True\n return False\ndef add_word(word,partsOfSpeech,meaning):\n new_word = Word(word,partsOfSpeech,meaning)\n try:\n db.session.add(new_word) \n db.session.commit()\n except:\n db.session.rollback\n print('Word failed to add')\n print('Word added')\n\ndef get_random_word():\n words = get_all_words_as_list()\n index = randint(0,len(words))\n return words[index]\n","repo_name":"KyrianCodez/Vordle","sub_path":"App/controllers/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33495894049","text":"import sys\nimport math\n\nclass Binaerheap:\n def __init__(self):\n self.heap = []\n self.comps = 0\n self.swaps = 0\n \n def parentOf(self, i):\n return math.floor((i-1) / 2)\n \n def leftOf(self, i):\n return 2 * i + 1\n \n def rightOf(self, i):\n return 2 * i + 2\n\n def insert(self, i):\n index = len(self.heap)\n self.heap.append(i)\n while self.heap[self.parentOf(index)] > i and index > 0:\n self.comps += 2\n self.heap[index] = self.heap[self.parentOf(index)]\n self.heap[self.parentOf(index)] = i\n index = self.parentOf(index) \n\n # tatt fra heapq, endret til aa telle compares og swaps + objektorientert + noen endringer\n def heappop(self):\n lastelt = self.heap.pop()\n self.comps += 1\n if self.heap:\n returnitem = self.heap[0]\n self.heap[0] = lastelt\n self.swaps += 1\n self.siftup(0)\n return returnitem\n return lastelt\n\n def siftdown(self, startpos, pos):\n newitem = self.heap[pos]\n while pos > startpos:\n self.comps += 1\n parentpos = self.parentOf(pos)\n parent = self.heap[parentpos]\n if newitem < parent:\n self.heap[pos] = parent\n self.swaps += 1\n pos = parentpos\n continue\n break\n self.heap[pos] = newitem\n\n def siftup(self, pos):\n endpos = len(self.heap)\n startpos = pos\n newitem = self.heap[pos]\n childpos = self.leftOf(pos)\n while childpos < endpos:\n self.comps += 1\n rightpos = childpos + 1\n if rightpos < endpos and not self.heap[childpos] < self.heap[rightpos]:\n self.comps += 1\n childpos = rightpos\n self.heap[pos] = self.heap[childpos]\n self.swaps += 1\n pos = childpos\n childpos = self.leftOf(pos)\n self.heap[pos] = newitem\n self.siftdown(startpos, pos)\n\n\ndef heapSort(arr): # ikke in-place\n heap = Binaerheap()\n \n for element in arr:\n heap.insert(element)\n\n sortertArr = []\n for _ in range(0, len(arr)):\n sortertArr.append(heap.heappop())\n return [sortertArr, heap.comps, 
heap.swaps]\n\ndef lesInput():\n arr = []\n \n input_fil = open(sys.argv[1], \"r\") # krever at filnavnet skrives i terminalen ved kjoering av program\n for linje in input_fil:\n arr.append(int(linje))\n input_fil.close()\n \n output_fil = open(sys.argv[1].split(\"\\\\\")[-1] + \"_heap.out\", \"w\")\n for element in heapSort(arr)[0]:\n output_fil.write(str(element) + \"\\n\")\n output_fil.close()\n\nif __name__ == \"__main__\":\n lesInput()","repo_name":"gremble0/university","sub_path":"IN2010/Oblig2/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20184246124","text":"#!/bin/python\n#coding:utf-8\n\nimport os\n\nCONF_FILE='./m1.conf'\nPIC='dest1.png'\n\nline_num = 0\nfor line in file(CONF_FILE, 'r').readlines():\n line = line.strip()\n line_num = line_num + 1\n if len(line) == 0:\n continue\n\n if line[0] == '#':\n continue\n\n if line_num == 1:\n (width, height) = line.split('x', 2)\n cmd = 'convert -size %dx%d xc:none %s' % (int(width), int(height), PIC)\n os.system(cmd)\n else:\n (px, py, pic) = line.split(' ', 3)\n cmd = 'composite -geometry +%d+%d %s %s %s' % (int(px), int(py), pic, PIC, PIC)\n os.system(cmd)\n","repo_name":"modouRPG/MoDouClient","sub_path":"maptool/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"31209776121","text":"import sys\nimport math\nimport random\nimport matplotlib.pyplot as plt\n\n\n\nclass Individual:\n\tdef __init__(self, cromosome, genome, fitness, sel_prob):\n\t\tself.cromosome = cromosome\n\t\tself.genome = genome\n\t\tself.fitness = fitness\n\t\tself.sel_prob = sel_prob\n\n\tdef __repr__(self):\n\t\treturn \"x: \" + str(self.cromosome) + \", f: \" + str(self.fitness) + \", selection prob: \" + str(self.sel_prob) + '\\n'\n\n#-x^2 + x + 2\ndef func1(x):\n\treturn -(x*x) + x + 2\n\n\ndef fitness(cromosome, domain, genome_len):\n\tx = (domain[1] - domain[0]) / (2**genome_len - 1) * cromosome + domain[0]\t#actual coordinate\n\treturn func1(x)\n\n\n#returns smallest element bigger than/equal to target\ndef bisect_left(array, left, right, target):\t#left, right inclusive\n\ttop = left\n\twhile left <= right:\n\t\tmid = int((left + right)/2)\n\t\tif(target > array[mid]):\n\t\t\tif mid == 0:\n\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\tleft = mid + 1\n\t\t\t\tmid = left\n\t\telse:\n\t\t\tif mid == left:\n\t\t\t\tbreak\n\t\t\telif(target <= array[mid]):\n\t\t\t\tright = mid - 1\n\t\t\t\tmid = right\n\t\t\telse:\n\t\t\t\tbreak\n\treturn mid\n\n\ndef selection(population, C):\n\n\t#make sure to add min to all values from now on\n\t#calculate fitness_sum\n\tfitness_sum = 0\n\tfor individual in population:\n\t\tfitness_sum += individual.fitness - C\n\n\n\t#calculate probabilities as well as an array of sums for them\n\tprob_sums = [0]\n\tfor individual in population:\n\t\tindividual.sel_prob = (individual.fitness - C) / fitness_sum\n\t\t# print(individual)\n\t\tprob_sums.append(prob_sums[-1] + individual.sel_prob)\n\n\t# print(sum(i.sel_prob for i in population))\n\t# print(prob_sums)\n\n\n\t#proportional selection\n\tselected = []\n\tfor itr in range(len(population)):\n\t\t#roulette\n\t\tu = random.uniform(0, 1)\t#depending on rounding this should be [0, 1)\n\t\tchosen = bisect_left(prob_sums, 0, len(population), u)\t#pop_size cause we also have 0 at the beginning\n\t\t# print(\">>>>>\", population[chosen - 
1])\n\t\tselected.append(population[chosen - 1])\t\t#currently no need for a copy\n\n\treturn selected\n\n\ndef swap_genome(a, b):\n\tsplit_point = random.randint(0, len(a.genome) - 1)\n\ta.genome[:split_point], b.genome[:split_point] = b.genome[:split_point], a.genome[:split_point]\n\n\tvalue = 0\n\tfor bit in a.genome:\n\t\tvalue = (value << 1) | bit\n\ta.crossover = value\n\n\tvalue = 0\n\tfor bit in b.genome:\n\t\tvalue = (value << 1) | bit\n\tb.crossover = value\n\n\ndef crossover(population, cross_prob):\n\tprev_individual = None\n\tfor individual in population:\n\t\tu = random.uniform(0, 1)\n\t\tif(1):\n\t\t\tif(prev_individual == None):\n\t\t\t\tprev_individual = individual\n\t\t\telse:\n\t\t\t\tswap_genome(prev_individual, individual)\n\t\t\t\tprev_individual = None\n\n\ndef mutation(population, mut_prob):\n\tfor individual in population:\n\t\tu = random.uniform(0, 1)\n\t\tif(u < mut_prob):\n\t\t\tpos = random.randint(0, len(individual.genome) - 1)\n\t\t\tFalse if individual.genome[pos] == True else True # bro what THE FUCK IS THIS?\n\t\t\tvalue = 0\n\t\t\tfor bit in individual.genome:\n\t\t\t\tvalue = (value << 1) | bit\n\t\t\tindividual.crossover = value\n\n\n\ndef main():\n\tdata = open(sys.argv[1], 'r').read().splitlines()\n\n\tpop_size = int(data[0])\n\tdomain = [int(val) for val in data[1].split(' ')]\n\tprecision = int(data[2])\n\tcross_prob = float(data[3])\n\tmut_prob = float(data[4])\n\tgenerations = int(data[5])\n\n\n\t#calculate cromosome size\n\tgenome_len = math.ceil(math.log( (domain[1] - domain[0]) * (10**precision) ))\n\n\n\t#generate random 1st generation\n\tpopulation = [Individual(0, [False] * genome_len, 0, 0) for _ in range(pop_size)]\n\n\tmin_fitness = 0\t#lets save the most garbage individual as well\n\tfor individual in population:\n\t\tindividual.cromosome = random.getrandbits(genome_len)\t#generate random cromosome\n\n\t\tbits = individual.cromosome\n\t\tfor i in range(genome_len):\n\t\t\tindividual.genome[i] = bool(bits % 2)\n\t\t\tbits //= 2\n\n\t\tindividual.fitness = fitness(individual.cromosome, domain, genome_len)\t\t\t\t#calculate its fitness\n\n\t\tif(individual.fitness < min_fitness):\n\t\t\tmin_fitness = individual.fitness\n\n\tfirst = 1\n\tfor generation in range(generations):\n\t\tif(first == 1):\n\t\t\t#heavy print\n\t\t\tpopulation = selection(population, min_fitness)\n\n\t\t\tprint(population)\n\n\t\t\tcrossover(population, cross_prob)\n\n\t\t\tmutation(population, mut_prob)\n\n\t\t\t#after were finished\n\t\t\tmin_fitness = 0\t#reset the poor guy\n\t\t\tfor individual in population:\n\t\t\t\tif(individual.fitness < min_fitness):\n\t\t\t\t\tmin_fitness = individual.fitness\n\n\t\t\tfirst = 0\n\n\t\telse:\n\t\t\tavg = sum(fitness(individual.cromosome, domain, genome_len) for individual in population) / pop_size\n\t\t\tprint(\"avg: \", avg)\t#this is worthless because of fp arithmetic\n\t\t\tplt.plot([generation], [avg], marker='o', markersize=3, color=\"red\")\n\n\t\t\tprint(\"max: \", fitness(max(population, key = lambda individual : fitness(individual.cromosome, domain, genome_len)).cromosome, domain, genome_len), '\\n')\n\n\t\t\tpopulation = selection(population, min_fitness)\n\n\t\t\tcrossover(population, cross_prob)\n\n\t\t\tmutation(population, mut_prob)\n\n\t\t\t#after were finished\n\t\t\tmin_fitness = 0\t#reset the poor guy\n\t\t\tfor individual in population:\n\t\t\t\tif(individual.fitness < min_fitness):\n\t\t\t\t\tmin_fitness = individual.fitness\n\n\n\n\tplt.ylabel('average 
\tplt.ylabel('average value')\n\tplt.xlabel('generation')\n\tplt.show()\n\n\n\nif(__name__ == \"__main__\"):\n\tmain()\n","repo_name":"Pridestalkerr/snippets","sub_path":"uni/tap/t5/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"41909450185","text":"class Garden:\n PLANTS = {\"V\": \"Violets\", \"R\": \"Radishes\", \"C\": \"Clover\", \"G\": \"Grass\"}\n\n DEFAULT_STUDENTS = [\n \"Alice\",\n \"Bob\",\n \"Charlie\",\n \"David\",\n \"Eve\",\n \"Fred\",\n \"Ginny\",\n \"Harriet\",\n \"Ileana\",\n \"Joseph\",\n \"Kincaid\",\n \"Larry\",\n ]\n\n def __init__(self, diagram, students=DEFAULT_STUDENTS):\n self.row_1, self.row_2 = diagram.split()\n self.students = sorted(students)\n\n def plants(self, student):\n result = \"\"\n start_idx = self.students.index(student) * 2\n end_idx = start_idx + 2\n result += self.row_1[start_idx:end_idx]\n result += self.row_2[start_idx:end_idx]\n\n return [self.PLANTS[letter] for letter in result]\n","repo_name":"Infodpsoft1418/python-exercism","sub_path":"exercises/kindergarten-garden/kindergarten_garden.py","file_name":"kindergarten_garden.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13470269843","text":"import argparse\nimport base64\nimport logging\nimport os\nimport smtplib\nimport socket\nimport textwrap\nfrom email.mime.text import MIMEText\n\n\ndef ping(dest: str):\n \"\"\"Check if site is reachable\"\"\"\n domain = dest.split('/')[0]\n reachable = 1 # non-zero means the ping failed or never ran\n\n try:\n reachable = os.system(\"ping -c 1 \" + domain)\n except OSError:\n print(\"Ping error: \")\n return reachable == 0\n\n\ndef print_to_file(phone_list: list):\n \"\"\"Print phone numbers to output file\"\"\"\n try:\n with open('../phone_list_output.txt', 'w') as f:\n for line in phone_list:\n f.write(line + '\\n')\n except OSError:\n logging.exception(\"Writing to output file failed.\")\n else:\n logging.debug(\"Printed phone numbers to output file.\")\n\n\nclass Args:\n arg_dict = {}\n\n def __init__(self):\n self.parse_args()\n\n def parse_args(self):\n parser = argparse.ArgumentParser(prog='scraper.py',\n allow_abbrev=True,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent(''' \\\n phone-scraper.\n --------------------------------\n Anonymously scrapes onlinesim.ru for new usable phone numbers.\n '''))\n\n parser.add_argument('-e', '--email',\n type=str,\n default=argparse.SUPPRESS,\n help='add your email address to receive a notification when a new phone number appears.')\n\n parser.add_argument('-m', '--maxage',\n type=int,\n default=argparse.SUPPRESS,\n help='pulls only phone numbers that are younger than the specified maximum age value in minutes.')\n\n parser.add_argument('-r', '--repeat',\n type=int,\n default=argparse.SUPPRESS,\n help='specify the repetition interval in minutes for fetching phone numbers.')\n\n try:\n args = parser.parse_args()\n self.arg_dict = vars(args)\n except argparse.ArgumentError:\n logging.exception('Argument Error.')\n print('Argument Error.')\n except AttributeError:\n logging.exception('Attribute Error.')\n raise AttributeError\n\n if 'email' not in list(self.arg_dict):\n return 1\n\n\nclass Email:\n __sender = \"\"\n receiver = \"\"\n __port = 587\n\n def __init__(self, receiver, phone_list):\n self.receiver = receiver\n self.phone_list = phone_list\n\n
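 # NOTE (added): establish_connection() below runs the usual smtplib STARTTLS handshake\n # (ehlo -> starttls -> ehlo -> login) before handing the drafted message over to\n # __send_email(); the login secret is decoded from the base64 blob shipped in the code.\n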
 def establish_connection(self):\n self.__sender = 'python.phonescraper@gmx.de'\n msg = self.__draft_email()\n\n msg['From'] = self.__sender\n msg['To'] = self.receiver\n\n try:\n with smtplib.SMTP('smtp.gmx.com', self.__port) as connection:\n connection.ehlo()\n connection.starttls()\n connection.ehlo()\n s = base64.b64decode('XkNPekkybGUxNSZNRlNoNjZweEIK').decode('ascii')\n s = s.rstrip(\"\\n\")\n connection.login(self.__sender, s)\n self.__send_email(connection, msg)\n except socket.gaierror:\n logging.exception('Mail server unreachable.')\n raise socket.gaierror\n\n def __draft_email(self):\n tidy_phone_list = \"\"\n for num in self.phone_list:\n tidy_phone_list += num + '\\n'\n subject = \"Python Phonescraper Notification\"\n body = f\"\\nSelect a number and check SMS codes on https://onlinesim.ru/en\\n\\nDon't forget to use TOR.\\n\\n\" \\\n f\"The following phone numbers have been recently added:\\n\\n{tidy_phone_list}\"\n message = f\"{subject}\\n\\n{body}\"\n msg = MIMEText(message)\n\n self.phone_list.clear()\n del tidy_phone_list\n\n return msg\n\n def __send_email(self, connection, msg):\n connection.sendmail(msg['From'], msg['To'], msg.as_string())\n print(\"Sent email\")\n logging.debug(f\"Email sent from {self.__sender} to {self.receiver}\")\n","repo_name":"ssnjr2002/phone-scraper","sub_path":"phonescraper/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8751528238","text":"def find_low_points(floor_map):\n \"\"\"\n Finds all points on a floor map which are lower than all adjacent points\n :param floor_map: list of same-length-lists defining coordinates\n :return: list of low-point values\n \"\"\"\n low_points = []\n for i, row in enumerate(floor_map):\n for j, col in enumerate(row):\n value = floor_map[i][j]\n if value == 0: # 0 is always a low point\n low_points.append(0)\n elif value < 9: # 9 is never a low point:\n first_row = i == 0\n last_row = i == len(floor_map) - 1\n first_col = j == 0\n last_col = j == len(row) - 1\n\n neighbours = filter(lambda val: val is not None,\n [ # Get number above\n floor_map[i - 1][j] if not first_row else None,\n # Get number to the left\n floor_map[i][j - 1] if not first_col else None,\n # Get number below\n floor_map[i + 1][j] if not last_row else None,\n # Get number to the right\n floor_map[i][j + 1] if not last_col else None\n ])\n if all([value < n for n in neighbours]):\n low_points.append(value)\n return low_points\n\n\ndef find_basins(floor_map):\n \"\"\"\n Groups all points on a floor map into basins and calculates the size of the basin\n Basins are groups of numbers bordered by nines (which are not part of any basin)\n :param floor_map: list of same-length-lists defining coordinates\n :return: list of int, representing basin sizes\n \"\"\"\n basins = {}\n basin_map = [[None] * len(floor_map[0]) for _ in range(len(floor_map))] # one fresh list per row; multiplying the outer list would alias every row\n last_basin_id = 0\n\n def mark_basin(i, j, basin_id, value):\n if not basins.get(basin_id):\n basins[basin_id] = []\n basins[basin_id].append(value)\n basin_map[i][j] = basin_id\n\n for i, row in enumerate(floor_map):\n for j, col in enumerate(row):\n value = floor_map[i][j]\n\n if value == 9: # we are not part of a basin\n basin_map[i][j] = None\n else:\n first_row = i == 0\n first_col = j == 0\n\n # Only look up and left\n
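 # NOTE (added): because the scan runs top-left to bottom-right, only the up and left\n # neighbours can already carry a basin id -- the usual two-pass connected-component\n # labelling trick, with label merges handled inline below.\n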
 previous_neighbours = list(filter(lambda val: val is not None,\n [ # Get number above\n basin_map[i - 1][j] if not first_row else None,\n # Get number to the left\n basin_map[i][j - 1] if not first_col else None\n ]))\n if len(previous_neighbours) == 0:\n # Neither neighbour is part of a basin -- start tracking a new one\n last_basin_id += 1\n mark_basin(i, j, last_basin_id, value)\n\n elif len(previous_neighbours) == 1 or \\\n previous_neighbours[0] == previous_neighbours[1]:\n # One or both of our neighbours are in a basin: add ourselves to the same basin\n mark_basin(i, j, previous_neighbours[0], value)\n elif previous_neighbours[0] != previous_neighbours[1]:\n # Our neighbours which were thought to be in different basins are actually\n # Part of the same basin: merge them and add ourselves\n mark_basin(i, j, previous_neighbours[0], value)\n basins[previous_neighbours[0]] += basins[previous_neighbours[1]]\n basins.pop(previous_neighbours[1])\n for x in range(0, i + 1): # relabel every cell visited so far, not just the rectangle above-left\n for y in range(0, len(basin_map[0])):\n if basin_map[x][y] == previous_neighbours[1]:\n basin_map[x][y] = previous_neighbours[0]\n\n return sorted([len(basin_vals)\n for basin_vals in basins.values()], reverse=True)\n\n\ndef sum_risk_levels(values):\n \"\"\"\n Returns the sum of the risk levels for provided points\n risk level is calculated as the point value + 1\n :param values: list of integers\n :return: int\n \"\"\"\n return sum([1 + v for v in values])\n\n\ndef read_floor_map(file):\n rows = [row.strip() for row in file.readlines()]\n return [[int(num) for num in row]\n for row in rows]\n\n\nif __name__ == '__main__':\n with open(\"puzzle_1_input.text\", \"r\") as f:\n floor_map = read_floor_map(f)\n low_points = find_low_points(floor_map)\n print(f\"Total Risk Score = {sum_risk_levels(low_points)}\")\n basins = find_basins(floor_map)\n print(f\"Three biggest basins: {basins[0:3]} (multiplied = {basins[0] * basins[1] * basins[2]})\")","repo_name":"aliceinit/advent-of-code-2021","sub_path":"09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34383056228","text":"import numpy as np\nimport pandas as pd\nimport json\nimport functions.neighborhood as neighborhood\nimport networkx as nx\nimport itertools\nfrom scipy.stats import rankdata\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom skimage.future import graph\nimport sklearn.neighbors\nimport sklearn.metrics\nimport pickle\nimport re\nimport sys\nimport collections\nfrom scipy.stats import halfgennorm, lognorm, expon\n\n# sum two star magnitudes\ndef magsum(m1, m2):\n s = -2.5 * np.log10(10 ** (-m1 * 0.4) + 10 ** (-m2 * 0.4))\n return s\n\n\n# thresh should be list\ndef makestargraph(starfile, thresh, outprefix=''):\n with open(starfile, \"rb\") as f:\n stars = json.load(f)\n suff = \"_\" + str(thresh)\n #nb = neighborhood.makeneighborhood(stars, [thresh])\n nb = neighborhood.makeneighborhood(stars, thresh)\n G, _ = makedistgraph(nb, stars)\n if outprefix != '':\n outfile = outprefix + suff + '.graphml'\n nx.write_graphml(G, outfile)\n return G\n\ndef makestargraph_closenb(starfile, thresh, maxd, outprefix=''):\n with open(starfile, \"rb\") as f:\n stars = json.load(f)\n suff = \"_closenb_\" + str(maxd) + '_' + str(thresh)\n nb = neighborhood.makecloseedges_nb(stars, thresh, maxd)\n G, _ = makedistgraph(nb, stars)\n if outprefix != '':\n outfile = outprefix + suff + '.graphml'\n nx.write_graphml(G, outfile)\n return G\n\ndef namedict(starfile):\n with open(starfile, \"rb\") as f:\n stars = json.load(f)\n named = {}\n for (i, (h,r,d,m,n)) in 
enumerate(stars):\n named[i] = n\n return named \n\n\ndef invhdict(starfile):\n with open(starfile, \"rb\") as f:\n stars = json.load(f)\n invhd = {}\n for (i, (h,r,d,m,n)) in enumerate(stars):\n invhd[h] = i\n return invhd\n\ndef invbayerd(starfile):\n with open(starfile, \"rb\") as f:\n stars = json.load(f)\n invbayerd = {}\n for (i, (h,r,d,m,n)) in enumerate(stars):\n invbayerd[n] = i\n return invbayerd\n\n\n\ndef starinddict(stars):\n sd = {}\n for si, s in enumerate(stars):\n sd[s[0]] = si\n return(sd)\n\n\ndef name2starind(sg, name):\n nodeid = [e['i'] for n, e in sg.nodes(data=True) if e['name'] == name]\n return nodeid[0]\n\n\ndef mstedges(clusters, stargraph, ename = 'd'):\n msts = [[]]\n for c in clusters[1:]:\n msts.append( nx.minimum_spanning_tree(stargraph.subgraph(c), weight=ename ) )\n return msts\n\ndef allmodeledges(clusters, stargraph):\n msts = [[]]\n for c in clusters[1:]:\n msts.append( stargraph.subgraph(c) )\n return msts\n\n# filter out all stars that don't make brightness cutoff\ndef brightclusters(cc, stargraph, brightthresh):\n bcs = []\n for c in cc:\n bc = [ci for ci in list(c) if stargraph.nodes[ci]['m']<= brightthresh]\n #if bc:\n if len(bc) > 1: # drop asterism unless 2 or more stars remain\n bcs.append(set(bc))\n return bcs\n\n\n# print names for stars in clusters\ndef printclusters(cc, stargraph, humanfile = '', outfile = ''):\n cc = np.array(cc)\n scores = np.array([0. for c in cc])\n matchnames = [ '' for c in cc]\n\n handle = open(outfile, \"w\") if outfile else sys.stdout\n\n if humanfile:\n scorefn = scorecluster_wrtsystem\n weights = [1, 0, -1]\n \n with open(humanfile, \"rb\") as hf:\n hall = json.load(hf)\n hnames = np.array(hall[0])\n hsys = hall[1]\n\n for ci, c in enumerate(cc):\n hsysscores = np.array([])\n for hs in hsys:\n csc = scorefn(c, hs, weights=weights)\n hsysscores= np.append(hsysscores, csc)\n\n scores[ci] = np.max(hsysscores)\n mns = hnames[np.argwhere(hsysscores== np.amax(hsysscores)).flatten()]\n matchnames[ci] = ','.join(mns)\n\n sortind = scores.argsort()\n sortind = sortind[::-1]\n scores = scores[sortind]\n matchnames = np.array(matchnames)\n matchnames = matchnames[sortind]\n cc = cc[sortind] \n\n conn_count, not_conn_count = 0, 0\n for ci, c in enumerate(cc):\n namelist = sortnamelist([stargraph.node[ci]['name'] for ci in c])\n subg = stargraph.subgraph(c)\n # move next line inside following if statement to get disconnected groups only\n handle.write( str(ci+1) + ' & ' + str(round(scores[ci],2)) + ' & ' + ', '.join(namelist) + ' & ' + matchnames[ci] + ' \\\\\\\\\\n')\n if c and not nx.is_connected(subg):\n handle.write(' ** ^ is not connected\\n')\n not_conn_count = not_conn_count + 1\n elif c:\n conn_count = conn_count + 1\n\n if handle is not sys.stdout:\n handle.close()\n\n return( conn_count, not_conn_count)\n\n# print consensusscores\ndef printcscores(stars, cc, cscores, mmscores, mscores, outfile = '' ):\n\n handle = open(outfile, \"w\") if outfile else sys.stdout\n\n sd = starinddict(stars)\n for ci, c in enumerate(cc):\n #namelist = [stars[sd[ci]][4] for ci in c if ci in sd]\n namelist = sortnamelist([stars[ci][4] for ci in c])\n cscore = round(cscores[ci], 2)\n mmscore = round(mmscores[ci], 2)\n mscore = round(mscores[ci], 2)\n handle.write(str(ci+1) + ' & ' + str(cscore) + ' & ' + str(mmscore) + ' & '+ str(mscore) +' & ' + ', '.join(namelist) + ' & \\\\\\\\\\n')\n\n if handle is not sys.stdout:\n handle.close()\n\n\n# make networkx graph from sparse matrix, list of stars\ndef makedistgraph(nb, stars):\n G = 
nx.from_scipy_sparse_matrix(nb, edge_attribute='d')\n\n for i, (h, r, d, m, n) in enumerate(stars):\n G.add_node(i, i=i, m=m, ra=r, dec=d, h=h, name=n)\n\n maxd = 0;\n for v1, v2 in G.edges():\n dist = neighborhood.angsep((G.nodes[v1]['ra'], G.nodes[v1]['dec']), (G.nodes[v2]['ra'], G.nodes[v2]['dec']))\n if dist > maxd:\n maxd = dist\n G.add_edge(v1, v2, d=dist)\n\n return G, maxd\n\ndef dropsmalledges(sg, att, smallval, alledge=0):\n smalledges = [(u, v) for u, v, e in sg.edges(data=True) if e[att] < smallval]\n if alledge:\n newsg = sg.copy()\n else:\n newsg = sg\n newsg.remove_edges_from(smalledges)\n return newsg\n\n# set brightest star to magnitude 0\ndef standardizemag(sg):\n mdf = pd.DataFrame.from_dict(dict(sg.nodes.data('m')), orient='index', columns=['m'])\n minmag = np.min(mdf.values)\n for node in sg.nodes():\n currmag = sg.nodes[node]['m']\n sg.add_node(node, mstd=currmag - minmag)\n return\n\n# add invweight\ndef addinvweight(sg):\n for u, v, e in sg.edges(data=True):\n sg.add_edge(u, v, invweight = -e['weight'])\n return\n\n# add edges with avmag\ndef addavmagedges(sg):\n for u, v in sg.edges():\n mstdu = sg.nodes[u]['m']\n mstdv = sg.nodes[v]['m']\n sg.add_edge(u, v, avmag=0.5 * (mstdu + mstdv))\n return\n\n# add edges with maxmag\ndef addmaxmagedges(sg):\n for u, v in sg.edges():\n mstdu = sg.nodes[u]['m']\n mstdv = sg.nodes[v]['m']\n sg.add_edge(u, v, maxmag=np.max([mstdu,mstdv]))\n return\n\n# add m_lr (likelihood ratio for magnitudes)\ndef add_m_lr(mf_lr, sg):\n for n in list(sg.nodes):\n sg.add_node(n, m_lr= mf_lr(sg.nodes[n]['m']))\n return\n\n# add edges with min_m_lr\ndef add_min_m_lr_edges(sg):\n for u, v in sg.edges():\n mstdu = sg.nodes[u]['m_lr']\n mstdv = sg.nodes[v]['m_lr']\n sg.add_edge(u, v, min_m_lr=np.min([mstdu,mstdv]))\n return\n\ndef add_d_lr(df_lr, sg):\n for u, v in sg.edges():\n d = sg.edges[u, v]['d']\n sg.add_edge(u, v, d_lr=df_lr(d))\n return\n\n# convert distance to similarity measure. 
l is weight for exponential transformation\ndef dist2sim(sg, att, l):\n for u, v, e in sg.edges(data=True):\n dist = e[att]\n sim = np.exp(-dist * l)\n newdic = {att + 'sim': sim}\n sg.add_edge(u, v, **newdic)\n return\n\ndef weightatt(sg, att, l):\n for u, v, e in sg.edges(data=True):\n worig = e[att] ** l\n newdic = {att + 'weight': worig}\n sg.add_edge(u, v, **newdic)\n return\n\n# think that this scaling makes no difference\ndef scaleedgeweights(sg, att1, att2):\n return\n\n# combine edge atts\ndef combineedgeatts(sg, atts, cname, ctype='sum'):\n if ctype == 'sum':\n cinit = 0\n cfunc = lambda x, y: x + y\n elif ctype == 'prod':\n cinit = 1\n cfunc = lambda x, y: x * y\n else:\n raise Exception('unexpected combination type: ' + ctype)\n\n for u, v, e in sg.edges(data=True):\n cval = cinit\n for att in atts:\n cval = cfunc(cval, e[att])\n if cval < 1e-20:\n cval = 1e-20\n newdic = {cname: cval}\n sg.add_edge(u, v, **newdic)\n\n return\n\n# make weight attribute\ndef setweight(sg, att):\n for u, v, e in sg.edges(data=True):\n sg.add_edge(u, v, weight=e[att])\n return\n\n# dilate node atts based on local neighborhood\ndef dilatenodes(sg, dnbgraph, att, dtype='orig'):\n # 'orig': original value\n # 'scale_typ': scale original value wrt to typical value in local neighborhood\n # 'lval' : set to value in local neighborhood\n # 'gval': set to global value\n # 'scale_tot': scale original value wrt to total value in local neighborhood\n\n alpha = 1.0\n newatt = att + '_scale'\n newattlocw = att + '_locw'\n # only include nodes above brightthresh -- these are the only ones with e['nn']\n globvals = [e[att] for _, e in sg.nodes(data=True) if e['nn']]\n if dtype == 'scale_tot':\n globval = np.sum(globvals)\n else:\n globval = np.median(globvals)\n\n for u, e in sg.nodes(data=True):\n currattval = e[att]\n if e['nn']:\n # make list of nodes that are within neighborhood of u\n uns = set([u] + [un for _, un in dnbgraph.edges(u)])\n nbvals = [sg.node[un][att] for un in uns]\n if dtype == 'scale_tot':\n nbval = np.sum(nbvals)\n else:\n nbval = np.median(nbvals)\n\n if dtype=='orig':\n newattval = currattval\n elif dtype =='scale_typ' or dtype == 'scale_tot':\n newattval = currattval * (globval/nbval)\n elif dtype == 'lval':\n newattval = nbval\n elif dtype =='gval':\n newattval = globval\n else:\n raise Exception('unknown dilation type')\n\n newdic = {newatt: newattval, newattlocw:(nbval/globval) ** alpha}\n sg.add_node(u, **newdic)\n return\n\n# dilate edges based on local neighborhood\ndef dilateedges(sg, dnbgraph, att, newatt):\n globmedian = np.median([e[att] for u, v, e in sg.edges(data=True)])\n for u, v, e in sg.edges(data=True):\n currattval = e[att]\n # make list of nodes that are within neighborhood of u and v\n unb = set([u, v] + [u1 for _, u1 in dnbgraph.edges(u) if dnbgraph.has_edge(v, u1)])\n nbattvals = []\n for unbnode in unb:\n nbattvals = nbattvals + [sg.get_edge_data(unbnode, nbn2)[att] for _, nbn2 in sg.edges(unbnode) if nbn2 in unb]\n nbmedian = np.median(nbattvals)\n newattval = currattval * (globmedian/nbmedian) ** 1\n newdic = {newatt: newattval}\n sg.add_edge(u, v, **newdic)\n return\n\n# dilate edges based on extremely local neighborhood\ndef dilateedges_pp(sg, att, newatt):\n nodeatt = att + 'node'\n for n in sg.nodes():\n nedges = sg.edges(n)\n nvals = [sg[u][v][att] for u, v in nedges]\n if nvals:\n maxval = np.max(nvals)\n newdic = {nodeatt : maxval}\n sg.add_node(n, **newdic)\n\n for u, v, e in sg.edges(data=True):\n orig = e[att]\n new = orig / 
np.sqrt(sg.node[u][nodeatt] * sg.node[v][nodeatt])\n newdic = {newatt: new}\n sg.add_edge(u, v, **newdic)\n\n return\n\n# alpha: probability at each step that random walk continues\n# thresh: cluster includes nodes with pagerank > thresh\ndef personalpr(sg, ref_node, alpha=0.9, thresh=0.05):\n nnode = sg.number_of_nodes()\n refnoded = dict(zip(range(nnode), itertools.repeat(0, nnode)))\n refnoded[ref_node] = 1\n\n pr = nx.pagerank(sg, alpha=alpha, personalization=refnoded, weight='weight')\n prframe = pd.DataFrame.from_dict(pr, orient='index')\n output_pr = prframe[0].values\n output_weighted = output_pr > thresh\n lc = np.where(output_weighted)\n return set(lc[0].flatten()), output_pr\n\n\n# https://stackoverflow.com/questions/4842613/merge-lists-that-share-common-elements\n# convert fragments (e.g. from personalized pagerank) to single graph with clusters\n\ndef frag_to_graph(l):\n G = nx.Graph()\n for part in l:\n # each sublist is a bunch of nodes\n G.add_nodes_from(part)\n # it also implies a number of edges:\n G.add_edges_from(to_edges(part))\n return G\n\ndef to_edges(l):\n \"\"\" \n treat `l` as a Graph and returns its edges\n to_edges(['a','b','c','d']) -> [(a,b), (b,c),(c,d)]\n \"\"\"\n it = iter(l)\n last = next(it)\n\n for current in it:\n yield last, current\n last = current \n\n# score high if mcs contains something like each cluster in hcs\ndef scoreclusters(mcs, hcs, weights=[+1, 0, -1]):\n allscores = [0 for hc in hcs]\n for hi, hc in enumerate(hcs):\n allscores[hi] = scoresystem_wrtcluster(hc, mcs, weights)\n # bc first human cluster is empty\n #score = np.sum(allscores)\n score = np.mean(allscores)\n droponescore = np.sum(allscores) - np.min(allscores[1:])\n #allscores = [round(a,1) for a in allscores]\n\n return score, droponescore, allscores\n\n# score high if hcs contains something like each cluster in mcs\ndef scoreclusters_mod(mcs, hcs, weights=[+1, 0, -1]):\n allscores = [0 for mc in mcs]\n mcs = llist2lset(mcs)\n for mi, mc in enumerate(mcs):\n allscores[mi] = scorecluster_wrtsystem(mc, hcs, weights)\n # bc first human cluster is empty\n #score = np.sum(allscores)\n score = np.mean(allscores)\n droponescore = 0\n #allscores = [round(a,1) for a in allscores]\n\n return score, droponescore, allscores\n\n# return vector of scores for each culture\ndef scoreworld(mcs, hcslist, weights=[+1, 0, -1], beta=10):\n allscores = [0 for hc in hcslist]\n for hi, hcs in enumerate(hcslist):\n # like recall\n a1, _, _ = scoreclusters_mod(mcs, hcs, weights)\n # like precision\n a2, _, _ = scoreclusters(mcs, hcs, weights)\n if (a1 + a2) == 0:\n allscores[hi] = 0\n # analogous to F-score\n else:\n allscores[hi] = (1 + beta**2) * a1 * a2 / (beta**2*a1 + a2)\n\n score = np.mean(allscores)\n droponescore = np.sum(allscores) - np.min(allscores[1:])\n #allscores = [round(a,1) for a in allscores]\n\n return score, droponescore, allscores\n\n# return vector of scores for each culture\ndef scoreworld_arand(mcs, hcslist, starfile='', bval=[]):\n stargraph = makestargraph(starfile, bval)\n allcs = list(nx.connected_components(stargraph))\n cs = [c for c in allcs if len(c) > 1]\n bigc = cs[0]\n N = stargraph.number_of_nodes()\n mcs_vec = csys2vec(mcs, bigc, N)\n allscores = [0 for hc in hcslist]\n for hi, hcs in enumerate(hcslist):\n hcs_vec = csys2vec(hcs, bigc, N)\n allscores[hi] = sklearn.metrics.adjusted_rand_score(hcs_vec, mcs_vec)\n\n score = np.mean(allscores)\n droponescore = np.sum(allscores) - np.min(allscores[1:])\n return score, droponescore, allscores\n\n# with default weights it's 
better for mc to miss stars in hc than to have extra stars\ndef scorecluster(mc, hc, weights):\n m = set(mc)\n h = set(hc)\n mnoth = m - h\n hnotm = h - m\n mandh = m & h\n score = weights[0] * len(mandh) + weights[1] * len(hnotm) + weights[2] * len(mnoth)\n maxscore = weights[0]*len(hc)\n # finalscore between -1 and 1.\n #finalscore = np.max([score/maxscore, -1])\n finalscore = np.max([score/maxscore, 0])\n return finalscore\n\ndef llist2lset(csystem):\n lset = [ set(c) for c in csystem]\n return lset\n\ndef csys2vec(csystem, bigc, N):\n all = np.zeros(N)\n for ci, c in enumerate(csystem):\n all[list(c)] = ci+1\n return(all[list(bigc)])\n\n# score a system wrt a cluster\n# c is gold standard -- want to give csystem a high score if it contains a cluster that captures c well\n\ndef scoresystem_wrtcluster(c, csystem, weights=[1,0,-1]):\n scores = []\n for sysc in csystem:\n if c.intersection(sysc):\n sysc_score = scorecluster(sysc, c, weights)\n scores.append(sysc_score)\n if scores:\n score = np.max(scores)\n else:\n score = 0\n return(score)\n\n\n# score a cluster wrt a system\n# csystem is gold standard -- want to give c a high score if it is like a cluster in csystem\n\ndef scorecluster_wrtsystem(c, csystem, weights=[1,0,-1]):\n scores = []\n for sysc in csystem:\n if c.intersection(sysc):\n sysc_score = scorecluster(c, sysc, weights)\n scores.append(sysc_score)\n if scores:\n score = np.max(scores)\n else:\n score = 0\n return(score)\n\ndef cluster_insystem(c, csystem, **kwargs):\n score = 0\n for sysc in csystem:\n if c == sysc:\n score = 1\n break\n return score\n\ndef cluster_subsetsystem(c, csystem, **kwargs):\n score = 0\n for sysc in csystem:\n if c.issubset(sysc):\n score = 1\n break\n return score\n\n# scorefn could be \n# scorecluster_wrtsystem\n# cluster_insystem\n# cluster_partofsystem\n\ndef consensus_modelscores(modelfile, csystems, scorefn, weights = [1,0,-1]):\n with open(modelfile, 'rb') as f_outfile:\n modelsys = pickle.load(f_outfile)\n modelsys = modelsys['mcss']\n\n scores = compare_humantomodelsys(modelsys, csystems, scorefn, weights)\n return(scores)\n\n\ndef compare_humantomodelsys(modelsys, csystems, scorefn, weights=[1,0,-1]):\n scores = [ [] for cs in csystems ]\n for sysi, csystemi in enumerate(csystems):\n cscores = []\n for c in csystemi:\n scores_wrtsys = []\n for sysj, csystemj in enumerate(modelsys):\n scores_wrtsys.append(scorefn(c, csystemj, weights=weights))\n # find best match among model systems\n cscores.append(np.max(scores_wrtsys))\n scores[sysi] = cscores\n return(scores)\n\ndef consensusscores(csystems, scorefn, weights = [1,0,-1], summfun = np.mean):\n scores = [ [] for cs in csystems ]\n for sysi, csystemi in enumerate(csystems):\n cscores = [] \n for c in csystemi:\n scores_wrtsys = []\n for sysj, csystemj in enumerate(csystems):\n # previously didn't consider self-matches\n #if sysi != sysj:\n scores_wrtsys.append(scorefn(c, csystemj, weights=weights))\n cscores.append(summfun(scores_wrtsys))\n scores[sysi] = cscores\n return(scores)\n\ndef comparemodelhuman(mcs, humanfile, starfile, weights = [1,0,-1]):\n named = namedict(starfile)\n bw = 0.1\n histbins = np.arange(-1, 1 + bw, bw)\n allscores = []\n with open(humanfile, 'r') as f:\n hd = json.load(f)\n for i, c in enumerate(hd[0]):\n hsys = llist2lset(hd[1][i])\n sysscores = [scoresystem_wrtcluster(hc, mcs, weights=weights) for hc in hsys]\n print('*************')\n print(c)\n for j, sc in enumerate(sysscores):\n clstring = str(round(sc, 2)) + ' ' + '. 
'.join([named[s] for s in hd[1][i][j]] )\n print(clstring)\n\n plt.subplot(4, 6, i + 1)\n plt.hist(sysscores, bins=histbins)\n plt.title(c + ' ' + str(round(np.mean(sysscores), 2)))\n allscores.append(np.mean(sysscores))\n print(\"system mean = \" + str(round(np.mean(allscores), 3)))\n plt.show()\n\n# distance, brightness profiles based on edges in constellation minimum spanning trees\ndef profile_system(clusters, edges, sg):\n ds = []\n bs = []\n maxbs = []\n csizes = []\n allds = []\n allbs = []\n allmaxbs = []\n\n for c in clusters:\n if c:\n csizes.append(len(c))\n for s in c:\n if len(sg[s]): # XXX: temporarily consider nodes in nb graph only\n bs.append(sg.nodes[s]['m'])\n\n for e in edges:\n if e:\n for u, v in e.edges:\n ds.append(sg.edges[u,v]['d'])\n maxbs.append(np.max([sg.nodes[u]['m'], sg.nodes[v]['m']]))\n\n for s in sg.nodes:\n if len(sg[s]):\n allbs.append(sg.nodes[s]['m'])\n\n for u, v in sg.edges:\n allds.append(sg.edges[u,v]['d'])\n allmaxbs.append(np.max([sg.nodes[u]['m'], sg.nodes[v]['m']]))\n\n return (csizes, bs, ds, maxbs, allbs, allds, allmaxbs)\n\ndef plot_profile(pf, fpref=''):\n cs = pf[0]\n bs = pf[1]\n ds = pf[2]\n maxbs = pf[3]\n allbs = pf[4]\n allds = pf[5]\n allmaxbs = pf[6]\n\n plt.subplot(241)\n plt.hist(cs, bins = np.arange(0.5, 25, 1))\n plt.title('sizes')\n\n plt.subplot(242)\n plt.hist(bs, bins = np.arange(0, 6.0, 0.25))\n #plt.hist(bs, bins = np.arange(0, 1.46, 0.05))\n plt.title('magnitudes')\n\n plt.subplot(243)\n plt.hist(ds, bins = np.arange(0, 24, 1.0))\n #plt.hist(ds, bins = np.arange(0, 1, 0.05))\n plt.title('edge distances')\n\n plt.subplot(244)\n plt.hist(maxbs, bins = np.arange(0, 6.0, 0.25))\n #plt.hist(maxbs, bins = np.arange(0, 1.46, 0.05))\n plt.title('edge magnitudes')\n\n plt.subplot(246)\n plt.hist(allbs, bins = np.arange(0, 6.0, 0.25))\n #plt.hist(allbs, bins = np.arange(0, 1.46, 0.05))\n plt.title('all magnitudes')\n\n plt.subplot(247)\n plt.hist(allds, bins = np.arange(0, 24, 1.0))\n #plt.hist(allds, bins = np.arange(0, 1, 0.05))\n plt.title('all edge distances')\n\n plt.subplot(248)\n plt.hist(allmaxbs, bins = np.arange(0, 6.0, 0.25))\n #plt.hist(allmaxbs, bins = np.arange(0, 1.46, 0.05))\n plt.title('all edge magnitudes')\n plt.tight_layout()\n\n if fpref:\n plt.savefig(fpref + '.pdf', bbox_inches=\"tight\")\n\n plt.show()\n return\n\n# empirical distributions based on attested clusters, edges and entire neighbourhood graph\ndef empirical_ds(clusters, edges, sg, maxmag = 4.5):\n\n pf = profile_system(clusters, edges, sg)\n\n bs_att = [-b for b in pf[1]]\n bs_all = [-b for b in pf[4]]\n\n beta_att, loc_att, scale_att = halfgennorm.fit(bs_att, floc=-maxmag)\n beta_all, loc_all, scale_all = halfgennorm.fit(bs_all, floc=-maxmag)\n\n mfit_att = halfgennorm( beta=beta_att, loc=loc_att, scale=scale_att)\n mfit_all = halfgennorm( beta=beta_all, loc=loc_all, scale=scale_all)\n\n loc_att, scale_att = expon.fit(bs_att, floc=-maxmag)\n loc_all, scale_all = expon.fit(bs_all, floc=-maxmag)\n\n mfit_att = expon( loc=loc_att, scale=scale_att)\n mfit_all = expon( loc=loc_all, scale=scale_all)\n\n ds_att = pf[2]\n ds_all = pf[5]\n\n s_att, loc_att, scale_att = lognorm.fit(ds_att)\n s_all, loc_all, scale_all = lognorm.fit(ds_all)\n\n dfit_att = lognorm( s=s_att, loc=loc_att, scale=scale_att )\n dfit_all = lognorm( s=s_all, loc=loc_all, scale=scale_all)\n\n mf_att = lambda x : mfit_att.pdf( -x )\n mf_all = lambda x : mfit_all.pdf( -x )\n\n df_att = dfit_att.pdf\n df_all = dfit_all.pdf\n\n mf_lr = lambda x: mf_att(x) / mf_all(x)\n df_lr = lambda x: 
df_att(x) / df_all(x)\n\n return (mf_lr, df_lr, mf_att, mf_all, df_att, df_all)\n\ndef addtripquad(sg, wtripquad, tripflag):\n if tripflag: # triples\n triples = all_triples(sg, sg)\n #tripscores = [ (np.abs(np.rad2deg(neighborhood.sphericalangle(t, sg)) - 180), t) for t in triples]\n # angles in radians\n tripscores = [ (np.abs(np.pi - neighborhood.sphericalangle(t, sg)), t) for t in triples]\n tripscores = [ (np.exp(-a * wtripquad), t) for (a,t) in tripscores]\n tripscores = [ (np.min([sg.edges[i,j]['dmagprodweight'], sg.edges[j,k]['dmagprodweight']])*wa, (i,j,k)) for (wa, (i,j,k)) in tripscores ]\n for u, v in sg.edges:\n sg.add_edge(u, v, dmagprodtripquad=-np.inf)\n\n for (t, (i,j,k)) in tripscores:\n if sg.edges[i,j]['dmagprodtripquad'] < t:\n sg.add_edge(i, j, dmagprodtripquad=t)\n if sg.edges[j,k]['dmagprodtripquad'] < t:\n sg.add_edge(j, k, dmagprodtripquad=t)\n else: # quads: ie goodc model\n quads = all_quads(sg,sg)\n goodcscores = [ (neighborhood.goodc(q, sg), q) for q in quads]\n goodcscores = [ (np.exp(-a * wtripquad), q) for (a,q) in goodcscores]\n goodcscores = [ (np.min([sg.edges[i,j]['dmagprodweight'], sg.edges[j,k]['dmagprodweight'], sg.edges[k,l]['dmagprodweight']])*wa, (i,j,k,l)) for (wa, (i,j,k,l)) in goodcscores]\n for u, v in sg.edges:\n sg.add_edge(u, v, dmagprodtripquad=-np.inf)\n for (t, (i,j,k,l)) in goodcscores:\n if sg.edges[i,j]['dmagprodtripquad'] < t:\n sg.add_edge(i, j, dmagprodtripquad=t)\n if sg.edges[j,k]['dmagprodtripquad'] < t:\n sg.add_edge(j, k, dmagprodtripquad=t)\n if sg.edges[k,l]['dmagprodtripquad'] < t: # third edge of the quad\n sg.add_edge(k, l, dmagprodtripquad=t)\n\n return\n\ndef triple_angles(g, sg):\n triples = all_triples(g,sg)\n angles = [ neighborhood.sphericalangle(t, sg) for t in triples]\n return angles\n\ndef triple_distances(g, sg):\n triples = all_triples(g,sg)\n distances = [ neighborhood.tripledistance(t, sg) for t in triples]\n return distances\n\n\ndef quad_goodc(g, sg):\n quads = all_quads(g,sg)\n goodcs = [ neighborhood.goodc(q, sg) for q in quads]\n return goodcs \n\ndef all_triples(g, sg):\n triples = []\n for u, v in g.edges:\n for w in g[v]:\n if w != u:\n if u < w: # so we don't end up with the same edge forwards and backwards\n triples.append( (u,v,w) )\n else:\n triples.append( (w,v,u) )\n for t in g[u]:\n if t != v:\n if t < v:\n triples.append( (t,u,v) )\n else:\n triples.append( (v,u,t) )\n\n # remove duplicates\n triples = np.unique(triples, axis=0)\n\n return(triples)\n\ndef all_quads(g, sg):\n triples = all_triples(g, sg)\n quads = []\n for u, v, w in triples:\n for x in g[w]:\n if x != u and x != v:\n if u < x: \n quads.append( (u,v,w,x) )\n else:\n quads.append( (x,w,v,u) )\n\n for t in g[u]:\n if t != v and t != w:\n if t < w:\n quads.append( (t,u,v, w) )\n else:\n quads.append( (w,v,u, t) )\n\n # remove duplicates\n quads = np.unique(quads, axis=0)\n\n return(quads)\n\n\ndef edge_increment(sg, edges):\n nx.set_edge_attributes(sg, 0, 'ecount')\n for egraph in edges:\n if egraph:\n for u, v in egraph.edges:\n curr = sg.edges[u,v]['ecount']\n sg.add_edge(u, v, ecount=curr+1)\n return\n\ndef printedgecounts(sg, es, edgefile, att='ecount'):\n with open(edgefile, \"w\") as f:\n if not len(es):\n for u, v in sg.edges:\n count = sg.edges[u,v][att]\n if count > 0:\n f.write(','.join([str(u), str(v), str(count) + '\\n']))\n else:\n for eg in es:\n if eg:\n for u, v in eg.edges:\n count = sg.edges[u,v][att]\n f.write(','.join([str(u), str(v), str(count) + '\\n']))\n\ndef graph_from_tuplefile(file, attname='weight'):\n g = 
pd.read_csv(file, header=None)\n tuples = list(data.itertuples(index=False, name=None))\n g.add_weighted_edges_from(tuples, weight=attname)\n\n return g\n\ndef compare_modelhumanedges(humanfile, stargraph, edgefile='', plotfile = '', modelname = 'model', printflag = 1):\n hg = graph_from_tuplefile(humanfile, attname = 'humanw')\n mg = nx.minimum_spanning_tree(stargraph)\n\n for (u, v, e) in hg.edges(data=True):\n mg.add_edge(u, v, humanw = e['humanw'])\n\n m = []\n h = []\n pairs = []\n # include all edges in human file, all edges in model file\n for (u, v, e) in mg.edges(data=True):\n thish = e['humanw'] if 'humanw' in e else 0\n thism = stargraph[u][v]['weight']\n\n m.append(thism)\n h.append(thish)\n pairs.append( (u,v))\n r = np.corrcoef(m, h)\n if printflag:\n print_corr(h, m, pairs, stargraph, modelname, plotfile)\n if edgefile != '':\n with open(edgefile, \"w\") as f:\n for i, pair in enumerate(pairs):\n u, v = pair[0], pair[1]\n nu, nv = stargraph.nodes[u], stargraph.nodes[v]\n f.write(','.join([str(nu['h']), str(nv['h']), nu['name'], nv['name'], str(h[i]), str(m[i])])+'\\n')\n\n return(r[0,1])\n\n\ndef compare_modelhumanedges_hfocused(humanfile, stargraph, printflag = 1):\n hg = graph_from_tuplefile(humanfile, attname = 'humanw')\n\n m = []\n h = []\n pairs = []\n for (u, v, e) in hg.edges(data=True):\n thish = e['humanw']\n thism = stargraph[u][v]['weight']\n m.append(thism)\n h.append(thish)\n pairs.append( (u,v))\n r = np.corrcoef(m, h)\n\n if printflag:\n print_corr(h, m, pairs, stargraph)\n\n return(r[0,1])\n\n\ndef compare_modelhumanedges_ml(humanfile, stargraph, printflag = 1):\n hg = graph_from_tuplefile(humanfile, attname = 'humanw')\n\n m = []\n h = []\n pairs = []\n ll = 0\n for (u, v, e) in hg.edges(data=True):\n thish = e['humanw']\n thism = stargraph[u][v]['weight']\n ll = ll + thish * np.log(thism)\n m.append(thism)\n h.append(thish)\n pairs.append( (u,v))\n\n return(ll)\n\ndef print_corr(h, m, pairs, stargraph, modelname, plotfile):\n rankh = rankdata(h)\n rankm = rankdata(m)\n r = np.corrcoef(m, h)\n rankr = np.corrcoef(rankm, rankh)\n\n est = sm.OLS(rankh, rankm).fit()\n residpairs = sorted(zip(est.resid, pairs))\n print('----- model likes but humans dont')\n for rp in residpairs[:10]:\n pair = rp[1]\n print(str(round(rp[0],2)) + ':' + stargraph.node[pair[0]]['name'] + ',' + stargraph.node[pair[1]]['name'])\n print('----- humans likes but model doesnt')\n for rp in residpairs[-20:]:\n pair = rp[1]\n print(str(round(rp[0],2)) + ':' + stargraph.node[pair[0]]['name'] + ',' + stargraph.node[pair[1]]['name'])\n\n ax = plt.subplot(231)\n sns.set_style(\"white\")\n ax.scatter(m, h, marker='.', color='black' )\n ratio = 1.0\n ax.set_aspect(1.0 / ax.get_data_ratio() * ratio)\n ax.set_xlabel(modelname)\n ax.set_ylabel('human')\n ax.set_title('r = ' + str(round(r[0,1], 2)))\n\n if plotfile:\n plt.savefig(plotfile + '.eps', format=\"eps\", bbox_inches=\"tight\")\n\n if 0:\n ax = plt.subplot(332)\n ax.scatter(rankm, rankh)\n ratio = 1.0\n ax.set_aspect(1.0 / ax.get_data_ratio() * ratio)\n ax.set_xlabel('model')\n ax.set_ylabel('human')\n ax.set_title('r = ' + str(round(rankr[0,1], 2)))\n\n plt.show(block=False)\n\n return\n\n# decrement with wrap around\ndef mydec(i, n):\n if i == 0:\n return n-1\n else:\n return i-1\n\n# figure out which grid nodes each star should attach to\ndef startogrid(sps, decs, ras):\n stog = []\n nd = len(decs)\n nr = len(ras)\n dimatrix, rimatrix = np.meshgrid(np.arange(nd), np.arange(nr))\n # flipping the order is correct because of way nodes are 
ordered in grid graph\n dorder = np.ravel(dimatrix.T)\n rorder = np.ravel(rimatrix.T)\n\n for sp in sps:\n di = np.searchsorted(decs, sp[0])\n ri = np.searchsorted(ras, sp[1])\n if di == nd:\n di = 0\n if ri == nr:\n ri = 0\n dis = (mydec(di, nd), di)\n ris = (mydec(ri, nr), ri)\n fourpairs = np.array(np.meshgrid(dis, ris)).T.reshape(-1, 2)\n row = []\n for p in fourpairs:\n row.append( np.where( (dorder==p[0]) & (rorder==p[1]) )[0][0] )\n stog.append( row )\n\n return stog\n\ndef make_grid(sg, brightthresh, nd, nr):\n labels = np.reshape(np.arange(nd * nr), (nd, nr))\n ng = nx.grid_2d_graph(nd, nr, periodic=True)\n\n dstep = np.pi / nd\n rstep = 2 * np.pi / nr\n\n ras = np.arange(0, 2 * np.pi, rstep)\n decs = np.arange(-np.pi / 2, np.pi / 2, dstep)\n\n dimatrix, rimatrix = np.meshgrid(np.arange(nd), np.arange(nr))\n\n for (di, ri) in zip(np.ravel(dimatrix), np.ravel(rimatrix)):\n ng.add_node((di,ri), dec=decs[di], ra=ras[ri], m=6.1)\n # order inherited from sorted(ng.nodes())\n ng = nx.convert_node_labels_to_integers(ng, first_label=0, ordering='default', label_attribute=None)\n brightsg = sg.subgraph( [n for n,attrdict in sg.node.items() if attrdict['m'] < brightthresh ] )\n\n starposns = [(e['dec'], e['ra']) for _, e in brightsg.node.items()]\n\n stargridedges = startogrid(starposns, decs, ras)\n\n cg = nx.disjoint_union(ng,brightsg)\n ngridnodes = nd * nr\n for i, emap in enumerate(stargridedges):\n n = i + ngridnodes\n epairs = [(n, e) for e in emap]\n cg.add_edges_from(epairs)\n\n return cg, list(brightsg.nodes)\n\ndef kdeweights(sg, brightthresh, h, bfac, comb, peaks, kernel):\n\n if comb == 'max':\n fn = lambda x, y: np.maximum(x, y)\n else:\n fn = lambda x, y: x + y\n\n # see https://asterism.org/wp-content/uploads/2019/04/tut35-Magnitudes.pdf\n magfn = lambda x: (10 ** (-x/2.5)) ** bfac # converts to luminosity then scales using bfac\n\n xtrain = [(e['dec'], e['ra']) for _, e in sg.node.items() if e['m'] < brightthresh]\n nns = [e['nn_scale'] for _, e in sg.node.items() if e['m'] < brightthresh]\n ms = [e['m_scale'] for _, e in sg.node.items() if e['m'] < brightthresh]\n lks = [e['nn_locw'] for _, e in sg.node.items() if e['m'] < brightthresh]\n if peaks == 'standard':\n weights = [ magfn(m) for m in ms]\n hs = [ h * nn for nn in nns]\n elif peaks == 'rescaled':\n weights = [ magfn(m) * nn ** 2 for (m, nn) in zip(ms,nns) ]\n hs = [ h * nn for nn in nns]\n elif peaks == 'nbstandard':\n weights = [ magfn(m) * lk ** 2 for (m, lk) in zip(ms,lks) ]\n hs = [ h * nn * lk for (nn, lk) in zip(nns, lks)]\n elif peaks == 'nbrescaled':\n weights = [ magfn(m) * (nn ** 2) * (lk**2) for (m, nn, lk) in zip(ms,nns, lks) ]\n hs = [ h * nn * lk for (nn, lk) in zip(nns, lks)]\n\n allnode = [(e['dec'], e['ra']) for _, e in sg.node.items() ]\n ksum = 0\n for dr, w, h in zip(xtrain, weights, hs):\n tree = sklearn.neighbors.BallTree(np.matrix(dr), metric='haversine')\n ksum = fn(ksum ,w * tree.kernel_density(allnode, h=h, kernel=kernel))\n\n for n in list(sg.nodes):\n sg.add_node(n, ksum=ksum[n])\n\n return\n\ndef make_rag(sg, thresh):\n rag = graph.rag.RAG()\n for (u, v, e) in sg.edges(data=True):\n w = 0 if sg.nodes[u]['ksum'] > thresh and sg.nodes[v]['ksum'] > thresh else 1\n rag.add_edge(u, v, weight=w)\n\n for n in rag.nodes():\n rag.node[n].update({'labels': [n]})\n\n return rag\n\ndef weight_boundary(graph, src, dst, n):\n \"\"\"\n Handle merging of nodes of a region boundary region adjacency graph.\n\n This function computes the `\"weight\"` attribute of the edge between `n` and\n the node 
formed after merging `src` and `dst`.\n\n Parameters\n ----------\n graph : RAG\n The graph under consideration.\n src, dst : int\n The vertices in `graph` to be merged.\n n : int\n A neighbor of `src` or `dst` or both.\n\n Returns\n -------\n data : dict\n A dictionary with the \"weight\" attribute to be\n assigned for the merged node.\n\n \"\"\"\n default = {'weight': 1}\n\n weight_src = graph[src].get(n, default)['weight']\n weight_dst = graph[dst].get(n, default)['weight']\n\n return {\n 'weight': np.min([weight_src, weight_dst])\n }\n\n\ndef merge_boundary(graph, src, dst):\n \"\"\"Call back called before merging 2 nodes.\n\n In this case we don't need to do any computation here.\n \"\"\"\n pass\n\ndef labs2clusters(labs):\n _, b = np.unique(labs, return_inverse=True)\n cs = [{}]\n for i in np.arange(np.max(b)):\n cs = np.append(cs, set(np.where(b==i)[0]))\n return cs\n\ndef add_nn(stargraph):\n for n in stargraph.nodes():\n ds = [stargraph[u][v]['d'] for (u,v) in stargraph.edges(n)]\n nn = np.deg2rad(np.min(ds)) if ds else None\n stargraph.add_node(n, nn=nn)\n\ndef dropgrids(oldc, fixp, snnodes):\n newc = []\n ngrid = fixp['nrgrid'] * fixp['ndgrid']\n for c in oldc:\n c = [snnodes[ci-ngrid] for ci in list(c) if ci > ngrid]\n if len(c)>1:\n newc.append(set(c))\n\n return newc\n\n# add ecount\ndef addecount(sg, f=[], const=0, constval = 3):\n if not f:\n maxw = np.max([e['weight'] for u, v, e in sg.edges(data=True)])\n f = lambda x: 2 + np.floor(x * 28 / maxw)\n if const:\n f = lambda x: constval\n for u, v, e in sg.edges(data=True):\n sg.add_edge(u, v, ecount= f(e['weight']))\n return\n\n\ndef filterpairs(mc, me, sg):\n newmc = []\n newme = []\n for c, e in zip(mc, me):\n lc = list(c)\n if len(c) == 2 and (sg.nodes[lc[0]]['m'] > 3 or sg.nodes[lc[1]]['m'] > 3):\n pass\n else:\n newmc.append(c)\n newme.append(e)\n return newmc, newme\n\ndef printperculturescores(outfile, names, scores):\n with open(outfile, \"w\") as f:\n for i, n in enumerate(names):\n if n.endswith('stellarium'):\n n = n[:-11]\n for j, sc in enumerate(scores[i]):\n f.write(n + ',' + str(j+1) + ',' + str(sc) + '\\n')\n\n\n# sort list of stars for display in tables\n\ngreekd = {\n'Alp':'A',\n'Bet':'B',\n'Gam':'C',\n'Del':'D',\n'Eps':'E',\n'Zet':'F',\n'Eta':'G',\n'The':'H',\n'Iot':'I',\n'Kap':'K',\n'Lam':'L',\n'Mu':'M',\n'Nu':'N',\n'Xi':'O',\n'Omi':'P',\n'Pi':'Q',\n'Rho':'R',\n'Sig':'S',\n'Tau':'T',\n'Ups':'U',\n'Phi':'V',\n'Chi':'W',\n'Psi':'X',\n'Ome':'Y'\n}\n\npattern = re.compile('|'.join(greekd.keys()))\nreplacegreek = lambda s: pattern.sub(lambda x: greekd[x.group()], s)\nuppers = lambda s: ''.join(reversed(re.sub('[^A-Z]', '', 'Z' + s)))\ninitialnums= lambda s: re.match('^\\d+', '0'+s).group()\n\ndef sortnamelist(names):\n triples = [(uppers(replacegreek(s)), initialnums(s), s) for s in names]\n triples = sorted(triples)\n names_sorted = [c for (a,b,c) in triples]\n return(names_sorted)\n\n\ndef stargraph_listnodes(stargraph, outfile, hr2hip):\n with open(outfile, \"w\") as f:\n ramap = lambda x: np.pi - (x + 1.45 * np.pi) if x < 0.55 * np.pi else np.pi - (x - 0.55 * np.pi)\n for node, e in stargraph.nodes(data=True):\n ehstr = str(e['h'])\n if ehstr in hr2hip:\n hipstr = str(hr2hip[ehstr])\n else:\n hipstr = 'miss'\n f.write(','.join([str(e['i']), ehstr, hipstr, e['name'], str(round(ramap(e['ra']), 3)), str(round(e['dec'], 3)) + '\\n']))\n\ndef colorgroups(stargraph):\n starcolormap = [('34DelOri', 'crimson'),\n ('46EpsOri', 'navy'),\n ('8Bet1Sco', 'darkgreen'),\n ('35LamSco', 'darkgreen'),\n ('20EpsSgr', 
'darkorange'),\n ('27GamCas', 'orange'),\n ('50AlpUMa', 'darkslateblue'),\n ('33LamUMa', 'darkslateblue'),\n ('9IotUMa', 'darkslateblue'),\n ('50AlpCyg', 'red'),\n ('21EtaCyg', 'red'),\n ('Alp1Cru', 'red'),\n ('25EtaTau', 'orangered'),\n ('87AlpTau', 'goldenrod'),\n ('9AlpDel', 'olive'),\n ('66AlpGem', 'mediumblue'),\n ('27EpsGem', 'mediumblue'),\n ('24GamGem', 'mediumblue'),\n ('7DelCrv', 'dodgerblue'),\n ('32AlpLeo', 'violet'),\n ('17EpsLeo', 'violet'),\n ('DelVel', 'mediumpurple'),\n ('62EtaAqr', 'orchid'),\n ('76DelAqr', 'orchid'),\n ('5827', 'steelblue'),\n ('25RhoBoo', 'coral'),\n ('3AlpLyr', 'darkorange'),\n ('10BetLyr', 'darkorange'),\n ('53BetPeg', 'teal'),\n ('42ZetPeg', 'teal'),\n ('21AlpAnd', 'teal'),\n ('AlpCrA', 'dodgerblue'),\n ('5AlpCrB', 'steelblue'),\n ('13AlpAri', 'orangered'),\n ('Alp1Cen', 'deeppink'),\n ('7BetUMi', 'lightgreen'),\n ('11AlpLep', 'deeppink'),\n ('13GamLep', 'deeppink'),\n ('33AlpPer', 'royalblue'),\n ('26BetPer', 'royalblue'),\n ('45EpsPer', 'royalblue'),\n ('38OmiPer', 'royalblue'),\n ('13AlpAur', 'orange'),\n ('37TheAur', 'orange'),\n ('53AlpAql', 'orange'),\n ('23BetDra', 'paleturquoise'),\n ('14EtaDra', 'paleturquoise'),\n ('47DelCnc', 'darkseagreen'),\n ('42AlpCom', 'rosybrown'),\n ('6Alp2Cap', 'steelblue'),\n ('40GamCap', 'steelblue'),\n ('68DelLeo', 'orange'),\n ('25DelCMa', 'powderblue'),\n ('1Pi3Ori', 'mediumblue'),\n ('16AlpBoo', 'darkgoldenrod'),\n ('26EpsSco', 'darkgreen'),\n ('63EpsDra', 'violet'),\n ('44ChiDra', 'violet'),\n ('BetGru', 'steelblue'),\n ('4BetTri', 'dodgerblue'),\n ('86MuHer', 'navy'),\n ('28BetSer', 'darkseagreen'),\n ('27BetHer', 'mediumpurple'),\n ('39OmiSgr', 'royalblue'),\n ('AlpCol', 'orange'),\n ('AlpMus', 'orange'),\n ('ZetAra', 'royalblue'),\n ('11EpsHya', 'navy'),\n ('GamCen', 'royalblue'),\n ('AlpLup', 'mediumslateblue')\n ]\n clustercolors_g = collections.OrderedDict()\n graphnodesout = '../output/data/all_stargraph.txt'\n df = pd.read_csv(graphnodesout, names=['ouri', 'h', 'hip', 'name', 'ra', 'dec'])\n\n for (name, ccolor) in starcolormap:\n ourind = df[df['name'] == name].ouri.values[0]\n clustercolors_g[ourind] = colors.to_rgba(ccolor)\n\n return(clustercolors_g)\n\n","repo_name":"cskemp/constellations","sub_path":"scripts/functions/starhelpers.py","file_name":"starhelpers.py","file_ext":"py","file_size_in_byte":42697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"22064002169","text":"\"\"\"\n------------------------------------------------------------------------------\n\nTo make your life easier, you can use the code provided in the\noutils_analyse folder.\n\n_____________________________________________________________________________\n\"\"\"\n\"\"\"\n------------------------------------------------------------------------------\n\nPut your imports here\n\n_____________________________________________________________________________\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n\n\n\n# This line controls the font size of the text drawn on the plot\nmatplotlib.rcParams.update({'font.size': 18})\n\"\"\"\n------------------------------------------------------------------------------\n\nRead the results file and turn the values into a numpy array\n\n_____________________________________________________________________________\n\"\"\"\n# Put your code here\n\n\n\n\n\n\n\n\n\"\"\"\n------------------------------------------------------------------------------\n\nRemove the values that fall outside the window where the ramp generator is active.\nDon't forget to set the start of the ramp as t=0.\n\n_____________________________________________________________________________\n\"\"\"\n# Put your code here\n\n\n\n\n\n\n\"\"\"\n------------------------------------------------------------------------------\n\nCompute the slope of the ramp generator voltage and its uncertainty.\nDisplay this value and its uncertainty, then convert the time values\ninto voltage values.\n\nNext, convert the pico voltage values into current values, taking the scale\nof the pico used to be 3nA. \n_____________________________________________________________________________\n\"\"\"\n# Put your code here\n\n\n\n\n\"\"\"\n------------------------------------------------------------------------------\n\nDetermine the approximate location of the maxima. It should be\nroughly: Estimation des pics: [ 1.5860128 3.7164788 6.7701464 8.569206 10.486626 14.676541\n 16.499271 ] V\n\n_____________________________________________________________________________\n\"\"\"\n# Put your code here\n\n\n\n\n\n\n\n\n\n\n\n\n# Put your data, with the right units, in place of the None\nvaleurs_avec_bonnes_unites_determination_des_pics = None # Array of three columns\nliste_des_indexes_des_pics = None # List of integers\n\n\"\"\"\n_____________________________________________________________________________\n\"\"\"\n# Do not modify this section!!!\n\nprint(\"Estimation des pics:\", valeurs_avec_bonnes_unites_determination_des_pics[liste_des_indexes_des_pics, 0])\n\n# The figure obtained should match the one in figures_exemple/estimation_des_pics_multi_pics\nplt.figure()\nplt.plot(valeurs_avec_bonnes_unites_determination_des_pics[:, 0],\n valeurs_avec_bonnes_unites_determination_des_pics[:, 1],\n label=\"Courant du pico\")\nplt.xlabel(\"Tension entre G1 et le ground [V]\")\nplt.scatter(valeurs_avec_bonnes_unites_determination_des_pics[liste_des_indexes_des_pics, 0],\n valeurs_avec_bonnes_unites_determination_des_pics[liste_des_indexes_des_pics, 1],\n label=\"Estimation des pics\")\nplt.ylabel(\"Courant mesuré [nA]\")\nplt.legend()\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"smichi23/PHY-3002-Frank-et-Hertz","sub_path":"analyse_legere_pic_multiple.py","file_name":"analyse_legere_pic_multiple.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37270284154","text":"import numpy as np\n# import pandas as pd\n# import matplotlib.pyplot as plt\n# import seaborn as sns\n# import json, os, pickle\n# from collections import defaultdict\nfrom joblib import Parallel, delayed\n\n# from tqdm import tqdm\n# import itertools\n\n# import wandb\nfrom nnn import fileio\nfrom nnn import train_nn as tnn\n\n### MODIFY HERE ###\nfixed_pclass = ['hairpin_size', 'interior_size', 'bulge_size', 'hairpin_triloop', 'hairpin_tetraloop', 'terminal_mismatch', 'stack']\nconfig = dict(\n use_train_set_ratio = 0.1,\n fit_method = 'svd',\n feature_method = 'get_feature_list',\n fit_intercept=False, \n symmetry=False,\n sep_base_stack=True,\n fix_some_coef=True,\n fixed_pclass = fixed_pclass,\n test_mode = 'val', # {'val', 'test'}\n use_model_from = 'lr_dict', # {'lr_dict', 'json'}\n )\n\nmyrange = [.1, .2, .75, 1.0]\n\ntags = ['nupack test']\n### END MODIFY ###\ndef pipeline_fun(config, ratio):\n config.update(dict(use_train_set_ratio=ratio))\n
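 # NOTE (added): joblib's default loky backend pickles config for each delayed() task,\n # so this in-place update lands on a per-worker copy rather than racing on one shared dict.\n 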
tnn.model_pipeline(config, tags=tags)\n\nn = len(myrange)\nParallel(n_jobs=n)(delayed(pipeline_fun)(config, ratio) for ratio in myrange)\n","repo_name":"keyuxi/nnn_paper","sub_path":"run_nn_train.py","file_name":"run_nn_train.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10744495535","text":"\nimport cv2\nimport cv2 as cv\n\nsrc = cv2.imread(\"real.png\", cv2.IMREAD_COLOR)\n\nhsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\n\nh, s, v = cv2.split(hsv)\n\ncv.namedWindow(\"hsv\", cv.WINDOW_NORMAL)\n\ncv2.resizeWindow(\"hsv\", 800, 600)\n\ncv2.imshow(\"hsv\", hsv)\n\ncv.namedWindow(\"h\", cv.WINDOW_NORMAL)\n\ncv2.resizeWindow(\"h\", 800, 600) \ncv2.imshow(\"h\", h)\nstatus_h = cv2.imwrite('/home/bonjour/Desktop/picker/h_replace.png',h)\n \nprint(\"h replaced!\",status_h)\n\ncv.namedWindow(\"s\", cv.WINDOW_NORMAL)\n\ncv2.resizeWindow(\"s\", 800, 600)\n\ncv2.imshow(\"s\", s)\n\ncv.namedWindow(\"v\", cv.WINDOW_NORMAL)\n\ncv2.resizeWindow(\"v\", 800, 600)\n\ncv2.imshow(\"v\", v)\nstatus_v = cv2.imwrite('/home/bonjour/Desktop/picker/v_replace.png', v)\n \nprint(\"v replaced!\",status_v)\n\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()","repo_name":"niart/RGB-Depth-camera-based-path-tracking-of-weeding-robot","sub_path":"src/assignment/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39240547847","text":"import unittest\n\nfrom Domain.manageTechnicien import ManageTechnicien\n\n\nclass ManageTechnicienTest(unittest.TestCase):\n\n\n def test_recupererInterventionsByTechnicienn(self):\n tech1=ManageTechnicien(\"../Database/EasySav.db\")\n self.assertEqual(tech1.recupererInterventionsByTechnicien(2),{5: {'compteRendu': 'zAAAA',\n 'dateIntervention': '16/01/2021',\n 'lieu': 'Wambrechies',\n 'nomEmploye': 'Jaulin',\n 'numeroEmploye': None,\n 'numeroSerie': 'LV1234',\n 'prenomEmploye': 'Florimond',\n 'reussiteIntervention': None,\n 'tempsIntervention': None,\n 'type': '1'}})\n\n def test_modifierInformations(self):\n\n tech1=ManageTechnicien(\"../Database/EasySav.db\")\n recupInfo=tech1.recupererInterventionsByTechnicien(2)\n recupApresModif =tech1.modifierInformations(numeroEmploye=2,numeroTelephone=\"0600000031\")\n self.assertNotEqual(recupInfo,recupApresModif)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Jflo02/APIproject","sub_path":"UnitTest/manageTechnicienUnitTest.py","file_name":"manageTechnicienUnitTest.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15737512034","text":"### The code generator module. (This file doesn't do anything, just defines\n### the compile() function for main.py to call.)\n\nimport node_types\n\npointer = 0\n# Keeps track of where the pointer goes during the program. This is used\n# for variables.\n\nvariables = []\n# A stack (implemented as a list) of the variable names used by the program.\n# - The index of a variable name is its location in memory.\n# - Each new variable is pushed onto the stack.\n# - Deallocating a variable pops off the stack.\n\nmacros = {}\n# A dictionary mapping macro names to their bodies.\n\nparen_stack = []\n# A stack of memory indices used by lparen and rparen.\n# - Every time the program enters a () loop, (not a [] loop!) 
the current\n# memory index is pushed to the stack.\n# - Exiting the loop pops off the stack.\n\nanchors = []\n# A stack of memory indices for the ^ (go_offset) command.\n# - Whenever a macro is inserted, the current memory index is pushed to the\n# stack.\n# - Exiting a macro pops off the stack.\n\ndef compile(program):\n \"\"\"\n program: A list of nodes representing an EBF program.\n returns: The compiled BF output.\n\n The global variable `pointer` may be changed, according to what happens in\n the program.\n \"\"\"\n output = ''\n for node in program:\n output += compile_node(node)\n return output\n\ndef compile_node(node):\n \"\"\"Compile a single AST node, dispatching to the appropriate function\"\"\"\n node_type = type(node).__name__\n if node_type == 'BFCommand': return compile_bf_command(node)\n elif node_type == 'DefVar': return compile_def_var(node)\n elif node_type == 'GoVar': return compile_go_var(node)\n elif node_type == 'AtVar': return compile_at_var(node)\n elif node_type == 'DeallocVar': return compile_dealloc_var(node)\n elif node_type == 'LParen': return compile_lparen(node)\n elif node_type == 'RParen': return compile_rparen(node)\n elif node_type == 'DefMacro': return compile_def_macro(node)\n elif node_type == 'PutMacro': return compile_put_macro(node)\n elif node_type == 'GoOffset': return compile_go_offset(node)\n elif node_type == 'AtOffset': return compile_at_offset(node)\n elif node_type == 'Multiplier': return compile_multiplier(node)\n else:\n raise Exception('unsupported AST node in code generator: {}'\n .format(node_type))\n\n# Compilation functions for each AST node.\n# A compilation function takes in a specific type of AST node and returns BF\n# code.\n\ndef compile_bf_command(node):\n global pointer\n if node.cmd == '<':\n pointer -= 1\n elif node.cmd == '>':\n pointer += 1\n return node.cmd\n\ndef compile_def_var(node):\n variables.append(node.name)\n return ''\n\ndef compile_go_var(node):\n return move_pointer(variables.index(node.name))\n\ndef compile_at_var(node):\n global pointer\n pointer = variables.index(node.name)\n return ''\n\ndef compile_dealloc_var(node):\n if node.name != variables.pop():\n raise Exception('Bad deallocation')\n return ''\n\ndef compile_lparen(node):\n paren_stack.append(pointer)\n return '['\n\ndef compile_rparen(node):\n return move_pointer(paren_stack.pop()) + ']'\n\ndef compile_def_macro(node):\n macros[node.name] = node.body\n return ''\n\ndef compile_put_macro(node):\n anchors.append(pointer)\n body = compile(macros[node.name])\n anchors.pop()\n return body\n\ndef compile_go_offset(node):\n return move_pointer(anchors[-1] + node.offset)\n\ndef compile_at_offset(node):\n global pointer\n pointer += node.offset\n return ''\n\ndef compile_multiplier(node):\n return node.cmd * node.times\n\n# Helper functions\n\ndef move_pointer(destination):\n \"\"\"Returns code for moving the pointer to a given memory index, and\n updates the global pointer variable accordingly.\n\n destination: The target memory index.\n \"\"\"\n global pointer\n distance = destination - pointer\n pointer = destination\n if distance < 0:\n return '<' * -distance\n else:\n return '>' * distance\n","repo_name":"prendradjaja/ebfpp","sub_path":"old-python-compiler/code_generator.py","file_name":"code_generator.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21948197665","text":"import time\nimport pigpio\nimport math\n\npi = pigpio.pi()\n\n# HTU21D-F 
Address\nHTU21DF_devaddr = 0x40\ni2cbus = 1\n\n# HTU21D-F Commands\nHTU21DF_rdtemp = 0xE3\nHTU21DF_rdhumi = 0xE5\nHTU21DF_wtreg = 0xE6\nHTU21DF_rdreg = 0xE7\nHTU21DF_reset = 0xFE\n\ndef reset_HTU21DF():\n\thandle = pi.i2c_open(i2cbus, HTU21DF_devaddr) # open i2c bus\n\tpi.i2c_write_byte(handle, HTU21DF_reset) # send reset command\n\tpi.i2c_close(handle) # close i2c bus\n\ttime.sleep(0.2) # reset takes 15ms so let's give it some time\n\ndef read_temperature():\n\thandle = pi.i2c_open(i2cbus, HTU21DF_devaddr) # open i2c bus\n\tpi.i2c_write_byte(handle, HTU21DF_rdtemp) # send read temp command\n\ttime.sleep(0.055) # readings take up to 50ms, lets give it some time\n\t(count, byteArray) = pi.i2c_read_device(handle, 3) # vacuum up those bytes\n\tpi.i2c_close(handle) # close the i2c bus\n\tt1 = byteArray[0] # most significant byte msb\n\tt2 = byteArray[1] # least significant byte lsb\n\ttemp_reading = (t1 * 256) + t2 # combine both bytes into one big integer\n\ttemp_reading = math.fabs(temp_reading) # I'm an idiot and can't figure out any other way to make it a float \n\ttemperature = ((temp_reading / 65536) * 175.72 ) - 46.85 # formula from datasheet\n\treturn temperature\n\ndef read_humidity():\n\thandle = pi.i2c_open(i2cbus, HTU21DF_devaddr) # open i2c bus\n\tpi.i2c_write_byte(handle, HTU21DF_rdhumi) # send read humi command\n\ttime.sleep(0.055) # readings take up to 50ms, lets give it some time\n\t(count, byteArray) = pi.i2c_read_device(handle, 3) # vacuum up those bytes\n\tpi.i2c_close(handle) # close the i2c bus\n\th1 = byteArray[0] # most significant byte msb\n\th2 = byteArray[1] # least significant byte lsb\n\thumi_reading = (h1 * 256) + h2 # combine both bytes into one big integer\n\thumi_reading = math.fabs(humi_reading) # I'm an idiot and can't figure out any other way to make it a float\n\tuncomp_humidity = ((humi_reading / 65536) * 125 ) - 6 # formula from datasheet\n\t# to get the compensated humidity we need to read the temperature\n\ttemperature = read_temperature()\n\thumidity = ((25 - temperature) * -0.15) + uncomp_humidity\n\treturn humidity\n","repo_name":"rkuo2000/homebot","sub_path":"RPi/rpi3_htu21df.py","file_name":"rpi3_htu21df.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23410887768","text":"class Solution:\n    def findMedianSortedArrays(self, nums1, nums2):\n        \"\"\"\n        :type nums1: List[int]\n        :type nums2: List[int]\n        :rtype: float\n        \"\"\"\n        all = len(nums2) + len(nums1)\n        mid = (int)((len(nums1) + len(nums2)) / 2)\n        if (len(nums2) > 0):\n            for i in range(len(nums1)):\n                insertMid = self.quickInsert(nums2, nums1[i])\n                nums2.insert(insertMid, nums1[i])\n\n                if (insertMid + 1 > mid):\n                    break\n        else:\n            nums2 = nums1\n\n        if all % 2 == 0:\n            return (nums2[mid] + nums2[mid - 1]) / 2.0\n        else:\n            return nums2[mid]\n\n    def quickInsert(self, nums, num):\n        start = 0\n        last = len(nums)\n\n        while (1):\n            mid = (int)((start + last) / 2.0)\n            if nums[mid] < num:\n                start = mid\n            elif nums[mid] > num:\n                last = mid\n            else:\n                return mid\n\n            if (last == start + 1 or last == start):\n                if num > nums[start]:\n                    return last\n                else:\n                    return start\n\n    ##Good Solution\n    def median(self, A, B):\n        m, n = len(A), len(B)\n        if m > n:\n            A, B, m, n = B, A, n, m\n        if n == 0:\n            raise ValueError\n\n        imin, imax, half_len = 0, m, (m + n + 1) // 2\n        while imin <= imax:\n            i = (imin + imax) // 2\n            j = half_len - i\n            if i < m and B[j - 1] > A[i]:\n                imin = i + 1\n            elif i > 0 and A[i - 1] > B[j]:\n                imax = i - 1\n
            else:\n                # i is perfect\n                if i == 0:\n                    max_of_left = B[j - 1]\n                elif j == 0:\n                    max_of_left = A[i - 1]\n                else:\n                    max_of_left = max(A[i - 1], B[j - 1])\n\n                if (m + n) % 2 == 1:\n                    return max_of_left\n\n                if i == m:\n                    min_of_right = B[j]\n                elif j == n:\n                    min_of_right = A[i]\n                else:\n                    min_of_right = min(A[i], B[j])\n                return (max_of_left + min_of_right) / 2.0\n\n\nnums1 = [1, 2, 3]\nnums2 = [1, 2]\n# print(Solution().insertNum(nums2, 5.5))\nprint(Solution().findMedianSortedArrays(nums1, nums2))\n","repo_name":"tsutsuku/leetcode","sub_path":"median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74367608092","text":"a = int(input())\nb = int(input())\n\nif a > b:\n    print(\"O primeiro valor é o maior\")\nif b > a:\n    print(\"O segundo valor é o maior\")\n\n# Ao executar o programa, observamos que retorna\n# o print da primeira logica quando o valor de \"a\" é maior\n# e retorna o print da segunda logica quando \"b\" é maior\n\n# Se os valores forem iguais, nada será impresso.\n# Isso acontece porque a > b e b > a são falsas quando a = b.\n# Assim, nem o print de 2, nem o print de 3 serão executados,\n# logo nada será impresso.\n","repo_name":"yhammartes/Livro_Python","sub_path":"cap004/exer_1.py","file_name":"exer_1.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72343441050","text":"import sys\nsys.path.insert(0,'../schnetpack/src/')\n\nimport argparse\nimport math\nimport logging\nimport numpy as np\nimport torch\nfrom ase.neighborlist import neighbor_list\nimport itertools\nfrom collections import Counter\nimport os.path as op\nimport os\nfrom ase.db import connect\nfrom schnetpack import Properties\n\nflatten = lambda x: [item for sublist in x for item in sublist]\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_path', type=str, help='Path .db with data')\nparser.add_argument('--preprocess_path', type=str, help='Directory to save preprocess data')\nparser.add_argument('--cutoff', type=float, default=6.0, help='Cutoff value for nearest neighbor collection')\nparser.add_argument('--no_mic', action='store_true', default=False, help='Disable the minimum image convention when computing distances')\nargs = parser.parse_args()\n    \n\ndef torchify_dict(data):\n    \"\"\"\n    Transform np.ndarrays to torch.tensors.\n\n    \"\"\"\n    torch_properties = {}\n    for pname, prop in data.items():\n\n        if prop.dtype in [np.int, np.int32, np.int64]:\n            torch_properties[pname] = torch.LongTensor(prop)\n        elif prop.dtype in [np.float, np.float32, np.float64]:\n            torch_properties[pname] = torch.FloatTensor(prop.copy())\n        else:\n            print(\n                \"Invalid datatype {} for property {}!\".format(type(prop), pname)\n            )\n\n    return torch_properties\n\n\ndef collate_input(atoms, data, inputs={}):\n\n    # Elemental composition\n    inputs[Properties.Z] = atoms.numbers.astype(np.int)\n    positions = atoms.positions.astype(np.float32) \n    # center\n    #positions -= get_center_of_mass(atoms)\n    inputs[Properties.R] = positions\n\n    # get atom environment\n    nbh_idx = data['neighbors']\n    offsets = np.zeros((1), dtype=np.float32)\n\n    inputs[Properties.neighbors] = nbh_idx.astype(np.int)\n    \n    # apply neighbor masks\n    mask = inputs[Properties.neighbors] >= 0\n    inputs[Properties.neighbor_mask] = mask.astype(np.float)\n    inputs[Properties.neighbors] = (\n        inputs[Properties.neighbors] * 
inputs[Properties.neighbor_mask].astype(np.int)\n )\n\n # Get cells\n inputs[Properties.cell] = np.array(atoms.cell.array, dtype=np.float32)\n inputs[Properties.cell_offset] = offsets.astype(np.float32)\n \n return inputs\n\n\ndb=connect(args.data_path) \n\ndef prepro(i):\n row = db.get(i+1)\n energy = np.array(row.data['energy'], dtype=np.float32)\n forces = np.array(row.data['forces'], dtype=np.float32)\n \n \n atoms = row.toatoms()\n n_atoms = len(atoms)\n distance_matrix = atoms.get_all_distances(not args.no_mic)\n distances = np.nonzero(np.where(distance_matrix <= args.cutoff, distance_matrix, 0))\n neighborhood_idx = [list(distances[1][np.argwhere(distances[0] == i).flatten()]) for i in range(n_atoms)]\n n_max_nbh = np.max([len(x) for x in neighborhood_idx])\n neighbors = np.array([np.pad(x, (0,n_max_nbh-len(x)), mode='constant', constant_values=-1) for x in neighborhood_idx], dtype=np.int)\n\n \n data = {'neighbors': neighbors}\n input_data = torchify_dict(collate_input(atoms, data, inputs={'energy': energy, 'forces': forces}))\n \n torch.save(input_data, op.join(args.preprocess_path, f'{i}.pt'))\n \n \nfrom multiprocessing import Pool\n\nif __name__ == '__main__':\n with Pool(8) as p:\n p.map(prepro, list(range(0,len(db))))\n \n ","repo_name":"pnnl/Active-Sampling-for-Atomistic-Potentials","sub_path":"preprocessing/compute_neighbors.py","file_name":"compute_neighbors.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"7552971934","text":"import argparse\nimport asyncio\nimport logging\nimport os\nimport sys\nimport time\n\nfrom sma_sunnyboy import *\nfrom tasmotadevicecontroller import TasmotaDevice\nfrom tasmotadevicecontroller import tasmota_types as t\n\nparser = argparse.ArgumentParser(description='a tiny tool which reads the actual energy produced from my sunnyboy and switch the pool pump on/off')\nparser.add_argument('--sunnyboyip', default=\"192.168.1.196\", dest=\"sunnyboyip\", help=\"IP of the sunnyboy solar converter. (default 192.168.1.196)\")\nparser.add_argument('--sunnyboypassword', default=\"\", dest=\"sunnyboypassword\", help=\"your password of the sunnyboy. (default empty)\")\nparser.add_argument('--tasmotaip', default=\"192.168.1.84\", dest=\"tasmotaip\", help=\"IP of the powerswitch. (default 192.168.1.84)\")\nparser.add_argument('--powerlow', default=500, dest=\"powerlow\", type=int, help=\"the level where the pump switch off. (default 500)\")\nparser.add_argument('--powerhigh', default=1000, dest=\"powerhigh\", type=int, help=\"the level where the pump switch on. (default 1000)\")\nparser.add_argument('--powertoggel', default=800, dest=\"powertoggel\", type=int, help=\"the level where the pump toggel on/off. 
(default 800)\")\nargs = parser.parse_args()\n\nfile_handler = logging.FileHandler(filename='SunnyBoy-PoolPump.log')\nstdout_handler = logging.StreamHandler(sys.stdout)\nhandlers = [file_handler, stdout_handler]\n\nlogging.basicConfig(\n    level=logging.INFO,\n    format='[%(asctime)s] %(levelname)s - %(message)s',\n    handlers=handlers\n)\n\nlog = logging.getLogger('Sunnyboy-PoolPump')\n\nsma_address = args.sunnyboyip # address of SMA\npassword = args.sunnyboypassword # your user password\nright = Right.USER # the connection level\ntasmota_address = args.tasmotaip\ntasmota_state = t.PowerType.OFF\nsma_low = args.powerlow\nsma_toggel = args.powertoggel\nsma_toggel_time_off = 1 # how often we count until we switch the pump off\nsma_toggel_time_on = 5 # how often we count until we switch the pump on\nsma_high = args.powerhigh\n\n\nasync def pump(state):\n    device = await TasmotaDevice.connect(tasmota_address)\n    await device.setPower(state)\n    sound(state)\n\n\ndef sound(state):\n    duration = 0.1 # seconds\n    if state == t.PowerType.OFF:\n        freq = 400 # Hz\n    else:\n        freq = 800 # Hz\n\n    os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n\n\nasync def getPumpState():\n    log.info(\"get pump state\")\n    device = await TasmotaDevice.connect(tasmota_address)\n    current_state = await device.getPower()\n    tasmota_state = t.PowerType.ON if current_state else t.PowerType.OFF\n    log.info(\"current state {}\".format(tasmota_state))\n    return tasmota_state\n\n\nlog.info(\"init connection to sunnyboy\")\nclient = WebConnect(sma_address, right, password)\nclient.auth()\nloop = asyncio.get_event_loop()\n\ntoggel_count = 0\nwhile True:\n    # log.info(\"request power\")\n    pow_current = client.get_value(Key.power_current)\n    if pow_current is None:\n        log.info(\"timeout while connect to sma_sunnyboy\")\n        # loop.run_until_complete(pump(t.PowerType.OFF))\n        time.sleep(1 * 60)\n        log.info(\"reinit connection to sunnyboy\")\n        client.logout()\n        client = WebConnect(sma_address, right, password)\n        client.auth()\n        continue\n\n    log.info(\"currently the sunnyboy produce {} W\".format(pow_current))\n    if pow_current > sma_high and tasmota_state == t.PowerType.OFF:\n        log.info(\"Limit value reached, turn pump on ++++++++++++++++++\")\n        loop.run_until_complete(pump(t.PowerType.ON))\n        tasmota_state = t.PowerType.ON\n        toggel_count = 0\n    elif pow_current < sma_low and tasmota_state == t.PowerType.ON:\n        log.info(\"Limit value reached, turn pump off -----------------\")\n        loop.run_until_complete(pump(t.PowerType.OFF))\n        tasmota_state = t.PowerType.OFF\n        toggel_count = 0\n    elif pow_current > sma_toggel and pow_current < sma_high:\n        toggel_count += 1\n        # log.info(\"Toggel {}\".format(toggel_count))\n        if tasmota_state == t.PowerType.OFF:\n            if toggel_count > sma_toggel_time_on:\n                log.info(\"Toggel Limit value reached, turn pump on ++++++++++++++++++{}\".format(toggel_count))\n                loop.run_until_complete(pump(t.PowerType.ON))\n                tasmota_state = t.PowerType.ON\n                toggel_count = 0\n        else:\n            if toggel_count > sma_toggel_time_off:\n                log.info(\"Toggel Limit value reached, turn pump off -----------------{}\".format(toggel_count))\n                loop.run_until_complete(pump(t.PowerType.OFF))\n                tasmota_state = t.PowerType.OFF\n                toggel_count = 0\n\n\n    time.sleep(1 * 60) # sleep 1 min\nclient.logout()\n","repo_name":"december-soul/sunnyboy-poolpump","sub_path":"sunnyboy-poolpump.py","file_name":"sunnyboy-poolpump.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
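The pool-pump record above implements a three-threshold hysteresis: hard switch points at --powerlow and --powerhigh, plus a middle band above --powertoggel in which a debounce counter must exceed sma_toggel_time_on (or sma_toggel_time_off) readings before the relay flips. A minimal standalone sketch of that decision logic follows; the class and parameter names here are illustrative, not taken from the original script.

class PumpController:
    """Hysteresis with a debounced middle band, mirroring the record above."""

    def __init__(self, low=500, toggle=800, high=1000, on_after=5, off_after=1):
        self.low = low              # below this the pump always turns off
        self.high = high            # above this the pump always turns on
        self.toggle = toggle        # lower edge of the debounced middle band
        self.on_after = on_after    # band readings required before switching on
        self.off_after = off_after  # band readings required before switching off
        self.is_on = False
        self.count = 0

    def decide(self, watts):
        """Feed one power reading (W); returns the new on/off state."""
        if watts > self.high and not self.is_on:
            self.is_on, self.count = True, 0
        elif watts < self.low and self.is_on:
            self.is_on, self.count = False, 0
        elif self.toggle < watts < self.high:
            # Inside the toggle band: only switch after enough readings.
            self.count += 1
            needed = self.on_after if not self.is_on else self.off_after
            if self.count > needed:
                self.is_on, self.count = not self.is_on, 0
        return self.is_on


if __name__ == '__main__':
    ctrl = PumpController()
    for w in (200, 1100, 900, 400, 850, 850, 850, 850, 850, 850):
        print('{:5d} W -> {}'.format(w, 'ON' if ctrl.decide(w) else 'OFF'))

Note that, as in the script above, the counter is deliberately not reset when a reading falls outside the toggle band without triggering a switch.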
+{"seq_id":"2980902756","text":"# Write a python program that takes input from a file and outputs to another file. The input file consists of n lines. Each line will be comma separated values. The first work will be an IP address followed by various key attributes.\n# The output will be a file which has n lines in a comma separated values. The first word will be the ip address followed by values of the key attributes from the input file for that ip address.\n# Use this api to get information for that IP.\n# https://ipapi.co/api/#introduction\n#\n# Input file and output file name will be give via command line parameters. You must write your own function to validate the IP address. Your code must use object oriented classes and objects as much as possible.\n#\n# If IP is not valid, output line must say INVALID IP\n# If any key attribute is not valid for any input IP, output must say INVALID KEY for that key value.\n# If key attributes values are comma separated, you must replace comma with :\n#\n# Eg.\n# $ ip_info.py input.txt output.txt\n#\n# input.txt-\n# 8.8.8.8,city,region,country,postal\n# 123.10.100.50,asn,org,in_eu,region_code\n# 600.40.12.100,timezone,utc_offset,asn,org\n# 5.225.26.154,postal,languages,public\n#\n# output.txt-\n# 8.8.8.8,Mountain View,California,United States,94035\n# 123.10.100.50,AS4837,CHINA UNICOM China169 Backbone,false,null\n# INVALID IP\n# 5.225.26.154,15572,es-ES:ca:gl:eu:oc,INVALID KEY\n\nfrom requests import get\nimport json\nimport socket\ncollective_data = []\ndef main():\n with open(\"input.txt\") as file:\n for lines in file:\n line_data = lines[:-1]\n split_data = line_data.split(',')\n ipaddress = split_data[0]\n ipvalid = validateIP(ipaddress)\n collect_data = [ipaddress]\n if ipvalid:\n for j in range(1,len(split_data),1):\n valueip = get('https://ipapi.co/'+str(ipaddress)+'/'+str(split_data[j])+'/').text\n if 'Not Found' in valueip or valueip == 'Undefined':\n valueip = 'INVALID KEY'\n if ',' in valueip:\n split_data1 = valueip.split(',')\n valueip = ':'.join(split_data1)\n collect_data.append(valueip)\n collective_data.append(collect_data)\n else:\n collective_data.append(['INVALID IP'])\n\n file.close()\n with open('output.txt', 'w') as f:\n for item in collective_data:\n item = ','.join(item)\n f.write(\"%s\\n\" % item)\n\ndef validateIP(ipdata):\n try:\n socket.inet_aton(ipdata)\n return True\n # legal\n except socket.error:\n return False\n # Not legal\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rashidamakati/Python-Projects-using-APIS","sub_path":"PythonAPI.py","file_name":"PythonAPI.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29466583400","text":"\"\"\"\nDownload data from CIHA and CIH (Old)\nHospital and Ambulatorial information system\nhttp://ciha.datasus.gov.br/CIHA/index.php?area=03\n\nby fccoelho\nlicense: GPL V3 or Later\n\"\"\"\nfrom typing import Union\n\nfrom pysus.online_data import FTP_Downloader\nfrom pysus.ftp import CACHEPATH\n\n\ndef download(\n states: Union[str, list],\n years: Union[str, list, int],\n months: Union[str, list, int],\n data_dir: str = CACHEPATH,\n) -> list:\n \"\"\"\n Download CIHA records for state, year and month and returns dataframe\n :param months: 1 to 12, can be a list\n :param states: 2 letter state code,\n :param years: 4 digit integer\n \"\"\"\n return FTP_Downloader('CIHA').download(\n UFs=states,\n years=years,\n months=months,\n local_dir=data_dir,\n 
)\n","repo_name":"AlertaDengue/PySUS","sub_path":"pysus/online_data/CIHA.py","file_name":"CIHA.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"32"} +{"seq_id":"6407462336","text":"import sqlite3\n\n# database initialization\ndatabase = sqlite3.connect('assets/data.db')\ncur = database.cursor()\n\ncur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS records (\n size INT, record INT\n )\n\"\"\")\n\n\n# returning record from database\ndef record(size) -> int or float:\n r = cur.execute(f\"\"\"\n SELECT record FROM records\n WHERE size={size}\n \"\"\").fetchone()\n\n return 0 if r is None else float(r[0])\n","repo_name":"verhovv/PyQT-Barley-Break","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38856340534","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, api\n\n\nclass MultiPaySlipWiz(models.TransientModel):\n _name = 'multi.payslip.wizard'\n _description = 'Multi Pay Slip Wiz'\n\n def multi_payslip(self):\n payslip_ids = self.env['hr.payslip']. \\\n browse(self._context.get('active_ids'))\n for payslip in payslip_ids:\n if payslip.state in ['verify', 'draft']:\n \tpayslip.compute_sheet()\n \tpayslip.action_payslip_done()\n","repo_name":"may649/multi_payslip","sub_path":"wizard/multi_payslip_confirm.py","file_name":"multi_payslip_confirm.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21810165516","text":"# -*- coding: utf-8 -*-\r\n# ---------------------------------------------------------------------------\r\n#create feature layers for AGOL\r\nprint(\"Starting process: Create Heat Index Feature Layer for UCS AGOL.\") \r\n\r\nimport arcpy\r\nimport os\r\nimport time\r\nfrom arcpy import env\r\nfrom arcpy.sa import *\r\nimport arcpy.ia\r\nimport zipfile\r\nimport fnmatch\r\nimport sys\r\nimport urllib.request\r\nimport tarfile\r\nfrom datetime import datetime, timedelta\r\n\r\ntry:\r\n\tstart_time = time.time()\r\n\tprint(\"Started on \" + time.ctime(start_time) + \".\")\r\n\tsleep_interval =.33\r\n\tarcpy.env.overwriteOutput = True\r\n\t# Local variables:\r\n\r\n\tbase_dir = r\"D:\\Users\\climate_dashboard\\Documents\\climate_dashboard\"\r\n\r\n\tdata_dir = \"data\"\r\n\ttmp_dir = os.path.join(base_dir, data_dir, \"tmp\", \"HI_forecast\")\r\n\toutput_files_dir = os.path.join(base_dir, data_dir, \"output_files\")\r\n\tinput_files_dir = os.path.join(base_dir, data_dir, \"input_files\")\r\n\t#temperature_forecast_shp_dir = \"temperature_forecast_shp\"\r\n\tenv.workspace = tmp_dir\r\n\t#zip_files_dir = os.path.join(output_files_dir, \"HI_forecast\")\r\n\tmax_hi_forecast_dir = os.path.join(input_files_dir, \"HI_forecast\")\r\n\t\r\n\t#Get the day 3 Heat Index forecast for today, i.e., the day 3 Heat Index forecast issued three days ago\r\n\tdate_three_days_ago = datetime.today() - timedelta(days=3)\r\n\tdate_three_days_ago_formatted = str(date_three_days_ago.strftime(\"%Y%m%d\"))\r\n\tmax_hi_forecast_url = \"https://ftp.wpc.ncep.noaa.gov/shapefiles/heatindex/\" + date_three_days_ago_formatted + \"/maxhi_\" + date_three_days_ago_formatted + \"f072.tar\" \r\n\t\r\n\tcounties_shp = os.path.join(input_files_dir,\"conus_counties_simplified.shp\")\r\n\tmaxhi_current_valid_poly = \"maxhi_current_valid_poly\" \r\n\r\n\t#input 
files\r\n\t#maxhi_f072_latest.tar\t\r\n\tmax_hi_forecast_tar = os.path.join(max_hi_forecast_dir, \"maxhi_f072_latest.tar\")\r\n\t\t\r\n\t#layers\r\n\tmax_hi_forecast_lyr = \"max_hi_forecast_lyr\"\r\n\r\n\t#output files\r\n\t#interpolation_shp = os.path.join(tmp_dir, \"conus_max_temp_forecast_first_day_integer.tif\")\r\n\t\r\n\t#hi_forecast_symbology = os.path.join(input_files_dir, \"conus_max_temp_forecast_symbology.lyr\") \r\n\t\r\n\t#cleanup files before starting\r\n\tfiles_list = [max_hi_forecast_tar]\r\n\r\n\tprint(\"cleaning up before starting...\") \r\n\tfor a_file in files_list: \r\n\t\tif os.path.exists(a_file):\r\n\t\t\ttry:\r\n\t\t\t\tos.remove(a_file)\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Exception raised: cannot delete \" + a_file + \". Exiting.\")\r\n\t\telse:\r\n\t\t\tprint(\"Does not exist: \" + a_file + \".\")\r\n\r\n\tprint(\"cleaning up \" + tmp_dir + \"...\")\r\n\tfor a_file in os.listdir(tmp_dir):\r\n\t\tif os.path.exists(os.path.join(tmp_dir, a_file)):\r\n\t\t\ttry:\r\n\t\t\t\tos.remove(os.path.join(tmp_dir,a_file))\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Exception raised: cannot delete \" + a_file + \". Exiting.\")\r\n\t\telse:\r\n\t\t\tprint(\"Does not exist: \" + a_file + \".\")\t\r\n\t\r\n\t#download latest hi forecast tar\r\n\ttry:\r\n\t\tprint(\"Downloading \" + max_hi_forecast_url + \"...\" )\r\n\t\turllib.request.urlretrieve(max_hi_forecast_url, max_hi_forecast_tar)\r\n\t\tprint(\"Downloaded to \" + max_hi_forecast_tar)\r\n\t\r\n\texcept:\r\n\t\tprint(\"Could not download \" + max_hi_forecast_url + \".\")\t\r\n\t\tsys.exit()\r\n\t\r\n\t#untar\r\n\ttry:\r\n\t\tprint(\"Extracting tar file...\")\r\n\t\ttar = tarfile.open(name=max_hi_forecast_tar, mode='r')\r\n\t\t#changing cwd to tmp dir for tar extract\r\n\t\tos.chdir(tmp_dir)\r\n\t\ttar.extractall()\r\n\t\ttar.close()\r\n\t\tprint(\"Extracted to \" + max_hi_forecast_tar)\r\n\texcept:\r\n\t\tprint(\"Could not untar \" + max_hi_forecast_tar + \".\")\r\n\t\tsys.exit()\r\n\r\n\tmatch = fnmatch.filter(os.listdir(tmp_dir), '*.shp')\r\n\tmax_hi_forecast_shp = os.path.join(tmp_dir, match[0])\r\n\tmax_hi_forecast_interpolated_tif = os.path.join(tmp_dir, os.path.splitext(match[0])[0] + \".tif\")\r\n\tmax_hi_forecast_interpolated_tif_valid = os.path.join(tmp_dir, \"maxhi_current_valid.tif\")\r\n\t\r\n\tmax_hi_forecast_interpolated_valid_poly = os.path.join(tmp_dir, maxhi_current_valid_poly)\r\n\tmax_hi_forecast_interpolated_valid_poly_tmp = os.path.join(tmp_dir, maxhi_current_valid_poly + \"_tmp\")\r\n\t\r\n\t#interpolate\r\n\tprint(\"Kriging to \" + max_hi_forecast_interpolated_tif)\r\n\tarcpy.ga.EmpiricalBayesianKriging(max_hi_forecast_shp, \"VALUE\", \"lyr\", max_hi_forecast_interpolated_tif, 0.10488, \"NONE\", 100, 1, 100,\\\r\n\t\t\t\t\t\t\"NBRTYPE=StandardCircular RADIUS=13.2608929280799 ANGLE=0 NBR_MAX=15 NBR_MIN=10 SECTOR_TYPE=ONE_SECTOR\",\\\r\n\t\t\t\t\t\t\"PREDICTION\", 0.5, \"EXCEED\", None, \"POWER\")\r\n\tprint(\"done.\")\r\n\tsel = arcpy.management.SelectLayerByLocation(counties_shp, \"CONTAINS\", max_hi_forecast_shp, None, \"NEW_SELECTION\", \"NOT_INVERT\")\r\n\t\r\n\tprint(\"Extracting by mask from \" + max_hi_forecast_interpolated_tif + \"...\")\r\n\tout_raster = arcpy.sa.ExtractByMask(max_hi_forecast_interpolated_tif,sel)\r\n\tprint (\"changing raster data type to integer in \" + max_hi_forecast_interpolated_tif + \"...\")\r\n\tout_raster_int = arcpy.ia.Int(out_raster)\r\n\t\r\n\t#sys.exit()\r\n\tprint(\"saving raster to \" + max_hi_forecast_interpolated_tif_valid + \"...\" 
)\r\n\tout_raster_int.save(max_hi_forecast_interpolated_tif_valid)\r\n\t\r\n\tprint (\"Converting to polygon: \" + max_hi_forecast_interpolated_valid_poly + \"...\" )\r\n\tarcpy.conversion.RasterToPolygon(in_raster = max_hi_forecast_interpolated_tif_valid,\\\r\n\t\t\t\t\t\tout_polygon_features = max_hi_forecast_interpolated_valid_poly_tmp,\\\r\n\t\t\t\t\t\tsimplify = \"NO_SIMPLIFY\",\\\r\n\t\t\t\t\t\traster_field = \"Value\",\\\r\n\t\t\t\t\tcreate_multipart_features = \"SINGLE_OUTER_PART\")\r\n\t\r\n\t\r\n\t#Selecting Heat Index values above threshold\r\n\tthreshold = 100\r\n\tprint(\"Selecting Heat Index values above threshold of \" + str(threshold))\r\n\tqry = \"gridcode >= \" + str(threshold)\r\n\theat_index_over_threshold_lyr = arcpy.SelectLayerByAttribute_management(max_hi_forecast_interpolated_valid_poly_tmp + \".shp\", \"NEW_SELECTION\", qry)\r\n\t\r\n\tprint(\"Copying to \" + max_hi_forecast_interpolated_valid_poly)\r\n\tarcpy.management.CopyFeatures(heat_index_over_threshold_lyr, max_hi_forecast_interpolated_valid_poly, '', None, None, None)\r\n\t\r\n\tprint(\"zipping shapefiles...\")\r\n\ttime.sleep(sleep_interval)\r\n\t#all extensions of shapefile\r\n\tshp_extensions = [\"cpg\", \"dbf\", \"prj\", \"sbn\", \"sbx\", \"shp\", \"xml\", \"shx\"]\r\n\t#use list comprehension to generate the filenames in the shapefile to zip up\r\n\tfiles = [maxhi_current_valid_poly + \".\" + x for x in shp_extensions]\r\n\t\r\n\tpoly_zip_filename = os.path.join(output_files_dir, maxhi_current_valid_poly) + \".zip\"\r\n\t\r\n\tpoly_zip = zipfile.ZipFile(poly_zip_filename, 'w')\r\n\tfor a_file in files:\r\n\t\ttry:\r\n\t\t\tpoly_zip.write(filename = os.path.join(tmp_dir, a_file),\\\r\n\t\t\t\t\t\t\t\t\tarcname = a_file,\\\r\n\t\t\t\t\t\t\t\t\tcompress_type=zipfile.ZIP_DEFLATED)\r\n\t\t\tprint(\"Zipped to \" + poly_zip_filename)\r\n\t\texcept:\r\n\t\t\tprint(\"can't zip \" + a_file)\r\n\t\t\tcontinue\r\n\tpoly_zip.close()\r\n\r\nexcept Exception as inst:\r\n\tprint(type(inst)) # the exception instance\r\n\tprint(inst.args) # arguments stored in .args\r\n\tprint(inst) # __str__ allows args to be printed directly,\r\n\te = sys.exc_info()[1]\r\n\tprint(e.args[0])\r\nfinally:\r\n\tprint(\"Done.\")\r\n\tprint(\"--- %s seconds ---\" % round((time.time() - start_time)))","repo_name":"decletbarreto/Pandemic-ANalytics-Climate-and_Health-Overlays--PANCHO","sub_path":"scripts/python/old/create_local_heat_index_forecast_feature_layers_for_agol.py","file_name":"create_local_heat_index_forecast_feature_layers_for_agol.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33090707597","text":"#!/usr/bin/python3\n# coding: utf-8\n\"\"\"Uses dynamic programming to infer the location of spaces in a string without spaces.\"\"\"\n# Reference: https://stackoverflow.com/questions/8870261/how-to-split-text-without-spaces-into-list-of-words\n# Rife https://github.com/keredson/wordninja/blob/master/wordninja.py\n# A naive algorithm won't give good results when applied to real-world data.\n# Here is a 20-line algorithm that exploits relative word frequency to give accurate results for real-word text.\n# (If you want an answer to your original question which does not use word frequency,\n# you need to refine what exactly is meant by \"longest word\": is it better to have a 20-letter word and ten 3-letter words,\n# or is it better to have five 10-letter words? 
Once you settle on a precise definition, you just have to change the line defining\n# wordcost to reflect the intended meaning.)\n\n# The idea\n# The best way to proceed is to model the distribution of the output. A good first approximation is to assume all words are\n# independently distributed. Then you only need to know the relative frequency of all words. It is reasonable to assume that they\n# follow Zipf's law, that is the word with rank n in the list of words has probability roughly 1/(n log N)\n# where N is the number of words in the dictionary.\n# Once you have fixed the model, you can use dynamic programming to infer the position of the spaces. The most likely sentence is\n# the one that maximizes the product of the probability of each individual word, and it's easy to compute it with dynamic programming.\n# Instead of directly using the probability we use a cost defined as the logarithm of the inverse of the probability to avoid overflows.\nfrom math import log\nwords = open(\"/home/coder352/dataset/125k-words-sorted-by-frequency.txt\").read().split() # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\nwordcost = dict((k, log((i+1)*log(len(words)))) for i, k in enumerate(words)) # cost 越小用的越多\n# print(sorted([word for word in words if len(word) == 1])) # ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nmaxword = max(len(x) for x in words);\n# print(maxword) # 58, Wikipedia show that it is the second longest name\ndef split(s): # s is a long str without spaces\n # Find the best match for the i first characters, assuming cost has been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i])) # candidates 是 cost list 的带序号字典\n return min((c[0] + wordcost.get(s.lower()[i-k-1:i], 9e999), k+1) for k, c in candidates) # k:[0,i)\n # reversed 很重要, 表示 cost[i-1-k] 对 s[:i] 来说, 后面多添加了 k+1 长度的 str, 动态规划的思想...\n\n # Build the cost array. 核心就是动态规划思想...\n cost = [(0, 0)] # cost[i] 表示在 str[0:i] 的最小 cost, 和从上一个 最小的 cost 到这个 cost 添加的那部分字符串的长度\n for i in range(1, len(s)+1): # 根据 s 的长度来暴力求出各个长度 str 的 cost, 没有在词典中的 str cost 无穷大\n c, k = best_match(i) # 对每一个长度的 s[0:i] 都计算一个 cost\n cost.append((c, k))\n\n out = []\n l = len(s)\n while l > 0:\n out.append(s[l - cost[l][1]:l]) # 这里 cost 长度比 s 多 1, 所以可以这样写\n l = l - cost[l][1]\n return list(reversed(out))\nif __name__ == '__main__':\n print(split(\"sheisABOY\"))\n print(split('thumbgreenappleactiveassignmentweeklymetaphor'))\n print(split('thereismassesoftextinformationofpeoplescommentswhichisparsedfromhtmlbuttherearenodelimitedcharactersinthemforexamplethumbgreenappleactiveassignmentweeklymetaphorapparentlytherearethumbgreenappleetcinthestringialsohavealargedictionarytoquerywhetherthewordisreasonablesowhatsthefastestwayofextractionthxalot'))\n##################################################################\n## Optimization\n# The implementation consumes a linear amount of time and memory, so it is reasonably efficient.\n# If you need further speedups, you can build a suffix tree from the word list to reduce the size of the set of candidates.\n# If you need to process a very large consecutive string it would be reasonable to split the string to avoid excessive memory usage.\n# For example you could process the text in blocks of 10000 characters plus a margin of 1000 characters on either side to\n# avoid boundary effects. 
This will keep memory usage to a minimum and will have almost certainly no effect on the quality.\n","repo_name":"HCShi/jShellscript","sub_path":"bin/template/src/jptjieba/l41_separate_en.py","file_name":"l41_separate_en.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36057968588","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncm5.py\n\nSimple backgrounds\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport cartopy.crs as ccrs\nimport cartopy.feature as ft\n\n\nrcParams['font.size']=6\n\nfig=plt.figure(figsize=(15/2.54,15/2.54))\n\n\nax=fig.add_subplot(211,projection=ccrs.PlateCarree())\n#in the case of world maps you can use the predefined datasets of NaturalEarth\nax.add_feature(ft.LAND)\nax.add_feature(ft.OCEAN)\nax.add_feature(ft.LAKES)\nax.add_feature(ft.RIVERS)\nax.add_feature(ft.BORDERS, linestyle='--', edgecolor='gray', linewidth=0.5)\n\nax.set_title('Natural Earth 110m')\n\nax2=fig.add_subplot(212,projection=ccrs.PlateCarree())\nax2.set_xlim(-10,50)\nax2.set_ylim(30,60)\n\n# in the case of 50 and 10m datasets you have to define the features from the NaturalEarthFeature\nland_50m = ft.NaturalEarthFeature('physical', 'land', '50m', edgecolor='k', facecolor=ft.COLORS['land'])\nocean_50m = ft.NaturalEarthFeature('physical', 'ocean', '50m', edgecolor='face', facecolor=ft.COLORS['water'])\nlakes_50m = ft.NaturalEarthFeature('physical', 'lakes', '50m', edgecolor='face', facecolor=ft.COLORS['water'])\nrivers_50m = ft.NaturalEarthFeature('physical', 'rivers_lake_centerlines', '50m', edgecolor=ft.COLORS['water'], facecolor='none')\n\nax2.add_feature(land_50m)\nax2.add_feature(ocean_50m)\nax2.add_feature(lakes_50m)\nax2.add_feature(rivers_50m)\n\nax2.set_title('Natural Earth 50m')\n\n\n# plt.savefig('../../figures/cm5_backgrounds_vec.svg', bbox_inches='tight')\nplt.show()\n","repo_name":"fmetivier/GISCourseMaterial","sub_path":"src/map/cm_5_backgrounds_vec.py","file_name":"cm_5_backgrounds_vec.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7947471280","text":"#!/usr/bin/python3\r\n# This program prints theASCII alphabet in reverse order\r\n# While alternating lowercase and uppercase.\r\n# Using one loop, no sotrage of characters in variable\r\n# No importing any module\r\n\r\nel = 0\r\nfor cha in range(ord('z'), ord('a') - 1, -1):\r\n print(\"{}\".format(chr(cha - el)), end=\"\")\r\n el = 32 if el == 0 else 0\r\n","repo_name":"ElvisMw/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/100-print_tebahpla.py","file_name":"100-print_tebahpla.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37061026462","text":"from collections import defaultdict\nfrom itertools import tee\nfrom queue import Queue\nfrom typing import Dict, Iterator, List, Set, Tuple\n\n\ndef pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\nclass Person:\n def __init__(self, lines: List[str]):\n self.name = lines[0].split(\":\")[1].strip()\n self.id = lines[1].split(\":\")[1].strip()\n self.home_planet = lines[2].split(\":\")[1].strip()\n self.blood = [line.strip()[1:-1] for line in lines[5:11]]\n\n def _bendy_path(self, s: str, i: int, j: int) -> Iterator[Set[Tuple[int, int]]]:\n path = []\n\n def helper(i: int, j: int, k: int = 
0):\n if k == len(s):\n yield set(path)\n elif (\n 0 <= i < len(self.blood)\n and 0 <= j < len(self.blood[i])\n and self.blood[i][j] == s[k]\n ):\n path.append((i, j))\n for i, j in ((i - 1, j), (i, j + 1), (i + 1, j), (i, j - 1)):\n yield from helper(i, j, k + 1)\n path.pop()\n\n return helper(i, j)\n\n def has_pico(self, seqs: List[str]) -> bool:\n paths = {\n seq: [\n path\n for i in range(len(self.blood))\n for j in range(len(self.blood[i]))\n for path in self._bendy_path(seq, i, j)\n ]\n for seq in seqs\n }\n\n def no_overlap(chosen: Set[Tuple[int, int]], i: int = 0) -> bool:\n return i == len(seqs) or any(\n all(p not in chosen for p in path) and no_overlap(chosen | path, i + 1)\n for path in paths[seqs[i]]\n )\n\n return no_overlap(set())\n\n\ndef read_persons(fname: str) -> List[Person]:\n with open(fname, \"r\") as f:\n lines = f.readlines()\n return [Person(lines[i : i + 14]) for i in range(0, len(lines), 14)]\n\n\ndef read_galaxy(fname: str) -> Dict[str, List[int]]:\n with open(fname, \"r\") as f:\n return {\n line[0].strip(): list(int(x) for x in line[1].strip()[1:-1].split(\",\"))\n for line in map(lambda line: line.split(\":\"), f)\n }\n\n\ndef dist(x: Tuple[float, float, float], y: Tuple[float, float, float]) -> float:\n return sum((a - b) ** 2 for a, b in zip(x, y)) ** 0.5\n\n\ndef puzzle1(persons: List[Person]) -> Set[str]:\n seqs = [\"pic\", \"opi\", \"cop\", \"ico\"]\n return {p.id for p in persons if p.has_pico(seqs)}\n\n\ndef puzzle2(persons: List[Person]) -> Set[str]:\n signal_ranging = {\"Venis\": 2, \"Cetung\": 4, \"Phoensa\": 9}\n galaxy = read_galaxy(\"galaxy_map.txt\")\n graph = {\n u: [v for v, y in galaxy.items() if dist(x, y) < 50] for u, x in galaxy.items()\n }\n running_set = set(galaxy.keys())\n for planet, delay in signal_ranging.items():\n q = Queue()\n q.put(planet)\n distances = {planet: 0}\n while not q.empty():\n u = q.get()\n for v in graph[u]:\n if v not in distances:\n distances[v] = distances[u] + 1\n q.put(v)\n running_set &= {p for p, d in distances.items() if d == delay}\n return {p.id for p in persons if p.home_planet in running_set}\n\n\ndef puzzle3(persons: List[Person]) -> Set[str]:\n travel_times = {\n \"Bio-Lab\": 21,\n \"Factory\": 18,\n \"Shopping Mall\": 17,\n \"Food Plant\": 20,\n \"Office Station\": 20,\n \"Gym\": 7,\n \"Starship Garage\": 16,\n \"Happy-Center\": 27,\n \"Palace\": 37,\n \"Junkyard\": 16,\n \"Pod Racing Track\": 19,\n \"Mining Outpost\": 15,\n \"placeholder\": float(\"inf\"),\n }\n\n absolute_time = lambda h, m: h * 60 + m\n\n visited = defaultdict(lambda: [[\"placeholder\", float(\"inf\"), None]])\n with open(\"security_log.txt\", \"r\") as f:\n for line in f:\n if line.startswith(\"Place:\"):\n place = line.split(\":\")[1].strip()\n elif line.startswith(\"in:\"):\n for person in line.split(\":\")[1].strip().split(\",\"):\n visited[person.strip()].append([place, time])\n elif line.startswith(\"out:\"):\n for person in line.split(\":\")[1].strip().split(\",\"):\n visited[person.strip()][-1].append(time)\n elif \":\" in line:\n time = absolute_time(*map(int, line.split(\":\")))\n\n window = (absolute_time(11, 0), absolute_time(13, 0))\n crime_time = 20\n\n names = {\n person\n for person, log in visited.items()\n if any(\n max(enter + travel_times[src], window[0]) + crime_time\n <= min(window[1], leave - travel_times[dest])\n for (src, _, enter), (dest, leave, _) in pairwise(\n sorted(log, key=lambda x: x[1])\n )\n )\n }\n return {p.id for p in persons if p.name in names}\n\n\nif __name__ == \"__main__\":\n persons = 
read_persons(\"population.txt\")\n p1 = puzzle1(persons)\n print(f\"p1: {sum(map(int, p1))}\")\n p2 = puzzle2(persons)\n print(f\"p2: {sum(map(int, p2))}\")\n p3 = puzzle3(persons)\n print(f\"p3: {sum(map(int, p3))}\")\n culprit_id = (p1 & p2 & p3).pop()\n print(f\"culprit name: {[p.name for p in persons if p.id == culprit_id][0]}\")\n","repo_name":"Wahaj404/festo-coding-challenge-2022","sub_path":"episode_3.py","file_name":"episode_3.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"31389083537","text":"import os\nimport copy\n\nimport nemo\nimport nemo_asr\nfrom nemo_asr.helpers import post_process_predictions\nfrom utils import *\n\n\nclass ASR:\n def __init__(self):\n \"\"\"Loads pre-trained ASR model\"\"\"\n self.asr_conf = parse_yaml()[\"asr\"]\n device = nemo.core.DeviceType.CPU\n self.nf = nemo.core.NeuralModuleFactory(placement=device)\n # load model configuration\n jasper_params = parse_yaml(\n os.path.join(self.asr_conf[\"model_dir\"], \"quartznet15x5.yaml\"))\n self.labels = jasper_params[\"labels\"]\n self.sample_rate = jasper_params[\"sample_rate\"]\n\n # preprocessor\n self.eval_dl_params = copy.deepcopy(jasper_params[\"AudioToTextDataLayer\"])\n self.eval_dl_params.update(jasper_params[\"AudioToTextDataLayer\"][\"eval\"])\n del self.eval_dl_params[\"train\"]\n del self.eval_dl_params[\"eval\"]\n self.preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor(\n sample_rate = self.sample_rate,\n **jasper_params[\"AudioPreprocessing\"])\n \n # model encoder\n feats = jasper_params[\"AudioPreprocessing\"][\"features\"]\n self.jasper_encoder = nemo_asr.JasperEncoder(\n feat_in = feats,\n **jasper_params[\"JasperEncoder\"])\n self.jasper_encoder.restore_from(\n os.path.join(self.asr_conf[\"model_dir\"],\n \"JasperEncoder-STEP-247400.pt\"))\n\n # model decoder\n filters = jasper_params[\"JasperEncoder\"][\"jasper\"][-1][\"filters\"]\n self.jasper_decoder = nemo_asr.JasperDecoderForCTC(\n feat_in = filters,\n num_classes=len(self.labels))\n self.jasper_decoder.restore_from(\n os.path.join(self.asr_conf[\"model_dir\"],\n \"JasperDecoderForCTC-STEP-247400.pt\"))\n\n self.nf.logger.info('================================')\n self.nf.logger.info(\n f\"Number of parameters in encoder: {self.jasper_encoder.num_weights}\")\n self.nf.logger.info(\n f\"Number of parameters in decoder: {self.jasper_decoder.num_weights}\")\n self.nf.logger.info(\n f\"Total number of parameters in model: \"\n f\"{self.jasper_decoder.num_weights + self.jasper_encoder.num_weights}\")\n self.nf.logger.info('================================')\n \n # CTC decoder\n if self.asr_conf[\"decoder\"] == \"beam\":\n self.ctc_decoder = nemo_asr.BeamSearchDecoderWithLM(\n vocab = self.labels,\n beam_width = self.asr_conf[\"beam_width\"],\n alpha = self.asr_conf[\"alpha\"],\n beta = self.asr_conf[\"beta\"],\n lm_path = self.asr_conf[\"lm_path\"],\n num_cpus = max(os.cpu_count(), 1))\n else:\n self.ctc_decoder = nemo_asr.GreedyCTCDecoder()\n\n\n\n def transcribe(self, wav_path):\n \"\"\"Reads audio file and returns the recognized transcrition\"\"\"\n self.nf.logger.info('Started Transcribing Speech')\n data_layer = nemo_asr.AudioToTextDataLayer(\n manifest_filepath = build_manifest(wav_path),\n sample_rate = self.sample_rate,\n labels = self.labels,\n batch_size = 1,\n **self.eval_dl_params)\n os.remove(\"audio.json\")\n self.nf.logger.info('Loading {0} examples'.format(len(data_layer)))\n\n audio_sig_e1, a_sig_length_e1, 
transcript_e1, transcript_len_e1 = data_layer()\n\n # apply pre-processing \n processed_signal_e1, p_length_e1 = self.preprocessor(\n input_signal = audio_sig_e1,\n length = a_sig_length_e1)\n\n # encode audio signal\n encoded_e1, encoded_len_e1 = self.jasper_encoder(\n audio_signal=processed_signal_e1,\n length=p_length_e1)\n\n # decode encoded signal\n log_probs_e1 = self.jasper_decoder(encoder_output=encoded_e1)\n\n # apply CTC decode\n if self.asr_conf[\"decoder\"] == \"beam\":\n beam_predictions_e1 = self.ctc_decoder(\n log_probs=log_probs_e1, log_probs_length=encoded_len_e1)\n evaluated_tensors = self.nf.infer(\n tensors=[beam_predictions_e1],\n use_cache=False)\n hypotheses = []\n # Over mini-batch\n for i in evaluated_tensors[1]:\n hypotheses.append(i)\n else:\n greedy_predictions_e1 = self.ctc_decoder(log_probs=log_probs_e1)\n eval_tensors = [log_probs_e1, greedy_predictions_e1,\n transcript_e1, transcript_len_e1, encoded_len_e1]\n evaluated_tensors = self.nf.infer(\n tensors = eval_tensors,\n cache = True\n )\n hypotheses = post_process_predictions(\n evaluated_tensors[1],\n self.labels)\n \n return hypotheses\n\n\n\n\n\n\nif __name__ == \"__main__\":\n asr = ASR()\n wav_path = \"romance_gt.wav\"\n text = asr.transcribe(wav_path)\n print(\"You said:\", text)","repo_name":"Anwarvic/Web-Interface-for-NVIDIA-NeMo","sub_path":"asr.py","file_name":"asr.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"25398918825","text":"from flask import Flask, request, jsonify\nfrom flask_cors import cross_origin, CORS\n\nimport generator\nimport re\n\napp = Flask(__name__)\nCORS(app, origins=['http://localhost:3000'])\n\n\n@app.route('/')\ndef generic():\n return jsonify({'hi there'})\n\n\n@app.route('/generate', methods=['POST'])\ndef generate_base():\n \"\"\"\n Основной эндпоинт для генерации матрицы\n Аргументы (POST запрос с контентом в формате multipart/form-data):\n namegroup: строка, соответствующая следующему регулярному выражению [А-Яа-я0-9-]+ - в случае несоответствия,\n сервер вернет сообщение в формате application/json:\n {\n \"msg\": \"incorrect name-group string\"\n }\n size: целое число, в случае несоответствия сервер вернет код 500\n negatives: в случае отправки переменной с любым значением, матрица будет содержать отрицательные значения\n graph_type: строка из двух символов, определяющая тип графа - первый символ определяет тип: \"o\" для ориентированного, \"u\" для неориентированного, \"p\" для полновесного;\n второй символ определяет тип значений - \"w\" для взвешенного и \"u\" для невзвешенного\n \"\"\"\n namegroup = request.form.get('namegroup')\n size = int(request.form.get('size'))\n negatives = request.form.get('negatives', None)\n graph_type = request.form.get('graph_type')\n if re.fullmatch('[А-Яа-я0-9-]+', namegroup):\n if negatives:\n matrix = generator.builder_hashlib(namegroup, size, graph_type, negatives)\n else:\n matrix = generator.builder_hashlib(namegroup, size, graph_type)\n return jsonify({'matrix': matrix})\n else:\n return jsonify({'msg': 'incorrect name-group string'})\n","repo_name":"Saranthyr/graphmatrixgen","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35782372529","text":"from bs4 import BeautifulSoup\nimport boto3\nimport pandas as pd\nimport datetime\nfrom io import BytesIO\n\n\ndef main():\n s3 = 
boto3.client('s3')\n bucket_name = 'khadajhinnnn'\n ruta = 'news/raw/'\n archivo_nombre = ruta+datetime.datetime.now().strftime('%Y-%m-%d')+'.html'\n response = s3.get_object(Bucket=bucket_name, Key=archivo_nombre)\n # Leer el contenido del objeto (archivo)\n contenido = response['Body'].read().decode('utf-8')\n\n soup = BeautifulSoup(contenido, 'html.parser')\n\n for span_etq in soup.find_all('span'):\n span_etq.extract()\n\n categoria = soup.find_all('div', class_='category-published')\n titulos = soup.find_all('h2', class_='title-container')\n enlaces = soup.find_all('a', class_='title page-link')\n\n lst_c = ['categoria']\n lst_t = ['titulo']\n lst_e = ['enlace']\n\n for c in categoria:\n lst_c.append(c.text)\n for t in titulos:\n lst_t.append(t.text)\n for e in enlaces:\n lst_e.append('https://eltiempo.com'+e['href'])\n\n lst = [lst_c, lst_t, lst_e]\n\n longitud = 41\n\n while len(lst_c) < longitud:\n lst_c.append(None)\n while len(lst_t) < longitud:\n lst_t.append(None)\n\n df = pd.DataFrame(lst[1:], columns=lst[0])\n fecha = datetime.datetime.now().strftime('%Y-%m-%d')\n df.to_csv(fecha+'.csv', index=False)\n # Obtener la fecha actual\n fecha_actual = datetime.datetime.now()\n año = fecha_actual.year\n mes = fecha_actual.month\n dia = fecha_actual.day\n\n file = f'{año:04d}-{mes:02d}-{dia:02d}'\n ruta_objeto = f'headlines/final/year={año}/month={mes:02d}/'+file+'.csv'\n\n s3 = boto3.client('s3')\n # Subir el archivo en memoria a S3\n\n archivo_m = BytesIO(contenido.encode('utf-8'))\n\n s3.upload_fileobj(archivo_m, 'khadajhinnnn-b', ruta_objeto)\n\n print('Se ha subido correctamente el csv')\n\n\nmain()\n","repo_name":"R4venSlayer/BigDataParcial","sub_path":"crearCSV.py","file_name":"crearCSV.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73304153692","text":"\"\"\"\nConvert trained model bytes to a PyTorch statedict. 
We could keep everything in \nflax, but I already have all the PyTorch inference code done and this will also \nmake it a bit easier to push to HF Hub if I want to do that.\n\"\"\"\n\nimport argparse\n\nfrom flax_to_pytorch import match_and_save\nfrom GPT2 import model_getter\n\n\ndef parse():\n parser = argparse.ArgumentParser(description=\"Convert Flax to PyTorch\")\n\n parser.add_argument(\"--model-name\", type=str)\n parser.add_argument(\"--flax-path\", type=str) # path of flax msgpack object\n parser.add_argument(\"--torch-path\", type=str) # path to save torch statedict to\n parser.add_argument(\"--vocab-size\", type=int, default=50304) # model vocab size\n parser.add_argument(\"--seq-len\", type=int, default=1024) # maximum model context\n\n args = parser.parse_args()\n return args\n\n\ndef main():\n\n args = parse()\n model = model_getter(model_size=args.model_name)\n\n match_and_save(model, args.flax_path, args.torch_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fattorib/ZeRO-transformer","sub_path":"torch_compatability/convert_to_torch.py","file_name":"convert_to_torch.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"21969370475","text":"import torch\nimport torch.nn as nn\n\n\nclass TopKAccuracy(nn.Module):\n def __init__(self, k):\n super().__init__()\n\n self.k = k\n\n def forward(self, pred, gt):\n _, topk = torch.topk(pred, k=self.k, dim=1)\n corr = torch.eq(topk, gt.unsqueeze(1).repeat(1, self.k))\n acc = corr.sum(dim=1).float().mean().item()\n return acc\n\n\nif __name__ == \"__main__\":\n metric = TopKAccuracy(k=1)\n pred = torch.randn(16, 100)\n gt = torch.argmax(pred, dim=1)\n metric(pred, gt)\n","repo_name":"KimRass/BERT","sub_path":"finetune/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"25433566023","text":"# Python Program for School Administration system\r\n\r\nimport csv\r\n\r\n# Defining function to use csv file\r\ndef write_into_csv(info_list):\r\n with open('student_info.csv', 'a', newline='') as csv_file:\r\n writer = csv.writer(csv_file)\r\n if csv_file.tell() == 0:\r\n writer.writerow([\"Name\", \"Age\", \"Contact_No\", \"Email_ID\"])\r\n writer.writerow(info_list)\r\n \r\n\r\n# main() program\r\nif __name__=='__main__':\r\n condition = True\r\n student_num = 1\r\n while(condition):\r\n student_info = input(\"Enter the details of the student #{} (Name Age Contact_No Email_ID): \".format(student_num))\r\n \r\n # splitting the function\r\n student_info_list = student_info.split(\" \")\r\n \r\n print(\"\\nThe Entered details are: \\nName: {}\\nAge: {}\\nContact_No: {}\\nEmail_ID: {}\".format(student_info_list[0],student_info_list[1],student_info_list[2],student_info_list[3]))\r\n choice_check = input(\"Is the entered values correct? (yes/no): \")\r\n \r\n if choice_check=='yes':\r\n write_into_csv(student_info_list)\r\n condition_check = input(\"Do you want to continue? 
Enter (yes/no): \")\r\n \r\n if condition_check==\"yes\":\r\n condition = True\r\n student_num = student_num + 1\r\n elif condition_check==\"no\":\r\n condition = False\r\n elif choice_check=='no':\r\n print(\"Please re-enter the values!\")","repo_name":"DilonThomas/MyCaptain_Python_Assignment","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43592760720","text":"import tensorflow as tf\n\nstate = tf.Variable(0, name='counter')\nprint(state.name)\n\none = tf.constant(1)\n\n# 把这个变量理解成是一种状态,有点像Vue中数据驱动视图的感觉\n# state一变它就立马跟着变\nnew_value = tf.add(state, one)\n\n# 把new_value赋值会state\nupdate = tf.assign(state, new_value)\n\n# 激活所有的变量!!!很重要\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(3):\n sess.run(update)\n print(sess.run(new_value))\n print(sess.run(state))\n","repo_name":"chenxin6/TensorFlow","sub_path":"Day01/python3.py","file_name":"python3.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17232745851","text":"def solution(N, stages):\n answer = []\n rate = []\n for i in range(1, N+1):\n not_clear_count = len([ply for ply in stages if ply == i])\n clear_count = len([ply for ply in stages if ply >= i])\n if clear_count == 0:\n rate.append((i, 0))\n else:\n rate.append((i, not_clear_count/clear_count))\n\n for i in sorted(rate, key=lambda x: -x[1]):\n answer.append(i[0])\n return answer\n\nprint(solution(5, [2, 1, 2, 6, 2, 4, 3, 3]))\nprint(solution(4, [4, 4, 4, 4, 4]))\nprint(solution(6, [1, 1, 2, 2, 2]))","repo_name":"Dodant/algorithm_study","sub_path":"Programmers/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70333638812","text":"import logging\nimport pytest\nimport re\nimport shutil\nfrom bs4 import BeautifulSoup # type: ignore\nfrom jupyter_book_to_htmlbook.code_processing import (\n pre_spans_to_code_tags,\n process_code,\n process_inline_code,\n number_codeblock,\n process_code_examples\n )\nfrom jupyter_book_to_htmlbook.file_processing import process_chapter\nfrom pathlib import Path\n\n\n@pytest.fixture\ndef code_example_python():\n chapter = Path(\n \"tests/example_book/_build/html/notebooks/code_py.html\"\n ).read_text()\n return BeautifulSoup(chapter, 'lxml')\n\n\n@pytest.fixture\ndef code_example_r():\n return BeautifulSoup(\"\"\"\n
\n
\n
\n
%%R\n## R\n5^8\n
\n
\n
[1] 390625\n
\"\"\", \"html.parser\")\n\n\n@pytest.fixture\ndef code_example_data_type():\n return BeautifulSoup(\"\"\"
\n
\n
\n
# hello\n# An example example title\n\ndef h():\n    pass\n
\n
\n
\n
\n\"\"\", \"html.parser\")\n\n\n@pytest.fixture\ndef code_example_data_type_r():\n return BeautifulSoup(\"\"\"\n
\n
\n
\n
%%R\n# example_r\n# A formal R example\n## R\n5^8\n
\n
\n
\n
\n
[1] 390625\n
\n\"\"\", \"html.parser\")\n\n\nclass TestCodeProcessing:\n \"\"\"\n Tests around handing of code blocks in Jupyter Books\n \"\"\"\n\n def test_add_python_datatype(self, code_example_python):\n \"\"\"\n Jupyter Book is putting highlight information in surrounding divs;\n we should add the HTMLBook data-type=\"programlisting\" and\n data-code-language if we see that it's a highlight-ipython3 class\n\n NOTE: while Jupyter can support other languages, we're currently\n targeting ONLY python, since that's what seems to be primarily built\n into Jupyter Book.\n \"\"\"\n result = process_code(code_example_python)\n assert result.find('pre')['data-type'] == \"programlisting\"\n assert result.find('pre')['data-code-language'] == \"python\"\n\n def test_add_r_datatype(self, code_example_r):\n \"\"\"\n Jupyter Book should add the data-code-language appropriately IF\n we are seeing that we're loading `%load_ext rpy2.ipython` and we\n have a block with `%%R` at the beginning. If it's a \"python\" notebook\n the highlight-ipython3 class is being applied, but that's not really\n relevant so it should be removed.\n \"\"\"\n result = process_code(code_example_r)\n check_div = result.find_all('pre')[0]\n assert check_div.get('data-code-language') == \"r\"\n assert \"highlight-ipython3\" not in str(check_div.parent['class'])\n\n def test_add_r_datatype_removes_rpy2_flag(self, code_example_r):\n \"\"\"\n In order to tell the notebook that a cell is an R cell (in an otherwise\n Python notebook), authors must include `%%R` at the beginning of the\n cell. Per author feedback, we should remove that, since if there are\n two languages in the text, the preferred distinguishing mechanism is\n comments (e.g., `##R`).\n \"\"\"\n result = process_code(code_example_r)\n # check second div, since first div is the `load_ext` command\n check_div = result.find_all('pre')[0]\n # note that these are two separate spans, and we're using \"find\" since\n # we only want to confirm that they're not at the beginning\n assert not check_div.find('span', string=\"%%\")\n assert not check_div.find_all('span', string=\"R\")\n\n def test_add_r_datatype_removes_newline(self, code_example_r):\n \"\"\"\n In addition to removing the `%%R` characters, we should start the\n block at the first non-whitespace character as you'd expect, so in\n our test case, we're looking to ensure that the second member of\n check_div.contents *doesn't* start with a newline.\n \"\"\"\n result = process_code(code_example_r)\n check_div = result.find_all('pre')[1]\n assert check_div.contents[1].find('\\n') != 0\n\n def test_add_r_formatting_edge_case(self):\n \"\"\"\n While not common anymore in Python >= 3.6, there is still the\n possibility that older string interpolation syntax might include\n `%%R` somewhere in the code, so we want to ensure that we're only\n tagging blocks that _start_ with `%%R` as `r` language blocks.\n \"\"\"\n snippet = \"\"\"\n
\n
\n
\nr =\n's = %r\n\\nprint(s\n%%R)'\nprint(\nr%r\n)\n
\n
\n
\n def test_extraneous_span_classes_are_removed(self, code_example_python):\n \"\"\"\n We want to remove the highlighting classes that Jupyter Book is adding\n so that our processor doesn't have conflicts/get confused.\n\n NOTE: The classless spans don't seem to affect the Atlas highlighter,\n so best to leave them in to avoid accidentally removing information.\n \"\"\"\n result = process_code(code_example_python)\n assert not re.search(r'<span class=', str(result))\n
\n def test_cell_numbering_failures_are_logged(self, caplog):\n \"\"\"\n If the numbering process raises (e.g., a TypeError or KeyError on a\n malformed cell), we should log the failure and move on rather than\n crash the build.\n \"\"\"\n type_error = BeautifulSoup(\"\"\"\n<div class=\"cell docutils container\" id=\"type_err\">\n<div class=\"cell_input docutils container\">\n<pre>failures!</pre>\n</div>\n</div>\n\"\"\", 'html.parser')\n key_error = BeautifulSoup(\n '<div class=\"cell_input\" id=\"key_err\"><pre>failures!</pre></div>',\n 'html.parser')\n caplog.set_level(logging.DEBUG)\n process_code(type_error)\n process_code(key_error)\n log = caplog.text\n assert \"Unable to apply cell numbering\" in log\n assert \"key_err\" in log\n assert \"type_err\" in log\n
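\n # Sketch (illustration only; hypothetical helper): the \"log and move\n # on\" behavior the test above exercises. Numbering errors are caught\n # and logged rather than crashing the build; the real logic lives in\n # the package under test.\n def _sketch_safe_number(self, cell):\n try:\n number_codeblock(cell.find('pre'), 0)\n except (TypeError, KeyError) as err:\n logging.debug(f'Unable to apply cell numbering: {err}')\n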
\n def test_dont_number_out_blocks_if_in_block_is_hidden(self):\n \"\"\"\n If an author is purposely hiding the input cell, we shouldn't\n go on to number the output cell.\n \"\"\"\n example_content = BeautifulSoup(\"\"\"\n
<div class=\"cell tag_hide-input docutils container\">\n<div class=\"cell_output docutils container\">\n<div class=\"output text_plain docutils container\">\n<pre>(323, 4)\n                        </pre>\n</div>\n</div>\n</div>\n\"\"\", \"html.parser\")\n result = process_code(example_content)\n assert \"Out[0]\" not in str(result)\n
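\n\n# Sketch (illustration only; hypothetical helper, not part of the original\n# suite): the marker format the TestNumbering cases below assert on. Note\n# that \"In [n]: \" has a space after \"In\" while \"Out[n]: \" does not, so both\n# markers come out the same width.\ndef _sketch_marker(kind, n):\n return f'In [{n}]: ' if kind == 'input' else f'Out[{n}]: '\n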
\n\nclass TestNumbering:\n \"\"\"\n Tests around the numbering and indentation of code cells\n \"\"\"\n\n def test_in_blocks_are_numbered(self, code_example_python):\n \"\"\"\n Input blocks should be marked as such and numbered via the usual\n Jupyter Notebook formatting of `In [##]`.\n\n Numbering inputs should add the `In` marker, return the element,\n and return the next marker\n \"\"\"\n in_div = code_example_python.find('div', class_=\"cell_input\")\n in_pre = in_div.find('pre')\n number_codeblock(in_pre, 0)\n assert \"In [1]: \" in str(in_pre)\n
\n @pytest.mark.parametrize(\"numbering\", [1, 20, 100])\n def test_in_blocks_are_indented_correctly(self,\n code_example_python,\n numbering):\n \"\"\"\n Input blocks should be marked as such and numbered via the usual\n Jupyter Notebook formatting of `In [##]`, and subsequent lines of\n code should be indented likewise.\n\n This test is parametrized across one-, two-, and three-digit numbers\n to confirm that the indentation always matches the marker width.\n \"\"\"\n # gather info about preprocess indentation, \"live\" for durability\n in_div = code_example_python.find('div', class_='cell_input')\n in_pre = in_div.find('pre')\n preprocess_indentations = re.findall(r'(\\n\\s*)', str(in_pre))\n expected_indentations = [ind + (\" \" * len(f\"In [{numbering}]: \"))\n for ind in preprocess_indentations]\n # add numbering\n number_codeblock(in_pre, numbering)\n\n # check indents\n postprocess_indentations = re.findall(r'(\\n\\s*)', str(in_pre))\n assert postprocess_indentations == expected_indentations\n
\n def test_out_blocks_are_numbered(self, code_example_python):\n \"\"\"\n Output blocks should be marked as such and numbered via the usual\n Jupyter Notebook formatting of `Out[##]`.\n \"\"\"\n in_div = code_example_python.find('div', class_=\"cell_output\")\n in_pre = in_div.find('pre')\n number_codeblock(in_pre, 1)\n assert \"Out[1]: \" in str(in_pre)\n
\n @pytest.mark.parametrize(\"numbering\", [1, 20, 100])\n def test_out_blocks_are_indented_correctly(self,\n code_example_python,\n numbering):\n \"\"\"\n Output blocks should be marked as such and numbered via the usual\n Jupyter Notebook formatting of `Out[##]`, and subsequent lines of\n code should be indented likewise.\n \"\"\"\n # gather info about preprocess indentation, \"live\" for durability\n in_div = code_example_python.find_all('div', class_='cell_output')[1]\n in_pre = in_div.find('pre')\n preprocess_indentations = re.findall(r'(\\n\\s*)', str(in_pre))\n expected_indentations = [ind + (\" \" * len(f\"Out[{numbering}]: \"))\n for ind in preprocess_indentations]\n # add numbering\n number_codeblock(in_pre, numbering)\n\n # check indents\n postprocess_indentations = re.findall(r'(\\n\\s*)', str(in_pre))\n assert postprocess_indentations == expected_indentations\n
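\n # Sketch (illustration only; hypothetical helper): the indentation rule\n # asserted above. Every newline run in the block is padded by the width\n # of the numbering marker so continuation lines stay aligned.\n @staticmethod\n def _sketch_indent(block_text, marker):\n return block_text.replace('\\n', '\\n' + ' ' * len(marker))\n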
\n def test_r_blocks_are_indented_correctly(self, code_example_r):\n \"\"\"\n Ensure R blocks are indented correctly as well. Note that we're testing\n without `%%R` since that'll be removed prior to numbering, and,\n frustratingly, the results are different.\n \"\"\"\n in_div = BeautifulSoup(\"\"\"<div class=\"cell docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\"><pre>## R\n5^8\n</pre></div>\n</div>\n</div>\n
\"\"\", 'html.parser')\n in_pre = in_div.find('pre')\n preprocess_indentations = re.findall(r'(\\n\\s*)', str(in_pre))\n expected_indentations = [ind + (\" \" * len(\"In [1]: \"))\n for ind in preprocess_indentations]\n # need to process, numbering with that\n number_codeblock(in_pre, 0)\n\n # check indents\n postprocess_indentations = re.findall(r'(\\n\\s*)', str(in_pre))\n assert postprocess_indentations == expected_indentations\n\n\nclass TestCodeExamples:\n \"\"\"\n Tests around code blocks that should be rendered as \"Examples\" in the text,\n signaled by the \"tag_example\" class appended to the cell div\n \"\"\"\n\n def test_example_datatype_is_added(self, code_example_data_type):\n \"\"\"\n Test that when we see \"tag_example\" in a class list, the\n appropriate \"example\" data type is added\n \"\"\"\n result = process_code_examples(code_example_data_type)\n example_div = result.find(\"div\", class_=\"tag_example\")\n assert example_div.get(\"data-type\") == \"example\"\n\n def test_example_uuid_is_added(self, code_example_data_type):\n \"\"\"\n Test that when we see \"tag_example\" in a class list, the\n appropriate id for the example (based on first comment in\n code) is applied to the div with the \"example\" data-type\n \"\"\"\n result = process_code_examples(code_example_data_type)\n example_div = result.find(\"div\", class_=\"tag_example\")\n assert example_div[\"id\"] == \"hello\"\n\n def test_example_heading_is_added(self, code_example_data_type):\n \"\"\"\n Test that when we see \"tag_example\" in a class list, the\n appropriate heading for the example (based on 2nd comment in\n code) is added in an h5 tag to the div with the \"example\" data-type\n \"\"\"\n result = process_code_examples(code_example_data_type)\n example_div = result.find(\"div\", class_=\"tag_example\")\n assert example_div.find(\"h5\")\n assert example_div.find(\"h5\").string == \"An example example title\"\n\n def test_example_signal_comments_are_removed(self, code_example_data_type):\n \"\"\"\n We don't need (or want) the signaling names/headings to appear in\n the final book, so let's ensure those are removed, but also that\n we're not leaving a bunch of extra space there.\n \"\"\"\n result = process_code_examples(code_example_data_type)\n example_pre = result.find(\"pre\")\n assert not example_pre.find(\"span\", class_=\"c1\", string=\"# hello\")\n assert not example_pre.find(\"span\", class_=\"c1\",\n string=\"# An example example title\")\n assert \"\\n\" not in example_pre.contents[0:3]\n assert \"\\n\\n\" not in example_pre.contents[0:3]\n\n def test_malformed_example_missing_uuid(self, caplog):\n \"\"\"\n Ensure that we're logging failures (e.g., when an author doesn't\n include both a uuid and title)\n \"\"\"\n expect_fail = BeautifulSoup(\"\"\"\n
<div class=\"cell tag_example docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\">\n<pre><span class=\"c1\"># This is an example title</span>\n\ndef h():\n    pass</pre>\n</div>\n</div>\n</div>
\n\"\"\", \"html.parser\")\n caplog.set_level(logging.DEBUG)\n result = process_code_examples(expect_fail)\n log = caplog.text\n assert \"Unable to apply example formatting\" in log\n assert not result.find(\"div\", class_=\"highlight\").get(\"data-type\")\n\n def test_malformed_example_missing_title(self, caplog):\n \"\"\"\n Ensure that we're logging failures (e.g., when an author doesn't\n include both a uuid and title)\n \"\"\"\n expect_fail = BeautifulSoup(\"\"\"\n
<div class=\"cell tag_example docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\">\n<pre><span class=\"c1\"># hello</span>\n\ndef h():\n    pass</pre>\n</div>\n</div>\n</div>\n\"\"\", \"html.parser\")\n caplog.set_level(logging.DEBUG)\n result = process_code_examples(expect_fail)\n log = caplog.text\n assert \"Unable to apply example formatting\" in log\n assert not result.find(\"div\", class_=\"highlight\").get(\"data-type\")\n
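\n # Sketch (illustration only; hypothetical helper): the comment\n # convention the example tests above rely on. The first comment line\n # of an example cell supplies its id, the second its title; if either\n # is missing, the block is malformed and formatting is skipped.\n @staticmethod\n def _sketch_parse_example_comments(code_text):\n lines = code_text.strip().splitlines()\n if (len(lines) < 2 or not lines[0].startswith('#')\n or not lines[1].startswith('#')):\n return None # malformed: log it and leave the div untouched\n return lines[0].lstrip('# '), lines[1].lstrip('# ')\n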
\n def test_malformed_example_does_not_destroy_chapter(self, caplog):\n \"\"\"\n A regression test: we once returned a bad value on failures. Ensure\n that even if we don't add example formatting, we return the whole\n chapter rather than only the bad block.\n \"\"\"\n expect_fail = BeautifulSoup(\"\"\"\n
<div id=\"do_not_lose_me\">Hello!</div>\n<div class=\"cell tag_example docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\">\n<pre><span class=\"c1\"># This is an example title</span>\n\ndef h():\n    pass</pre>\n</div>\n</div>\n</div>\n\"\"\", \"html.parser\")\n caplog.set_level(logging.DEBUG)\n result = process_code_examples(expect_fail)\n log = caplog.text\n assert \"Unable to apply example formatting\" in log\n assert not result.find(\"div\", class_=\"highlight\").get(\"data-type\")\n assert result.find(\"div\", id=\"do_not_lose_me\")\n
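\n # Sketch (illustration only; the loop shape is an assumption): the\n # regression guarded above. Example processing should always hand back\n # the whole soup, never just the block that failed to format.\n @staticmethod\n def _sketch_process_all(soup):\n for div in soup.find_all('div', class_='tag_example'):\n try:\n pass # apply example formatting to div here\n except ValueError: # hypothetical failure mode\n continue # skip this block, keep the rest of the chapter\n return soup # the full document, even after failures\n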
\n def test_malformed_example_with_extra_comments_later(self, caplog):\n \"\"\"\n Ensure that we're logging failures when the identifying comments are\n not the first two lines of the cell (e.g., a comment appears later\n in the code instead).\n \"\"\"\n expect_fail = BeautifulSoup(\"\"\"\n
<div class=\"cell tag_example docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\">\n<pre><span class=\"c1\"># hello</span>\n\ndef h():\n    pass<span class=\"c1\">FAIL!</span></pre>\n</div>\n</div>\n</div>\n
\"\"\", \"html.parser\")\n caplog.set_level(logging.DEBUG)\n result = process_code_examples(expect_fail)\n log = caplog.text\n assert \"Missing first two line comments for\" in log\n assert not result.find(\"div\", class_=\"highlight\").get(\"data-type\")\n
\n def test_examples_and_highlight_in_chapter_processing(self, tmp_path):\n \"\"\"\n More of an integration test, ensuring that when we process a chapter\n the examples are data-typed as such, and that they still get their\n highlighting\n \"\"\"\n test_env = tmp_path / 'tmp'\n test_out = test_env / 'output'\n test_env.mkdir()\n test_out.mkdir()\n shutil.copytree('tests/example_book/_build/html/notebooks',\n test_env, dirs_exist_ok=True)\n\n process_chapter(test_env / \"code_py.html\",\n test_env, test_out)\n with open(test_out / 'code_py.html') as f:\n soup = BeautifulSoup(f.read(), \"html.parser\")\n\n examples = soup.find_all(\"div\", class_=\"tag_example\")\n assert len(examples) == 2\n for example_div in examples:\n assert example_div[\"data-type\"] == \"example\"\n assert example_div.find(\"h5\")\n assert example_div.find(\"pre\")[\"data-code-language\"] == \"python\"\n
\n def test_example_pulls_in_output(self):\n \"\"\"\n If the code example has any output, it should be included in the\n example div\n \"\"\"\n chapter = BeautifulSoup(\"\"\"\n
<div class=\"cell tag_example docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\">\n<pre><span class=\"c1\"># hello_tim_with_output</span>\n<span class=\"c1\"># An example, but with output</span>\n\nhello()\n</pre>\n</div>\n</div>\n<div class=\"cell_output docutils container\">\n<div class=\"output stream docutils container\">\n<pre>Hello, Tim! Nice to meet you!\n</pre>\n</div>\n</div>\n</div>\n
\"\"\", \"html.parser\")\n result = process_code_examples(chapter)\n example_div = result.find(\"div\", class_=\"tag_example\")\n assert example_div.find(\"div\", class_=\"cell_output\")\n
\n def test_example_in_r(self, code_example_data_type_r):\n result = process_code_examples(code_example_data_type_r)\n example_div = result.find(\"div\", class_=\"tag_example\")\n assert example_div.get(\"data-type\") == \"example\"\n assert example_div.get(\"id\") == \"example_r\"\n assert example_div.find(\"h5\", string=\"A formal R example\")\n assert (\"# example_r\\n# A formal R example\\n\" not in\n example_div.find(\"pre\").contents[3])\n
\n def test_examples_and_highlight_in_chapter_processing_r(self, tmp_path):\n \"\"\"\n More of an integration test, ensuring that when we process a chapter\n the examples are data-typed as such, and that they still get their\n highlighting\n \"\"\"\n test_env = tmp_path / 'tmp'\n test_out = test_env / 'output'\n test_env.mkdir()\n test_out.mkdir()\n shutil.copytree('tests/example_book/_build/html/notebooks',\n test_env, dirs_exist_ok=True)\n\n process_chapter(test_env / \"code_r.html\",\n test_env, test_out)\n with open(test_out / 'code_r.html') as f:\n soup = BeautifulSoup(f.read(), \"html.parser\")\n\n examples = soup.find_all(\"div\", class_=\"tag_example\")\n assert len(examples) == 1\n assert examples[0][\"data-type\"] == \"example\"\n assert examples[0].find(\"h5\")\n assert examples[0].find(\"pre\")[\"data-code-language\"] == \"r\"\n
\n def test_examples_malformed_r(self, caplog):\n \"\"\"\n What do we do if an example R block doesn't have the correct\n comment hashes? We do nothing!\n \"\"\"\n malformed_example = BeautifulSoup(\"\"\"\n
<div class=\"cell tag_example docutils container\">\n<div class=\"cell_input docutils container\">\n<div class=\"highlight\"><pre>%%R\n# example_r\n## R\n5^8\n</pre></div>\n</div>\n</div>\n\"\"\", \"html.parser\")\n caplog.set_level(logging.DEBUG)\n result = process_code_examples(malformed_example)\n example_div = result.find(\"div\", class_=\"tag_example\")\n log = caplog.text\n assert not example_div.get(\"data-type\") == \"example\"\n assert not example_div.get(\"id\") == \"example_r\"\n assert not example_div.find(\"h5\", string=\"A formal R example\")\n assert result == malformed_example\n assert \"Missing first two line comments for\" in log\n
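\n\n# Sketch (illustration only; hypothetical helper): the unwrapping that\n# TestInlineCode below exercises. Spans inside inline code tags are\n# removed while their text is kept.\ndef _sketch_unwrap_inline(soup):\n for code in soup.find_all('code'):\n for span in code.find_all('span'):\n span.unwrap() # bs4: replace the tag with its contents\n return soup\n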
\n\nclass TestInlineCode:\n \"\"\"\n Smoke tests around the translation of inline code\n \"\"\"\n\n def test_unwrap_inline_spans(self):\n \"\"\"\n We should not allow spans inside inline code\n \"\"\"\n\n html = BeautifulSoup(\"\"\"\n
<p>Some text with\n<code class=\"docutils literal notranslate\"><span class=\"pre\">code</span></code>.</p>\n
\"\"\",\n \"html.parser\")\n result = process_inline_code(html)\n\n assert not result.find(\"span\")\n assert str(result.find(\"code\")) == \"<code>code</code>\"\n
\n def test_unwrap_inline_spans_does_not_affect_pre(self,\n code_example_data_type):\n expected = str(code_example_data_type)\n result = process_inline_code(code_example_data_type)\n assert str(result) == expected\n
\n\nclass TestKeepHighlighting:\n \"\"\"\n Tests around preserving highlighting provided by Jupyter Book, but doing\n enough to ensure they appear correctly inside Atlas\n \"\"\"\n\n def test_pre_spans_to_code_tags(self):\n \"\"\"\n Smoke test that we're correctly converting <span>s into <code>s inside\n pre tags with highlighting\n \"\"\"\n chapter = BeautifulSoup(\"\"\"
\n<div class=\"highlight\"><pre><span>some code</span></pre></div>
\n\"\"\", \"html.parser\")\n result = pre_spans_to_code_tags(chapter)\n assert result.find(\"pre\").find(\"code\") # type: ignore\n","repo_name":"oreillymedia/jupyter-book-to-htmlbook","sub_path":"tests/test_code_processing.py","file_name":"test_code_processing.py","file_ext":"py","file_size_in_byte":24647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"73767829851","text":"import random\n\na=int(input(\"How many dices you want to roll?\\n\"))\n\ndiceOutputs=[] #An empty list to store all outputs off dices rolls\n\nfor i in range(0,a):\n rd=random.randint(1,6) #Outputs of every dice are randomly guessed\n diceOutputs.append(rd) #Each dice roll output is then stored in the list\n\nprint(\"Outputs of dices are as follows:-\\n\"+str(diceOutputs))","repo_name":"Arthraj/Python-Projects","sub_path":"Dice_Roll.py","file_name":"Dice_Roll.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70336218651","text":"import requests\nimport json\nimport time\nimport codecs\nimport sys\n\njson_start_mark = \"